#!/usr/bin/env python
# coding=utf8
#
# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
#
import sys, os, os.path, re
import urllib, urllib2
from xml.sax.saxutils import unescape, escape
from urllib import quote, unquote
import datetime, time
import HTMLParser
import json
from SourceBase import SourceBase, stream_type, stream0
from collections import OrderedDict

API_URL = 'https://m.lattelecom.tv/'
user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
h = HTMLParser.HTMLParser()


class Source(SourceBase):

    def __init__(self):
        self.name = "iplayer"
        self.title = "BBC iPlayer"
        self.img = "http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png"
        self.desc = "BBC iPlayer portal content"
        self.api_url = "http://ibl.api.bbci.co.uk/ibl/v1/"
        self.headers = headers2dict("""
User-Agent: BBCiPlayer/4.19.0.3021 (SM-G900FD; Android 4.4.2)
Connection: Keep-Alive
""")
        self.headers2 = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
Connection: Keep-Alive
""")
        self.ch = []
        self.ch_id = {}
        self.ch_id2 = {}
        self.ch_name = {}
        self.logos = {
            "bbc_one_london": "http://www.lyngsat-logo.com/hires/bb/bbc_one.png",
            "bbc_two_england": "http://www.lyngsat-logo.com/hires/bb/bbc_two_uk.png",
            "bbc_three": "http://www.lyngsat-logo.com/hires/bb/bbc_three_uk.png",
            "bbc_four": "http://www.lyngsat-logo.com/hires/bb/bbc_four_uk.png",
            "bbc_radio_one": "http://www.lyngsat-logo.com/hires/bb/bbc_radio1.png",
            "cbbc": "http://www.lyngsat-logo.com/hires/bb/bbc_cbbc.png",
            "cbeebies": "http://www.lyngsat-logo.com/hires/bb/bbc_cbeebies_uk.png",
            "bbc_news24": "http://www.lyngsat-logo.com/hires/bb/bbc_news.png",
            "bbc_parliament": "http://www.lyngsat-logo.com/hires/bb/bbc_parliament.png",
            "bbc_alba": "http://www.lyngsat-logo.com/hires/bb/bbc_alba.png",
            "s4cpbs": "http://www.lyngsat-logo.com/hires/ss/s4c_uk.png"
        }
        cur_directory = os.path.dirname(os.path.abspath(__file__))
        self.config_file = os.path.join(cur_directory, self.name + ".cfg")
        self.options = OrderedDict([("user", "lietotajs"), ("password", "parole")])
        self.options_read()

    def get_content(self, data):
        print "[iplayer] get_content:", data
        if "::" in data:
            data = data.split("::")[1]
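        # Content addresses follow the PlayStream convention "<source>::<path>?<query>",
        # e.g. "iplayer::categories/films/highlights?lang=en" (illustrative value, not
        # taken from the API): the prefix selects this source, the path selects the iBL
        # endpoint and the query string carries paging/filter parameters.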
        path = data.split("?")[0]
        clist = path.split("/")[0]
        params = data[data.find("?"):] if "?" in data else ""
        qs = dict(map(lambda x: x.split("="), re.findall("\w+=[\w-]+", params)))
        #lang = qs["lang"] if "lang" in qs else self.country
        content = []
        content.append(("..return", "back", "", "Return back"))

        ### Home ###
        if data == "home":
            content.extend([
                ("Search TV", "iplayer::search/{0}", "", "Search in iPlayer"),
                ("Live streams", "iplayer::live", "", "TV live streams"),
                ("Channels", "iplayer::channels", "", "Programmes by channel/date"),
                ("Categories", "iplayer::categories", "", "Programmes by categories"),
                ("A-Z", "iplayer::a-z", "", "All programmes by name"),
                ("Highlights", "iplayer::home/highlights", "", "Current highlights"),
                ("Most popular", "iplayer::groups/popular/episodes?per_page=40&page=1", "", "Most popular programmes")
            ])
            return content

        ### Search ###
        elif clist == "search":
            data_ = "search-suggest/?q=%s&rights=mobile&initial_child_count=1" % data.split("/")[1]
            r = self.call(data_)
            for item in r["search_suggest"]["results"]:
                title, data2, img, desc = self.get_data_element(item)
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ### Live main ###
        elif data == "live":
            for ch in self.get_channels():
                title = ch["title"]
                img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
                desc = title
                data2 = "live/%s" % ch["id"]
                ee = self.get_epg_live(ch["id"])
                desc = ee[2]
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ### Categories ###
        elif data == "categories":
            r = self.call(data)
            if "categories" not in r:
                raise Exception("Error reading categories")
            for item in r["categories"]:
                data2 = "categories/%s" % (item["id"])
                title = item["title"]
                desc = title
                img = self.img
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ### Category root ###
        elif clist == "categories" and len(data.split("/")) == 2:
            r = self.call(data)
            title = "%s - highlights" % r["category"]["title"]
            content.append((title, self.name + "::" + data + "/highlights?lang=en&rights=mobile&availability=available", self.img, title))
            title = "%s - recent (%s programmes, %s episodes)" % (r["category"]["title"], r["category"]["child_programme_count"], r["category"]["child_episode_count"])
            content.append((title, self.name + "::" + data + "/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available", self.img, title))
            title = "%s - a-z (%s programmes, %s episodes)" % (r["category"]["title"], r["category"]["child_programme_count"], r["category"]["child_episode_count"])
            content.append((title, self.name + "::" + data + "/programmes?rights=mobile&page=1&per_page=40&sort=title&sort_direction=asc&initial_child_count=1&availability=available", self.img, title))
            return content
r["channel_highlights"] if "channel_highlights" in r else\ r["channel_programmes"] if "channel_programmes" in r else\ r["home_highlights"] if "home_highlights" in r else\ [] if not lst: return content for el in lst["elements"]: if el["type"] == "broadcast": if not len(el["episode"]["versions"]):continue title,data2,img,desc = self.get_data_element(el["episode"]) t1 = gt(el['scheduled_start']) t2 = gt(el['scheduled_end']) title = "[%s-%s]%s"%(t1.strftime("%d.%m.%Y %H:%M"),t2.strftime("%H:%M"),title) else: title,data2,img,desc = self.get_data_element(el) content.append((title,self.name+"::"+data2,img,desc)) if "&page=" in data and lst["page"]*lst["per_page"] "video": continue for c in s["connection"]: if c["transferFormat"] <> "hls": continue #if not (c["supplier"].startswith("mf_") or c["supplier"].startswith("ll_")) : continue # TODO ir kaut kādas VPN problēmas ar akamaihd #if c["priority"] <> "1": continue url=c["href"].encode("utf8") r2 = self._http_request(url) if not r2: continue slist = re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", r2, re.DOTALL) if not slist: stream = stream0.copy() stream["url"]=url stream["name"]=title stream["desc"]=desc stream["img"]=img stream["type"]="hls" stream["quality"]=("%s %sx%s %s,%s"%(s["bitrate"],s["width"],s["height"],c["supplier"],c["priority"])).encode("utf8") stream["lang"]="en" stream["subs"]=captions stream["order"]=int(s["bitrate"]) streams.append(stream) else: for cc in slist: m = re.search("RESOLUTION=([\dx]+)",cc[0]) resolution = m.group(1) if m else "%sx%s"%(s["width"],s["height"]) m = re.search("BANDWIDTH=([\d]+)",cc[0]) bitrate = m.group(1) if m else s["bitrate"] url2 = cc[1].encode("utf8") if not url2.startswith("http"): uu = url.split("/")[:-1] uu.append(url2) url2 = "/".join(uu) stream = stream0.copy() stream["url"]=url2 stream["name"]=title stream["desc"]=desc stream["img"]=img stream["type"]="hls" stream["quality"]=("%s %s %s,%s"%(bitrate,resolution,c["supplier"],c["priority"])).encode("utf8") stream["lang"]="en" stream["subs"]=captions stream["order"]=int(bitrate) streams.append(stream) if captions: for s in streams: s["subs"]=captions streams = sorted(streams,key=lambda item: item["order"],reverse=True) return streams def is_video(self,data): if "::" in data: data = data.split("::")[1] cmd = data.split("/") if cmd[0]=="live" and len(cmd)==2: return True elif cmd[0]=="episodes" and len(cmd)==2: return True else: return False def get_data_element(self,item): if ("programme" in item["type"] or "group" in item["type"]) and item["count"]>1: ep = item.copy() elif ("programme" in item["type"] or "group" in item["type"]) and item["count"]==1: ep = item["initial_children"][0].copy() elif item["type"] == "episode": ep = item.copy() elif item["type"] == "broadcast": ep = item["episode"].copy() else: ep = item.copy() title = ep["title"] if "subtitle" in ep and ep["subtitle"]: title = title+". 
"+ ep["subtitle"] desc = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"] #TODO papildus info pie apraksta img = ep["images"]["standard"].replace("{recipe}","512x288") if "images" in ep else self.img if ep["type"] == "episode": data2 = "episodes/%s"%ep["id"] elif "programme" in ep["type"]: data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"] title = "%s [%s episodes]"%(title,ep["count"]) elif "group" in ep["type"]: data2 = "groups/%s/episodes?per_page=40&page=1"%ep["id"] title = "%s [%s episodes]"%(title,ep["count"]) else: data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"] title = "%s [%s episodes]"%(title,ep["count"]) return title,data2,img,desc def get_epg_video(self,vid): data = "episodes/%s"%vid r = self.call(data) if "episodes" in r : ep = r["episodes"][0] title = ep["title"] if "subtitle" in ep: title = title +". "+ ep["subtitle"] title = title desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title desc = desc ver = ep["versions"][0] vid = ver["id"] remaining = ver["availability"]["remaining"]["text"] duration = ver["duration"] first_broadcast = ver["first_broadcast"] desc =u"%s\n%s\%s\n%s\n%s"%(title,duration,remaining,first_broadcast,desc) img = ep["images"]["standard"].replace("{recipe}","512x288") return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),vid.encode("utf8") else: raise Exception("No video info") def get_epg_live(self,channelid): data = "channels/%s/highlights?live=true"%channelid r = self.call(data) if "channel_highlights" in r and r["channel_highlights"]["elements"][0]["id"] == "live": epg = r["channel_highlights"]["elements"][0]["initial_children"][0].copy() t1 = gt(epg['scheduled_start']) t2 = gt(epg['scheduled_end']) ep = epg["episode"] title = ep["title"] if "subtitle" in ep: title = title +". 
"+ ep["subtitle"] title = "%s (%s-%s)"%(title,t1.strftime("%H:%M"),t2.strftime("%H:%M")) title = title desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title desc = desc desc ="%s\n%s"%(title,desc) img = ep["images"]["standard"].replace("{recipe}","512x288") #return title,img,desc else: title = r["channel_highlights"]["channel"]["title"] img = "" desc = title return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8") def get_channels(self): if self.ch: return self.ch r= self.call("channels") self.ch=[] for i,item in enumerate(r["channels"]): self.ch.append(item) self.ch_id[item["id"]]=i self.ch_id2[item["master_brand_id"]]=i self.ch_name[item["title"]]=i return self.ch def get_channel_by_id(self,chid): if not self.ch: self.get_channels() if not self.ch: return None return self.ch[self.ch_id[chid]] if self.ch_id.has_key(chid) else None def get_channel_by_id2(self,chid): if not self.ch: self.get_channels() if not self.ch: return None return self.ch[self.ch_id2[chid]] if self.ch_id2.has_key(chid) else None def get_channel_by_name(self,name): if not self.ch: self.get_channels() ch2 = self.get_channel_by_name2(name) if not ch2: return None ch = self.get_channel_by_id2(ch2["id2"]) return ch def call(self, data,params = None, headers=None): if not headers: headers = self.headers #if not lang: lang = self.country url = self.api_url + data content = self._http_request(url,params, headers) if content: try: result = json.loads(content) return result except Exception, ex: return None else: return None def call2(self, data,params = None, headers=None): if not headers: headers = self.headers2 #if not lang: lang = self.country url = self.api_url2 + data content = self._http_request(url,params, headers) return content def _http_request(self, url,params = None, headers=None): if not headers: headers = self.headers import requests try: r = requests.get(url, headers=headers) return r.content except Exception as ex: if ex.code==403: return ex.read() else: return None def gt(dt_str): dt, _, us= dt_str.partition(".") dt= datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") dt = dt - datetime.timedelta(seconds=time.altzone) #us= int(us.rstrip("Z"), 10) #r = dt + datetime.timedelta(microseconds=us)a return dt if __name__ == "__main__": c = Source() from subprocess import call #ch = c.get_channels() #c.get_epg_live("bbc_two_england") if len(sys.argv)>1 and not "iplayer::" in sys.argv[1]: vid = sys.argv[1] print "login - %s"%c.login("ivars777","xxx") vid = "1069" vid = "1462566072086" channelid="101" vid = "1350462656767" #data = c.get_stream_url(vid,"vod") #call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",data["stream"]]) pass else: if len(sys.argv)>1: data= sys.argv[1] else: data = "iplayer::home" content = c.get_content(data) for item in content: print item #cat = api.get_categories(country) #chan = api.get_channels("lv") #prog = api.get_programs(channel=6400) #prog = api.get_programs(category=55) #seas = api.get_seasons(program=6453) #str = api.get_streams(660243) #res = api.get_videos(802) #formats = api.getAllFormats() #det = api.detailed("1516") #vid = api.getVideos("13170") pass