Enigma2 plugin to play various online streams (mostly Latvian).

  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. import sys, os, os.path, re, sys
  9. import urllib,urllib2
  10. from xml.sax.saxutils import unescape,escape
  11. from urllib import quote, unquote
  12. import datetime
  13. import HTMLParser
  14. import json
  15. import datetime,time
  16. from SourceBase import SourceBase, stream_type
  17. import util
  18. from collections import OrderedDict
  19. import ssl
  20. if "_create_unverified_context" in dir(ssl):
  21. ssl._create_default_https_context = ssl._create_unverified_context
  22. API_URL = 'https://m.lattelecom.tv/'
  23. user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
  24. headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
  25. h = HTMLParser.HTMLParser()
  26. class Source(SourceBase):
  27. def __init__(self,cfg_path=None):
  28. self.name = "iplayer"
  29. self.title = "BBC iPlayer"
  30. self.img = "http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png"
  31. self.desc = "BBC iPlayer portal content"
  32. self.api_url = "http://ibl.api.bbci.co.uk/ibl/v1/"
  33. self.headers = headers2dict("""
  34. User-Agent: BBCiPlayer/4.19.0.3021 (SM-G900FD; Android 4.4.2)
  35. Connection: Keep-Alive
  36. """)
  37. self.headers2 = headers2dict("""
  38. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
  39. Connection: Keep-Alive
  40. """)
  41. self.ch = []
  42. self.ch_id={}
  43. self.ch_id2={}
  44. self.ch_name={}
  45. self.logos ={
  46. "bbc_one_london":"http://www.lyngsat-logo.com/hires/bb/bbc_one.png",
  47. "bbc_two_england":"http://www.lyngsat-logo.com/hires/bb/bbc_two_uk.png",
  48. "bbc_three":"http://www.lyngsat-logo.com/hires/bb/bbc_three_uk.png",
  49. "bbc_four":"http://www.lyngsat-logo.com/hires/bb/bbc_four_uk.png",
  50. "bbc_radio_one":"http://www.lyngsat-logo.com/hires/bb/bbc_radio1.png",
  51. "cbbc":"http://www.lyngsat-logo.com/hires/bb/bbc_cbbc.png",
  52. "cbeebies":"http://www.lyngsat-logo.com/hires/bb/bbc_cbeebies_uk.png",
  53. "bbc_news24":"http://www.lyngsat-logo.com/hires/bb/bbc_news.png",
  54. "bbc_parliament":"http://www.lyngsat-logo.com/hires/bb/bbc_parliament.png",
  55. "bbc_alba":"http://www.lyngsat-logo.com/hires/bb/bbc_alba.png",
  56. "s4cpbs":"http://www.lyngsat-logo.com/hires/ss/s4c_uk.png"
  57. }
  58. cur_directory = os.path.dirname(os.path.abspath(__file__))
  59. if not cfg_path: cfg_path = cur_directory
  60. self.config_file = os.path.join(cfg_path,self.name+".cfg")
  61. self.options = OrderedDict([("user","lietotajs"),("password","parole")])
  62. self.options_read()
  63. def get_content(self, data):
  64. print "[iplayer] get_content:", data
  65. if "::" in data:
  66. data = data.split("::")[1]
  67. path = data.split("?")[0]
  68. clist = path.split("/")[0]
  69. params = data[data.find("?"):] if "?" in data else ""
  70. qs = dict(map(lambda x:x.split("="),re.findall("\w+=[\w-]+",params)))
  71. #lang = qs["lang"] if "lang" in qs else self.country
  72. content=[]
  73. content.append(("..return", "back","","Return back"))
  74. ### Home ###
  75. if data=="home":
  76. content.extend([
  77. ("Search TV", "iplayer::search/{0}","","Search in iPlayer"),
  78. ("Live streams", "iplayer::live","","TV live streams"),
  79. ("Channels", "iplayer::channels","","Programmes by channel/date"),
  80. ("Categories", "iplayer::categories","","Programmes by categories"),
  81. ("A-Z", "iplayer::a-z","","All programmes by name"),
  82. ("Highlights", "iplayer::home/highlights","","Current highlights"),
  83. ("Most popular", "iplayer::groups/popular/episodes?per_page=40&page=1","","Most popular programmes")
  84. ])
  85. return content
  86. ### Search ###
  87. elif clist=="search":
  88. data_ = "search-suggest/?q=%s&rights=mobile&initial_child_count=1"%data.split("/")[1]
  89. r = self.call(data_)
  90. for item in r["search_suggest"]["results"]:
  91. title,data2,img,desc = self.get_data_element(item)
  92. content.append((title,self.name+"::"+data2,img,desc))
  93. return content
  94. ### Live main ###
  95. elif data=="live":
  96. for ch in self.get_channels():
  97. title = ch["title"]
  98. img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  99. desc = title
  100. data2 = "live/%s"%ch["id"]
  101. ee = self.get_epg_live(ch["id"])
  102. desc = ee[2]
  103. content.append((title,self.name+"::"+data2,img,desc))
  104. return content
  105. ### Categories ###
  106. elif data == "categories":
  107. r = self.call(data)
  108. if not "categories":
  109. raise Exception("Error reading categories")
  110. for item in r["categories"]:
  111. data2 = "categories/%s"%(item["id"])
  112. title = item["title"]
  113. desc = title
  114. img = self.img
  115. content.append((title,self.name+"::"+data2,img,desc))
  116. return content
  117. ### Catetory root ###
  118. elif clist == "categories" and len(data.split("/"))==2:
  119. r = self.call(data)
  120. title = "%s - highlights"%r["category"]["title"]
  121. content.append((title,self.name+"::"+data+"/highlights?lang=en&rights=mobile&availability=available",self.img,title))
  122. title = "%s - recent (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
  123. content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
  124. title = "%s - a-z (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
  125. content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=title&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
  126. return content
  127. ### Program/episodes list ###
  128. elif re.search("categories/([\w\-]+)/(highlights|programmes).+",data) or\
  129. re.search("programmes/(\w+)/episodes.+",data) or\
  130. re.search("groups/(\w+)/episodes.+",data) or\
  131. re.search("atoz/([\w]+)/programmes.+",data) or\
  132. re.search("channels/(\w+)/schedule/[\d\-].+",data) or\
  133. re.search("channels/(\w+)/programmes.+",data) or\
  134. re.search("channels/(\w+)/highlights.+",data) or\
  135. data == "home/highlights":
  136. r = self.call(data)
  137. lst = r["category_highlights"] if "category_highlights" in r else\
  138. r["category_programmes"] if "category_programmes" in r else\
  139. r["programme_episodes"] if "programme_episodes" in r else\
  140. r["atoz_programmes"] if "atoz_programmes" in r else\
  141. r["group_episodes"] if "group_episodes" in r else\
  142. r["schedule"] if "schedule" in r else\
  143. r["channel_highlights"] if "channel_highlights" in r else\
  144. r["channel_programmes"] if "channel_programmes" in r else\
  145. r["home_highlights"] if "home_highlights" in r else\
  146. []
  147. if not lst:
  148. return content
  149. for el in lst["elements"]:
  150. if el["type"] == "broadcast":
  151. if not len(el["episode"]["versions"]):continue
  152. title,data2,img,desc = self.get_data_element(el["episode"])
  153. t1 = gt(el['scheduled_start'])
  154. t2 = gt(el['scheduled_end'])
  155. title = "[%s-%s]%s"%(t1.strftime("%d.%m.%Y %H:%M"),t2.strftime("%H:%M"),title)
  156. else:
  157. title,data2,img,desc = self.get_data_element(el)
  158. content.append((title,self.name+"::"+data2,img,desc))
  159. if "&page=" in data and lst["page"]*lst["per_page"]<lst["count"]:
  160. data2 = re.sub("&page=\d+","&page=%s"%(lst["page"]+1),data)
  161. content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
  162. return content
  163. ### A-z root ###
  164. elif data=="a-z":
  165. url = "http://www.bbc.co.uk/programmes/a-z/by/x/all.json?page=1"
  166. r = self._http_request(url)
  167. if not r:
  168. raise Exception("Can not read %s"%s)
  169. js = json.loads(r)
  170. for ch in js["atoz"]["letters"]:
  171. title = ch.upper()
  172. desc = "Programmes beginning with %s"%title
  173. img = self.img
  174. data2 = "atoz/%s/programmes?rights=mobile&page=1&per_page=40&initial_child_count=1&sort=title&sort_direction=asc&availability=available"%ch
  175. content.append((title,self.name+"::"+data2,img,desc))
  176. return content
  177. ### Channels home ###
  178. elif data=="channels":
  179. for ch in self.get_channels():
  180. title = ch["title"]
  181. img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  182. desc = title
  183. data2 = "channels/%s"%ch["id"]
  184. #ee = self.get_epg_live(ch["id"])
  185. desc = title
  186. content.append((title,self.name+"::"+data2,img,desc))
  187. return content
  188. ### Channel higlihts/progrmmes/days ###
  189. elif clist=="channels" and len(data.split("/"))==2:
  190. r = self.call(data)
  191. chid = data.split("/")[1]
  192. ch = self.get_channel_by_id(chid)
  193. # Highlights
  194. title = ch["title"] + " - highlights"
  195. img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  196. data2 = "channels/%s/highlights?lang=en&rights=mobile&availability=available"%ch["id"]
  197. desc = title
  198. content.append((title,self.name+"::"+data2,img,desc))
  199. #AtoZ
  200. title = ch["title"] + " - programmes AtoZ"
  201. data2 = "channels/%s/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available"%ch["id"]
  202. desc = title
  203. content.append((title,self.name+"::"+data2,img,desc))
  204. day0 = datetime.date.today()
  205. for i in range(10):
  206. day = day0-datetime.timedelta(days=i)
  207. days = day.strftime("%Y-%m-%d")
  208. title = ch["title"] + " - " + days
  209. img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  210. data2 = "channels/%s/schedule/%s?availability=available"%(ch["id"],days)
  211. #ee = self.get_epg_live(ch["id"])
  212. desc = title
  213. content.append((title,self.name+"::"+data2,img,desc))
  214. return content
  215. def get_streams(self, data):
  216. print "[iplayer] get_streams:", data
  217. if "::" in data: data = data.split("::")[1]
  218. if not self.is_video(data):
  219. return []
  220. cmd = data.split("/")
  221. vid = cmd[1].split("?")[0]
  222. if cmd[0] == "live":
  223. title,img,desc,nfo = self.get_epg_live(vid)
  224. else:
  225. #data_ = "episodes/%s"%vid
  226. #r = self.call(data_)
  227. title,img,desc,vid,nfo = self.get_epg_video(vid)
  228. url = "http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/format/json/mediaset/iptv-all/vpid/%s"%vid
  229. print "vid=%s"%vid
  230. print url
  231. r = self._http_request(url) #,headers=self.headers2
  232. if not r:
  233. raise Exception("No streams found")
  234. js = json.loads(r)
  235. if "result" in js and js["result"]=="geolocation":
  236. raise Exception("BBC iPlayer service available only from UK")
  237. if not "media" in js:
  238. raise Exception("No streams found")
  239. streams = []
  240. captions = []
  241. for s in js["media"]:
  242. if s["kind"] == "captions":
  243. if s["connection"][0]["href"]:
  244. sub = {}
  245. sub["url"] = s["connection"][0]["href"].encode('utf8')
  246. sub["type"] = s["type"]
  247. sub["name"] = s["service"] if "service" in s else "captions (taff)"
  248. sub["lang"] = "en"
  249. captions.append(sub)
  250. if s["kind"] <> "video":
  251. continue
  252. for c in s["connection"]:
  253. if c["transferFormat"] <> "hls": continue
  254. #if not (c["supplier"].startswith("mf_") or c["supplier"].startswith("ll_")) : continue # TODO ir kaut kādas VPN problēmas ar akamaihd
  255. #if c["priority"] <> "1": continue
  256. url=c["href"].encode("utf8")
  257. r2 = self._http_request(url)
  258. if not r2: continue
  259. slist = re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", r2, re.DOTALL)
  260. if not slist:
  261. stream = util.item()
  262. stream["url"]=url
  263. stream["name"]=title
  264. stream["desc"]=desc
  265. stream["img"]=img
  266. stream["type"]="hls"
  267. stream["quality"]=("%s %sx%s %s,%s"%(s["bitrate"],s["width"],s["height"],c["supplier"],c["priority"])).encode("utf8")
  268. stream["lang"]="en"
  269. stream["subs"]=captions
  270. stream["order"]=int(s["bitrate"])
  271. stream["nfo"] = nfo
  272. streams.append(stream)
  273. else:
  274. for cc in slist:
  275. m = re.search("RESOLUTION=([\dx]+)",cc[0])
  276. resolution = m.group(1) if m else "%sx%s"%(s["width"],s["height"])
  277. m = re.search("BANDWIDTH=([\d]+)",cc[0])
  278. bitrate = m.group(1) if m else s["bitrate"]
  279. url2 = cc[1].encode("utf8")
  280. if not url2.startswith("http"):
  281. uu = url.split("/")[:-1]
  282. uu.append(url2)
  283. url2 = "/".join(uu)
  284. stream = util.item()
  285. stream["url"]=url2
  286. stream["name"]=title
  287. stream["desc"]=desc
  288. stream["img"]=img
  289. stream["type"]="hls"
  290. stream["quality"]=("%s %s %s,%s"%(bitrate,resolution,c["supplier"],c["priority"])).encode("utf8")
  291. stream["lang"]="en"
  292. stream["subs"]=captions
  293. stream["order"]=int(bitrate)
  294. stream["nfo"] = nfo
  295. streams.append(stream)
  296. if captions:
  297. for s in streams:
  298. s["subs"]=captions
  299. streams = sorted(streams,key=lambda item: item["order"],reverse=True)
  300. return streams
  301. def is_video(self,data):
  302. if "::" in data:
  303. data = data.split("::")[1]
  304. cmd = data.split("/")
  305. if cmd[0]=="live" and len(cmd)==2:
  306. return True
  307. elif cmd[0]=="episodes" and len(cmd)==2:
  308. return True
  309. else:
  310. return False
  311. def get_data_element(self,item):
  312. if ("programme" in item["type"] or "group" in item["type"]) and item["count"]>1:
  313. ep = item.copy()
  314. elif ("programme" in item["type"] or "group" in item["type"]) and item["count"]==1:
  315. ep = item["initial_children"][0].copy()
  316. elif item["type"] == "episode":
  317. ep = item.copy()
  318. elif item["type"] == "broadcast":
  319. ep = item["episode"].copy()
  320. else:
  321. ep = item.copy()
  322. title = ep["title"]
  323. if "subtitle" in ep and ep["subtitle"]:
  324. title = title+". "+ ep["subtitle"]
  325. desc = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"]
  326. #TODO papildus info pie apraksta
  327. img = ep["images"]["standard"].replace("{recipe}","512x288") if "images" in ep else self.img
  328. if ep["type"] == "episode":
  329. data2 = "episodes/%s"%ep["id"]
  330. elif "programme" in ep["type"]:
  331. data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
  332. title = "%s [%s episodes]"%(title,ep["count"])
  333. elif "group" in ep["type"]:
  334. data2 = "groups/%s/episodes?per_page=40&page=1"%ep["id"]
  335. title = "%s [%s episodes]"%(title,ep["count"])
  336. else:
  337. data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
  338. title = "%s [%s episodes]"%(title,ep["count"])
  339. return title,data2,img,desc
  340. def get_epg_video(self,vid):
  341. data = "episodes/%s"%vid
  342. nfo = {}
  343. r = self.call(data)
  344. if "episodes" in r :
  345. ep = r["episodes"][0]
  346. title = ep["title"]
  347. if "subtitle" in ep:
  348. title = title +". "+ ep["subtitle"]
  349. title = title
  350. desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  351. desc = desc
  352. ver = ep["versions"][0]
  353. vid = ver["id"]
  354. remaining = ver["availability"]["end"].split("T")[0] #["remaining"]["text"]
  355. duration = ver["duration"]["text"]
  356. first_broadcast = ver["first_broadcast"]
  357. desc =u"%s\n%s\%s\n%s\n%s"%(title,duration,remaining,first_broadcast,desc)
  358. img = ep["images"]["standard"].replace("{recipe}","512x288")
  359. #Create nfo dictionary
  360. tt = lambda dd,k,d: dd[k] if k in dd else d
  361. nfo_type = "movie" if True else "tvswhow" # TODO
  362. t = OrderedDict()
  363. t["title"] = title
  364. t["originaltitle"] = tt(ep,"original_title","")
  365. t["thumb"] = img
  366. t["id"] = vid
  367. t["outline"] = ep["synopses"]["small"] if "small" in ep["synopses"] else ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
  368. t["plot"] = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  369. t["tagline"] = ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
  370. t["runtime"] = tt(ver["duration"],"text","")
  371. t["premiered"] = tt(ep,"release_date","")
  372. t["aired"] = ver["availability"]["start"].split("T")[0] if "start" in ver["availability"] else ""
  373. if "parent_position" in ep: t["episode"] = ep["parent_position"]
  374. nfo[nfo_type] = t
  375. return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),vid.encode("utf8"),nfo
  376. else:
  377. raise Exception("No video info")
  378. def get_epg_live(self,channelid):
  379. data = "channels/%s/highlights?live=true"%channelid
  380. r = self.call(data)
  381. nfo = {}
  382. if "channel_highlights" in r and r["channel_highlights"]["elements"][0]["id"] == "live":
  383. epg = r["channel_highlights"]["elements"][0]["initial_children"][0].copy()
  384. t1 = gt(epg['scheduled_start'])
  385. t2 = gt(epg['scheduled_end'])
  386. ep = epg["episode"]
  387. title = ep["title"]
  388. if "subtitle" in ep:
  389. title = title +". "+ ep["subtitle"]
  390. title = "%s (%s-%s)"%(title,t1.strftime("%H:%M"),t2.strftime("%H:%M"))
  391. title = title
  392. desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  393. desc = desc
  394. desc ="%s\n%s"%(title,desc)
  395. img = ep["images"]["standard"].replace("{recipe}","512x288")
  396. #return title,img,desc
  397. else:
  398. title = r["channel_highlights"]["channel"]["title"]
  399. img = ""
  400. desc = title
  401. return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),nfo
  402. def get_channels(self):
  403. if self.ch:
  404. return self.ch
  405. r= self.call("channels")
  406. self.ch=[]
  407. for i,item in enumerate(r["channels"]):
  408. self.ch.append(item)
  409. self.ch_id[item["id"]]=i
  410. self.ch_id2[item["master_brand_id"]]=i
  411. self.ch_name[item["title"]]=i
  412. return self.ch
  413. def get_channel_by_id(self,chid):
  414. if not self.ch:
  415. self.get_channels()
  416. if not self.ch:
  417. return None
  418. return self.ch[self.ch_id[chid]] if self.ch_id.has_key(chid) else None
  419. def get_channel_by_id2(self,chid):
  420. if not self.ch:
  421. self.get_channels()
  422. if not self.ch:
  423. return None
  424. return self.ch[self.ch_id2[chid]] if self.ch_id2.has_key(chid) else None
  425. def get_channel_by_name(self,name):
  426. if not self.ch:
  427. self.get_channels()
  428. ch2 = self.get_channel_by_name2(name)
  429. if not ch2:
  430. return None
  431. ch = self.get_channel_by_id2(ch2["id2"])
  432. return ch
  433. def call(self, data,params = None, headers=None):
  434. if not headers: headers = self.headers
  435. #if not lang: lang = self.country
  436. url = self.api_url + data
  437. content = self._http_request(url,params, headers)
  438. if content:
  439. try:
  440. result = json.loads(content)
  441. return result
  442. except Exception, ex:
  443. return None
  444. else:
  445. return None
  446. def call2(self, data,params = None, headers=None):
  447. if not headers: headers = self.headers2
  448. #if not lang: lang = self.country
  449. url = self.api_url2 + data
  450. content = self._http_request(url,params, headers)
  451. return content
  452. def _http_request(self, url,params = None, headers=None):
  453. if not headers: headers = self.headers
  454. import requests
  455. try:
  456. from requests.packages.urllib3.exceptions import InsecureRequestWarning
  457. requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
  458. except:
  459. pass
  460. try:
  461. r = requests.get(url, headers=headers)
  462. return r.content
  463. except Exception as ex:
  464. if "code" in dir(ex) and ex.code==403:
  465. return ex.read()
  466. else:
  467. return None
  468. def gt(dt_str):
  469. dt, _, us= dt_str.partition(".")
  470. dt= datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
  471. dt = dt - datetime.timedelta(seconds=time.altzone)
  472. #us= int(us.rstrip("Z"), 10)
  473. #r = dt + datetime.timedelta(microseconds=us)a
  474. return dt
# Manual test / debugging entry point (developer scratch code); the Enigma2
# plugin itself never executes this branch.
if __name__ == "__main__":
    c = Source()
    from subprocess import call
    #ch = c.get_channels()
    #c.get_epg_live("bbc_two_england")
    # With a non-"iplayer::" argument, treat it as a video id (debug path).
    if len(sys.argv)>1 and not "iplayer::" in sys.argv[1]:
        vid = sys.argv[1]
        # NOTE(review): login() is not defined in this file — presumably
        # inherited from SourceBase or leftover from another source; confirm.
        print "login - %s"%c.login("ivars777","xxx")
        vid = "1069"
        vid = "1462566072086"
        channelid="101"
        vid = "1350462656767"
        #data = c.get_stream_url(vid,"vod")
        #call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",data["stream"]])
        pass
    else:
        # Otherwise browse the given (or default) content path and print items.
        if len(sys.argv)>1:
            data= sys.argv[1]
        else:
            data = "iplayer::home"
        content = c.get_content(data)
        for item in content:
            print item
        #cat = api.get_categories(country)
        #chan = api.get_channels("lv")
        #prog = api.get_programs(channel=6400)
        #prog = api.get_programs(category=55)
        #seas = api.get_seasons(program=6453)
        #str = api.get_streams(660243)
        #res = api.get_videos(802)
        #formats = api.getAllFormats()
        #det = api.detailed("1516")
        #vid = api.getVideos("13170")
        pass