Enigma2 plugin to play various online streams (mostly Latvian).

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551
  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. import sys, os, os.path, re, sys
  9. import urllib,urllib2
  10. from xml.sax.saxutils import unescape,escape
  11. from urllib import quote, unquote
  12. import datetime
  13. import HTMLParser
  14. import json
  15. import datetime,time
  16. from SourceBase import SourceBase, stream_type
  17. import util
  18. from collections import OrderedDict
  19. API_URL = 'https://m.lattelecom.tv/'
  20. user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
  21. headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
  22. h = HTMLParser.HTMLParser()
  23. class Source(SourceBase):
  24. def __init__(self):
  25. self.name = "iplayer"
  26. self.title = "BBC iPlayer"
  27. self.img = "http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png"
  28. self.desc = "BBC iPlayer portal content"
  29. self.api_url = "http://ibl.api.bbci.co.uk/ibl/v1/"
  30. self.headers = headers2dict("""
  31. User-Agent: BBCiPlayer/4.19.0.3021 (SM-G900FD; Android 4.4.2)
  32. Connection: Keep-Alive
  33. """)
  34. self.headers2 = headers2dict("""
  35. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
  36. Connection: Keep-Alive
  37. """)
  38. self.ch = []
  39. self.ch_id={}
  40. self.ch_id2={}
  41. self.ch_name={}
  42. self.logos ={
  43. "bbc_one_london":"http://www.lyngsat-logo.com/hires/bb/bbc_one.png",
  44. "bbc_two_england":"http://www.lyngsat-logo.com/hires/bb/bbc_two_uk.png",
  45. "bbc_three":"http://www.lyngsat-logo.com/hires/bb/bbc_three_uk.png",
  46. "bbc_four":"http://www.lyngsat-logo.com/hires/bb/bbc_four_uk.png",
  47. "bbc_radio_one":"http://www.lyngsat-logo.com/hires/bb/bbc_radio1.png",
  48. "cbbc":"http://www.lyngsat-logo.com/hires/bb/bbc_cbbc.png",
  49. "cbeebies":"http://www.lyngsat-logo.com/hires/bb/bbc_cbeebies_uk.png",
  50. "bbc_news24":"http://www.lyngsat-logo.com/hires/bb/bbc_news.png",
  51. "bbc_parliament":"http://www.lyngsat-logo.com/hires/bb/bbc_parliament.png",
  52. "bbc_alba":"http://www.lyngsat-logo.com/hires/bb/bbc_alba.png",
  53. "s4cpbs":"http://www.lyngsat-logo.com/hires/ss/s4c_uk.png"
  54. }
  55. cur_directory = os.path.dirname(os.path.abspath(__file__))
  56. self.config_file = os.path.join(cur_directory,self.name+".cfg")
  57. self.options = OrderedDict([("user","lietotajs"),("password","parole")])
  58. self.options_read()
  59. def get_content(self, data):
  60. print "[iplayer] get_content:", data
  61. if "::" in data:
  62. data = data.split("::")[1]
  63. path = data.split("?")[0]
  64. clist = path.split("/")[0]
  65. params = data[data.find("?"):] if "?" in data else ""
  66. qs = dict(map(lambda x:x.split("="),re.findall("\w+=[\w-]+",params)))
  67. #lang = qs["lang"] if "lang" in qs else self.country
  68. content=[]
  69. content.append(("..return", "back","","Return back"))
  70. ### Home ###
  71. if data=="home":
  72. content.extend([
  73. ("Search TV", "iplayer::search/{0}","","Search in iPlayer"),
  74. ("Live streams", "iplayer::live","","TV live streams"),
  75. ("Channels", "iplayer::channels","","Programmes by channel/date"),
  76. ("Categories", "iplayer::categories","","Programmes by categories"),
  77. ("A-Z", "iplayer::a-z","","All programmes by name"),
  78. ("Highlights", "iplayer::home/highlights","","Current highlights"),
  79. ("Most popular", "iplayer::groups/popular/episodes?per_page=40&page=1","","Most popular programmes")
  80. ])
  81. return content
  82. ### Search ###
  83. elif clist=="search":
  84. data_ = "search-suggest/?q=%s&rights=mobile&initial_child_count=1"%data.split("/")[1]
  85. r = self.call(data_)
  86. for item in r["search_suggest"]["results"]:
  87. title,data2,img,desc = self.get_data_element(item)
  88. content.append((title,self.name+"::"+data2,img,desc))
  89. return content
  90. ### Live main ###
  91. elif data=="live":
  92. for ch in self.get_channels():
  93. title = ch["title"]
  94. img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  95. desc = title
  96. data2 = "live/%s"%ch["id"]
  97. ee = self.get_epg_live(ch["id"])
  98. desc = ee[2]
  99. content.append((title,self.name+"::"+data2,img,desc))
  100. return content
  101. ### Categories ###
  102. elif data == "categories":
  103. r = self.call(data)
  104. if not "categories":
  105. raise Exception("Error reading categories")
  106. for item in r["categories"]:
  107. data2 = "categories/%s"%(item["id"])
  108. title = item["title"]
  109. desc = title
  110. img = self.img
  111. content.append((title,self.name+"::"+data2,img,desc))
  112. return content
  113. ### Catetory root ###
  114. elif clist == "categories" and len(data.split("/"))==2:
  115. r = self.call(data)
  116. title = "%s - highlights"%r["category"]["title"]
  117. content.append((title,self.name+"::"+data+"/highlights?lang=en&rights=mobile&availability=available",self.img,title))
  118. title = "%s - recent (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
  119. content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
  120. title = "%s - a-z (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
  121. content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=title&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
  122. return content
  123. ### Program/episodes list ###
  124. elif re.search("categories/([\w\-]+)/(highlights|programmes).+",data) or\
  125. re.search("programmes/(\w+)/episodes.+",data) or\
  126. re.search("groups/(\w+)/episodes.+",data) or\
  127. re.search("atoz/([\w]+)/programmes.+",data) or\
  128. re.search("channels/(\w+)/schedule/[\d\-].+",data) or\
  129. re.search("channels/(\w+)/programmes.+",data) or\
  130. re.search("channels/(\w+)/highlights.+",data) or\
  131. data == "home/highlights":
  132. r = self.call(data)
  133. lst = r["category_highlights"] if "category_highlights" in r else\
  134. r["category_programmes"] if "category_programmes" in r else\
  135. r["programme_episodes"] if "programme_episodes" in r else\
  136. r["atoz_programmes"] if "atoz_programmes" in r else\
  137. r["group_episodes"] if "group_episodes" in r else\
  138. r["schedule"] if "schedule" in r else\
  139. r["channel_highlights"] if "channel_highlights" in r else\
  140. r["channel_programmes"] if "channel_programmes" in r else\
  141. r["home_highlights"] if "home_highlights" in r else\
  142. []
  143. if not lst:
  144. return content
  145. for el in lst["elements"]:
  146. if el["type"] == "broadcast":
  147. if not len(el["episode"]["versions"]):continue
  148. title,data2,img,desc = self.get_data_element(el["episode"])
  149. t1 = gt(el['scheduled_start'])
  150. t2 = gt(el['scheduled_end'])
  151. title = "[%s-%s]%s"%(t1.strftime("%d.%m.%Y %H:%M"),t2.strftime("%H:%M"),title)
  152. else:
  153. title,data2,img,desc = self.get_data_element(el)
  154. content.append((title,self.name+"::"+data2,img,desc))
  155. if "&page=" in data and lst["page"]*lst["per_page"]<lst["count"]:
  156. data2 = re.sub("&page=\d+","&page=%s"%(lst["page"]+1),data)
  157. content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
  158. return content
  159. ### A-z root ###
  160. elif data=="a-z":
  161. url = "http://www.bbc.co.uk/programmes/a-z/by/x/all.json?page=1"
  162. r = self._http_request(url)
  163. if not r:
  164. raise Exception("Can not read %s"%s)
  165. js = json.loads(r)
  166. for ch in js["atoz"]["letters"]:
  167. title = ch.upper()
  168. desc = "Programmes beginning with %s"%title
  169. img = self.img
  170. data2 = "atoz/%s/programmes?rights=mobile&page=1&per_page=40&initial_child_count=1&sort=title&sort_direction=asc&availability=available"%ch
  171. content.append((title,self.name+"::"+data2,img,desc))
  172. return content
  173. ### Channels home ###
  174. elif data=="channels":
  175. for ch in self.get_channels():
  176. title = ch["title"]
  177. img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  178. desc = title
  179. data2 = "channels/%s"%ch["id"]
  180. #ee = self.get_epg_live(ch["id"])
  181. desc = title
  182. content.append((title,self.name+"::"+data2,img,desc))
  183. return content
  184. ### Channel higlihts/progrmmes/days ###
  185. elif clist=="channels" and len(data.split("/"))==2:
  186. r = self.call(data)
  187. chid = data.split("/")[1]
  188. ch = self.get_channel_by_id(chid)
  189. # Highlights
  190. title = ch["title"] + " - highlights"
  191. img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  192. data2 = "channels/%s/highlights?lang=en&rights=mobile&availability=available"%ch["id"]
  193. desc = title
  194. content.append((title,self.name+"::"+data2,img,desc))
  195. #AtoZ
  196. title = ch["title"] + " - programmes AtoZ"
  197. data2 = "channels/%s/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available"%ch["id"]
  198. desc = title
  199. content.append((title,self.name+"::"+data2,img,desc))
  200. day0 = datetime.date.today()
  201. for i in range(10):
  202. day = day0-datetime.timedelta(days=i)
  203. days = day.strftime("%Y-%m-%d")
  204. title = ch["title"] + " - " + days
  205. img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  206. data2 = "channels/%s/schedule/%s?availability=available"%(ch["id"],days)
  207. #ee = self.get_epg_live(ch["id"])
  208. desc = title
  209. content.append((title,self.name+"::"+data2,img,desc))
  210. return content
  211. def get_streams(self, data):
  212. print "[iplayer] get_streams:", data
  213. if "::" in data: data = data.split("::")[1]
  214. if not self.is_video(data):
  215. return []
  216. cmd = data.split("/")
  217. vid = cmd[1].split("?")[0]
  218. if cmd[0] == "live":
  219. title,img,desc,nfo = self.get_epg_live(vid)
  220. else:
  221. #data_ = "episodes/%s"%vid
  222. #r = self.call(data_)
  223. title,img,desc,vid,nfo = self.get_epg_video(vid)
  224. url = "http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/format/json/mediaset/iptv-all/vpid/%s"%vid
  225. print "vid=%s"%vid
  226. print url
  227. r = self._http_request(url) #,headers=self.headers2
  228. if not r:
  229. raise Exception("No streams found")
  230. js = json.loads(r)
  231. if "result" in js and js["result"]=="geolocation":
  232. raise Exception("BBC iPlayer service available only from UK")
  233. if not "media" in js:
  234. raise Exception("No streams found")
  235. streams = []
  236. captions = []
  237. for s in js["media"]:
  238. if s["kind"] == "captions":
  239. if s["connection"][0]["href"]:
  240. sub = {}
  241. sub["url"] = s["connection"][0]["href"].encode('utf8')
  242. sub["type"] = s["type"]
  243. sub["name"] = s["service"] if "service" in s else "captions (taff)"
  244. sub["lang"] = "en"
  245. captions.append(sub)
  246. if s["kind"] <> "video":
  247. continue
  248. for c in s["connection"]:
  249. if c["transferFormat"] <> "hls": continue
  250. #if not (c["supplier"].startswith("mf_") or c["supplier"].startswith("ll_")) : continue # TODO ir kaut kādas VPN problēmas ar akamaihd
  251. #if c["priority"] <> "1": continue
  252. url=c["href"].encode("utf8")
  253. r2 = self._http_request(url)
  254. if not r2: continue
  255. slist = re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", r2, re.DOTALL)
  256. if not slist:
  257. stream = util.item()
  258. stream["url"]=url
  259. stream["name"]=title
  260. stream["desc"]=desc
  261. stream["img"]=img
  262. stream["type"]="hls"
  263. stream["quality"]=("%s %sx%s %s,%s"%(s["bitrate"],s["width"],s["height"],c["supplier"],c["priority"])).encode("utf8")
  264. stream["lang"]="en"
  265. stream["subs"]=captions
  266. stream["order"]=int(s["bitrate"])
  267. stream["nfo"] = nfo
  268. streams.append(stream)
  269. else:
  270. for cc in slist:
  271. m = re.search("RESOLUTION=([\dx]+)",cc[0])
  272. resolution = m.group(1) if m else "%sx%s"%(s["width"],s["height"])
  273. m = re.search("BANDWIDTH=([\d]+)",cc[0])
  274. bitrate = m.group(1) if m else s["bitrate"]
  275. url2 = cc[1].encode("utf8")
  276. if not url2.startswith("http"):
  277. uu = url.split("/")[:-1]
  278. uu.append(url2)
  279. url2 = "/".join(uu)
  280. stream = util.item()
  281. stream["url"]=url2
  282. stream["name"]=title
  283. stream["desc"]=desc
  284. stream["img"]=img
  285. stream["type"]="hls"
  286. stream["quality"]=("%s %s %s,%s"%(bitrate,resolution,c["supplier"],c["priority"])).encode("utf8")
  287. stream["lang"]="en"
  288. stream["subs"]=captions
  289. stream["order"]=int(bitrate)
  290. stream["nfo"] = nfo
  291. streams.append(stream)
  292. if captions:
  293. for s in streams:
  294. s["subs"]=captions
  295. streams = sorted(streams,key=lambda item: item["order"],reverse=True)
  296. return streams
  297. def is_video(self,data):
  298. if "::" in data:
  299. data = data.split("::")[1]
  300. cmd = data.split("/")
  301. if cmd[0]=="live" and len(cmd)==2:
  302. return True
  303. elif cmd[0]=="episodes" and len(cmd)==2:
  304. return True
  305. else:
  306. return False
  307. def get_data_element(self,item):
  308. if ("programme" in item["type"] or "group" in item["type"]) and item["count"]>1:
  309. ep = item.copy()
  310. elif ("programme" in item["type"] or "group" in item["type"]) and item["count"]==1:
  311. ep = item["initial_children"][0].copy()
  312. elif item["type"] == "episode":
  313. ep = item.copy()
  314. elif item["type"] == "broadcast":
  315. ep = item["episode"].copy()
  316. else:
  317. ep = item.copy()
  318. title = ep["title"]
  319. if "subtitle" in ep and ep["subtitle"]:
  320. title = title+". "+ ep["subtitle"]
  321. desc = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"]
  322. #TODO papildus info pie apraksta
  323. img = ep["images"]["standard"].replace("{recipe}","512x288") if "images" in ep else self.img
  324. if ep["type"] == "episode":
  325. data2 = "episodes/%s"%ep["id"]
  326. elif "programme" in ep["type"]:
  327. data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
  328. title = "%s [%s episodes]"%(title,ep["count"])
  329. elif "group" in ep["type"]:
  330. data2 = "groups/%s/episodes?per_page=40&page=1"%ep["id"]
  331. title = "%s [%s episodes]"%(title,ep["count"])
  332. else:
  333. data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
  334. title = "%s [%s episodes]"%(title,ep["count"])
  335. return title,data2,img,desc
  336. def get_epg_video(self,vid):
  337. data = "episodes/%s"%vid
  338. nfo = {}
  339. r = self.call(data)
  340. if "episodes" in r :
  341. ep = r["episodes"][0]
  342. title = ep["title"]
  343. if "subtitle" in ep:
  344. title = title +". "+ ep["subtitle"]
  345. title = title
  346. desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  347. desc = desc
  348. ver = ep["versions"][0]
  349. vid = ver["id"]
  350. remaining = ver["availability"]["end"].split("T")[0] #["remaining"]["text"]
  351. duration = ver["duration"]["text"]
  352. first_broadcast = ver["first_broadcast"]
  353. desc =u"%s\n%s\%s\n%s\n%s"%(title,duration,remaining,first_broadcast,desc)
  354. img = ep["images"]["standard"].replace("{recipe}","512x288")
  355. #Create nfo dictionary
  356. tt = lambda dd,k,d: dd[k] if k in dd else d
  357. nfo_type = "movie" if True else "tvswhow" # TODO
  358. t = OrderedDict()
  359. t["title"] = title
  360. t["originaltitle"] = tt(ep,"original_title","")
  361. t["thumb"] = img
  362. t["id"] = vid
  363. t["outline"] = ep["synopses"]["small"] if "small" in ep["synopses"] else ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
  364. t["plot"] = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  365. t["tagline"] = ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
  366. t["runtime"] = tt(ver["duration"],"text","")
  367. t["premiered"] = tt(ep,"release_date","")
  368. t["aired"] = ver["availability"]["start"].split("T")[0] if "start" in ver["availability"] else ""
  369. if "parent_position" in ep: t["episode"] = ep["parent_position"]
  370. nfo[nfo_type] = t
  371. return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),vid.encode("utf8"),nfo
  372. else:
  373. raise Exception("No video info")
  374. def get_epg_live(self,channelid):
  375. data = "channels/%s/highlights?live=true"%channelid
  376. r = self.call(data)
  377. nfo = {}
  378. if "channel_highlights" in r and r["channel_highlights"]["elements"][0]["id"] == "live":
  379. epg = r["channel_highlights"]["elements"][0]["initial_children"][0].copy()
  380. t1 = gt(epg['scheduled_start'])
  381. t2 = gt(epg['scheduled_end'])
  382. ep = epg["episode"]
  383. title = ep["title"]
  384. if "subtitle" in ep:
  385. title = title +". "+ ep["subtitle"]
  386. title = "%s (%s-%s)"%(title,t1.strftime("%H:%M"),t2.strftime("%H:%M"))
  387. title = title
  388. desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  389. desc = desc
  390. desc ="%s\n%s"%(title,desc)
  391. img = ep["images"]["standard"].replace("{recipe}","512x288")
  392. #return title,img,desc
  393. else:
  394. title = r["channel_highlights"]["channel"]["title"]
  395. img = ""
  396. desc = title
  397. return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),nfo
  398. def get_channels(self):
  399. if self.ch:
  400. return self.ch
  401. r= self.call("channels")
  402. self.ch=[]
  403. for i,item in enumerate(r["channels"]):
  404. self.ch.append(item)
  405. self.ch_id[item["id"]]=i
  406. self.ch_id2[item["master_brand_id"]]=i
  407. self.ch_name[item["title"]]=i
  408. return self.ch
  409. def get_channel_by_id(self,chid):
  410. if not self.ch:
  411. self.get_channels()
  412. if not self.ch:
  413. return None
  414. return self.ch[self.ch_id[chid]] if self.ch_id.has_key(chid) else None
  415. def get_channel_by_id2(self,chid):
  416. if not self.ch:
  417. self.get_channels()
  418. if not self.ch:
  419. return None
  420. return self.ch[self.ch_id2[chid]] if self.ch_id2.has_key(chid) else None
  421. def get_channel_by_name(self,name):
  422. if not self.ch:
  423. self.get_channels()
  424. ch2 = self.get_channel_by_name2(name)
  425. if not ch2:
  426. return None
  427. ch = self.get_channel_by_id2(ch2["id2"])
  428. return ch
  429. def call(self, data,params = None, headers=None):
  430. if not headers: headers = self.headers
  431. #if not lang: lang = self.country
  432. url = self.api_url + data
  433. content = self._http_request(url,params, headers)
  434. if content:
  435. try:
  436. result = json.loads(content)
  437. return result
  438. except Exception, ex:
  439. return None
  440. else:
  441. return None
  442. def call2(self, data,params = None, headers=None):
  443. if not headers: headers = self.headers2
  444. #if not lang: lang = self.country
  445. url = self.api_url2 + data
  446. content = self._http_request(url,params, headers)
  447. return content
  448. def _http_request(self, url,params = None, headers=None):
  449. if not headers: headers = self.headers
  450. import requests
  451. try:
  452. from requests.packages.urllib3.exceptions import InsecureRequestWarning
  453. requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
  454. except:
  455. pass
  456. try:
  457. r = requests.get(url, headers=headers)
  458. return r.content
  459. except Exception as ex:
  460. if ex.code==403:
  461. return ex.read()
  462. else:
  463. return None
  464. def gt(dt_str):
  465. dt, _, us= dt_str.partition(".")
  466. dt= datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
  467. dt = dt - datetime.timedelta(seconds=time.altzone)
  468. #us= int(us.rstrip("Z"), 10)
  469. #r = dt + datetime.timedelta(microseconds=us)a
  470. return dt
  471. if __name__ == "__main__":
  472. c = Source()
  473. from subprocess import call
  474. #ch = c.get_channels()
  475. #c.get_epg_live("bbc_two_england")
  476. if len(sys.argv)>1 and not "iplayer::" in sys.argv[1]:
  477. vid = sys.argv[1]
  478. print "login - %s"%c.login("ivars777","xxx")
  479. vid = "1069"
  480. vid = "1462566072086"
  481. channelid="101"
  482. vid = "1350462656767"
  483. #data = c.get_stream_url(vid,"vod")
  484. #call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",data["stream"]])
  485. pass
  486. else:
  487. if len(sys.argv)>1:
  488. data= sys.argv[1]
  489. else:
  490. data = "iplayer::home"
  491. content = c.get_content(data)
  492. for item in content:
  493. print item
  494. #cat = api.get_categories(country)
  495. #chan = api.get_channels("lv")
  496. #prog = api.get_programs(channel=6400)
  497. #prog = api.get_programs(category=55)
  498. #seas = api.get_seasons(program=6453)
  499. #str = api.get_streams(660243)
  500. #res = api.get_videos(802)
  501. #formats = api.getAllFormats()
  502. #det = api.detailed("1516")
  503. #vid = api.getVideos("13170")
  504. pass