Enigma2 plugin to play various online streams (mostly Latvian).

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555
  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. import sys, os, os.path, re, sys
  9. import urllib,urllib2
  10. from xml.sax.saxutils import unescape,escape
  11. from urllib import quote, unquote
  12. import datetime
  13. import HTMLParser
  14. import json
  15. import datetime,time
  16. from SourceBase import SourceBase, stream_type
  17. import util
  18. from collections import OrderedDict
  19. import ssl
  20. ssl._create_default_https_context = ssl._create_unverified_context
# Legacy Lattelecom API root — not referenced by this iPlayer source; TODO confirm it can be removed
API_URL = 'https://m.lattelecom.tv/'
# Spoofed mobile-browser identity for outgoing HTTP requests
user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
# Convert a multi-line "Header: value" string into a header dict
headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
# Shared HTML entity unescaper instance
h = HTMLParser.HTMLParser()
class Source(SourceBase):
    """BBC iPlayer source: browse/search programmes and resolve HLS streams."""

    def __init__(self, cfg_path=None):
        # Identity used in content references ("iplayer::...") and the config file name
        self.name = "iplayer"
        self.title = "BBC iPlayer"
        self.img = "http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png"
        self.desc = "BBC iPlayer portal content"
        # iPlayer "ibl" REST API root; all call() paths are relative to it
        self.api_url = "http://ibl.api.bbci.co.uk/ibl/v1/"
        # Pretend to be the Android iPlayer app for API requests
        self.headers = headers2dict("""
            User-Agent: BBCiPlayer/4.19.0.3021 (SM-G900FD; Android 4.4.2)
            Connection: Keep-Alive
            """)
        # Desktop-browser identity for non-API requests
        self.headers2 = headers2dict("""
            User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
            Connection: Keep-Alive
            """)
        # Channel cache and lookup indexes, filled lazily by get_channels()
        self.ch = []
        self.ch_id = {}    # channel "id" -> index into self.ch
        self.ch_id2 = {}   # "master_brand_id" -> index into self.ch
        self.ch_name = {}  # channel title -> index into self.ch
        # Hand-picked logos for channels whose auto-generated logo URL is poor or missing
        self.logos = {
            "bbc_one_london": "http://www.lyngsat-logo.com/hires/bb/bbc_one.png",
            "bbc_two_england": "http://www.lyngsat-logo.com/hires/bb/bbc_two_uk.png",
            "bbc_three": "http://www.lyngsat-logo.com/hires/bb/bbc_three_uk.png",
            "bbc_four": "http://www.lyngsat-logo.com/hires/bb/bbc_four_uk.png",
            "bbc_radio_one": "http://www.lyngsat-logo.com/hires/bb/bbc_radio1.png",
            "cbbc": "http://www.lyngsat-logo.com/hires/bb/bbc_cbbc.png",
            "cbeebies": "http://www.lyngsat-logo.com/hires/bb/bbc_cbeebies_uk.png",
            "bbc_news24": "http://www.lyngsat-logo.com/hires/bb/bbc_news.png",
            "bbc_parliament": "http://www.lyngsat-logo.com/hires/bb/bbc_parliament.png",
            "bbc_alba": "http://www.lyngsat-logo.com/hires/bb/bbc_alba.png",
            "s4cpbs": "http://www.lyngsat-logo.com/hires/ss/s4c_uk.png"
        }
        # Config file lives next to the plugin unless a path is supplied
        cur_directory = os.path.dirname(os.path.abspath(__file__))
        if not cfg_path: cfg_path = cur_directory
        self.config_file = os.path.join(cfg_path, self.name + ".cfg")
        self.options = OrderedDict([("user", "lietotajs"), ("password", "parole")])
        self.options_read()
  62. def get_content(self, data):
  63. print "[iplayer] get_content:", data
  64. if "::" in data:
  65. data = data.split("::")[1]
  66. path = data.split("?")[0]
  67. clist = path.split("/")[0]
  68. params = data[data.find("?"):] if "?" in data else ""
  69. qs = dict(map(lambda x:x.split("="),re.findall("\w+=[\w-]+",params)))
  70. #lang = qs["lang"] if "lang" in qs else self.country
  71. content=[]
  72. content.append(("..return", "back","","Return back"))
  73. ### Home ###
  74. if data=="home":
  75. content.extend([
  76. ("Search TV", "iplayer::search/{0}","","Search in iPlayer"),
  77. ("Live streams", "iplayer::live","","TV live streams"),
  78. ("Channels", "iplayer::channels","","Programmes by channel/date"),
  79. ("Categories", "iplayer::categories","","Programmes by categories"),
  80. ("A-Z", "iplayer::a-z","","All programmes by name"),
  81. ("Highlights", "iplayer::home/highlights","","Current highlights"),
  82. ("Most popular", "iplayer::groups/popular/episodes?per_page=40&page=1","","Most popular programmes")
  83. ])
  84. return content
  85. ### Search ###
  86. elif clist=="search":
  87. data_ = "search-suggest/?q=%s&rights=mobile&initial_child_count=1"%data.split("/")[1]
  88. r = self.call(data_)
  89. for item in r["search_suggest"]["results"]:
  90. title,data2,img,desc = self.get_data_element(item)
  91. content.append((title,self.name+"::"+data2,img,desc))
  92. return content
  93. ### Live main ###
  94. elif data=="live":
  95. for ch in self.get_channels():
  96. title = ch["title"]
  97. img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  98. desc = title
  99. data2 = "live/%s"%ch["id"]
  100. ee = self.get_epg_live(ch["id"])
  101. desc = ee[2]
  102. content.append((title,self.name+"::"+data2,img,desc))
  103. return content
  104. ### Categories ###
  105. elif data == "categories":
  106. r = self.call(data)
  107. if not "categories":
  108. raise Exception("Error reading categories")
  109. for item in r["categories"]:
  110. data2 = "categories/%s"%(item["id"])
  111. title = item["title"]
  112. desc = title
  113. img = self.img
  114. content.append((title,self.name+"::"+data2,img,desc))
  115. return content
  116. ### Catetory root ###
  117. elif clist == "categories" and len(data.split("/"))==2:
  118. r = self.call(data)
  119. title = "%s - highlights"%r["category"]["title"]
  120. content.append((title,self.name+"::"+data+"/highlights?lang=en&rights=mobile&availability=available",self.img,title))
  121. title = "%s - recent (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
  122. content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
  123. title = "%s - a-z (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
  124. content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=title&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
  125. return content
  126. ### Program/episodes list ###
  127. elif re.search("categories/([\w\-]+)/(highlights|programmes).+",data) or\
  128. re.search("programmes/(\w+)/episodes.+",data) or\
  129. re.search("groups/(\w+)/episodes.+",data) or\
  130. re.search("atoz/([\w]+)/programmes.+",data) or\
  131. re.search("channels/(\w+)/schedule/[\d\-].+",data) or\
  132. re.search("channels/(\w+)/programmes.+",data) or\
  133. re.search("channels/(\w+)/highlights.+",data) or\
  134. data == "home/highlights":
  135. r = self.call(data)
  136. lst = r["category_highlights"] if "category_highlights" in r else\
  137. r["category_programmes"] if "category_programmes" in r else\
  138. r["programme_episodes"] if "programme_episodes" in r else\
  139. r["atoz_programmes"] if "atoz_programmes" in r else\
  140. r["group_episodes"] if "group_episodes" in r else\
  141. r["schedule"] if "schedule" in r else\
  142. r["channel_highlights"] if "channel_highlights" in r else\
  143. r["channel_programmes"] if "channel_programmes" in r else\
  144. r["home_highlights"] if "home_highlights" in r else\
  145. []
  146. if not lst:
  147. return content
  148. for el in lst["elements"]:
  149. if el["type"] == "broadcast":
  150. if not len(el["episode"]["versions"]):continue
  151. title,data2,img,desc = self.get_data_element(el["episode"])
  152. t1 = gt(el['scheduled_start'])
  153. t2 = gt(el['scheduled_end'])
  154. title = "[%s-%s]%s"%(t1.strftime("%d.%m.%Y %H:%M"),t2.strftime("%H:%M"),title)
  155. else:
  156. title,data2,img,desc = self.get_data_element(el)
  157. content.append((title,self.name+"::"+data2,img,desc))
  158. if "&page=" in data and lst["page"]*lst["per_page"]<lst["count"]:
  159. data2 = re.sub("&page=\d+","&page=%s"%(lst["page"]+1),data)
  160. content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
  161. return content
  162. ### A-z root ###
  163. elif data=="a-z":
  164. url = "http://www.bbc.co.uk/programmes/a-z/by/x/all.json?page=1"
  165. r = self._http_request(url)
  166. if not r:
  167. raise Exception("Can not read %s"%s)
  168. js = json.loads(r)
  169. for ch in js["atoz"]["letters"]:
  170. title = ch.upper()
  171. desc = "Programmes beginning with %s"%title
  172. img = self.img
  173. data2 = "atoz/%s/programmes?rights=mobile&page=1&per_page=40&initial_child_count=1&sort=title&sort_direction=asc&availability=available"%ch
  174. content.append((title,self.name+"::"+data2,img,desc))
  175. return content
  176. ### Channels home ###
  177. elif data=="channels":
  178. for ch in self.get_channels():
  179. title = ch["title"]
  180. img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  181. desc = title
  182. data2 = "channels/%s"%ch["id"]
  183. #ee = self.get_epg_live(ch["id"])
  184. desc = title
  185. content.append((title,self.name+"::"+data2,img,desc))
  186. return content
  187. ### Channel higlihts/progrmmes/days ###
  188. elif clist=="channels" and len(data.split("/"))==2:
  189. r = self.call(data)
  190. chid = data.split("/")[1]
  191. ch = self.get_channel_by_id(chid)
  192. # Highlights
  193. title = ch["title"] + " - highlights"
  194. img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  195. data2 = "channels/%s/highlights?lang=en&rights=mobile&availability=available"%ch["id"]
  196. desc = title
  197. content.append((title,self.name+"::"+data2,img,desc))
  198. #AtoZ
  199. title = ch["title"] + " - programmes AtoZ"
  200. data2 = "channels/%s/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available"%ch["id"]
  201. desc = title
  202. content.append((title,self.name+"::"+data2,img,desc))
  203. day0 = datetime.date.today()
  204. for i in range(10):
  205. day = day0-datetime.timedelta(days=i)
  206. days = day.strftime("%Y-%m-%d")
  207. title = ch["title"] + " - " + days
  208. img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  209. data2 = "channels/%s/schedule/%s?availability=available"%(ch["id"],days)
  210. #ee = self.get_epg_live(ch["id"])
  211. desc = title
  212. content.append((title,self.name+"::"+data2,img,desc))
  213. return content
    def get_streams(self, data):
        # Resolve playable HLS stream variants (plus subtitle tracks) for a
        # video reference ("live/<channel>" or "episodes/<id>").
        # Returns a list of util.item() dicts sorted by bitrate, best first.
        print "[iplayer] get_streams:", data
        if "::" in data: data = data.split("::")[1]
        if not self.is_video(data):
            return []
        cmd = data.split("/")
        vid = cmd[1].split("?")[0]
        if cmd[0] == "live":
            # Live channel: EPG supplies title/description; vid stays the channel id
            title,img,desc,nfo = self.get_epg_live(vid)
        else:
            #data_ = "episodes/%s"%vid
            #r = self.call(data_)
            # VOD: get_epg_video also replaces vid with the playable version id
            title,img,desc,vid,nfo = self.get_epg_video(vid)
        # BBC media selector resolves a version id to concrete stream endpoints
        url = "http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/format/json/mediaset/iptv-all/vpid/%s"%vid
        print "vid=%s"%vid
        print url
        r = self._http_request(url) #,headers=self.headers2
        if not r:
            raise Exception("No streams found")
        js = json.loads(r)
        if "result" in js and js["result"]=="geolocation":
            raise Exception("BBC iPlayer service available only from UK")
        if not "media" in js:
            raise Exception("No streams found")
        streams = []
        captions = []
        for s in js["media"]:
            # Collect subtitle tracks first
            if s["kind"] == "captions":
                if s["connection"][0]["href"]:
                    sub = {}
                    sub["url"] = s["connection"][0]["href"].encode('utf8')
                    sub["type"] = s["type"]
                    sub["name"] = s["service"] if "service" in s else "captions (taff)"
                    sub["lang"] = "en"
                    captions.append(sub)
            if s["kind"] <> "video":
                continue
            for c in s["connection"]:
                # Only HLS delivery is playable here
                if c["transferFormat"] <> "hls": continue
                #if not (c["supplier"].startswith("mf_") or c["supplier"].startswith("ll_")) : continue # TODO there are some VPN problems with akamaihd
                #if c["priority"] <> "1": continue
                url=c["href"].encode("utf8")
                r2 = self._http_request(url)
                if not r2: continue
                # Master playlist? expand each variant; otherwise use the URL as-is
                slist = re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", r2, re.DOTALL)
                if not slist:
                    stream = util.item()
                    stream["url"]=url
                    stream["name"]=title
                    stream["desc"]=desc
                    stream["img"]=img
                    stream["type"]="hls"
                    stream["quality"]=("%s %sx%s %s,%s"%(s["bitrate"],s["width"],s["height"],c["supplier"],c["priority"])).encode("utf8")
                    stream["lang"]="en"
                    stream["subs"]=captions
                    stream["order"]=int(s["bitrate"])
                    stream["nfo"] = nfo
                    streams.append(stream)
                else:
                    for cc in slist:
                        # Prefer the variant's own RESOLUTION/BANDWIDTH attributes,
                        # falling back to the media-selector metadata
                        m = re.search("RESOLUTION=([\dx]+)",cc[0])
                        resolution = m.group(1) if m else "%sx%s"%(s["width"],s["height"])
                        m = re.search("BANDWIDTH=([\d]+)",cc[0])
                        bitrate = m.group(1) if m else s["bitrate"]
                        url2 = cc[1].encode("utf8")
                        if not url2.startswith("http"):
                            # Relative variant URL - resolve against the master playlist
                            uu = url.split("/")[:-1]
                            uu.append(url2)
                            url2 = "/".join(uu)
                        stream = util.item()
                        stream["url"]=url2
                        stream["name"]=title
                        stream["desc"]=desc
                        stream["img"]=img
                        stream["type"]="hls"
                        stream["quality"]=("%s %s %s,%s"%(bitrate,resolution,c["supplier"],c["priority"])).encode("utf8")
                        stream["lang"]="en"
                        stream["subs"]=captions
                        stream["order"]=int(bitrate)
                        stream["nfo"] = nfo
                        streams.append(stream)
        # Attach captions to streams created before any caption track was seen
        if captions:
            for s in streams:
                s["subs"]=captions
        # Highest bitrate first
        streams = sorted(streams,key=lambda item: item["order"],reverse=True)
        return streams
  300. def is_video(self,data):
  301. if "::" in data:
  302. data = data.split("::")[1]
  303. cmd = data.split("/")
  304. if cmd[0]=="live" and len(cmd)==2:
  305. return True
  306. elif cmd[0]=="episodes" and len(cmd)==2:
  307. return True
  308. else:
  309. return False
  310. def get_data_element(self,item):
  311. if ("programme" in item["type"] or "group" in item["type"]) and item["count"]>1:
  312. ep = item.copy()
  313. elif ("programme" in item["type"] or "group" in item["type"]) and item["count"]==1:
  314. ep = item["initial_children"][0].copy()
  315. elif item["type"] == "episode":
  316. ep = item.copy()
  317. elif item["type"] == "broadcast":
  318. ep = item["episode"].copy()
  319. else:
  320. ep = item.copy()
  321. title = ep["title"]
  322. if "subtitle" in ep and ep["subtitle"]:
  323. title = title+". "+ ep["subtitle"]
  324. desc = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"]
  325. #TODO papildus info pie apraksta
  326. img = ep["images"]["standard"].replace("{recipe}","512x288") if "images" in ep else self.img
  327. if ep["type"] == "episode":
  328. data2 = "episodes/%s"%ep["id"]
  329. elif "programme" in ep["type"]:
  330. data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
  331. title = "%s [%s episodes]"%(title,ep["count"])
  332. elif "group" in ep["type"]:
  333. data2 = "groups/%s/episodes?per_page=40&page=1"%ep["id"]
  334. title = "%s [%s episodes]"%(title,ep["count"])
  335. else:
  336. data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
  337. title = "%s [%s episodes]"%(title,ep["count"])
  338. return title,data2,img,desc
  339. def get_epg_video(self,vid):
  340. data = "episodes/%s"%vid
  341. nfo = {}
  342. r = self.call(data)
  343. if "episodes" in r :
  344. ep = r["episodes"][0]
  345. title = ep["title"]
  346. if "subtitle" in ep:
  347. title = title +". "+ ep["subtitle"]
  348. title = title
  349. desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  350. desc = desc
  351. ver = ep["versions"][0]
  352. vid = ver["id"]
  353. remaining = ver["availability"]["end"].split("T")[0] #["remaining"]["text"]
  354. duration = ver["duration"]["text"]
  355. first_broadcast = ver["first_broadcast"]
  356. desc =u"%s\n%s\%s\n%s\n%s"%(title,duration,remaining,first_broadcast,desc)
  357. img = ep["images"]["standard"].replace("{recipe}","512x288")
  358. #Create nfo dictionary
  359. tt = lambda dd,k,d: dd[k] if k in dd else d
  360. nfo_type = "movie" if True else "tvswhow" # TODO
  361. t = OrderedDict()
  362. t["title"] = title
  363. t["originaltitle"] = tt(ep,"original_title","")
  364. t["thumb"] = img
  365. t["id"] = vid
  366. t["outline"] = ep["synopses"]["small"] if "small" in ep["synopses"] else ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
  367. t["plot"] = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  368. t["tagline"] = ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
  369. t["runtime"] = tt(ver["duration"],"text","")
  370. t["premiered"] = tt(ep,"release_date","")
  371. t["aired"] = ver["availability"]["start"].split("T")[0] if "start" in ver["availability"] else ""
  372. if "parent_position" in ep: t["episode"] = ep["parent_position"]
  373. nfo[nfo_type] = t
  374. return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),vid.encode("utf8"),nfo
  375. else:
  376. raise Exception("No video info")
  377. def get_epg_live(self,channelid):
  378. data = "channels/%s/highlights?live=true"%channelid
  379. r = self.call(data)
  380. nfo = {}
  381. if "channel_highlights" in r and r["channel_highlights"]["elements"][0]["id"] == "live":
  382. epg = r["channel_highlights"]["elements"][0]["initial_children"][0].copy()
  383. t1 = gt(epg['scheduled_start'])
  384. t2 = gt(epg['scheduled_end'])
  385. ep = epg["episode"]
  386. title = ep["title"]
  387. if "subtitle" in ep:
  388. title = title +". "+ ep["subtitle"]
  389. title = "%s (%s-%s)"%(title,t1.strftime("%H:%M"),t2.strftime("%H:%M"))
  390. title = title
  391. desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  392. desc = desc
  393. desc ="%s\n%s"%(title,desc)
  394. img = ep["images"]["standard"].replace("{recipe}","512x288")
  395. #return title,img,desc
  396. else:
  397. title = r["channel_highlights"]["channel"]["title"]
  398. img = ""
  399. desc = title
  400. return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),nfo
  401. def get_channels(self):
  402. if self.ch:
  403. return self.ch
  404. r= self.call("channels")
  405. self.ch=[]
  406. for i,item in enumerate(r["channels"]):
  407. self.ch.append(item)
  408. self.ch_id[item["id"]]=i
  409. self.ch_id2[item["master_brand_id"]]=i
  410. self.ch_name[item["title"]]=i
  411. return self.ch
  412. def get_channel_by_id(self,chid):
  413. if not self.ch:
  414. self.get_channels()
  415. if not self.ch:
  416. return None
  417. return self.ch[self.ch_id[chid]] if self.ch_id.has_key(chid) else None
  418. def get_channel_by_id2(self,chid):
  419. if not self.ch:
  420. self.get_channels()
  421. if not self.ch:
  422. return None
  423. return self.ch[self.ch_id2[chid]] if self.ch_id2.has_key(chid) else None
    def get_channel_by_name(self, name):
        # Resolve a channel by human-readable name, going through a secondary id.
        # NOTE(review): get_channel_by_name2 is not defined anywhere in this file —
        # presumably inherited from SourceBase; confirm it exists, and that its
        # result carries an "id2" key, before relying on this method.
        if not self.ch:
            self.get_channels()
        ch2 = self.get_channel_by_name2(name)
        if not ch2:
            return None
        ch = self.get_channel_by_id2(ch2["id2"])
        return ch
  432. def call(self, data,params = None, headers=None):
  433. if not headers: headers = self.headers
  434. #if not lang: lang = self.country
  435. url = self.api_url + data
  436. content = self._http_request(url,params, headers)
  437. if content:
  438. try:
  439. result = json.loads(content)
  440. return result
  441. except Exception, ex:
  442. return None
  443. else:
  444. return None
  445. def call2(self, data,params = None, headers=None):
  446. if not headers: headers = self.headers2
  447. #if not lang: lang = self.country
  448. url = self.api_url2 + data
  449. content = self._http_request(url,params, headers)
  450. return content
  451. def _http_request(self, url,params = None, headers=None):
  452. if not headers: headers = self.headers
  453. import requests
  454. try:
  455. from requests.packages.urllib3.exceptions import InsecureRequestWarning
  456. requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
  457. except:
  458. pass
  459. try:
  460. r = requests.get(url, headers=headers)
  461. return r.content
  462. except Exception as ex:
  463. if "code" in dir(ex) and ex.code==403:
  464. return ex.read()
  465. else:
  466. return None
  467. def gt(dt_str):
  468. dt, _, us= dt_str.partition(".")
  469. dt= datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
  470. dt = dt - datetime.timedelta(seconds=time.altzone)
  471. #us= int(us.rstrip("Z"), 10)
  472. #r = dt + datetime.timedelta(microseconds=us)a
  473. return dt
if __name__ == "__main__":
    # Ad-hoc manual test harness (run from a desktop shell, not from enigma2).
    c = Source()
    from subprocess import call
    #ch = c.get_channels()
    #c.get_epg_live("bbc_two_england")
    if len(sys.argv) > 1 and not "iplayer::" in sys.argv[1]:
        # Direct video-id mode (debug leftovers from another source)
        vid = sys.argv[1]
        # NOTE(review): login() is not defined in this file — presumably
        # inherited from SourceBase; confirm before relying on this branch.
        print "login - %s"%c.login("ivars777","xxx")
        vid = "1069"
        vid = "1462566072086"
        channelid="101"
        vid = "1350462656767"
        #data = c.get_stream_url(vid,"vod")
        #call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",data["stream"]])
        pass
    else:
        # Content-listing mode: resolve a reference and print each entry
        if len(sys.argv) > 1:
            data = sys.argv[1]
        else:
            data = "iplayer::home"
        content = c.get_content(data)
        for item in content:
            print item
        #cat = api.get_categories(country)
        #chan = api.get_channels("lv")
        #prog = api.get_programs(channel=6400)
        #prog = api.get_programs(category=55)
        #seas = api.get_seasons(program=6453)
        #str = api.get_streams(660243)
        #res = api.get_videos(802)
        #formats = api.getAllFormats()
        #det = api.detailed("1516")
        #vid = api.getVideos("13170")
        pass