Enigma2 plugin to to play various online streams (mostly Latvian).

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552
  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. import sys, os, os.path, re, sys
  9. import urllib,urllib2
  10. from xml.sax.saxutils import unescape,escape
  11. from urllib import quote, unquote
  12. import datetime
  13. import HTMLParser
  14. import json
  15. import datetime,time
  16. from SourceBase import SourceBase, stream_type
  17. import util
  18. from collections import OrderedDict
  19. API_URL = 'https://m.lattelecom.tv/'
  20. user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
  21. headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
  22. h = HTMLParser.HTMLParser()
  23. class Source(SourceBase):
  24. def __init__(self,cfg_path=None):
  25. self.name = "iplayer"
  26. self.title = "BBC iPlayer"
  27. self.img = "http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png"
  28. self.desc = "BBC iPlayer portal content"
  29. self.api_url = "http://ibl.api.bbci.co.uk/ibl/v1/"
  30. self.headers = headers2dict("""
  31. User-Agent: BBCiPlayer/4.19.0.3021 (SM-G900FD; Android 4.4.2)
  32. Connection: Keep-Alive
  33. """)
  34. self.headers2 = headers2dict("""
  35. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
  36. Connection: Keep-Alive
  37. """)
  38. self.ch = []
  39. self.ch_id={}
  40. self.ch_id2={}
  41. self.ch_name={}
  42. self.logos ={
  43. "bbc_one_london":"http://www.lyngsat-logo.com/hires/bb/bbc_one.png",
  44. "bbc_two_england":"http://www.lyngsat-logo.com/hires/bb/bbc_two_uk.png",
  45. "bbc_three":"http://www.lyngsat-logo.com/hires/bb/bbc_three_uk.png",
  46. "bbc_four":"http://www.lyngsat-logo.com/hires/bb/bbc_four_uk.png",
  47. "bbc_radio_one":"http://www.lyngsat-logo.com/hires/bb/bbc_radio1.png",
  48. "cbbc":"http://www.lyngsat-logo.com/hires/bb/bbc_cbbc.png",
  49. "cbeebies":"http://www.lyngsat-logo.com/hires/bb/bbc_cbeebies_uk.png",
  50. "bbc_news24":"http://www.lyngsat-logo.com/hires/bb/bbc_news.png",
  51. "bbc_parliament":"http://www.lyngsat-logo.com/hires/bb/bbc_parliament.png",
  52. "bbc_alba":"http://www.lyngsat-logo.com/hires/bb/bbc_alba.png",
  53. "s4cpbs":"http://www.lyngsat-logo.com/hires/ss/s4c_uk.png"
  54. }
  55. cur_directory = os.path.dirname(os.path.abspath(__file__))
  56. if not cfg_path: cfg_path = cur_directory
  57. self.config_file = os.path.join(cfg_path,self.name+".cfg")
  58. self.options = OrderedDict([("user","lietotajs"),("password","parole")])
  59. self.options_read()
  60. def get_content(self, data):
  61. print "[iplayer] get_content:", data
  62. if "::" in data:
  63. data = data.split("::")[1]
  64. path = data.split("?")[0]
  65. clist = path.split("/")[0]
  66. params = data[data.find("?"):] if "?" in data else ""
  67. qs = dict(map(lambda x:x.split("="),re.findall("\w+=[\w-]+",params)))
  68. #lang = qs["lang"] if "lang" in qs else self.country
  69. content=[]
  70. content.append(("..return", "back","","Return back"))
  71. ### Home ###
  72. if data=="home":
  73. content.extend([
  74. ("Search TV", "iplayer::search/{0}","","Search in iPlayer"),
  75. ("Live streams", "iplayer::live","","TV live streams"),
  76. ("Channels", "iplayer::channels","","Programmes by channel/date"),
  77. ("Categories", "iplayer::categories","","Programmes by categories"),
  78. ("A-Z", "iplayer::a-z","","All programmes by name"),
  79. ("Highlights", "iplayer::home/highlights","","Current highlights"),
  80. ("Most popular", "iplayer::groups/popular/episodes?per_page=40&page=1","","Most popular programmes")
  81. ])
  82. return content
  83. ### Search ###
  84. elif clist=="search":
  85. data_ = "search-suggest/?q=%s&rights=mobile&initial_child_count=1"%data.split("/")[1]
  86. r = self.call(data_)
  87. for item in r["search_suggest"]["results"]:
  88. title,data2,img,desc = self.get_data_element(item)
  89. content.append((title,self.name+"::"+data2,img,desc))
  90. return content
  91. ### Live main ###
  92. elif data=="live":
  93. for ch in self.get_channels():
  94. title = ch["title"]
  95. img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  96. desc = title
  97. data2 = "live/%s"%ch["id"]
  98. ee = self.get_epg_live(ch["id"])
  99. desc = ee[2]
  100. content.append((title,self.name+"::"+data2,img,desc))
  101. return content
  102. ### Categories ###
  103. elif data == "categories":
  104. r = self.call(data)
  105. if not "categories":
  106. raise Exception("Error reading categories")
  107. for item in r["categories"]:
  108. data2 = "categories/%s"%(item["id"])
  109. title = item["title"]
  110. desc = title
  111. img = self.img
  112. content.append((title,self.name+"::"+data2,img,desc))
  113. return content
  114. ### Catetory root ###
  115. elif clist == "categories" and len(data.split("/"))==2:
  116. r = self.call(data)
  117. title = "%s - highlights"%r["category"]["title"]
  118. content.append((title,self.name+"::"+data+"/highlights?lang=en&rights=mobile&availability=available",self.img,title))
  119. title = "%s - recent (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
  120. content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
  121. title = "%s - a-z (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
  122. content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=title&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
  123. return content
  124. ### Program/episodes list ###
  125. elif re.search("categories/([\w\-]+)/(highlights|programmes).+",data) or\
  126. re.search("programmes/(\w+)/episodes.+",data) or\
  127. re.search("groups/(\w+)/episodes.+",data) or\
  128. re.search("atoz/([\w]+)/programmes.+",data) or\
  129. re.search("channels/(\w+)/schedule/[\d\-].+",data) or\
  130. re.search("channels/(\w+)/programmes.+",data) or\
  131. re.search("channels/(\w+)/highlights.+",data) or\
  132. data == "home/highlights":
  133. r = self.call(data)
  134. lst = r["category_highlights"] if "category_highlights" in r else\
  135. r["category_programmes"] if "category_programmes" in r else\
  136. r["programme_episodes"] if "programme_episodes" in r else\
  137. r["atoz_programmes"] if "atoz_programmes" in r else\
  138. r["group_episodes"] if "group_episodes" in r else\
  139. r["schedule"] if "schedule" in r else\
  140. r["channel_highlights"] if "channel_highlights" in r else\
  141. r["channel_programmes"] if "channel_programmes" in r else\
  142. r["home_highlights"] if "home_highlights" in r else\
  143. []
  144. if not lst:
  145. return content
  146. for el in lst["elements"]:
  147. if el["type"] == "broadcast":
  148. if not len(el["episode"]["versions"]):continue
  149. title,data2,img,desc = self.get_data_element(el["episode"])
  150. t1 = gt(el['scheduled_start'])
  151. t2 = gt(el['scheduled_end'])
  152. title = "[%s-%s]%s"%(t1.strftime("%d.%m.%Y %H:%M"),t2.strftime("%H:%M"),title)
  153. else:
  154. title,data2,img,desc = self.get_data_element(el)
  155. content.append((title,self.name+"::"+data2,img,desc))
  156. if "&page=" in data and lst["page"]*lst["per_page"]<lst["count"]:
  157. data2 = re.sub("&page=\d+","&page=%s"%(lst["page"]+1),data)
  158. content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
  159. return content
  160. ### A-z root ###
  161. elif data=="a-z":
  162. url = "http://www.bbc.co.uk/programmes/a-z/by/x/all.json?page=1"
  163. r = self._http_request(url)
  164. if not r:
  165. raise Exception("Can not read %s"%s)
  166. js = json.loads(r)
  167. for ch in js["atoz"]["letters"]:
  168. title = ch.upper()
  169. desc = "Programmes beginning with %s"%title
  170. img = self.img
  171. data2 = "atoz/%s/programmes?rights=mobile&page=1&per_page=40&initial_child_count=1&sort=title&sort_direction=asc&availability=available"%ch
  172. content.append((title,self.name+"::"+data2,img,desc))
  173. return content
  174. ### Channels home ###
  175. elif data=="channels":
  176. for ch in self.get_channels():
  177. title = ch["title"]
  178. img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  179. desc = title
  180. data2 = "channels/%s"%ch["id"]
  181. #ee = self.get_epg_live(ch["id"])
  182. desc = title
  183. content.append((title,self.name+"::"+data2,img,desc))
  184. return content
  185. ### Channel higlihts/progrmmes/days ###
  186. elif clist=="channels" and len(data.split("/"))==2:
  187. r = self.call(data)
  188. chid = data.split("/")[1]
  189. ch = self.get_channel_by_id(chid)
  190. # Highlights
  191. title = ch["title"] + " - highlights"
  192. img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  193. data2 = "channels/%s/highlights?lang=en&rights=mobile&availability=available"%ch["id"]
  194. desc = title
  195. content.append((title,self.name+"::"+data2,img,desc))
  196. #AtoZ
  197. title = ch["title"] + " - programmes AtoZ"
  198. data2 = "channels/%s/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available"%ch["id"]
  199. desc = title
  200. content.append((title,self.name+"::"+data2,img,desc))
  201. day0 = datetime.date.today()
  202. for i in range(10):
  203. day = day0-datetime.timedelta(days=i)
  204. days = day.strftime("%Y-%m-%d")
  205. title = ch["title"] + " - " + days
  206. img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
  207. data2 = "channels/%s/schedule/%s?availability=available"%(ch["id"],days)
  208. #ee = self.get_epg_live(ch["id"])
  209. desc = title
  210. content.append((title,self.name+"::"+data2,img,desc))
  211. return content
  212. def get_streams(self, data):
  213. print "[iplayer] get_streams:", data
  214. if "::" in data: data = data.split("::")[1]
  215. if not self.is_video(data):
  216. return []
  217. cmd = data.split("/")
  218. vid = cmd[1].split("?")[0]
  219. if cmd[0] == "live":
  220. title,img,desc,nfo = self.get_epg_live(vid)
  221. else:
  222. #data_ = "episodes/%s"%vid
  223. #r = self.call(data_)
  224. title,img,desc,vid,nfo = self.get_epg_video(vid)
  225. url = "http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/format/json/mediaset/iptv-all/vpid/%s"%vid
  226. print "vid=%s"%vid
  227. print url
  228. r = self._http_request(url) #,headers=self.headers2
  229. if not r:
  230. raise Exception("No streams found")
  231. js = json.loads(r)
  232. if "result" in js and js["result"]=="geolocation":
  233. raise Exception("BBC iPlayer service available only from UK")
  234. if not "media" in js:
  235. raise Exception("No streams found")
  236. streams = []
  237. captions = []
  238. for s in js["media"]:
  239. if s["kind"] == "captions":
  240. if s["connection"][0]["href"]:
  241. sub = {}
  242. sub["url"] = s["connection"][0]["href"].encode('utf8')
  243. sub["type"] = s["type"]
  244. sub["name"] = s["service"] if "service" in s else "captions (taff)"
  245. sub["lang"] = "en"
  246. captions.append(sub)
  247. if s["kind"] <> "video":
  248. continue
  249. for c in s["connection"]:
  250. if c["transferFormat"] <> "hls": continue
  251. #if not (c["supplier"].startswith("mf_") or c["supplier"].startswith("ll_")) : continue # TODO ir kaut kādas VPN problēmas ar akamaihd
  252. #if c["priority"] <> "1": continue
  253. url=c["href"].encode("utf8")
  254. r2 = self._http_request(url)
  255. if not r2: continue
  256. slist = re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", r2, re.DOTALL)
  257. if not slist:
  258. stream = util.item()
  259. stream["url"]=url
  260. stream["name"]=title
  261. stream["desc"]=desc
  262. stream["img"]=img
  263. stream["type"]="hls"
  264. stream["quality"]=("%s %sx%s %s,%s"%(s["bitrate"],s["width"],s["height"],c["supplier"],c["priority"])).encode("utf8")
  265. stream["lang"]="en"
  266. stream["subs"]=captions
  267. stream["order"]=int(s["bitrate"])
  268. stream["nfo"] = nfo
  269. streams.append(stream)
  270. else:
  271. for cc in slist:
  272. m = re.search("RESOLUTION=([\dx]+)",cc[0])
  273. resolution = m.group(1) if m else "%sx%s"%(s["width"],s["height"])
  274. m = re.search("BANDWIDTH=([\d]+)",cc[0])
  275. bitrate = m.group(1) if m else s["bitrate"]
  276. url2 = cc[1].encode("utf8")
  277. if not url2.startswith("http"):
  278. uu = url.split("/")[:-1]
  279. uu.append(url2)
  280. url2 = "/".join(uu)
  281. stream = util.item()
  282. stream["url"]=url2
  283. stream["name"]=title
  284. stream["desc"]=desc
  285. stream["img"]=img
  286. stream["type"]="hls"
  287. stream["quality"]=("%s %s %s,%s"%(bitrate,resolution,c["supplier"],c["priority"])).encode("utf8")
  288. stream["lang"]="en"
  289. stream["subs"]=captions
  290. stream["order"]=int(bitrate)
  291. stream["nfo"] = nfo
  292. streams.append(stream)
  293. if captions:
  294. for s in streams:
  295. s["subs"]=captions
  296. streams = sorted(streams,key=lambda item: item["order"],reverse=True)
  297. return streams
  298. def is_video(self,data):
  299. if "::" in data:
  300. data = data.split("::")[1]
  301. cmd = data.split("/")
  302. if cmd[0]=="live" and len(cmd)==2:
  303. return True
  304. elif cmd[0]=="episodes" and len(cmd)==2:
  305. return True
  306. else:
  307. return False
  308. def get_data_element(self,item):
  309. if ("programme" in item["type"] or "group" in item["type"]) and item["count"]>1:
  310. ep = item.copy()
  311. elif ("programme" in item["type"] or "group" in item["type"]) and item["count"]==1:
  312. ep = item["initial_children"][0].copy()
  313. elif item["type"] == "episode":
  314. ep = item.copy()
  315. elif item["type"] == "broadcast":
  316. ep = item["episode"].copy()
  317. else:
  318. ep = item.copy()
  319. title = ep["title"]
  320. if "subtitle" in ep and ep["subtitle"]:
  321. title = title+". "+ ep["subtitle"]
  322. desc = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"]
  323. #TODO papildus info pie apraksta
  324. img = ep["images"]["standard"].replace("{recipe}","512x288") if "images" in ep else self.img
  325. if ep["type"] == "episode":
  326. data2 = "episodes/%s"%ep["id"]
  327. elif "programme" in ep["type"]:
  328. data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
  329. title = "%s [%s episodes]"%(title,ep["count"])
  330. elif "group" in ep["type"]:
  331. data2 = "groups/%s/episodes?per_page=40&page=1"%ep["id"]
  332. title = "%s [%s episodes]"%(title,ep["count"])
  333. else:
  334. data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
  335. title = "%s [%s episodes]"%(title,ep["count"])
  336. return title,data2,img,desc
  337. def get_epg_video(self,vid):
  338. data = "episodes/%s"%vid
  339. nfo = {}
  340. r = self.call(data)
  341. if "episodes" in r :
  342. ep = r["episodes"][0]
  343. title = ep["title"]
  344. if "subtitle" in ep:
  345. title = title +". "+ ep["subtitle"]
  346. title = title
  347. desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  348. desc = desc
  349. ver = ep["versions"][0]
  350. vid = ver["id"]
  351. remaining = ver["availability"]["end"].split("T")[0] #["remaining"]["text"]
  352. duration = ver["duration"]["text"]
  353. first_broadcast = ver["first_broadcast"]
  354. desc =u"%s\n%s\%s\n%s\n%s"%(title,duration,remaining,first_broadcast,desc)
  355. img = ep["images"]["standard"].replace("{recipe}","512x288")
  356. #Create nfo dictionary
  357. tt = lambda dd,k,d: dd[k] if k in dd else d
  358. nfo_type = "movie" if True else "tvswhow" # TODO
  359. t = OrderedDict()
  360. t["title"] = title
  361. t["originaltitle"] = tt(ep,"original_title","")
  362. t["thumb"] = img
  363. t["id"] = vid
  364. t["outline"] = ep["synopses"]["small"] if "small" in ep["synopses"] else ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
  365. t["plot"] = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  366. t["tagline"] = ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
  367. t["runtime"] = tt(ver["duration"],"text","")
  368. t["premiered"] = tt(ep,"release_date","")
  369. t["aired"] = ver["availability"]["start"].split("T")[0] if "start" in ver["availability"] else ""
  370. if "parent_position" in ep: t["episode"] = ep["parent_position"]
  371. nfo[nfo_type] = t
  372. return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),vid.encode("utf8"),nfo
  373. else:
  374. raise Exception("No video info")
  375. def get_epg_live(self,channelid):
  376. data = "channels/%s/highlights?live=true"%channelid
  377. r = self.call(data)
  378. nfo = {}
  379. if "channel_highlights" in r and r["channel_highlights"]["elements"][0]["id"] == "live":
  380. epg = r["channel_highlights"]["elements"][0]["initial_children"][0].copy()
  381. t1 = gt(epg['scheduled_start'])
  382. t2 = gt(epg['scheduled_end'])
  383. ep = epg["episode"]
  384. title = ep["title"]
  385. if "subtitle" in ep:
  386. title = title +". "+ ep["subtitle"]
  387. title = "%s (%s-%s)"%(title,t1.strftime("%H:%M"),t2.strftime("%H:%M"))
  388. title = title
  389. desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
  390. desc = desc
  391. desc ="%s\n%s"%(title,desc)
  392. img = ep["images"]["standard"].replace("{recipe}","512x288")
  393. #return title,img,desc
  394. else:
  395. title = r["channel_highlights"]["channel"]["title"]
  396. img = ""
  397. desc = title
  398. return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),nfo
  399. def get_channels(self):
  400. if self.ch:
  401. return self.ch
  402. r= self.call("channels")
  403. self.ch=[]
  404. for i,item in enumerate(r["channels"]):
  405. self.ch.append(item)
  406. self.ch_id[item["id"]]=i
  407. self.ch_id2[item["master_brand_id"]]=i
  408. self.ch_name[item["title"]]=i
  409. return self.ch
  410. def get_channel_by_id(self,chid):
  411. if not self.ch:
  412. self.get_channels()
  413. if not self.ch:
  414. return None
  415. return self.ch[self.ch_id[chid]] if self.ch_id.has_key(chid) else None
  416. def get_channel_by_id2(self,chid):
  417. if not self.ch:
  418. self.get_channels()
  419. if not self.ch:
  420. return None
  421. return self.ch[self.ch_id2[chid]] if self.ch_id2.has_key(chid) else None
  422. def get_channel_by_name(self,name):
  423. if not self.ch:
  424. self.get_channels()
  425. ch2 = self.get_channel_by_name2(name)
  426. if not ch2:
  427. return None
  428. ch = self.get_channel_by_id2(ch2["id2"])
  429. return ch
  430. def call(self, data,params = None, headers=None):
  431. if not headers: headers = self.headers
  432. #if not lang: lang = self.country
  433. url = self.api_url + data
  434. content = self._http_request(url,params, headers)
  435. if content:
  436. try:
  437. result = json.loads(content)
  438. return result
  439. except Exception, ex:
  440. return None
  441. else:
  442. return None
  443. def call2(self, data,params = None, headers=None):
  444. if not headers: headers = self.headers2
  445. #if not lang: lang = self.country
  446. url = self.api_url2 + data
  447. content = self._http_request(url,params, headers)
  448. return content
  449. def _http_request(self, url,params = None, headers=None):
  450. if not headers: headers = self.headers
  451. import requests
  452. try:
  453. from requests.packages.urllib3.exceptions import InsecureRequestWarning
  454. requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
  455. except:
  456. pass
  457. try:
  458. r = requests.get(url, headers=headers)
  459. return r.content
  460. except Exception as ex:
  461. if ex.code==403:
  462. return ex.read()
  463. else:
  464. return None
  465. def gt(dt_str):
  466. dt, _, us= dt_str.partition(".")
  467. dt= datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
  468. dt = dt - datetime.timedelta(seconds=time.altzone)
  469. #us= int(us.rstrip("Z"), 10)
  470. #r = dt + datetime.timedelta(microseconds=us)a
  471. return dt
  472. if __name__ == "__main__":
  473. c = Source()
  474. from subprocess import call
  475. #ch = c.get_channels()
  476. #c.get_epg_live("bbc_two_england")
  477. if len(sys.argv)>1 and not "iplayer::" in sys.argv[1]:
  478. vid = sys.argv[1]
  479. print "login - %s"%c.login("ivars777","xxx")
  480. vid = "1069"
  481. vid = "1462566072086"
  482. channelid="101"
  483. vid = "1350462656767"
  484. #data = c.get_stream_url(vid,"vod")
  485. #call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",data["stream"]])
  486. pass
  487. else:
  488. if len(sys.argv)>1:
  489. data= sys.argv[1]
  490. else:
  491. data = "iplayer::home"
  492. content = c.get_content(data)
  493. for item in content:
  494. print item
  495. #cat = api.get_categories(country)
  496. #chan = api.get_channels("lv")
  497. #prog = api.get_programs(channel=6400)
  498. #prog = api.get_programs(category=55)
  499. #seas = api.get_seasons(program=6453)
  500. #str = api.get_streams(660243)
  501. #res = api.get_videos(802)
  502. #formats = api.getAllFormats()
  503. #det = api.detailed("1516")
  504. #vid = api.getVideos("13170")
  505. pass