Enigma2 plugin to play various online streams (mostly Latvian).

iplayer.py 23KB

#!/usr/bin/env python
# coding=utf8
#
# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
#
import sys, os, os.path, re
import urllib, urllib2
from xml.sax.saxutils import unescape, escape
from urllib import quote, unquote
import datetime, time
import HTMLParser
import json
from SourceBase import SourceBase, stream_type, stream0
from collections import OrderedDict

API_URL = 'https://m.lattelecom.tv/'
user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
h = HTMLParser.HTMLParser()

class Source(SourceBase):

    def __init__(self):
        self.name = "iplayer"
        self.title = "BBC iPlayer"
        self.img = "http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png"
        self.desc = "BBC iPlayer portal content"
        self.api_url = "http://ibl.api.bbci.co.uk/ibl/v1/"
        self.headers = headers2dict("""
            User-Agent: BBCiPlayer/4.19.0.3021 (SM-G900FD; Android 4.4.2)
            Connection: Keep-Alive
            """)
        self.headers2 = headers2dict("""
            User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
            Connection: Keep-Alive
            """)
        self.ch = []
        self.ch_id = {}
        self.ch_id2 = {}
        self.ch_name = {}
        self.logos = {
            "bbc_one_london": "http://www.lyngsat-logo.com/hires/bb/bbc_one.png",
            "bbc_two_england": "http://www.lyngsat-logo.com/hires/bb/bbc_two_uk.png",
            "bbc_three": "http://www.lyngsat-logo.com/hires/bb/bbc_three_uk.png",
            "bbc_four": "http://www.lyngsat-logo.com/hires/bb/bbc_four_uk.png",
            "bbc_radio_one": "http://www.lyngsat-logo.com/hires/bb/bbc_radio1.png",
            "cbbc": "http://www.lyngsat-logo.com/hires/bb/bbc_cbbc.png",
            "cbeebies": "http://www.lyngsat-logo.com/hires/bb/bbc_cbeebies_uk.png",
            "bbc_news24": "http://www.lyngsat-logo.com/hires/bb/bbc_news.png",
            "bbc_parliament": "http://www.lyngsat-logo.com/hires/bb/bbc_parliament.png",
            "bbc_alba": "http://www.lyngsat-logo.com/hires/bb/bbc_alba.png",
            "s4cpbs": "http://www.lyngsat-logo.com/hires/ss/s4c_uk.png"
        }

    def get_content(self, data):
        print "[iplayer] get_content:", data
        if "::" in data:
            data = data.split("::")[1]
        path = data.split("?")[0]
        clist = path.split("/")[0]
        params = data[data.find("?"):] if "?" in data else ""
        qs = dict(map(lambda x: x.split("="), re.findall("\w+=[\w-]+", params)))
        #lang = qs["lang"] if "lang" in qs else self.country
        content = []
        content.append(("..return", "back", "", "Return back"))

        ### Home ###
        if data == "home":
            content.extend([
                ("Search TV", "iplayer::search/{0}", "", "Search in iPlayer"),
                ("Live streams", "iplayer::live", "", "TV live streams"),
                ("Channels", "iplayer::channels", "", "Programmes by channel/date"),
                ("Categories", "iplayer::categories", "", "Programmes by categories"),
                ("A-Z", "iplayer::a-z", "", "All programmes by name"),
                ("Highlights", "iplayer::home/highlights", "", "Current highlights"),
                ("Most popular", "iplayer::groups/popular/episodes?per_page=40&page=1", "", "Most popular programmes")
            ])
            return content

        ### Search ###
        elif clist == "search":
            data_ = "search-suggest/?q=%s&rights=mobile&initial_child_count=1" % data.split("/")[1]
            r = self.call(data_)
            for item in r["search_suggest"]["results"]:
                title, data2, img, desc = self.get_data_element(item)
                content.append((title, self.name+"::"+data2, img, desc))
            return content

        ### Live main ###
        elif data == "live":
            for ch in self.get_channels():
                title = ch["title"]
                img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
                data2 = "live/%s" % ch["id"]
                ee = self.get_epg_live(ch["id"])
                desc = ee[2]
                content.append((title, self.name+"::"+data2, img, desc))
            return content

        ### Categories ###
        elif data == "categories":
            r = self.call(data)
            if "categories" not in r:
                raise Exception("Error reading categories")
            for item in r["categories"]:
                data2 = "categories/%s" % (item["id"])
                title = item["title"]
                desc = title
                img = self.img
                content.append((title, self.name+"::"+data2, img, desc))
            return content

        ### Category root ###
        elif clist == "categories" and len(data.split("/")) == 2:
            r = self.call(data)
            title = "%s - highlights" % r["category"]["title"]
            content.append((title, self.name+"::"+data+"/highlights?lang=en&rights=mobile&availability=available", self.img, title))
            title = "%s - recent (%s programmes, %s episodes)" % (r["category"]["title"], r["category"]["child_programme_count"], r["category"]["child_episode_count"])
            content.append((title, self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available", self.img, title))
            title = "%s - a-z (%s programmes, %s episodes)" % (r["category"]["title"], r["category"]["child_programme_count"], r["category"]["child_episode_count"])
            content.append((title, self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=title&sort_direction=asc&initial_child_count=1&availability=available", self.img, title))
            return content
  118. ### Program/episodes list ###
  119. elif re.search("categories/([\w\-]+)/(highlights|programmes).+",data) or\
  120. re.search("programmes/(\w+)/episodes.+",data) or\
  121. re.search("groups/(\w+)/episodes.+",data) or\
  122. re.search("atoz/([\w]+)/programmes.+",data) or\
  123. re.search("channels/(\w+)/schedule/[\d\-].+",data) or\
  124. re.search("channels/(\w+)/programmes.+",data) or\
  125. re.search("channels/(\w+)/highlights.+",data) or\
  126. data == "home/highlights":
  127. r = self.call(data)
  128. lst = r["category_highlights"] if "category_highlights" in r else\
  129. r["category_programmes"] if "category_programmes" in r else\
  130. r["programme_episodes"] if "programme_episodes" in r else\
  131. r["atoz_programmes"] if "atoz_programmes" in r else\
  132. r["group_episodes"] if "group_episodes" in r else\
  133. r["schedule"] if "schedule" in r else\
  134. r["channel_highlights"] if "channel_highlights" in r else\
  135. r["channel_programmes"] if "channel_programmes" in r else\
  136. r["home_highlights"] if "home_highlights" in r else\
  137. []
  138. if not lst:
  139. return content
  140. for el in lst["elements"]:
  141. if el["type"] == "broadcast":
  142. if not len(el["episode"]["versions"]):continue
  143. title,data2,img,desc = self.get_data_element(el["episode"])
  144. t1 = gt(el['scheduled_start'])
  145. t2 = gt(el['scheduled_end'])
  146. title = "[%s-%s]%s"%(t1.strftime("%d.%m.%Y %H:%M"),t2.strftime("%H:%M"),title)
  147. else:
  148. title,data2,img,desc = self.get_data_element(el)
  149. content.append((title,self.name+"::"+data2,img,desc))
  150. if "&page=" in data and lst["page"]*lst["per_page"]<lst["count"]:
  151. data2 = re.sub("&page=\d+","&page=%s"%(lst["page"]+1),data)
  152. content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
  153. return content
  154. ### A-z root ###
  155. elif data=="a-z":
  156. url = "http://www.bbc.co.uk/programmes/a-z/by/x/all.json?page=1"
  157. r = self._http_request(url)
  158. if not r:
  159. raise Exception("Can not read %s"%s)
  160. js = json.loads(r)
  161. for ch in js["atoz"]["letters"]:
  162. title = ch.upper()
  163. desc = "Programmes beginning with %s"%title
  164. img = self.img
  165. data2 = "atoz/%s/programmes?rights=mobile&page=1&per_page=40&initial_child_count=1&sort=title&sort_direction=asc&availability=available"%ch
  166. content.append((title,self.name+"::"+data2,img,desc))
  167. return content

        ### Channels home ###
        elif data == "channels":
            for ch in self.get_channels():
                title = ch["title"]
                img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
                data2 = "channels/%s" % ch["id"]
                #ee = self.get_epg_live(ch["id"])
                desc = title
                content.append((title, self.name+"::"+data2, img, desc))
            return content

        ### Channel highlights/programmes/days ###
        elif clist == "channels" and len(data.split("/")) == 2:
            r = self.call(data)
            chid = data.split("/")[1]
            ch = self.get_channel_by_id(chid)
            # Highlights
            title = ch["title"] + " - highlights"
            img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
            data2 = "channels/%s/highlights?lang=en&rights=mobile&availability=available" % ch["id"]
            desc = title
            content.append((title, self.name+"::"+data2, img, desc))
            # A-to-Z
            title = ch["title"] + " - programmes AtoZ"
            data2 = "channels/%s/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available" % ch["id"]
            desc = title
            content.append((title, self.name+"::"+data2, img, desc))
            # Schedule for the last 10 days
            day0 = datetime.date.today()
            for i in range(10):
                day = day0 - datetime.timedelta(days=i)
                days = day.strftime("%Y-%m-%d")
                title = ch["title"] + " - " + days
                img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
                data2 = "channels/%s/schedule/%s?availability=available" % (ch["id"], days)
                #ee = self.get_epg_live(ch["id"])
                desc = title
                content.append((title, self.name+"::"+data2, img, desc))
            return content

    def get_streams(self, data):
        print "[iplayer] get_streams:", data
        if "::" in data:
            data = data.split("::")[1]
        if not self.is_video(data):
            return []
        cmd = data.split("/")
        vid = cmd[1].split("?")[0]
        if cmd[0] == "live":
            title, img, desc = self.get_epg_live(vid)
        else:
            data_ = "episodes/%s" % vid
            r = self.call(data_)
            title, img, desc, vid = self.get_epg_video(vid)
        url = "http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/format/json/mediaset/iptv-all/vpid/%s" % vid
        print "vid=%s" % vid
        print url
        r = self._http_request(url)  #,headers=self.headers2
        if not r:
            raise Exception("No streams found")
        js = json.loads(r)
        if "result" in js and js["result"] == "geolocation":
            raise Exception("BBC iPlayer service available only from UK")
        if not "media" in js:
            raise Exception("No streams found")
        streams = []
        captions = []
        for s in js["media"]:
            if s["kind"] == "captions":
                if s["connection"][0]["href"]:
                    sub = {}
                    sub["url"] = s["connection"][0]["href"].encode('utf8')
                    sub["type"] = s["type"]
                    sub["name"] = s["service"] if "service" in s else "captions (taff)"
                    sub["lang"] = "en"
                    captions.append(sub)
            if s["kind"] != "video":
                continue
            for c in s["connection"]:
                if c["transferFormat"] != "hls":
                    continue
                #if not (c["supplier"].startswith("mf_") or c["supplier"].startswith("ll_")): continue # TODO there are some VPN problems with akamaihd
                #if c["priority"] != "1": continue
                url = c["href"].encode("utf8")
                r2 = self._http_request(url)
                if not r2:
                    continue
                slist = re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", r2, re.DOTALL)
                if not slist:
                    stream = stream0.copy()
                    stream["url"] = url
                    stream["name"] = title
                    stream["desc"] = desc
                    stream["img"] = img
                    stream["type"] = "hls"
                    stream["quality"] = ("%s %sx%s %s,%s" % (s["bitrate"], s["width"], s["height"], c["supplier"], c["priority"])).encode("utf8")
                    stream["lang"] = "en"
                    stream["subs"] = captions
                    stream["order"] = int(s["bitrate"])
                    streams.append(stream)
                else:
                    for cc in slist:
                        m = re.search("RESOLUTION=([\dx]+)", cc[0])
                        resolution = m.group(1) if m else "%sx%s" % (s["width"], s["height"])
                        m = re.search("BANDWIDTH=([\d]+)", cc[0])
                        bitrate = m.group(1) if m else s["bitrate"]
                        url2 = cc[1].encode("utf8")
                        if not url2.startswith("http"):
                            uu = url.split("/")[:-1]
                            uu.append(url2)
                            url2 = "/".join(uu)
                        stream = stream0.copy()
                        stream["url"] = url2
                        stream["name"] = title
                        stream["desc"] = desc
                        stream["img"] = img
                        stream["type"] = "hls"
                        stream["quality"] = ("%s %s %s,%s" % (bitrate, resolution, c["supplier"], c["priority"])).encode("utf8")
                        stream["lang"] = "en"
                        stream["subs"] = captions
                        stream["order"] = int(bitrate)
                        streams.append(stream)
        if captions:
            for s in streams:
                s["subs"] = captions
        streams = sorted(streams, key=lambda item: item["order"], reverse=True)
        return streams

    def is_video(self, data):
        if "::" in data:
            data = data.split("::")[1]
        cmd = data.split("/")
        if cmd[0] == "live" and len(cmd) == 2:
            return True
        elif cmd[0] == "episodes" and len(cmd) == 2:
            return True
        else:
            return False

    def get_data_element(self, item):
        if ("programme" in item["type"] or "group" in item["type"]) and item["count"] > 1:
            ep = item.copy()
        elif ("programme" in item["type"] or "group" in item["type"]) and item["count"] == 1:
            ep = item["initial_children"][0].copy()
        elif item["type"] == "episode":
            ep = item.copy()
        elif item["type"] == "broadcast":
            ep = item["episode"].copy()
        else:
            ep = item.copy()
        title = ep["title"]
        if "subtitle" in ep and ep["subtitle"]:
            title = title + ". " + ep["subtitle"]
        desc = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"]
        #TODO additional info in the description
        img = ep["images"]["standard"].replace("{recipe}", "512x288") if "images" in ep else self.img
        if ep["type"] == "episode":
            data2 = "episodes/%s" % ep["id"]
        elif "programme" in ep["type"]:
            data2 = "programmes/%s/episodes?per_page=40&page=1" % ep["id"]
            title = "%s [%s episodes]" % (title, ep["count"])
        elif "group" in ep["type"]:
            data2 = "groups/%s/episodes?per_page=40&page=1" % ep["id"]
            title = "%s [%s episodes]" % (title, ep["count"])
        else:
            data2 = "programmes/%s/episodes?per_page=40&page=1" % ep["id"]
            title = "%s [%s episodes]" % (title, ep["count"])
        return title, data2, img, desc

    def get_epg_video(self, vid):
        data = "episodes/%s" % vid
        r = self.call(data)
        if "episodes" in r:
            ep = r["episodes"][0]
            title = ep["title"]
            if "subtitle" in ep:
                title = title + ". " + ep["subtitle"]
            desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"] if "small" in ep["synopses"] else title
            ver = ep["versions"][0]
            vid = ver["id"]
            remaining = ver["availability"]["remaining"]["text"]
            duration = ver["duration"]
            first_broadcast = ver["first_broadcast"]
            desc = u"%s\n%s\n%s\n%s\n%s" % (title, duration, remaining, first_broadcast, desc)
            img = ep["images"]["standard"].replace("{recipe}", "512x288")
            return title.encode("utf8"), img.encode("utf8"), desc.encode("utf8"), vid.encode("utf8")
        else:
            raise Exception("No video info")

    def get_epg_live(self, channelid):
        data = "channels/%s/highlights?live=true" % channelid
        r = self.call(data)
        if "channel_highlights" in r and r["channel_highlights"]["elements"][0]["id"] == "live":
            epg = r["channel_highlights"]["elements"][0]["initial_children"][0].copy()
            t1 = gt(epg['scheduled_start'])
            t2 = gt(epg['scheduled_end'])
            ep = epg["episode"]
            title = ep["title"]
            if "subtitle" in ep:
                title = title + ". " + ep["subtitle"]
            title = "%s (%s-%s)" % (title, t1.strftime("%H:%M"), t2.strftime("%H:%M"))
            desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"] if "small" in ep["synopses"] else title
            desc = "%s\n%s" % (title, desc)
            img = ep["images"]["standard"].replace("{recipe}", "512x288")
            #return title,img,desc
        else:
            title = r["channel_highlights"]["channel"]["title"]
            img = ""
            desc = title
        return title.encode("utf8"), img.encode("utf8"), desc.encode("utf8")

    def get_channels(self):
        if self.ch:
            return self.ch
        r = self.call("channels")
        self.ch = []
        for i, item in enumerate(r["channels"]):
            self.ch.append(item)
            self.ch_id[item["id"]] = i
            self.ch_id2[item["master_brand_id"]] = i
            self.ch_name[item["title"]] = i
        return self.ch

    def get_channel_by_id(self, chid):
        if not self.ch:
            self.get_channels()
        if not self.ch:
            return None
        return self.ch[self.ch_id[chid]] if chid in self.ch_id else None

    def get_channel_by_id2(self, chid):
        if not self.ch:
            self.get_channels()
        if not self.ch:
            return None
        return self.ch[self.ch_id2[chid]] if chid in self.ch_id2 else None

    def get_channel_by_name(self, name):
        if not self.ch:
            self.get_channels()
        if not self.ch:
            return None
        # look up by channel title via the ch_name index built in get_channels()
        return self.ch[self.ch_name[name]] if name in self.ch_name else None

    def call(self, data, params=None, headers=None):
        if not headers:
            headers = self.headers
        #if not lang: lang = self.country
        url = self.api_url + data
        content = self._http_request(url, params, headers)
        if content:
            try:
                result = json.loads(content)
                return result
            except Exception as ex:
                return None
        else:
            return None

    def call2(self, data, params=None, headers=None):
        if not headers:
            headers = self.headers2
        #if not lang: lang = self.country
        url = self.api_url2 + data
        content = self._http_request(url, params, headers)
        return content

    def _http_request(self, url, params=None, headers=None):
        if not headers:
            headers = self.headers
        import requests
        try:
            r = requests.get(url, headers=headers)
            return r.content
        except Exception as ex:
            # only urllib2-style HTTP errors carry a code and a readable body
            if getattr(ex, "code", None) == 403:
                return ex.read()
            else:
                return None

def gt(dt_str):
    dt, _, us = dt_str.partition(".")
    dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
    dt = dt - datetime.timedelta(seconds=time.altzone)
    #us = int(us.rstrip("Z"), 10)
    #r = dt + datetime.timedelta(microseconds=us)
    return dt

if __name__ == "__main__":
    c = Source()
    from subprocess import call
    #ch = c.get_channels()
    #c.get_epg_live("bbc_two_england")
    if len(sys.argv) > 1 and not "iplayer::" in sys.argv[1]:
        vid = sys.argv[1]
        #print "login - %s" % c.login("ivars777", "xxx")  # this source has no login()
        vid = "1069"
        vid = "1462566072086"
        channelid = "101"
        vid = "1350462656767"
        #data = c.get_stream_url(vid,"vod")
        #call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",data["stream"]])
        pass
    else:
        if len(sys.argv) > 1:
            data = sys.argv[1]
        else:
            data = "iplayer::home"
        content = c.get_content(data)
        for item in content:
            print item
        #cat = api.get_categories(country)
        #chan = api.get_channels("lv")
        #prog = api.get_programs(channel=6400)
        #prog = api.get_programs(category=55)
        #seas = api.get_seasons(program=6453)
        #str = api.get_streams(660243)
        #res = api.get_videos(802)
        #formats = api.getAllFormats()
        #det = api.detailed("1516")
        #vid = api.getVideos("13170")
        pass
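
Example usage (a minimal sketch, not shipped with the plugin): the module's two entry points can be driven directly from a Python 2 shell, assuming iplayer.py and its SourceBase dependency are on sys.path and that streams are resolved from a UK IP address, as the geolocation check above requires. The "iplayer::home" and "live/bbc_one_london" addresses are taken from the listing itself.

# usage sketch (Python 2); assumes iplayer.py and SourceBase are importable
from iplayer import Source

src = Source()

# Top-level menu; get_content() returns (title, data, img, desc) tuples.
for title, data, img, desc in src.get_content("iplayer::home"):
    print title, "->", data

# Resolve HLS streams for a live channel; each stream dict carries
# "url", "quality", "subs" etc. and is sorted by bitrate ("order").
for s in src.get_streams("iplayer::live/bbc_one_london"):
    print s["quality"], s["url"]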