Enigma2 plugin to play various online streams (mostly Latvian).

iplayer.py 23KB

#!/usr/bin/env python
# coding=utf8
#
# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
#
import sys, os, os.path, re
import urllib, urllib2
from xml.sax.saxutils import unescape, escape
from urllib import quote, unquote
import datetime, time
import HTMLParser
import json
from SourceBase import SourceBase, stream_type, stream0
from collections import OrderedDict

API_URL = 'https://m.lattelecom.tv/'
user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
h = HTMLParser.HTMLParser()
class Source(SourceBase):

    def __init__(self):
        self.name = "iplayer"
        self.title = "BBC iPlayer"
        self.img = "http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png"
        self.desc = "BBC iPlayer portal content"
        self.api_url = "http://ibl.api.bbci.co.uk/ibl/v1/"
        self.headers = headers2dict("""
        User-Agent: BBCiPlayer/4.19.0.3021 (SM-G900FD; Android 4.4.2)
        Connection: Keep-Alive
        """)
        self.headers2 = headers2dict("""
        User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
        Connection: Keep-Alive
        """)
        self.ch = []
        self.ch_id = {}
        self.ch_id2 = {}
        self.ch_name = {}
        self.logos = {
            "bbc_one_london": "http://www.lyngsat-logo.com/hires/bb/bbc_one.png",
            "bbc_two_england": "http://www.lyngsat-logo.com/hires/bb/bbc_two_uk.png",
            "bbc_three": "http://www.lyngsat-logo.com/hires/bb/bbc_three_uk.png",
            "bbc_four": "http://www.lyngsat-logo.com/hires/bb/bbc_four_uk.png",
            "bbc_radio_one": "http://www.lyngsat-logo.com/hires/bb/bbc_radio1.png",
            "cbbc": "http://www.lyngsat-logo.com/hires/bb/bbc_cbbc.png",
            "cbeebies": "http://www.lyngsat-logo.com/hires/bb/bbc_cbeebies_uk.png",
            "bbc_news24": "http://www.lyngsat-logo.com/hires/bb/bbc_news.png",
            "bbc_parliament": "http://www.lyngsat-logo.com/hires/bb/bbc_parliament.png",
            "bbc_alba": "http://www.lyngsat-logo.com/hires/bb/bbc_alba.png",
            "s4cpbs": "http://www.lyngsat-logo.com/hires/ss/s4c_uk.png"
        }
        cur_directory = os.path.dirname(os.path.abspath(__file__))
        self.config_file = os.path.join(cur_directory, self.name + ".cfg")
        self.options = OrderedDict([("user", "lietotajs"), ("password", "parole")])
        self.options_read()
    def get_content(self, data):
        print "[iplayer] get_content:", data
        if "::" in data:
            data = data.split("::")[1]
        path = data.split("?")[0]
        clist = path.split("/")[0]
        params = data[data.find("?"):] if "?" in data else ""
        qs = dict(map(lambda x: x.split("="), re.findall("\w+=[\w-]+", params)))
        #lang = qs["lang"] if "lang" in qs else self.country

        content = []
        content.append(("..return", "back", "", "Return back"))

        ### Home ###
        if data == "home":
            content.extend([
                ("Search TV", "iplayer::search/{0}", "", "Search in iPlayer"),
                ("Live streams", "iplayer::live", "", "TV live streams"),
                ("Channels", "iplayer::channels", "", "Programmes by channel/date"),
                ("Categories", "iplayer::categories", "", "Programmes by categories"),
                ("A-Z", "iplayer::a-z", "", "All programmes by name"),
                ("Highlights", "iplayer::home/highlights", "", "Current highlights"),
                ("Most popular", "iplayer::groups/popular/episodes?per_page=40&page=1", "", "Most popular programmes")
            ])
            return content

        ### Search ###
        elif clist == "search":
            data_ = "search-suggest/?q=%s&rights=mobile&initial_child_count=1" % data.split("/")[1]
            r = self.call(data_)
            for item in r["search_suggest"]["results"]:
                title, data2, img, desc = self.get_data_element(item)
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ### Live main ###
        elif data == "live":
            for ch in self.get_channels():
                title = ch["title"]
                img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
                desc = title
                data2 = "live/%s" % ch["id"]
                ee = self.get_epg_live(ch["id"])
                desc = ee[2]
                content.append((title, self.name + "::" + data2, img, desc))
            return content
        ### Categories ###
        elif data == "categories":
            r = self.call(data)
            if "categories" not in r:
                raise Exception("Error reading categories")
            for item in r["categories"]:
                data2 = "categories/%s" % (item["id"])
                title = item["title"]
                desc = title
                img = self.img
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ### Category root ###
        elif clist == "categories" and len(data.split("/")) == 2:
            r = self.call(data)
            title = "%s - highlights" % r["category"]["title"]
            content.append((title, self.name + "::" + data + "/highlights?lang=en&rights=mobile&availability=available", self.img, title))
            title = "%s - recent (%s programmes, %s episodes)" % (r["category"]["title"], r["category"]["child_programme_count"], r["category"]["child_episode_count"])
            content.append((title, self.name + "::" + data + "/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available", self.img, title))
            title = "%s - a-z (%s programmes, %s episodes)" % (r["category"]["title"], r["category"]["child_programme_count"], r["category"]["child_episode_count"])
            content.append((title, self.name + "::" + data + "/programmes?rights=mobile&page=1&per_page=40&sort=title&sort_direction=asc&initial_child_count=1&availability=available", self.img, title))
            return content
        ### Program/episodes list ###
        elif re.search("categories/([\w\-]+)/(highlights|programmes).+", data) or\
             re.search("programmes/(\w+)/episodes.+", data) or\
             re.search("groups/(\w+)/episodes.+", data) or\
             re.search("atoz/([\w]+)/programmes.+", data) or\
             re.search("channels/(\w+)/schedule/[\d\-].+", data) or\
             re.search("channels/(\w+)/programmes.+", data) or\
             re.search("channels/(\w+)/highlights.+", data) or\
             data == "home/highlights":
            r = self.call(data)
            lst = r["category_highlights"] if "category_highlights" in r else\
                  r["category_programmes"] if "category_programmes" in r else\
                  r["programme_episodes"] if "programme_episodes" in r else\
                  r["atoz_programmes"] if "atoz_programmes" in r else\
                  r["group_episodes"] if "group_episodes" in r else\
                  r["schedule"] if "schedule" in r else\
                  r["channel_highlights"] if "channel_highlights" in r else\
                  r["channel_programmes"] if "channel_programmes" in r else\
                  r["home_highlights"] if "home_highlights" in r else\
                  []
            if not lst:
                return content
            for el in lst["elements"]:
                if el["type"] == "broadcast":
                    if not len(el["episode"]["versions"]):
                        continue
                    title, data2, img, desc = self.get_data_element(el["episode"])
                    t1 = gt(el['scheduled_start'])
                    t2 = gt(el['scheduled_end'])
                    title = "[%s-%s]%s" % (t1.strftime("%d.%m.%Y %H:%M"), t2.strftime("%H:%M"), title)
                else:
                    title, data2, img, desc = self.get_data_element(el)
                content.append((title, self.name + "::" + data2, img, desc))
            if "&page=" in data and lst["page"] * lst["per_page"] < lst["count"]:
                data2 = re.sub("&page=\d+", "&page=%s" % (lst["page"] + 1), data)
                content.append(("Next page", self.name + "::" + data2, self.img, "Next page"))
            return content
        ### A-z root ###
        elif data == "a-z":
            url = "http://www.bbc.co.uk/programmes/a-z/by/x/all.json?page=1"
            r = self._http_request(url)
            if not r:
                raise Exception("Can not read %s" % url)
            js = json.loads(r)
            for ch in js["atoz"]["letters"]:
                title = ch.upper()
                desc = "Programmes beginning with %s" % title
                img = self.img
                data2 = "atoz/%s/programmes?rights=mobile&page=1&per_page=40&initial_child_count=1&sort=title&sort_direction=asc&availability=available" % ch
                content.append((title, self.name + "::" + data2, img, desc))
            return content
        ### Channels home ###
        elif data == "channels":
            for ch in self.get_channels():
                title = ch["title"]
                img = self.logos[ch["id"]] if ch["id"] in self.logos else "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
                data2 = "channels/%s" % ch["id"]
                #ee = self.get_epg_live(ch["id"])
                desc = title
                content.append((title, self.name + "::" + data2, img, desc))
            return content
        ### Channel highlights/programmes/days ###
        elif clist == "channels" and len(data.split("/")) == 2:
            r = self.call(data)
            chid = data.split("/")[1]
            ch = self.get_channel_by_id(chid)
            # Highlights
            title = ch["title"] + " - highlights"
            img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
            data2 = "channels/%s/highlights?lang=en&rights=mobile&availability=available" % ch["id"]
            desc = title
            content.append((title, self.name + "::" + data2, img, desc))
            # AtoZ
            title = ch["title"] + " - programmes AtoZ"
            data2 = "channels/%s/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available" % ch["id"]
            desc = title
            content.append((title, self.name + "::" + data2, img, desc))
            # Schedule entries for the last 10 days
            day0 = datetime.date.today()
            for i in range(10):
                day = day0 - datetime.timedelta(days=i)
                days = day.strftime("%Y-%m-%d")
                title = ch["title"] + " - " + days
                img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png" % ch["id"]
                data2 = "channels/%s/schedule/%s?availability=available" % (ch["id"], days)
                #ee = self.get_epg_live(ch["id"])
                desc = title
                content.append((title, self.name + "::" + data2, img, desc))
            return content
    def get_streams(self, data):
        print "[iplayer] get_streams:", data
        if "::" in data:
            data = data.split("::")[1]
        if not self.is_video(data):
            return []
        cmd = data.split("/")
        vid = cmd[1].split("?")[0]
        if cmd[0] == "live":
            title, img, desc = self.get_epg_live(vid)
        else:
            data_ = "episodes/%s" % vid
            r = self.call(data_)
            title, img, desc, vid = self.get_epg_video(vid)
        url = "http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/format/json/mediaset/iptv-all/vpid/%s" % vid
        print "vid=%s" % vid
        print url
        r = self._http_request(url)  #, headers=self.headers2
        if not r:
            raise Exception("No streams found")
        js = json.loads(r)
        if "result" in js and js["result"] == "geolocation":
            raise Exception("BBC iPlayer service available only from UK")
        if not "media" in js:
            raise Exception("No streams found")
        streams = []
        captions = []
        for s in js["media"]:
            if s["kind"] == "captions":
                if s["connection"][0]["href"]:
                    sub = {}
                    sub["url"] = s["connection"][0]["href"].encode('utf8')
                    sub["type"] = s["type"]
                    sub["name"] = s["service"] if "service" in s else "captions (taff)"
                    sub["lang"] = "en"
                    captions.append(sub)
            if s["kind"] != "video":
                continue
            for c in s["connection"]:
                if c["transferFormat"] != "hls":
                    continue
                #if not (c["supplier"].startswith("mf_") or c["supplier"].startswith("ll_")): continue  # TODO there are some VPN problems with akamaihd
                #if c["priority"] != "1": continue
                url = c["href"].encode("utf8")
                r2 = self._http_request(url)
                if not r2:
                    continue
                slist = re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", r2, re.DOTALL)
                if not slist:
                    stream = stream0.copy()
                    stream["url"] = url
                    stream["name"] = title
                    stream["desc"] = desc
                    stream["img"] = img
                    stream["type"] = "hls"
                    stream["quality"] = ("%s %sx%s %s,%s" % (s["bitrate"], s["width"], s["height"], c["supplier"], c["priority"])).encode("utf8")
                    stream["lang"] = "en"
                    stream["subs"] = captions
                    stream["order"] = int(s["bitrate"])
                    streams.append(stream)
                else:
                    for cc in slist:
                        m = re.search("RESOLUTION=([\dx]+)", cc[0])
                        resolution = m.group(1) if m else "%sx%s" % (s["width"], s["height"])
                        m = re.search("BANDWIDTH=([\d]+)", cc[0])
                        bitrate = m.group(1) if m else s["bitrate"]
                        url2 = cc[1].encode("utf8")
                        if not url2.startswith("http"):
                            uu = url.split("/")[:-1]
                            uu.append(url2)
                            url2 = "/".join(uu)
                        stream = stream0.copy()
                        stream["url"] = url2
                        stream["name"] = title
                        stream["desc"] = desc
                        stream["img"] = img
                        stream["type"] = "hls"
                        stream["quality"] = ("%s %s %s,%s" % (bitrate, resolution, c["supplier"], c["priority"])).encode("utf8")
                        stream["lang"] = "en"
                        stream["subs"] = captions
                        stream["order"] = int(bitrate)
                        streams.append(stream)
        if captions:
            for s in streams:
                s["subs"] = captions
        streams = sorted(streams, key=lambda item: item["order"], reverse=True)
        return streams
    def is_video(self, data):
        if "::" in data:
            data = data.split("::")[1]
        cmd = data.split("/")
        if cmd[0] == "live" and len(cmd) == 2:
            return True
        elif cmd[0] == "episodes" and len(cmd) == 2:
            return True
        else:
            return False
    def get_data_element(self, item):
        if ("programme" in item["type"] or "group" in item["type"]) and item["count"] > 1:
            ep = item.copy()
        elif ("programme" in item["type"] or "group" in item["type"]) and item["count"] == 1:
            ep = item["initial_children"][0].copy()
        elif item["type"] == "episode":
            ep = item.copy()
        elif item["type"] == "broadcast":
            ep = item["episode"].copy()
        else:
            ep = item.copy()
        title = ep["title"]
        if "subtitle" in ep and ep["subtitle"]:
            title = title + ". " + ep["subtitle"]
        desc = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"]
        # TODO additional info for the description
        img = ep["images"]["standard"].replace("{recipe}", "512x288") if "images" in ep else self.img
        if ep["type"] == "episode":
            data2 = "episodes/%s" % ep["id"]
        elif "programme" in ep["type"]:
            data2 = "programmes/%s/episodes?per_page=40&page=1" % ep["id"]
            title = "%s [%s episodes]" % (title, ep["count"])
        elif "group" in ep["type"]:
            data2 = "groups/%s/episodes?per_page=40&page=1" % ep["id"]
            title = "%s [%s episodes]" % (title, ep["count"])
        else:
            data2 = "programmes/%s/episodes?per_page=40&page=1" % ep["id"]
            title = "%s [%s episodes]" % (title, ep["count"])
        return title, data2, img, desc
    def get_epg_video(self, vid):
        data = "episodes/%s" % vid
        r = self.call(data)
        if "episodes" in r:
            ep = r["episodes"][0]
            title = ep["title"]
            if "subtitle" in ep:
                title = title + ". " + ep["subtitle"]
            desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"] if "small" in ep["synopses"] else title
            ver = ep["versions"][0]
            vid = ver["id"]
            remaining = ver["availability"]["remaining"]["text"]
            duration = ver["duration"]
            first_broadcast = ver["first_broadcast"]
            desc = u"%s\n%s\n%s\n%s\n%s" % (title, duration, remaining, first_broadcast, desc)
            img = ep["images"]["standard"].replace("{recipe}", "512x288")
            return title.encode("utf8"), img.encode("utf8"), desc.encode("utf8"), vid.encode("utf8")
        else:
            raise Exception("No video info")
    def get_epg_live(self, channelid):
        data = "channels/%s/highlights?live=true" % channelid
        r = self.call(data)
        if "channel_highlights" in r and r["channel_highlights"]["elements"][0]["id"] == "live":
            epg = r["channel_highlights"]["elements"][0]["initial_children"][0].copy()
            t1 = gt(epg['scheduled_start'])
            t2 = gt(epg['scheduled_end'])
            ep = epg["episode"]
            title = ep["title"]
            if "subtitle" in ep:
                title = title + ". " + ep["subtitle"]
            title = "%s (%s-%s)" % (title, t1.strftime("%H:%M"), t2.strftime("%H:%M"))
            desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"] if "small" in ep["synopses"] else title
            desc = "%s\n%s" % (title, desc)
            img = ep["images"]["standard"].replace("{recipe}", "512x288")
            #return title, img, desc
        else:
            title = r["channel_highlights"]["channel"]["title"]
            img = ""
            desc = title
        return title.encode("utf8"), img.encode("utf8"), desc.encode("utf8")
    def get_channels(self):
        if self.ch:
            return self.ch
        r = self.call("channels")
        self.ch = []
        for i, item in enumerate(r["channels"]):
            self.ch.append(item)
            self.ch_id[item["id"]] = i
            self.ch_id2[item["master_brand_id"]] = i
            self.ch_name[item["title"]] = i
        return self.ch

    def get_channel_by_id(self, chid):
        if not self.ch:
            self.get_channels()
        if not self.ch:
            return None
        return self.ch[self.ch_id[chid]] if chid in self.ch_id else None

    def get_channel_by_id2(self, chid):
        if not self.ch:
            self.get_channels()
        if not self.ch:
            return None
        return self.ch[self.ch_id2[chid]] if chid in self.ch_id2 else None

    def get_channel_by_name(self, name):
        if not self.ch:
            self.get_channels()
        ch2 = self.get_channel_by_name2(name)
        if not ch2:
            return None
        ch = self.get_channel_by_id2(ch2["id2"])
        return ch
    def call(self, data, params=None, headers=None):
        if not headers:
            headers = self.headers
        #if not lang: lang = self.country
        url = self.api_url + data
        content = self._http_request(url, params, headers)
        if content:
            try:
                result = json.loads(content)
                return result
            except Exception, ex:
                return None
        else:
            return None

    def call2(self, data, params=None, headers=None):
        if not headers:
            headers = self.headers2
        #if not lang: lang = self.country
        url = self.api_url2 + data
        content = self._http_request(url, params, headers)
        return content

    def _http_request(self, url, params=None, headers=None):
        if not headers:
            headers = self.headers
        import requests
        try:
            r = requests.get(url, params=params, headers=headers)
            return r.content
        except Exception as ex:
            # requests does not raise on HTTP status codes such as 403,
            # so any exception here means the request itself failed
            return None
def gt(dt_str):
    dt, _, us = dt_str.partition(".")
    dt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
    dt = dt - datetime.timedelta(seconds=time.altzone)
    #us = int(us.rstrip("Z"), 10)
    #r = dt + datetime.timedelta(microseconds=us)
    return dt
if __name__ == "__main__":
    c = Source()
    from subprocess import call
    #ch = c.get_channels()
    #c.get_epg_live("bbc_two_england")
    if len(sys.argv) > 1 and not "iplayer::" in sys.argv[1]:
        vid = sys.argv[1]
        print "login - %s" % c.login("ivars777", "xxx")
        vid = "1069"
        vid = "1462566072086"
        channelid = "101"
        vid = "1350462656767"
        #data = c.get_stream_url(vid,"vod")
        #call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",data["stream"]])
        pass
    else:
        if len(sys.argv) > 1:
            data = sys.argv[1]
        else:
            data = "iplayer::home"
        content = c.get_content(data)
        for item in content:
            print item
        #cat = api.get_categories(country)
        #chan = api.get_channels("lv")
        #prog = api.get_programs(channel=6400)
        #prog = api.get_programs(category=55)
        #seas = api.get_seasons(program=6453)
        #str = api.get_streams(660243)
        #res = api.get_videos(802)
        #formats = api.getAllFormats()
        #det = api.detailed("1516")
        #vid = api.getVideos("13170")
        pass
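
For reference, here is a minimal usage sketch showing how the Source class above can be driven from another Python 2 script. It assumes this file and SourceBase.py are importable, and that the BBC iPlayer API is reachable (i.e. a UK address); the script name usage_sketch.py and the menu indexing are illustrative, not part of the plugin's API.

# usage_sketch.py - hypothetical standalone test of the iplayer source
from iplayer import Source

src = Source()

# Top-level menu: get_content returns (title, data, img, desc) tuples
for title, data, img, desc in src.get_content("iplayer::home"):
    print title, "->", data

# Live channels: skip the "..return" entry, then resolve HLS streams for the first channel
live = src.get_content("iplayer::live")[1:]
if live:
    streams = src.get_streams(live[0][1])   # e.g. "iplayer::live/bbc_one_london"
    for s in streams:
        print s["quality"], s["url"]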