Enigma2 plugin to play various online streams (mostly Latvian).

filmix.py 15KB

#!/usr/bin/env python
# coding=utf8
#
# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
#
try:
    import json
except:
    import simplejson as json
import urllib2, urllib
import datetime, re, sys, os
import ConfigParser
from SourceBase import SourceBase
import base64
from collections import OrderedDict
import sys
try:
    import util
except:
    sys.path.insert(0, '..')
    import util

headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
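# Example: headers2dict("Host: filmix.me\nAccept-Language: en-US,en;q=0.5")
# returns {"Host": "filmix.me", "Accept-Language": "en-US,en;q=0.5"}.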

class Source(SourceBase):

    def __init__(self, country="", cfg_path=None):
        self.name = "filmix"
        self.title = "filmix.me"
        self.img = "http://cs5324.vk.me/g33668783/a_903fcc63.jpg"
        self.desc = "filmix.me satura skatīšanās"
        self.country = country
        self.headers = headers2dict("""
Host: filmix.me
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
""")
        self.headers2 = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
X-Requested-With: XMLHttpRequest
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
""")
        self.url = "https://filmix.me/"
        #self.login()

    def login(self, user="", password=""):
        return True

    def get_content(self, data):
        print "[filmix] get_content:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        content = []
        content.append(("..return", "back", "", "Return back"))

        if clist == "home":
            content.extend([
                ("Search", "filmix::search/{0}", "", "Search"),
                ("Movies", "filmix::movies", "", "Movies"),
                ("Series", "filmix::series", "", "TV Series"),
                ("Cartoons", "filmix::cartoons", "", "Cartoons"),
            ])
            return content

        #elif clist=="search":
            # TODO
            #return content

        elif data in ("movies", "series", "cartoons"):
            r = self.call("")
            r = r.decode("cp1251").encode("utf8")
            if data == "movies":
                sname = "Фильмы"
            elif data == "series":
                sname = "Сериалы"
            else:
                sname = "Мультфильмы"
            # <span class="menu-title">Фильмы</span>
            m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span' % sname, r, re.DOTALL | re.UNICODE)
            if not m:
                return content
            r2 = m.group(1)
            result = re.findall(r'<a .*?href="https://filmix\.me/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
            for item in result:
                if "catalog" in item[0]:
                    continue
                title = item[1]
                data2 = item[0]
                img = self.img
                desc = title
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ## Series
        elif clist == "play":
            r = self.call(path)
            r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)', r, re.DOTALL)
            img = m.group(1) if m else self.img
            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
            duration = "(%s)" % m.group(1) if m else ""
            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
            vid = plist[-1]
            m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
            key = m.group(3) if m else ""
            js = self.get_movie_info(vid, key)
            translations = js["message"]["translations"]["flash"]
            for pl in translations:
                if translations[pl].startswith("http"):
                    continue
                pl_link = translations[pl]
                lang = pl.encode("utf8")
                break
            else:
                raise Exception("No episodes list found!")
            #pl_link = js["message"]["translations"]["flash"].values()[0]
            # TODO process several players, currently taking the first
            if not pl_link.startswith("http"):
                pl_link = self.decode_uppod_text(pl_link)
            js = self._http_request(pl_link)
            js = self.decode_uppod_text(js)
            js = json.loads(js)
            if "s" in qs:
                s = int(qs["s"])
                for i, ep in enumerate(js["playlist"][s - 1]["playlist"]):
                    title = title0 + " - " + js["playlist"][s - 1]["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][s - 1]["playlist"][i]["comment"].encode("utf8")
                    data2 = data + "&e=%s" % (i + 1)
                    desc = serie + "\n" + desc0
                    content.append((title, self.name + "::" + data2, img, desc))
            else:
                for i, ep in enumerate(js["playlist"]):
                    title = title0 + " - " + js["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][i]["comment"].encode("utf8")
                    data2 = data + "?s=%s" % (i + 1)
                    desc = serie + "\n" + desc0
                    content.append((title, self.name + "::" + data2, img, desc))
            return content
            #r = self._http_request(url)

        ### listing ###
        else:
            r = self.call(data)
            r = r.decode("cp1251").encode("utf8")
            for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
                m = re.search(r'<a href="https://filmix\.me/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
                if not m:
                    continue
                vid = m.group(1)
                data2 = "play/%s" % vid
                #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
                title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
                m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
                if m:
                    title = title + "/" + m.group(1)
                m = re.search('<img src="([^"]+.jpg)"', r2, re.DOTALL)
                img = m.group(1) if m else self.img
                m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
                if m:
                    year = m.group(1)
                    title = "%s (%s)" % (title, year)
                title = util.unescape(title)
                genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
                genre = ",".join(genre)
                m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
                desc0 = util.unescape(m.group(1)) if m else ""
                m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
                quality = m.group(1) if m else ""
                actors = re.findall('itemprop="actor">([^<]+)<', r2, re.DOTALL)
                actors = ",".join(actors)
                desc = "%s\n%s\n%s\n%s\n%s" % (title, genre, desc0, actors, quality)
                content.append((title, self.name + "::" + data2, img, desc))

            if '<div class="navigation">' in r:
                m = re.search(r'href="https://filmix\.me/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
                if m:
                    data2 = m.group(1)
                else:
                    m = re.search(r"/page/(\d+)", data)
                    if m:
                        page = int(m.group(1)) + 1
                        data2 = re.sub(r"/page/\d+", "/page/%s" % page, data)
                    else:
                        data2 = data + "/page/2"
                content.append(("Next page", self.name + "::" + data2, self.img, "Next page"))
            return content

    def is_video(self, data):
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        if clist == "play" and "s=" in data and "e=" in data:
            return True
        elif clist == "play" and not params:
            r = self.call(path)
            #r = r.decode("cp1251").encode("utf8")
            m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
            if not m:
                raise Exception("Can not find video link")
                #return False
            video_link = m.group(1)
            if video_link == '{video-link}':
                return False
            else:
                return True
        else:
            return False

    def get_streams(self, data):
        print "[filmix] get_streams:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        r = self.call(path)
        if not r:
            return []
        streams = []
        r = r.decode("cp1251").encode("utf8")
        title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
        m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)', r, re.DOTALL)
        img = m.group(1) if m else self.img
        m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
        duration = "(%s)" % m.group(1) if m else ""
        m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
        desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
        m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
        if not m:
            raise Exception("Can not find video link")
            #return []
        video_link = m.group(1)
        series = True if video_link == '{video-link}' else False
        vid = plist[1]
        m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
        key = m.group(3) if m else ""
        js = self.get_movie_info(vid, key)
        translations = js["message"]["translations"]["flash"]
        for pl in translations:
            if translations[pl].startswith("http"):
                continue
            pl_link = translations[pl]
            lang = pl.encode("utf8")
            break
        else:
            raise Exception("No episodes list found!")
        if not pl_link.startswith("http"):
            pl_link = self.decode_uppod_text(pl_link)

        if not series:  # Movie
            url0 = pl_link
            streams2 = self.get_streams2(url0)
            for st in streams2:
                stream = util.item()
                stream["url"] = st[1]
                stream["lang"] = lang
                stream["quality"] = st[0]
                stream["name"] = title
                stream["desc"] = desc
                streams.append(stream)
            return streams

        else:  # Series
            #pl_link = video_link
            js = self._http_request(pl_link)
            js = self.decode_uppod_text(js)
            js = json.loads(js)
            if "s" in qs and "e" in qs:
                s = int(qs["s"])
                e = int(qs["e"])
                serie = js["playlist"][s - 1]["playlist"][e - 1]["comment"].encode("utf8")
                title = title0 + " - " + serie
                url0 = js["playlist"][s - 1]["playlist"][e - 1]["file"].encode("utf8")
                streams2 = self.get_streams2(url0)
                for st in streams2:
                    stream = util.item()
                    stream["url"] = st[1]
                    stream["lang"] = lang
                    stream["quality"] = st[0]
                    stream["name"] = title
                    stream["desc"] = desc
                    streams.append(stream)
            return streams

    def call(self, data, params=None, headers=None, lang=""):
        if not headers:
            headers = self.headers
        url = self.url + data
        result = self._http_request(url, params, headers=headers)
        return result

    def get_movie_info(self, vid, key=""):
        headers = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
Accept: application/json, text/javascript, */*; q=0.01
Accept-Language: en-US,en;q=0.5
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
X-Requested-With: XMLHttpRequest
Referer: https://filmix.me/play/%s
Cookie: ad_win12=1;
""" % vid)
        post_data = {"post_id": vid, "key=": key}
        r = util.post("https://filmix.me/api/movies/player_data", data=post_data, headers=headers)
        if not r:
            raise Exception("Can not get movie info")
            #return []
        js = json.loads(r)
        return js
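    # The player_data response, as consumed in get_content()/get_streams() above, carries the
    # available players under js["message"]["translations"]["flash"]: a dict mapping translation
    # names to either a direct URL or an uppod-obfuscated link that decode_uppod_text() below
    # turns into the playlist URL / playlist JSON.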

    def decode_uppod_text(self, text):
        Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
        Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
        text = text.replace("\n", "").strip()
        for i in range(len(Client_codec_a)):
            char1 = Client_codec_b[i]
            char2 = Client_codec_a[i]
            text = text.replace(char1, "___")
            text = text.replace(char2, char1)
            text = text.replace("___", char2)
        result = base64.b64decode(text)
        print result
        return result
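    # The two codec tables above define a fixed character substitution: each Client_codec_a[i]
    # is swapped with its Client_codec_b[i] counterpart, and the two alphabets do not overlap,
    # so applying the same swap twice restores the input. The "encryption" is therefore just
    # this symbol swap on top of base64.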

    def get_streams2(self, url0):
        m = re.search(r"\[([\d\w,]+)\]", url0)
        if not m:
            return [("?", url0)]
        res = m.group(1)
        streams = []
        for res in res.split(","):
            if not res:
                continue
            if res in ["1080p"]:
                continue  # TODO fullhd only in PRO+ version
            url = re.sub(r"\[[\d\w,]+\]", res, url0)
            streams.append((res, url))
        return streams
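    # Example (hypothetical URL): "http://example.com/video_[480,720].mp4" expands to
    # [("480", "http://example.com/video_480.mp4"), ("720", "http://example.com/video_720.mp4")];
    # a URL without a bracketed quality list is returned as a single ("?", url) entry.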

if __name__ == "__main__":
    c = Source()
    #s = "ZnVuY3Rpb24gc2VuZE1lc3NhZ2U2MDc3ODkoZSl7dmFyIGg9bWdfd3M2MDc3ODkub25tZXNzYWdlOyBtZ193czYwNzc4OS5yZWFkeVN0YXRlPT1tZ193czYwNzc4OS5DTE9TRUQmJihtZ193czYwNzc4OT1uZXcgV2ViU29ja2V0KG1nX3dzNjA3Nzg5X2xvY2F0aW9uKSksbWdfd3M2MDc3ODkub25tZXNzYWdlPWgsd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkobWdfd3M2MDc3ODksZnVuY3Rpb24oKXttZ193czYwNzc4OS5zZW5kKGUpfSl9ZnVuY3Rpb24gd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KXtzZXRUaW1lb3V0KGZ1bmN0aW9uKCl7cmV0dXJuIDE9PT1lLnJlYWR5U3RhdGU/dm9pZChudWxsIT10JiZ0KCkpOnZvaWQgd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KX0sNSl9OyB2YXIgbWdfd3M2MDc3ODlfbG9jYXRpb24gPSAid3NzOi8vd3NwLm1hcmtldGdpZC5jb20vd3MiOyBtZ193czYwNzc4OSA9IG5ldyBXZWJTb2NrZXQobWdfd3M2MDc3ODlfbG9jYXRpb24pLCBtZ193czYwNzc4OS5vbm1lc3NhZ2UgPSBmdW5jdGlvbiAodCkge3Bvc3RNZXNzYWdlKHQuZGF0YSk7fSwgb25tZXNzYWdlID0gZnVuY3Rpb24oZSl7c2VuZE1lc3NhZ2U2MDc3ODkoZS5kYXRhKX0="
    #txt = c.decode_uppod_text(s)
    if len(sys.argv) > 1:
        data = sys.argv[1]
    else:
        data = "home"
    content = c.get_content(data)
    for item in content:
        print item
    #cat = api.get_categories(country)
    #chan = api.get_channels("lv")
    #prog = api.get_programs(channel=6400)
    #prog = api.get_programs(category=55)
    #seas = api.get_seasons(program=6453)
    #str = api.get_streams(660243)
    #res = api.get_videos(802)
    #formats = api.getAllFormats()
    #det = api.detailed("1516")
    #vid = api.getVideos("13170")
    pass
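
For a quick console test, the sketch below drives the class the same way the __main__ block above does and then shows how streams would be resolved for one item. It is only a sketch: it assumes Python 2 with SourceBase.py and util.py from this repository next to filmix.py, that filmix.me still answers as it did in 2016, and the play id 12345 is made up; the exact data-string parsing is handled by SourceBase.parse_data, which is not part of this file.

    from filmix import Source

    src = Source()
    # top-level menu, equivalent to running "python filmix.py home"
    for title, data, img, desc in src.get_content("home"):
        print title, "->", data

    # for a playable item, e.g. data == "filmix::play/12345?s=1&e=1" (hypothetical id),
    # streams would be resolved roughly like this:
    #     if src.is_video(data):
    #         for st in src.get_streams(data):
    #             print st["quality"], st["url"]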