Enigma2 plugin to play various online streams (mostly Latvian).

filmix.py 18KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412
  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. try:
  9. import json
  10. except:
  11. import simplejson as json
  12. import urllib2, urllib
  13. import datetime, re, sys,os
  14. import ConfigParser
  15. from SourceBase import SourceBase
  16. import base64
  17. from collections import OrderedDict
  18. import sys
  19. import ssl
  20. if "_create_unverified_context" in dir(ssl):
  21. ssl._create_default_https_context = ssl._create_unverified_context
  22. try:
  23. import util
  24. except:
  25. sys.path.insert(0,'..')
  26. import util
  27. headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
  28. class Source(SourceBase):
  29. def __init__(self,country="",cfg_path=None):
  30. self.name = "filmix"
  31. self.title = "filmix.me"
  32. self.img = "http://cs5324.vk.me/g33668783/a_903fcc63.jpg"
  33. self.desc = "filmix.me satura skatīšanās"
  34. self.country=country
  35. self.headers = headers2dict("""
  36. Host: filmix.me
  37. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  38. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  39. Accept-Language: en-US,en;q=0.5
  40. """)
  41. self.headers2 = headers2dict("""
  42. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  43. X-Requested-With: XMLHttpRequest
  44. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  45. """)
  46. self.url = "https://filmix.me/"
  47. #self.login()
  48. def login(self,user="",password=""):
  49. return True
  50. def get_content(self, data):
  51. print "[filmix] get_content:", data
  52. source, data, path, plist, clist, params, qs = self.parse_data(data)
  53. content=[]
  54. content.append(("..return", "back","","Return back"))
  55. if clist=="home":
  56. content.extend([
  57. ("Search", "filmix::search/{0}","","Search"),
  58. ("Movies", "filmix::movies","","Movies"),
  59. ("Series", "filmix::series","","TV Series"),
  60. ("Cartoons", "filmix::cartoons","","Cartoons"),
  61. ])
  62. return content
  63. #elif clist=="search":
  64. # TODO
  65. #return content
  66. elif data in ("movies","series","cartoons"):
  67. r = self.call("")
  68. r = r.decode("cp1251").encode("utf8")
  69. if data == "movies":
  70. sname = "Фильмы"
  71. elif data=="series":
  72. sname = "Сериалы"
  73. else:
  74. sname = "Мультфильмы"
  75. # <span class="menu-title">Фильмы</span>
  76. m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span'%sname, r, re.DOTALL|re.UNICODE)
  77. if not m: return content
  78. r2 = m.group(1)
  79. result = re.findall(r'<a .*?href="https://filmix\.me/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
  80. for item in result:
  81. if "catalog" in item[0]: continue
  82. title = item[1]
  83. data2 = item[0]
  84. img = self.img
  85. desc = title
  86. content.append((title,self.name+"::"+data2,img,desc))
  87. return content
  88. ## Seriāls
  89. elif clist=="play":
  90. r = self.call(path)
  91. r = r.decode("cp1251").encode("utf8")
  92. title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
  93. m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
  94. img = m.group(1) if m else self.img
  95. m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
  96. duration = "(%s)"%m.group(1) if m else ""
  97. m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
  98. desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
  99. vid = plist[-1]
  100. m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
  101. key = m.group(3) if m else ""
  102. js = self.get_movie_info(vid,key)
  103. translations = js["message"]["translations"]["html5"]
  104. for pl in translations:
  105. if translations[pl].startswith("http"):
  106. continue
  107. pl_link = translations[pl]
  108. lang = pl.encode("utf8")
  109. break
  110. else:
  111. raise Exception("No episodes list found!")
  112. #pl_link = js["message"]["translations"]["flash"].values()[0]
  113. # TODO process several players, currently taking the first
  114. #TODO - kļuda, vairs nesradā
  115. if not pl_link.startswith("http"):
  116. pl_link = self.decode_direct_media_url(pl_link)
  117. js = self._http_request(pl_link)
  118. js = self.decode_direct_media_url(js)
  119. js = json.loads(js)
  120. if "s" in qs:
  121. s = int(qs["s"])
  122. for i,ep in enumerate(js["playlist"][s-1]["playlist"]):
  123. title = title0+" - "+js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
  124. serie = js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
  125. data2 = data+"&e=%s"%(i+1)
  126. desc = serie +"\n"+desc0
  127. content.append((title,self.name+"::"+data2,img,desc))
  128. else:
  129. for i,ep in enumerate(js["playlist"]):
  130. title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
  131. serie = js["playlist"][i]["comment"].encode("utf8")
  132. if "file" in ep and ep["file"]:
  133. data2 = data+"?e=%s"%(i+1)
  134. else:
  135. data2 = data+"?s=%s"%(i+1)
  136. desc = serie +"\n"+desc0
  137. content.append((title,self.name+"::"+data2,img,desc))
  138. return content
  139. #r = self._http_request(url)
  140. ### saraksts ###
  141. else:
  142. r = self.call(data)
  143. r = r.decode("cp1251").encode("utf8")
  144. for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
  145. #m2 = re.search(r'<a class="watch icon-play" itemprop="url" href="([^"]+)"', r2, re.DOTALL)
  146. #<a class="watch icon-play" itemprop="url" href="https://filmix.me/dramy/110957-stolik-19-2017.html"
  147. #m = re.search(r'<a href="https://filmix\.me/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
  148. m = re.search(r'<a class="watch icon-play" itemprop="url" href="https://filmix.me/\w+/(\d+)-', r2, re.DOTALL)
  149. if not m: continue
  150. vid = m.group(1)
  151. data2 = "play/%s"%vid
  152. #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
  153. title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
  154. m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
  155. if m:
  156. title = title + "/"+m.group(1)
  157. m = re.search('<img src="([^"]+.jpg)"', r2, re.DOTALL)
  158. img = m.group(1) if m else self.img
  159. m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
  160. if m:
  161. year = m.group(1) if m else ""
  162. title = "%s (%s)"%(title,year)
  163. title = util.unescape(title)
  164. genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
  165. genre = ",".join(genre)
  166. m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
  167. desc0 = util.unescape(m.group(1)) if m else ""
  168. m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
  169. quality = m.group(1) if m else ""
  170. actors = re.findall('itemprop="actor">([^<]+)<', r2, re.DOTALL)
  171. actors = ",".join(actors)
  172. desc="%s\n%s\n%s\n%s\n%s"%(title,genre,desc0,actors,quality)
  173. content.append((title,self.name+"::"+data2,img,desc))
  174. if '<div class="navigation">' in r:
  175. m = re.search(r'href="https://filmix\.me/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
  176. if m:
  177. data2 = m.group(1)
  178. else:
  179. m = re.search("/page/(\d)+",data)
  180. if m:
  181. page = int(m.group(1))+1
  182. data2 = re.sub("/page/(\d)+", "/page/%s"%page, data)
  183. else:
  184. data2 = data + "/page/2"
  185. content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
  186. return content
  187. def is_video(self,data):
  188. source,data,path,plist,clist,params,qs = self.parse_data(data)
  189. if clist == "play" and "e=" in data:
  190. return True
  191. elif clist=="play" and not params:
  192. r = self.call(path)
  193. #r = r.decode("cp1251").encode("utf8")
  194. #m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
  195. #if not m:
  196. if u"Фильм <a href=" in r.decode("cp1251"):
  197. return True
  198. else:
  199. return False
  200. else:
  201. return False
  202. def get_streams(self, data):
  203. print "[filmix] get_streams:", data
  204. source,data,path,plist,clist,params,qs = self.parse_data(data)
  205. r = self.call(path)
  206. if not r:
  207. return []
  208. streams = []
  209. r = r.decode("cp1251").encode("utf8")
  210. title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
  211. m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
  212. img = m.group(1) if m else self.img
  213. m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
  214. duration = "(%s)"%m.group(1) if m else ""
  215. m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
  216. desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
  217. m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
  218. if not m:
  219. raise Exception("Can not find video link")
  220. #return []
  221. video_link = m.group(1)
  222. series = False if u"Фильм <a href=" in r.decode("utf8") else True
  223. vid = plist[1]
  224. m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
  225. key = m.group(3) if m else ""
  226. js = self.get_movie_info(vid,key)
  227. translations = js["message"]["translations"]["html5"]
  228. for pl in translations:
  229. if translations[pl].startswith("http"):
  230. continue
  231. pl_link = translations[pl]
  232. lang = pl.encode("utf8")
  233. break
  234. else:
  235. raise Exception("No episodes list found!")
  236. if not pl_link.startswith("http"):
  237. pl_link = self.decode_direct_media_url(pl_link)
  238. if not series : # Filma
  239. url0 = pl_link
  240. streams2 = self.get_streams2(url0)
  241. for st in streams2:
  242. stream = util.item()
  243. stream["url"]=st[1]
  244. stream["lang"]=lang
  245. stream["quality"]=st[0]
  246. stream["name"]= title
  247. stream["desc"]=desc
  248. streams.append(stream)
  249. return streams
  250. else: # Seriāls
  251. #pl_link = video_link
  252. js = self._http_request(pl_link)
  253. js = self.decode_direct_media_url(js)
  254. js = json.loads(js)
  255. if "e" in qs:
  256. if "s" in qs:
  257. s = int(qs["s"])
  258. else:
  259. s = None
  260. e = int(qs["e"])
  261. if s: # sezona + epizode
  262. serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
  263. title = title0+" - "+ serie
  264. url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
  265. else: # tikai epizode, nav sezonas
  266. title = title0 +" - "+js["playlist"][e-1]["comment"].encode("utf8")
  267. serie = js["playlist"][e-1]["comment"].encode("utf8")
  268. url0 = js["playlist"][e-1]["file"].encode("utf8")
  269. streams2 = self.get_streams2(url0)
  270. for st in streams2:
  271. stream = util.item()
  272. stream["url"]=st[1]
  273. stream["lang"]=lang
  274. stream["quality"]=st[0]
  275. stream["name"]= title
  276. stream["desc"]=desc
  277. streams.append(stream)
  278. return streams
  279. def call(self, data,params=None,headers=None,lang=""):
  280. if not headers: headers = self.headers
  281. url = self.url+data
  282. result = self._http_request(url,params,headers=headers)
  283. return result
  284. def get_movie_info(self,vid,key=""):
  285. headers = headers2dict("""
  286. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
  287. Accept: application/json, text/javascript, */*; q=0.01
  288. Accept-Language: en-US,en;q=0.5
  289. Content-Type: application/x-www-form-urlencoded; charset=UTF-8
  290. X-Requested-With: XMLHttpRequest
  291. Referer: https://filmix.me/play/%s
  292. Cookie: ad_win12=1;
  293. """%vid )
  294. post_data = {"post_id":vid,"key=":key}
  295. r = util.post("https://filmix.me/api/movies/player_data", data=post_data, headers = headers)
  296. if not r:
  297. raise Exception("Can not get movie info")
  298. #return []
  299. js = json.loads(r)
  300. return js
  301. def decode_base64(self, encoded_url):
  302. codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
  303. codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
  304. i = 0
  305. for a in codec_a:
  306. b = codec_b[i]
  307. i += 1
  308. encoded_url = encoded_url.replace(a, '___')
  309. encoded_url = encoded_url.replace(b, a)
  310. encoded_url = encoded_url.replace('___', b)
  311. return base64.b64decode(encoded_url)
  312. def decode_unicode(self, encoded_url):
  313. from itertools import izip_longest
  314. def grouper(n, iterable, fillvalue=None):
  315. "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
  316. args = [iter(iterable)] * n
  317. return izip_longest(fillvalue=fillvalue, *args)
  318. _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
  319. tokens = map(lambda items: '\u0'+''.join(items), grouper(3, _))
  320. return ''.join(tokens).decode('unicode_escape')
  321. def decode_direct_media_url(self, encoded_url, checkhttp=False):
  322. if(checkhttp == True and (encoded_url.find('http://') != -1 or encoded_url.find('https://') != -1)):
  323. return False
  324. try:
  325. if encoded_url.find('#') != -1:
  326. return self.decode_unicode(encoded_url)
  327. else:
  328. return self.decode_base64(encoded_url)
  329. except:
  330. return False
  331. def decode_uppod_text(self, text):
  332. Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
  333. Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
  334. text = text.replace("\n", "").strip()
  335. for i in range(len(Client_codec_a)):
  336. char1 = Client_codec_b[i]
  337. char2 = Client_codec_a[i]
  338. text = text.replace(char1, "___")
  339. text = text.replace(char2, char1)
  340. text = text.replace("___", char2)
  341. result = base64.b64decode(text)
  342. print result
  343. return result
  344. def get_streams2(self,url0):
  345. m = re.search("\[([\d\w,]+)\]",url0)
  346. if not m:
  347. return [("?",url0)]
  348. res = m.group(1)
  349. streams=[]
  350. for res in res.split(","):
  351. if not res: continue
  352. if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
  353. url=re.sub("\[[\d\w,]+\]",res,url0)
  354. streams.append((res,url))
  355. return streams
  356. if __name__ == "__main__":
  357. c = Source()
  358. #s = "ZnVuY3Rpb24gc2VuZE1lc3NhZ2U2MDc3ODkoZSl7dmFyIGg9bWdfd3M2MDc3ODkub25tZXNzYWdlOyBtZ193czYwNzc4OS5yZWFkeVN0YXRlPT1tZ193czYwNzc4OS5DTE9TRUQmJihtZ193czYwNzc4OT1uZXcgV2ViU29ja2V0KG1nX3dzNjA3Nzg5X2xvY2F0aW9uKSksbWdfd3M2MDc3ODkub25tZXNzYWdlPWgsd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkobWdfd3M2MDc3ODksZnVuY3Rpb24oKXttZ193czYwNzc4OS5zZW5kKGUpfSl9ZnVuY3Rpb24gd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KXtzZXRUaW1lb3V0KGZ1bmN0aW9uKCl7cmV0dXJuIDE9PT1lLnJlYWR5U3RhdGU/dm9pZChudWxsIT10JiZ0KCkpOnZvaWQgd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KX0sNSl9OyB2YXIgbWdfd3M2MDc3ODlfbG9jYXRpb24gPSAid3NzOi8vd3NwLm1hcmtldGdpZC5jb20vd3MiOyBtZ193czYwNzc4OSA9IG5ldyBXZWJTb2NrZXQobWdfd3M2MDc3ODlfbG9jYXRpb24pLCBtZ193czYwNzc4OS5vbm1lc3NhZ2UgPSBmdW5jdGlvbiAodCkge3Bvc3RNZXNzYWdlKHQuZGF0YSk7fSwgb25tZXNzYWdlID0gZnVuY3Rpb24oZSl7c2VuZE1lc3NhZ2U2MDc3ODkoZS5kYXRhKX0="
  359. #txt = c.decode_uppod_text(s)
  360. if len(sys.argv)>1:
  361. data= sys.argv[1]
  362. else:
  363. data = "home"
  364. content = c.get_content(data)
  365. for item in content:
  366. print item
  367. #cat = api.get_categories(country)
  368. #chan = api.get_channels("lv")
  369. #prog = api.get_programs(channel=6400)
  370. #prog = api.get_programs(category=55)
  371. #seas = api.get_seasons(program=6453)
  372. #str = api.get_streams(660243)
  373. #res = api.get_videos(802)
  374. #formats = api.getAllFormats()
  375. #det = api.detailed("1516")
  376. #vid = api.getVideos("13170")
  377. pass