Enigma2 plugin to play various online streams (mostly Latvian).

filmix.py 16KB

#!/usr/bin/env python
# coding=utf8
#
# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
#
try:
    import json
except:
    import simplejson as json
import urllib2, urllib
import datetime, re, sys, os
import ConfigParser
from SourceBase import SourceBase
import base64
from collections import OrderedDict
import sys
import ssl
ssl._create_default_https_context = ssl._create_unverified_context

try:
    import util
except:
    sys.path.insert(0, '..')
    import util

headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
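# For illustration, headers2dict() turns a raw header block into a dict:
#   headers2dict("Host: filmix.me\nAccept-Language: en-US,en;q=0.5")
#   == {"Host": "filmix.me", "Accept-Language": "en-US,en;q=0.5"}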
class Source(SourceBase):

    def __init__(self, country="", cfg_path=None):
        self.name = "filmix"
        self.title = "filmix.me"
        self.img = "http://cs5324.vk.me/g33668783/a_903fcc63.jpg"
        self.desc = "filmix.me satura skatīšanās"  # Latvian: "watching filmix.me content"
        self.country = country
        self.headers = headers2dict("""
            Host: filmix.me
            User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
            Accept-Language: en-US,en;q=0.5
        """)
        self.headers2 = headers2dict("""
            User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
            X-Requested-With: XMLHttpRequest
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
        """)
        self.url = "https://filmix.me/"
        #self.login()

    def login(self, user="", password=""):
        return True
    def get_content(self, data):
        print "[filmix] get_content:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        content = []
        content.append(("..return", "back", "", "Return back"))

        if clist == "home":
            content.extend([
                ("Search", "filmix::search/{0}", "", "Search"),
                ("Movies", "filmix::movies", "", "Movies"),
                ("Series", "filmix::series", "", "TV Series"),
                ("Cartoons", "filmix::cartoons", "", "Cartoons"),
            ])
            return content

        #elif clist == "search":
            # TODO
            #return content

        elif data in ("movies", "series", "cartoons"):
            r = self.call("")
            r = r.decode("cp1251").encode("utf8")
            if data == "movies":
                sname = "Фильмы"
            elif data == "series":
                sname = "Сериалы"
            else:
                sname = "Мультфильмы"
            # <span class="menu-title">Фильмы</span>
            m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span' % sname, r, re.DOTALL | re.UNICODE)
            if not m: return content
            r2 = m.group(1)
            result = re.findall(r'<a .*?href="https://filmix\.me/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
            for item in result:
                if "catalog" in item[0]: continue
                title = item[1]
                data2 = item[0]
                img = self.img
                desc = title
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ## Series (play page)
        elif clist == "play":
            r = self.call(path)
            r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)', r, re.DOTALL)
            img = m.group(1) if m else self.img
            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
            duration = "(%s)" % m.group(1) if m else ""
            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
            vid = plist[-1]
            m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
            key = m.group(3) if m else ""
            js = self.get_movie_info(vid, key)
            translations = js["message"]["translations"]["flash"]
            for pl in translations:
                if translations[pl].startswith("http"):
                    continue
                pl_link = translations[pl]
                lang = pl.encode("utf8")
                break
            else:
                raise Exception("No episodes list found!")
            #pl_link = js["message"]["translations"]["flash"].values()[0]
            # TODO process several players, currently taking the first
            if not pl_link.startswith("http"):
                pl_link = self.decode_uppod_text(pl_link)
            js = self._http_request(pl_link)
            js = self.decode_uppod_text(js)
            js = json.loads(js)
            if "s" in qs:
                s = int(qs["s"])
                for i, ep in enumerate(js["playlist"][s - 1]["playlist"]):
                    title = title0 + " - " + js["playlist"][s - 1]["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][s - 1]["playlist"][i]["comment"].encode("utf8")
                    data2 = data + "&e=%s" % (i + 1)
                    desc = serie + "\n" + desc0
                    content.append((title, self.name + "::" + data2, img, desc))
            else:
                for i, ep in enumerate(js["playlist"]):
                    title = title0 + " - " + js["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][i]["comment"].encode("utf8")
                    if "file" in ep and ep["file"]:
                        data2 = data + "?e=%s" % (i + 1)
                    else:
                        data2 = data + "?s=%s" % (i + 1)
                    desc = serie + "\n" + desc0
                    content.append((title, self.name + "::" + data2, img, desc))
            return content
            #r = self._http_request(url)

        ### Listing page (saraksts) ###
        else:
            r = self.call(data)
            r = r.decode("cp1251").encode("utf8")
            for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
                m = re.search(r'<a href="https://filmix\.me/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
                if not m: continue
                vid = m.group(1)
                data2 = "play/%s" % vid
                #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
                title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
                m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
                if m:
                    title = title + "/" + m.group(1)
                m = re.search('<img src="([^"]+.jpg)"', r2, re.DOTALL)
                img = m.group(1) if m else self.img
                m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
                if m:
                    year = m.group(1)
                    title = "%s (%s)" % (title, year)
                title = util.unescape(title)
                genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
                genre = ",".join(genre)
                m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
                desc0 = util.unescape(m.group(1)) if m else ""
                m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
                quality = m.group(1) if m else ""
                actors = re.findall('itemprop="actor">([^<]+)<', r2, re.DOTALL)
                actors = ",".join(actors)
                desc = "%s\n%s\n%s\n%s\n%s" % (title, genre, desc0, actors, quality)
                content.append((title, self.name + "::" + data2, img, desc))
            if '<div class="navigation">' in r:
                m = re.search(r'href="https://filmix\.me/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
                if m:
                    data2 = m.group(1)
                else:
                    m = re.search("/page/(\d)+", data)
                    if m:
                        page = int(m.group(1)) + 1
                        data2 = re.sub("/page/(\d)+", "/page/%s" % page, data)
                    else:
                        data2 = data + "/page/2"
                content.append(("Next page", self.name + "::" + data2, self.img, "Next page"))
            return content
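    # Note (illustration): every entry appended above is a 4-tuple
    # (title, data, img, desc), where data is a string such as "filmix::play/12345"
    # that is fed back into get_content()/get_streams() on the next request.
    # Example entry with made-up values:
    #   ("Some Movie (2016)", "filmix::play/12345",
    #    "https://filmix.me/uploads/posters/thumbs/xyz.jpg", "Some Movie (2016)\nDrama\n...")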
    def is_video(self, data):
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        if clist == "play" and "e=" in data:
            return True
        elif clist == "play" and not params:
            r = self.call(path)
            #r = r.decode("cp1251").encode("utf8")
            m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
            if not m:
                raise Exception("Can not find video link")
                #return False
            video_link = m.group(1)
            if video_link == '{video-link}':
                return False
            else:
                return True
        else:
            return False
    def get_streams(self, data):
        print "[filmix] get_streams:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        r = self.call(path)
        if not r:
            return []
        streams = []
        r = r.decode("cp1251").encode("utf8")
        title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
        m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)', r, re.DOTALL)
        img = m.group(1) if m else self.img
        m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
        duration = "(%s)" % m.group(1) if m else ""
        m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
        desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
        m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
        if not m:
            raise Exception("Can not find video link")
            #return []
        video_link = m.group(1)
        series = True if video_link == '{video-link}' else False
        vid = plist[1]
        m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
        key = m.group(3) if m else ""
        js = self.get_movie_info(vid, key)
        translations = js["message"]["translations"]["flash"]
        for pl in translations:
            if translations[pl].startswith("http"):
                continue
            pl_link = translations[pl]
            lang = pl.encode("utf8")
            break
        else:
            raise Exception("No episodes list found!")
        if not pl_link.startswith("http"):
            pl_link = self.decode_uppod_text(pl_link)
        if not series:  # movie (single video)
            url0 = pl_link
            streams2 = self.get_streams2(url0)
            for st in streams2:
                stream = util.item()
                stream["url"] = st[1]
                stream["lang"] = lang
                stream["quality"] = st[0]
                stream["name"] = title
                stream["desc"] = desc
                streams.append(stream)
            return streams
        else:  # series
            #pl_link = video_link
            js = self._http_request(pl_link)
            js = self.decode_uppod_text(js)
            js = json.loads(js)
            if "e" in qs:
                if "s" in qs:
                    s = int(qs["s"])
                else:
                    s = None
                e = int(qs["e"])
                if s:  # season + episode
                    serie = js["playlist"][s - 1]["playlist"][e - 1]["comment"].encode("utf8")
                    title = title0 + " - " + serie
                    url0 = js["playlist"][s - 1]["playlist"][e - 1]["file"].encode("utf8")
                else:  # episode only, no seasons
                    title = title0 + " - " + js["playlist"][e - 1]["comment"].encode("utf8")
                    serie = js["playlist"][e - 1]["comment"].encode("utf8")
                    url0 = js["playlist"][e - 1]["file"].encode("utf8")
            streams2 = self.get_streams2(url0)
            for st in streams2:
                stream = util.item()
                stream["url"] = st[1]
                stream["lang"] = lang
                stream["quality"] = st[0]
                stream["name"] = title
                stream["desc"] = desc
                streams.append(stream)
            return streams
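    # Note (illustration): each stream returned above is a util.item() dict carrying the
    # keys set in the loops - "url", "lang", "quality", "name", "desc" - e.g.
    #   {"url": "https://.../720.mp4", "quality": "720", "lang": "...", "name": "...", "desc": "..."}
    # (values made up).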
    def call(self, data, params=None, headers=None, lang=""):
        if not headers: headers = self.headers
        url = self.url + data
        result = self._http_request(url, params, headers=headers)
        return result

    def get_movie_info(self, vid, key=""):
        headers = headers2dict("""
            User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
            Accept: application/json, text/javascript, */*; q=0.01
            Accept-Language: en-US,en;q=0.5
            Content-Type: application/x-www-form-urlencoded; charset=UTF-8
            X-Requested-With: XMLHttpRequest
            Referer: https://filmix.me/play/%s
            Cookie: ad_win12=1;
        """ % vid)
        post_data = {"post_id": vid, "key=": key}
        r = util.post("https://filmix.me/api/movies/player_data", data=post_data, headers=headers)
        if not r:
            raise Exception("Can not get movie info")
            #return []
        js = json.loads(r)
        return js
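    # Note (illustration): the player_data JSON is consumed elsewhere as
    # js["message"]["translations"]["flash"], a mapping of translation (voice-over)
    # names to either a direct http(s) URL or an uppod-obfuscated playlist link,
    # e.g. {"Дубляж": "aHR0cHM6..."} (example key and value made up).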
    def decode_uppod_text(self, text):
        Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
        Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
        text = text.replace("\n", "").strip()
        for i in range(len(Client_codec_a)):
            char1 = Client_codec_b[i]
            char2 = Client_codec_a[i]
            text = text.replace(char1, "___")
            text = text.replace(char2, char1)
            text = text.replace("___", char2)
        result = base64.b64decode(text)
        print result
        return result
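    # Note (illustration): the uppod obfuscation handled above is a fixed character
    # substitution - for each index i, occurrences of Client_codec_b[i] and
    # Client_codec_a[i] are swapped (via the "___" placeholder) - after which the
    # string is ordinary base64. Because the swap is its own inverse, applying the
    # same substitution to a base64-encoded string would re-create the obfuscated form.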
    def get_streams2(self, url0):
        m = re.search("\[([\d\w,]+)\]", url0)
        if not m:
            return [("?", url0)]
        res = m.group(1)
        streams = []
        for res in res.split(","):
            if not res: continue
            if res in ["1080p"]: continue  # TODO fullhd only in PRO+ version
            url = re.sub("\[[\d\w,]+\]", res, url0)
            streams.append((res, url))
        return streams
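    # Note (illustration): get_streams2() expands a bracketed quality template into one
    # (quality, url) pair per variant, e.g. (URL made up):
    #   "https://example.org/video_[480,720].mp4"
    #   -> [("480", "https://example.org/video_480.mp4"), ("720", "https://example.org/video_720.mp4")]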
if __name__ == "__main__":
    c = Source()
    #s = "ZnVuY3Rpb24gc2VuZE1lc3NhZ2U2MDc3ODkoZSl7dmFyIGg9bWdfd3M2MDc3ODkub25tZXNzYWdlOyBtZ193czYwNzc4OS5yZWFkeVN0YXRlPT1tZ193czYwNzc4OS5DTE9TRUQmJihtZ193czYwNzc4OT1uZXcgV2ViU29ja2V0KG1nX3dzNjA3Nzg5X2xvY2F0aW9uKSksbWdfd3M2MDc3ODkub25tZXNzYWdlPWgsd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkobWdfd3M2MDc3ODksZnVuY3Rpb24oKXttZ193czYwNzc4OS5zZW5kKGUpfSl9ZnVuY3Rpb24gd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KXtzZXRUaW1lb3V0KGZ1bmN0aW9uKCl7cmV0dXJuIDE9PT1lLnJlYWR5U3RhdGU/dm9pZChudWxsIT10JiZ0KCkpOnZvaWQgd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KX0sNSl9OyB2YXIgbWdfd3M2MDc3ODlfbG9jYXRpb24gPSAid3NzOi8vd3NwLm1hcmtldGdpZC5jb20vd3MiOyBtZ193czYwNzc4OSA9IG5ldyBXZWJTb2NrZXQobWdfd3M2MDc3ODlfbG9jYXRpb24pLCBtZ193czYwNzc4OS5vbm1lc3NhZ2UgPSBmdW5jdGlvbiAodCkge3Bvc3RNZXNzYWdlKHQuZGF0YSk7fSwgb25tZXNzYWdlID0gZnVuY3Rpb24oZSl7c2VuZE1lc3NhZ2U2MDc3ODkoZS5kYXRhKX0="
    #txt = c.decode_uppod_text(s)
    if len(sys.argv) > 1:
        data = sys.argv[1]
    else:
        data = "home"
    content = c.get_content(data)
    for item in content:
        print item
    #cat = api.get_categories(country)
    #chan = api.get_channels("lv")
    #prog = api.get_programs(channel=6400)
    #prog = api.get_programs(category=55)
    #seas = api.get_seasons(program=6453)
    #str = api.get_streams(660243)
    #res = api.get_videos(802)
    #formats = api.getAllFormats()
    #det = api.detailed("1516")
    #vid = api.getVideos("13170")
    pass
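
A minimal programmatic usage sketch (assumes SourceBase and util are importable, e.g. when run from the plugin directory; the play id below is made up):

    from filmix import Source

    src = Source()
    items = src.get_content("home")        # top-level menu: Search / Movies / Series / Cartoons
    items = src.get_content("movies")      # listing of (title, data, img, desc) tuples
    if src.is_video("play/12345?e=1"):     # hypothetical play id
        for s in src.get_streams("play/12345?e=1"):
            print s["quality"], s["url"]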