Python module (submodule repository) that provides content (video streams) from various online stream sources to the corresponding Enigma2, Kodi, and Plex plugins

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441
  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. try:
  9. import json
  10. except:
  11. import simplejson as json
  12. import urllib2, urllib
  13. import datetime, re, sys,os
  14. import ConfigParser
  15. from SourceBase import SourceBase
  16. import base64
  17. from collections import OrderedDict
  18. import sys
  19. import ssl
  20. if "_create_unverified_context" in dir(ssl):
  21. ssl._create_default_https_context = ssl._create_unverified_context
  22. try:
  23. import util
  24. except:
  25. sys.path.insert(0,'..')
  26. import util
  27. headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
  28. class Source(SourceBase):
  29. def __init__(self,country="",cfg_path=None):
  30. self.name = "filmix"
  31. self.title = "filmix.me"
  32. self.img = "filmix.png"
  33. self.desc = "filmix.me satura skatīšanās"
  34. self.country=country
  35. self.headers = headers2dict("""
  36. Host: filmix.me
  37. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  38. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  39. Accept-Language: en-US,en;q=0.5
  40. """)
  41. self.headers2 = headers2dict("""
  42. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  43. X-Requested-With: XMLHttpRequest
  44. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  45. """)
  46. self.url = "https://filmix.me/"
  47. #self.login()
  48. def login(self,user="",password=""):
  49. return True
    def get_content(self, data):
        # Build a content listing (menu entries, search results, catalogue
        # pages or episode lists) for the virtual path in `data`.
        print "[filmix] get_content:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        content=[]
        content.append(("..return", "back","back.png","Return back"))

        # Top-level menu.
        if clist=="home":
            content.extend([
                ("Search", "filmix::search/{0}","","Search"),
                ("Movies", "filmix::movies","","Movies"),
                ("Series", "filmix::series","","TV Series"),
                ("Cartoons", "filmix::cartoons","","Cartoons"),
            ])
            return content

        # Full-text search via the site's sphinx AJAX endpoint.
        elif clist=="search":
            if len(plist) < 2:
                return content
            import requests
            #ses = requests.session()
            # First GET obtains the session cookie the search endpoint requires.
            r = requests.get(self.url+data)
            cookie = r.cookies["FILMIXNET"]
            url = "https://filmix.me/engine/ajax/sphinx_search.php"
            headers = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0
X-Requested-With: XMLHttpRequest
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
Cookie: FILMIXNET=%s
"""% cookie)
            #data = "scf=fx&story=%s&search_start=0&do=search&subaction=search" % (plist[1].replace(" ", "+"))
            data = "sdc=fx&story=%s&do=search&subaction=search"% (plist[1].replace(" ", "+"))
            r = requests.post(url, data, headers=headers)
            #r = r.content.decode("cp1251").encode("utf8")
            content = self.process_list(r.content, content)
            return content

        # Category submenu scraped from the site's navigation menu.
        elif data in ("movies","series","cartoons"):
            r = self.call("")
            if not r:
                raise Exception("Can not read content")
            # Site pages are cp1251-encoded; normalise to utf8.
            r = r.decode("cp1251").encode("utf8")
            if data == "movies":
                sname = "Фильмы"
            elif data=="series":
                sname = "Сериалы"
            else:
                sname = "Мультфильмы"
            # <span class="menu-title">Фильмы</span>
            m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span'%sname, r, re.DOTALL|re.UNICODE)
            if not m: return content
            r2 = m.group(1)
            result = re.findall(r'<a .*?href="https://filmix\.me/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
            for item in result:
                if "catalog" in item[0]: continue
                title = item[1]
                data2 = item[0]
                img = self.img
                desc = title
                content.append((title,self.name+"::"+data2,img,desc))
            return content

        ## Serial (season / episode listing)
        elif clist=="play":
            r = self.call(path)
            r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
            img = m.group(1) if m else self.img
            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
            duration = "(%s)"%m.group(1) if m else ""
            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
            vid = plist[-1]
            m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
            key = m.group(3) if m else ""
            js = self.get_movie_info(vid,key)
            # Pick the first non-http html5 translation — an obfuscated
            # playlist link that still needs decoding.
            translations = js["message"]["translations"]["html5"]
            for pl in translations:
                if translations[pl].startswith("http"):
                    continue
                pl_link = translations[pl]
                lang = pl.encode("utf8")
                break
            else:
                raise Exception("No episodes list found!")
            #pl_link = js["message"]["translations"]["flash"].values()[0]
            # TODO process several players, currently taking the first
            # TODO - bug, no longer works
            if not pl_link.startswith("http"):
                pl_link = self.decode_direct_media_url(pl_link)
            js = self._http_request(pl_link)
            js = self.decode_direct_media_url(js)
            js = json.loads(js)
            if "s" in qs:
                # A season is selected - list its episodes.
                s = int(qs["s"])
                for i,ep in enumerate(js["playlist"][s-1]["playlist"]):
                    title = title0+" - "+js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
                    data2 = data+"&e=%s"%(i+1)
                    desc = serie +"\n"+desc0
                    content.append((title,self.name+"::"+data2,img,desc))
            else:
                # Top level - entries are either episodes (have "file") or seasons.
                for i,ep in enumerate(js["playlist"]):
                    title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][i]["comment"].encode("utf8")
                    if "file" in ep and ep["file"]:
                        data2 = data+"?e=%s"%(i+1)
                    else:
                        data2 = data+"?s=%s"%(i+1)
                    desc = serie +"\n"+desc0
                    content.append((title,self.name+"::"+data2,img,desc))
            return content
            #r = self._http_request(url)

        ### listing pages ###
        else:
            r = self.call(data)
            r = r.decode("cp1251").encode("utf8")
            content = self.process_list(r, content)
            if '<div class="navigation">' in r:
                # Derive the "next page" link from the pager, or synthesise it.
                m = re.search(r'href="https://filmix\.me/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
                if m:
                    data2 = m.group(1)
                else:
                    m = re.search("/page/(\d)+",data)
                    if m:
                        page = int(m.group(1))+1
                        data2 = re.sub("/page/(\d)+", "/page/%s"%page, data)
                    else:
                        data2 = data + "/page/2"
                content.append(("Next page",self.name+"::"+data2,"next.png","Next page"))
            return content
  177. def process_list(self, r, content):
  178. for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
  179. #m2 = re.search(r'<a class="watch icon-play" itemprop="url" href="([^"]+)"', r2, re.DOTALL)
  180. #<a class="watch icon-play" itemprop="url" href="https://filmix.me/dramy/110957-stolik-19-2017.html"
  181. #m = re.search(r'<a href="https://filmix\.me/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
  182. m = re.search(r'<a class="watch icon-play" itemprop="url" href="https://filmix.me/\w+/(\d+)-', r2, re.DOTALL)
  183. if not m: continue
  184. vid = m.group(1)
  185. data2 = "play/%s"%vid
  186. #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
  187. title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
  188. m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
  189. if m:
  190. title = title + "/"+m.group(1)
  191. m = re.search(r'img src="(https://filmix\.me/uploads/posters/thumbs/[^"]+)"', r2)
  192. img = m.group(1) if m else self.img
  193. m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
  194. if m:
  195. title = "%s (%s)"%(title,m.group(1))
  196. title = util.unescape(title)
  197. m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
  198. desc0 = util.unescape(m.group(1)) if m else ""
  199. props = []
  200. genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
  201. genre = ",".join(genre)
  202. if genre: props.append(genre)
  203. m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
  204. if m: props.append(m.group(1))
  205. m = re.search('<div class="item translate".+?class="item-content">([^<]+)<', r2)
  206. if m: props.append(m.group(1))
  207. m = re.search('itemprop="director">([^<]+)</span></div>', r2)
  208. if m: props.append(m.group(1))
  209. m = re.search('<div class="item actors">(.+?)</div>', r2)
  210. if m:
  211. result = re.findall("<span>(.+?)(&nbsp;)*</span>", m.group(1))
  212. if result:
  213. actors = []
  214. for a in zip(*result)[0]:
  215. actors.append(re.sub("<.+?>", "", a))
  216. props.append(" ".join(actors))
  217. else:
  218. x = 1
  219. pass
  220. desc="%s\n%s\n\n%s"%(title, desc0, "\n".join(props))
  221. content.append((title,self.name+"::"+data2,img,desc))
  222. return content
  223. def is_video(self,data):
  224. source,data,path,plist,clist,params,qs = self.parse_data(data)
  225. if clist == "play" and "e=" in data:
  226. return True
  227. elif clist=="play" and not params:
  228. r = self.call(path)
  229. #r = r.decode("cp1251").encode("utf8")
  230. #m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
  231. #if not m:
  232. if u"Фильм <a href=" in r.decode("cp1251"):
  233. return True
  234. else:
  235. return False
  236. else:
  237. return False
    def get_streams(self, data):
        # Resolve the virtual path `data` into a list of playable stream
        # dicts (url / lang / quality / name / desc).
        print "[filmix] get_streams:", data
        source,data,path,plist,clist,params,qs = self.parse_data(data)
        r = self.call(path)
        if not r:
            return []
        streams = []
        # Site pages are cp1251-encoded; normalise to utf8.
        r = r.decode("cp1251").encode("utf8")
        title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
        m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
        img = m.group(1) if m else self.img
        m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
        duration = "(%s)"%m.group(1) if m else ""
        m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
        desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
        m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
        if not m:
            raise Exception("Can not find video link")
            #return []
        video_link = m.group(1)
        # A movie page contains this marker; otherwise treat it as a serial.
        series = False if u"Фильм <a href=" in r.decode("utf8") else True
        vid = plist[1]
        m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
        key = m.group(3) if m else ""
        js = self.get_movie_info(vid,key)
        # Pick the first non-http html5 translation (obfuscated playlist link).
        translations = js["message"]["translations"]["html5"]
        for pl in translations:
            if translations[pl].startswith("http"):
                continue
            pl_link = translations[pl]
            lang = pl.encode("utf8")
            break
        else:
            raise Exception("No episodes list found!")
        if not pl_link.startswith("http"):
            pl_link = self.decode_direct_media_url(pl_link)
        if not series : # Movie
            url0 = pl_link
            streams2 = self.get_streams2(url0)
            for st in streams2:
                stream = util.item()
                stream["url"]=st[1]
                stream["lang"]=lang
                stream["quality"]=st[0]
                stream["name"]= title
                stream["desc"]=desc
                streams.append(stream)
            return streams
        else: # Serial
            #pl_link = video_link
            js = self._http_request(pl_link)
            js = self.decode_direct_media_url(js)
            js = json.loads(js)
            if "e" in qs:
                if "s" in qs:
                    s = int(qs["s"])
                else:
                    s = None
                e = int(qs["e"])
                if s: # season + episode
                    serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
                    title = title0+" - "+ serie
                    url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
                else: # episode only, no season
                    title = title0 +" - "+js["playlist"][e-1]["comment"].encode("utf8")
                    serie = js["playlist"][e-1]["comment"].encode("utf8")
                    url0 = js["playlist"][e-1]["file"].encode("utf8")
            # NOTE(review): url0 is only assigned when "e" is in qs; callers are
            # expected to reach here via is_video(), which guarantees that —
            # confirm before changing the control flow.
            streams2 = self.get_streams2(url0)
            for st in streams2:
                stream = util.item()
                stream["url"]=st[1]
                stream["lang"]=lang
                stream["quality"]=st[0]
                stream["name"]= title
                stream["desc"]=desc
                streams.append(stream)
            return streams
  315. def call(self, data,params=None,headers=None,lang=""):
  316. if not headers: headers = self.headers
  317. url = self.url+data
  318. result = self._http_request(url,params,headers=headers)
  319. return result
  320. def get_movie_info(self,vid,key=""):
  321. headers = headers2dict("""
  322. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
  323. Accept: application/json, text/javascript, */*; q=0.01
  324. Accept-Language: en-US,en;q=0.5
  325. Content-Type: application/x-www-form-urlencoded; charset=UTF-8
  326. X-Requested-With: XMLHttpRequest
  327. Referer: https://filmix.me/play/%s
  328. Cookie: ad_win12=1;
  329. """%vid )
  330. post_data = {"post_id":vid,"key=":key}
  331. r = util.post("https://filmix.me/api/movies/player_data", data=post_data, headers = headers)
  332. if not r:
  333. raise Exception("Can not get movie info")
  334. #return []
  335. js = json.loads(r)
  336. return js
  337. def decode_base64(self, encoded_url):
  338. codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
  339. codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
  340. i = 0
  341. for a in codec_a:
  342. b = codec_b[i]
  343. i += 1
  344. encoded_url = encoded_url.replace(a, '___')
  345. encoded_url = encoded_url.replace(b, a)
  346. encoded_url = encoded_url.replace('___', b)
  347. return base64.b64decode(encoded_url)
    def decode_unicode(self, encoded_url):
        # Decode the alternative filmix obfuscation: the payload (after an
        # optional leading '#') is consumed in 3-character groups, each turned
        # into a \uXXXX-style escape and decoded in one pass.
        from itertools import izip_longest
        def grouper(n, iterable, fillvalue=None):
            "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
            args = [iter(iterable)] * n
            return izip_longest(fillvalue=fillvalue, *args)
        _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
        # NOTE(review): '\u0' inside a plain Python 2 str literal is the raw
        # characters backslash-u-0 (no escape), which 'unicode_escape' then
        # interprets; this is Python-2-only behavior — confirm before porting.
        tokens = map(lambda items: '\u0'+''.join(items), grouper(3, _))
        return ''.join(tokens).decode('unicode_escape')
  357. def decode_direct_media_url(self, encoded_url, checkhttp=False):
  358. if(checkhttp == True and (encoded_url.find('http://') != -1 or encoded_url.find('https://') != -1)):
  359. return False
  360. try:
  361. if encoded_url.find('#') != -1:
  362. return self.decode_unicode(encoded_url)
  363. else:
  364. return self.decode_base64(encoded_url)
  365. except:
  366. return False
  367. def decode_uppod_text(self, text):
  368. Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
  369. Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
  370. text = text.replace("\n", "").strip()
  371. for i in range(len(Client_codec_a)):
  372. char1 = Client_codec_b[i]
  373. char2 = Client_codec_a[i]
  374. text = text.replace(char1, "___")
  375. text = text.replace(char2, char1)
  376. text = text.replace("___", char2)
  377. result = base64.b64decode(text)
  378. print result
  379. return result
  380. def get_streams2(self,url0):
  381. m = re.search("\[([\d\w,]+)\]",url0)
  382. if not m:
  383. return [("?",url0)]
  384. res = m.group(1)
  385. streams=[]
  386. for res in res.split(","):
  387. if not res: continue
  388. if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
  389. url=re.sub("\[[\d\w,]+\]",res,url0)
  390. streams.append((res,url))
  391. return streams
  392. if __name__ == "__main__":
  393. sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
  394. import run
  395. source = Source()
  396. data= sys.argv[1] if len(sys.argv)>1 else source.name+"::home"
  397. run.run(source, data)
  398. sys.exit()