Python module (submodule repository) that provides content (video streams) from various online stream sources to the corresponding Enigma2, Kodi and Plex plugins

filmix.py 21KB

  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. try:
  9. import json
  10. except:
  11. import simplejson as json
  12. import urllib2, urllib
  13. import datetime, re, sys,os
  14. import ConfigParser
  15. from SourceBase import SourceBase
  16. import base64
  17. from collections import OrderedDict
  18. import sys
  19. import ssl
  20. if "_create_unverified_context" in dir(ssl):
  21. ssl._create_default_https_context = ssl._create_unverified_context
  22. try:
  23. import util
  24. except:
  25. sys.path.insert(0,'..')
  26. import util
  27. headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
  28. class Source(SourceBase):
  29. def __init__(self,country="",cfg_path=None):
  30. self.name = "filmix"
  31. self.title = "filmix.co"
  32. self.img = "filmix.png"
  33. self.desc = "filmix.co satura skatīšanās"
  34. self.country=country
  35. self.headers = headers2dict("""
  36. Host: filmix.co
  37. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  38. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  39. Accept-Language: en-US,en;q=0.5
  40. """)
  41. self.headers2 = headers2dict("""
  42. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  43. X-Requested-With: XMLHttpRequest
  44. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  45. """)
  46. self.url = "https://filmix.co/"
  47. #self.login()
  48. def login(self,user="",password=""):
  49. return True
    def get_content(self, data):
        """Build the content listing for `data` (a source-local path such as
        'home', 'search/<query>', 'movies', 'series', 'cartoons' or
        'play/<id>' with optional ?s=<season>&e=<episode> query).

        Returns a list of (title, data, image, description) tuples.
        """
        print "[filmix] get_content:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        content = []
        content.append(("..return", "back", "back.png", "Return back"))
        if clist == "home":
            # Static top-level menu.
            # NOTE(review): "{0}" in the search entry looks like a literal
            # placeholder the host app fills in -- confirm against callers.
            content.extend([
                ("Search", "filmix::search/{0}", "", "Search"),
                ("Movies", "filmix::movies", "", "Movies"),
                ("Series", "filmix::series", "", "TV Series"),
                ("Cartoons", "filmix::cartoons", "", "Cartoons"),
            ])
            return content
        elif clist == "search":
            if len(plist) < 2:
                # No query supplied yet.
                return content
            import requests
            #ses = requests.session()
            # Initial GET only serves to obtain the FILMIXNET session cookie
            # the search endpoint requires.
            r = requests.get(self.url + data)
            cookie = r.cookies["FILMIXNET"]
            url = "https://filmix.co/engine/ajax/sphinx_search.php"
            headers = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
X-Requested-With: XMLHttpRequest
Cookie: FILMIXNET=%s;
""" % cookie)
            #data = "scf=fx&story=%s&search_start=0&do=search&subaction=search" % (plist[1].replace(" ", "+"))
            #data = "sdc=fx&story=%s&do=search&subaction=search"% (plist[1].replace(" ", "+"))
            data = "scf=fx&story=%s&subaction=search" % (plist[1].replace(" ", "+"))
            r = requests.post(url, data, headers=headers)
            #r = r.content.decode("cp1251").encode("utf8")
            content = self.process_list(r.content, content)
            return content
        elif data in ("movies", "series", "cartoons"):
            # Category landing: scrape the side menu for this section.
            r = self.call("")
            if not r:
                raise Exception("Can not read content")
            r = r.decode("cp1251").encode("utf8")
            if data == "movies":
                sname = "Фильмы"
            elif data == "series":
                sname = "Сериалы"
            else:
                sname = "Мультфильмы"
            # <span class="menu-title">Фильмы</span>
            m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span' % sname, r, re.DOTALL | re.UNICODE)
            if not m: return content
            r2 = m.group(1)
            result = re.findall(r'<a .*?href="https://filmix\.co/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
            for item in result:
                # Skip aggregate "catalog" links, keep genre/sub-section links.
                if "catalog" in item[0]: continue
                title = item[1]
                data2 = item[0]
                img = self.img
                desc = title
                content.append((title, self.name + "::" + data2, img, desc))
            return content
        ## Serial
        elif clist == "play":
            # Single item page: list its seasons or episodes.
            r = self.call(path)
            r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)', r, re.DOTALL)
            img = m.group(1) if m else self.img
            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
            duration = "(%s)" % m.group(1) if m else ""
            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
            vid = plist[-1]
            # Third meta_key token is the api key for the player_data call.
            m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
            key = m.group(3) if m else ""
            js = self.get_movie_info(vid, key)
            #translations = js["message"]["translations"]["html5"]
            if "html5" in js["message"]["translations"]:
                translations = js["message"]["translations"]["html5"]
            else:
                # NOTE(review): the first assignment is immediately
                # overwritten by the second -- kept as in the original.
                translations = js["message"]["translations"]
                translations = js["message"]["translations"]["video"]
            items = []
            for pl in translations:
                # Plain http values are not obfuscated playlists -- skip.
                if translations[pl].startswith("http"):
                    continue
                pl_link = translations[pl]
                lang = pl.encode("utf8")
                pl_link = self.decode_direct_media_url(pl_link)
                stxt = self._http_request(pl_link)
                stxt = self.decode_direct_media_url(stxt)
                js = json.loads(stxt)
                if "s" in qs:  # season selected -> enumerate its episodes
                    s = int(qs["s"])
                    if s > len(js): continue
                    for i, ep in enumerate(js[s - 1]["folder"]):
                        title2 = js[s - 1]["folder"][i]["title"].encode("utf8")
                        title = title0 + " - " + title2
                        title_season = js[s - 1]["title"].encode("utf8")
                        data2 = data + "&e=%s" % (i + 1)
                        desc = title2 + "\n" + desc0
                        # Deduplicate entries across translations.
                        if data2 not in items:
                            items.append(data2)
                            content.append((title, self.name + "::" + data2, img, desc))
                else:  # no season in query -> enumerate seasons
                    for i, se in enumerate(js):
                        title2 = se["title"].encode("utf8")
                        title = title0 + " - " + title2
                        if "file" in se and se["file"]:
                            # Leaf entry (direct file) -> episode link.
                            data2 = data + "?e=%s" % (i + 1)
                        else:
                            data2 = data + "?s=%s" % (i + 1)
                        desc = title2 + "\n" + desc0
                        if data2 not in items:
                            items.append(data2)
                            content.append((title, self.name + "::" + data2, img, desc))
            return content
        #r = self._http_request(url)
        ### listing ###
        else:
            # Generic listing page with optional pagination.
            r = self.call(data)
            r = r.decode("cp1251").encode("utf8")
            content = self.process_list(r, content)
            if '<div class="navigation">' in r:
                m = re.search(r'href="https://filmix\.co/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
                if m:
                    data2 = m.group(1)
                else:
                    # No explicit "next" link -- bump the /page/N suffix.
                    m = re.search("/page/(\d)+", data)
                    if m:
                        page = int(m.group(1)) + 1
                        data2 = re.sub("/page/(\d)+", "/page/%s" % page, data)
                    else:
                        data2 = data + "/page/2"
                content.append(("Next page", self.name + "::" + data2, "next.png", "Next page"))
            return content
  183. def process_list(self, r, content):
  184. for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
  185. #m2 = re.search(r'<a class="watch icon-play" itemprop="url" href="([^"]+)"', r2, re.DOTALL)
  186. #<a class="watch icon-play" itemprop="url" href="https://filmix.co/dramy/110957-stolik-19-2017.html"
  187. #m = re.search(r'<a href="https://filmix\.co/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
  188. m = re.search(r'<a class="watch icon-play" itemprop="url" href="https://filmix.co/\w+/(\d+)-', r2, re.DOTALL)
  189. if not m: continue
  190. vid = m.group(1)
  191. data2 = "play/%s"%vid
  192. #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
  193. title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
  194. m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
  195. if m:
  196. title = title + " ~ "+m.group(1)
  197. m = re.search(r'img src="(https://filmix\.co/uploads/posters/thumbs/[^"]+)"', r2)
  198. img = m.group(1) if m else self.img
  199. m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
  200. if m:
  201. title = "%s (%s)"%(title,m.group(1))
  202. title = util.unescape(title)
  203. m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
  204. desc0 = util.unescape(m.group(1)) if m else ""
  205. props = []
  206. genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
  207. genre = ",".join(genre)
  208. if genre: props.append(genre)
  209. m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
  210. if m: props.append(m.group(1))
  211. m = re.search('<div class="item translate".+?class="item-content">([^<]+)<', r2)
  212. if m: props.append(m.group(1))
  213. m = re.search('itemprop="director">([^<]+)</span></div>', r2)
  214. if m: props.append(m.group(1))
  215. m = re.search('<div class="item actors">(.+?)</div>', r2)
  216. if m:
  217. result = re.findall("<span>(.+?)(&nbsp;)*</span>", m.group(1))
  218. if result:
  219. actors = []
  220. for a in zip(*result)[0]:
  221. actors.append(re.sub("<.+?>", "", a))
  222. props.append(" ".join(actors))
  223. else:
  224. x = 1
  225. pass
  226. desc="%s\n%s\n\n%s"%(title, desc0, "\n".join(props))
  227. content.append((title,self.name+"::"+data2,img,desc))
  228. return content
  229. def is_video(self,data):
  230. source,data,path,plist,clist,params,qs = self.parse_data(data)
  231. if clist == "play" and "e=" in data:
  232. return True
  233. elif clist == "play" and "s=" in data:
  234. return False
  235. elif clist=="play":
  236. vid = path.split("/")[1]
  237. js = self.get_movie_info(vid)
  238. pl = js["message"]["translations"]["pl"]
  239. if pl == "no":
  240. return True
  241. else:
  242. return False
  243. else:
  244. return False
    def get_streams(self, data):
        """Resolve `data` ('play/<id>' with optional ?s=<season>&e=<episode>)
        into a list of playable stream dicts with url/lang/quality/name/
        desc/img/order keys, sorted by resolution descending.
        """
        print "[filmix] get_streams:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        r = self.call(path)
        if not r:
            return []
        streams = []
        r = r.decode("cp1251").encode("utf8")
        try:
            # Scrape title, thumbnail, duration and description off the page.
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)', r, re.DOTALL)
            img = m.group(1) if m else self.img
            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
            duration = "(%s)" % m.group(1) if m else ""
            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
        except:
            raise Exception("No stream found")
        #m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
        #if not m:
        #    raise Exception("Can not find video link")
        #    #return []
        #video_link = m.group(1)
        vid = plist[1]
        # Third meta_key token is the api key for the player_data call.
        m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
        key = m.group(3) if m else ""
        js = self.get_movie_info(vid, key)
        # "pl" == "yes" marks a serial (playlist of episodes).
        series = True if js["message"]["translations"]["pl"] == "yes" else False
        if "html5" in js["message"]["translations"]:
            translations = js["message"]["translations"]["html5"]
        else:
            # NOTE(review): the first assignment is immediately overwritten
            # by the second -- kept as in the original.
            translations = js["message"]["translations"]
            translations = js["message"]["translations"]["video"]
        if not series:  # Movie
            for pl in translations:
                # Plain http values are not obfuscated playlists -- skip.
                if translations[pl].startswith("http"):
                    continue
                pl_link = translations[pl]
                lang = pl.encode("utf8")
                pl_link = self.decode_direct_media_url(pl_link)
                streams2 = self.get_streams2(pl_link)
                for st in streams2:
                    stream = util.item()
                    stream["url"] = st[1]
                    stream["lang"] = lang
                    stream["quality"] = st[0]
                    stream["name"] = title
                    stream["desc"] = desc
                    stream["img"] = img
                    # Numeric part of the resolution drives the sort order.
                    m = re.search("\d+", st[0])
                    stream["order"] = int(m.group(0)) if m else 0
                    streams.append(stream)
            #return streams
        else:  # Serial
            for pl in translations:
                if translations[pl].startswith("http"):
                    continue
                pl_link = translations[pl]
                lang = pl.encode("utf8")
                pl_link = self.decode_direct_media_url(pl_link)
                #pl_link = video_link
                # Fetch and decode the per-translation playlist JSON.
                js = self._http_request(pl_link)
                js = self.decode_direct_media_url(js)
                js = json.loads(js)
                playlist = js
                if "e" in qs:
                    if "s" in qs:
                        s = int(qs["s"])
                    else:
                        s = None
                    e = int(qs["e"])
                    if s:  # season + episode
                        if s > len(js) or e > len(js[s - 1]["folder"]): continue
                        title2 = js[s - 1]["folder"][e - 1]["title"].encode("utf8")
                        title = title0 + " - " + title2
                        url0 = js[s - 1]["folder"][e - 1]["file"].encode("utf8")
                    else:  # episode only, no seasons
                        if e > len(js["playlist"]): continue
                        title = title0 + " - " + js["playlist"][e - 1]["comment"].encode("utf8")
                        serie = js["playlist"][e - 1]["comment"].encode("utf8")
                        url0 = js["playlist"][e - 1]["file"].encode("utf8")
                    streams2 = self.get_streams2(url0)
                    for st in streams2:
                        stream = util.item()
                        stream["url"] = st[1]
                        stream["lang"] = lang
                        stream["quality"] = st[0]
                        stream["name"] = title
                        stream["img"] = img
                        stream["desc"] = desc
                        m = re.search("\d+", st[0])
                        stream["order"] = int(m.group(0)) if m else 0
                        streams.append(stream)
        streams = sorted(streams, key=lambda item: item["order"], reverse=True)
        return streams
  340. def call(self, data,params=None,headers=None,lang=""):
  341. if not headers: headers = self.headers
  342. url = self.url+data
  343. result = self._http_request(url,params,headers=headers)
  344. return result
  345. def get_movie_info(self,vid,key=""):
  346. headers = headers2dict("""
  347. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
  348. Accept: application/json, text/javascript, */*; q=0.01
  349. Accept-Language: en-US,en;q=0.5
  350. Content-Type: application/x-www-form-urlencoded; charset=UTF-8
  351. X-Requested-With: XMLHttpRequest
  352. Referer: https://filmix.co/play/%s
  353. Cookie: ad_win12=1;
  354. """%vid )
  355. post_data = {"post_id":vid,"key=":key}
  356. r = util.post("https://filmix.co/api/movies/player_data", data=post_data, headers = headers)
  357. if not r:
  358. raise Exception("Can not get movie info")
  359. #return []
  360. if r.startswith("<html>"):
  361. import time
  362. time.sleep(0.1)
  363. r = util.post("https://filmix.co/api/movies/player_data", data=post_data, headers = headers)
  364. try:
  365. js = json.loads(r)
  366. except Exception as e:
  367. raise Exception("Can not get movie info")
  368. return js
  369. def decode_base64(self, encoded_url):
  370. codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
  371. codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
  372. i = 0
  373. for a in codec_a:
  374. b = codec_b[i]
  375. i += 1
  376. encoded_url = encoded_url.replace(a, '___')
  377. encoded_url = encoded_url.replace(b, a)
  378. encoded_url = encoded_url.replace('___', b)
  379. return base64.b64decode(encoded_url)
  380. def decode_base64_2(self, encoded_url):
  381. tokens = ("//Y2VyY2EudHJvdmEuc2FnZ2V6emE=", "//c2ljYXJpby4yMi5tb3ZpZXM=", "//a2lub2NvdmVyLnc5OC5uamJo")
  382. clean_encoded_url = encoded_url[2:].replace("\/","/")
  383. for token in tokens:
  384. clean_encoded_url = clean_encoded_url.replace(token, "")
  385. return base64.b64decode(clean_encoded_url)
    def decode_unicode(self, encoded_url):
        # Decode the '#'-prefixed unicode-escape obfuscation variant: split
        # the payload into 3-character groups, turn each group into a
        # backslash-u escape sequence and run the whole string through the
        # unicode_escape codec.
        # NOTE(review): Python 2 only -- relies on itertools.izip_longest
        # and on str.decode(), neither of which exists on Python 3.
        from itertools import izip_longest
        def grouper(n, iterable, fillvalue=None):
            "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
            args = [iter(iterable)] * n
            return izip_longest(fillvalue=fillvalue, *args)
        # Strip the first character when a '#' appears anywhere in the
        # string (NOTE(review): probably meant startswith('#')).
        _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
        tokens = map(lambda items: '\u0'+''.join(items), grouper(3, _))
        return ''.join(tokens).decode('unicode_escape')
  395. def decode_direct_media_url(self, encoded_url, checkhttp=False):
  396. if(checkhttp == True and (encoded_url.find('http://') != -1 or encoded_url.find('https://') != -1)):
  397. return False
  398. try:
  399. if encoded_url.find('#') != -1:
  400. if encoded_url[:2] == '#2':
  401. return self.decode_base64_2(encoded_url)
  402. else:
  403. return self.decode_unicode(encoded_url)
  404. else:
  405. return self.decode_base64(encoded_url)
  406. except:
  407. return False
  408. def decode_uppod_text(self, text):
  409. Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
  410. Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
  411. text = text.replace("\n", "").strip()
  412. for i in range(len(Client_codec_a)):
  413. char1 = Client_codec_b[i]
  414. char2 = Client_codec_a[i]
  415. text = text.replace(char1, "___")
  416. text = text.replace(char2, char1)
  417. text = text.replace("___", char2)
  418. result = base64.b64decode(text)
  419. print result
  420. return result
  421. def get_streams2_(self,url0):
  422. # Old version
  423. m = re.search("\[([\d\w,]+)\]",url0)
  424. if not m:
  425. return [("?",url0)]
  426. res = m.group(1)
  427. streams=[]
  428. for res in res.split(","):
  429. if not res: continue
  430. if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
  431. url=re.sub("\[[\d\w,]+\]",res,url0)
  432. streams.append((res,url))
  433. return streams
  434. def get_streams2(self,url0):
  435. m = re.search("\[([\d\w]+)\]",url0)
  436. if not m:
  437. return [("?",url0)]
  438. streams=[]
  439. for st in url0.split(","):
  440. if not st: continue
  441. m = re.search(r"\[([\d\w]+)\]", st)
  442. if not m:
  443. continue
  444. res = m.group(1)
  445. if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
  446. url=st.replace(m.group(0), "")
  447. streams.append((res,url))
  448. return streams
  449. if __name__ == "__main__":
  450. sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
  451. import run
  452. source = Source()
  453. data= sys.argv[1] if len(sys.argv)>1 else source.name+"::home"
  454. run.run(source, data)
  455. sys.exit()