123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437 |
- #!/usr/bin/env python
- # coding=utf8
- #
- # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
- # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
- # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
- #
- try:
- import json
- except:
- import simplejson as json
-
- import urllib2, urllib
- import datetime, re, sys,os
- import ConfigParser
- from SourceBase import SourceBase
- import base64
- from collections import OrderedDict
- import sys
- import ssl
- if "_create_unverified_context" in dir(ssl):
- ssl._create_default_https_context = ssl._create_unverified_context
-
- try:
- import util
- except:
- sys.path.insert(0,'..')
- import util
-
- headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
-
- class Source(SourceBase):
-
- def __init__(self,country="",cfg_path=None):
- self.name = "filmix"
- self.title = "filmix.me"
- self.img = "filmix.png"
- self.desc = "filmix.me satura skatīšanās"
- self.country=country
- self.headers = headers2dict("""
- Host: filmix.me
- User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
- Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
- Accept-Language: en-US,en;q=0.5
- """)
- self.headers2 = headers2dict("""
- User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
- X-Requested-With: XMLHttpRequest
- Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
- """)
- self.url = "https://filmix.me/"
- #self.login()
-
- def login(self,user="",password=""):
- return True
-
- def get_content(self, data):
- print "[filmix] get_content:", data
- source, data, path, plist, clist, params, qs = self.parse_data(data)
- content=[]
- content.append(("..return", "back","back.png","Return back"))
-
- if clist=="home":
- content.extend([
- ("Search", "filmix::search/{0}","","Search"),
- ("Movies", "filmix::movies","","Movies"),
- ("Series", "filmix::series","","TV Series"),
- ("Cartoons", "filmix::cartoons","","Cartoons"),
- ])
- return content
-
- elif clist=="search":
- if len(plist) < 2:
- return content
- import requests
- ses = requests.session()
- r = ses.get(self.url+data)
- url = "https://filmix.me/engine/ajax/sphinx_search.php"
- headers = headers2dict("""
- User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0
- Content-Type: application/x-www-form-urlencoded; charset=UTF-8
- """)
- data = "scf=fx&story=%s&search_start=0&do=search&subaction=search" % (plist[1].replace(" ", "+"))
- r = ses.post(url, data, headers=headers)
- #r = r.content.decode("cp1251").encode("utf8")
- content = self.process_list(r.content, content)
- return content
-
- elif data in ("movies","series","cartoons"):
- r = self.call("")
- if not r:
- raise Exception("Can not read content")
- r = r.decode("cp1251").encode("utf8")
- if data == "movies":
- sname = "Фильмы"
- elif data=="series":
- sname = "Сериалы"
- else:
- sname = "Мультфильмы"
- # <span class="menu-title">Фильмы</span>
- m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span'%sname, r, re.DOTALL|re.UNICODE)
- if not m: return content
- r2 = m.group(1)
- result = re.findall(r'<a .*?href="https://filmix\.me/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
- for item in result:
- if "catalog" in item[0]: continue
- title = item[1]
- data2 = item[0]
- img = self.img
- desc = title
- content.append((title,self.name+"::"+data2,img,desc))
- return content
-
- ## Seriāls
- elif clist=="play":
- r = self.call(path)
- r = r.decode("cp1251").encode("utf8")
- title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
- m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
- img = m.group(1) if m else self.img
- m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
- duration = "(%s)"%m.group(1) if m else ""
- m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
- desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
- vid = plist[-1]
- m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
- key = m.group(3) if m else ""
- js = self.get_movie_info(vid,key)
- translations = js["message"]["translations"]["html5"]
- for pl in translations:
- if translations[pl].startswith("http"):
- continue
- pl_link = translations[pl]
- lang = pl.encode("utf8")
- break
- else:
- raise Exception("No episodes list found!")
- #pl_link = js["message"]["translations"]["flash"].values()[0]
- # TODO process several players, currently taking the first
- #TODO - kļuda, vairs nesradā
- if not pl_link.startswith("http"):
- pl_link = self.decode_direct_media_url(pl_link)
- js = self._http_request(pl_link)
- js = self.decode_direct_media_url(js)
- js = json.loads(js)
- if "s" in qs:
- s = int(qs["s"])
- for i,ep in enumerate(js["playlist"][s-1]["playlist"]):
- title = title0+" - "+js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
- serie = js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
- data2 = data+"&e=%s"%(i+1)
- desc = serie +"\n"+desc0
- content.append((title,self.name+"::"+data2,img,desc))
- else:
- for i,ep in enumerate(js["playlist"]):
- title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
- serie = js["playlist"][i]["comment"].encode("utf8")
- if "file" in ep and ep["file"]:
- data2 = data+"?e=%s"%(i+1)
- else:
- data2 = data+"?s=%s"%(i+1)
- desc = serie +"\n"+desc0
- content.append((title,self.name+"::"+data2,img,desc))
- return content
- #r = self._http_request(url)
-
-
- ### saraksts ###
- else:
- r = self.call(data)
- r = r.decode("cp1251").encode("utf8")
- content = self.process_list(r, content)
- if '<div class="navigation">' in r:
- m = re.search(r'href="https://filmix\.me/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
- if m:
- data2 = m.group(1)
- else:
- m = re.search("/page/(\d)+",data)
- if m:
- page = int(m.group(1))+1
- data2 = re.sub("/page/(\d)+", "/page/%s"%page, data)
- else:
- data2 = data + "/page/2"
- content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
-
- return content
-
- def process_list(self, r, content):
- for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
- #m2 = re.search(r'<a class="watch icon-play" itemprop="url" href="([^"]+)"', r2, re.DOTALL)
- #<a class="watch icon-play" itemprop="url" href="https://filmix.me/dramy/110957-stolik-19-2017.html"
- #m = re.search(r'<a href="https://filmix\.me/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
- m = re.search(r'<a class="watch icon-play" itemprop="url" href="https://filmix.me/\w+/(\d+)-', r2, re.DOTALL)
- if not m: continue
- vid = m.group(1)
- data2 = "play/%s"%vid
- #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
- title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
- m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
- if m:
- title = title + "/"+m.group(1)
- m = re.search(r'img src="(https://filmix\.me/uploads/posters/thumbs/[^"]+)"', r2)
- img = m.group(1) if m else self.img
-
- m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
- if m:
- title = "%s (%s)"%(title,m.group(1))
- title = util.unescape(title)
-
- m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
- desc0 = util.unescape(m.group(1)) if m else ""
-
- props = []
- genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
- genre = ",".join(genre)
- if genre: props.append(genre)
-
- m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
- if m: props.append(m.group(1))
-
- m = re.search('<div class="item translate".+?class="item-content">([^<]+)<', r2)
- if m: props.append(m.group(1))
-
- m = re.search('itemprop="director">([^<]+)</span></div>', r2)
- if m: props.append(m.group(1))
-
- m = re.search('<div class="item actors">(.+?)</div>', r2)
- if m:
- result = re.findall("<span>(.+?)( )*</span>", m.group(1))
- if result:
- actors = []
- for a in zip(*result)[0]:
- actors.append(re.sub("<.+?>", "", a))
- props.append(" ".join(actors))
- else:
- x = 1
- pass
-
- desc="%s\n%s\n\n%s"%(title, desc0, "\n".join(props))
- content.append((title,self.name+"::"+data2,img,desc))
- return content
-
- def is_video(self,data):
- source,data,path,plist,clist,params,qs = self.parse_data(data)
- if clist == "play" and "e=" in data:
- return True
- elif clist=="play" and not params:
- r = self.call(path)
- #r = r.decode("cp1251").encode("utf8")
- #m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
- #if not m:
- if u"Фильм <a href=" in r.decode("cp1251"):
- return True
- else:
- return False
-
- else:
- return False
-
- def get_streams(self, data):
- print "[filmix] get_streams:", data
- source,data,path,plist,clist,params,qs = self.parse_data(data)
-
- r = self.call(path)
- if not r:
- return []
- streams = []
- r = r.decode("cp1251").encode("utf8")
- title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
- m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
- img = m.group(1) if m else self.img
- m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
- duration = "(%s)"%m.group(1) if m else ""
- m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
- desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
- m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
- if not m:
- raise Exception("Can not find video link")
- #return []
- video_link = m.group(1)
- series = False if u"Фильм <a href=" in r.decode("utf8") else True
- vid = plist[1]
- m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
- key = m.group(3) if m else ""
- js = self.get_movie_info(vid,key)
- translations = js["message"]["translations"]["html5"]
- for pl in translations:
- if translations[pl].startswith("http"):
- continue
- pl_link = translations[pl]
- lang = pl.encode("utf8")
- break
- else:
- raise Exception("No episodes list found!")
- if not pl_link.startswith("http"):
- pl_link = self.decode_direct_media_url(pl_link)
-
- if not series : # Filma
- url0 = pl_link
- streams2 = self.get_streams2(url0)
- for st in streams2:
- stream = util.item()
- stream["url"]=st[1]
- stream["lang"]=lang
- stream["quality"]=st[0]
- stream["name"]= title
- stream["desc"]=desc
- streams.append(stream)
- return streams
-
- else: # Seriāls
- #pl_link = video_link
- js = self._http_request(pl_link)
- js = self.decode_direct_media_url(js)
- js = json.loads(js)
- if "e" in qs:
- if "s" in qs:
- s = int(qs["s"])
- else:
- s = None
- e = int(qs["e"])
- if s: # sezona + epizode
- serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
- title = title0+" - "+ serie
- url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
- else: # tikai epizode, nav sezonas
- title = title0 +" - "+js["playlist"][e-1]["comment"].encode("utf8")
- serie = js["playlist"][e-1]["comment"].encode("utf8")
- url0 = js["playlist"][e-1]["file"].encode("utf8")
- streams2 = self.get_streams2(url0)
- for st in streams2:
- stream = util.item()
- stream["url"]=st[1]
- stream["lang"]=lang
- stream["quality"]=st[0]
- stream["name"]= title
- stream["desc"]=desc
- streams.append(stream)
- return streams
-
- def call(self, data,params=None,headers=None,lang=""):
- if not headers: headers = self.headers
- url = self.url+data
- result = self._http_request(url,params,headers=headers)
- return result
-
- def get_movie_info(self,vid,key=""):
- headers = headers2dict("""
- User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
- Accept: application/json, text/javascript, */*; q=0.01
- Accept-Language: en-US,en;q=0.5
- Content-Type: application/x-www-form-urlencoded; charset=UTF-8
- X-Requested-With: XMLHttpRequest
- Referer: https://filmix.me/play/%s
- Cookie: ad_win12=1;
- """%vid )
- post_data = {"post_id":vid,"key=":key}
- r = util.post("https://filmix.me/api/movies/player_data", data=post_data, headers = headers)
- if not r:
- raise Exception("Can not get movie info")
- #return []
- js = json.loads(r)
- return js
-
- def decode_base64(self, encoded_url):
- codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
- codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
- i = 0
- for a in codec_a:
- b = codec_b[i]
- i += 1
- encoded_url = encoded_url.replace(a, '___')
- encoded_url = encoded_url.replace(b, a)
- encoded_url = encoded_url.replace('___', b)
- return base64.b64decode(encoded_url)
-
- def decode_unicode(self, encoded_url):
- from itertools import izip_longest
- def grouper(n, iterable, fillvalue=None):
- "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
- args = [iter(iterable)] * n
- return izip_longest(fillvalue=fillvalue, *args)
-
- _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
- tokens = map(lambda items: '\u0'+''.join(items), grouper(3, _))
- return ''.join(tokens).decode('unicode_escape')
-
- def decode_direct_media_url(self, encoded_url, checkhttp=False):
- if(checkhttp == True and (encoded_url.find('http://') != -1 or encoded_url.find('https://') != -1)):
- return False
-
- try:
- if encoded_url.find('#') != -1:
- return self.decode_unicode(encoded_url)
- else:
- return self.decode_base64(encoded_url)
- except:
- return False
-
-
-
- def decode_uppod_text(self, text):
- Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
- Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
- text = text.replace("\n", "").strip()
- for i in range(len(Client_codec_a)):
- char1 = Client_codec_b[i]
- char2 = Client_codec_a[i]
- text = text.replace(char1, "___")
- text = text.replace(char2, char1)
- text = text.replace("___", char2)
- result = base64.b64decode(text)
- print result
- return result
-
- def get_streams2(self,url0):
- m = re.search("\[([\d\w,]+)\]",url0)
- if not m:
- return [("?",url0)]
- res = m.group(1)
- streams=[]
- for res in res.split(","):
- if not res: continue
- if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
- url=re.sub("\[[\d\w,]+\]",res,url0)
- streams.append((res,url))
- return streams
-
-
- if __name__ == "__main__":
-
- sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
- import run
- source = Source()
- data= sys.argv[1] if len(sys.argv)>1 else source.name+"::home"
- run.run(source, data)
- sys.exit()
|