123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503 |
- #!/usr/bin/env python
- # coding=utf8
- #
- # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
- # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
- # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
- #
- try:
- import json
- except:
- import simplejson as json
-
- import urllib2, urllib
- import datetime, re, sys,os
- import ConfigParser
- from SourceBase import SourceBase
- import base64
- from collections import OrderedDict
- import sys
- import ssl
- if "_create_unverified_context" in dir(ssl):
- ssl._create_default_https_context = ssl._create_unverified_context
-
- try:
- import util
- except:
- sys.path.insert(0,'..')
- import util
-
def headers2dict(raw):
    """Parse a raw HTTP-header blob into a {name: value} dict (one 'Name: value' per line)."""
    return dict(line.strip().split(": ") for line in raw.strip().splitlines())
-
class Source(SourceBase):
    """filmix.co content source for the PlayStream enigma2 plugin."""

    def __init__(self, country="", cfg_path=None):
        # cfg_path is accepted for interface compatibility but not used here.
        self.name = "filmix"
        self.title = "filmix.co"
        self.img = "filmix.png"
        self.desc = "filmix.co satura skatīšanās"
        self.country = country
        self.url = "https://filmix.co/"
        # Default headers for regular page requests.
        self.headers = headers2dict("""
Host: filmix.co
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
""")
        # Headers for AJAX-style requests (no Host pinning).
        self.headers2 = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
X-Requested-With: XMLHttpRequest
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
""")
-
- def login(self,user="",password=""):
- return True
-
- def get_content(self, data):
- print "[filmix] get_content:", data
- source, data, path, plist, clist, params, qs = self.parse_data(data)
- content=[]
- content.append(("..return", "back","back.png","Return back"))
-
- if clist=="home":
- content.extend([
- ("Search", "filmix::search/{0}","","Search"),
- ("Movies", "filmix::movies","","Movies"),
- ("Series", "filmix::series","","TV Series"),
- ("Cartoons", "filmix::cartoons","","Cartoons"),
- ])
- return content
-
- elif clist=="search":
- if len(plist) < 2:
- return content
- import requests
- #ses = requests.session()
- r = requests.get(self.url+data)
- cookie = r.cookies["FILMIXNET"]
- url = "https://filmix.co/engine/ajax/sphinx_search.php"
- headers = headers2dict("""
- User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0
- Content-Type: application/x-www-form-urlencoded; charset=UTF-8
- X-Requested-With: XMLHttpRequest
- Cookie: FILMIXNET=%s;
- """% cookie)
- #data = "scf=fx&story=%s&search_start=0&do=search&subaction=search" % (plist[1].replace(" ", "+"))
- #data = "sdc=fx&story=%s&do=search&subaction=search"% (plist[1].replace(" ", "+"))
- data = "scf=fx&story=%s&subaction=search" % (plist[1].replace(" ", "+"))
- r = requests.post(url, data, headers=headers)
- #r = r.content.decode("cp1251").encode("utf8")
- content = self.process_list(r.content, content)
- return content
-
- elif data in ("movies","series","cartoons"):
- r = self.call("")
- if not r:
- raise Exception("Can not read content")
- r = r.decode("cp1251").encode("utf8")
- if data == "movies":
- sname = "Фильмы"
- elif data=="series":
- sname = "Сериалы"
- else:
- sname = "Мультфильмы"
- # <span class="menu-title">Фильмы</span>
- m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span'%sname, r, re.DOTALL|re.UNICODE)
- if not m: return content
- r2 = m.group(1)
- result = re.findall(r'<a .*?href="https://filmix\.co/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
- for item in result:
- if "catalog" in item[0]: continue
- title = item[1]
- data2 = item[0]
- img = self.img
- desc = title
- content.append((title,self.name+"::"+data2,img,desc))
- return content
-
- ## Seriāls
- elif clist=="play":
- r = self.call(path)
- r = r.decode("cp1251").encode("utf8")
- title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
- m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
- img = m.group(1) if m else self.img
- m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
- duration = "(%s)"%m.group(1) if m else ""
- m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
- desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
- vid = plist[-1]
- m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
- key = m.group(3) if m else ""
- js = self.get_movie_info(vid,key)
- #translations = js["message"]["translations"]["html5"]
- if "html5" in js["message"]["translations"]:
- translations = js["message"]["translations"]["html5"]
- else:
- translations = js["message"]["translations"]
- translations = js["message"]["translations"]["video"]
-
- items = []
- for pl in translations:
- if translations[pl].startswith("http"):
- continue
- pl_link = translations[pl]
- lang = pl.encode("utf8")
- pl_link = self.decode_direct_media_url(pl_link)
- stxt = self._http_request(pl_link)
- stxt = self.decode_direct_media_url(stxt)
- js = json.loads(stxt)
- if "s" in qs: # season, enumerate episodes
- s = int(qs["s"])
- if s > len(js): continue
- for i,ep in enumerate(js[s-1]["folder"]):
- title2 = js[s-1]["folder"][i]["title"].encode("utf8")
- title = title0+" - "+ title2
- title_season = js[s-1]["title"].encode("utf8")
- data2 = data+"&e=%s"%(i+1)
- desc = title2 +"\n"+desc0
- if data2 not in items:
- items.append(data2)
- content.append((title,self.name+"::"+data2,img,desc))
- else: # enumerate seasons
- for i, se in enumerate(js):
- title2 = se["title"].encode("utf8")
- title = title0 +" - " + title2
- if "file" in se and se["file"]:
- data2 = data+"?e=%s"%(i+1)
- else:
- data2 = data+"?s=%s"%(i+1)
- desc = title2 +"\n"+desc0
- if data2 not in items:
- items.append(data2)
- content.append((title,self.name+"::"+data2,img,desc))
- return content
- #r = self._http_request(url)
-
-
- ### saraksts ###
- else:
- r = self.call(data)
- r = r.decode("cp1251").encode("utf8")
- content = self.process_list(r, content)
- if '<div class="navigation">' in r:
- m = re.search(r'href="https://filmix\.co/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
- if m:
- data2 = m.group(1)
- else:
- m = re.search("/page/(\d)+",data)
- if m:
- page = int(m.group(1))+1
- data2 = re.sub("/page/(\d)+", "/page/%s"%page, data)
- else:
- data2 = data + "/page/2"
- content.append(("Next page",self.name+"::"+data2,"next.png","Next page"))
-
- return content
-
- def process_list(self, r, content):
- for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
- #m2 = re.search(r'<a class="watch icon-play" itemprop="url" href="([^"]+)"', r2, re.DOTALL)
- #<a class="watch icon-play" itemprop="url" href="https://filmix.co/dramy/110957-stolik-19-2017.html"
- #m = re.search(r'<a href="https://filmix\.co/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
- m = re.search(r'<a class="watch icon-play" itemprop="url" href="https://filmix.co/\w+/(\d+)-', r2, re.DOTALL)
- if not m: continue
- vid = m.group(1)
- data2 = "play/%s"%vid
- #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
- title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
- m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
- if m:
- title = title + " ~ "+m.group(1)
- m = re.search(r'img src="(https://filmix\.co/uploads/posters/thumbs/[^"]+)"', r2)
- img = m.group(1) if m else self.img
-
- m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
- if m:
- title = "%s (%s)"%(title,m.group(1))
- title = util.unescape(title)
-
- m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
- desc0 = util.unescape(m.group(1)) if m else ""
-
- props = []
- genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
- genre = ",".join(genre)
- if genre: props.append(genre)
-
- m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
- if m: props.append(m.group(1))
-
- m = re.search('<div class="item translate".+?class="item-content">([^<]+)<', r2)
- if m: props.append(m.group(1))
-
- m = re.search('itemprop="director">([^<]+)</span></div>', r2)
- if m: props.append(m.group(1))
-
- m = re.search('<div class="item actors">(.+?)</div>', r2)
- if m:
- result = re.findall("<span>(.+?)( )*</span>", m.group(1))
- if result:
- actors = []
- for a in zip(*result)[0]:
- actors.append(re.sub("<.+?>", "", a))
- props.append(" ".join(actors))
- else:
- x = 1
- pass
-
- desc="%s\n%s\n\n%s"%(title, desc0, "\n".join(props))
- content.append((title,self.name+"::"+data2,img,desc))
- return content
-
- def is_video(self,data):
- source,data,path,plist,clist,params,qs = self.parse_data(data)
- if clist == "play" and "e=" in data:
- return True
- elif clist == "play" and "s=" in data:
- return False
- elif clist=="play":
- vid = path.split("/")[1]
- js = self.get_movie_info(vid)
- pl = js["message"]["translations"]["pl"]
- if pl == "no":
- return True
- else:
- return False
- else:
- return False
-
- def get_streams(self, data):
- print "[filmix] get_streams:", data
- source,data,path,plist,clist,params,qs = self.parse_data(data)
-
- r = self.call(path)
- if not r:
- return []
- streams = []
- r = r.decode("cp1251").encode("utf8")
- try:
- title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
- m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
- img = m.group(1) if m else self.img
- m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
- duration = "(%s)"%m.group(1) if m else ""
- m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
- desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
- except:
- raise Exception("No stream found")
- #m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
- #if not m:
- # raise Exception("Can not find video link")
- # #return []
- #video_link = m.group(1)
- vid = plist[1]
- m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
- key = m.group(3) if m else ""
- js = self.get_movie_info(vid, key)
- series = True if js["message"]["translations"]["pl"] == "yes" else False
- if "html5" in js["message"]["translations"]:
- translations = js["message"]["translations"]["html5"]
- else:
- translations = js["message"]["translations"]
- translations = js["message"]["translations"]["video"]
-
- if not series : # Filma
- for pl in translations:
- if translations[pl].startswith("http"):
- continue
- pl_link = translations[pl]
- lang = pl.encode("utf8")
- pl_link = self.decode_direct_media_url(pl_link)
- streams2 = self.get_streams2(pl_link)
- for st in streams2:
- stream = util.item()
- stream["url"]=st[1]
- stream["lang"]=lang
- stream["quality"]=st[0]
- stream["name"]= title
- stream["desc"]=desc
- stream["img"] = img
- m = re.search("\d+", st[0])
- stream["order"] = int(m.group(0)) if m else 0
- streams.append(stream)
- #return streams
-
- else: # Seriāls
- for pl in translations:
- if translations[pl].startswith("http"):
- continue
- pl_link = translations[pl]
- lang = pl.encode("utf8")
- pl_link = self.decode_direct_media_url(pl_link)
-
- #pl_link = video_link
- js = self._http_request(pl_link)
- js = self.decode_direct_media_url(js)
- js = json.loads(js)
- playlist = js
- if "e" in qs:
- if "s" in qs:
- s = int(qs["s"])
- else:
- s = None
- e = int(qs["e"])
- if s: # sezona + epizode
- if s > len(js) or e > len(js[s-1]["folder"]): continue
- title2 = js[s-1]["folder"][e-1]["title"].encode("utf8")
- title = title0+" - " + title2
- url0 = js[s-1]["folder"][e-1]["file"].encode("utf8")
- else: # tikai epizode, nav sezonas
- if e > len(js["playlist"]): continue
- title = title0 +" - "+js["playlist"][e-1]["comment"].encode("utf8")
- serie = js["playlist"][e-1]["comment"].encode("utf8")
- url0 = js["playlist"][e-1]["file"].encode("utf8")
- streams2 = self.get_streams2(url0)
- for st in streams2:
- stream = util.item()
- stream["url"]=st[1]
- stream["lang"]=lang
- stream["quality"]=st[0]
- stream["name"]= title
- stream["img"] = img
- stream["desc"]=desc
- m = re.search("\d+", st[0])
- stream["order"] = int(m.group(0)) if m else 0
- streams.append(stream)
-
- streams = sorted(streams,key=lambda item: item["order"],reverse=True)
- return streams
-
- def call(self, data,params=None,headers=None,lang=""):
- if not headers: headers = self.headers
- url = self.url+data
- result = self._http_request(url,params,headers=headers)
- return result
-
- def get_movie_info(self,vid,key=""):
- headers = headers2dict("""
- User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
- Accept: application/json, text/javascript, */*; q=0.01
- Accept-Language: en-US,en;q=0.5
- Content-Type: application/x-www-form-urlencoded; charset=UTF-8
- X-Requested-With: XMLHttpRequest
- Referer: https://filmix.co/play/%s
- Cookie: ad_win12=1;
- """%vid )
- post_data = {"post_id":vid,"key=":key}
- r = util.post("https://filmix.co/api/movies/player_data", data=post_data, headers = headers)
- if not r:
- raise Exception("Can not get movie info")
- #return []
- if r.startswith("<html>"):
- import time
- time.sleep(0.1)
- r = util.post("https://filmix.co/api/movies/player_data", data=post_data, headers = headers)
- try:
- js = json.loads(r)
- except Exception as e:
- raise Exception("Can not get movie info")
- return js
-
- def decode_base64(self, encoded_url):
- codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
- codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
- i = 0
- for a in codec_a:
- b = codec_b[i]
- i += 1
- encoded_url = encoded_url.replace(a, '___')
- encoded_url = encoded_url.replace(b, a)
- encoded_url = encoded_url.replace('___', b)
- return base64.b64decode(encoded_url)
-
- def decode_base64_2(self, encoded_url):
- tokens = ("//Y2VyY2EudHJvdmEuc2FnZ2V6emE=", "//c2ljYXJpby4yMi5tb3ZpZXM=", "//a2lub2NvdmVyLnc5OC5uamJo")
- clean_encoded_url = encoded_url[2:].replace("\/","/")
-
- for token in tokens:
- clean_encoded_url = clean_encoded_url.replace(token, "")
-
- return base64.b64decode(clean_encoded_url)
-
- def decode_unicode(self, encoded_url):
- from itertools import izip_longest
- def grouper(n, iterable, fillvalue=None):
- "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
- args = [iter(iterable)] * n
- return izip_longest(fillvalue=fillvalue, *args)
-
- _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
- tokens = map(lambda items: '\u0'+''.join(items), grouper(3, _))
- return ''.join(tokens).decode('unicode_escape')
-
- def decode_direct_media_url(self, encoded_url, checkhttp=False):
- if(checkhttp == True and (encoded_url.find('http://') != -1 or encoded_url.find('https://') != -1)):
- return False
-
- try:
- if encoded_url.find('#') != -1:
- if encoded_url[:2] == '#2':
- return self.decode_base64_2(encoded_url)
- else:
- return self.decode_unicode(encoded_url)
- else:
- return self.decode_base64(encoded_url)
- except:
- return False
-
-
- def decode_uppod_text(self, text):
- Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
- Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
- text = text.replace("\n", "").strip()
- for i in range(len(Client_codec_a)):
- char1 = Client_codec_b[i]
- char2 = Client_codec_a[i]
- text = text.replace(char1, "___")
- text = text.replace(char2, char1)
- text = text.replace("___", char2)
- result = base64.b64decode(text)
- print result
- return result
-
- def get_streams2_(self,url0):
- # Old version
- m = re.search("\[([\d\w,]+)\]",url0)
- if not m:
- return [("?",url0)]
- res = m.group(1)
- streams=[]
- for res in res.split(","):
- if not res: continue
- if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
- url=re.sub("\[[\d\w,]+\]",res,url0)
- streams.append((res,url))
- return streams
-
- def get_streams2(self,url0):
- m = re.search("\[([\d\w]+)\]",url0)
- if not m:
- return [("?",url0)]
- streams=[]
- for st in url0.split(","):
- if not st: continue
- m = re.search(r"\[([\d\w]+)\]", st)
- if not m:
- continue
- res = m.group(1)
- if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
- url=st.replace(m.group(0), "")
- streams.append((res,url))
- return streams
-
-
if __name__ == "__main__":
    # Standalone debugging entry point: run this source via the shared test runner.
    sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
    import run
    source = Source()
    data = sys.argv[1] if len(sys.argv) > 1 else source.name + "::home"
    run.run(source, data)
    sys.exit()
|