#!/usr/bin/env python
# coding=utf8
#
# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
#
try:
    import json
except:
    import simplejson as json
import urllib2, urllib
import datetime, re, sys, os
import ConfigParser
from SourceBase import SourceBase
import base64
from collections import OrderedDict
import sys
import ssl
if "_create_unverified_context" in dir(ssl):
    ssl._create_default_https_context = ssl._create_unverified_context

try:
    import util
except:
    sys.path.insert(0, '..')
    import util

headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])


class Source(SourceBase):

    def __init__(self, country="", cfg_path=None):
        self.name = "filmix"
        self.title = "filmix.me"
        self.img = "filmix.png"
        self.desc = "filmix.me satura skatīšanās"
        self.country = country
        self.headers = headers2dict("""
Host: filmix.me
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
""")
        self.headers2 = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
X-Requested-With: XMLHttpRequest
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
""")
        self.url = "https://filmix.me/"
        #self.login()

    def login(self, user="", password=""):
        return True

    def get_content(self, data):
        print "[filmix] get_content:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        content = []
        content.append(("..return", "back", "back.png", "Return back"))

        if clist == "home":
            content.extend([
                ("Search", "filmix::search/{0}", "", "Search"),
                ("Movies", "filmix::movies", "", "Movies"),
                ("Series", "filmix::series", "", "TV Series"),
                ("Cartoons", "filmix::cartoons", "", "Cartoons"),
            ])
            return content

        elif clist == "search":
            if len(plist) < 2:
                return content
            import requests
            #ses = requests.session()
            r = requests.get(self.url + data)
            cookie = r.cookies["FILMIXNET"]
            url = "https://filmix.me/engine/ajax/sphinx_search.php"
            headers = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0
X-Requested-With: XMLHttpRequest
Content-Type: application/x-www-form-urlencoded; charset=UTF-8
Cookie: FILMIXNET=%s
""" % cookie)
            #data = "scf=fx&story=%s&search_start=0&do=search&subaction=search" % (plist[1].replace(" ", "+"))
            data = "sdc=fx&story=%s&do=search&subaction=search" % (plist[1].replace(" ", "+"))
            r = requests.post(url, data, headers=headers)
            #r = r.content.decode("cp1251").encode("utf8")
            content = self.process_list(r.content, content)
            return content

        elif data in ("movies", "series", "cartoons"):
            r = self.call("")
            if not r:
                raise Exception("Can not read content")
            r = r.decode("cp1251").encode("utf8")
            if data == "movies":
                sname = "Фильмы"
            elif data == "series":
                sname = "Сериалы"
            else:
                sname = "Мультфильмы"
            # Фильмы
            m = re.search('%s(.+?)
• \s+?([^<]+)', r, re.DOTALL)
            duration = "(%s)" % m.group(1) if m else ""
            m = re.search('
]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
            vid = plist[-1]
            m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
            key = m.group(3) if m else ""
            js = self.get_movie_info(vid, key)
            translations = js["message"]["translations"]["html5"]
            for pl in translations:
                if translations[pl].startswith("http"):
                    continue
                pl_link = translations[pl]
                lang = pl.encode("utf8")
                break
            else:
                raise Exception("No episodes list found!")
            #pl_link = js["message"]["translations"]["flash"].values()[0]
            # TODO process several players, currently taking the first
            # TODO - bug, no longer works
            if not pl_link.startswith("http"):
                pl_link = self.decode_direct_media_url(pl_link)
            js = self._http_request(pl_link)
            js = self.decode_direct_media_url(js)
            js = json.loads(js)
            if "s" in qs:
                s = int(qs["s"])
                for i, ep in enumerate(js["playlist"][s - 1]["playlist"]):
                    title = title0 + " - " + js["playlist"][s - 1]["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][s - 1]["playlist"][i]["comment"].encode("utf8")
                    data2 = data + "&e=%s" % (i + 1)
                    desc = serie + "\n" + desc0
                    content.append((title, self.name + "::" + data2, img, desc))
            else:
                for i, ep in enumerate(js["playlist"]):
                    title = title0 + " - " + js["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][i]["comment"].encode("utf8")
                    if "file" in ep and ep["file"]:
                        data2 = data + "?e=%s" % (i + 1)
                    else:
                        data2 = data + "?s=%s" % (i + 1)
                    desc = serie + "\n" + desc0
                    content.append((title, self.name + "::" + data2, img, desc))
            return content
            #r = self._http_request(url)

        ### listing ###
        else:
            r = self.call(data)
            r = r.decode("cp1251").encode("utf8")
            content = self.process_list(r, content)
            if '

(\d+)<', r2, re.DOTALL)
            if m:
                title = "%s (%s)" % (title, m.group(1))
            title = util.unescape(title)
            m = re.search('
([^<]+)
', r2, re.DOTALL)
            desc0 = util.unescape(m.group(1)) if m else ""
            props = []
            genre = re.findall('
]+?">([^<]+)', r2, re.DOTALL)
            genre = ",".join(genre)
            if genre:
                props.append(genre)
            m = re.search('
([^<]+)
', r2, re.DOTALL)
            if m:
                props.append(m.group(1))
            m = re.search('
([^<]+)<', r2)
            if m:
                props.append(m.group(1))
            m = re.search('itemprop="director">([^<]+)
', r2)
            if m:
                props.append(m.group(1))
            m = re.search('
(.+?)
', r2)
            if m:
                result = re.findall("(.+?)( )*", m.group(1))
                if result:
                    actors = []
                    for a in zip(*result)[0]:
                        actors.append(re.sub("<.+?>", "", a))
                    props.append(" ".join(actors))
                else:
                    x = 1
                    pass
            desc = "%s\n%s\n\n%s" % (title, desc0, "\n".join(props))
            content.append((title, self.name + "::" + data2, img, desc))
        return content

    def is_video(self, data):
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        if clist == "play" and "e=" in data:
            return True
        elif clist == "play" and not params:
            r = self.call(path)
            #r = r.decode("cp1251").encode("utf8")
            #m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
            #if not m:
            if u"Фильм
', r, re.DOTALL)
        duration = "(%s)" % m.group(1) if m else ""
        m = re.search('

]+>([^<]+)<', r, re.DOTALL)
        desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
        m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
        if not m:
            raise Exception("Can not find video link")
            #return []
        video_link = m.group(1)
        series = False if u"Фильм

    def decode_unicode(self, encoded_url):
        from itertools import izip_longest

        def grouper(n, iterable, fillvalue=None):
            # Collect data into fixed-length chunks or blocks
            # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
            args = [iter(iterable)] * n
            return izip_longest(fillvalue=fillvalue, *args)

        _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
        tokens = map(lambda items: '\u0' + ''.join(items), grouper(3, _))
        return ''.join(tokens).decode('unicode_escape')

    def decode_direct_media_url(self, encoded_url, checkhttp=False):
        if checkhttp == True and (encoded_url.find('http://') != -1 or encoded_url.find('https://') != -1):
            return False
        try:
            if encoded_url.find('#') != -1:
                return self.decode_unicode(encoded_url)
            else:
                return self.decode_base64(encoded_url)
        except:
            return False

    def decode_uppod_text(self, text):
        # Swap each character between the two fixed substitution alphabets, then base64-decode.
        Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
        Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
        text = text.replace("\n", "").strip()
        for i in range(len(Client_codec_a)):
            char1 = Client_codec_b[i]
            char2 = Client_codec_a[i]
            text = text.replace(char1, "___")
            text = text.replace(char2, char1)
            text = text.replace("___", char2)
        result = base64.b64decode(text)
        print result
        return result

    def get_streams2(self, url0):
        m = re.search("\[([\d\w,]+)\]", url0)
        if not m:
            return [("?", url0)]
        res = m.group(1)
        streams = []
        for res in res.split(","):
            if not res:
                continue
            if res in ["1080p"]:
                continue  # TODO fullhd only in PRO+ version
            url = re.sub("\[[\d\w,]+\]", res, url0)
            streams.append((res, url))
        return streams


if __name__ == "__main__":
    sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
    import run
    source = Source()
    data = sys.argv[1] if len(sys.argv) > 1 else source.name + "::home"
    run.run(source, data)
    sys.exit()
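
# Example invocation (a sketch, assuming this file sits in the plugin's sources/
# directory next to run.py and SourceBase.py, under Python 2 with the "requests"
# module available; the search term below is purely illustrative):
#
#   python filmix.py                            # defaults to "filmix::home"
#   python filmix.py "filmix::movies"           # browse the movie listing
#   python filmix.py "filmix::search/avatar"    # search via the sphinx_search.php endpoint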