- # -*- coding: UTF-8 -*-
- # /*
- # * Copyright (C) 2011 Libor Zoubek,ivars777
- # *
- # *
- # * This Program is free software; you can redistribute it and/or modify
- # * it under the terms of the GNU General Public License as published by
- # * the Free Software Foundation; either version 2, or (at your option)
- # * any later version.
- # *
- # * This Program is distributed in the hope that it will be useful,
- # * but WITHOUT ANY WARRANTY; without even the implied warranty of
- # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- # * GNU General Public License for more details.
- # *
- # * You should have received a copy of the GNU General Public License
- # * along with this program; see the file COPYING. If not, write to
- # * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
- # * http://www.gnu.org/copyleft/gpl.html
- # *
- # */
- import os, sys, re
- import urllib, urllib2
- import datetime, time
- import traceback
- import cookielib
- import requests
- try:
-     from requests.packages.urllib3.exceptions import InsecureRequestWarning
-     requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
- except Exception:
-     pass
- from htmlentitydefs import name2codepoint as n2cp
- import HTMLParser
- import StringIO
-
- #import threading
- #import Queue
- import pickle
- import string
- try:
-     import simplejson as json  # prefer the faster simplejson when available
- except ImportError:
-     import json
- #from demjson import demjson
- #import demjson
- #from bs4 import BeautifulSoup
-
- UA = 'Mozilla/6.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.5) Gecko/2008092417 Firefox/3.0.3'
- LOG = 2
-
- _cookie_jar = None
- CACHE_COOKIES = 'cookies'
-
- def system():
-     if "kodi" in sys.executable.lower():
-         return "kodi"
-     elif sys.platform == "win32":
-         return "windows"
-     elif sys.platform == "linux2":
-         return "enigma2"
-     else:
-         return "unknown"
-
- def nfo2xml(nfo_dict):
-     nfo_type, nfo = next(nfo_dict.iteritems())
-     s = "<%s>\n" % nfo_type.encode("utf8")
-     for k, v in nfo.iteritems():
-         if isinstance(v, list):
-             for v2 in v:
-                 if isinstance(v2, unicode): v2 = v2.encode("utf8")
-                 s += " <%s>%s</%s>\n" % (k.encode("utf8"), v2, k.encode("utf8"))
-         else:
-             if isinstance(v, unicode): v = v.encode("utf8")
-             s += " <%s>%s</%s>\n" % (k.encode("utf8"), v, k.encode("utf8"))
-     s += "</%s>\n" % nfo_type.encode("utf8")
-     return s
-
- def nfo2desc(nfo):
-     if "title" not in nfo:
-         nfo_type, nfo = next(nfo.iteritems())
-     desc = nfo2title(nfo)
-     dd = lambda t: "\n" + nfo[t] if t in nfo and nfo[t] else ""
-     dd2 = lambda t: "\n" + ",".join(nfo[t]) if t in nfo and nfo[t] else ""
-
-     def ddd(t, title=""):
-         if title:
-             title = title + ": "
-         if t in nfo and nfo[t]:
-             if isinstance(nfo[t], list):
-                 d = "\n" + title + ",".join(nfo[t])
-             else:
-                 d = "\n" + title + nfo[t]
-         else:
-             d = ""
-         return d.encode("utf8") if isinstance(d, unicode) else d
-
-     desc += ddd("genre", "Genre")
-     desc += ddd("runtime", "Length")
-     desc += ddd("director", "Director")
-     desc += ddd("actor", "Actors")
-     desc += ddd("language", "Languages")
-     desc += ddd("quality", "Quality")
-     desc += ddd("tagline")
-     if "plot" in nfo and "tagline" in nfo and nfo["tagline"] != nfo["plot"]:
-         desc += ddd("plot")
-     elif "plot" in nfo and "tagline" not in nfo:
-         desc += ddd("plot")
-     return desc.encode("utf8") if isinstance(desc, unicode) else desc
-
- def nfo2title(nfo):
-     if "title" not in nfo:
-         nfo_type, nfo = next(nfo.iteritems())
-     title = nfo["title"]
-     if "originaltitle" in nfo and nfo["originaltitle"] and nfo["originaltitle"] != nfo["title"]:
-         title += " ~ " + nfo["originaltitle"]
-     se = ""
-     if "season" in nfo and nfo["season"]:
-         se += "S%s" % nfo["season"]
-     if "episode" in nfo and nfo["episode"]:
-         se += "E%s" % nfo["episode"]
-     if se:
-         title += " (%s)" % se
-     elif "year" in nfo and nfo["year"]:
-         title += " (%s)" % nfo["year"]
-     return title.encode("utf8") if isinstance(title, unicode) else title
-
- def stream_type(data):
-     data = data.lower()
-     m = re.search(r"^(\w+)://", data)
-     prefix = m.group(1) if m else ""
-     if prefix in ("http", "https"):
-         if ".m3u8" in data:
-             return "hls"
-         elif ".mpd" in data:
-             return "dash"
-         else:
-             return "http"
-     else:
-         return prefix
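-
- # A minimal usage sketch of stream_type, following the branches above
- # (the URLs are made-up examples, not part of this module):
- def _demo_stream_type():
-     assert stream_type("http://example.com/live/playlist.m3u8") == "hls"
-     assert stream_type("https://example.com/vod/manifest.mpd") == "dash"
-     assert stream_type("http://example.com/video.mp4") == "http"
-     assert stream_type("rtmp://example.com/live") == "rtmp"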
-
- def check_version(package, url="http://feed.blue.lv/Packages"):
-     "Return current package version from OPKG feed"
-     r = requests.get(url)
-     if not r.ok:
-         return ""
-     m = re.search("Package: %s\nVersion: (.+?)\n" % package, r.content)
-     if not m:
-         return ""
-     return m.group(1)
-
- SPLIT_CHAR = "~"
- SPLIT_CODE = urllib.quote(SPLIT_CHAR)
- EQ_CODE = urllib.quote("=")
- COL_CODE = urllib.quote(":")
- SPACE_CODE = urllib.quote(" ")
-
- def make_fname(title):
-     "Make file name from title"
-     title = title.strip()
-     fname0 = re.sub(r"[/\n\r\t,:]", " ", title)
-     fname0 = re.sub(r"['\"]", "", fname0)  # drop single and double quotes
-     return fname0
-
- def hls_base(url):
-     url2 = url.split("?")[0]
-     url2 = "/".join(url2.split("/")[0:-1]) + "/"
-     return url2
-
- def stream_change(stream):
-     #return stream # TODO
-     if ("resolver" in stream and stream["resolver"] in ("viaplay", "hqq", "filmas")) or \
-             ("surl" in stream and re.search(r"https*://(hqq|goo\.gl)", stream["surl"])):
-         stream["url"] = streamproxy_encode(stream["url"], stream["headers"])
-         stream["headers"] = {}
-     return stream
-
- def streamproxy_encode(url, headers=None, proxy_url=None):
-     PROXY_URL = "http://localhost:8880/"
-     headers = headers or {}
-     if "?" not in url:
-         url = url + "?"
-     url2 = url.replace(SPLIT_CHAR, SPLIT_CODE).replace(":", COL_CODE).replace(" ", SPACE_CODE)
-     if not proxy_url:
-         proxy_url = PROXY_URL
-     url2 = proxy_url + url2
-     if headers:
-         headers2 = []
-         for h in headers:
-             headers2.append("%s=%s" % (h, headers[h].replace("=", EQ_CODE).replace(SPLIT_CHAR, SPLIT_CODE).replace(" ", SPACE_CODE)))
-         headers2 = SPLIT_CHAR.join(headers2)
-         url2 = url2 + SPLIT_CHAR + headers2
-     #return url2.encode("utf8") if isinstance(url2,unicode) else url2
-     return url2
-
- def streamproxy_decode(urlp):
-     path = urlp.replace(re.search("http://[^/]+", urlp).group(0), "")
-     p = path.split(SPLIT_CHAR)
-     url = urllib.unquote(p[0][1:])
-     #headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
-     headers = {}
-     if len(p) > 1:
-         for h in p[1:]:
-             headers[h.split("=")[0]] = urllib.unquote(h.split("=")[1])
-     return url, headers
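-
- # A minimal round-trip sketch for streamproxy_encode/streamproxy_decode
- # (assumes the default local proxy URL; the stream URL and header values
- # are made-up examples):
- def _demo_streamproxy_roundtrip():
-     url = "http://example.com/stream.m3u8?token=abc"
-     headers = {"Referer": "http://example.com/"}
-     urlp = streamproxy_encode(url, headers)
-     url2, headers2 = streamproxy_decode(urlp)
-     assert url2 == url and headers2 == headers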
-
- def streamproxy_encode2(url, headers=None, proxy_url=None):
-     PROXY_URL = "http://localhost:8880/"
-     headers = headers or {}
-     #url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
-     url2 = urllib.quote_plus(url)
-     if not proxy_url:
-         proxy_url = PROXY_URL
-     url2 = proxy_url + url2 + "/?"
-     if headers:
-         headers2 = []
-         for h in headers:
-             headers2.append("%s=%s" % (h, headers[h].replace("=", EQ_CODE).replace(SPLIT_CHAR, SPLIT_CODE).replace(" ", SPACE_CODE)))
-         headers2 = SPLIT_CHAR.join(headers2)
-         url2 = url2 + SPLIT_CHAR + headers2
-     return url2
-
- def streamproxy_decode2(urlp):
-     path = urlp.replace(re.search("http://[^/]+", urlp).group(0), "")
-     p = path.split(SPLIT_CHAR)
-     url = urllib.unquote_plus(p[0][1:-2])
-     headers = {}
-     if len(p) > 1:
-         for h in p[1:]:
-             headers[h.split("=")[0]] = urllib.unquote(h.split("=")[1])
-     return url, headers
-
-
- def streamproxy_encode3(cmd, data, proxy_url=None, headers=None, qs=None):
-     PROXY_URL = "http://localhost:8880/"
-     data2 = urllib.quote_plus(data)
-     if not proxy_url:
-         proxy_url = PROXY_URL
-     urlp = proxy_url + cmd + "/" + data2 + "/"
-     qs2 = []
-     if headers:
-         headers2 = []
-         for h in headers:
-             headers2.append("%s=%s" % (h, urllib.quote_plus(headers[h])))
-         headers2 = SPLIT_CHAR.join(headers2)
-         qs2.append("headers=" + urllib.quote_plus(headers2))
-     if qs:
-         for qv in qs:
-             qs2.append(qv + "=" + urllib.quote_plus(qs[qv]))
-     if qs2:
-         urlp = urlp + "?" + "&".join(qs2)
-     return urlp
-
- def streamproxy_decode3(urlp):
-     m = re.search("http://[^/]+", urlp)
-     urlp = urlp.replace(m.group(0), "") if m else urlp
-     path = urlp.split("?")[0]
-     cmd = path.split("/")[1]
-     data = "/".join(path.split("/")[2:])
-     data = urllib.unquote_plus(data)
-     qs = urlp.split("?")[1] if "?" in urlp else ""
-     qs2 = {}
-     headers = {}
-     if qs:
-         for q in qs.split("&"):
-             qk = q.split("=")[0]
-             if qk == "headers":
-                 hh = urllib.unquote_plus(q.split("=")[1])
-                 for h in hh.split(SPLIT_CHAR):
-                     headers[h.split("=")[0]] = urllib.unquote_plus(h.split("=")[1])
-             else:
-                 qv = urllib.unquote_plus(q.split("=")[1]) if "=" in q else ""
-                 qs2[qk] = qv
-     return cmd, data, headers, qs2
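-
- # A minimal round-trip sketch for streamproxy_encode3/streamproxy_decode3
- # (command name, URL, header and query values are made-up examples; note
- # that the decoded data keeps the trailing "/" the encoder appends):
- def _demo_streamproxy3():
-     urlp = streamproxy_encode3("playstream", "http://example.com/a.m3u8",
-                                headers={"User-Agent": "VLC"}, qs={"resume": "120"})
-     cmd, data, headers, qs = streamproxy_decode3(urlp)
-     assert cmd == "playstream" and headers == {"User-Agent": "VLC"}
-     assert qs == {"resume": "120"}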
-
-
- class Captions(object):
-     def __init__(self, uri):
-         self.uri = uri
-         self.subs = []
-         self.styles = {}
-         if uri.startswith("http"):
-             r = requests.get(uri)
-             if r.status_code == 200:
-                 self.loads(r.content)
-
-     def loads(self, s):
-         if "WEBVTT" in s[:s.find("\n")]:  # vtt captions
-             self.load_vtt(s)
-         elif "<?xml" in s[:s.find("\n")]:
-             self.load_ttaf(s)
-         else:
-             self.load_vtt(s)  # TODO
-
-     def load_ttaf(self, s):
-         for r2 in re.findall("<style .+?/>", s):
-             st = {}
-             sid = None
-             for a in re.findall(r'(\w+)="([^ "]+)"', r2):
-                 st[a[0]] = a[1]
-                 if a[0] == "id":
-                     sid = a[1]
-             if sid:
-                 self.styles[sid] = st
-         for r2 in re.findall("<p .+?</p>", s):
-             sub = {}
-             sub["begin"] = str2sec(re.search('begin="([^"]+)"', r2).group(1)) if re.search('begin="([^"]+)"', r2) else -1
-             sub["end"] = str2sec(re.search('end="([^"]+)"', r2).group(1)) if re.search('end="([^"]+)"', r2) else -1
-             sub["style"] = re.search('style="([^"]+)"', r2).group(1) if re.search('style="([^"]+)"', r2) else None
-             sub["text"] = re.search("<p[^>]+>(.+)</p>", r2).group(1).replace("\n", "")
-             sub["text"] = re.sub(r"<br\s*?/>", "\n", sub["text"])
-             sub["text"] = re.sub("<.+?>", " ", sub["text"])
-             self.subs.append(sub)
-
-     def load_vtt(self, s):
-         f = StringIO.StringIO(s)
-         while True:
-             line = f.readline()
-             if not line:
-                 break
-             m = re.search(r"([\d\.\,:]+)\s*-->\s*([\d\.\,\:]+)", line)
-             if m:
-                 sub = {}
-                 sub["begin"] = str2sec(m.group(1))
-                 sub["end"] = str2sec(m.group(2))
-                 sub["style"] = None
-                 sub["text"] = []
-                 line = f.readline()
-                 while line.strip():
-                     txt = line.strip()
-                     if isinstance(txt, unicode):
-                         txt = txt.encode("utf8")
-                     sub["text"].append(txt)
-                     line = f.readline()
-                 sub["text"] = "\n".join(sub["text"])
-                 self.subs.append(sub)
-
-     def get_srt(self):
-         out = ""
-         i = 0
-         for sub in self.subs:
-             i += 1
-             begin = sub["begin"]
-             begin = "%s,%03i" % (str(datetime.timedelta(seconds=begin / 1000)), begin % 1000)
-             end = sub["end"]
-             end = "%s,%03i" % (str(datetime.timedelta(seconds=end / 1000)), end % 1000)
-             txt2 = sub["text"]
-             out += "%s\n%s --> %s\n%s\n\n\n" % (i, begin, end, txt2)
-         return out
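-
- # A minimal usage sketch for Captions (the subtitle URL is a made-up
- # example; real callers point it at a .vtt or TTAF/TTML resource, then
- # write out the converted SRT):
- def _demo_captions():
-     c = Captions("http://example.com/subtitles/movie.lv.vtt")
-     srt = c.get_srt()
-     open("movie.lv.srt", "w").write(srt)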
-
- def str2sec(r):
-     "Convert a [hh:]mm:ss.mmm time string to milliseconds"
-     r = r.replace(",", ".")
-     m = re.search(r"(\d+\:)*(\d+)\:(\d+\.\d+)", r)
-     if m:
-         sec = int(m.group(1)[:-1]) * 60 * 60 * 1000 if m.group(1) else 0
-         sec += int(m.group(2)) * 60 * 1000 + int(float(m.group(3)) * 1000)
-         return sec
-     else:
-         return -1
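-
- # Quick sanity checks for str2sec (values follow from the arithmetic above;
- # unparseable input yields -1):
- def _demo_str2sec():
-     assert str2sec("00:01:02.500") == 62500
-     assert str2sec("01:02:03,250") == 3723250
-     assert str2sec("garbage") == -1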
-
-
- #c = Captions("http://195.13.216.2/mobile-vod/mp4:lb_barbecue_fr_lq.mp4/lb_barbecue_lv.vtt")
- #c = Captions("http://www.bbc.co.uk/iplayer/subtitles/ng/modav/bUnknown-0edd6227-0f38-411c-8d46-fa033c4c61c1_b05ql1s3_1479853893356.xml")
- #url = "http://195.13.216.2/mobile-vod/mp4:ac_now_you_see_me_2_en_lq.mp4/ac_now_you_see_me_2_lv.vtt"
- #c = Captions(url)
-
- #pass
-
-
- def ttaf2srt(s):
-     out = u""
-     i = 0
-     for p, txt in re.findall("<p ([^>]+)>(.+?)</p>", s, re.DOTALL):
-         i += 1
-         begin = re.search('begin="(.+?)"', p).group(1)
-         begin = begin.replace(".", ",")
-         end = re.search('end="(.+?)"', p).group(1)
-         end = end.replace(".", ",")
-         txt2 = re.sub("<br */>", "\n", txt)
-         out += "%s\n%s --> %s\n%s\n\n" % (i, begin, end, txt2)
-     return out
-
- def vtt2srt(s):
-     out = ""
-     i = 0
-     # the cue-text group must be non-capturing inside, otherwise re.findall
-     # keeps only the last line of a multi-line cue
-     for t1, t2, txt in re.findall(r"([\d:\.]+)\s*-->\s*([\d:\.]+)\n((?:.+\n)+)", s):
-         i += 1
-         if len(t1.split(":")) == 2:
-             t1 = "00:" + t1
-         t1 = t1.replace(".", ",")
-         if len(t2.split(":")) == 2:
-             t2 = "00:" + t2
-         t2 = t2.replace(".", ",")
-         out += "%s\n%s --> %s\n%s\n\n" % (i, t1, t2, txt)
-     return out
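-
- # A small usage sketch for vtt2srt with an inline, made-up cue; the short
- # mm:ss timestamps gain the "00:" hour prefix and "." becomes ",":
- def _demo_vtt2srt():
-     vtt = "WEBVTT\n\n00:01.000 --> 00:04.000\nHello\nworld\n"
-     print(vtt2srt(vtt))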
-
-
- def item():
-     """Default item content"""
-     stream0 = {
-         'name': '',
-         'url': '',
-         'quality': '?',
-         'surl': '',
-         'subs': [],
-         'headers': {},
-         'desc': '',
-         'img': '',
-         'lang': '',
-         'type': '',
-         'resolver': '',
-         'order': 0,
-         'live': False
-     }
-     return stream0
-
- class _StringCookieJar(cookielib.LWPCookieJar):
-
-     def __init__(self, string=None, filename=None, delayload=False, policy=None):
-         cookielib.LWPCookieJar.__init__(self, filename, delayload, policy)
-         if string and len(string) > 0:
-             self._cookies = pickle.loads(str(string))
-
-     def dump(self):
-         return pickle.dumps(self._cookies)
-
-
- def init_urllib(cache=None):
-     """
-     Initializes urllib cookie handler
-     """
-     global _cookie_jar
-     data = None
-     if cache is not None:
-         data = cache.get(CACHE_COOKIES)
-     _cookie_jar = _StringCookieJar(data)
-     opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(_cookie_jar))
-     urllib2.install_opener(opener)
-
-
- def cache_cookies(cache):
-     """
-     Saves cookies to cache
-     """
-     global _cookie_jar
-     if _cookie_jar:
-         cache.set(CACHE_COOKIES, _cookie_jar.dump())
-
-
- def request0(url, headers={}):
-     debug('request: %s' % url)
-     req = urllib2.Request(url, headers=headers)
-     req.add_header('User-Agent', UA)
-     try:
-         response = urllib2.urlopen(req)
-         data = response.read()
-         response.close()
-     except urllib2.HTTPError, error:
-         data = error.read()
-     debug('len(data) %s' % len(data))
-     return data
-
- def request(url, headers={}):
-     debug('request: %s' % url)
-     if 'User-Agent' not in headers:
-         headers['User-Agent'] = UA
-     try:
-         r = requests.get(url, headers=headers)
-         data = r.content
-     except Exception:
-         traceback.print_exc()
-         data = ""
-     debug('len(data) %s' % len(data))
-     return data
-
- def post(url, data, headers={}):
-     postdata = urllib.urlencode(data)
-     if 'User-Agent' not in headers:
-         headers['User-Agent'] = UA
-     try:
-         r = requests.post(url, data=postdata, headers=headers)
-         data = r.content
-     except Exception:
-         traceback.print_exc()
-         data = ""
-     return data
-
- def post0(url, data, headers={}):
-     postdata = urllib.urlencode(data)
-     req = urllib2.Request(url, postdata, headers)
-     req.add_header('User-Agent', UA)
-     try:
-         response = urllib2.urlopen(req)
-         data = response.read()
-         response.close()
-     except urllib2.HTTPError, error:
-         data = error.read()
-     return data
-
-
- def post_json(url, data, headers={}):
-     postdata = json.dumps(data)
-     headers['Content-Type'] = 'application/json'
-     req = urllib2.Request(url, postdata, headers)
-     req.add_header('User-Agent', UA)
-     response = urllib2.urlopen(req)
-     data = response.read()
-     response.close()
-     return data
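-
- # A minimal usage sketch for post_json (the endpoint and payload are
- # made-up examples; the reply is assumed to be a JSON document):
- def _demo_post_json():
-     reply = post_json("http://example.com/api/login",
-                       {"user": "name", "pass": "secret"})
-     return json.loads(reply)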
-
-
- #def run_parallel_in_threads(target, args_list):
- #result = Queue.Queue()
- ## wrapper to collect return value in a Queue
-
- #def task_wrapper(*args):
- #result.put(target(*args))
-
- #threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
- #for t in threads:
- #t.start()
- #for t in threads:
- #t.join()
- #return result
-
-
- def substr(data, start, end):
-     i1 = data.find(start)
-     i2 = data.find(end, i1)
-     return data[i1:i2]
-
-
- def save_to_file(url, file):
-     try:
-         return save_data_to_file(request(url), file)
-     except:
-         traceback.print_exc()
-
-
- def save_data_to_file(data, file):
-     try:
-         f = open(file, 'wb')
-         f.write(data)
-         f.close()
-         info('File %s saved' % file)
-         return True
-     except:
-         traceback.print_exc()
-
-
- def read_file(file):
-     if not os.path.exists(file):
-         return ''
-     f = open(file, 'r')
-     data = f.read()
-     f.close()
-     return data
-
-
- def _substitute_entity(match):
-     ent = match.group(3)
-     if match.group(1) == '#':
-         # decoding by number
-         if match.group(2) == '':
-             # number is in decimal
-             return unichr(int(ent))
-         elif match.group(2) == 'x':
-             # number is in hex
-             return unichr(int('0x' + ent, 16))
-     else:
-         # they were using a name
-         cp = n2cp.get(ent)
-         if cp:
-             return unichr(cp)
-         else:
-             return match.group()
-
-
- def decode_html(data):
-     if not type(data) == str:
-         return data
-     try:
-         if not type(data) == unicode:
-             data = unicode(data, 'utf-8', errors='ignore')
-         entity_re = re.compile(r'&(#?)(x?)(\w+);')
-         return entity_re.subn(_substitute_entity, data)[0]
-     except:
-         traceback.print_exc()
-         print([data])
-         return data
-
- def unescape(s0):
-     #s2 = re.sub("&#\w+;",HTMLParser.HTMLParser().unescape("\1"),s)
-     s0 = s0.replace("&amp;", "&").replace("&quot;", '"')
-     for s in re.findall("&#\w+;", s0):
-         s2 = HTMLParser.HTMLParser().unescape(s)
-         if isinstance(s0, str):
-             s2 = s2.encode("utf8")
-         s0 = s0.replace(s, s2)
-     return s0
-
- def debug(text):
-     if LOG > 1:
-         print('[DEBUG] ' + str([text]))
-
- def info(text):
-     if LOG > 0:
-         print('[INFO] ' + str([text]))
-
- def error(text):
-     print('[ERROR] ' + str([text]))
-
- _diacritic_replace = {
-     u'\u00f3': 'o',
-     u'\u0213': '-',
-     u'\u00e1': 'a',
-     u'\u010d': 'c',
-     u'\u010c': 'C',
-     u'\u010f': 'd',
-     u'\u010e': 'D',
-     u'\u00e9': 'e',
-     u'\u011b': 'e',
-     u'\u00ed': 'i',
-     u'\u0148': 'n',
-     u'\u0159': 'r',
-     u'\u0161': 's',
-     u'\u0165': 't',
-     u'\u016f': 'u',
-     u'\u00fd': 'y',
-     u'\u017e': 'z',
-     u'\xed': 'i',
-     u'\xe9': 'e',
-     u'\xe1': 'a',
- }
-
-
- def replace_diacritic(string):
-     ret = []
-     for char in string:
-         if char in _diacritic_replace:
-             ret.append(_diacritic_replace[char])
-         else:
-             ret.append(char)
-     return ''.join(ret)
-
-
- def params(url=None):
-     if not url:
-         url = sys.argv[2]
-     param = {}
-     paramstring = url
-     if len(paramstring) >= 2:
-         cleanedparams = paramstring.replace('?', '')
-         if cleanedparams and cleanedparams[-1] == '/':
-             cleanedparams = cleanedparams[:-1]  # strip a trailing slash
-         pairsofparams = cleanedparams.split('&')
-         for pair in pairsofparams:
-             splitparams = pair.split('=')
-             if len(splitparams) == 2:
-                 param[splitparams[0]] = splitparams[1]
-     for p in param.keys():
-         param[p] = param[p].decode('hex')
-     return param
-
-
- def int_to_base(number, base):
-     digs = string.digits + string.letters
-     if number < 0:
-         sign = -1
-     elif number == 0:
-         return digs[0]
-     else:
-         sign = 1
-     number *= sign
-     digits = []
-     while number:
-         digits.append(digs[number % base])
-         number //= base
-     if sign < 0:
-         digits.append('-')
-     digits.reverse()
-     return ''.join(digits)
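-
- # Quick sanity checks for int_to_base (digits run 0-9, then a-z, then A-Z,
- # so base 36 matches the \b tokens jwplayer's packer emits):
- def _demo_int_to_base():
-     assert int_to_base(255, 16) == "ff"
-     assert int_to_base(35, 36) == "z"
-     assert int_to_base(0, 36) == "0"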
-
-
- def extract_jwplayer_setup(data):
-     """
-     Extracts jwplayer setup configuration and returns it as a dictionary.
-
-     :param data: A string to extract the setup from
-     :return: A dictionary containing the setup configuration
-     """
-     data = re.search(r'<script.+?}\(\'(.+)\',\d+,\d+,\'([\w\|]+)\'.*</script>', data, re.I | re.S)
-     if data:
-         replacements = data.group(2).split('|')
-         data = data.group(1)
-         for i in reversed(range(len(replacements))):
-             if len(replacements[i]) > 0:
-                 data = re.sub(r'\b%s\b' % int_to_base(i, 36), replacements[i], data)
-         data = re.search(r'\.setup\(([^\)]+?)\);', data)
-         if data:
-             return json.loads(data.group(1).decode('string_escape'))
-             #return demjson.decode(data.group(1).decode('string_escape')) ### III
-     return None
-
- def datetime_from_utc_to_local(utc_datetime):
-     now_timestamp = time.time()
-     offset = datetime.datetime.fromtimestamp(now_timestamp) - datetime.datetime.utcfromtimestamp(now_timestamp)
-     return utc_datetime + offset
-
- def datetime_from_zulu_to_utc(data):
-     d = map(int, data.split("T")[0].split("-"))
-     h = map(int, data.split("T")[1][:-1].split(":"))
-     dt = datetime.datetime(d[0], d[1], d[2], h[0], h[1], h[2])
-     return dt
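-
- # Example: a Zulu ("Z"-suffixed UTC) timestamp parses to a naive datetime;
- # pair it with datetime_from_utc_to_local for local wall-clock time:
- def _demo_zulu():
-     dt = datetime_from_zulu_to_utc('2018-06-24T06:10:00Z')
-     assert dt == datetime.datetime(2018, 6, 24, 6, 10, 0)
-     return datetime_from_utc_to_local(dt)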
-
- #def parse_html(url):
- # return BeautifulSoup(request(url), 'html5lib', from_encoding='utf-8')
-
- if __name__ == "__main__":
-     datetime_from_zulu_to_utc('2018-06-24T06:10:00Z')
-     s = open("The LEGO Ninjago Movie (2017).lv.vtt").read()
-     #s2 = vtt2srt(s)
-     #open("The LEGO Ninjago Movie (2017).lv.srt", "w").write(s2)
-     #s = 'B\xc4\x93thovena D\xc4\x81rgumu Taka (2014)/Beethoven's Treasure [LV]'
-     #s = s.decode("utf8")
-     #s = unescape(s)
-     #url = "http://localhost:88/https://walterebert.com/playground/video/hls/ts/480x270.m3u8?token=xxxx~User-Agent=Enigma2~Cookie=xxxxx"
-     #url = "http://hyt4d6.vkcache.com/secip/0/UMQ3q2gNjTlOPnEVm3iTiA/ODAuMjMyLjI0MC42/1479610800/hls-vod-s3/flv/api/files/videos/2015/09/11/144197748923a22.mp4.m3u8http://hyt4d6.vkcache.com/secip/0/Y-ZA1qRm8toplc0dN_L6_w/ODAuMjMyLjI0MC42/1479654000/hls-vod-s3/flv/api/files/videos/2015/09/11/144197748923a22.mp4.m3u8"
-     #headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
-     #url = "http://str1e.lattelecom.tv/mobile-vod/mp4:sf_fantastic_beasts_and_where_to_find_them_en_hd.mp4/playlist.m3u8?resource_id=fantastic_beasts_and_where_to_find_them&auth_token=6NAvMFDG+rYTAc4hb5JeL2bmsaRR7bAE23M6KDmhKYOGyXoo0gDpJUE9scYy+nQmfbgk03cWMe9MuXWSH1GqwolEk2jOQ/8Mrg7tOdbwrA8zM7nmkfCZPqQkwajZN4mfSJQVKHqXqJ8="
-     #headers = {}
-     #print url
-     #url = "replay::tiesraide/ltv1/"
-     #url = "ltc::content/live-streams/103?include=quality"
-     #urlp = streamproxy_encode2(url, headers)
-     #print urlp
-     #url2, headers2 = streamproxy_decode2(urlp)
-     #print url2
-     #player(urlp)
-     pass
-
|