Python module (submodule repository) that provides content (video streams) from various online stream sources to the corresponding Enigma2, Kodi and Plex plugins

util.py 21KB

# -*- coding: UTF-8 -*-
# /*
# * Copyright (C) 2011 Libor Zoubek, ivars777
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import os, sys, re
import urllib, urllib2
import datetime
import traceback
import cookielib
import requests
try:
    from requests.packages.urllib3.exceptions import InsecureRequestWarning
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
except:
    pass
from htmlentitydefs import name2codepoint as n2cp
import HTMLParser
import StringIO
#import threading
#import Queue
import pickle
import string
import simplejson as json
#from demjson import demjson
#import demjson
import json  # note: this shadows the simplejson import above; stdlib json is what is actually used
#from bs4 import BeautifulSoup
UA = 'Mozilla/6.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.5) Gecko/2008092417 Firefox/3.0.3'
LOG = 2
_cookie_jar = None
CACHE_COOKIES = 'cookies'

def system():
    if "kodi" in sys.executable.lower():
        return "kodi"
    elif sys.platform == "win32":
        return "windows"
    elif sys.platform == "linux2":
        return "enigma2"
    else:
        return "unknown"

def nfo2xml(nfo_dict):
    nfo_type, nfo = next(nfo_dict.iteritems())
    s = "<%s>\n" % nfo_type.encode("utf8")
    for k, v in nfo.iteritems():
        if isinstance(v, list):
            for v2 in v:
                if isinstance(v2, unicode): v2 = v2.encode("utf8")
                s += " <%s>%s</%s>\n" % (k.encode("utf8"), v2, k.encode("utf8"))
        else:
            if isinstance(v, unicode): v = v.encode("utf8")
            s += " <%s>%s</%s>\n" % (k.encode("utf8"), v, k.encode("utf8"))
    s += "</%s>\n" % nfo_type.encode("utf8")
    return s

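# Hedged usage sketch (illustrative nfo dict, not from the project; list values
# produce one XML element per item):
def _demo_nfo2xml():
    nfo = {"movie": {"genre": [u"drama", u"comedy"]}}
    assert nfo2xml(nfo) == "<movie>\n <genre>drama</genre>\n <genre>comedy</genre>\n</movie>\n"
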
def nfo2desc(nfo):
    if not "title" in nfo:
        nfo_type, nfo = next(nfo.iteritems())
    desc = nfo2title(nfo)
    dd = lambda t: "\n" + nfo[t] if t in nfo and nfo[t] else ""
    dd2 = lambda t: "\n" + ",".join(nfo[t]) if t in nfo and nfo[t] else ""
    def ddd(t, title=""):
        if title:
            title = title + ": "
        if t in nfo and nfo[t]:
            if isinstance(nfo[t], list):
                return "\n" + title + ",".join(nfo[t])
            else:
                return "\n" + title + nfo[t]
        else:
            return ""
    desc += ddd("tagline")
    if "plot" in nfo and "tagline" in nfo and nfo["tagline"] != nfo["plot"]:
        desc += ddd("plot")
    elif "plot" in nfo and not "tagline" in nfo:
        desc += ddd("plot")
    desc += ddd("genre", "Genre")
    desc += ddd("runtime", "Length")
    desc += ddd("director", "Director")
    desc += ddd("actor", "Actors")
    desc += ddd("language", "Languages")
    desc += ddd("quality", "Quality")
    return desc.encode("utf8") if isinstance(desc, unicode) else desc

def nfo2title(nfo):
    if not "title" in nfo:
        nfo_type, nfo = next(nfo.iteritems())
    title = nfo["title"]
    if "originaltitle" in nfo and nfo["originaltitle"] and nfo["originaltitle"] != nfo["title"]:
        title += " ~ " + nfo["originaltitle"]
    if "year" in nfo and nfo["year"]:
        title += " (%s)" % nfo["year"]
    return title.encode("utf8") if isinstance(title, unicode) else title

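# Hedged usage sketch (illustrative data, not from the project):
def _demo_nfo2title():
    nfo = {"movie": {"title": u"Treasure Trail", "originaltitle": u"Dargumu Taka", "year": "2014"}}
    assert nfo2title(nfo) == "Treasure Trail ~ Dargumu Taka (2014)"
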
def stream_type(data):
    data = data.lower()
    m = re.search(r"^(\w+)://", data)
    prefix = m.group(1) if m else ""
    if prefix in ("http", "https"):
        if ".m3u8" in data:
            return "hls"
        elif ".mpd" in data:
            return "dash"
        else:
            return "http"
    else:
        return prefix

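# Hedged usage sketch (illustrative URLs, not from the project):
def _demo_stream_type():
    assert stream_type("http://example.com/live/playlist.m3u8") == "hls"
    assert stream_type("https://example.com/manifest.mpd") == "dash"
    assert stream_type("http://example.com/video.mp4") == "http"
    assert stream_type("rtmp://example.com/live/stream") == "rtmp"
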
def check_version(package, url="http://feed.blue.lv/Packages"):
    "Return current package version from OPKG feed"
    r = requests.get(url)
    if not r.ok:
        return ""
    m = re.search("Package: %s\nVersion: (.+?)\n" % package, r.content)
    if not m:
        return ""
    return m.group(1)

SPLIT_CHAR = "~"
SPLIT_CODE = urllib.quote(SPLIT_CHAR)
EQ_CODE = urllib.quote("=")
COL_CODE = urllib.quote(":")
SPACE_CODE = urllib.quote(" ")

def make_fname(title):
    "Make file name from title"
    title = title.strip()
    fname0 = re.sub("[/\n\r\t,:]", " ", title)
    fname0 = re.sub("['\"]", "", fname0)
    return fname0

def hls_base(url):
    url2 = url.split("?")[0]
    url2 = "/".join(url2.split("/")[0:-1]) + "/"
    return url2

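# Hedged usage sketch (illustrative URL): hls_base() strips the query string
# and the playlist file name, keeping the directory part.
def _demo_hls_base():
    url = "http://example.com/hls/stream/playlist.m3u8?token=abc"
    assert hls_base(url) == "http://example.com/hls/stream/"
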
def stream_change(stream):
    #return stream # TODO
    if "resolver" in stream and stream["resolver"] in ("viaplay", "hqq", "filmas") or \
            "surl" in stream and re.search("https*://(hqq|goo\.gl)", stream["surl"]):
        stream["url"] = streamproxy_encode(stream["url"], stream["headers"])
        stream["headers"] = {}
        return stream
    else:
        return stream

def streamproxy_encode(url, headers=None, proxy_url=None):
    PROXY_URL = "http://localhost:8880/"
    if not "?" in url:
        url = url + "?"
    url2 = url.replace(SPLIT_CHAR, SPLIT_CODE).replace(":", COL_CODE).replace(" ", SPACE_CODE)
    if not proxy_url:
        proxy_url = PROXY_URL
    url2 = proxy_url + url2
    if headers:
        headers2 = []
        for h in headers:
            headers2.append("%s=%s" % (h, headers[h].replace("=", EQ_CODE).replace(SPLIT_CHAR, SPLIT_CODE).replace(" ", SPACE_CODE)))
        headers2 = SPLIT_CHAR.join(headers2)
        url2 = url2 + SPLIT_CHAR + headers2
    #return url2.encode("utf8") if isinstance(url2,unicode) else url2
    return url2

def streamproxy_decode(urlp):
    path = urlp.replace(re.search("http://[^/]+", urlp).group(0), "")
    p = path.split(SPLIT_CHAR)
    url = urllib.unquote(p[0][1:])
    headers = {}
    if len(p) > 1:
        for h in p[1:]:
            headers[h.split("=")[0]] = urllib.unquote(h.split("=")[1])
    return url, headers

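# Hedged round-trip sketch (the localhost proxy URL is the module default;
# the sample URL and header are illustrative):
def _demo_streamproxy_roundtrip():
    url = "http://example.com/playlist.m3u8?token=abc"
    headers = {"User-Agent": "Mozilla/5.0"}
    urlp = streamproxy_encode(url, headers)
    url2, headers2 = streamproxy_decode(urlp)
    assert url2 == url and headers2 == headers
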
def streamproxy_encode2(url, headers=None, proxy_url=None):
    PROXY_URL = "http://localhost:8880/"
    #url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
    url2 = urllib.quote_plus(url)
    if not proxy_url:
        proxy_url = PROXY_URL
    url2 = proxy_url + url2 + "/?"
    if headers:
        headers2 = []
        for h in headers:
            headers2.append("%s=%s" % (h, headers[h].replace("=", EQ_CODE).replace(SPLIT_CHAR, SPLIT_CODE).replace(" ", SPACE_CODE)))
        headers2 = SPLIT_CHAR.join(headers2)
        url2 = url2 + SPLIT_CHAR + headers2
    return url2

def streamproxy_decode2(urlp):
    path = urlp.replace(re.search("http://[^/]+", urlp).group(0), "")
    p = path.split(SPLIT_CHAR)
    url = urllib.unquote_plus(p[0][1:-2])
    headers = {}
    if len(p) > 1:
        for h in p[1:]:
            headers[h.split("=")[0]] = urllib.unquote(h.split("=")[1])
    return url, headers

class Captions(object):
    def __init__(self, uri):
        self.uri = uri
        self.subs = []
        self.styles = {}
        if uri.startswith("http"):
            r = requests.get(uri)
            if r.status_code == 200:
                self.loads(r.content)

    def loads(self, s):
        if "WEBVTT" in s[:s.find("\n")]: # vtt captions
            self.load_vtt(s)
        elif "<?xml" in s[:s.find("\n")]:
            self.load_ttaf(s)
        else:
            self.load_vtt(s) # TODO

    def load_ttaf(self, s):
        for r2 in re.findall("<style .+?/>", s):
            st = {}
            for a in re.findall(r'(\w+)="([^ "]+)"', r2):
                st[a[0]] = a[1]
                if a[0] == "id":
                    sid = a[1]
            self.styles[sid] = st
        for r2 in re.findall("<p .+?</p>", s):
            sub = {}
            sub["begin"] = str2sec(re.search('begin="([^"]+)"', r2).group(1)) if re.search('begin="([^"]+)"', r2) else -1
            sub["end"] = str2sec(re.search('end="([^"]+)"', r2).group(1)) if re.search('end="([^"]+)"', r2) else -1
            sub["style"] = re.search('style="([^"]+)"', r2).group(1) if re.search('style="([^"]+)"', r2) else None
            sub["text"] = re.search("<p[^>]+>(.+)</p>", r2).group(1).replace("\n", "")
            sub["text"] = re.sub("<br\s*?/>", "\n", sub["text"])
            sub["text"] = re.sub("<.+?>", " ", sub["text"])
            self.subs.append(sub)

    def load_vtt(self, s):
        f = StringIO.StringIO(s)
        while True:
            line = f.readline()
            if not line:
                break
            m = re.search(r"([\d\.\,:]+)\s*-->\s*([\d\.\,\:]+)", line)
            if m:
                sub = {}
                sub["begin"] = str2sec(m.group(1))
                sub["end"] = str2sec(m.group(2))
                sub["style"] = None
                sub["text"] = []
                line = f.readline()
                while line.strip():
                    txt = line.strip()
                    if isinstance(txt, unicode):
                        txt = txt.encode("utf8")
                    sub["text"].append(txt)
                    line = f.readline()
                sub["text"] = "\n".join(sub["text"])
                self.subs.append(sub)
            else:
                continue

    def get_srt(self):
        out = ""
        i = 0
        for sub in self.subs:
            i += 1
            begin = sub["begin"]
            begin = "%s,%03i" % (str(datetime.timedelta(seconds=begin / 1000)), begin % 1000)
            end = sub["end"]
            end = "%s,%03i" % (str(datetime.timedelta(seconds=end / 1000)), end % 1000)
            txt2 = sub["text"]
            out += "%s\n%s --> %s\n%s\n\n\n" % (i, begin, end, txt2)
        return out

def str2sec(r):
    # Convert a time string ("[hh:]mm:ss.fff") to milliseconds
    r = r.replace(",", ".")
    m = re.search(r"(\d+\:)*(\d+)\:(\d+\.\d+)", r)
    if m:
        sec = int(m.group(1)[:-1]) * 60 * 60 * 1000 if m.group(1) else 0
        sec += int(m.group(2)) * 60 * 1000 + int(float(m.group(3)) * 1000)
        return sec
    else:
        return -1

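# Hedged usage sketch: despite the name, str2sec() returns milliseconds.
def _demo_str2sec():
    assert str2sec("01:02:03.500") == 1 * 3600000 + 2 * 60000 + 3500
    assert str2sec("02:03,500") == 2 * 60000 + 3500
    assert str2sec("bad input") == -1
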
#c = Captions("http://195.13.216.2/mobile-vod/mp4:lb_barbecue_fr_lq.mp4/lb_barbecue_lv.vtt")
#c = Captions("http://www.bbc.co.uk/iplayer/subtitles/ng/modav/bUnknown-0edd6227-0f38-411c-8d46-fa033c4c61c1_b05ql1s3_1479853893356.xml")
#url = "http://195.13.216.2/mobile-vod/mp4:ac_now_you_see_me_2_en_lq.mp4/ac_now_you_see_me_2_lv.vtt"
#c = Captions(url)

def ttaf2srt(s):
    out = u""
    i = 0
    for p, txt in re.findall("<p ([^>]+)>(.+?)</p>", s, re.DOTALL):
        i += 1
        begin = re.search('begin="(.+?)"', p).group(1)
        begin = begin.replace(".", ",")
        end = re.search('end="(.+?)"', p).group(1)
        end = end.replace(".", ",")
        txt2 = re.sub("<br */>", "\n", txt)
        out += "%s\n%s --> %s\n%s\n\n" % (i, begin, end, txt2)
    return out

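# Hedged usage sketch (minimal TTAF/TTML fragment, illustrative only):
def _demo_ttaf2srt():
    s = u'<p begin="00:00:01.000" end="00:00:02.500">Hello<br />world</p>'
    assert ttaf2srt(s) == u"1\n00:00:01,000 --> 00:00:02,500\nHello\nworld\n\n"
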
def item():
    """Default item content"""
    stream0 = {
        'name': '',
        'url': '',
        'quality': '?',
        'surl': '',
        'subs': [],
        'headers': {},
        "desc": "",
        "img": "",
        "lang": "",
        "type": "",
        "resolver": "",
        "order": 0,
        "live": False
    }
    return stream0

class _StringCookieJar(cookielib.LWPCookieJar):
    def __init__(self, string=None, filename=None, delayload=False, policy=None):
        cookielib.LWPCookieJar.__init__(self, filename, delayload, policy)
        if string and len(string) > 0:
            self._cookies = pickle.loads(str(string))

    def dump(self):
        return pickle.dumps(self._cookies)

def init_urllib(cache=None):
    """
    Initializes urllib cookie handler
    """
    global _cookie_jar
    data = None
    if cache is not None:
        data = cache.get(CACHE_COOKIES)
    _cookie_jar = _StringCookieJar(data)
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(_cookie_jar))
    urllib2.install_opener(opener)

def cache_cookies(cache):
    """
    Saves cookies to cache
    """
    global _cookie_jar
    if _cookie_jar:
        cache.set(CACHE_COOKIES, _cookie_jar.dump())

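# Hedged sketch of the cache protocol assumed by init_urllib()/cache_cookies():
# any object exposing get(key) and set(key, value) works; a dict wrapper is
# enough for illustration (the class below is hypothetical, not part of the module).
class _DemoDictCache(object):
    def __init__(self):
        self._data = {}
    def get(self, key):
        return self._data.get(key)
    def set(self, key, value):
        self._data[key] = value
# usage: cache = _DemoDictCache(); init_urllib(cache); ...; cache_cookies(cache)
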
def request0(url, headers={}):
    debug('request: %s' % url)
    req = urllib2.Request(url, headers=headers)
    req.add_header('User-Agent', UA)
    try:
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
    except urllib2.HTTPError, error:
        data = error.read()
    debug('len(data) %s' % len(data))
    return data

def request(url, headers={}):
    debug('request: %s' % url)
    if 'User-Agent' not in headers:
        headers['User-Agent'] = UA
    try:
        r = requests.get(url, headers=headers)
        data = r.content
    except:
        # the original read r.content here, but r is unbound if the request failed
        traceback.print_exc()
        data = ""
    debug('len(data) %s' % len(data))
    return data

def post(url, data, headers={}):
    postdata = urllib.urlencode(data)
    if 'User-Agent' not in headers:
        headers['User-Agent'] = UA
    try:
        r = requests.post(url, data=postdata, headers=headers)
        data = r.content
    except:
        # the original caught urllib2.HTTPError and read r.content, but requests
        # never raises that, and r is unbound if the request failed
        traceback.print_exc()
        data = ""
    return data

def post0(url, data, headers={}):
    postdata = urllib.urlencode(data)
    req = urllib2.Request(url, postdata, headers)
    req.add_header('User-Agent', UA)
    try:
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
    except urllib2.HTTPError, error:
        data = error.read()
    return data

def post_json(url, data, headers={}):
    postdata = json.dumps(data)
    headers['Content-Type'] = 'application/json'
    req = urllib2.Request(url, postdata, headers)
    req.add_header('User-Agent', UA)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    return data

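# Hedged usage sketch of the HTTP helpers (example.com and the form fields are
# illustrative; both calls hit the network):
def _demo_http_helpers():
    html = request("http://example.com/", headers={"Referer": "http://example.com/"})
    reply = post("http://example.com/login", {"user": "name", "pass": "secret"})
    return html, reply
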
#def run_parallel_in_threads(target, args_list):
#    result = Queue.Queue()
#    # wrapper to collect return value in a Queue
#    def task_wrapper(*args):
#        result.put(target(*args))
#    threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
#    for t in threads:
#        t.start()
#    for t in threads:
#        t.join()
#    return result

def substr(data, start, end):
    i1 = data.find(start)
    i2 = data.find(end, i1)
    return data[i1:i2]

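# Hedged usage sketch: the slice includes `start` but stops before `end`.
def _demo_substr():
    assert substr("<body><div>x</div></body>", "<div>", "</div>") == "<div>x"
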
def save_to_file(url, file):
    try:
        return save_data_to_file(request(url), file)
    except:
        traceback.print_exc()

def save_data_to_file(data, file):
    try:
        f = open(file, 'wb')
        f.write(data)
        f.close()
        info('File %s saved' % file)
        return True
    except:
        traceback.print_exc()

def read_file(file):
    if not os.path.exists(file):
        return ''
    f = open(file, 'r')
    data = f.read()
    f.close()
    return data

def _substitute_entity(match):
    ent = match.group(3)
    if match.group(1) == '#':
        # decoding by number
        if match.group(2) == '':
            # number is in decimal
            return unichr(int(ent))
        elif match.group(2) == 'x':
            # number is in hex
            return unichr(int('0x' + ent, 16))
    else:
        # they were using a name
        cp = n2cp.get(ent)
        if cp:
            return unichr(cp)
        else:
            return match.group()

def decode_html(data):
    if not type(data) == str:
        return data
    try:
        if not type(data) == unicode:
            data = unicode(data, 'utf-8', errors='ignore')
        entity_re = re.compile(r'&(#?)(x?)(\w+);')
        return entity_re.subn(_substitute_entity, data)[0]
    except:
        traceback.print_exc()
        print [data]
        return data

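# Hedged usage sketch: named and numeric entities are decoded to unicode.
def _demo_decode_html():
    assert decode_html("a &amp; b") == u"a & b"
    assert decode_html("&#x27;quoted&#x27;") == u"'quoted'"
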
def unescape(s0):
    s0 = s0.replace("&amp;", "&")
    for s in re.findall("&#\w+;", s0):
        s2 = HTMLParser.HTMLParser().unescape(s)
        if isinstance(s0, str):
            s2 = s2.encode("utf8")
        s0 = s0.replace(s, s2)
    return s0

def debug(text):
    if LOG > 1:
        print('[DEBUG] ' + str([text]))

def info(text):
    if LOG > 0:
        print('[INFO] ' + str([text]))

def error(text):
    print('[ERROR] ' + str([text]))

_diacritic_replace = {
    u'\u00f3': 'o',
    u'\u0213': '-',
    u'\u00e1': 'a',
    u'\u010d': 'c',
    u'\u010c': 'C',
    u'\u010f': 'd',
    u'\u010e': 'D',
    u'\u00e9': 'e',
    u'\u011b': 'e',
    u'\u00ed': 'i',
    u'\u0148': 'n',
    u'\u0159': 'r',
    u'\u0161': 's',
    u'\u0165': 't',
    u'\u016f': 'u',
    u'\u00fd': 'y',
    u'\u017e': 'z',
    u'\xed': 'i',
    u'\xe9': 'e',
    u'\xe1': 'a',
}

def replace_diacritic(string):
    ret = []
    for char in string:
        if char in _diacritic_replace:
            ret.append(_diacritic_replace[char])
        else:
            ret.append(char)
    return ''.join(ret)

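# Hedged usage sketch (diacritics taken from the table above):
def _demo_replace_diacritic():
    assert replace_diacritic(u'\u010d\u00e1st') == u'cast'
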
def params(url=None):
    if not url:
        url = sys.argv[2]
    param = {}
    paramstring = url
    if len(paramstring) >= 2:
        params = url
        cleanedparams = params.replace('?', '')
        if (params[len(params) - 1] == '/'):
            params = params[0:len(params) - 2]
        pairsofparams = cleanedparams.split('&')
        param = {}
        for i in range(len(pairsofparams)):
            splitparams = {}
            splitparams = pairsofparams[i].split('=')
            if (len(splitparams)) == 2:
                param[splitparams[0]] = splitparams[1]
    for p in param.keys():
        param[p] = param[p].decode('hex')
    return param

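# Hedged usage sketch: values are hex-encoded in the plugin URL scheme;
# "616263" is "abc" in hex.
def _demo_params():
    assert params("?name=616263") == {"name": "abc"}
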
def int_to_base(number, base):
    digs = string.digits + string.letters
    if number < 0:
        sign = -1
    elif number == 0:
        return digs[0]
    else:
        sign = 1
    number *= sign
    digits = []
    while number:
        digits.append(digs[number % base])
        number /= base
    if sign < 0:
        digits.append('-')
    digits.reverse()
    return ''.join(digits)

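# Hedged usage sketch: digits run 0-9 then a-z (string.digits + string.letters),
# matching the base-36 identifiers used by packed jwplayer scripts below.
def _demo_int_to_base():
    assert int_to_base(255, 16) == 'ff'
    assert int_to_base(35, 36) == 'z'
    assert int_to_base(36, 36) == '10'
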
def extract_jwplayer_setup(data):
    """
    Extracts jwplayer setup configuration and returns it as a dictionary.

    :param data: A string to extract the setup from
    :return: A dictionary containing the setup configuration
    """
    data = re.search(r'<script.+?}\(\'(.+)\',\d+,\d+,\'([\w\|]+)\'.*</script>', data, re.I | re.S)
    if data:
        replacements = data.group(2).split('|')
        data = data.group(1)
        for i in reversed(range(len(replacements))):
            if len(replacements[i]) > 0:
                data = re.sub(r'\b%s\b' % int_to_base(i, 36), replacements[i], data)
        data = re.search(r'\.setup\(([^\)]+?)\);', data)
        if data:
            return json.loads(data.group(1).decode('string_escape'))
            #return demjson.decode(data.group(1).decode('string_escape')) ### III
    return None

#def parse_html(url):
#    return BeautifulSoup(request(url), 'html5lib', from_encoding='utf-8')

if __name__ == "__main__":
    s = 'B\xc4\x93thovena D\xc4\x81rgumu Taka (2014)/Beethoven&#x27;s Treasure [LV]'
    #s = s.decode("utf8")
    #s = unescape(s)
    #url = "http://localhost:88/https://walterebert.com/playground/video/hls/ts/480x270.m3u8?token=xxxx~User-Agent=Enigma2~Cookie=xxxxx"
    url = "http://hyt4d6.vkcache.com/secip/0/UMQ3q2gNjTlOPnEVm3iTiA/ODAuMjMyLjI0MC42/1479610800/hls-vod-s3/flv/api/files/videos/2015/09/11/144197748923a22.mp4.m3u8http://hyt4d6.vkcache.com/secip/0/Y-ZA1qRm8toplc0dN_L6_w/ODAuMjMyLjI0MC42/1479654000/hls-vod-s3/flv/api/files/videos/2015/09/11/144197748923a22.mp4.m3u8"
    headers = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
    url = "http://str1e.lattelecom.tv/mobile-vod/mp4:sf_fantastic_beasts_and_where_to_find_them_en_hd.mp4/playlist.m3u8?resource_id=fantastic_beasts_and_where_to_find_them&auth_token=6NAvMFDG+rYTAc4hb5JeL2bmsaRR7bAE23M6KDmhKYOGyXoo0gDpJUE9scYy+nQmfbgk03cWMe9MuXWSH1GqwolEk2jOQ/8Mrg7tOdbwrA8zM7nmkfCZPqQkwajZN4mfSJQVKHqXqJ8="
    headers = {}
    print url
    url = "replay::tiesraide/ltv1/"
    url = "ltc::content/live-streams/103?include=quality"
    urlp = streamproxy_encode2(url, headers)
    print urlp
    url2, headers2 = streamproxy_decode2(urlp)
    print url2
    #player(urlp)  # disabled: no player() is defined in this module