Enigma2 plugin to play various online streams (mostly Latvian).

# -*- coding: UTF-8 -*-
# /*
# * Copyright (C) 2011 Libor Zoubek, ivars777
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import os, sys, re
import urllib, urllib2
import datetime
import traceback
import cookielib
import requests
from htmlentitydefs import name2codepoint as n2cp
import HTMLParser
import StringIO
#import threading
#import Queue
import pickle
import string
import simplejson as json
#from demjson import demjson
#import demjson
import json  # NOTE: this overrides the simplejson import above
#from bs4 import BeautifulSoup

UA = 'Mozilla/6.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.5) Gecko/2008092417 Firefox/3.0.3'
LOG = 2
_cookie_jar = None
CACHE_COOKIES = 'cookies'
def system():
    if "kodi" in sys.executable.lower():
        return "kodi"
    elif sys.platform == "win32":
        return "windows"
    elif sys.platform == "linux2":
        return "enigma2"
    else:
        return "unknown"
def play_video(streams):
    if len(streams) > 1:
        for i, s in enumerate(streams):
            print "%s: [%s,%s,%s] %s" % (i, s["quality"], s["lang"], s["type"], s["name"])
        a = raw_input("Select stream to play: ")
        try:
            n = int(a)
        except:
            n = 0
        if n >= len(streams):
            stream = streams[-1]
        else:
            stream = streams[n]
    else:
        stream = streams[0]

    stream = stream_change(stream)
    title = stream["name"]
    url = stream["url"]
    suburl = ""
    print url
    if "subs" in stream and stream["subs"]:
        suburl = stream["subs"][0]["url"]
        print "\n**Download subtitles %s - %s" % (title, suburl)
        subs = urllib2.urlopen(suburl).read()
        if subs:
            fname0 = re.sub("[/\n\r\t,]", "_", title)
            subext = ".srt"
            subfile = os.path.join("", fname0 + subext)
            if ".xml" in suburl:
                subs = ttaf2srt(subs)
            with open(subfile, "w") as f:
                f.write(subs)
        else:
            print "\n Error downloading subtitle %s" % suburl
    return player(url, stream["name"], suburl, stream["headers"])
def player(url, title="", suburl="", headers={}):
    from subprocess import call
    print "\n**Play stream %s\n%s" % (title, url.encode("utf8"))
    cmd1 = [r"c:\Program Files\VideoLAN\VLC\vlc.exe", url,
            "--meta-title", title.decode("utf8").encode(sys.getfilesystemencoding()),
            "--http-user-agent", "Enigma2"
            ]
    # gst-launch-1.0 -v souphttpsrc ssl-strict=false proxy=127.0.0.1:8888 extra-headers="Origin:adadadasd" location="http://bitdash-a.akamaihd.net/content/sintel/sintel.mpd" ! decodebin! autovideosink
    cmd2 = [
        r"C:\gstreamer\1.0\x86_64\bin\gst-launch-1.0", "-v",
        "playbin", 'uri="%s"' % url,
        #"souphttpsrc", "ssl-strict=false",
        #"proxy=127.0.0.1:8888",
        #'location="%s"'%url,
        #'!decodebin!autovideosink'
    ]
    cmd = cmd1 if url.startswith("https") else cmd2
    ret = call(cmd)
    #if ret:
    #    a = raw_input("*** Error, continue")
    return
SPLIT_CHAR = "~"
SPLIT_CODE = urllib.quote(SPLIT_CHAR)
EQ_CODE = urllib.quote("=")
COL_CODE = urllib.quote(":")
SPACE_CODE = urllib.quote(" ")
PROXY_URL = "http://localhost:88/"
def stream_change(stream):
    #return stream # TODO
    if stream["surl"]:
        if not re.search(r"https*://(hqq|goo\.gl)", stream["surl"]):
            return stream
        stream["url"] = streamproxy_encode(stream["url"], stream["headers"])
        stream["headers"] = {}
        return stream
    else:
        return stream
def streamproxy_encode(url, headers={}):
    if not "?" in url:
        url = url + "?"
    url2 = url.replace(SPLIT_CHAR, SPLIT_CODE).replace(":", COL_CODE).replace(" ", SPACE_CODE)
    url2 = PROXY_URL + url2
    if headers:
        headers2 = []
        for h in headers:
            headers2.append("%s=%s" % (h, headers[h].replace("=", EQ_CODE).replace(SPLIT_CHAR, SPLIT_CODE).replace(" ", SPACE_CODE)))
        headers2 = SPLIT_CHAR.join(headers2)
        url2 = url2 + SPLIT_CHAR + headers2
    return url2
def streamproxy_decode(urlp):
    path = urlp.replace(re.search("http://[^/]+", urlp).group(0), "")
    p = path.split(SPLIT_CHAR)
    url = urllib.unquote(p[0][1:])
    #headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
    headers = {}
    if len(p) > 1:
        for h in p[1:]:
            #h = urllib.unquote()
            headers[h.split("=")[0]] = urllib.unquote(h.split("=")[1])
    return url, headers
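# Illustrative round trip through the proxy helpers above (uses the default
# PROXY_URL; the trailing "?" is appended by streamproxy_encode):
# >>> urlp = streamproxy_encode("http://example.com/a.m3u8", {"User-Agent": "Enigma2"})
# >>> urlp
# 'http://localhost:88/http%3A//example.com/a.m3u8?~User-Agent=Enigma2'
# >>> streamproxy_decode(urlp)
# ('http://example.com/a.m3u8?', {'User-Agent': 'Enigma2'})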
class Captions(object):
    def __init__(self, uri):
        self.uri = uri
        self.subs = []
        self.styles = {}
        if uri.startswith("http"):
            r = requests.get(uri)
            if r.status_code == 200:
                self.loads(r.content)

    def loads(self, s):
        if "WEBVTT" in s[:s.find("\n")]:  # vtt captions
            self.load_vtt(s)
        elif "<?xml" in s[:s.find("\n")]:
            self.load_ttaf(s)
        else:
            self.load_vtt(s)  # TODO
    def load_ttaf(self, s):
        for r2 in re.findall("<style .+?/>", s):
            st = {}
            for a in re.findall(r'(\w+)="([^ "]+)"', r2):
                st[a[0]] = a[1]
                if a[0] == "id":
                    sid = a[1]
            self.styles[sid] = st
        for r2 in re.findall("<p .+?</p>", s):
            sub = {}
            sub["begin"] = str2sec(re.search('begin="([^"]+)"', r2).group(1)) if re.search('begin="([^"]+)"', r2) else -1
            sub["end"] = str2sec(re.search('end="([^"]+)"', r2).group(1)) if re.search('end="([^"]+)"', r2) else -1
            sub["style"] = re.search('style="([^"]+)"', r2).group(1) if re.search('style="([^"]+)"', r2) else None
            sub["text"] = re.search("<p[^>]+>(.+)</p>", r2).group(1).replace("\n", "")
            sub["text"] = re.sub("<br\s*?/>", "\n", sub["text"])
            sub["text"] = re.sub("<.+?>", " ", sub["text"])
            self.subs.append(sub)
    def load_vtt(self, s):
        f = StringIO.StringIO(s)
        while True:
            line = f.readline()
            if not line:
                break
            m = re.search(r"([\d\.\,:]+)\s*-->\s*([\d\.\,\:]+)", line)
            if m:
                sub = {}
                sub["begin"] = str2sec(m.group(1))
                sub["end"] = str2sec(m.group(2))
                sub["style"] = None
                sub["text"] = []
                line = f.readline()
                while line.strip():
                    txt = line.strip()
                    if isinstance(txt, unicode):
                        txt = txt.encode("utf8")
                    sub["text"].append(txt)
                    line = f.readline()
                sub["text"] = "\n".join(sub["text"])
                self.subs.append(sub)
            else:
                continue
    def get_srt(self):
        out = ""
        i = 0
        for sub in self.subs:
            i += 1
            begin = sub["begin"]
            begin = "%s,%03i" % (str(datetime.timedelta(seconds=begin / 1000)), begin % 1000)
            end = sub["end"]
            end = "%s,%03i" % (str(datetime.timedelta(seconds=end / 1000)), end % 1000)
            txt2 = sub["text"]
            out += "%s\n%s --> %s\n%s\n\n\n" % (i, begin, end, txt2)
        return out
def str2sec(r):
    # Convert a time string ("[hh:]mm:ss.mmm", "," also accepted as decimal separator) to milliseconds
    r = r.replace(",", ".")
    m = re.search(r"(\d+\:)*(\d+)\:(\d+\.\d+)", r)
    if m:
        sec = int(m.group(1)[:-1]) * 60 * 60 * 1000 if m.group(1) else 0
        sec += int(m.group(2)) * 60 * 1000 + int(float(m.group(3)) * 1000)
        return sec
    else:
        return -1
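# Illustrative conversions (hour prefix is optional):
# >>> str2sec("0:01:02.500")
# 62500
# >>> str2sec("01:02:03,500")
# 3723500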
#c = Captions("http://195.13.216.2/mobile-vod/mp4:lb_barbecue_fr_lq.mp4/lb_barbecue_lv.vtt")
#c = Captions("http://www.bbc.co.uk/iplayer/subtitles/ng/modav/bUnknown-0edd6227-0f38-411c-8d46-fa033c4c61c1_b05ql1s3_1479853893356.xml")
#url = "http://195.13.216.2/mobile-vod/mp4:ac_now_you_see_me_2_en_lq.mp4/ac_now_you_see_me_2_lv.vtt"
#c = Captions(url)
#pass
def ttaf2srt(s):
    out = u""
    i = 0
    for p, txt in re.findall("<p ([^>]+)>(.+?)</p>", s, re.DOTALL):
        i += 1
        begin = re.search('begin="(.+?)"', p).group(1)
        begin = begin.replace(".", ",")
        end = re.search('end="(.+?)"', p).group(1)
        end = end.replace(".", ",")
        txt2 = re.sub("<br */>", "\n", txt)
        out += "%s\n%s --> %s\n%s\n\n" % (i, begin, end, txt2)
    return out
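# Illustrative conversion of a minimal (hypothetical) TTAF fragment:
# >>> print ttaf2srt(u'<p begin="00:00:01.000" end="00:00:02.500">Hello<br />world</p>')
# 1
# 00:00:01,000 --> 00:00:02,500
# Hello
# world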
def item():
    stream0 = {
        'name': '',
        'url': '',
        'quality': '?',
        'surl': '',
        'subs': [],
        'headers': {},
        "desc": "",
        "img": "",
        "lang": "",
        "type": "",
        "resolver": "",
        "order": 0
    }
    return stream0
class _StringCookieJar(cookielib.LWPCookieJar):
    def __init__(self, string=None, filename=None, delayload=False, policy=None):
        cookielib.LWPCookieJar.__init__(self, filename, delayload, policy)
        if string and len(string) > 0:
            self._cookies = pickle.loads(str(string))

    def dump(self):
        return pickle.dumps(self._cookies)

def init_urllib(cache=None):
    """
    Initializes urllib cookie handler
    """
    global _cookie_jar
    data = None
    if cache is not None:
        data = cache.get(CACHE_COOKIES)
    _cookie_jar = _StringCookieJar(data)
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(_cookie_jar))
    urllib2.install_opener(opener)
def cache_cookies(cache):
    """
    Saves cookies to cache
    """
    global _cookie_jar
    if _cookie_jar:
        cache.set(CACHE_COOKIES, _cookie_jar.dump())

def request0(url, headers={}):
    debug('request: %s' % url)
    req = urllib2.Request(url, headers=headers)
    req.add_header('User-Agent', UA)
    try:
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
    except urllib2.HTTPError, error:
        data = error.read()
    debug('len(data) %s' % len(data))
    return data
def request(url, headers={}):
    debug('request: %s' % url)
    #req = urllib2.Request(url, headers=headers)
    #req.add_header('User-Agent', UA)
    if 'User-Agent' not in headers:
        headers['User-Agent'] = UA
    try:
        r = requests.get(url, headers=headers)
        data = r.content
    except:
        traceback.print_exc()
        data = ""  # the original re-read r.content here, but r is unbound if the request itself failed
    debug('len(data) %s' % len(data))
    return data
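# Illustrative fetch (example.com is a placeholder; extra headers are merged
# with the default User-Agent set above):
# >>> html = request("http://example.com/", headers={"Referer": "http://example.com/"})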
def post(url, data, headers={}):
    postdata = urllib.urlencode(data)
    #req = urllib2.Request(url, postdata, headers)
    #req.add_header('User-Agent', UA)
    if 'User-Agent' not in headers:
        headers['User-Agent'] = UA
    try:
        r = requests.post(url, data=postdata, headers=headers)
        data = r.content
    except:
        traceback.print_exc()
        data = ""  # as in request(): r is unbound when requests.post() itself raised
    return data
def post0(url, data, headers={}):
    postdata = urllib.urlencode(data)
    req = urllib2.Request(url, postdata, headers)
    req.add_header('User-Agent', UA)
    try:
        response = urllib2.urlopen(req)
        data = response.read()
        response.close()
    except urllib2.HTTPError, error:
        data = error.read()
    return data

def post_json(url, data, headers={}):
    postdata = json.dumps(data)
    headers['Content-Type'] = 'application/json'
    req = urllib2.Request(url, postdata, headers)
    req.add_header('User-Agent', UA)
    response = urllib2.urlopen(req)
    data = response.read()
    response.close()
    return data
#def run_parallel_in_threads(target, args_list):
#    result = Queue.Queue()
#    # wrapper to collect return value in a Queue
#    def task_wrapper(*args):
#        result.put(target(*args))
#    threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
#    for t in threads:
#        t.start()
#    for t in threads:
#        t.join()
#    return result
def substr(data, start, end):
    i1 = data.find(start)
    i2 = data.find(end, i1)
    return data[i1:i2]
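# Illustrative slice (the start marker is included, the end marker is not):
# >>> substr("<a>text</a>more", "<a>", "</a>")
# '<a>text'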
def save_to_file(url, file):
    try:
        return save_data_to_file(request(url), file)
    except:
        traceback.print_exc()

def save_data_to_file(data, file):
    try:
        f = open(file, 'wb')
        f.write(data)
        f.close()
        info('File %s saved' % file)
        return True
    except:
        traceback.print_exc()

def read_file(file):
    if not os.path.exists(file):
        return ''
    f = open(file, 'r')
    data = f.read()
    f.close()
    return data
def _substitute_entity(match):
    ent = match.group(3)
    if match.group(1) == '#':
        # decoding by number
        if match.group(2) == '':
            # number is in decimal
            return unichr(int(ent))
        elif match.group(2) == 'x':
            # number is in hex
            return unichr(int('0x' + ent, 16))
    else:
        # they were using a name
        cp = n2cp.get(ent)
        if cp:
            return unichr(cp)
        else:
            return match.group()
def decode_html(data):
    if not type(data) == str:
        return data
    try:
        if not type(data) == unicode:
            data = unicode(data, 'utf-8', errors='ignore')
        entity_re = re.compile(r'&(#?)(x?)(\w+);')
        return entity_re.subn(_substitute_entity, data)[0]
    except:
        traceback.print_exc()
        print [data]
        return data
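# Illustrative decode of named and numeric entities:
# >>> decode_html('a &amp; b &#243;')
# u'a & b \xf3'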
def unescape(s0):
    #s2 = re.sub("&#\w+;",HTMLParser.HTMLParser().unescape("\1"),s)
    s0 = s0.replace("&amp;", "&")
    for s in re.findall("&#\w+;", s0):
        s2 = HTMLParser.HTMLParser().unescape(s)
        if isinstance(s0, str):
            s2 = s2.encode("utf8")
        s0 = s0.replace(s, s2)
    return s0
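# Illustrative unescape (str input stays utf-8 encoded):
# >>> unescape("Beethoven&#x27;s &amp; friends")
# "Beethoven's & friends"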
def debug(text):
    if LOG > 1:
        print('[DEBUG] ' + str([text]))

def info(text):
    if LOG > 0:
        print('[INFO] ' + str([text]))

def error(text):
    print('[ERROR] ' + str([text]))
_diacritic_replace = {
    u'\u00f3': 'o',
    u'\u0213': '-',
    u'\u00e1': 'a',
    u'\u010d': 'c',
    u'\u010c': 'C',
    u'\u010f': 'd',
    u'\u010e': 'D',
    u'\u00e9': 'e',
    u'\u011b': 'e',
    u'\u00ed': 'i',
    u'\u0148': 'n',
    u'\u0159': 'r',
    u'\u0161': 's',
    u'\u0165': 't',
    u'\u016f': 'u',
    u'\u00fd': 'y',
    u'\u017e': 'z',
    u'\xed': 'i',
    u'\xe9': 'e',
    u'\xe1': 'a',
}
def replace_diacritic(string):
    ret = []
    for char in string:
        if char in _diacritic_replace:
            ret.append(_diacritic_replace[char])
        else:
            ret.append(char)
    return ''.join(ret)
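# Illustrative transliteration (mapped characters are replaced, others kept):
# >>> replace_diacritic(u'\u0161\u017e')
# 'sz'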
def params(url=None):
    if not url:
        url = sys.argv[2]
    param = {}
    paramstring = url
    if len(paramstring) >= 2:
        params = url
        cleanedparams = params.replace('?', '')
        if (params[len(params) - 1] == '/'):
            params = params[0:len(params) - 2]
        pairsofparams = cleanedparams.split('&')
        param = {}
        for i in range(len(pairsofparams)):
            splitparams = {}
            splitparams = pairsofparams[i].split('=')
            if (len(splitparams)) == 2:
                param[splitparams[0]] = splitparams[1]
    for p in param.keys():
        param[p] = param[p].decode('hex')
    return param
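# Illustrative parse (values are hex-encoded; "76616c7565" is 'value'):
# >>> params("?name=76616c7565")
# {'name': 'value'}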
def int_to_base(number, base):
    digs = string.digits + string.letters
    if number < 0:
        sign = -1
    elif number == 0:
        return digs[0]
    else:
        sign = 1
    number *= sign
    digits = []
    while number:
        digits.append(digs[number % base])
        number /= base
    if sign < 0:
        digits.append('-')
    digits.reverse()
    return ''.join(digits)
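# Illustrative conversions:
# >>> int_to_base(255, 16)
# 'ff'
# >>> int_to_base(35, 36)
# 'z'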
def extract_jwplayer_setup(data):
    """
    Extracts jwplayer setup configuration and returns it as a dictionary.

    :param data: A string to extract the setup from
    :return: A dictionary containing the setup configuration
    """
    data = re.search(r'<script.+?}\(\'(.+)\',\d+,\d+,\'([\w\|]+)\'.*</script>', data, re.I | re.S)
    if data:
        replacements = data.group(2).split('|')
        data = data.group(1)
        for i in reversed(range(len(replacements))):
            if len(replacements[i]) > 0:
                data = re.sub(r'\b%s\b' % int_to_base(i, 36), replacements[i], data)
        data = re.search(r'\.setup\(([^\)]+?)\);', data)
        if data:
            return json.loads(data.group(1).decode('string_escape'))
            #return demjson.decode(data.group(1).decode('string_escape')) ### III
    return None
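# Illustrative use on a packed jwplayer embed page (URL is a placeholder; the
# "file" key is a jwplayer convention for the stream location, not guaranteed):
# >>> conf = extract_jwplayer_setup(request("http://example.com/embed/abc"))
# >>> if conf: print conf.get("file")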
#def parse_html(url):
#    return BeautifulSoup(request(url), 'html5lib', from_encoding='utf-8')

if __name__ == "__main__":
    s = 'B\xc4\x93thovena D\xc4\x81rgumu Taka (2014)/Beethoven&#x27;s Treasure [LV]'
    #s = s.decode("utf8")
    #s=unescape(s)
    #url = "http://localhost:88/https://walterebert.com/playground/video/hls/ts/480x270.m3u8?token=xxxx~User-Agent=Enigma2~Cookie=xxxxx"
    url = "http://hyt4d6.vkcache.com/secip/0/UMQ3q2gNjTlOPnEVm3iTiA/ODAuMjMyLjI0MC42/1479610800/hls-vod-s3/flv/api/files/videos/2015/09/11/144197748923a22.mp4.m3u8http://hyt4d6.vkcache.com/secip/0/Y-ZA1qRm8toplc0dN_L6_w/ODAuMjMyLjI0MC42/1479654000/hls-vod-s3/flv/api/files/videos/2015/09/11/144197748923a22.mp4.m3u8"
    headers = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
    urlp = streamproxy_encode(url, headers)
    print urlp
    player(urlp)