# -*- coding: UTF-8 -*-
# /*
# *      Copyright (C) 2011 Libor Zoubek, ivars777
# *
# *
# *  This Program is free software; you can redistribute it and/or modify
# *  it under the terms of the GNU General Public License as published by
# *  the Free Software Foundation; either version 2, or (at your option)
# *  any later version.
# *
# *  This Program is distributed in the hope that it will be useful,
# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# *  GNU General Public License for more details.
# *
# *  You should have received a copy of the GNU General Public License
# *  along with this program; see the file COPYING.  If not, write to
# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# *  http://www.gnu.org/copyleft/gpl.html
# *
# */

import os, sys, re
import urllib, urllib2
import datetime
import traceback
import cookielib
import requests
try:
    from requests.packages.urllib3.exceptions import InsecureRequestWarning
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
except Exception:
    pass
from htmlentitydefs import name2codepoint as n2cp
import HTMLParser
import StringIO
#import threading
#import Queue
import pickle
import string
try:
    import simplejson as json
except ImportError:
    import json
#from demjson import demjson
#import demjson
#from bs4 import BeautifulSoup

UA = 'Mozilla/6.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.5) Gecko/2008092417 Firefox/3.0.3'
LOG = 2
_cookie_jar = None
CACHE_COOKIES = 'cookies'


def system():
    "Return the platform this code is running on"
    if "kodi" in sys.executable.lower():
        return "kodi"
    elif sys.platform == "win32":
        return "windows"
    elif sys.platform == "linux2":
        return "enigma2"
    else:
        return "unknown"


def nfo2xml(nfo_dict):
    "Serialize an nfo dict into a simple XML string"
    nfo_type, nfo = next(nfo_dict.iteritems())
    s = "<%s>\n" % nfo_type.encode("utf8")
    for k, v in nfo.iteritems():
        if isinstance(v, list):
            for v2 in v:
                if isinstance(v2, unicode):
                    v2 = v2.encode("utf8")
                s += "    <%s>%s</%s>\n" % (k.encode("utf8"), v2, k.encode("utf8"))
        else:
            if isinstance(v, unicode):
                v = v.encode("utf8")
            s += "    <%s>%s</%s>\n" % (k.encode("utf8"), v, k.encode("utf8"))
    s += "</%s>\n" % nfo_type.encode("utf8")
    return s


def nfo2desc(nfo):
    "Build a human-readable description string from an nfo dict"
    if not "title" in nfo:
        nfo_type, nfo = next(nfo.iteritems())
    desc = nfo2title(nfo)
    def ddd(t, title=""):
        if title:
            title = title + ": "
        if t in nfo and nfo[t]:
            if isinstance(nfo[t], list):
                return "\n" + title + ",".join(nfo[t])
            else:
                return "\n" + title + nfo[t]
        else:
            return ""
    desc += ddd("tagline")
    if "plot" in nfo and "tagline" in nfo and nfo["tagline"] != nfo["plot"]:
        desc += ddd("plot")
    elif "plot" in nfo and not "tagline" in nfo:
        desc += ddd("plot")
    desc += ddd("genre", "Genre")
    desc += ddd("runtime", "Length")
    desc += ddd("director", "Director")
    desc += ddd("actor", "Actors")
    desc += ddd("language", "Languages")
    desc += ddd("quality", "Quality")
    return desc.encode("utf8") if isinstance(desc, unicode) else desc


def nfo2title(nfo):
    "Build a display title (original title and year included) from an nfo dict"
    if not "title" in nfo:
        nfo_type, nfo = next(nfo.iteritems())
    title = nfo["title"]
    if "originaltitle" in nfo and nfo["originaltitle"] and nfo["originaltitle"] != nfo["title"]:
        title += " ~ " + nfo["originaltitle"]
    if "year" in nfo and nfo["year"]:
        title += " (%s)" % nfo["year"]
    return title.encode("utf8") if isinstance(title, unicode) else title
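
# A minimal usage sketch for the nfo helpers above. The dict shape is assumed
# from how nfo2title/nfo2desc read it; all keys and values here are illustrative.
def _example_nfo_helpers():
    nfo = {"movie": {"title": u"Limuzins Janu nakts krasa",
                     "originaltitle": u"Limuzins Janu nakts krasa",
                     "year": "1981",
                     "genre": ["comedy"],
                     "plot": u"A grandmother wins a car in the lottery."}}
    print nfo2title(nfo)  # -> "Limuzins Janu nakts krasa (1981)"
    print nfo2desc(nfo)   # title, plot and "Genre: comedy" on separate lines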
def play_video(streams):
    "Let the user pick one of the streams and play it with a local player"
    if len(streams) > 1:
        for i, s in enumerate(streams):
            print "%s: [%s,%s,%s] %s" % (i, s["quality"], s["lang"], s["type"], s["name"])
        a = raw_input("Select stream to play: ")
        try:
            n = int(a)
        except ValueError:
            n = 0
        if n >= len(streams):
            stream = streams[-1]
        else:
            stream = streams[n]
    else:
        stream = streams[0]

    stream = stream_change(stream)
    title = stream["name"] if not "nfo" in stream or not stream["nfo"] else nfo2title(stream["nfo"])
    desc = stream["desc"] if not "nfo" in stream or not stream["nfo"] else nfo2desc(stream["nfo"])
    img = stream["img"]
    url = stream["url"]
    suburl = ""
    print url
    if "subs" in stream and stream["subs"]:
        suburl = stream["subs"][0]["url"]
        print "\n**Download subtitles %s - %s" % (title, suburl)
        subs = urllib2.urlopen(suburl).read()
        if subs:
            fname0 = re.sub("[/\n\r\t,:\?]", "_", title)
            subext = ".srt"
            subfile = os.path.join("", fname0 + subext)
            if ".xml" in suburl:
                subs = ttaf2srt(subs)
            with open(subfile, "w") as f:
                f.write(subs)
        else:
            print "\n Error downloading subtitle %s" % suburl
    print "\n**Play stream %s\n%s" % (title, url.encode("utf8"))
    return player(url, title, suburl, stream["headers"])


def player(url, title="", suburl="", headers={}):
    "Play url with a locally installed player (VLC, GStreamer or ffplay)"
    from subprocess import call
    cmd1 = [r"c:\Program Files\VideoLAN\VLC\vlc.exe", url,
            "--meta-title", title.decode("utf8").encode(sys.getfilesystemencoding()),
            "--http-user-agent", "Enigma2"]
    # gst-launch-1.0 -v souphttpsrc ssl-strict=false proxy=127.0.0.1:8888
    #   extra-headers="Origin:adadadasd"
    #   location="http://bitdash-a.akamaihd.net/content/sintel/sintel.mpd" ! decodebin ! autovideosink
    cmd2 = [r"C:\gstreamer\1.0\x86_64\bin\gst-launch-1.0", "-v",
            "playbin", 'uri="%s"' % url,
            #"souphttpsrc", "ssl-strict=false",
            #"proxy=127.0.0.1:8888",
            #'location="%s"' % url,
            #'!decodebin!autovideosink'
            ]
    cmd3 = ["ffplay.exe", url]
    cmd = cmd3 if url.startswith("https") else cmd2
    ret = call(cmd)
    #if ret:
        #a = raw_input("*** Error, continue")
    return


def stream_type(data):
    "Return the stream type (hls, dash, http or the raw URL scheme) of an address"
    data = data.lower()
    m = re.search(r"^(\w+)://", data)
    prefix = m.group(1) if m else ""
    if prefix in ("http", "https"):
        if ".m3u8" in data:
            return "hls"
        elif ".mpd" in data:
            return "dash"
        else:
            return "http"
    else:
        return prefix


def check_version(package, url="http://feed.blue.lv/Packages"):
    "Return current package version from an OPKG feed"
    r = requests.get(url)
    if not r.ok:
        return ""
    m = re.search("Package: %s\nVersion: (.+?)\n" % package, r.content)
    if not m:
        return ""
    return m.group(1)


SPLIT_CHAR = "~"
SPLIT_CODE = urllib.quote(SPLIT_CHAR)
EQ_CODE = urllib.quote("=")
COL_CODE = urllib.quote(":")
SPACE_CODE = urllib.quote(" ")


def make_fname(title):
    "Make file name from title"
    title = title.strip()
    fname0 = re.sub("[/\n\r\t,:]", " ", title)
    fname0 = re.sub("['\"]", "", fname0)
    return fname0


def hls_base(url):
    "Return the base URL (no file name, no query string) of an HLS playlist URL"
    url2 = url.split("?")[0]
    url2 = "/".join(url2.split("/")[0:-1]) + "/"
    return url2


def stream_change(stream):
    #return stream # TODO
    if ("resolver" in stream and stream["resolver"] in ("viaplay", "hqq", "filmas")) or \
            ("surl" in stream and re.search(r"https*://(hqq|goo\.gl)", stream["surl"])):
        stream["url"] = streamproxy_encode(stream["url"], stream["headers"])
        stream["headers"] = {}
        return stream
    else:
        return stream
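
# A quick sketch of how stream_type and hls_base classify and trim stream
# addresses (the URLs below are made up for illustration):
def _example_stream_helpers():
    print stream_type("http://example.com/live/master.m3u8?token=x")  # -> "hls"
    print stream_type("http://example.com/vod/film.mpd")              # -> "dash"
    print stream_type("rtmp://example.com/live")                      # -> "rtmp"
    print hls_base("http://example.com/live/master.m3u8?token=x")     # -> "http://example.com/live/"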
def streamproxy_encode(url, headers=None, proxy_url=None):
    "Encode an URL and its headers into a local stream-proxy URL"
    PROXY_URL = "http://localhost:8880/"
    if not "?" in url:
        url = url + "?"
    url2 = url.replace(SPLIT_CHAR, SPLIT_CODE).replace(":", COL_CODE).replace(" ", SPACE_CODE)
    if not proxy_url:
        proxy_url = PROXY_URL
    url2 = proxy_url + url2
    if headers:
        headers2 = []
        for h in headers:
            headers2.append("%s=%s" % (h, headers[h].replace("=", EQ_CODE).replace(SPLIT_CHAR, SPLIT_CODE).replace(" ", SPACE_CODE)))
        headers2 = SPLIT_CHAR.join(headers2)
        url2 = url2 + SPLIT_CHAR + headers2
    #return url2.encode("utf8") if isinstance(url2, unicode) else url2
    return url2


def streamproxy_decode(urlp):
    "Decode a stream-proxy URL back into the original URL and headers"
    path = urlp.replace(re.search("http://[^/]+", urlp).group(0), "")
    p = path.split(SPLIT_CHAR)
    url = urllib.unquote(p[0][1:])
    #headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
    headers = {}
    if len(p) > 1:
        for h in p[1:]:
            headers[h.split("=")[0]] = urllib.unquote(h.split("=")[1])
    return url, headers


def streamproxy_encode2(url, headers=None, proxy_url=None):
    "Encode an URL and its headers into a local stream-proxy URL (quote_plus variant)"
    PROXY_URL = "http://localhost:8880/"
    #url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
    url2 = urllib.quote_plus(url)
    if not proxy_url:
        proxy_url = PROXY_URL
    url2 = proxy_url + url2 + "/?"
    if headers:
        headers2 = []
        for h in headers:
            headers2.append("%s=%s" % (h, headers[h].replace("=", EQ_CODE).replace(SPLIT_CHAR, SPLIT_CODE).replace(" ", SPACE_CODE)))
        headers2 = SPLIT_CHAR.join(headers2)
        url2 = url2 + SPLIT_CHAR + headers2
    return url2


def streamproxy_decode2(urlp):
    "Decode a stream-proxy URL (quote_plus variant) back into URL and headers"
    path = urlp.replace(re.search("http://[^/]+", urlp).group(0), "")
    p = path.split(SPLIT_CHAR)
    url = urllib.unquote_plus(p[0][1:-2])
    headers = {}
    if len(p) > 1:
        for h in p[1:]:
            headers[h.split("=")[0]] = urllib.unquote(h.split("=")[1])
    return url, headers
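
# A round-trip sketch for the proxy encoding helpers above. The proxy address
# is the module default (localhost:8880); the URL and headers are made up.
def _example_streamproxy_roundtrip():
    urlp = streamproxy_encode("http://example.com/video.m3u8?token=abc",
                              headers={"User-Agent": "Enigma2"})
    print urlp
    url, headers = streamproxy_decode(urlp)
    print url, headers  # original URL and headers recovered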
", s): sub = {} sub["begin"] = str2sec(re.search('begin="([^"]+)"', r2).group(1)) if re.search('begin="([^"]+)"', r2) else -1 sub["end"] = str2sec(re.search('end="([^"]+)"', r2).group(1)) if re.search('end="([^"]+)"', r2) else -1 sub["style"] = re.search('style="([^"]+)"', r2).group(1) if re.search('style="([^"]+)"', r2) else None sub["text"] = re.search("
]+>(.+)
", r2).group(1).replace("\n","") sub["text"] = re.sub("]+)>(.+?)
", s, re.DOTALL): i +=1 begin = re.search('begin="(.+?)"',p).group(1) begin = begin.replace(".",",") end = re.search('end="(.+?)"',p).group(1) end = end.replace(".",",") txt2 = re.sub("