Ivars 7 years ago
parent
commit
1779ce8ab0

+ 262
- 0
ContentSources.py

@@ -0,0 +1,262 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+
9
+import sys, os, re
10
+import glob, traceback
11
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
12
+from sources.SourceBase import stream_type
13
+import util
14
+
15
+show_hidden = False  # If True, hidden sources are shown anyway (useful during development)
16
+
17
+class ContentSources(object):
18
+    """Wrapper for content sources plugin"""
19
+
20
+    #----------------------------------------------------------------------
21
+    def __init__(self, plugin_path):
22
+        self.plugins = {}
23
+        self.error_content = [("..atpakaļ", "back", None, "Kļūda, atgriezties atpakaļ")]
24
+        sys.path.insert(0, plugin_path)
25
+        #for f in os.listdir(plugin_path):
26
+        #for f in next(os.walk(plugin_path))[2]:
27
+        print "ContentSources: Importing sources from "+plugin_path
28
+        files = glob.glob(os.path.join(plugin_path, "*.py"))
29
+        for f in files:
30
+            fname, ext = os.path.splitext(f)
31
+            fname = os.path.split(fname)[1]
32
+            if fname == "__init__": continue
33
+            if ext == '.py':
34
+                print "Importing %s"%fname
35
+                mod = __import__(fname)
36
+                reload(mod)
37
+                if "Source" in dir(mod):
38
+                    self.plugins[fname] = mod.Source()
39
+                    print fname+ " imported"
40
+                else:
41
+                    pass
42
+                    #print fname+ "skipped"
43
+        sys.path.pop(0)
44
+        if not "config" in self.plugins:
45
+            raise Exception("Problem importing content sources")
46
+        cfg = self.plugins["config"]
47
+        for pl in self.plugins.keys():
48
+            found = False
49
+            for lst in cfg.get_lists():
50
+                for item in cfg.lists[lst]:
51
+                    if item[1].split("::")[0]==pl:
52
+                        found = True
53
+                        break
54
+                if found: break
55
+            if not found:
56
+                title = self.plugins[pl].title if "title" in dir(self.plugins[pl]) else pl
57
+                img = self.plugins[pl].img if "img" in dir(self.plugins[pl]) else ""
58
+                desc = self.plugins[pl].desc if "desc" in dir(self.plugins[pl]) else title
59
+                cfg.add_item("home",(title,"%s::home"%pl,img,desc))
60
+                cfg.write_streams()
61
+
62
+    def get_content(self,data):
63
+        source = data.split("::")[0]
64
+        if source in self.plugins:
65
+            content0 = self.plugins[source].get_content(data)
66
+            if content0:
67
+                content = []
68
+                if isinstance(content0,list):
69
+                    for i,item in enumerate(content0):
70
+                        source2 = item[1].split("::")[0]
71
+                        if not (source2 == "back" or item[1].startswith("http") or item[1].startswith("rtmp")):
72
+                            if source2 not in self.plugins or (not show_hidden and "hidden" in dir(self.plugins[source2]) and self.plugins[source2].hidden):
73
+                                continue
74
+                        item2=[]
75
+                        for el in item:
76
+                            if isinstance(el,unicode):
77
+                                el = el.encode("utf8")
78
+                            item2.append(el)
79
+                        content.append(tuple(item2))
80
+                else:
81
+                    item2=[]
82
+                    for el in content0:
83
+                        if isinstance(el,unicode):
84
+                            el = el.encode("utf8")
85
+                        item2.append(el)
86
+                    content=tuple(item2)
87
+                return content
88
+            else:
89
+                return self.error_content
90
+        else:
91
+            return self.error_content
92
+
93
+    def get_streams(self,data):
94
+        if stream_type(data):
95
+            if "::" in data:
96
+                data = data.split("::")[1]
97
+            content = self.get_content(data)
98
+            stream = util.item()
99
+            stream["name"] = data
100
+            stream["url"] = data
101
+            stream["type"] = stream_type(data)
102
+            #stream["img"] = ""
103
+            #stream["desc"] = ""
104
+            return[stream]
105
+
106
+        if not self.is_video(data):
107
+            return []
108
+        source = data.split("::")[0]
109
+        if source in self.plugins:
110
+            streams = self.plugins[source].get_streams(data)
111
+            for s in streams:
112
+                for k in s:
113
+                    if isinstance(s[k],unicode):
114
+                        s[k] = s[k].encode("utf8")
115
+                if not "resolver" in s:
116
+                    s["resolver"] = source
117
+                if not "surl" in s or not s["surl"]:
118
+                    s["surl"] = data
119
+                if not "nfo" in s:
120
+                    s["nfo"]={"movie":{"title":s["name"],"thumb":s["img"],"plot":s["desc"]}}
121
+            return streams
122
+        else:
123
+            return []
124
+
125
+    def get_info(self,data):
126
+        nfo = {}
127
+        if self.is_video(data):
128
+            source = data.split("::")[0]
129
+            if source in self.plugins:
130
+                if "get_info" in dir(self.plugins[source]):
131
+                    streams = self.get_streams(data)
132
+                    nfo = streams[0]["nfo"] if streams and "nfo" in streams[0] else {}
133
+                else:
134
+                    nfo = self.plugins[source].get_info(data)
135
+        else:
136
+            pass    # TODO create nfo for listing
137
+        return nfo
138
+
139
+
140
+    def stream_type(self,data):
141
+        return stream_type(data)
142
+
143
+    def is_video(self,data):
144
+        if self.stream_type(data):
145
+            return True
146
+        source = data.split("::")[0]
147
+        if source in self.plugins:
148
+            return self.plugins[source].is_video(data)
149
+        else:
150
+            return False
151
+
152
+    def options_read(self,source):
153
+        if source in self.plugins:
154
+            options = self.plugins[source].options_read()
155
+            if options:
156
+                return options
157
+            else:
158
+                return None
159
+        else:
160
+            return None
161
+
162
+    def options_write(self,source,options):
163
+        if source in self.plugins:
164
+            return self.plugins[source].options_write(options)
165
+        else:
166
+            return None
167
+
168
+if __name__ == "__main__":
169
+
170
+    show_hidden = False
171
+    sources = ContentSources("sources")
172
+    if len(sys.argv)>1:
173
+        data= sys.argv[1]
174
+    else:
175
+        data = "config::home"
176
+
177
+    #options = sources.options_read("ltc")
178
+    #print options
179
+    history = []
180
+    cur = ("Home",data,None,None)
181
+    content = sources.get_content(cur[1])
182
+
183
+    exit_loop = False
184
+    while True:
185
+        print
186
+        for i,item in enumerate(content):
187
+            s = "%i: %s - %s %s"%(i,item[0],item[1],item[2])
188
+            print s #.encode(sys.stdout.encoding,"replace")
189
+
190
+        while True:
191
+            a = raw_input("Enter number, (-) for download, q for exit: ")
192
+            if a in ("q","Q","x","X"):
193
+                exit_loop = True
194
+                print "Exiting"
195
+                break
196
+            try:
197
+                n = int(a)
198
+                break
199
+            except:
200
+                print "Not a number!"
201
+        if exit_loop: break
202
+        download = False
203
+        if n<0:
204
+            n = abs(n)
205
+            download = True
206
+        cur2 = content[n]
207
+        data0 = cur2[1].split("::")[1] if "::" in cur2[1] else cur2[1]
208
+        if not data0:
209
+            pass
210
+        elif cur2[1] == "back":
211
+            cur = history.pop()
212
+        elif sources.is_video(cur2[1]):
213
+            if sources.stream_type(cur2[1]):
214
+                stream = util.item()
215
+                stream["url"] = cur2[1]
216
+                stream["name"] = cur2[0]
217
+                streams = [stream]
218
+            else:
219
+                try:
220
+                    if not download:
221
+                        streams = sources.get_streams(cur2[1])
222
+                    else:
223
+                        stream = util.item()
224
+                        stream["url"] = cur2[1]
225
+                        stream["name"] = cur2[0]
226
+                        stream["url"] = util.streamproxy_encode2(stream["url"])
227
+                        print stream["url"]
228
+                        streams = [stream]
229
+                except Exception as e:
230
+                    print unicode(e)
231
+                    traceback.print_exc()
232
+                    streams = []
233
+            if streams:
234
+                if not download:
235
+                    util.play_video(streams)
236
+                else:
237
+                    #urlp = util.streamproxy_encode2(streams[0]["url"])
238
+                    #print urlp
239
+                    #util.player(urlp)
240
+                    #Downloader.download_video(streams)
241
+                    pass
242
+            else:
243
+                print "**No stream to play - %s "%(
244
+                    cur2[1])
245
+                raw_input("Press any key")
246
+            #import os
247
+            #os.system('"c:\Program Files (x86)\VideoLAN\VLC\vlc.exe" "%s"'%cur2[1])
248
+
249
+        else:
250
+            if "{0}" in cur2[1]:
251
+                a = raw_input("Enter value:")
252
+                cur2 = (cur2[0],cur2[1].format(a),cur2[2],cur2[3])
253
+            history.append(cur)
254
+            cur = cur2
255
+        try:
256
+            content = sources.get_content(cur[1])
257
+        except Exception as e:
258
+            print unicode(e)
259
+            traceback.print_exc()
260
+            raw_input("Continue?")
261
+
262
+
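
Editor's note: a minimal, hedged sketch of the interface that ContentSources.__init__ and get_content/get_streams appear to expect from a module dropped into the sources/ directory - a class named Source. It is not part of this commit; the module name and all sample values below are assumptions for illustration only.

# example_source.py - illustrative source module (assumed name, not in the repository)
class Source(object):
    title = "Example source"       # shown on the "home" list by ContentSources.__init__
    img = ""
    desc = "Minimal illustrative source"
    hidden = False                 # if True (and show_hidden is False) its items are filtered out

    def get_content(self, data):
        # return a list of (title, data, img, desc) tuples; "back" is handled by the caller
        return [("..back", "back", None, "Return"),
                ("Demo video", "http://example.com/demo.m3u8", "", "Demo stream")]

    def is_video(self, data):
        return data.startswith("http")

    def get_streams(self, data):
        # name/url/img/desc are read back when ContentSources builds the nfo block
        return [{"name": data, "url": data, "img": "", "desc": data, "quality": "?"}]

    def options_read(self):
        return None

    def options_write(self, options):
        return None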


+ 6281
- 0
demjson.py
File diff suppressed because it is too large


+ 1375
- 0
project.wpr
File diff suppressed because it is too large


+ 138
- 0
resolver.py

@@ -0,0 +1,138 @@
1
+# *      Copyright (C) 2011 Libor Zoubek
2
+# *      Modified by ivars777
3
+# *      Based in code from https://github.com/kodi-czsk/script.module.stream.resolver
4
+# *
5
+# *
6
+# *  This Program is free software; you can redistribute it and/or modify
7
+# *  it under the terms of the GNU General Public License as published by
8
+# *  the Free Software Foundation; either version 2, or (at your option)
9
+# *  any later version.
10
+# *
11
+# *  This Program is distributed in the hope that it will be useful,
12
+# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
+# *  GNU General Public License for more details.
15
+# *
16
+# *  You should have received a copy of the GNU General Public License
17
+# *  along with this program; see the file COPYING.  If not, write to
18
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19
+# *  http://www.gnu.org/copyleft/gpl.html
20
+# *
21
+# */
22
+# Based in code from https://github.com/kodi-czsk/script.module.stream.resolver
23
+
24
+import re
25
+import traceback
26
+
27
+import util
28
+import sys,os
29
+#sys.path.insert(0,os.path.dirname(os.path.abspath(__file__)))
30
+server_path = "resolvers"
31
+sys.path.append(os.path.join(os.path.dirname(__file__), server_path))
32
+
33
+RESOLVERS = []
34
+util.debug('%s searching for modules' % __name__)
35
+
36
+for module in os.listdir(os.path.join(os.path.dirname(__file__), server_path)):
37
+    if module == '__init__.py' or module[-3:] != '.py':
38
+        continue
39
+    module = module[:-3]
40
+    #exec 'import %s' % module
41
+    #resolver = eval(module)
42
+    resolver = __import__(module)
43
+    #reload(resolver)
44
+
45
+    if not hasattr(resolver, 'resolve'):
46
+        continue
47
+    print 'found %s %s' % (resolver, dir(resolver))
48
+    #util.debug('found %s %s' % (resolver, dir(resolver)))
49
+    if not hasattr(resolver, '__priority__'):
50
+        resolver.__priority__ = 0
51
+    RESOLVERS.append(resolver)
52
+    del module
53
+RESOLVERS = sorted(RESOLVERS, key=lambda m: -m.__priority__)
54
+
55
+
56
+def resolve(url):
57
+    """
58
+        Resolves the given URL by asking all registered resolvers.
59
+        Returns a list of resolved stream items on success.
60
+    """
61
+    url = util.decode_html(url)
62
+    util.info('Resolving ' + url)
63
+    resolver = _get_resolver(url)
64
+    if resolver is None:
65
+        return []
66
+    util.info('Using resolver \'%s\'' % str(resolver.__name__));
67
+    value = resolver.resolve(url)
68
+    if not value:
69
+        return []
70
+    default = util.item()
71
+    for i in value:
72
+        i["resolver"] = resolver.__name__
73
+        if 'name' not in i.keys():
74
+            i['name'] = resolver.__name__
75
+        i['surl'] = url
76
+        for key in default.keys():
77
+            if key not in i.keys():
78
+                i[key] = default[key]
79
+        if "|" in i["url"]:
80
+            headers = i["url"].split("|")[1]
81
+            i["url"]=i["url"].split("|")[0]
82
+            for h in headers.split("&"):
83
+                if "=" in h:
84
+                    i["headers"][h.split("=")[0]] = h.split("=")[1]
85
+
86
+    return sorted(value, key=lambda i: i['quality'], reverse=True) # TODO sorted by order?
87
+
88
+
89
+def _get_resolver(url):
90
+    util.debug('Get resolver for ' + url)
91
+    for r in RESOLVERS:
92
+        util.debug('querying %s' % r)
93
+        if r.supports(url):
94
+            return r
95
+
96
+def can_resolve(url):
97
+    """ Returns true if we are able to resolve stream by given URL """
98
+    return _get_resolver(url) is not None
99
+
100
+
101
+if __name__ == "__main__":
102
+
103
+    from subprocess import call
104
+    #url = "http://hqq.tv/player/embed_player.php?vid=235238210241210222228241233208212245&autoplay=no"
105
+    #url = "http://hqq.tv/player/embed_player.php?vid=243221241234244238208213206212211231&autoplay=no"
106
+    url = "http://hqq.tv/player/embed_player.php?vid=208231211231207221227243206206221244&autoplay=no"
107
+    #url = "https://openload.co/embed/TMthIdpy4PI/"
108
+    #url = "https://www.youtube.com/watch?v=Tx1K51_F99o"
109
+    #url = "https://www.youtube.com/watch?v=8BkcX7O1890"
110
+    #url = "https://www.youtube.com/watch?v=Se07R8SYsg0"
111
+    #url = "https://kinostok.tv/embed/731f3437e3c53104dd56d04039a0b15a"
112
+    #url = "http://vk.com/video_ext.php?oid=246066565&id=169244575&hash=d430ab0e76c9f7a1&hd=3"
113
+    #url ="https://openload.co/embed/rPMXJYPTkw4/"
114
+    #url = "https://openload.co/embed/bE7WfZ-vz_A/"
115
+    #url = "https://openload.co/embed/bE7WfZ/"
116
+    #url = "https://openload.co/embed/OuskaKyC2GU/"
117
+    url = "http://hqq.tv/player/embed_player.php?vid=235238210241210222228241233208212245&autoplay=no"
118
+    url = "https://hqq.tv/player/embed_player.php?vid=235242211228257255206246241253244213194271217261258"
119
+    #url = "https://openload.co/embed/rmNcP-0QopE/"
120
+    #url = "https://openload.co/embed/gpLF6Grzy80/"
121
+    #url = "https://openload.co/embed/oQLXcU1ITAY/"
122
+    #url = "http://hqq.tv/player/embed_player.php?vid=245208228234224222241224221239207212&autoplay=no"
123
+    url = "https://goo.gl/yMTzqf"
124
+    url = "http://cdn.kapnob.ru/video/5e67c8b1ad018ffa/iframe"
125
+    url = "http://kodik.cc/video/10830/4269a802d1a9d9bdc53fe38488d53a52/720p"
126
+    url = "https://www.youtube.com/embed/RUyQ_JJ6A84?rel=0&fs=1&wmode=transparent"
127
+    streams = resolve(url)
128
+    if not streams:
129
+        print "No streams found"
130
+        sys.exit()
131
+
132
+    for s in streams:
133
+        print s
134
+
135
+    print streams[0]["url"]
136
+    util.play_video(streams)
137
+    ##call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",streams[0]["url"]])
138
+    pass

+ 31
- 0
resolvers/__init__.py

@@ -0,0 +1,31 @@
1
+#/*
2
+# *      Copyright (C) 2011 Libor Zoubek
3
+# *
4
+# *
5
+# *  This Program is free software; you can redistribute it and/or modify
6
+# *  it under the terms of the GNU General Public License as published by
7
+# *  the Free Software Foundation; either version 2, or (at your option)
8
+# *  any later version.
9
+# *
10
+# *  This Program is distributed in the hope that it will be useful,
11
+# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+# *  GNU General Public License for more details.
14
+# *
15
+# *  You should have received a copy of the GNU General Public License
16
+# *  along with this program; see the file COPYING.  If not, write to
17
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
18
+# *  http://www.gnu.org/copyleft/gpl.html
19
+# *
20
+# */
21
+
22
+
23
+##########################################################
24
+# all resolver modules in this directory must provide the following:
25
+
26
+# __name__ - name of the resolver module - can override the module filename
27
+# def supports(url) - returns True if the resolver is able to resolve the url to a stream, otherwise False
28
+# def resolve(url) - returns a list of hashmaps, one per resolved stream
29
+#   - if resolving fails, nothing is returned
30
+#   - a hash MUST contain the key 'url' - its value is the stream URL
31
+#   - optional keys are 'subs' (link to subtitles) and 'quality' (a quality string like '240p' or just 'HD')
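
Editor's note: as a hedged illustration of the contract described above, a minimal resolver module might look like the sketch below. It is not part of this commit; the domain example.com, the module name and the hard-coded quality are assumptions for illustration only.

# example.py - minimal illustrative resolver (assumed names, not in the repository)
import re

__name__ = 'example'
__priority__ = 0

def supports(url):
    # True only for URLs this resolver knows how to handle
    return re.search(r"https?://example\.com/video/\w+", url) is not None

def resolve(url):
    # On success return a list of hashmaps; 'url' is mandatory,
    # 'quality' and 'subs' are optional per the contract above.
    if not supports(url):
        return []
    return [{"url": url + "/stream.m3u8", "name": url, "quality": "HD"}]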

+ 209
- 0
resolvers/aadecode.py

@@ -0,0 +1,209 @@
1
+#-*- coding: utf-8 -*-
2
+#
3
+# author : Djeman
4
+# Updated by Shani-08 (https://github.com/Shani-08/ShaniXBMCWork2)
5
+
6
+import re
7
+
8
+class AADecoder(object):
9
+    def __init__(self, aa_encoded_data):
10
+        self.encoded_str = aa_encoded_data.replace('/*´∇`*/','')
11
+
12
+        self.b = ["(c^_^o)", "(゚Θ゚)", "((o^_^o) - (゚Θ゚))", "(o^_^o)",
13
+                  "(゚ー゚)", "((゚ー゚) + (゚Θ゚))", "((o^_^o) +(o^_^o))", "((゚ー゚) + (o^_^o))",
14
+                  "((゚ー゚) + (゚ー゚))", "((゚ー゚) + (゚ー゚) + (゚Θ゚))", "(゚Д゚) .゚ω゚ノ", "(゚Д゚) .゚Θ゚ノ",
15
+                  "(゚Д゚) ['c']", "(゚Д゚) .゚ー゚ノ", "(゚Д゚) .゚Д゚ノ", "(゚Д゚) [゚Θ゚]"]
16
+
17
+    def is_aaencoded(self):
18
+        idx = self.encoded_str.find("゚ω゚ノ= /`m´)ノ ~┻━┻   //*´∇`*/ ['_']; o=(゚ー゚)  =_=3; c=(゚Θ゚) =(゚ー゚)-(゚ー゚); ")
19
+        if idx == -1:
20
+            return False
21
+
22
+        if self.encoded_str.find("(゚Д゚)[゚o゚]) (゚Θ゚)) ('_');", idx) == -1:
23
+            return False
24
+
25
+        return True
26
+
27
+    def base_repr(self, number, base=2, padding=0):
28
+        digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
29
+        if base > len(digits):
30
+            base = len(digits)
31
+
32
+        num = abs(number)
33
+        res = []
34
+        while num:
35
+            res.append(digits[num % base])
36
+            num //= base
37
+        if padding:
38
+            res.append('0' * padding)
39
+        if number < 0:
40
+            res.append('-')
41
+        return ''.join(reversed(res or '0'))
42
+
43
+    def decode_char(self, enc_char, radix):
44
+        end_char = "+ "
45
+        str_char = ""
46
+        while enc_char != '':
47
+            found = False
48
+
49
+            if not found:
50
+                for i in range(len(self.b)):             
51
+                    enc_char=enc_char.replace(self.b[i], str(i))
52
+                
53
+                startpos=0
54
+                findClose=True
55
+                balance=1
56
+                result=[]
57
+                if enc_char.startswith('('):
58
+                    l=0
59
+                    
60
+                    for t in enc_char[1:]:
61
+                        l+=1
62
+                        if findClose and t==')':
63
+                            balance-=1;
64
+                            if balance==0:
65
+                                result+=[enc_char[startpos:l+1]]
66
+                                findClose=False
67
+                                continue
68
+                        elif not findClose and t=='(':
69
+                            startpos=l
70
+                            findClose=True
71
+                            balance=1
72
+                            continue
73
+                        elif t=='(':
74
+                            balance+=1
75
+                 
76
+
77
+                if result is None or len(result)==0:
78
+                    return ""
79
+                else:
80
+                    
81
+                    for r in result:
82
+                        value = self.decode_digit(r, radix)
83
+                        if value == "":
84
+                            return ""
85
+                        else:
86
+                            str_char += value
87
+                            
88
+                    return str_char
89
+
90
+            enc_char = enc_char[len(end_char):]
91
+
92
+        return str_char
93
+
94
+        
95
+              
96
+    def decode_digit(self, enc_int, radix):
97
+
98
+        rr = '(\(.+?\)\))\+'
99
+        rerr=enc_int.split('))+')
100
+        v = ''
101
+        
102
+        #new mode
103
+
104
+        for c in rerr:
105
+            
106
+            if len(c)>0:
107
+                if c.strip().endswith('+'):
108
+                    c=c.strip()[:-1]
109
+
110
+                startbrackets=len(c)-len(c.replace('(',''))
111
+                endbrackets=len(c)-len(c.replace(')',''))
112
+                    
113
+                if startbrackets>endbrackets:
114
+                    c += ')' * (startbrackets - endbrackets)
115
+                    
116
+                c = c.replace('!+[]','1')
117
+                c = c.replace('-~','1+')
118
+                c = c.replace('[]','0')
119
+                    
120
+                v+=str(eval(c))
121
+                    
122
+        return v
123
+         
124
+        mode = 0
125
+        value = 0
126
+
127
+        while enc_int != '':
128
+            found = False
129
+            for i in range(len(self.b)):
130
+                if enc_int.find(self.b[i]) == 0:
131
+                    if mode == 0:
132
+                        value += i
133
+                    else:
134
+                        value -= i
135
+                    enc_int = enc_int[len(self.b[i]):]
136
+                    found = True
137
+                    break
138
+
139
+            if not found:
140
+                return ""
141
+
142
+            enc_int = re.sub('^\s+|\s+$', '', enc_int)
143
+            if enc_int.find("+") == 0:
144
+                mode = 0
145
+            else:
146
+                mode = 1
147
+
148
+            enc_int = enc_int[1:]
149
+            enc_int = re.sub('^\s+|\s+$', '', enc_int)
150
+
151
+        return self.base_repr(value, radix)
152
+
153
+    def decode(self):
154
+
155
+        self.encoded_str = re.sub('^\s+|\s+$', '', self.encoded_str)
156
+
157
+        # get data
158
+        pattern = (r"\(゚Д゚\)\[゚o゚\]\+ (.+?)\(゚Д゚\)\[゚o゚\]\)")
159
+        result = re.search(pattern, self.encoded_str, re.DOTALL)
160
+        if result is None:
161
+            print "AADecoder: data not found"
162
+            return False
163
+
164
+        data = result.group(1)
165
+
166
+        # hex decode string
167
+        begin_char = "(゚Д゚)[゚ε゚]+"
168
+        alt_char = "(o゚ー゚o)+ "
169
+
170
+        out = ''
171
+
172
+        while data != '':
173
+            # Check new char
174
+            if data.find(begin_char) != 0:
175
+                print "AADecoder: data not found"
176
+                return False
177
+
178
+            data = data[len(begin_char):]
179
+
180
+            # Find encoded char
181
+            enc_char = ""
182
+            if data.find(begin_char) == -1:
183
+                enc_char = data
184
+                data = ""
185
+            else:
186
+                enc_char = data[:data.find(begin_char)]
187
+                data = data[len(enc_char):]
188
+
189
+            
190
+            radix = 8
191
+            # Detect radix 16 for utf8 char
192
+            if enc_char.find(alt_char) == 0:
193
+                enc_char = enc_char[len(alt_char):]
194
+                radix = 16
195
+
196
+            str_char = self.decode_char(enc_char, radix)
197
+            
198
+            if str_char == "":
199
+                print "no match :  "
200
+                print  data + "\nout = " + out + "\n"
201
+                return False
202
+            
203
+            out += chr(int(str_char, radix))
204
+
205
+        if out == "":
206
+            print "no match : " + data
207
+            return False
208
+
209
+        return out

+ 100
- 0
resolvers/hdgo.py

@@ -0,0 +1,100 @@
1
+# -*- coding: UTF-8 -*-
2
+# /*
3
+# *      Copyright (C) 2016 ivars777
4
+# *
5
+# *
6
+# *  This Program is free software; you can redistribute it and/or modify
7
+# *  it under the terms of the GNU General Public License as published by
8
+# *  the Free Software Foundation; either version 2, or (at your option)
9
+# *  any later version.
10
+# *
11
+# *  This Program is distributed in the hope that it will be useful,
12
+# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
+# *  GNU General Public License for more details.
15
+# *
16
+# *  You should have received a copy of the GNU General Public License
17
+# *  along with this program; see the file COPYING.  If not, write to
18
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19
+# *  http://www.gnu.org/copyleft/gpl.html
20
+# *
21
+# */
22
+
23
+import re,os,sys
24
+import json
25
+try:
26
+    import util
27
+except:
28
+    pp = os.path.dirname(os.path.abspath(__file__))
29
+    sys.path.insert(0,os.sep.join(pp.split(os.sep)[:-1]))
30
+    import util
31
+import urllib2
32
+import requests
33
+try:
34
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
35
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
36
+except:
37
+    pass
38
+#from aadecode import AADecoder
39
+
40
+if __name__ <> "__main__":
41
+    __name__ = 'hdgo'
42
+
43
+def supports(url):
44
+    m = re.search(r"https?://hdgo\.\w+/(.+?)$", url, re.DOTALL)
45
+    if m:
46
+        return True
47
+    else:
48
+        return False
49
+
50
+def resolve(url):
51
+    HTTP_HEADER = {
52
+        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0',
53
+        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
54
+        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
55
+        'Accept-Encoding': 'none',
56
+        'Accept-Language': 'en-US,en;q=0.8',
57
+        'Referer': url}  # 'Connection': 'keep-alive'
58
+    streams = []
59
+    m = re.search(r"https?://hdgo\.\w+/(.+?)$", url, re.DOTALL)
60
+    vid=m.group(1)
61
+    url2 = "http://couber.be/"+vid
62
+    r = requests.get(url2,headers=HTTP_HEADER)
63
+    if r.status_code <> 200:
64
+        return streams
65
+    m = re.search('<iframe src="([^"]+)"', r.content, re.DOTALL)
66
+    if not m: return streams
67
+    url3 = m.group(1)
68
+    HTTP_HEADER["Referer"] = url2
69
+    r = requests.get(url3,headers=HTTP_HEADER)
70
+    m = re.search(r"else{\s+setFlash\('([^']+)'\);", r.content, re.DOTALL)
71
+    if not m: return streams
72
+    q = ["1080p","720p","480p","360p"]
73
+    for i,ss in enumerate(m.group(1).split(",")):
74
+        s = ss.split(" or ")
75
+        if not s[0]: continue
76
+        stream = util.item()
77
+        stream["url"] = s[0]
78
+        stream["name"] = s[0]
79
+        stream["quality"] = q[i]
80
+        streams.append(stream)
81
+    return streams
82
+
83
+
84
+if __name__ == "__main__":
85
+
86
+    from subprocess import call
87
+    url = "http://hdgo.cc/video/t/Qrz0riUvA65GtkTpDvmlD9TBOn56HSm2/127280/"
88
+    url = "http://hdgo.cc/video/t/Qrz0riUvA65GtkTpDvmlD9TBOn56HSm2/34879/"
89
+    streams = resolve(url)
90
+    if not streams:
91
+        print "No streams found"
92
+        sys.exit()
93
+    for s in streams:
94
+        print s
95
+    util.play_video(streams)
96
+
97
+
98
+    #print streams[0]["url"]
99
+    #call([r"gst-launch-1.0.exe",'uri="%s""'%streams[0]["url"]])
100
+    pass

+ 239
- 0
resolvers/hqqresolver.py

@@ -0,0 +1,239 @@
1
+# -*- coding: UTF-8 -*-
2
+# *  GNU General Public License for more details.
3
+# *
4
+# *
5
+# *  You should have received a copy of the GNU General Public License
6
+# *  along with this program; see the file COPYING.  If not, write to
7
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
8
+# *  http://www.gnu.org/copyleft/gpl.html
9
+# *
10
+# *
11
+# *  based on https://gitorious.org/iptv-pl-dla-openpli/ urlresolver
12
+# */
13
+from StringIO import StringIO
14
+import json
15
+import re
16
+import base64
17
+import urllib
18
+import sys,os
19
+
20
+try:
21
+    import util
22
+except:
23
+    pp = os.path.dirname(os.path.abspath(__file__))
24
+    sys.path.insert(0, os.sep.join(pp.split(os.sep)[:-1]))
25
+    import util
26
+import requests
27
+try:
28
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
29
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
30
+except:
31
+    pass
32
+
33
+__author__ = 'ivars777'
34
+if __name__ <> "__main__":
35
+    __name__ = 'hqq'
36
+
37
+
38
+def supports(url):
39
+    #return False
40
+    return _regex(url) is not None
41
+
42
+
43
+def _decode(data):
44
+    def O1l(string):
45
+        ret = ""
46
+        i = len(string) - 1
47
+        while i >= 0:
48
+            ret += string[i]
49
+            i -= 1
50
+        return ret
51
+
52
+    def l0I(string):
53
+        enc = ""
54
+        dec = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
55
+        i = 0
56
+        while True:
57
+            h1 = dec.find(string[i])
58
+            i += 1
59
+            h2 = dec.find(string[i])
60
+            i += 1
61
+            h3 = dec.find(string[i])
62
+            i += 1
63
+            h4 = dec.find(string[i])
64
+            i += 1
65
+            bits = h1 << 18 | h2 << 12 | h3 << 6 | h4
66
+            o1 = bits >> 16 & 0xff
67
+            o2 = bits >> 8 & 0xff
68
+            o3 = bits & 0xff
69
+            if h3 == 64:
70
+                enc += unichr(o1)
71
+            else:
72
+                if h4 == 64:
73
+                    enc += unichr(o1) + unichr(o2)
74
+                else:
75
+                    enc += unichr(o1) + unichr(o2) + unichr(o3)
76
+            if i >= len(string):
77
+                break
78
+        return enc
79
+
80
+    escape = re.search("var _escape=\'([^\']+)", l0I(O1l(data))).group(1)
81
+    return escape.replace('%', '\\').decode('unicode-escape')
82
+
83
+
84
+def _decode2(file_url):
85
+    def K12K(a, typ='b'):
86
+        codec_a = ["G", "L", "M", "N", "Z", "o", "I", "t", "V", "y", "x", "p", "R", "m", "z", "u",
87
+                   "D", "7", "W", "v", "Q", "n", "e", "0", "b", "="]
88
+        codec_b = ["2", "6", "i", "k", "8", "X", "J", "B", "a", "s", "d", "H", "w", "f", "T", "3",
89
+                   "l", "c", "5", "Y", "g", "1", "4", "9", "U", "A"]
90
+        if 'd' == typ:
91
+            tmp = codec_a
92
+            codec_a = codec_b
93
+            codec_b = tmp
94
+        idx = 0
95
+        while idx < len(codec_a):
96
+            a = a.replace(codec_a[idx], "___")
97
+            a = a.replace(codec_b[idx], codec_a[idx])
98
+            a = a.replace("___", codec_b[idx])
99
+            idx += 1
100
+        return a
101
+
102
+    def _xc13(_arg1):
103
+        _lg27 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
104
+        _local2 = ""
105
+        _local3 = [0, 0, 0, 0]
106
+        _local4 = [0, 0, 0]
107
+        _local5 = 0
108
+        while _local5 < len(_arg1):
109
+            _local6 = 0
110
+            while _local6 < 4 and (_local5 + _local6) < len(_arg1):
111
+                _local3[_local6] = _lg27.find(_arg1[_local5 + _local6])
112
+                _local6 += 1
113
+            _local4[0] = ((_local3[0] << 2) + ((_local3[1] & 48) >> 4))
114
+            _local4[1] = (((_local3[1] & 15) << 4) + ((_local3[2] & 60) >> 2))
115
+            _local4[2] = (((_local3[2] & 3) << 6) + _local3[3])
116
+
117
+            _local7 = 0
118
+            while _local7 < len(_local4):
119
+                if _local3[_local7 + 1] == 64:
120
+                    break
121
+                _local2 += chr(_local4[_local7])
122
+                _local7 += 1
123
+            _local5 += 4
124
+        return _local2
125
+
126
+    return _xc13(K12K(file_url, 'e'))
127
+
128
+
129
+def resolve(url):
130
+    m = _regex(url)
131
+    if m:
132
+        headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
133
+                   'Content-Type': 'text/html; charset=utf-8'}
134
+        if "goo.gl" in url:
135
+            data = util.request(url, headers)
136
+            player_url = re.search("var ppage = '(.+?)'",data).group(1)
137
+            player_url = urllib.unquote(player_url)
138
+        else:
139
+            vid = m.group('vid')
140
+            player_url = "http://hqq.tv/player/embed_player.php?vid=%s&autoplay=no" % vid
141
+            data = util.request(player_url, headers)
142
+        b64enc = re.search('base64([^\"]+)', data, re.DOTALL)
143
+        b64dec = b64enc and base64.decodestring(b64enc.group(1))
144
+        enc = b64dec and re.search("\'([^']+)\'", b64dec).group(1)
145
+        if enc:
146
+            data = re.findall('<input name="([^"]+?)" [^>]+? value="([^"]+?)">', _decode(enc))
147
+            post_data = {}
148
+            for idx in range(len(data)):
149
+                post_data[data[idx][0]] = data[idx][1]
150
+            data = util.post(player_url, post_data, headers)
151
+            b64enc = re.search('base64([^\"]+)', data, re.DOTALL)
152
+            b64dec = b64enc and base64.decodestring(b64enc.group(1))
153
+            enc = b64dec and re.search("\'([^']+)\'", b64dec).group(1)
154
+            if enc:
155
+                data = re.findall('<input name="([^"]+?)" [^>]+? value="([^"]*)">', _decode(enc))
156
+                post_data = {}
157
+                for idx in range(len(data)):
158
+                    post_data[data[idx][0]] = data[idx][1]
159
+                data = urllib.unquote(util.request("http://hqq.tv/sec/player/embed_player.php?" +
160
+                                                   urllib.urlencode(post_data), headers))
161
+                server_1 = re.search("server_1: (\w+)",data).group(1)
162
+                link_1 = re.search("link_1: (\w+)",data).group(1)
163
+                vid_server = re.search(r'var\s*%s\s*=\s*"([^"]*?)"'%server_1, data)
164
+                vid_link = re.search(r'var\s*%s\s*=\s*"([^"]*?)"'%link_1, data)
165
+                at = re.search(r'var\s*at\s*=\s*"([^"]*?)"', data)
166
+                vid = re.search('vid: "([^"]+)"',data)
167
+                sub_url = re.search('sub:"(.+?)"',data).group(1) if re.search('sub:"(.+?)"',data) else ""
168
+                subs_lang = re.search('sublangs:"(.+?)"',data).group(1) if re.search('sublangs:"(.+?)"',data) else ""
169
+                if sub_url:
170
+                    subs=[{"url":sub_url,'name':subs_lang,"lang":subs_lang}]
171
+                else:
172
+                    subs = []
173
+                if vid_server and vid_link and at:
174
+                    get_data = {'server_1': vid_server.group(1),
175
+                                'link_1': vid_link.group(1),
176
+                                'at': at.group(1),
177
+                                'adb': '0/',
178
+                                'b':'1',
179
+                                'vid': vid.group(1)}
180
+                    # X-Requested-With: XMLHttpRequest
181
+                    headers["X-Requested-With"] = "XMLHttpRequest"
182
+                    html = util.request("http://hqq.tv/player/get_md5.php?"+urllib.urlencode(get_data), headers)
183
+                    data = json.load(StringIO(html))
184
+                    if 'file' in data:
185
+                        file_url = _decode2(data['file'])
186
+                        file_url = re.sub(r'\?socket=?$', '.mp4.m3u8',file_url)
187
+                        stream  = {
188
+                            'url': file_url,
189
+                            'name': file_url,
190
+                            'subs':subs,
191
+                            'quality': 'hqq',
192
+                            'resolver': 'hqq',
193
+                            "headers":{"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
194
+                        }
195
+                        return [stream]
196
+    return []
197
+
198
+
199
+def _regex(url):
200
+    # https://goo.gl/yMTzqf
201
+    match = re.search("(hqq|netu)\.tv/watch_video\.php\?v=(?P<vid>[0-9A-Z]+)", url)
202
+    if match:
203
+        return match
204
+    match = re.search(r'(hqq|netu)\.tv/player/embed_player\.php\?vid=(?P<vid>[0-9A-Za-z]+)', url)
205
+    if match:
206
+        return match
207
+    match = re.search(r'(hqq|netu)\.tv/player/hash\.php\?hash=\d+', url)
208
+    if match:
209
+        match = re.search(r'var\s+vid\s*=\s*\'(?P<vid>[^\']+)\'', urllib.unquote(util.request(url)))
210
+        if match:
211
+            return match
212
+    # https://goo.gl/yMTzqf
213
+    match = re.search("(goo)\.gl/(?P<vid>[\w]+)", url)
214
+    if match:
215
+        return match
216
+
217
+    b64enc = re.search(r'data:text/javascript\;charset\=utf\-8\;base64([^\"]+)', url)
218
+    b64dec = b64enc and base64.decodestring(b64enc.group(1))
219
+    enc = b64dec and re.search(r"\'([^']+)\'", b64dec).group(1)
220
+    if enc:
221
+        decoded = _decode(enc)
222
+        match = re.search(r'<input name="vid"[^>]+? value="(?P<vid>[^"]+?)">', decoded)
223
+        if re.search(r'<form(.+?)action="[^"]*(hqq|netu)\.tv/player/embed_player\.php"[^>]*>',
224
+                     decoded) and match:
225
+            return match
226
+    return None
227
+
228
+if __name__ == "__main__":
229
+
230
+    url = "http://hqq.tv/player/embed_player.php?vid=nYAKgzBAf7ll"
231
+    streams = resolve(url)
232
+    if not streams:
233
+        print "No streams found"
234
+        sys.exit()
235
+    for s in streams:
236
+        print s
237
+    print streams[0]["url"]
238
+    util.play_video(streams)
239
+    pass

+ 99
- 0
resolvers/kapnob.py

@@ -0,0 +1,99 @@
1
+# -*- coding: UTF-8 -*-
2
+# /*
3
+# *      Copyright (C) 2016 Ivars777
4
+# *
5
+# *
6
+# *  This Program is free software; you can redistribute it and/or modify
7
+# *  it under the terms of the GNU General Public License as published by
8
+# *  the Free Software Foundation; either version 2, or (at your option)
9
+# *  any later version.
10
+# *
11
+# *  This Program is distributed in the hope that it will be useful,
12
+# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
+# *  GNU General Public License for more details.
15
+# *
16
+# *  You should have received a copy of the GNU General Public License
17
+# *  along with this program; see the file COPYING.  If not, write to
18
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19
+# *  http://www.gnu.org/copyleft/gpl.html
20
+# *
21
+# */
22
+
23
+import re,os,sys
24
+import json
25
+try:
26
+    import util
27
+except:
28
+    pp = os.path.dirname(os.path.abspath(__file__))
29
+    sys.path.insert(0,os.sep.join(pp.split(os.sep)[:-1]))
30
+    import util
31
+import requests
32
+try:
33
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
34
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
35
+except:
36
+    pass
37
+
38
+__author__ = 'ivars777'
39
+if __name__ <> "__main__":
40
+    __name__ = 'kapnob'
41
+
42
+headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])  # parse a pasted header block ("Name: value" per line) into a dict
43
+headers = headers2dict("""
44
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
45
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
46
+Accept-Language: en-US,en;q=0.5
47
+""")
48
+
49
+
50
+def supports(url):
51
+    return True if "kapnob.ru" in url else False
52
+
53
+def resolve(url):
54
+    HTTP_HEADER = {
55
+        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0',
56
+        'Referer': url}  # 'Connection': 'keep-alive'
57
+    stream = util.item()
58
+    data = requests.get(url,headers = HTTP_HEADER).content
59
+    m = re.search(r'subtitles: \[\s+{\s+src: "(.+?)",\s+label: "(.+?)",\s+language: "(.+?)"', data, re.DOTALL)
60
+    if m:
61
+        sub = {}
62
+        sub["url"] = m.group(1)
63
+        sub["name"] = m.group(2)
64
+        sub["lang"] = m.group(3)
65
+        sub["type"] = "srt"
66
+        stream["subs"]=[sub]
67
+
68
+    video_token = re.search("video_token: '(.+?)'",data).group(1)
69
+    content_type = re.search("content_type: '(.+?)'",data).group(1)
70
+    mw_key = re.search("mw_key: '(.+?)'",data).group(1)
71
+    mw_domain_id = re.search("mw_domain_id: (\d+)",data).group(1)
72
+    uuid = re.search("uuid: '(.+?)'",data).group(1)
73
+    params = "video_token=%s&content_type=%s&mw_key=%s&mw_pid=&mw_domain_id=%s&ad_attr=0&debug=false&uuid=%s"%(
74
+        video_token,content_type,mw_key,mw_domain_id,uuid)
75
+    headers = headers2dict("""
76
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
77
+Content-Type: application/x-www-form-urlencoded; charset=UTF-8
78
+X-Iframe-Option: Direct
79
+X-Requested-With: XMLHttpRequest
80
+""")
81
+    data = requests.post("http://cdn.kapnob.ru/sessions/new_session", data=params,headers=headers).content
82
+    js = json.loads(data)
83
+    stream["url"] = js["mans"]["manifest_m3u8"]
84
+    stream["name"]= stream["url"]
85
+    return [stream]
86
+
87
+
88
+if __name__ == "__main__":
89
+
90
+    url = "http://cdn.kapnob.ru/video/5e67c8b1ad018ffa/iframe"
91
+    streams = resolve(url)
92
+    if not streams:
93
+        print "No streams found"
94
+        sys.exit()
95
+    for s in streams:
96
+        print s
97
+    print streams[0]["url"]
98
+    util.play_video(streams)
99
+    pass

+ 100
- 0
resolvers/kodik.py

@@ -0,0 +1,100 @@
1
+# -*- coding: UTF-8 -*-
2
+# /*
3
+# *      Copyright (C) 2016 Ivars777
4
+# *
5
+# *
6
+# *  This Program is free software; you can redistribute it and/or modify
7
+# *  it under the terms of the GNU General Public License as published by
8
+# *  the Free Software Foundation; either version 2, or (at your option)
9
+# *  any later version.
10
+# *
11
+# *  This Program is distributed in the hope that it will be useful,
12
+# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
+# *  GNU General Public License for more details.
15
+# *
16
+# *  You should have received a copy of the GNU General Public License
17
+# *  along with this program; see the file COPYING.  If not, write to
18
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19
+# *  http://www.gnu.org/copyleft/gpl.html
20
+# *
21
+# */
22
+
23
+import re,os,sys
24
+import json
25
+try:
26
+    import util
27
+except:
28
+    pp = os.path.dirname(os.path.abspath(__file__))
29
+    sys.path.insert(0,os.sep.join(pp.split(os.sep)[:-1]))
30
+    import util
31
+import requests
32
+try:
33
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
34
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
35
+except:
36
+    pass
37
+
38
+__author__ = 'ivars777'
39
+if __name__ <> "__main__":
40
+    __name__ = 'kodik'
41
+
42
+headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])  # parse a pasted header block ("Name: value" per line) into a dict
43
+headers0 = headers2dict("""
44
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
45
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
46
+Upgrade-Insecure-Requests: 1
47
+DNT: 1
48
+Connection: keep-alive
49
+Upgrade-Insecure-Requests: 1
50
+Cache-Control: max-age=0
51
+
52
+""")
53
+
54
+
55
+def supports(url):
56
+    return True if "kodik.cc" in url else False
57
+
58
+def resolve(url):
59
+    global headers0
60
+    streams = []
61
+    try:
62
+        r = requests.get(url,headers=headers0)
63
+    except:
64
+        return []
65
+    if r.status_code<>200:
66
+        return []
67
+    data = r.content
68
+    hash = re.search('hash: "(.+?)"',data).group(1)
69
+    vid = re.search('id: "(.+?)"',data).group(1)
70
+    quality = re.search('quality: "(.+?)"',data).group(1)
71
+    params = "domain=&url=&type=database&hash=%s&id=%s&quality=%s"%(hash,vid,quality)
72
+    headers = headers2dict("""
73
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
74
+Accept: application/json, text/javascript, */*; q=0.01
75
+Content-Type: application/x-www-form-urlencoded; charset=UTF-8
76
+X-Requested-With: XMLHttpRequest
77
+Referer: %s
78
+"""%url)
79
+    data = requests.post("http://kodik.cc/get-video", data=params,headers=headers).content
80
+    js = json.loads(data)
81
+    for st in js["qualities"]:
82
+        stream = util.item()
83
+        stream["url"] = js["qualities"][st]["src"]
84
+        stream["quality"]=int(st)
85
+        stream["name"]= stream["url"]
86
+        streams.append(stream)
87
+    return streams
88
+
89
+if __name__ == "__main__":
90
+
91
+    url = "http://kodik.cc/video/10830/4269a802d1a9d9bdc53fe38488d53a52/720p"
92
+    streams = resolve(url)
93
+    if not streams:
94
+        print "No streams found"
95
+        sys.exit()
96
+    for s in streams:
97
+        print s
98
+    print streams[0]["url"]
99
+    util.play_video(streams)
100
+    pass

+ 114
- 0
resolvers/openload3.py

@@ -0,0 +1,114 @@
1
+# -*- coding: UTF-8 -*-
2
+# /*
3
+# *      Copyright (C) 2015 Lubomir Kucera
4
+# *
5
+# *
6
+# *  This Program is free software; you can redistribute it and/or modify
7
+# *  it under the terms of the GNU General Public License as published by
8
+# *  the Free Software Foundation; either version 2, or (at your option)
9
+# *  any later version.
10
+# *
11
+# *  This Program is distributed in the hope that it will be useful,
12
+# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
+# *  GNU General Public License for more details.
15
+# *
16
+# *  You should have received a copy of the GNU General Public License
17
+# *  along with this program; see the file COPYING.  If not, write to
18
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19
+# *  http://www.gnu.org/copyleft/gpl.html
20
+# *
21
+# */
22
+
23
+import re,os,sys
24
+import json
25
+try:
26
+    import util
27
+except:
28
+    pp = os.path.dirname(os.path.abspath(__file__))
29
+    sys.path.insert(0,os.sep.join(pp.split(os.sep)[:-1]))
30
+    import util
31
+import urllib2
32
+import requests
33
+try:
34
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
35
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
36
+except:
37
+    pass
38
+#from aadecode import AADecoder
39
+
40
+__author__ = 'Jose Riha/Lubomir Kucera'
41
+__name__ = 'openload3'
42
+
43
+
44
+def supports(url):
45
+    return re.search(r'openload\.\w+/embed/.+', url) is not None
46
+
47
+
48
+#INFO_URL = API_BASE_URL + '/streaming/info'
49
+
50
+def resolve(url):
51
+    HTTP_HEADER = {
52
+        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0',
53
+        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
54
+        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
55
+        'Accept-Encoding': 'none',
56
+        'Accept-Language': 'en-US,en;q=0.8',
57
+        'Referer': url}  # 'Connection': 'keep-alive'
58
+
59
+    stream = util.item()
60
+    m = re.search('https*://openload\.\w+/embed/([^/]+)', url)
61
+    if not m:
62
+        return []
63
+    vid=m.group(1)
64
+    url2 = "https://api.openload.co/1/streaming/get?file="+vid
65
+    r = requests.get(url2,headers=HTTP_HEADER)
66
+    try:
67
+        js = json.loads(r.content)
68
+    except:
69
+        return []
70
+    if js["status"] <>200:
71
+        raise Exception(js["msg"])
72
+    res = js["result"]
73
+    stream["url"] = res["url"]
74
+    stream["name"]= res["url"]
75
+    ### Retrieve subtitles ####
76
+    html = requests.get(url, headers=HTTP_HEADER).content
77
+    m = re.search('<track kind="captions" src="([^"]+)" srclang="([^"]+)" label="([^"]+)"', html)
78
+    if m:
79
+        stream["subs"] = m.group(1)
80
+        stream["lang"] = m.group(2)
81
+
82
+    return [stream]
83
+
84
+
85
+if __name__ == "__main__":
86
+
87
+    from subprocess import call
88
+    #url = "http://hqq.tv/player/embed_player.php?vid=235238210241210222228241233208212245&autoplay=no"
89
+    #url = "http://hqq.tv/player/embed_player.php?vid=243221241234244238208213206212211231&autoplay=no"
90
+    url = "http://hqq.tv/player/embed_player.php?vid=208231211231207221227243206206221244&autoplay=no"
91
+    #url = "https://openload.co/embed/TMthIdpy4PI/"
92
+    #url = "https://www.youtube.com/watch?v=Tx1K51_F99o"
93
+    #url = "https://www.youtube.com/watch?v=8BkcX7O1890"
94
+    #url = "https://www.youtube.com/watch?v=Se07R8SYsg0"
95
+    #url = "https://kinostok.tv/embed/731f3437e3c53104dd56d04039a0b15a"
96
+    #url = "http://vk.com/video_ext.php?oid=246066565&id=169244575&hash=d430ab0e76c9f7a1&hd=3"
97
+    #url ="https://openload.co/embed/rPMXJYPTkw4/"
98
+    #url = "https://openload.co/embed/bE7WfZ-vz_A/"
99
+    #url = "https://openload.co/embed/bE7WfZ/"
100
+    #url = "https://openload.co/embed/OuskaKyC2GU/"
101
+    url = "http://hqq.tv/player/embed_player.php?vid=235238210241210222228241233208212245&autoplay=no"
102
+    url = "https://openload.co/embed/rmNcP-0QopE/"
103
+    url = "https://openload.co/embed/oQLXcU1ITAY/"
104
+    streams = resolve(url)
105
+    if not streams:
106
+        print "No streams found"
107
+        sys.exit()
108
+
109
+    for s in streams:
110
+        print s
111
+
112
+    print streams[0]["url"]
113
+    call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",streams[0]["url"]])
114
+    pass

+ 347
- 0
resolvers/youtuberesolver.py

@@ -0,0 +1,347 @@
1
+# -*- coding: UTF-8 -*-
2
+
3
+import re, urllib2
4
+# source from https://github.com/rg3/youtube-dl/issues/1208
5
+# removed some unnecessary debug messages..
6
+class CVevoSignAlgoExtractor:
7
+    # MAX RECURSION Depth for security
8
+    MAX_REC_DEPTH = 5
9
+
10
+    def __init__(self):
11
+        self.algoCache = {}
12
+        self._cleanTmpVariables()
13
+
14
+    def _cleanTmpVariables(self):
15
+        self.fullAlgoCode = ''
16
+        self.allLocalFunNamesTab = []
17
+        self.playerData = ''
18
+
19
+    def _jsToPy(self, jsFunBody):
20
+        pythonFunBody = jsFunBody.replace('function', 'def').replace('{', ':\n\t').replace('}', '').replace(';', '\n\t').replace('var ', '')
21
+        pythonFunBody = pythonFunBody.replace('.reverse()', '[::-1]')
22
+
23
+        lines = pythonFunBody.split('\n')
24
+        for i in range(len(lines)):
25
+            # a.split("") -> list(a)
26
+            match = re.search('(\w+?)\.split\(""\)', lines[i])
27
+            if match:
28
+                lines[i] = lines[i].replace(match.group(0), 'list(' + match.group(1) + ')')
29
+            # a.length -> len(a)
30
+            match = re.search('(\w+?)\.length', lines[i])
31
+            if match:
32
+                lines[i] = lines[i].replace(match.group(0), 'len(' + match.group(1) + ')')
33
+            # a.slice(3) -> a[3:]
34
+            match = re.search('(\w+?)\.slice\(([0-9]+?)\)', lines[i])
35
+            if match:
36
+                lines[i] = lines[i].replace(match.group(0), match.group(1) + ('[%s:]' % match.group(2)))
37
+            # a.join("") -> "".join(a)
38
+            match = re.search('(\w+?)\.join\(("[^"]*?")\)', lines[i])
39
+            if match:
40
+                lines[i] = lines[i].replace(match.group(0), match.group(2) + '.join(' + match.group(1) + ')')
41
+        return "\n".join(lines)
42
+
43
+    def _getLocalFunBody(self, funName):
44
+        # get function body
45
+        match = re.search('(function %s\([^)]+?\){[^}]+?})' % funName, self.playerData)
46
+        if match:
47
+            # return jsFunBody
48
+            return match.group(1)
49
+        return ''
50
+
51
+    def _getAllLocalSubFunNames(self, mainFunBody):
52
+        match = re.compile('[ =(,](\w+?)\([^)]*?\)').findall(mainFunBody)
53
+        if len(match):
54
+            # first item is name of main function, so omit it
55
+            funNameTab = set(match[1:])
56
+            return funNameTab
57
+        return set()
58
+
59
+    def decryptSignature(self, s, playerUrl):
60
+        playerUrl = playerUrl[:4] != 'http' and 'http:' + playerUrl or playerUrl
61
+        util.debug("decrypt_signature sign_len[%d] playerUrl[%s]" % (len(s), playerUrl))
62
+
63
+        # clear local data
64
+        self._cleanTmpVariables()
65
+
66
+        # use algoCache
67
+        if playerUrl not in self.algoCache:
68
+            # get the player HTML5 script
69
+            request = urllib2.Request(playerUrl)
70
+            try:
71
+                self.playerData = urllib2.urlopen(request).read()
72
+                self.playerData = self.playerData.decode('utf-8', 'ignore')
73
+            except:
74
+                util.debug('Unable to download playerUrl webpage')
75
+                return ''
76
+
77
+            # get main function name
78
+            match = re.search("signature=(\w+?)\([^)]\)", self.playerData)
79
+            if match:
80
+                mainFunName = match.group(1)
81
+                util.debug('Main signature function name = "%s"' % mainFunName)
82
+            else:
83
+                util.debug('Can not get main signature function name')
84
+                return ''
85
+
86
+            self._getfullAlgoCode(mainFunName)
87
+
88
+            # wrap all local algo function into one function extractedSignatureAlgo()
89
+            algoLines = self.fullAlgoCode.split('\n')
90
+            for i in range(len(algoLines)):
91
+                algoLines[i] = '\t' + algoLines[i]
92
+            self.fullAlgoCode = 'def extractedSignatureAlgo(param):'
93
+            self.fullAlgoCode += '\n'.join(algoLines)
94
+            self.fullAlgoCode += '\n\treturn %s(param)' % mainFunName
95
+            self.fullAlgoCode += '\noutSignature = extractedSignatureAlgo( inSignature )\n'
96
+
97
+            # after this function we should have all needed code in self.fullAlgoCode
98
+            try:
99
+                algoCodeObj = compile(self.fullAlgoCode, '', 'exec')
100
+            except:
101
+                util.debug('decryptSignature compile algo code EXCEPTION')
102
+                return ''
103
+        else:
104
+            # get algoCodeObj from algoCache
105
+            util.debug('Algo taken from cache')
106
+            algoCodeObj = self.algoCache[playerUrl]
107
+
108
+        # for security allow only a few Python global functions in the algo code
109
+        vGlobals = {"__builtins__": None, 'len': len, 'list': list}
110
+
111
+        # local variable to pass encrypted sign and get decrypted sign
112
+        vLocals = { 'inSignature': s, 'outSignature': '' }
113
+
114
+        # execute prepared code
115
+        try:
116
+            exec(algoCodeObj, vGlobals, vLocals)
117
+        except:
118
+            util.debug('decryptSignature exec code EXCEPTION')
119
+            return ''
120
+
121
+        util.debug('Decrypted signature = [%s]' % vLocals['outSignature'])
122
+        # if algo seems ok and not in cache, add it to cache
123
+        if playerUrl not in self.algoCache and '' != vLocals['outSignature']:
124
+            util.debug('Algo from player [%s] added to cache' % playerUrl)
125
+            self.algoCache[playerUrl] = algoCodeObj
126
+
127
+        # free data that is no longer needed
128
+        self._cleanTmpVariables()
129
+
130
+        return vLocals['outSignature']
131
+
132
+    # Note: this method uses recursion
133
+    def _getfullAlgoCode(self, mainFunName, recDepth=0):
134
+        if self.MAX_REC_DEPTH <= recDepth:
135
+            util.debug('_getfullAlgoCode: Maximum recursion depth exceeded')
136
+            return
137
+
138
+        funBody = self._getLocalFunBody(mainFunName)
139
+        if '' != funBody:
140
+            funNames = self._getAllLocalSubFunNames(funBody)
141
+            if len(funNames):
142
+                for funName in funNames:
143
+                    if funName not in self.allLocalFunNamesTab:
144
+                        self.allLocalFunNamesTab.append(funName)
145
+                        util.debug("Add local function %s to known functions" % funName)
146
+                        self._getfullAlgoCode(funName, recDepth + 1)
147
+
148
+            # convert code from JavaScript to Python
149
+            funBody = self._jsToPy(funBody)
150
+            self.fullAlgoCode += '\n' + funBody + '\n'
151
+        return
152
+
153
+decryptor = CVevoSignAlgoExtractor()
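As a side note, the compile/exec pattern used by decryptSignature() above boils down to the minimal sketch below; the string-reversal "algorithm" is a made-up stand-in for the real extracted code, and only the variable names mirror the method's own.

# Minimal sketch of the restricted exec() pattern (illustrative only, not the plugin's generated code)
code = compile("def f(p):\n\treturn p[::-1]\noutSignature = f(inSignature)\n", '', 'exec')
env = {"__builtins__": None, 'len': len, 'list': list}  # same restricted globals as vGlobals above
loc = {'inSignature': 'abcdef', 'outSignature': ''}
exec(code, env, loc)
print loc['outSignature']  # -> 'fedcba'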
154
+
155
+'''
156
+   YouTube plugin for XBMC
157
+    Copyright (C) 2010-2012 Tobias Ussing And Henrik Mosgaard Jensen
158
+
159
+    This program is free software: you can redistribute it and/or modify
160
+    it under the terms of the GNU General Public License as published by
161
+    the Free Software Foundation, either version 3 of the License, or
162
+    (at your option) any later version.
163
+
164
+    This program is distributed in the hope that it will be useful,
165
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
166
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
167
+    GNU General Public License for more details.
168
+
169
+    You should have received a copy of the GNU General Public License
170
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
171
+'''
172
+
173
+import sys
174
+import urllib
175
+import cgi
176
+import simplejson as json
177
+
178
+
179
+class YoutubePlayer(object):
180
+    fmt_value = {
181
+            5: "240p",
182
+            18: "360p",
183
+            22: "720p",
184
+            26: "???",
185
+            33: "???",
186
+            34: "360p",
187
+            35: "480p",
188
+            37: "1080p",
189
+            38: "720p",
190
+            43: "360p",
191
+            44: "480p",
192
+            45: "720p",
193
+            46: "520p",
194
+            59: "480",
195
+            78: "400",
196
+            82: "360p",
197
+            83: "240p",
198
+            84: "720p",
199
+            85: "520p",
200
+            100: "360p",
201
+            101: "480p",
202
+            102: "720p",
203
+            120: "hd720",
204
+            121: "hd1080"
205
+            }
206
+
207
+    # YouTube Playback Feeds
208
+    urls = {}
209
+    urls['video_stream'] = "http://www.youtube.com/watch?v=%s&safeSearch=none"
210
+    urls['embed_stream'] = "http://www.youtube.com/get_video_info?video_id=%s"
211
+    urls['video_info'] = "http://gdata.youtube.com/feeds/api/videos/%s"
212
+
213
+    def __init__(self):
214
+        pass
215
+
216
+    def removeAdditionalEndingDelimiter(self, data):
217
+        pos = data.find("};")
218
+        if pos != -1:
219
+            data = data[:pos + 1]
220
+        return data
221
+
222
+    def extractFlashVars(self, data, assets):
223
+        flashvars = {}
224
+        found = False
225
+
226
+        for line in data.split("\n"):
227
+            if line.strip().find(";ytplayer.config = ") > 0:
228
+                found = True
229
+                p1 = line.find(";ytplayer.config = ") + len(";ytplayer.config = ") - 1
230
+                p2 = line.rfind(";")
231
+                if p1 <= 0 or p2 <= 0:
232
+                    continue
233
+                data = line[p1 + 1:p2]
234
+                break
235
+        data = self.removeAdditionalEndingDelimiter(data)
236
+
237
+        if found:
238
+            data = json.loads(data)
239
+            if assets:
240
+                flashvars = data["assets"]
241
+            else:
242
+                flashvars = data["args"]
243
+        return flashvars
244
+
245
+    def scrapeWebPageForVideoLinks(self, result, video):
246
+        links = {}
247
+        flashvars = self.extractFlashVars(result, 0)
248
+        if not flashvars.has_key(u"url_encoded_fmt_stream_map"):
249
+            return links
250
+
251
+        if flashvars.has_key(u"ttsurl"):
252
+            video[u"ttsurl"] = flashvars[u"ttsurl"]
253
+        if flashvars.has_key("title"):
254
+            video["title"] = flashvars["title"]
255
+
256
+        for url_desc in flashvars[u"url_encoded_fmt_stream_map"].split(u","):
257
+            url_desc_map = cgi.parse_qs(url_desc)
258
+            if not (url_desc_map.has_key(u"url") or url_desc_map.has_key(u"stream")):
259
+                continue
260
+
261
+            key = int(url_desc_map[u"itag"][0])
262
+            url = u""
263
+            if url_desc_map.has_key(u"url"):
264
+                url = urllib.unquote(url_desc_map[u"url"][0])
265
+            elif url_desc_map.has_key(u"conn") and url_desc_map.has_key(u"stream"):
266
+                url = urllib.unquote(url_desc_map[u"conn"][0])
267
+                if url.rfind("/") < len(url) - 1:
268
+                    url = url + "/"
269
+                url = url + urllib.unquote(url_desc_map[u"stream"][0])
270
+            elif url_desc_map.has_key(u"stream") and not url_desc_map.has_key(u"conn"):
271
+                url = urllib.unquote(url_desc_map[u"stream"][0])
272
+
273
+            if url_desc_map.has_key(u"sig"):
274
+                url = url + u"&signature=" + url_desc_map[u"sig"][0]
275
+            elif url_desc_map.has_key(u"s"):
276
+                sig = url_desc_map[u"s"][0]
277
+                flashvars = self.extractFlashVars(result, 1)
278
+                js = flashvars[u"js"]
279
+                url = url + u"&signature=" + self.decrypt_signature(sig, js)
280
+
281
+            links[key] = url
282
+
283
+        return links
284
+
285
+    def decrypt_signature(self, s, js):
286
+        return decryptor.decryptSignature(s, js)
287
+
288
+
289
+    def extractVideoLinksFromYoutube(self, url, videoid, video):
290
+        result = util.request(self.urls[u"video_stream"] % videoid)
291
+        links = self.scrapeWebPageForVideoLinks(result, video)
292
+        if len(links) == 0:
293
+            util.error(u"Couldn't find video url- or stream-map.")
294
+        return links
295
+# /*
296
+# *      Copyright (C) 2011 Libor Zoubek
297
+# *
298
+# *
299
+# *  This Program is free software; you can redistribute it and/or modify
300
+# *  it under the terms of the GNU General Public License as published by
301
+# *  the Free Software Foundation; either version 2, or (at your option)
302
+# *  any later version.
303
+# *
304
+# *  This Program is distributed in the hope that it will be useful,
305
+# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
306
+# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
307
+# *  GNU General Public License for more details.
308
+# *
309
+# *  You should have received a copy of the GNU General Public License
310
+# *  along with this program; see the file COPYING.  If not, write to
311
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
312
+# *  http://www.gnu.org/copyleft/gpl.html
313
+# *
314
+# */
315
+import re, util, urllib
316
+__name__ = 'youtube'
317
+
318
+
319
+def supports(url):
320
+    return not _regex(url) == None
321
+
322
+def resolve(url):
323
+    m = _regex(url)
324
+    if not m == None:
325
+        player = YoutubePlayer()
326
+        video = {'title':'žádný název'}
327
+        index = url.find('&')  # strip out everything after &
328
+        if index > 0:
329
+            url = url[:index]
330
+        links = player.extractVideoLinksFromYoutube(url, m.group('id'), video)
331
+        resolved = []
332
+        for q in links:
333
+            if q in player.fmt_value.keys():
334
+                quality = player.fmt_value[q]
335
+                item = {}
336
+                item['name'] = __name__
337
+                item['url'] = links[q]
338
+                item['quality'] = quality
339
+                item['surl'] = url
340
+                item['subs'] = ''
341
+                item['title'] = video['title']
342
+                item['fmt'] = q
343
+                resolved.append(item)
344
+        return resolved
345
+
346
+def _regex(url):
347
+    return re.search('www\.youtube\.com/(watch\?v=|v/|embed/)(?P<id>.+?)(\?|$|&)', url, re.IGNORECASE | re.DOTALL)
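For orientation, a resolver module like the one above is typically driven along these lines (the video id is the sample id used elsewhere in this commit; the loop body is an illustrative sketch, not part of the plugin):

# Hypothetical driver for the youtube resolver defined above
url = "https://www.youtube.com/watch?v=2rlTF6HiMGg"
if supports(url):
    for item in resolve(url) or []:
        print item['quality'], item['fmt'], item['url']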

+ 169
- 0
sources/SourceBase.py

@@ -0,0 +1,169 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+
9
+import urllib2, urllib
10
+import datetime, re, sys,os
11
+import requests, json
12
+import ssl
13
+if "_create_unverified_context" in dir(ssl):
14
+    ssl._create_default_https_context = ssl._create_unverified_context
15
+
16
+
17
+try:
18
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
19
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
20
+except:
21
+    pass
22
+from collections import OrderedDict
23
+import ConfigParser
24
+try:
25
+    import util
26
+except:
27
+    parent = os.path.dirname(os.path.abspath(__file__))
28
+    parent = os.sep.join(parent.split(os.sep)[:-1])
29
+    sys.path.insert(0,parent)
30
+    import util
31
+
32
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
33
+
34
+class SourceBase(object):
35
+    """Stream source base class"""
36
+
37
+    def __init__(self,country="lv"):
38
+        self.name = "name"
39
+        self.title = "Title"
40
+        self.img = ""
41
+        self.desc = ""
42
+        self.options = OrderedDict()
43
+        self.config_file = ""
44
+        self.url = "http://www.bbb.com/"
45
+        self.headers = headers2dict("""
46
+User-Agent: Mozilla/5.0 (Linux; U; Android 4.4.4; Nexus 5 Build/KTU84P) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30
47
+""")
48
+
49
+    def login(self,user="",password=""):
50
+        return False
51
+
52
+    def logout(self):
53
+        return True
54
+
55
+    def get_content(self,data):
56
+        ### To be overridden in child class
57
+        return [("..atpakaļ","back",None,"Kļūda, atgriezties atpakaļ")]
58
+
59
+    def is_video(self,data):
60
+        ### To be overridden in child class
61
+        return False
62
+
63
+    def get_streams(self,data):
64
+        ### Normally to be overridden in child class
65
+
66
+        if not self.is_video(data):
67
+            return []
68
+        content = self.get_content(data)
69
+        stream = util.item()
70
+        stream["name"] = content[0].encode("utf8") if isinstance(content[0],unicode) else content[0]
71
+        stream["url"] = content[1].encode("utf8") if isinstance(content[1],unicode) else content[1]
72
+        stream["img"] = content[2].encode("utf8") if isinstance(content[2],unicode) else content[2]
73
+        stream["desc"] = content[3].encode("utf8") if isinstance(content[3],unicode) else content[3]
74
+        stream["type"] = stream_type(content[1]).encode("utf8")
75
+        return[stream]
76
+
77
+    def get_epg(self,data):
78
+        ### Normally to be overridden in child class
79
+        return [self.get_info(data)]
80
+
81
+    def options_read(self):
82
+        if not ("options" in dir(self) and self.options): # process options only if self.options defined, self.config_file should be defined too
83
+            return None
84
+        config = ConfigParser.ConfigParser()
85
+        if os.path.exists(self.config_file):
86
+            options0 = self.options
87
+            config.read(self.config_file)
88
+            self.options = OrderedDict(config.items(self.name))
89
+            for k in options0:
90
+                if not k in self.options:
91
+                    self.options[k] = options0[k]
92
+                    self.options_write(self.options)
93
+        else:
94
+            self.options_write(self.options)
95
+        return self.options
96
+
97
+    def options_write(self,options):
98
+        config = ConfigParser.ConfigParser()
99
+        config.add_section(self.name)
100
+        for k in options.keys():
101
+            config.set(self.name, k,options[k])
102
+        with open(self.config_file,"w") as f:
103
+            config.write(f)
104
+        self.options = OrderedDict(config.items(self.name))
105
+
106
+    def call(self, data,params=None,headers=None,lang=""):
107
+        if not headers: headers = self.headers
108
+        url = self.url+data
109
+        result = self._http_request(url,params,headers=headers)
110
+        return result
111
+
112
+    def call_json(self, data,params=None,headers=None,lang=""):
113
+        result = self.call(data,params,headers)
114
+        if result:
115
+            result = json.loads(result)
116
+            return result
117
+        else:
118
+            raise Exception("No data returned")
119
+
120
+    def _http_request(self, url,params = None, headers=None):
121
+        if not headers:
122
+            headers = self.headers if "headers" in dir(self) else headers2dict("User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0")
123
+        try:
124
+            if params:
125
+                r = requests.post(url, data=params, headers=headers,verify=False)
126
+            else:
127
+                r = requests.get(url, headers=headers,verify=False)
128
+            return r.content
129
+        except Exception as ex:
130
+            if hasattr(ex, "read"):
131
+                content = ex.read()
132
+            else:
133
+                content = None
134
+            return content
135
+
136
+    @staticmethod
137
+    def stream_type(data):
138
+        return stream_type(data)
139
+
140
+    @staticmethod
141
+    def parse_data(data):
142
+        if "::" in data:
143
+            source = data.split("::")[0]
144
+            data = data.split("::")[1]
145
+        else:
146
+            source = ""
147
+        path = data.split("?")[0]
148
+        plist = path.split("/")
149
+        clist = plist[0]
150
+        params = data[data.find("?"):] if "?" in data else ""
151
+        qs = dict(map(lambda x:x.split("="),re.findall("\w+=[\w\-]+",params)))
152
+        return source,data,path,plist,clist,params,qs
153
+
154
+def stream_type(data):
155
+    data = data.lower()
156
+    m = re.search(r"^(\w+)://", data)
157
+    prefix = m.group(1) if m else ""
158
+    if prefix in ("http","https"):
159
+        if ".m3u8" in data:
160
+            return "hls"
161
+        elif ".mpd" in data:
162
+            return "dash"
163
+        else:
164
+            return "http"
165
+    else:
166
+        return prefix
167
+
168
+if __name__ == "__main__":
169
+    pass
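A quick illustration of what the helpers above return (the URLs and header text are made-up examples):

# stream_type() classifies by URL scheme and extension
print stream_type("http://example.com/live/master.m3u8")    # -> "hls"
print stream_type("http://example.com/vod/manifest.mpd")    # -> "dash"
print stream_type("rtmp://example.com/live/stream1")        # -> "rtmp"
# headers2dict() turns a "Header: value" blob into a dict
print headers2dict("User-Agent: test\nAccept: */*")          # -> {'User-Agent': 'test', 'Accept': '*/*'}
# parse_data() splits a "source::path?query" reference into its parts
print SourceBase.parse_data("config::home")[0]               # -> "config"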

+ 407
- 0
sources/YouTubeVideoUrl.py

@@ -0,0 +1,407 @@
1
+# -*- coding: UTF-8 -*-
2
+# This video extraction code based on youtube-dl: https://github.com/rg3/youtube-dl
3
+
4
+import codecs
5
+import json
6
+import re
7
+
8
+from urllib import urlencode
9
+from urllib2 import urlopen, URLError
10
+import sys
11
+import ssl
12
+if "_create_unverified_context" in dir(ssl):
13
+    ssl._create_default_https_context = ssl._create_unverified_context
14
+
15
+#from Components.config import config
16
+
17
+#from . import sslContext
18
+sslContext = None
19
+if sys.version_info >= (2, 7, 9):
20
+    try:
21
+        import ssl
22
+        sslContext = ssl._create_unverified_context()
23
+    except:
24
+        pass
25
+from jsinterp import JSInterpreter
26
+from swfinterp import SWFInterpreter
27
+
28
+
29
+PRIORITY_VIDEO_FORMAT = []
30
+maxResolution =  '22'
31
+
32
+
33
+def createPriorityFormats():
34
+    global PRIORITY_VIDEO_FORMAT,maxResolution
35
+    PRIORITY_VIDEO_FORMAT = []
36
+    use_format = False
37
+    for itag_value in ['38', '37', '96', '22', '95', '120',
38
+                           '35', '94', '18', '93', '5', '92', '132', '17']:
39
+        if itag_value == maxResolution: #config.plugins.YouTube.maxResolution.value:
40
+            use_format = True
41
+        if use_format:
42
+            PRIORITY_VIDEO_FORMAT.append(itag_value)
43
+
44
+createPriorityFormats()
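With the default maxResolution = '22' above, the loop keeps '22' and everything after it in the hard-coded order, so the resulting priority list is:

# PRIORITY_VIDEO_FORMAT == ['22', '95', '120', '35', '94', '18', '93', '5', '92', '132', '17']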
45
+
46
+IGNORE_VIDEO_FORMAT = [
47
+    '43',  # webm
48
+                '44',  # webm
49
+                '45',  # webm
50
+                '46',  # webm
51
+                '100',  # webm
52
+                '101',  # webm
53
+                '102'  # webm
54
+]
55
+
56
+
57
+def uppercase_escape(s):
58
+    unicode_escape = codecs.getdecoder('unicode_escape')
59
+    return re.sub(
60
+            r'\\U[0-9a-fA-F]{8}',
61
+                lambda m: unicode_escape(m.group(0))[0],
62
+                s)
63
+
64
+
65
+def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
66
+    if string == '':
67
+        return string
68
+    res = string.split('%')
69
+    if len(res) == 1:
70
+        return string
71
+    if encoding is None:
72
+        encoding = 'utf-8'
73
+    if errors is None:
74
+        errors = 'replace'
75
+    # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
76
+    pct_sequence = b''
77
+    string = res[0]
78
+    for item in res[1:]:
79
+        try:
80
+            if not item:
81
+                raise ValueError
82
+            pct_sequence += item[:2].decode('hex')
83
+            rest = item[2:]
84
+            if not rest:
85
+                # This segment was just a single percent-encoded character.
86
+                # May be part of a sequence of code units, so delay decoding.
87
+                # (Stored in pct_sequence).
88
+                continue
89
+        except ValueError:
90
+            rest = '%' + item
91
+        # Encountered non-percent-encoded characters. Flush the current
92
+        # pct_sequence.
93
+        string += pct_sequence.decode(encoding, errors) + rest
94
+        pct_sequence = b''
95
+    if pct_sequence:
96
+        # Flush the final pct_sequence
97
+        string += pct_sequence.decode(encoding, errors)
98
+    return string
99
+
100
+
101
+def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
102
+               encoding='utf-8', errors='replace'):
103
+    qs, _coerce_result = qs, unicode
104
+    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
105
+    r = []
106
+    for name_value in pairs:
107
+        if not name_value and not strict_parsing:
108
+            continue
109
+        nv = name_value.split('=', 1)
110
+        if len(nv) != 2:
111
+            if strict_parsing:
112
+                raise ValueError("bad query field: %r" % (name_value,))
113
+            # Handle case of a control-name with no equal sign
114
+            if keep_blank_values:
115
+                nv.append('')
116
+            else:
117
+                continue
118
+        if len(nv[1]) or keep_blank_values:
119
+            name = nv[0].replace('+', ' ')
120
+            name = compat_urllib_parse_unquote(
121
+                            name, encoding=encoding, errors=errors)
122
+            name = _coerce_result(name)
123
+            value = nv[1].replace('+', ' ')
124
+            value = compat_urllib_parse_unquote(
125
+                            value, encoding=encoding, errors=errors)
126
+            value = _coerce_result(value)
127
+            r.append((name, value))
128
+    return r
129
+
130
+
131
+def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
132
+                    encoding='utf-8', errors='replace'):
133
+    parsed_result = {}
134
+    pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
135
+                           encoding=encoding, errors=errors)
136
+    for name, value in pairs:
137
+        if name in parsed_result:
138
+            parsed_result[name].append(value)
139
+        else:
140
+            parsed_result[name] = [value]
141
+    return parsed_result
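As a sanity check, compat_parse_qs() behaves like urlparse.parse_qs and groups repeated keys into lists (illustrative input):

# compat_parse_qs("itag=22&itag=18&url=http%3A%2F%2Fexample.com")
#   -> {u'itag': [u'22', u'18'], u'url': [u'http://example.com']}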
142
+
143
+
144
+class YouTubeVideoUrl():
145
+
146
+    def _download_webpage(self, url):
147
+        """ Returns a tuple (page content as string, URL handle) """
148
+        try:
149
+            if sslContext:
150
+                urlh = urlopen(url, context = sslContext)
151
+            else:
152
+                urlh = urlopen(url)
153
+        except URLError, e:
154
+            #raise Exception(e.reason)
155
+            return ""
156
+        return urlh.read()
157
+
158
+    def _search_regex(self, pattern, string):
159
+        """
160
+        Perform a regex search on the given string, using a single or a list of
161
+        patterns returning the first matching group.
162
+        """
163
+        mobj = re.search(pattern, string, 0)
164
+        if mobj:
165
+            # return the first matching group
166
+            return next(g for g in mobj.groups() if g is not None)
167
+        else:
168
+            raise Exception('Unable to extract pattern from string!')
169
+
170
+    def _decrypt_signature(self, s, player_url):
171
+        """Turn the encrypted s field into a working signature"""
172
+
173
+        if player_url is None:
174
+            raise Exception('Cannot decrypt signature without player_url!')
175
+
176
+        if player_url[:2] == '//':
177
+            player_url = 'https:' + player_url
178
+        try:
179
+            func = self._extract_signature_function(player_url)
180
+            return func(s)
181
+        except:
182
+            raise Exception('Signature extraction failed!')
183
+
184
+    def _extract_signature_function(self, player_url):
185
+        id_m = re.match(
186
+                    r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?|/base)?\.(?P<ext>[a-z]+)$',
187
+                        player_url)
188
+        if not id_m:
189
+            raise Exception('Cannot identify player %r!' % player_url)
190
+        player_type = id_m.group('ext')
191
+        code = self._download_webpage(player_url)
192
+        if player_type == 'js':
193
+            return self._parse_sig_js(code)
194
+        elif player_type == 'swf':
195
+            return self._parse_sig_swf(code)
196
+        else:
197
+            raise Exception('Invalid player type %r!' % player_type)
198
+
199
+    def _parse_sig_js(self, jscode):
200
+        funcname = self._search_regex(r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode)
201
+        jsi = JSInterpreter(jscode)
202
+        initial_function = jsi.extract_function(funcname)
203
+        return lambda s: initial_function([s])
204
+
205
+    def _parse_sig_swf(self, file_contents):
206
+        swfi = SWFInterpreter(file_contents)
207
+        TARGET_CLASSNAME = 'SignatureDecipher'
208
+        searched_class = swfi.extract_class(TARGET_CLASSNAME)
209
+        initial_function = swfi.extract_function(searched_class, 'decipher')
210
+        return lambda s: initial_function([s])
211
+
212
+    def _extract_from_m3u8(self, manifest_url):
213
+        url_map = {}
214
+
215
+        def _get_urls(_manifest):
216
+            lines = _manifest.split('\n')
217
+            urls = filter(lambda l: l and not l.startswith('#'), lines)
218
+            return urls
219
+
220
+        manifest = self._download_webpage(manifest_url)
221
+        formats_urls = _get_urls(manifest)
222
+        for format_url in formats_urls:
223
+            itag = self._search_regex(r'itag/(\d+?)/', format_url)
224
+            url_map[itag] = format_url
225
+        return url_map
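The helper above simply maps each itag found in the HLS master playlist to its variant URL, roughly (made-up URLs):

# _extract_from_m3u8(manifest_url) -> {'22': 'https://.../itag/22/...', '18': 'https://.../itag/18/...'}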
226
+
227
+    def _get_ytplayer_config(self, webpage):
228
+        # User data may contain arbitrary character sequences that may affect
229
+        # JSON extraction with regex, e.g. when '};' is contained the second
230
+        # regex won't capture the whole JSON. Yet working around by trying more
231
+        # concrete regex first keeping in mind proper quoted string handling
232
+        # to be implemented in future that will replace this workaround (see
233
+        # https://github.com/rg3/youtube-dl/issues/7468,
234
+        # https://github.com/rg3/youtube-dl/pull/7599)
235
+        patterns = [
236
+                    r';ytplayer\.config\s*=\s*({.+?});ytplayer',
237
+                        r';ytplayer\.config\s*=\s*({.+?});',
238
+                ]
239
+        for pattern in patterns:
240
+            config = self._search_regex(pattern, webpage)
241
+            if config:
242
+                return json.loads(uppercase_escape(config))
243
+
244
+    def extract(self, video_id):
245
+        url = 'https://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
246
+
247
+        # Get video webpage
248
+        video_webpage = self._download_webpage(url)
249
+        if not video_webpage:
250
+            #raise Exception('Video webpage not found!')
251
+            return ""
252
+
253
+        # Attempt to extract SWF player URL
254
+        mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
255
+        if mobj is not None:
256
+            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
257
+        else:
258
+            player_url = None
259
+
260
+        # Get video info
261
+        embed_webpage = None
262
+        if re.search(r'player-age-gate-content">', video_webpage) is not None:
263
+            age_gate = True
264
+            # We simulate the access to the video from www.youtube.com/v/{video_id}
265
+            # this can be viewed without login into Youtube
266
+            url = 'https://www.youtube.com/embed/%s' % video_id
267
+            embed_webpage = self._download_webpage(url)
268
+            data = urlencode({
269
+                            'video_id': video_id,
270
+                                'eurl': 'https://youtube.googleapis.com/v/' + video_id,
271
+                                'sts': self._search_regex(r'"sts"\s*:\s*(\d+)', embed_webpage),
272
+                        })
273
+            video_info_url = 'https://www.youtube.com/get_video_info?' + data
274
+            video_info_webpage = self._download_webpage(video_info_url)
275
+            video_info = compat_parse_qs(video_info_webpage)
276
+        else:
277
+            age_gate = False
278
+            video_info = None
279
+            # Try looking directly into the video webpage
280
+            ytplayer_config = self._get_ytplayer_config(video_webpage)
281
+            if ytplayer_config:
282
+                args = ytplayer_config['args']
283
+                if args.get('url_encoded_fmt_stream_map'):
284
+                    # Convert to the same format returned by compat_parse_qs
285
+                    video_info = dict((k, [v]) for k, v in args.items())
286
+
287
+            if not video_info:
288
+                # We also try looking in get_video_info since it may contain different dashmpd
289
+                # URL that points to a DASH manifest with possibly different itag set (some itags
290
+                # are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
291
+                # manifest pointed by get_video_info's dashmpd).
292
+                # The general idea is to take a union of itags of both DASH manifests (for example
293
+                # video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
294
+                for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
295
+                    video_info_url = (
296
+                                            'https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
297
+                                                % (video_id, el_type))
298
+                    video_info_webpage = self._download_webpage(video_info_url)
299
+                    video_info = compat_parse_qs(video_info_webpage)
300
+                    if 'token' in video_info:
301
+                        break
302
+        if 'token' not in video_info:
303
+            if 'reason' in video_info:
304
+                print '[YouTubeVideoUrl] %s' % video_info['reason'][0]
305
+            else:
306
+                print '[YouTubeVideoUrl] "token" parameter not in video info for unknown reason'
307
+
308
+        # Start extracting information
309
+        if 'conn' in video_info and video_info['conn'][0][:4] == 'rtmp':
310
+            url = video_info['conn'][0]
311
+        elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or \
312
+                     len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
313
+            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + \
314
+                            ',' + video_info.get('adaptive_fmts', [''])[0]
315
+            if 'rtmpe%3Dyes' in encoded_url_map:
316
+                raise Exception('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343')
317
+
318
+            # Find the best format from our format priority map
319
+            encoded_url_map = encoded_url_map.split(',')
320
+            url_map_str = None
321
+            # If format changed in config, recreate priority list
322
+            if PRIORITY_VIDEO_FORMAT[0] != maxResolution: #config.plugins.YouTube.maxResolution.value:
323
+                createPriorityFormats()
324
+            for our_format in PRIORITY_VIDEO_FORMAT:
325
+                our_format = 'itag=' + our_format
326
+                for encoded_url in encoded_url_map:
327
+                    if our_format in encoded_url and 'url=' in encoded_url:
328
+                        url_map_str = encoded_url
329
+                        break
330
+                if url_map_str:
331
+                    break
332
+            # If nothing was found, use the first entry in the list, provided it is not in the ignore map
333
+            if not url_map_str:
334
+                for encoded_url in encoded_url_map:
335
+                    if 'url=' in encoded_url:
336
+                        url_map_str = encoded_url
337
+                        for ignore_format in IGNORE_VIDEO_FORMAT:
338
+                            ignore_format = 'itag=' + ignore_format
339
+                            if ignore_format in encoded_url:
340
+                                url_map_str = None
341
+                                break
342
+                    if url_map_str:
343
+                        break
344
+            if not url_map_str:
345
+                url_map_str = encoded_url_map[0]
346
+
347
+            url_data = compat_parse_qs(url_map_str)
348
+            url = url_data['url'][0]
349
+            if 'sig' in url_data:
350
+                url += '&signature=' + url_data['sig'][0]
351
+            elif 's' in url_data:
352
+                encrypted_sig = url_data['s'][0]
353
+                ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
354
+
355
+                jsplayer_url_json = self._search_regex(ASSETS_RE,
356
+                                                                       embed_webpage if age_gate else video_webpage)
357
+                if not jsplayer_url_json and not age_gate:
358
+                    # We need the embed website after all
359
+                    if embed_webpage is None:
360
+                        embed_url = 'https://www.youtube.com/embed/%s' % video_id
361
+                        embed_webpage = self._download_webpage(embed_url)
362
+                    jsplayer_url_json = self._search_regex(ASSETS_RE, embed_webpage)
363
+
364
+                player_url = json.loads(jsplayer_url_json)
365
+                if player_url is None:
366
+                    player_url_json = self._search_regex(
367
+                                            r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
368
+                                                video_webpage)
369
+                    player_url = json.loads(player_url_json)
370
+
371
+                signature = self._decrypt_signature(encrypted_sig, player_url)
372
+                url += '&signature=' + signature
373
+            if 'ratebypass' not in url:
374
+                url += '&ratebypass=yes'
375
+        elif video_info.get('hlsvp'):
376
+            url = None
377
+            manifest_url = video_info['hlsvp'][0]
378
+            url_map = self._extract_from_m3u8(manifest_url)
379
+
380
+            # Find the best format from our format priority map
381
+            for our_format in PRIORITY_VIDEO_FORMAT:
382
+                if url_map.get(our_format):
383
+                    url = url_map[our_format]
384
+                    break
385
+            # If nothing was found, use the first entry in the list, provided it is not in the ignore map
386
+            if not url:
387
+                for url_map_key in url_map.keys():
388
+                    if url_map_key not in IGNORE_VIDEO_FORMAT:
389
+                        url = url_map[url_map_key]
390
+                        break
391
+            if not url:
392
+                url = url_map.values()[0]
393
+        else:
394
+            #raise Exception('No supported formats found in video info!')
395
+            return ""
396
+
397
+        return str(url)
398
+
399
+if __name__ == "__main__":
400
+
401
+    #yt = YouTubeVideoUrl()
402
+    if len(sys.argv)>1:
403
+        video_id= sys.argv[1]
404
+    else:
405
+        video_id = "2rlTF6HiMGg"
406
+    e = YouTubeVideoUrl().extract(video_id)
407
+    print e

+ 0
- 0
sources/__init__.py


+ 213
- 0
sources/cinemalive.py

@@ -0,0 +1,213 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+import urllib2, urllib
13
+import datetime, re, sys,os
14
+import ConfigParser
15
+from SourceBase import SourceBase
16
+#from collections import OrderedDict
17
+import os
18
+import ssl
19
+if "_create_unverified_context" in dir(ssl):
20
+    ssl._create_default_https_context = ssl._create_unverified_context
21
+
22
+#sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
23
+from resolver import resolve
24
+import util
25
+
26
+
27
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
28
+import HTMLParser
29
+h = HTMLParser.HTMLParser()
30
+
31
+class Source(SourceBase):
32
+
33
+    def __init__(self,country="",cfg_path=None):
34
+        self.hidden = True  # hide non-working sources from the menu
35
+        self.name = "cinemalive"
36
+        self.title = "cinemalive.tv"
37
+        self.img = "picons/cinemalive.png" #"https://cinemalive.tv/assets/img/logo.png"
38
+        self.desc = "cinemalive.tv satura skatīšanās"
39
+        self.country=country
40
+        self.headers = headers2dict("""
41
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0
42
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8,application/json
43
+""")
44
+        self.headers2 = headers2dict("""
45
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36
46
+Content-Type: application/x-www-form-urlencoded; charset=UTF-8
47
+Accept-Language: en-US,en;q=0.8
48
+""")
49
+        self.url = "https://cinemalive.tv/"
50
+        #self.login()
51
+
52
+
53
+    ######### Entry point ########
54
+    def get_content(self, data):
55
+        print "[cinemalive] get_content:", data
56
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
57
+        content=[]
58
+        content.append(("..return", "back","","Return back"))
59
+
60
+        if clist=="home":
61
+            content.extend([
62
+                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),
63
+                ("Filmas latviski - visas", "cinemalive::filmaslatviski/visas/lapa/1","","Filmas latviski - visas"),
64
+                ("Filmas angliski", "cinemalive::home_en","","Filmas angliski"),
65
+                ("Filmas latviski - jaunākās", "cinemalive::filmaslatviski/jaunakas/lapa/1","","Filmas latviski - jaunākās"),
66
+                ("Filmas latviski - vertētākās", "cinemalive::filmaslatviski/vertetakas/lapa/1","","Filmas latviski - vērtētākās"),
67
+                ("Filmas latviski - skatitakās", "cinemalive::filmaslatviski/skatitakas/lapa/1","","Filmas latviski - skatītākās"),
68
+            ])
69
+            r = self.call("filmaslatviski")
70
+            for item in re.findall(r'<li class="nav-submenu-item"><a href="/([\w/]+)">(.+?)</a></li>', r):
71
+                title = "Filmas latviski - "+item[1]
72
+                data2 = item[0]+"/lapa/1"
73
+                img = self.img
74
+                desc = title
75
+                content.append((title,self.name+"::"+data2,img,desc))
76
+            return content
77
+
78
+        elif clist=="home_en":
79
+            content.extend([
80
+                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),
81
+                ("Movies English - all", "cinemalive::moviesenglish/all/page/1","","Movies English - all"),
82
+                ("Movies Latvian", "cinemalive::home","","Filmas latviski"),
83
+                ("Movies English - newest", "cinemalive::moviesenglish/newestmovies/page/1","","Movies English - newest"),
84
+                ("Movies English - top rated", "cinemalive::moviesenglish/toprated/page/1","","Movies English - top rated"),
85
+                ("Movies English - most watched", "cinemalive::moviesenglish/mostwatched/page/1","","Movies English - most watched"),
86
+            ])
87
+            r = self.call("moviesenglish")
88
+            for item in re.findall(r'<li class="nav-submenu-item"><a href="/([\w/]+)">(.+?)</a></li>', r):
89
+                title = "Movies English - "+item[1]
90
+                data2 = item[0]+"/page/1"
91
+                img = self.img
92
+                desc = title
93
+                content.append((title,self.name+"::"+data2,img,desc))
94
+            return content
95
+
96
+
97
+        elif "search.php" in data:
98
+
99
+            r=self.call(path,params=params[1:],headers=self.headers2)
100
+            result = re.findall(r'<div class="results.+?<a href="https://cinemalive\.tv/(.+?)">.+?<img src="(.+?)".+?<span style="color:#bcbcbc">([^<]+)</span> <span style="color:#5a606d;font-size:12px;">([^<]+)</span><br/>.+?<p class="dec" style="font-size:12px; color:#777;line-height:14px;">([^<]+)</p>', r, re.DOTALL)
101
+            for item in result:
102
+                title = item[2]
103
+                title0 = re.sub(" \(\d+\)","",title)
104
+                if title0 == item[3]:
105
+                    title = title+" [EN]"
106
+                else:
107
+                    title = title + "/"+ item[3]+" [LV]"
108
+                title = util.unescape(title)
109
+                data2 = item[0]
110
+                img = item[1].replace("xs.","sm.")
111
+                desc = util.unescape(item[4])
112
+                content.append((title,self.name+"::"+data2,img,desc))
113
+            return content
114
+
115
+        elif clist in ("filmaslatviski","moviesenglish"):
116
+            r = self.call(data)
117
+            if not r:
118
+                return content
119
+            result = re.findall(r'<div class="base-used">.+?<a href="https://cinemalive.tv/([^"]+)">.+?<img class="img-thumbnail" src="/([^"]+)" alt="([^"]+)"/>.+?<p class="year">(\d+)</p>', r, re.DOTALL)
120
+            for item in result:
121
+                title = item[2] + " (%s)"%item[3]
122
+                data2 = item[0]
123
+                img = "https://cinemalive.tv/"+item[1]
124
+                title = util.unescape(title)
125
+                desc = title
126
+                content.append((title,self.name+"::"+data2,img,desc))
127
+            m = re.search(r"""<a href='https://cinemalive\.tv/([^']+)' style="border-right:none;">»</a>""", r, re.DOTALL)
128
+            if m:
129
+                data2 = m.group(1)
130
+                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
131
+            return content
132
+
133
+        else:
134
+            return content
135
+
136
+    def is_video(self,data):
137
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
138
+        if clist=="movie":
139
+            return True
140
+        else:
141
+            return False
142
+
143
+    def get_streams(self, data):
144
+        print "[cinemalive] get_streams:", data
145
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
146
+        r = self.call(path)
147
+        if not r:
148
+            return []
149
+        streams = []
150
+        title0 = re.search("<title>([^<]+)</title>", r).group(1)
151
+        lang = "LV" if "Filma Online Latviski" in title0 else "EN"
152
+        title = title0.replace(" - Filma Online Latviski","").replace(" - Movie Online English HD","")
153
+        desc = re.search('<p class="plot">(.+?)</p>', r).group(1)
154
+        img = "http://cinemalive.tv"+re.search('<img src="(.+?)" class="img-thumbnail"', r).group(1)
155
+
156
+        m = re.search(r'<video id=.+?<source src="([^"]+\.mp4)"', r, re.DOTALL)
157
+        if m:
158
+            s = util.item()
159
+            s["url"] = m.group(1)
160
+            s["name"] = util.unescape(title)
161
+            s["desc"] = util.unescape(desc)
162
+            s["img"] = img
163
+            s["type"] = self.stream_type(s["url"])
164
+            s["lang"] = lang
165
+            return [s]
166
+
167
+        #m = re.search('<div class="viboom-overroll"><iframe src="([^"]+)"', r)
168
+        #if m:
169
+        result = re.findall('<div id="video_container"><iframe.+?src="(.+?)"', r)
170
+        if result:
171
+            streams = resolve(result[0])
172
+            for s in streams:
173
+                s["name"] = util.unescape(title)
174
+                s["desc"] = util.unescape(desc)
175
+                s["img"] = img
176
+                s["type"] = self.stream_type(s["url"])
177
+                s["lang"] = lang
178
+            if len(result)>1:
179
+                lang2 = "EN" if lang=="LV" else "LV"
180
+                streams2 = resolve(result[1])
181
+                for s in streams2:
182
+                    s["name"] = util.unescape(title)
183
+                    s["desc"] = util.unescape(desc)
184
+                    s["img"] = img
185
+                    s["type"]= self.stream_type(s["url"])
186
+                    s["lang"] = lang2
187
+                    streams.append(s)
188
+            return streams
189
+        else:
190
+            return []
191
+
192
+
193
+if __name__ == "__main__":
194
+    country= "lv"
195
+    c = Source(country)
196
+    if len(sys.argv)>1:
197
+        data= sys.argv[1]
198
+    else:
199
+        data = "home"
200
+    content = c.get_content(data)
201
+    for item in content:
202
+        print item
203
+    #cat = api.get_categories(country)
204
+    #chan = api.get_channels("lv")
205
+    #prog = api.get_programs(channel=6400)
206
+    #prog = api.get_programs(category=55)
207
+    #seas = api.get_seasons(program=6453)
208
+    #str = api.get_streams(660243)
209
+    #res = api.get_videos(802)
210
+    #formats = api.getAllFormats()
211
+    #det = api.detailed("1516")
212
+    #vid = api.getVideos("13170")
213
+    pass

+ 115
- 0
sources/config.py

@@ -0,0 +1,115 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+import os.path,re
9
+import collections
10
+from SourceBase import SourceBase
11
+
12
+os.path.dirname(os.path.abspath(__file__))
13
+class Source(SourceBase):
14
+
15
+    def __init__(self,country="lv",cfg_path=None):
16
+        self.name = "config"
17
+        self.country=country
18
+        cur_directory = os.path.dirname(os.path.abspath(__file__))
19
+        if not cfg_path: cfg_path = cur_directory
20
+        self.streams_file = os.path.join(cfg_path,"streams.cfg")
21
+        self.lists = collections.OrderedDict()
22
+        self.titles = {}
23
+        self.read_streams()
24
+
25
+    def get_content(self, data):
26
+        print "[config] get_content",data
27
+        self.read_streams()
28
+        if "::" in data:
29
+            data = data.split("::")[1]
30
+        if not data in self.lists:
31
+            return []
32
+        return self.lists[data]
33
+
34
+    def is_video(self,data):
35
+        return False
36
+
37
+    def read_streams(self):
38
+        for line in open(self.streams_file,"r"):
39
+            r = re.search("^\[(\w+)\]", line)
40
+            if r:
41
+                name = r.group(1)
42
+                self.lists[name] = []
43
+            else:
44
+                if line[0] in ("#"): continue
45
+                items = line.strip().split("|")
46
+                if not items[0]: continue
47
+                if len(items)==1:
48
+                    self.titles[name] = items[0]
49
+                else:
50
+                    if len(items) == 4:
51
+                        items[3] = items[3].replace("\\n","\n")
52
+                    self.lists[name].append(items)
53
+
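Judging from the parser above, streams.cfg is a simple INI-like file: one [section] per list, an optional bare title line, then one pipe-separated name|data|img|desc entry per line. A made-up example (the entries mirror the commented-out calls at the bottom of this file):

# [home]
# My Streams|config::my_streams|default|Mani saglabātie TV kanāli un video
# LTV1|http://streamltv.cloudy.services/ltv/LTV02.smil/playlist.m3u8|picons/latvia1.png|Latvijas televīzijas 1.kanāls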
54
+    def write_streams(self):
55
+        f = open(self.streams_file,"w")
56
+        for l in self.lists.keys():
57
+            f.write("[%s]\n"%l)
58
+            t = self.get_title(l)
59
+            if t<>l:
60
+                f.write("%s\n"%t)
61
+            for item in self.lists[l]:
62
+                f.write("%s|%s|%s|%s\n"%(item[0].replace("\n",""),item[1],item[2],item[3].replace("\n","\\n")))
63
+            f.write("\n")
64
+        f.close()
65
+
66
+    def get_lists(self):
67
+        return self.lists.keys()
68
+
69
+    def get_list_items(self,name):
70
+        return self.lists[name]
71
+
72
+    def get_title(self,name):
73
+        if name in self.titles:
74
+            return self.titles[name]
75
+        else:
76
+            return name
77
+
78
+    def add_list(self,name):
79
+        if not name in self.lists.keys():
80
+            self.lists[name] = []
81
+
82
+    def del_list(self,name):
83
+        if name in self.lists.keys():
84
+            del self.lists[name]
85
+
86
+    def add_item(self,name,item,pos=None):
87
+        if name in self.lists.keys():
88
+            if pos==None:
89
+                self.lists[name].append(item)
90
+            else:
91
+                self.lists[name].insert(pos,item)
92
+
93
+    def del_item(self,name,pos):
94
+        self.lists[name].pop(pos)
95
+
96
+    def replace_item(self,name,item,pos):
97
+        self.lists[name][pos]=item
98
+
99
+
100
+if __name__ == "__main__":
101
+    c = Source()
102
+    content = c.get_content("home")
103
+    for item in content: print item
104
+    #c.del_item("home",0)
105
+    #c.del_list("favorites")
106
+
107
+    #c.add_item("home",("My Streams","config::favorites","","Mani saglabātie TV kanāli un video"),0)
108
+    c.replace_item("home",("My Streams","config::my_streams","default","Mani saglabātie TV kanāli un video"),0)
109
+    #c.add_list("favorites")
110
+    #c.add_item("favorites",("..return","back","","Atgriezties atpakaļ"))
111
+    #c.add_item("favorites",("LTV1","http://streamltv.cloudy.services/ltv/LTV02.smil/playlist.m3u8","picons/latvia1.png", "Latvijas televīzijas 1.kanāls"))
112
+
113
+    c.write_streams()
114
+    for item in content: print item
115
+

+ 305
- 0
sources/euronews.py

@@ -0,0 +1,305 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+
13
+import urllib2, urllib
14
+import datetime, time,re, sys,os
15
+from collections import OrderedDict
16
+from SourceBase import SourceBase
17
+import util
18
+import ssl
19
+if "_create_unverified_context" in dir(ssl):
20
+    ssl._create_default_https_context = ssl._create_unverified_context
21
+
22
+
23
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
24
+import HTMLParser
25
+h = HTMLParser.HTMLParser()
26
+
27
+class Source(SourceBase):
28
+
29
+    def __init__(self,language="en",cfg_path=None):
30
+        self.name = "euronews"
31
+        self.title = "Euronews"
32
+        self.img = "http://pbs.twimg.com/profile_images/732665354242150400/tZsCnjuh_400x400.jpg"
33
+        self.desc = "Euronews live and archive"
34
+        self.headers = headers2dict("""
35
+User-Agent: Euronews/4.0.126
36
+Content-Type: application/json
37
+Connection: keep-alive
38
+        """)
39
+        #self.language=language
40
+        cur_directory = os.path.dirname(os.path.abspath(__file__))
41
+        if not cfg_path: cfg_path = cur_directory
42
+        self.config_file = os.path.join(cfg_path,self.name+".cfg")
43
+        self.options = OrderedDict([("language","en")])
44
+        self.options_read()
45
+        self.vid={"1": "News", "2": "European Affairs", "3": "Lifestyle", "4": "Knowledge"}
46
+        self.languages = []
47
+        try:
48
+            self.get_languages()
49
+        except:
50
+            pass
51
+
52
+    def login(self,user="",password=""):
53
+        return True
54
+
55
+    def get_content(self, data):
56
+        print "[%s] get_content:"%self.name, data
57
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
58
+        #lang = qs["lang"] if "lang" in qs else self.language
59
+        lang = self.options["language"]
60
+        if not lang in self.get_languages():
61
+            raise Exception("Not valid default language - '%s'"%lang)
62
+
63
+        content=[]
64
+        content.append(("..return", "back","","Return back"))
65
+
66
+        if clist=="home":
67
+            content.extend([
68
+                ("Search", "euronews::content/getSearch?lang=%s&byPage=40&page=1&text={0}"%lang,self.img,"Top stories timeline"),
69
+                ("Live stream", "euronews::live?lang=%s"%lang,self.img,"Euronews live stream"),
70
+                ("Just in", "euronews::content/getTimeline?lang=%s&byPage=40&page=1"%lang,self.img,"News timeline"),
71
+                ("Top stories", "euronews::content/getTopStories?lang=%s"%lang,self.img,"Top stories timeline"),
72
+                ("Category - News", "euronews::content/getVertical?lang=%s&byPage=40&page=1&vId=1"%lang,self.img,"Category - News"),
73
+                ("Category - European Affairs", "euronews::content/getVertical?lang=%s&byPage=40&page=1&vId=2"%lang,self.img,"Category - European Affairs"),
74
+                ("Category - Lifestyle", "euronews::content/getVertical?lang=%s&byPage=40&page=1&vId=3"%lang,self.img,"Category - Lifestyle"),
75
+                ("Category - Knowledge", "euronews::content/getVertical?lang=%s&byPage=40&page=1&vId=4"%lang,self.img,"Category - Knowledge"),
76
+                ("Latest programs", "euronews::content/getLatestPrograms?lang=%s&byPage=40&page=1"%lang,self.img,"Latest programs"),
77
+                ("Programs list", "euronews::content/getPrograms?lang=%s"%lang,self.img,"Programs list"),
78
+             ])
79
+            return content
80
+
81
+
82
+        ### Video archive ###
83
+        elif clist=="content":
84
+            if "lang" in qs:
85
+                del qs["lang"]
86
+            params = json.dumps(qs)
87
+
88
+            req = '{"methodName":"content.%s","apiKey":"androidPhoneEuronews-1.0","params":%s,"language":"%s"}'%(plist[1],params,lang)
89
+            r = self.call(req)
90
+            if not r:
91
+                return content
92
+            lst = r["timeline"] if "timeline" in r else\
93
+                r["topstorieslist"] if "topstorieslist" in r else\
94
+                r["programs"] if "programs" in r else\
95
+                r["programDetailsList"] if "programDetailsList" in r else\
96
+                r["programlist"] if "programlist" in r else\
97
+                r["articlelist"] if "articlelist" in r else\
98
+                r["verticals"] if "verticals" in r else\
99
+                []
100
+            if not lst:
101
+                return content
102
+
103
+            for item in lst:
104
+                if plist[1] in ("getTimeline"):
105
+                    article = item["article"]
106
+                    atype = item["type"]
107
+                    if item["type"] == "wire":
108
+                        continue # TODO
109
+                else:
110
+                    article = item
111
+                    atype = "article"
112
+                if plist[1]=="getPrograms":
113
+                    title = article["title"]
114
+                    id = article["pId"]
115
+                    desc = title
116
+                    img = "http://static.euronews.com/articles/programs/533x360_%s"%article["img"]
117
+                    data2 = "content/getProgramDetails?lang=%s&byPage=40&page=1&pId=%s"%(lang,id)
118
+                    content.append((title,self.name+"::"+data2,img,desc))
119
+                else:
120
+                    title = article["title"] if "title" in article else article["text"] if "text" in article else "No title"
121
+                    if atype <> "article":
122
+                        title = "[%s] %s"%(atype,title)
123
+                    atime = datetime.datetime.fromtimestamp(int(article["uts"]))
124
+                    #atime = datetime.datetime.fromtimestamp(int(article["uts"])-time.altzone)
125
+                    atime = atime.strftime("%Y-%m-%d %H:%M")
126
+                    vert = self.vid[article["vId"]] if "vId" in article else ""
127
+                    ptitle = article["pTitle"] if "pTitle" in article else ""
128
+                    id = article["id"]
129
+                    desc = "%s\n%s\n%s %s"%(title,atime,vert,ptitle)
130
+                    img = "http://static.euronews.com/articles/%s/399x225_%s.jpg"%(id,id)
131
+                    if not atype in ("breakingnews","wire"):
132
+                        data2 = "content/getArticle?lang=%s&id=%s"%(lang,id)
133
+                    else:
134
+                        data2 = ""
135
+                    content.append((title,self.name+"::"+data2,img,desc))
136
+            if "page=" in data:
137
+                data2 = re.sub("page=\d+","page=%s"%(int(qs["page"])+1),data)
138
+                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
139
+            return content
140
+
141
+
142
+    def is_video(self,data):
143
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
144
+        if path == "live":
145
+            return True
146
+        elif clist=="content" and plist[1]=="getArticle":
147
+            return True
148
+        else:
149
+            return False
150
+
151
+    def get_streams(self, data):
152
+        print "[euronews] get_streams:", data
153
+        if not self.is_video(data):
154
+            return []
155
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
156
+        #lang = qs["lang"] if "lang" in qs else self.language
157
+        lang = self.options["language"]
158
+        if not lang in self.get_languages():
159
+            raise Exception("Not valid default language - '%s'"%lang)
160
+
161
+        streams = []
162
+
163
+        if path == "live":
164
+            url = "http://www.euronews.com/api/watchlive.json" if lang == "en" else "http://%s.euronews.com/api/watchlive.json" % lang
165
+            r = self._http_request(url)
166
+            try:
167
+                js = json.loads(r)
168
+                url = js["url"]
169
+            except:
170
+                raise Exception("No live stream found")
171
+            r = self._http_request(url)
172
+            try:
173
+                js = json.loads(r)
174
+                if not js["status"]=="ok":
175
+                    raise Exception("No live stream found")
176
+            except:
177
+                raise Exception("No live stream found")
178
+
179
+            stream = util.item()
180
+            stream["url"]=js["primary"]
181
+            stream["lang"]= lang
182
+            stream["quality"]="variant"
183
+            stream["name"]="Euronews live [%s]"%lang
184
+            stream["desc"]=stream["name"]
185
+            stream["type"]="hls" #stream_type(url)
186
+            streams.append(stream)
187
+
188
+        elif clist=="content" and plist[1] == "getArticle":
189
+            if "lang" in qs:
190
+                del qs["lang"]
191
+            languages = self.get_languages()
192
+            for lang in languages:
193
+                id = qs["id"]
194
+                req = '{"methodName":"content.getArticle","apiKey":"androidPhoneEuronews-1.0","params":{"id":"%s"},"language":"%s"}'%(id,lang)
195
+                r = self.call(req)
196
+                if not r:
197
+                    raise Exception("No live stream found")
198
+                if not "articlelist" in r:
199
+                    msg = r["label"] if "label" in r else "No article find"
200
+                    continue
201
+                    #raise Exception(msg)
202
+                article = r["articlelist"]
203
+                stream = util.item()
204
+                stream["url"]=article["videoUri"] if "videoUri" in article else ""
205
+                if not stream["url"]:
206
+                    return []
207
+                stream["lang"]=lang
208
+                stream["quality"]="?"
209
+                stream["name"]= article["title"]
210
+                stream["desc"]=article["text"] if "text" in article else article["title"]
211
+                stream["type"]="http" #stream_type(url)
212
+                streams.append(stream)
213
+
214
+        else:
215
+            raise Exception("No live stream found")
216
+
217
+        ### TODO - sort the list so that the best video comes first
218
+        qlist = ["???","lq","mq","hq","hd","variant"]
219
+        llist = ["fr","en","ru","lv"]
220
+        for s in streams:
221
+            if s["lang"]==self.options["language"]:
222
+                s["order"] = 10000
223
+                continue
224
+            lv = llist.index(s["lang"])*10 if s["lang"] in llist else 0
225
+            qv=qlist.index(s["quality"]) if s["quality"] in qlist else 0
226
+            s["order"] = lv+qv
227
+        streams = sorted(streams,key=lambda item: item["order"],reverse=True)
228
+        return streams
229
+
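# Illustrative sketch (not part of the diff above) of the ordering rule used in
# get_streams(): the configured language always wins; otherwise streams are scored
# by language priority (x10) plus quality rank and sorted descending.
def rank_streams(streams, preferred_lang):
    qlist = ["???", "lq", "mq", "hq", "hd", "variant"]
    llist = ["fr", "en", "ru", "lv"]
    for s in streams:
        if s["lang"] == preferred_lang:
            s["order"] = 10000
            continue
        lv = llist.index(s["lang"]) * 10 if s["lang"] in llist else 0
        qv = qlist.index(s["quality"]) if s["quality"] in qlist else 0
        s["order"] = lv + qv
    return sorted(streams, key=lambda item: item["order"], reverse=True)

# rank_streams([{"lang": "en", "quality": "hd"}, {"lang": "ru", "quality": "lq"}], "ru")
# puts the Russian stream first because it matches the preferred language.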
230
+    def get_languages(self):
231
+        if self.languages: return self.languages
232
+        url = "http://www.euronews.com/"
233
+        r = self._http_request(url)
234
+        result = re.findall(r'<option value="([^"]+)"\s*(selected)*\slang="([^"]+)" hreflang="([^"]+)">', r, re.DOTALL)
235
+        languages = []
236
+        for item in result:
237
+            languages.append(item[3])
238
+        if not languages:
239
+            raise Exception("Can not get languages list")
240
+        self.languages = languages
241
+        return self.languages
242
+
243
+
244
+    def get_languages0(self):
245
+        if self.languages: return self.languages
246
+        url = "http://www.euronews.com/api/watchlive.json"
247
+        r = self._http_request(url)
248
+        try:
249
+            js = json.loads(r)
250
+            url = js["url"]
251
+        except:
252
+            raise Exception("Can not get languages list")
253
+        r = self._http_request(url)
254
+        try:
255
+            js = json.loads(r)
256
+            if not js["status"]=="ok":
257
+                raise Exception("Can not get languages list")
258
+        except:
259
+            raise Exception("Can not get languages list")
260
+
261
+        slist = js["primary"]
262
+        self.languages=slist.keys()
263
+        return self.languages
264
+
265
+    def call(self, data,params = None, headers=None):
266
+        if not headers: headers = self.headers
267
+        #if not lang: lang = self.country
268
+        url = "http://api.euronews.com/ipad/"
269
+        headers = headers2dict("""
270
+User-Agent: Euronews/4.0.126
271
+Content-Type: multipart/form-data, boundary=AaB03xBounDaRy; charset=UTF-8
272
+Host: api.euronews.com
273
+Connection: Keep-Alive
274
+        """)
275
+        params = """
276
+--AaB03xBounDaRy
277
+content-disposition: form-data; name=request
278
+
279
+%s
280
+--AaB03xBounDaRy--
281
+"""%data
282
+        content = self._http_request(url, params, headers)
283
+        if content:
284
+            try:
285
+                result = json.loads(content)
286
+                return result
287
+            except Exception, ex:
288
+                return None
289
+        else:
290
+            return None
291
+
292
+
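# Minimal standalone sketch (not part of the diff above) of what call() does: wrap
# the request JSON in a single multipart/form-data part named "request" and POST it
# to the ipad API. The wrapper name euronews_call and the \r\n line endings are
# assumptions for illustration; the plugin builds the body with plain newlines.
import json, urllib2

def euronews_call(method, params, language="en"):
    req = json.dumps({
        "methodName": "content.%s" % method,
        "apiKey": "androidPhoneEuronews-1.0",
        "params": params,
        "language": language,
    })
    body = ("--AaB03xBounDaRy\r\n"
            "content-disposition: form-data; name=request\r\n"
            "\r\n"
            "%s\r\n"
            "--AaB03xBounDaRy--\r\n") % req
    headers = {
        "User-Agent": "Euronews/4.0.126",
        "Content-Type": "multipart/form-data, boundary=AaB03xBounDaRy; charset=UTF-8",
    }
    r = urllib2.urlopen(urllib2.Request("http://api.euronews.com/ipad/", body, headers))
    return json.loads(r.read())

# euronews_call("getTimeline", {"page": "1", "byPage": "30"}) returns the same
# structure ("timeline", "article", "uts", ...) that get_content() walks above.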
293
+if __name__ == "__main__":
294
+    language= "en"
295
+    c = Source(language)
296
+    data = '{"methodName":"content.getTimeline","apiKey":"androidPhoneEuronews-1.0","params":{"page":"1","byPage":"30"},"language":"en"}'
297
+    r = c.call(data)
298
+    if len(sys.argv)>1:
299
+        data= sys.argv[1]
300
+    else:
301
+        data = "home"
302
+    content = c.get_content(data)
303
+    for item in content:
304
+        print item
305
+    pass

+ 412
- 0
sources/filmix.py

@@ -0,0 +1,412 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+
13
+import urllib2, urllib
14
+import datetime, re, sys,os
15
+import ConfigParser
16
+from SourceBase import SourceBase
17
+import base64
18
+from collections import OrderedDict
19
+import sys
20
+import ssl
21
+if "_create_unverified_context" in dir(ssl):
22
+    ssl._create_default_https_context = ssl._create_unverified_context
23
+
24
+try:
25
+    import util
26
+except:
27
+    sys.path.insert(0,'..')
28
+    import util
29
+
30
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
31
+
32
+class Source(SourceBase):
33
+
34
+    def __init__(self,country="",cfg_path=None):
35
+        self.name = "filmix"
36
+        self.title = "filmix.me"
37
+        self.img = "http://cs5324.vk.me/g33668783/a_903fcc63.jpg"
38
+        self.desc = "filmix.me satura skatīšanās"
39
+        self.country=country
40
+        self.headers = headers2dict("""
41
+Host: filmix.me
42
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
43
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
44
+Accept-Language: en-US,en;q=0.5
45
+""")
46
+        self.headers2 = headers2dict("""
47
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
48
+X-Requested-With: XMLHttpRequest
49
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
50
+""")
51
+        self.url = "https://filmix.me/"
52
+        #self.login()
53
+
54
+    def login(self,user="",password=""):
55
+        return True
56
+
57
+    def get_content(self, data):
58
+        print "[filmix] get_content:", data
59
+        source, data, path, plist, clist, params, qs = self.parse_data(data)
60
+        content=[]
61
+        content.append(("..return", "back","","Return back"))
62
+
63
+        if clist=="home":
64
+            content.extend([
65
+                ("Search", "filmix::search/{0}","","Search"),
66
+                ("Movies", "filmix::movies","","Movies"),
67
+                ("Series", "filmix::series","","TV Series"),
68
+                ("Cartoons", "filmix::cartoons","","Cartoons"),
69
+            ])
70
+            return content
71
+
72
+        #elif clist=="search":
73
+            # TODO
74
+            #return content
75
+
76
+        elif data in ("movies","series","cartoons"):
77
+            r = self.call("")
78
+            r = r.decode("cp1251").encode("utf8")
79
+            if data == "movies":
80
+                sname = "Фильмы"
81
+            elif data=="series":
82
+                sname = "Сериалы"
83
+            else:
84
+                sname = "Мультфильмы"
85
+            # <span class="menu-title">Фильмы</span>
86
+            m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span'%sname, r, re.DOTALL|re.UNICODE)
87
+            if not m: return content
88
+            r2 = m.group(1)
89
+            result = re.findall(r'<a .*?href="https://filmix\.me/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
90
+            for item in result:
91
+                if "catalog" in item[0]: continue
92
+                title = item[1]
93
+                data2 = item[0]
94
+                img = self.img
95
+                desc = title
96
+                content.append((title,self.name+"::"+data2,img,desc))
97
+            return content
98
+
99
+        ## TV series
100
+        elif clist=="play":
101
+            r = self.call(path)
102
+            r = r.decode("cp1251").encode("utf8")
103
+            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
104
+            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
105
+            img = m.group(1) if m else self.img
106
+            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
107
+            duration = "(%s)"%m.group(1) if m else ""
108
+            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
109
+            desc = desc0 =  util.unescape(m.group(1).strip()) if m else ""
110
+            vid = plist[-1]
111
+            m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
112
+            key = m.group(3) if m else ""
113
+            js = self.get_movie_info(vid,key)
114
+            translations = js["message"]["translations"]["html5"]
115
+            for pl  in translations:
116
+                if translations[pl].startswith("http"):
117
+                    continue
118
+                pl_link = translations[pl]
119
+                lang = pl.encode("utf8")
120
+                break
121
+            else:
122
+                raise Exception("No episodes list found!")
123
+            #pl_link = js["message"]["translations"]["flash"].values()[0]
124
+            #  TODO process several players, currently taking the first
125
+            #TODO - bug, this no longer works
126
+            if not pl_link.startswith("http"):
127
+                pl_link = self.decode_direct_media_url(pl_link)
128
+            js = self._http_request(pl_link)
129
+            js = self.decode_direct_media_url(js)
130
+            js = json.loads(js)
131
+            if "s" in qs:
132
+                s = int(qs["s"])
133
+                for i,ep in enumerate(js["playlist"][s-1]["playlist"]):
134
+                    title = title0+" - "+js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
135
+                    serie = js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
136
+                    data2 = data+"&e=%s"%(i+1)
137
+                    desc = serie +"\n"+desc0
138
+                    content.append((title,self.name+"::"+data2,img,desc))
139
+            else:
140
+                for i,ep in enumerate(js["playlist"]):
141
+                    title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
142
+                    serie = js["playlist"][i]["comment"].encode("utf8")
143
+                    if "file" in ep and ep["file"]:
144
+                        data2 = data+"?e=%s"%(i+1)
145
+                    else:
146
+                        data2 = data+"?s=%s"%(i+1)
147
+                    desc = serie +"\n"+desc0
148
+                    content.append((title,self.name+"::"+data2,img,desc))
149
+            return content
150
+            #r = self._http_request(url)
151
+
152
+
153
+        ### listing ###
154
+        else:
155
+            r = self.call(data)
156
+            r = r.decode("cp1251").encode("utf8")
157
+            for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
158
+                #m2 = re.search(r'<a class="watch icon-play" itemprop="url" href="([^"]+)"', r2, re.DOTALL)
159
+                #<a class="watch icon-play" itemprop="url" href="https://filmix.me/dramy/110957-stolik-19-2017.html"
160
+                #m = re.search(r'<a href="https://filmix\.me/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
161
+                m = re.search(r'<a class="watch icon-play" itemprop="url" href="https://filmix.me/\w+/(\d+)-', r2, re.DOTALL)
162
+                if not m: continue
163
+                vid = m.group(1)
164
+                data2 = "play/%s"%vid
165
+                #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
166
+                title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
167
+                m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
168
+                if m:
169
+                    title = title + "/"+m.group(1)
170
+                m = re.search('<img src="([^"]+.jpg)"', r2, re.DOTALL)
171
+                img = m.group(1) if m else self.img
172
+                m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
173
+                if m:
174
+                    year = m.group(1) if m else ""
175
+                    title = "%s (%s)"%(title,year)
176
+                title = util.unescape(title)
177
+                genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
178
+                genre = ",".join(genre)
179
+                m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
180
+                desc0 = util.unescape(m.group(1)) if m else ""
181
+                m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
182
+                quality = m.group(1) if m else ""
183
+                actors = re.findall('itemprop="actor">([^<]+)<', r2, re.DOTALL)
184
+                actors = ",".join(actors)
185
+                desc="%s\n%s\n%s\n%s\n%s"%(title,genre,desc0,actors,quality)
186
+                content.append((title,self.name+"::"+data2,img,desc))
187
+            if '<div class="navigation">' in r:
188
+                m = re.search(r'href="https://filmix\.me/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
189
+                if m:
190
+                    data2 = m.group(1)
191
+                else:
192
+                    m = re.search("/page/(\d)+",data)
193
+                    if m:
194
+                        page = int(m.group(1))+1
195
+                        data2 = re.sub("/page/(\d)+", "/page/%s"%page, data)
196
+                    else:
197
+                        data2 = data + "/page/2"
198
+                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
199
+
200
+            return content
201
+
202
+    def is_video(self,data):
203
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
204
+        if clist == "play" and "e=" in data:
205
+            return True
206
+        elif clist=="play" and not params:
207
+            r = self.call(path)
208
+            #r = r.decode("cp1251").encode("utf8")
209
+            #m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
210
+            #if not m:
211
+            if u"Фильм <a href=" in r.decode("cp1251"):
212
+                return True
213
+            else:
214
+                return False
215
+
216
+        else:
217
+            return False
218
+
219
+    def get_streams(self, data):
220
+        print "[filmix] get_streams:", data
221
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
222
+
223
+        r = self.call(path)
224
+        if not r:
225
+            return []
226
+        streams = []
227
+        r = r.decode("cp1251").encode("utf8")
228
+        title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
229
+        m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
230
+        img = m.group(1) if m else self.img
231
+        m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
232
+        duration = "(%s)"%m.group(1) if m else ""
233
+        m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
234
+        desc = desc0 =  util.unescape(m.group(1).strip()) if m else ""
235
+        m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
236
+        if not m:
237
+            raise Exception("Can not find video link")
238
+            #return []
239
+        video_link = m.group(1)
240
+        series = False if u"Фильм <a href=" in r.decode("utf8") else True
241
+        vid = plist[1]
242
+        m = re.search(r"meta_key = \['(\w+)', '(\w+)', '(\w+)'\]", r, re.IGNORECASE)
243
+        key = m.group(3) if m else ""
244
+        js = self.get_movie_info(vid,key)
245
+        translations = js["message"]["translations"]["html5"]
246
+        for pl in translations:
247
+            if translations[pl].startswith("http"):
248
+                continue
249
+            pl_link = translations[pl]
250
+            lang = pl.encode("utf8")
251
+            break
252
+        else:
253
+            raise Exception("No episodes list found!")
254
+        if not pl_link.startswith("http"):
255
+            pl_link = self.decode_direct_media_url(pl_link)
256
+
257
+        if not series : # Movie
258
+            url0 = pl_link
259
+            streams2 = self.get_streams2(url0)
260
+            for st in streams2:
261
+                stream = util.item()
262
+                stream["url"]=st[1]
263
+                stream["lang"]=lang
264
+                stream["quality"]=st[0]
265
+                stream["name"]= title
266
+                stream["desc"]=desc
267
+                streams.append(stream)
268
+            return streams
269
+
270
+        else: # TV series
271
+            #pl_link = video_link
272
+            js = self._http_request(pl_link)
273
+            js = self.decode_direct_media_url(js)
274
+            js = json.loads(js)
275
+            if "e" in qs:
276
+                if "s" in qs:
277
+                    s = int(qs["s"])
278
+                else:
279
+                    s = None
280
+                e = int(qs["e"])
281
+                if s: # season + episode
282
+                    serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
283
+                    title = title0+" - "+ serie
284
+                    url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
285
+                else: # episode only, no season
286
+                    title = title0 +" - "+js["playlist"][e-1]["comment"].encode("utf8")
287
+                    serie = js["playlist"][e-1]["comment"].encode("utf8")
288
+                    url0 = js["playlist"][e-1]["file"].encode("utf8")
289
+                streams2 = self.get_streams2(url0)
290
+                for st in streams2:
291
+                    stream = util.item()
292
+                    stream["url"]=st[1]
293
+                    stream["lang"]=lang
294
+                    stream["quality"]=st[0]
295
+                    stream["name"]= title
296
+                    stream["desc"]=desc
297
+                    streams.append(stream)
298
+                return streams
299
+
300
+    def call(self, data,params=None,headers=None,lang=""):
301
+        if not headers: headers = self.headers
302
+        url = self.url+data
303
+        result = self._http_request(url,params,headers=headers)
304
+        return result
305
+
306
+    def get_movie_info(self,vid,key=""):
307
+        headers = headers2dict("""
308
+    User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
309
+    Accept: application/json, text/javascript, */*; q=0.01
310
+    Accept-Language: en-US,en;q=0.5
311
+    Content-Type: application/x-www-form-urlencoded; charset=UTF-8
312
+    X-Requested-With: XMLHttpRequest
313
+    Referer: https://filmix.me/play/%s
314
+    Cookie: ad_win12=1;
315
+    """%vid )
316
+        post_data = {"post_id":vid,"key=":key}
317
+        r = util.post("https://filmix.me/api/movies/player_data", data=post_data, headers = headers)
318
+        if not r:
319
+            raise Exception("Can not get movie info")
320
+            #return []
321
+        js = json.loads(r)
322
+        return js
323
+
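# Trimmed-down sketch (not part of the diff above) of the get_movie_info() request:
# POST the numeric film id (and the third "meta_key" token scraped from the page) to
# the site's own XHR endpoint; the JSON answer carries message.translations.html5
# with the obfuscated playlist links that decode_direct_media_url() handles below.
# The standalone name player_data is an assumption and site behaviour may change.
import json, urllib, urllib2

def player_data(vid, key=""):
    post = urllib.urlencode({"post_id": vid, "key=": key})
    req = urllib2.Request("https://filmix.me/api/movies/player_data", post, {
        "X-Requested-With": "XMLHttpRequest",
        "Referer": "https://filmix.me/play/%s" % vid,
        "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    })
    return json.loads(urllib2.urlopen(req).read())

# player_data("110957")["message"]["translations"]["html5"] maps translation names
# to encoded playlist URLs (ids such as 110957 appear in the listing pages above).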
324
+    def decode_base64(self, encoded_url):
325
+        codec_a = ("l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "=")
326
+        codec_b = ("w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h")
327
+        i = 0
328
+        for a in codec_a:
329
+            b = codec_b[i]
330
+            i += 1
331
+            encoded_url = encoded_url.replace(a, '___')
332
+            encoded_url = encoded_url.replace(b, a)
333
+            encoded_url = encoded_url.replace('___', b)
334
+        return base64.b64decode(encoded_url)
335
+
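# Sketch (not part of the diff above) of the obfuscation that decode_base64()
# reverses: characters from two fixed alphabets are swapped pairwise and the result
# is base64-encoded. Because a pairwise swap is its own inverse, the same loop both
# encodes and decodes; the helper names below are illustrative only.
import base64

CODEC_A = ("l","u","T","D","Q","H","0","3","G","1","f","M","p","U","a","I","6","k","d","s","b","W","5","e","y","=")
CODEC_B = ("w","g","i","Z","c","R","z","v","x","n","N","2","8","J","X","t","9","V","7","4","B","m","Y","o","L","h")

def swap_alphabets(text):
    for a, b in zip(CODEC_A, CODEC_B):
        text = text.replace(a, "___").replace(b, a).replace("___", b)
    return text

def encode_like_filmix(plain_url):
    return swap_alphabets(base64.b64encode(plain_url))

def decode_like_filmix(encoded_url):
    return base64.b64decode(swap_alphabets(encoded_url))

# decode_like_filmix(encode_like_filmix("http://example.com/video.mp4"))
# round-trips back to the original URL.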
336
+    def decode_unicode(self, encoded_url):
337
+        from itertools import izip_longest
338
+        def grouper(n, iterable, fillvalue=None):
339
+            "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
340
+            args = [iter(iterable)] * n
341
+            return izip_longest(fillvalue=fillvalue, *args)
342
+
343
+        _ = (encoded_url[1:] if encoded_url.find('#') != -1 else encoded_url)
344
+        tokens = map(lambda items: '\u0'+''.join(items), grouper(3, _))
345
+        return ''.join(tokens).decode('unicode_escape')
346
+
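# Sketch (not part of the diff above) of the second obfuscation format handled by
# decode_unicode(): strings starting with "#" are a run of 3-hex-digit code points,
# each turned back into a character via a "\u0xxx" escape. The encoder below is an
# illustrative inverse, assuming every code point fits in three hex digits.
def encode_unicode_like_filmix(text):
    return "#" + "".join("%03x" % ord(ch) for ch in text)

# encode_unicode_like_filmix("http://x") -> "#06807407407003a02f02f078",
# which decode_unicode() maps back to u"http://x".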
347
+    def decode_direct_media_url(self, encoded_url, checkhttp=False):
348
+        if(checkhttp == True and (encoded_url.find('http://') != -1 or encoded_url.find('https://') != -1)):
349
+            return False
350
+
351
+        try:
352
+            if encoded_url.find('#') != -1:
353
+                return self.decode_unicode(encoded_url)
354
+            else:
355
+                return self.decode_base64(encoded_url)
356
+        except:
357
+            return False
358
+
359
+
360
+
361
+    def decode_uppod_text(self, text):
362
+        Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
363
+        Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
364
+        text = text.replace("\n", "").strip()
365
+        for i in range(len(Client_codec_a)):
366
+            char1 = Client_codec_b[i]
367
+            char2 = Client_codec_a[i]
368
+            text = text.replace(char1, "___")
369
+            text = text.replace(char2, char1)
370
+            text = text.replace("___", char2)
371
+        result = base64.b64decode(text)
372
+        print result
373
+        return result
374
+
375
+    def get_streams2(self,url0):
376
+        m = re.search("\[([\d\w,]+)\]",url0)
377
+        if not m:
378
+            return [("?",url0)]
379
+        res = m.group(1)
380
+        streams=[]
381
+        for res in res.split(","):
382
+            if not res: continue
383
+            if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
384
+            url=re.sub("\[[\d\w,]+\]",res,url0)
385
+            streams.append((res,url))
386
+        return streams
387
+
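# Sketch (not part of the diff above) of the URL expansion done by get_streams2():
# filmix encodes every available quality in one URL such as
# http://example.com/video_[480,720,1080p].mp4 (the host is only an example), and
# one (quality, url) pair is produced per resolution, with 1080p skipped because it
# needs a PRO+ account.
import re

def expand_quality_url(url0):
    m = re.search(r"\[([\d\w,]+)\]", url0)
    if not m:
        return [("?", url0)]
    streams = []
    for res in m.group(1).split(","):
        if not res or res == "1080p":
            continue
        streams.append((res, re.sub(r"\[[\d\w,]+\]", res, url0)))
    return streams

# expand_quality_url("http://example.com/video_[480,720].mp4")
# -> [("480", "http://example.com/video_480.mp4"), ("720", "http://example.com/video_720.mp4")]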
388
+
389
+if __name__ == "__main__":
390
+
391
+    c = Source()
392
+    #s = "ZnVuY3Rpb24gc2VuZE1lc3NhZ2U2MDc3ODkoZSl7dmFyIGg9bWdfd3M2MDc3ODkub25tZXNzYWdlOyBtZ193czYwNzc4OS5yZWFkeVN0YXRlPT1tZ193czYwNzc4OS5DTE9TRUQmJihtZ193czYwNzc4OT1uZXcgV2ViU29ja2V0KG1nX3dzNjA3Nzg5X2xvY2F0aW9uKSksbWdfd3M2MDc3ODkub25tZXNzYWdlPWgsd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkobWdfd3M2MDc3ODksZnVuY3Rpb24oKXttZ193czYwNzc4OS5zZW5kKGUpfSl9ZnVuY3Rpb24gd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KXtzZXRUaW1lb3V0KGZ1bmN0aW9uKCl7cmV0dXJuIDE9PT1lLnJlYWR5U3RhdGU/dm9pZChudWxsIT10JiZ0KCkpOnZvaWQgd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KX0sNSl9OyB2YXIgbWdfd3M2MDc3ODlfbG9jYXRpb24gPSAid3NzOi8vd3NwLm1hcmtldGdpZC5jb20vd3MiOyBtZ193czYwNzc4OSA9IG5ldyBXZWJTb2NrZXQobWdfd3M2MDc3ODlfbG9jYXRpb24pLCBtZ193czYwNzc4OS5vbm1lc3NhZ2UgPSBmdW5jdGlvbiAodCkge3Bvc3RNZXNzYWdlKHQuZGF0YSk7fSwgb25tZXNzYWdlID0gZnVuY3Rpb24oZSl7c2VuZE1lc3NhZ2U2MDc3ODkoZS5kYXRhKX0="
393
+
394
+    #txt = c.decode_uppod_text(s)
395
+    if len(sys.argv)>1:
396
+        data= sys.argv[1]
397
+    else:
398
+        data = "home"
399
+    content = c.get_content(data)
400
+    for item in content:
401
+        print item
402
+    #cat = api.get_categories(country)
403
+    #chan = api.get_channels("lv")
404
+    #prog = api.get_programs(channel=6400)
405
+    #prog = api.get_programs(category=55)
406
+    #seas = api.get_seasons(program=6453)
407
+    #str = api.get_streams(660243)
408
+    #res = api.get_videos(802)
409
+    #formats = api.getAllFormats()
410
+    #det = api.detailed("1516")
411
+    #vid = api.getVideos("13170")
412
+    pass

+ 276
- 0
sources/filmon.py

@@ -0,0 +1,276 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+
13
+import urllib2, urllib
14
+import datetime, re, sys
15
+from SourceBase import SourceBase
16
+import ssl
17
+if "_create_unverified_context" in dir(ssl):
18
+    ssl._create_default_https_context = ssl._create_unverified_context
19
+
20
+API_URL = 'http://www.filmon.com/'
21
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
22
+#User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
23
+headers0 = headers2dict("""
24
+User-Agent: Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46
25
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
26
+Accept-Language: en-US,en;q=0.5
27
+Accept-Encoding: deflate
28
+Connection: keep-alive
29
+""")
30
+import HTMLParser
31
+h = HTMLParser.HTMLParser()
32
+
33
+class Source(SourceBase):
34
+
35
+    def __init__(self,country="lv",cfg_path=None):
36
+        #self.hidden = True
37
+        self.name = "filmon"
38
+        self.title = "FilmOn"
39
+        self.img = "http://behindthegloves.com/wp-content/uploads/2016/01/FilmOn-logo1.jpg"
40
+        self.desc = "FilmOn portāla satura skatīšanās"
41
+        self.headers = headers0
42
+
43
+        self.country=country
44
+        self.jstv = None
45
+        self.session_key = None
46
+        self.cookie = None
47
+
48
+    def get_content(self, data):
49
+        print "[filmon] get_content:", data
50
+        if "::" in data:
51
+            data = data.split("::")[1]
52
+        path = data.split("?")[0]
53
+        clist = path.split("/")[0]
54
+        params = data[data.find("?"):] if "?" in data else ""
55
+        qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",params)))
56
+        lang = qs["lang"] if "lang" in qs else self.country
57
+
58
+        if not self.jstv:
59
+            self.jstv = self.get_tv_channels()
60
+        #if not self.session_key: # TODO seems to work without it, at least when not logged in; needs checking
61
+        #    html = self._http_request("http://www.filmon.com/api/init")
62
+        #    js = json.loads(html)
63
+        #    self.session_key = js["session_key"]
64
+
65
+        content=[]
66
+        content.append(("..return", "back","","Return back"))
67
+
68
+        if clist=="home":
69
+            content.extend([
70
+                ("Live streams", "filmon::tv","","TV live streams"),
71
+                ("Video on demand", "filmon::vod","","Last videos"),
72
+            ])
73
+            return content
74
+
75
+        ### TV Groups ###
76
+        elif clist in ("tv","home"):
77
+            for gr in self.jstv:
78
+                title = gr["name"].encode("utf8")
79
+                data2 = "group?id=%s"%gr["id"]
80
+                img = gr["logo_148x148_uri"].encode("utf8")
81
+                desc = gr["description"].encode("utf8")
82
+                content.append((title,self.name+"::"+data2,img,desc))
83
+            return content
84
+
85
+        ### TV group channels ###
86
+        elif clist=="group":
87
+            if "id" in qs:
88
+                group_id = qs["id"]
89
+            else:
90
+                return content
91
+            group = None
92
+            for gr in self.jstv:
93
+                if gr["id"]==group_id:
94
+                    group = gr
95
+                    break
96
+            if not group:
97
+                return content
98
+            for ch in group["channels"]:
99
+                title = ch["title"].encode("utf8")
100
+                data2 = "channel?id=%s"%ch["id"]
101
+                img = ch["big_logo"].encode("utf8")
102
+                desc = ch["description"].encode("utf8") if ch["description"] else title
103
+                content.append((title,self.name+"::"+data2,img,desc))
104
+            return content
105
+
106
+        ### TV Channel ###
107
+        elif clist == "channel" or clist == "video":
108
+            if "id" in qs:
109
+                ch_id = qs["id"]
110
+            else:
111
+                return ("No stream found %s"%data,"","","No stream found")
112
+            ch = self.get_tv_channel_info(ch_id)
113
+            if ch["now_playing"]:
114
+                current_event = ch["now_playing"]["programme_name"] if "programme_name" in ch["now_playing"] else ""
115
+            else:
116
+                current_event = ""
117
+            title = u"%s - %s"%(ch["title"],current_event)
118
+            title = title.encode("utf8")
119
+            if current_event:
120
+                desc = ch["now_playing"]["programme_description"].encode("utf8")
121
+            else:
122
+                desc = title
123
+            data2 = ""
124
+            for t in ("SD","HD"):
125
+                for s in ch["streams"]:
126
+                    if s["name"]==t:
127
+                        data2 = s["url"].encode("utf8")
128
+                        break
129
+                if data2: break
130
+            return (title,data2,"",desc)
131
+
132
+        ### VOD genres ###
133
+        elif path in ("vod","vod/genres"):
134
+            data = "vod/genres"
135
+            js = self.call(data)
136
+            for gr in js["response"]:
137
+                title = gr["name"].encode("utf8")
138
+                data2 = "vod/search?genre=%s&max_results=30&no_episode=true&start_index=0"%(gr["slug"].encode("utf8"))
139
+                img = gr["images"][0]["url"].encode("utf8")
140
+                desc = gr["description"].encode("utf8") if gr["description"] else title
141
+                content.append((title,self.name+"::"+data2,img,desc))
142
+            return content
143
+
144
+        ### VOD genre videos ###
145
+        elif path == "vod/search":
146
+            js = self.call(data)
147
+            for vid in js["response"]:
148
+                title = vid["title"].encode("utf8")
149
+                if vid["type"]=="series":
150
+                    title = "[Series] "+title
151
+                data2 = "vod/movie?id=%s&type=%s"%(vid["id"],vid["type"].encode("utf8"))
152
+                img = "http://static.filmon.com/assets/"+vid["poster"]["couchdb_url"].encode("utf8")
153
+                desc = vid["description"].encode("utf8") if vid["description"] else title
154
+                content.append((title,self.name+"::"+data2,img,desc))
155
+            start_index = int(qs["start_index"]) if "start_index" in qs else 0
156
+            if start_index+js["total"]<js["total_found"]:
157
+                start_index += 30
158
+                data2 = re.sub("start_index=\d+","start_index=%s"%start_index,data) if "start_index" in qs else data +"&start_index=30"
159
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
160
+            return content
161
+
162
+        ### VOD video single/series ###
163
+        elif path == "vod/movie":
164
+            js = self.call(data)
165
+            if js["response"]["type"] == "series":
166
+                ids = ",".join(js["response"]["episodes"])
167
+                data2 = "vod/movies?ids=%s"%ids
168
+                js2 = self.call(data2)
169
+                for vid in js2["response"]:
170
+                    title = vid["title"].encode("utf8")
171
+                    if vid["type"]=="series":
172
+                        title = "[Series] "+title
173
+                    data2 = "vod/movie?id=%s&type=%s"%(vid["id"],vid["type"].encode("utf8"))
174
+                    img = "http://static.filmon.com/assets/"+vid["poster"]["couchdb_url"].encode("utf8")
175
+                    desc = vid["description"].encode("utf8") if vid["description"] else title
176
+                    content.append((title,self.name+"::"+data2,img,desc))
177
+                return content
178
+            else:
179
+                title = js["response"]["title"].encode("utf8")
180
+                desc = js["response"]["description"].encode("utf8") if js["response"]["description"] else title
181
+                data2 = js["response"]["streams"]["low"]["url"].encode("utf8")
182
+                return (title,data2,"",desc)
183
+
184
+    def is_video(self,data):
185
+        if "::" in data:
186
+            data = data.split("::")[1]
187
+        cmd = data.split("?")
188
+        if cmd[0] in ("video","channel"):
189
+            return True
190
+        elif cmd[0] == "vod/movie" and "type=movie" in data:
191
+            return True
192
+        else:
193
+            return False
194
+
195
+    def call(self, data,headers=headers0,lang=""):
196
+        if not lang: lang = self.country
197
+        url = "http://www.filmon.com/api/" + data
198
+        #if not "?" in url: url += "?session_key=%s"%self.session_key
199
+        #if not "session_key=" in url: url += "&session_key=%s"%self.session_key
200
+        #print "[TVPlay Api] url: ",url
201
+        result = []
202
+        content = self._http_request(url)
203
+        if content:
204
+            try:
205
+                result = json.loads(content)
206
+            except Exception, ex:
207
+                return None
208
+        return result
209
+
210
+    #----------------------------------------------------------------------
211
+    def get_tv_channel_info(self,id):
212
+        url = "http://www.filmon.com/ajax/getChannelInfo"
213
+        headers = headers2dict("""
214
+Host: www.filmon.com
215
+User-Agent: Mozilla/5.0 (Windows NT 6.1; rv:41.0) Gecko/20100101 Firefox/41.0
216
+Accept: application/json, text/javascript, */*; q=0.01
217
+Accept-Language: en-US,en;q=0.5
218
+Accept-Encoding: deflate
219
+DNT: 1
220
+Content-Type: application/x-www-form-urlencoded; charset=UTF-8
221
+X-Requested-With: XMLHttpRequest
222
+Referer: http://www.filmon.com/tv/live
223
+Connection: keep-alive
224
+Pragma: no-cache
225
+Cache-Control: no-cache
226
+""")
227
+        headers["Cookie"] = self.cookie
228
+        data = "channel_id=%s&quality=low"%id
229
+        response = urllib2.urlopen(urllib2.Request(url, headers=headers,data=data))
230
+        html =  response.read()
231
+        js = json.loads(html)
232
+        return js
233
+
234
+    #----------------------------------------------------------------------
235
+    def get_tv_channels(self):
236
+        """Get tv channels list"""
237
+        headers = headers2dict("""
238
+Host: www.filmon.com
239
+User-Agent: Mozilla/5.0 (Windows NT 6.1; rv:41.0) Gecko/20100101 Firefox/41.0
240
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
241
+Accept-Language: en-US,en;q=0.5
242
+Accept-Encoding: deflate
243
+DNT: 1
244
+Connection: keep-alive
245
+    """)
246
+
247
+        url = "http://www.filmon.com/tv"
248
+        response = urllib2.urlopen(urllib2.Request(url, headers=headers))
249
+        if "set-cookie" in response.headers:
250
+            self.cookie = response.headers["set-cookie"]
251
+        html =  response.read()
252
+        s = re.search("(?i)var groups = (.*);", html).groups(1)[0]
253
+        js = json.loads(s)
254
+        return js
255
+
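# Sketch (not part of the diff above) of the scrape done by get_tv_channels(): the
# channel groups are not fetched from the JSON API but read from an inline
# "var groups = [...];" assignment on http://www.filmon.com/tv, so the extraction is
# one regex plus json.loads. The sample string below is illustrative only.
import re, json

def extract_groups(html):
    m = re.search(r"(?i)var groups = (.*);", html)
    return json.loads(m.group(1)) if m else []

sample = 'var groups = [{"id": "1", "name": "News", "channels": []}];'
# extract_groups(sample) -> [{u"id": u"1", u"name": u"News", u"channels": []}]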
256
+if __name__ == "__main__":
257
+    country= "lv"
258
+    c = Source(country)
259
+    if len(sys.argv)>1:
260
+        data= sys.argv[1]
261
+    else:
262
+        data = "home"
263
+    content = c.get_content(data)
264
+    for item in content:
265
+        print item
266
+    #cat = api.get_categories(country)
267
+    #chan = api.get_channels("lv")
268
+    #prog = api.get_programs(channel=6400)
269
+    #prog = api.get_programs(category=55)
270
+    #seas = api.get_seasons(program=6453)
271
+    #str = api.get_streams(660243)
272
+    #res = api.get_videos(802)
273
+    #formats = api.getAllFormats()
274
+    #det = api.detailed("1516")
275
+    #vid = api.getVideos("13170")
276
+    pass

+ 559
- 0
sources/iplayer.py

@@ -0,0 +1,559 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+import sys, os, os.path, re
9
+import urllib,urllib2
10
+from xml.sax.saxutils import unescape,escape
11
+from urllib import quote, unquote
12
+import datetime
13
+import HTMLParser
14
+import json
15
+import datetime,time
16
+from SourceBase import SourceBase, stream_type
17
+import util
18
+from collections import OrderedDict
19
+import ssl
20
+if "_create_unverified_context" in dir(ssl):
21
+    ssl._create_default_https_context = ssl._create_unverified_context
22
+
23
+API_URL = 'https://m.lattelecom.tv/'
24
+user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
25
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
26
+h = HTMLParser.HTMLParser()
27
+
28
+class Source(SourceBase):
29
+
30
+    def __init__(self,cfg_path=None):
31
+        self.name = "iplayer"
32
+        self.title = "BBC iPlayer"
33
+        self.img = "http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png"
34
+        self.desc = "BBC iPlayer portal content"
35
+
36
+        self.api_url = "http://ibl.api.bbci.co.uk/ibl/v1/"
37
+        self.headers = headers2dict("""
38
+User-Agent: BBCiPlayer/4.19.0.3021 (SM-G900FD; Android 4.4.2)
39
+Connection: Keep-Alive
40
+        """)
41
+        self.headers2 = headers2dict("""
42
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
43
+Connection: Keep-Alive
44
+        """)
45
+
46
+        self.ch = []
47
+        self.ch_id={}
48
+        self.ch_id2={}
49
+        self.ch_name={}
50
+        self.logos ={
51
+            "bbc_one_london":"http://www.lyngsat-logo.com/hires/bb/bbc_one.png",
52
+            "bbc_two_england":"http://www.lyngsat-logo.com/hires/bb/bbc_two_uk.png",
53
+            "bbc_three":"http://www.lyngsat-logo.com/hires/bb/bbc_three_uk.png",
54
+            "bbc_four":"http://www.lyngsat-logo.com/hires/bb/bbc_four_uk.png",
55
+            "bbc_radio_one":"http://www.lyngsat-logo.com/hires/bb/bbc_radio1.png",
56
+            "cbbc":"http://www.lyngsat-logo.com/hires/bb/bbc_cbbc.png",
57
+            "cbeebies":"http://www.lyngsat-logo.com/hires/bb/bbc_cbeebies_uk.png",
58
+            "bbc_news24":"http://www.lyngsat-logo.com/hires/bb/bbc_news.png",
59
+            "bbc_parliament":"http://www.lyngsat-logo.com/hires/bb/bbc_parliament.png",
60
+            "bbc_alba":"http://www.lyngsat-logo.com/hires/bb/bbc_alba.png",
61
+            "s4cpbs":"http://www.lyngsat-logo.com/hires/ss/s4c_uk.png"
62
+        }
63
+        cur_directory = os.path.dirname(os.path.abspath(__file__))
64
+        if not cfg_path: cfg_path = cur_directory
65
+        self.config_file = os.path.join(cfg_path,self.name+".cfg")
66
+        self.options = OrderedDict([("user","lietotajs"),("password","parole")])
67
+        self.options_read()
68
+
69
+    def get_content(self, data):
70
+        print "[iplayer] get_content:", data
71
+        if "::" in data:
72
+            data = data.split("::")[1]
73
+        path = data.split("?")[0]
74
+        clist = path.split("/")[0]
75
+        params = data[data.find("?"):] if "?" in data else ""
76
+        qs = dict(map(lambda x:x.split("="),re.findall("\w+=[\w-]+",params)))
77
+        #lang = qs["lang"] if "lang" in qs else self.country
78
+
79
+        content=[]
80
+        content.append(("..return", "back","","Return back"))
81
+
82
+        ### Home ###
83
+        if data=="home":
84
+            content.extend([
85
+                ("Search TV", "iplayer::search/{0}","","Search in iPlayer"),
86
+                ("Live streams", "iplayer::live","","TV live streams"),
87
+                ("Channels", "iplayer::channels","","Programmes by channel/date"),
88
+                ("Categories", "iplayer::categories","","Programmes by categories"),
89
+                ("A-Z", "iplayer::a-z","","All programmes by name"),
90
+                ("Highlights", "iplayer::home/highlights","","Current highlights"),
91
+                ("Most popular", "iplayer::groups/popular/episodes?per_page=40&page=1","","Most popular programmes")
92
+            ])
93
+            return content
94
+
95
+        ### Search ###
96
+        elif clist=="search":
97
+            data_ = "search-suggest/?q=%s&rights=mobile&initial_child_count=1"%data.split("/")[1]
98
+            r = self.call(data_)
99
+            for item in r["search_suggest"]["results"]:
100
+                title,data2,img,desc = self.get_data_element(item)
101
+                content.append((title,self.name+"::"+data2,img,desc))
102
+            return content
103
+
104
+
105
+        ### Live main ###
106
+        elif data=="live":
107
+            for ch in self.get_channels():
108
+                title = ch["title"]
109
+                img = self.logos[ch["id"]] if ch["id"] in self.logos else  "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
110
+                desc = title
111
+                data2 = "live/%s"%ch["id"]
112
+                ee = self.get_epg_live(ch["id"])
113
+                desc = ee[2]
114
+                content.append((title,self.name+"::"+data2,img,desc))
115
+            return content
116
+
117
+        ### Categories ###
118
+        elif data == "categories":
119
+            r = self.call(data)
120
+            if not "categories":
121
+                raise Exception("Error reading categories")
122
+            for item in r["categories"]:
123
+                data2 = "categories/%s"%(item["id"])
124
+                title = item["title"]
125
+                desc = title
126
+                img = self.img
127
+                content.append((title,self.name+"::"+data2,img,desc))
128
+            return content
129
+
130
+        ### Category root ###
131
+        elif clist == "categories" and len(data.split("/"))==2:
132
+            r = self.call(data)
133
+            title = "%s - highlights"%r["category"]["title"]
134
+            content.append((title,self.name+"::"+data+"/highlights?lang=en&rights=mobile&availability=available",self.img,title))
135
+            title = "%s - recent (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
136
+            content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
137
+            title = "%s - a-z (%s programmes, %s episodes)"%(r["category"]["title"],r["category"]["child_programme_count"],r["category"]["child_episode_count"])
138
+            content.append((title,self.name+"::"+data+"/programmes?rights=mobile&page=1&per_page=40&sort=title&sort_direction=asc&initial_child_count=1&availability=available",self.img,title))
139
+            return content
140
+
141
+        ### Program/episodes list ###
142
+        elif   re.search("categories/([\w\-]+)/(highlights|programmes).+",data) or\
143
+               re.search("programmes/(\w+)/episodes.+",data) or\
144
+               re.search("groups/(\w+)/episodes.+",data) or\
145
+               re.search("atoz/([\w]+)/programmes.+",data) or\
146
+               re.search("channels/(\w+)/schedule/[\d\-].+",data) or\
147
+               re.search("channels/(\w+)/programmes.+",data) or\
148
+               re.search("channels/(\w+)/highlights.+",data) or\
149
+               data == "home/highlights":
150
+            r = self.call(data)
151
+            lst = r["category_highlights"] if "category_highlights" in r else\
152
+                  r["category_programmes"] if "category_programmes" in r else\
153
+                  r["programme_episodes"] if "programme_episodes" in r else\
154
+                  r["atoz_programmes"] if "atoz_programmes" in r else\
155
+                  r["group_episodes"] if "group_episodes" in r else\
156
+                  r["schedule"] if "schedule" in r else\
157
+                  r["channel_highlights"] if "channel_highlights" in r else\
158
+                  r["channel_programmes"] if "channel_programmes" in r else\
159
+                  r["home_highlights"] if "home_highlights" in r else\
160
+                  []
161
+            if not lst:
162
+                return content
163
+            for el in lst["elements"]:
164
+                if el["type"] == "broadcast":
165
+                    if not len(el["episode"]["versions"]):continue
166
+                    title,data2,img,desc = self.get_data_element(el["episode"])
167
+                    t1 = gt(el['scheduled_start'])
168
+                    t2 = gt(el['scheduled_end'])
169
+                    title = "[%s-%s]%s"%(t1.strftime("%d.%m.%Y %H:%M"),t2.strftime("%H:%M"),title)
170
+                else:
171
+                    title,data2,img,desc = self.get_data_element(el)
172
+                content.append((title,self.name+"::"+data2,img,desc))
173
+
174
+            if "&page=" in data and lst["page"]*lst["per_page"]<lst["count"]:
175
+                data2 = re.sub("&page=\d+","&page=%s"%(lst["page"]+1),data)
176
+                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
177
+            return content
178
+
179
+        ### A-z root ###
180
+        elif data=="a-z":
181
+            url = "http://www.bbc.co.uk/programmes/a-z/by/x/all.json?page=1"
182
+            r = self._http_request(url)
183
+            if not r:
184
+                raise Exception("Can not read %s"%s)
185
+            js = json.loads(r)
186
+            for ch in js["atoz"]["letters"]:
187
+                title = ch.upper()
188
+                desc = "Programmes beginning with %s"%title
189
+                img = self.img
190
+                data2 = "atoz/%s/programmes?rights=mobile&page=1&per_page=40&initial_child_count=1&sort=title&sort_direction=asc&availability=available"%ch
191
+                content.append((title,self.name+"::"+data2,img,desc))
192
+            return content
193
+
194
+        ###  Channels home ###
195
+        elif data=="channels":
196
+            for ch in self.get_channels():
197
+                title = ch["title"]
198
+                img = self.logos[ch["id"]] if ch["id"] in self.logos else  "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
199
+                desc = title
200
+                data2 = "channels/%s"%ch["id"]
201
+                #ee = self.get_epg_live(ch["id"])
202
+                desc = title
203
+                content.append((title,self.name+"::"+data2,img,desc))
204
+            return content
205
+
206
+        ### Channel highlights/programmes/days ###
207
+        elif clist=="channels" and len(data.split("/"))==2:
208
+            r = self.call(data)
209
+            chid = data.split("/")[1]
210
+            ch = self.get_channel_by_id(chid)
211
+
212
+            # Highlights
213
+            title = ch["title"] + " - highlights"
214
+            img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
215
+            data2 = "channels/%s/highlights?lang=en&rights=mobile&availability=available"%ch["id"]
216
+            desc = title
217
+            content.append((title,self.name+"::"+data2,img,desc))
218
+
219
+            #AtoZ
220
+            title = ch["title"] + " - programmes AtoZ"
221
+            data2 = "channels/%s/programmes?rights=mobile&page=1&per_page=40&sort=recent&sort_direction=asc&initial_child_count=1&availability=available"%ch["id"]
222
+            desc = title
223
+            content.append((title,self.name+"::"+data2,img,desc))
224
+
225
+            day0 = datetime.date.today()
226
+            for i in range(10):
227
+                day = day0-datetime.timedelta(days=i)
228
+                days = day.strftime("%Y-%m-%d")
229
+                title = ch["title"] + " - " + days
230
+                img = "http://static.bbci.co.uk/mobileiplayerappbranding/1.9/android/images/channels/tv-guide-wide-logo/layout_normal/xxhdpi/%s_tv-guide-wide-logo.png"%ch["id"]
231
+                data2 = "channels/%s/schedule/%s?availability=available"%(ch["id"],days)
232
+                #ee = self.get_epg_live(ch["id"])
233
+                desc = title
234
+                content.append((title,self.name+"::"+data2,img,desc))
235
+            return content
236
+
237
+
238
+    def get_streams(self, data):
239
+        print "[iplayer] get_streams:", data
240
+        if "::" in data: data = data.split("::")[1]
241
+        if not self.is_video(data):
242
+            return []
243
+        cmd = data.split("/")
244
+        vid = cmd[1].split("?")[0]
245
+        if cmd[0] == "live":
246
+            title,img,desc,nfo = self.get_epg_live(vid)
247
+        else:
248
+            #data_ = "episodes/%s"%vid
249
+            #r = self.call(data_)
250
+            title,img,desc,vid,nfo = self.get_epg_video(vid)
251
+        url = "http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/format/json/mediaset/iptv-all/vpid/%s"%vid
252
+        print "vid=%s"%vid
253
+        print url
254
+        r = self._http_request(url) #,headers=self.headers2
255
+        if not r:
256
+            raise Exception("No streams found")
257
+        js = json.loads(r)
258
+        if "result" in js and js["result"]=="geolocation":
259
+            raise Exception("BBC iPlayer service available only from UK")
260
+        if not "media" in js:
261
+            raise Exception("No streams found")
262
+        streams = []
263
+        captions = []
264
+        for s in js["media"]:
265
+            if s["kind"] == "captions":
266
+                if s["connection"][0]["href"]:
267
+                    sub = {}
268
+                    sub["url"] = s["connection"][0]["href"].encode('utf8')
269
+                    sub["type"] = s["type"]
270
+                    sub["name"] = s["service"] if "service" in s else "captions (taff)"
271
+                    sub["lang"] = "en"
272
+                    captions.append(sub)
273
+
274
+            if s["kind"] <> "video":
275
+                continue
276
+            for c in s["connection"]:
277
+                if c["transferFormat"] <> "hls": continue
278
+                #if not (c["supplier"].startswith("mf_") or c["supplier"].startswith("ll_")) : continue # TODO ir kaut kādas VPN problēmas ar akamaihd
279
+                #if c["priority"] <> "1": continue
280
+                url=c["href"].encode("utf8")
281
+                #print url.split("/")[2]
282
+                r2 = self._http_request(url)
283
+                if not r2:
284
+                    continue
285
+                slist = re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", r2, re.DOTALL)
286
+                if not slist:
287
+                    stream = util.item()
288
+                    stream["url"]=url
289
+                    stream["name"]=title
290
+                    stream["desc"]=desc
291
+                    stream["img"]=img
292
+                    stream["type"]="hls"
293
+                    stream["quality"]=("%s %sx%s %s,%s"%(s["bitrate"],s["width"],s["height"],c["supplier"],c["priority"])).encode("utf8")
294
+                    stream["lang"]="en"
295
+                    stream["subs"]=captions
296
+                    stream["order"]=int(s["bitrate"])
297
+                    stream["nfo"] = nfo
298
+                    #print url.split("/")[2]
299
+                    streams.append(stream)
300
+                else:
301
+                    for cc in slist:
302
+                        m = re.search("RESOLUTION=([\dx]+)",cc[0])
303
+                        resolution = m.group(1) if m else "%sx%s"%(s["width"],s["height"])
304
+                        m = re.search("BANDWIDTH=([\d]+)",cc[0])
305
+                        bitrate = m.group(1) if m else s["bitrate"]
306
+                        url2 = cc[1].encode("utf8")
307
+                        if not url2.startswith("http"):
308
+                            uu = url.split("/")[:-1]
309
+                            uu.append(url2)
310
+                            url2 = "/".join(uu)
311
+                        #print url.split("/")[2]
312
+                        stream = util.item()
313
+                        stream["url"]=url2
314
+                        stream["name"]=title
315
+                        stream["desc"]=desc
316
+                        stream["img"]=img
317
+                        stream["type"]="hls"
318
+                        stream["quality"]=("%s %s %s,%s"%(bitrate,resolution,c["supplier"],c["priority"])).encode("utf8")
319
+                        stream["lang"]="en"
320
+                        stream["subs"]=captions
321
+                        stream["order"]=int(bitrate)
322
+                        stream["nfo"] = nfo
323
+                        streams.append(stream)
324
+        if captions:
325
+            for s in streams:
326
+                s["subs"]=captions
327
+        streams = sorted(streams,key=lambda item: item["order"],reverse=True)
328
+        return streams
329
+
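# Sketch (not part of the diff above) of the master-playlist handling in
# get_streams(): each iptv-all connection URL points at an HLS master playlist whose
# #EXT-X-STREAM-INF entries are split into one stream per variant, taking BANDWIDTH
# and RESOLUTION from the attribute line and resolving relative variant URIs against
# the master URL. The helper name and the sample playlist are illustrative only.
import re

def parse_master_playlist(master_url, playlist_text):
    variants = []
    for attrs, uri in re.findall(r"#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", playlist_text):
        m = re.search(r"RESOLUTION=([\dx]+)", attrs)
        resolution = m.group(1) if m else "?"
        m = re.search(r"BANDWIDTH=(\d+)", attrs)
        bandwidth = int(m.group(1)) if m else 0
        if not uri.startswith("http"):
            uri = "/".join(master_url.split("/")[:-1] + [uri])
        variants.append((bandwidth, resolution, uri))
    return sorted(variants, reverse=True)

sample = "#EXT-X-STREAM-INF:BANDWIDTH=1800000,RESOLUTION=960x540\nlive_540p.m3u8\n"
# parse_master_playlist("http://example.org/master.m3u8", sample)
# -> [(1800000, "960x540", "http://example.org/live_540p.m3u8")]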
330
+    def is_video(self,data):
331
+        if "::" in data:
332
+            data = data.split("::")[1]
333
+        cmd = data.split("/")
334
+        if cmd[0]=="live" and  len(cmd)==2:
335
+            return True
336
+        elif cmd[0]=="episodes" and len(cmd)==2:
337
+            return True
338
+        else:
339
+            return False
340
+
341
+    def get_data_element(self,item):
342
+        if ("programme" in item["type"] or "group" in item["type"]) and item["count"]>1:
343
+            ep = item.copy()
344
+        elif ("programme" in item["type"] or "group" in item["type"]) and item["count"]==1:
345
+            ep = item["initial_children"][0].copy()
346
+        elif item["type"] == "episode":
347
+            ep = item.copy()
348
+        elif item["type"] == "broadcast":
349
+            ep = item["episode"].copy()
350
+        else:
351
+            ep = item.copy()
352
+        title = ep["title"]
353
+        if "subtitle" in ep and ep["subtitle"]:
354
+            title = title+". "+ ep["subtitle"]
355
+        desc = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else ep["synopses"]["small"]
356
+        #TODO: add extra info to the description
357
+        img = ep["images"]["standard"].replace("{recipe}","512x288") if "images" in ep else self.img
358
+        if ep["type"] == "episode":
359
+            data2 = "episodes/%s"%ep["id"]
360
+        elif "programme" in ep["type"]:
361
+            data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
362
+            title = "%s [%s episodes]"%(title,ep["count"])
363
+        elif "group" in ep["type"]:
364
+            data2 = "groups/%s/episodes?per_page=40&page=1"%ep["id"]
365
+            title = "%s [%s episodes]"%(title,ep["count"])
366
+        else:
367
+            data2 = "programmes/%s/episodes?per_page=40&page=1"%ep["id"]
368
+            title = "%s [%s episodes]"%(title,ep["count"])
369
+        return title,data2,img,desc
370
+
371
+    def get_epg_video(self,vid):
372
+        data = "episodes/%s"%vid
373
+        nfo = {}
374
+        r = self.call(data)
375
+        if "episodes" in r :
376
+            ep = r["episodes"][0]
377
+            title = ep["title"]
378
+            if "subtitle" in ep:
379
+                title = title +". "+ ep["subtitle"]
380
+            title = title
381
+            desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
382
+            desc = desc
383
+            ver = ep["versions"][0]
384
+            vid = ver["id"]
385
+            remaining = ver["availability"]["end"].split("T")[0] #["remaining"]["text"]
386
+            duration = ver["duration"]["text"]
387
+            first_broadcast = ver["first_broadcast"]
388
+            desc =u"%s\n%s\%s\n%s\n%s"%(title,duration,remaining,first_broadcast,desc)
389
+            img = ep["images"]["standard"].replace("{recipe}","512x288")
390
+
391
+            #Create nfo dictionary
392
+            tt = lambda dd,k,d: dd[k] if k in dd else d
393
+            nfo_type = "movie" if True else "tvswhow" # TODO
394
+            t = OrderedDict()
395
+            t["title"] = title
396
+            t["originaltitle"] = tt(ep,"original_title","")
397
+            t["thumb"] = img
398
+            t["id"] = vid
399
+            t["outline"] = ep["synopses"]["small"] if "small" in ep["synopses"] else ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
400
+            t["plot"] = ep["synopses"]["large"] if "large" in ep["synopses"] else ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
401
+            t["tagline"] = ep["synopses"]["editorial"] if "editorial" in ep["synopses"] else ""
402
+            t["runtime"] = tt(ver["duration"],"text","")
403
+            t["premiered"] = tt(ep,"release_date","")
404
+            t["aired"] = ver["availability"]["start"].split("T")[0] if "start" in ver["availability"] else ""
405
+            if "parent_position" in ep: t["episode"] = ep["parent_position"]
406
+            nfo[nfo_type] = t
407
+
408
+            return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),vid.encode("utf8"),nfo
409
+        else:
410
+            raise Exception("No video info")
411
+
412
+    def get_epg_live(self,channelid):
413
+        data = "channels/%s/highlights?live=true"%channelid
414
+        r = self.call(data)
415
+        nfo = {}
416
+        if "channel_highlights" in r and r["channel_highlights"]["elements"][0]["id"] == "live":
417
+            epg = r["channel_highlights"]["elements"][0]["initial_children"][0].copy()
418
+            t1 = gt(epg['scheduled_start'])
419
+            t2 = gt(epg['scheduled_end'])
420
+            ep = epg["episode"]
421
+            title = ep["title"]
422
+            if "subtitle" in ep:
423
+                title = title +". "+ ep["subtitle"]
424
+            title = "%s (%s-%s)"%(title,t1.strftime("%H:%M"),t2.strftime("%H:%M"))
425
+            title = title
426
+            desc = ep["synopses"]["medium"] if "medium" in ep["synopses"] else p["synopses"]["small"] if "small" in ep["synopses"] else title
427
+            desc = desc
428
+            desc ="%s\n%s"%(title,desc)
429
+            img = ep["images"]["standard"].replace("{recipe}","512x288")
430
+            #return title,img,desc
431
+        else:
432
+            title = r["channel_highlights"]["channel"]["title"]
433
+            img = ""
434
+            desc = title
435
+
436
+        return title.encode("utf8"),img.encode("utf8"),desc.encode("utf8"),nfo
437
+
438
+    def get_channels(self):
439
+        if self.ch:
440
+            return self.ch
441
+        r= self.call("channels")
442
+        self.ch=[]
443
+        for i,item in enumerate(r["channels"]):
444
+            self.ch.append(item)
445
+            self.ch_id[item["id"]]=i
446
+            self.ch_id2[item["master_brand_id"]]=i
447
+            self.ch_name[item["title"]]=i
448
+        return self.ch
449
+
450
+    def get_channel_by_id(self,chid):
451
+        if not self.ch:
452
+            self.get_channels()
453
+        if not self.ch:
454
+            return None
455
+        return self.ch[self.ch_id[chid]] if self.ch_id.has_key(chid) else None
456
+
457
+    def get_channel_by_id2(self,chid):
458
+        if not self.ch:
459
+            self.get_channels()
460
+        if not self.ch:
461
+            return None
462
+        return self.ch[self.ch_id2[chid]] if self.ch_id2.has_key(chid) else None
463
+
464
+    def get_channel_by_name(self,name):
465
+        if not self.ch:
466
+            self.get_channels()
467
+        ch2 = self.get_channel_by_name2(name)
468
+        if not ch2:
469
+            return None
470
+        ch = self.get_channel_by_id2(ch2["id2"])
471
+        return ch
472
+
473
+
474
+    def call(self, data,params = None, headers=None):
475
+        if not headers: headers = self.headers
476
+        #if not lang: lang = self.country
477
+        url = self.api_url + data
478
+        content = self._http_request(url,params, headers)
479
+        if content:
480
+            try:
481
+                result = json.loads(content)
482
+                return result
483
+            except Exception, ex:
484
+                return None
485
+        else:
486
+            return None
487
+
488
+    def call2(self, data,params = None, headers=None):
489
+        if not headers: headers = self.headers2
490
+        #if not lang: lang = self.country
491
+        url = self.api_url2 + data
492
+        content = self._http_request(url,params, headers)
493
+        return content
494
+
495
+    def _http_request(self, url,params = None, headers=None):
496
+        if not headers: headers = self.headers
497
+        import requests
498
+        try:
499
+            from requests.packages.urllib3.exceptions import InsecureRequestWarning
500
+            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
501
+        except:
502
+            pass
503
+        try:
504
+            r = requests.get(url, headers=headers)
505
+            return r.content
506
+
507
+        except Exception as ex:
508
+            if "code" in dir(ex) and ex.code==403:
509
+                return ex.read()
510
+            else:
511
+                return None
512
+
513
+def gt(dt_str):
514
+    dt, _, us= dt_str.partition(".")
515
+    dt= datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
516
+    dt = dt - datetime.timedelta(seconds=time.altzone)
517
+    #us= int(us.rstrip("Z"), 10)
518
+    #r = dt + datetime.timedelta(microseconds=us)
519
+    return dt
520
+
521
+if __name__ == "__main__":
522
+    c = Source()
523
+    from subprocess import call
524
+    #ch = c.get_channels()
525
+    #c.get_epg_live("bbc_two_england")
526
+
527
+    if len(sys.argv)>1 and "iplayer::" not in sys.argv[1]:
528
+
529
+        vid = sys.argv[1]
530
+        print "login - %s"%c.login("ivars777","xxx")
531
+        vid = "1069"
532
+        vid = "1462566072086"
533
+        channelid="101"
534
+        vid = "1350462656767"
535
+        #data = c.get_stream_url(vid,"vod")
536
+        #call([r"c:\Program Files\VideoLAN\VLC\vlc.exe",data["stream"]])
537
+        pass
538
+
539
+
540
+
541
+    else:
542
+        if len(sys.argv)>1:
543
+            data= sys.argv[1]
544
+        else:
545
+            data = "iplayer::home"
546
+        content = c.get_content(data)
547
+        for item in content:
548
+            print item
549
+        #cat = api.get_categories(country)
550
+        #chan = api.get_channels("lv")
551
+        #prog = api.get_programs(channel=6400)
552
+        #prog = api.get_programs(category=55)
553
+        #seas = api.get_seasons(program=6453)
554
+        #str = api.get_streams(660243)
555
+        #res = api.get_videos(802)
556
+        #formats = api.getAllFormats()
557
+        #det = api.detailed("1516")
558
+        #vid = api.getVideos("13170")
559
+        pass
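
Note on the HLS handling in get_streams() above: when a media connection returns a master playlist, each "#EXT-X-STREAM-INF" entry becomes a separate stream item. A minimal standalone sketch of that parsing step (the playlist text below is made-up sample data, not a real response):

    import re

    master = (
        "#EXTM3U\n"
        "#EXT-X-STREAM-INF:BANDWIDTH=1570400,RESOLUTION=960x540\n"
        "high/segment.m3u8\n"
        "#EXT-X-STREAM-INF:BANDWIDTH=452000,RESOLUTION=512x288\n"
        "low/segment.m3u8\n"
    )
    # Same regex as in get_streams(): the INF header line plus the variant URL on the next line
    for inf, url in re.findall("#EXT-X-STREAM-INF:([^\n]+)\n([^\n]+)", master):
        m = re.search("RESOLUTION=([\dx]+)", inf)
        resolution = m.group(1) if m else "?"
        m = re.search("BANDWIDTH=([\d]+)", inf)
        bitrate = m.group(1) if m else "?"
        print bitrate, resolution, url  # Python 2 print statement, as in the rest of the plugin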

+ 261
- 0
sources/jsinterp.py Прегледај датотеку

@@ -0,0 +1,261 @@
1
+# This code comes from youtube-dl: https://github.com/rg3/youtube-dl/blob/master/youtube_dl/jsinterp.py
2
+
3
+from __future__ import unicode_literals
4
+
5
+import json
6
+import operator
7
+import re
8
+
9
+
10
+_OPERATORS = [
11
+    ('|', operator.or_),
12
+    ('^', operator.xor),
13
+    ('&', operator.and_),
14
+    ('>>', operator.rshift),
15
+    ('<<', operator.lshift),
16
+    ('-', operator.sub),
17
+    ('+', operator.add),
18
+    ('%', operator.mod),
19
+    ('/', operator.truediv),
20
+    ('*', operator.mul),
21
+]
22
+_ASSIGN_OPERATORS = [(op + '=', opfunc) for op, opfunc in _OPERATORS]
23
+_ASSIGN_OPERATORS.append(('=', lambda cur, right: right))
24
+
25
+_NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
26
+
27
+
28
+class JSInterpreter(object):
29
+    def __init__(self, code, objects=None):
30
+        if objects is None:
31
+            objects = {}
32
+        self.code = code
33
+        self._functions = {}
34
+        self._objects = objects
35
+
36
+    def interpret_statement(self, stmt, local_vars, allow_recursion=100):
37
+        if allow_recursion < 0:
38
+            print '[JSInterpreter] Recursion limit reached'
39
+            return None
40
+
41
+        should_abort = False
42
+        stmt = stmt.lstrip()
43
+        stmt_m = re.match(r'var\s', stmt)
44
+        if stmt_m:
45
+            expr = stmt[len(stmt_m.group(0)):]
46
+        else:
47
+            return_m = re.match(r'return(?:\s+|$)', stmt)
48
+            if return_m:
49
+                expr = stmt[len(return_m.group(0)):]
50
+                should_abort = True
51
+            else:
52
+                # Try interpreting it as an expression
53
+                expr = stmt
54
+
55
+        v = self.interpret_expression(expr, local_vars, allow_recursion)
56
+        return v, should_abort
57
+
58
+    def interpret_expression(self, expr, local_vars, allow_recursion):
59
+        expr = expr.strip()
60
+
61
+        if expr == '':  # Empty expression
62
+            return None
63
+
64
+        if expr.startswith('('):
65
+            parens_count = 0
66
+            for m in re.finditer(r'[()]', expr):
67
+                if m.group(0) == '(':
68
+                    parens_count += 1
69
+                else:
70
+                    parens_count -= 1
71
+                    if parens_count == 0:
72
+                        sub_expr = expr[1:m.start()]
73
+                        sub_result = self.interpret_expression(
74
+                            sub_expr, local_vars, allow_recursion)
75
+                        remaining_expr = expr[m.end():].strip()
76
+                        if not remaining_expr:
77
+                            return sub_result
78
+                        else:
79
+                            expr = json.dumps(sub_result) + remaining_expr
80
+                        break
81
+            else:
82
+                print '[JSInterpreter] Premature end of parens in %r' % expr
83
+                return None
84
+
85
+        for op, opfunc in _ASSIGN_OPERATORS:
86
+            m = re.match(r'''(?x)
87
+                (?P<out>%s)(?:\[(?P<index>[^\]]+?)\])?
88
+                \s*%s
89
+                (?P<expr>.*)$''' % (_NAME_RE, re.escape(op)), expr)
90
+            if not m:
91
+                continue
92
+            right_val = self.interpret_expression(
93
+                m.group('expr'), local_vars, allow_recursion - 1)
94
+
95
+            if m.groupdict().get('index'):
96
+                lvar = local_vars[m.group('out')]
97
+                idx = self.interpret_expression(
98
+                    m.group('index'), local_vars, allow_recursion)
99
+                assert isinstance(idx, int)
100
+                cur = lvar[idx]
101
+                val = opfunc(cur, right_val)
102
+                lvar[idx] = val
103
+                return val
104
+            else:
105
+                cur = local_vars.get(m.group('out'))
106
+                val = opfunc(cur, right_val)
107
+                local_vars[m.group('out')] = val
108
+                return val
109
+
110
+        if expr.isdigit():
111
+            return int(expr)
112
+
113
+        var_m = re.match(
114
+            r'(?!if|return|true|false)(?P<name>%s)$' % _NAME_RE,
115
+            expr)
116
+        if var_m:
117
+            return local_vars[var_m.group('name')]
118
+
119
+        try:
120
+            return json.loads(expr)
121
+        except ValueError:
122
+            pass
123
+
124
+        m = re.match(
125
+            r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % _NAME_RE,
126
+            expr)
127
+        if m:
128
+            variable = m.group('var')
129
+            member = m.group('member')
130
+            arg_str = m.group('args')
131
+
132
+            if variable in local_vars:
133
+                obj = local_vars[variable]
134
+            else:
135
+                if variable not in self._objects:
136
+                    self._objects[variable] = self.extract_object(variable)
137
+                obj = self._objects[variable]
138
+
139
+            if arg_str is None:
140
+                # Member access
141
+                if member == 'length':
142
+                    return len(obj)
143
+                return obj[member]
144
+
145
+            assert expr.endswith(')')
146
+            # Function call
147
+            if arg_str == '':
148
+                argvals = tuple()
149
+            else:
150
+                argvals = tuple([
151
+                    self.interpret_expression(v, local_vars, allow_recursion)
152
+                    for v in arg_str.split(',')])
153
+
154
+            if member == 'split':
155
+                assert argvals == ('',)
156
+                return list(obj)
157
+            if member == 'join':
158
+                assert len(argvals) == 1
159
+                return argvals[0].join(obj)
160
+            if member == 'reverse':
161
+                assert len(argvals) == 0
162
+                obj.reverse()
163
+                return obj
164
+            if member == 'slice':
165
+                assert len(argvals) == 1
166
+                return obj[argvals[0]:]
167
+            if member == 'splice':
168
+                assert isinstance(obj, list)
169
+                index, howMany = argvals
170
+                res = []
171
+                for i in range(index, min(index + howMany, len(obj))):
172
+                    res.append(obj.pop(index))
173
+                return res
174
+
175
+            return obj[member](argvals)
176
+
177
+        m = re.match(
178
+            r'(?P<in>%s)\[(?P<idx>.+)\]$' % _NAME_RE, expr)
179
+        if m:
180
+            val = local_vars[m.group('in')]
181
+            idx = self.interpret_expression(
182
+                m.group('idx'), local_vars, allow_recursion - 1)
183
+            return val[idx]
184
+
185
+        for op, opfunc in _OPERATORS:
186
+            m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr)
187
+            if not m:
188
+                continue
189
+            x, abort = self.interpret_statement(
190
+                m.group('x'), local_vars, allow_recursion - 1)
191
+            if abort:
192
+                print '[JSInterpreter] Premature left-side return of %s in %r' % (op, expr)
193
+                return None
194
+            y, abort = self.interpret_statement(
195
+                m.group('y'), local_vars, allow_recursion - 1)
196
+            if abort:
197
+                print '[JSInterpreter] Premature right-side return of %s in %r' % (op, expr)
198
+                return None
199
+            return opfunc(x, y)
200
+
201
+        m = re.match(
202
+            r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % _NAME_RE, expr)
203
+        if m:
204
+            fname = m.group('func')
205
+            argvals = tuple([
206
+                int(v) if v.isdigit() else local_vars[v]
207
+                for v in m.group('args').split(',')])
208
+            if fname not in self._functions:
209
+                self._functions[fname] = self.extract_function(fname)
210
+            return self._functions[fname](argvals)
211
+
212
+        print '[JSInterpreter] Unsupported JS expression %r' % expr
213
+        return None
214
+
215
+    def extract_object(self, objname):
216
+        obj = {}
217
+        obj_m = re.search(
218
+            (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) +
219
+            r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)' +
220
+            r'\}\s*;',
221
+            self.code)
222
+        fields = obj_m.group('fields')
223
+        # Currently, it only supports function definitions
224
+        fields_m = re.finditer(
225
+            r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function'
226
+            r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
227
+            fields)
228
+        for f in fields_m:
229
+            argnames = f.group('args').split(',')
230
+            obj[f.group('key')] = self.build_function(argnames, f.group('code'))
231
+
232
+        return obj
233
+
234
+    def extract_function(self, funcname):
235
+        func_m = re.search(
236
+            r'''(?x)
237
+                (?:function\s+%s|[{;,]%s\s*=\s*function|var\s+%s\s*=\s*function)\s*
238
+                \((?P<args>[^)]*)\)\s*
239
+                \{(?P<code>[^}]+)\}''' % (
240
+                re.escape(funcname), re.escape(funcname), re.escape(funcname)),
241
+            self.code)
242
+        if func_m is None:
243
+            print '[JSInterpreter] Could not find JS function %r' % funcname
244
+            return None
245
+        argnames = func_m.group('args').split(',')
246
+
247
+        return self.build_function(argnames, func_m.group('code'))
248
+
249
+    def call_function(self, funcname, *args):
250
+        f = self.extract_function(funcname)
251
+        return f(args)
252
+
253
+    def build_function(self, argnames, code):
254
+        def resf(args):
255
+            local_vars = dict(zip(argnames, args))
256
+            for stmt in code.split(';'):
257
+                res, abort = self.interpret_statement(stmt, local_vars)
258
+                if abort:
259
+                    break
260
+            return res
261
+        return resf
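
A small usage sketch for the interpreter above, relying only on methods defined in this file (the JS snippet is illustrative):

    js_code = 'function add(a,b){return a+b;}'
    interp = JSInterpreter(js_code)
    # extract_function() compiles the named JS function, call_function() runs it with the given arguments
    print interp.call_function('add', 3, 4)  # -> 7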

+ 219
- 0
sources/lmt.py Прегледај датотеку

@@ -0,0 +1,219 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+import requests
13
+import datetime, re, sys,os
14
+import ConfigParser
15
+import ssl
16
+if "_create_unverified_context" in dir(ssl):
17
+    ssl._create_default_https_context = ssl._create_unverified_context
18
+
19
+from SourceBase import SourceBase
20
+
21
+import resolver
22
+try:
23
+    import util
24
+except:
25
+    sys.path.insert(0,'..')
26
+    import util
27
+from YouTubeVideoUrl import YouTubeVideoUrl
28
+
29
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
30
+import HTMLParser
31
+h = HTMLParser.HTMLParser()
32
+
33
+class Source(SourceBase):
34
+
35
+    def __init__(self, country="",cfg_path=None):
36
+        self.name = "lmt"
37
+        self.title = "LMT straume"
38
+        self.img = "http://www.lob.lv/images/logo/lmt_straume_vert_rgb.png"
39
+        self.desc = "LMT straume - dažādi video latviesu valodā"
40
+        self.headers = headers2dict("""
41
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
42
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
43
+Accept-Language: en-US,en;q=0.5
44
+""")
45
+        self.headers2 = headers2dict("""
46
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
47
+X-Requested-With: XMLHttpRequest
48
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
49
+""")
50
+        self.url = "http://straume.lmt.lv/lv/"
51
+
52
+    ######### Entry point ########
53
+    def get_content(self, data):
54
+        print "[lmt] get_content:", data
55
+        source, data, path, plist, clist, params, qs = self.parse_data(data)
56
+        content = []
57
+        content.append(("..return", "back","","Return back"))
58
+
59
+        if clist=="home":
60
+            content.extend([
61
+                ("Meklēt", "lmt::meklet?q={0}","","Meklēt"),
62
+                ("Straumes", "lmt::straumes","","Tiešraides un aktuāli video"),
63
+                #("TV", "lmt::tv","","TV tiešraides (tikai LMT tīklā)"),
64
+                ("Jaunākie", "lmt::video/jaunakie?videoPage=1", "", "Visu žanru jaunākie video"),
65
+                ("Sports", "lmt::video/sports?videoPage=1", "", "Sports"),
66
+                ("Kultūra un māksla", "lmt::video/kultura?videoPage=1", "", "Kultūra un māksla"),
67
+                ("Konferences", "lmt::video/konferences?videoPage=1", "", "Konferences"),
68
+                ("Raidījumi", "lmt::video/raidijumi?videoPage=1", "", "Raidījumi"),
69
+                ("Notikumi", "lmt::video/notikumi?videoPage=1", "", "Notikumi"),
70
+                ("Filmas un seriāli", "lmt::video/filmas?videoPage=1", "", "Filmas un seriāli"),
71
+                ("Dažādi video", "lmt::video/dazadi?videoPage=1", "", "Dažādi video"),
72
+                ("Viedtelevīzija", "lmt::video/viedtelevizija?videoPage=1", "", "Viedtelevīzija"),
73
+            ])
74
+            return content
75
+
76
+        elif clist in ("meklet","video", "straumes","video-saraksts"):
77
+            r=self.call(data)
78
+            result = re.findall('<a class="video-picture" (.+?)</li>', r, re.IGNORECASE | re.MULTILINE)
79
+            for r2 in result:
80
+                m = re.search('<a class="video-title" href="/lv/([^"]+)">([^<]+)<', r2)
81
+                title = m.group(2)
82
+                data2 = m.group(1)
83
+                m = re.search("([^ ]+) 2x", r2)
84
+                if m:
85
+                    img = m.group(1)
86
+                else:
87
+                    m = re.search('<img src="([^"]+)', r2)
88
+                    img = m.group(1) if m else ""
89
+                m = re.search('<span class="playlist-overlay">([^<]+)</span>', r2)
90
+                overlay = m.group(1) if m else ""
91
+                m = re.search('<span class="badge badge-[^>]+>([^<]+)(<[^>]+>([^<]+))*</span>', r2, re.IGNORECASE)
92
+                badge = ""
93
+                if m:
94
+                    badge = m.group(1)
95
+                    if m.group(3):
96
+                        badge = badge + m.group(3)
97
+                categories = re.findall('<span class="category-title">([^<]+)</span>', r2)
98
+                categories = "".join(categories)
99
+                if overlay:
100
+                    title = "%s [%s]"%(title,overlay)
101
+                if badge:
102
+                    title = "%s [%s]"%(title,badge)
103
+                desc = title
104
+                if categories:
105
+                    desc = desc + "\n"+ categories
106
+                content.append((title,self.name+"::"+data2,img,desc))
107
+            m = re.search("videoPage=(\d+)",data)
108
+            if m:
109
+                page = int(m.group(1))+1
110
+                data2 = re.sub(r"videoPage=\d+", r"videoPage=%s"%page, data)
111
+                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
112
+            #print content
113
+            return content
114
+
115
+
116
+        ### anything unexpected ###
117
+        else:
118
+            return content
119
+
120
+    def is_video(self,data):
121
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
122
+        if not clist in ("meklet","video", "straumes","video-saraksts","home"):
123
+            return True
124
+
125
+    def call(self, data,params=None,headers=None,lang=""):
126
+        if not headers: headers = self.headers
127
+        url = self.url+data
128
+        r = requests.get(url,headers = headers)
129
+        return r.content
130
+        #result = self._http_request(url,params,headers=headers)
131
+        #return result
132
+
133
+    def get_streams(self,data):
134
+        print "[lmt] get_streams:", data
135
+        if not self.is_video(data):
136
+            return []
137
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
138
+        r = self.call(path)
139
+        title = re.search("<h1>(.+?)</h1", r, re.IGNORECASE).group(1)
140
+        m = re.search('<a class="category-title".+?[^>]+>([^<]+)<', r, re.IGNORECASE | re.DOTALL)
141
+        categories = m.group(1) if m else ""
142
+        m = re.search('<span class="category-title">([^<]+)</span>.+?</p>', r, re.IGNORECASE | re.DOTALL)
143
+        if m:
144
+            categories = categories + m.group(1)
145
+        if categories:
146
+            tite = "%s [%s]"%(title,categories)
147
+        img = re.search('<meta property="twitter:image" content="([^"]+)">', r, re.IGNORECASE | re.DOTALL).group(1)
148
+        desc = title + "\n" + re.search('<meta property="og:description" content="([^"]+)">', r, re.IGNORECASE | re.DOTALL).group(1)
149
+        m = re.search('file: "([^"]+)"', r, re.IGNORECASE)
150
+        if m:
151
+            data2 = m.group(1)
152
+            stream = util.item()
153
+            stream["name"] = title
154
+            stream["url"] = data2
155
+            stream["img"] = img
156
+            stream["desc"] = desc
157
+            stream["resolver"] = "lmt"
158
+            return [stream]
159
+        elif re.search('src="http*://www.youtube.com/embed/([\w-]+).*"',r):
160
+            m = re.search('src="http*://www.youtube.com/embed/([\w-]+).*"',r)
161
+            video_id = m.group(1)
162
+            #http://www.youtube.com/embed/RUyQ_JJ6A84?rel=0&fs=1&wmode=transparent
163
+            data2 = YouTubeVideoUrl().extract(video_id)
164
+            s = util.item()
165
+            s["name"] = title
166
+            s["url"] = data2
167
+            s["desc"] = desc
168
+            s["img"] = img
169
+            s["resolver"] = "lmt"
170
+            return [s]
171
+        elif 'src="http://cdn.tiesraides.lv/lmtstraume.lv/' in r:
172
+            m = re.search('src="(http://cdn\.tiesraides\.lv/[^"]+)"',r)
173
+            url = m.group(1)
174
+            # src="http://cdn.tiesraides.lv/lmtstraume.lv/live-record2-ip/40?c=614127284dcd58d8a84afcf498a3ac7a&v=1405"
175
+            r = self._http_request(url)
176
+            #http://edge-telia2.tiesraides.lv/live-record2/lmtstraume.lv.40_1/manifest.f4m
177
+            m = re.search("'(http://.+?\.m3u8)'",r)
178
+            data2 = m.group(1) if m else ""
179
+            s = util.item()
180
+            s["name"] = title
181
+            s["url"] = data2
182
+            s["desc"] = desc
183
+            s["img"] = img
184
+            s["resolver"] = "lmt"
185
+            return [s]
186
+        else:
187
+            raise Exception("No stream found")
188
+
189
+
190
+                # streams = resolver.resolve(url)
191
+            # for s in streams:
192
+            #     s["name"] = title
193
+            #     s["desc"] = desc
194
+            #     s["img"] = img
195
+            #     streams.append(s)
196
+            # return streams
197
+
198
+
199
+if __name__ == "__main__":
200
+    country= "lv"
201
+    c = Source(country)
202
+    if len(sys.argv)>1:
203
+        data= sys.argv[1]
204
+    else:
205
+        data = "home"
206
+    content = c.get_content(data)
207
+    for item in content:
208
+        print item
209
+    #cat = api.get_categories(country)
210
+    #chan = api.get_channels("lv")
211
+    #prog = api.get_programs(channel=6400)
212
+    #prog = api.get_programs(category=55)
213
+    #seas = api.get_seasons(program=6453)
214
+    #str = api.get_streams(660243)
215
+    #res = api.get_videos(802)
216
+    #formats = api.getAllFormats()
217
+    #det = api.detailed("1516")
218
+    #vid = api.getVideos("13170")
219
+    pass
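
The get_content()/is_video() methods above work on "source::path?query" strings such as "lmt::video/sports?videoPage=1"; parse_data() lives in SourceBase (not shown in this diff) and splits them into source, path, path components, category and query dict. A rough standalone illustration of that decomposition, assuming parse_data behaves as the call sites suggest (split_data below is a hypothetical helper, not part of the plugin):

    def split_data(data):
        # illustrative only - mimics the convention used by parse_data()
        source, _, rest = data.partition("::")
        path = rest.split("?")[0]
        plist = path.split("/")
        clist = plist[0]
        qs = dict(p.split("=") for p in rest.split("?")[1].split("&")) if "?" in rest else {}
        return source, path, plist, clist, qs

    print split_data("lmt::video/sports?videoPage=1")
    # ('lmt', 'video/sports', ['video', 'sports'], 'video', {'videoPage': '1'})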

+ 1088
- 0
sources/ltc.py
Разлика између датотеке није приказан због своје велике величине
Прегледај датотеку


+ 235
- 0
sources/movieplace.py Прегледај датотеку

@@ -0,0 +1,235 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+import urllib2, urllib
13
+import datetime, re, sys,os
14
+import ConfigParser
15
+from collections import OrderedDict
16
+from SourceBase import SourceBase
17
+import ssl
18
+if "_create_unverified_context" in dir(ssl):
19
+    ssl._create_default_https_context = ssl._create_unverified_context
20
+
21
+import resolver
22
+try:
23
+    import util
24
+except:
25
+    sys.path.insert(0,'..')
26
+    import util
27
+
28
+
29
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
30
+import HTMLParser
31
+h = HTMLParser.HTMLParser()
32
+
33
+class Source(SourceBase):
34
+
35
+    def __init__(self, country="",cfg_path=None):
36
+        self.hidden = True # do not show non-working sources in the menu
37
+        self.name = "movieplace"
38
+        self.title = "MoviePlace.lv"
39
+        self.img = "http://movieplace.lv/images/logo.png"
40
+        self.desc = "Movieplace.lv - filmas latviesu valodā"
41
+        self.headers = headers2dict("""
42
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
43
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
44
+Accept-Language: en-US,en;q=0.5
45
+""")
46
+        self.headers2 = headers2dict("""
47
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
48
+X-Requested-With: XMLHttpRequest
49
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
50
+""")
51
+        self.url = "http://movieplace.lv/"
52
+
53
+
54
+    ######### Entry point ########
55
+    def get_content(self, data):
56
+        print "[movieplace] get_content:", data
57
+        source, data, path, plist, clist, params, qs = self.parse_data(data)
58
+        content = []
59
+        content.append(("..return", "back","","Return back"))
60
+
61
+        if clist=="home":
62
+            content.extend([
63
+                ("Meklēt", "movieplace::search/?q={0}","","Meklēt"),
64
+                ("Jaunākās filmas", "movieplace::load/?page1","","Visu žanru jaunākās filmas"),
65
+                ("Jaunākie seriāli", "movieplace::dir/?page1","","Visu žanru jaunākās filmas"),
66
+                #("Top50 filmas", "movieplace::index/top_50_filmas/0-4","","Top 50 filmas"),
67
+            ])
68
+            r = self.call("load/")
69
+            #i = r.find('<div class="cat-title">Meklēt pēc žanriem</div>')
70
+            #if i<=0:
71
+            #    return content
72
+            i = 0
73
+            for item in re.findall('<a href="/([^"]+)" class="catName">([^>]+)</a>', r[i:]):
74
+                title = item[1]
75
+                data2 = item[0]+"-1"
76
+                img = self.img
77
+                desc = title
78
+                content.append((title,self.name+"::"+data2,img,desc))
79
+            return content
80
+
81
+        elif clist=="search":
82
+            # TODO
83
+            r=self.call(data)
84
+            result = re.findall('<a href="([^"]+)"> (.+?) </a></div>.+?> (.+?)</div>', r, re.DOTALL)
85
+            for item in result:
86
+                title = item[1].replace("<b>","").replace("</b>","")
87
+                data2 = item[0].replace("http://movieplace.lv/","")
88
+                img = self.img
89
+                desc = item[2].replace("<b>","").replace("</b>","")
90
+                content.append((title,self.name+"::"+data2,img,desc))
91
+            if '<span>&raquo;</span>' in r:
92
+                m = re.search("p=(\d+)",data)
93
+                if m:
94
+                    page = int(m.group(1))+1
95
+                    data2 = re.sub(r"p=\d+", r"p=%s"%page, data)
96
+                    content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
97
+            return content
98
+
99
+        ## Movie lists ##
100
+        elif clist in ["load","dir"] and len(plist)<=3:
101
+            if clist == "jaunakas":
102
+                r = self.call("")
103
+            else:
104
+                r = self.call(data)
105
+            #r = r.decode("cp1251").encode("utf8")
106
+            if clist == "load":
107
+                result = re.findall(r' <a href="/([^"]+)" alt="([^"]+)"><img src="/([^"]+)" title="([^"]+)">.+?<div class="years">([^<]+)</div>\s+<div class="country">([^<]+)</div>', r, re.DOTALL)
108
+            else:
109
+                result = re.findall(r' <a href="/([^"]+)" alt="([^"]+)"><img src="/([^"]+)" title="[^"]+">.+?<span>([^<]+)</span>\s*<div class="country">([^<]+)</div>', r, re.IGNORECASE | re.DOTALL)
110
+            for item in result:
111
+                title = item[1]+" [%s]"%item[4] if clist=="load" else item[1]+" / %s [%s]"%(item[3],item[4])
112
+                img = "http://movieplace.lv/"+item[2]
113
+                data2 = item[0]
114
+                desc = "%s\n%s"%(title,item[5]) if clist=="load" else title
115
+                content.append((title,self.name+"::"+data2,img,desc))
116
+            m = re.search('<[ab] class="swchItemA*1"( href="/([^"]+)" onclick="[^"]+")*><span>([^<]+)</span></[ab]> </span>', r, re.DOTALL)
117
+            if m:
118
+                if m.group(1):
119
+                    page = int(re.search("\d+$",data).group())
120
+                    page = page+1
121
+                    data2 = re.sub("\d$","%s"%page,data)
122
+                    content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
123
+            return content
124
+
125
+        ### Series ###
126
+        elif clist=="dir" and len(plist)==4:
127
+            r = self.call(path)
128
+            title0 = re.search('<h2 class="title" itemprop="name">(.+?)</h2>', r, re.DOTALL).group(1)
129
+            m = re.search(r'<span>VALODA:</span> <b><em itemprop="alternativeHeadline"><a href="[^"]*" class="entAllCats">([^<]+)</a></em></b></div>\s+?<div><span>SEZONA:</span> <b>([^<]+)</b></div>', r, re.IGNORECASE | re.DOTALL)
130
+            if m:
131
+                title0 = "%s / Season %s [%s]"%(title0,m.group(2),m.group(1))
132
+            desc0 = title0
133
+            img0 = "http://movieplace.lv" + re.search('<img src="(.+?)".+?itemprop="image">', r, re.DOTALL).group(1)
134
+            #TODO
135
+            result = re.findall(r'<summary>([^<]+)</summary><iframe src="https://openload\.co/embed/[^/]+/"', r, re.DOTALL)
136
+            i = 1
137
+            for item in result:
138
+                title = title0+" - " + item
139
+                data2 = data+"?e=%s"%i
140
+                img = img0
141
+                desc = desc0
142
+                content.append((title,self.name+"::"+data2,img,desc))
143
+                i += 1
144
+            return content
145
+
146
+        ### anything unexpected ###
147
+        else:
148
+            return content
149
+
150
+    def is_video(self,data):
151
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
152
+        if clist=="dir" and len(plist) == 4 and "e"in qs: # sērija
153
+            return True
154
+        elif clist=="load" and len(plist) == 4:
155
+            return True
156
+        else:
157
+            return False
158
+
159
+    def call(self, data,params=None,headers=None,lang=""):
160
+        if not headers: headers = self.headers
161
+        url = self.url+data
162
+        result = self._http_request(url,params,headers=headers)
163
+        return result
164
+
165
+    def get_streams(self,data):
166
+        print "[movieplace] get_streams:", data
167
+        if not self.is_video(data):
168
+            return []
169
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
170
+        r = self.call(path)
171
+        if clist=="load":
172
+            m = re.search('<h2 class="title" itemprop="name">([^<]+)</h2>', r, re.DOTALL)
173
+            title = re.search('<itemprop="name">(.+?)</itemprop="name">', r, re.DOTALL).group(1)
174
+            m = re.search(r'<div role="tabpanel" class="tab-pane fade in active" id="heading-tab4">\s*(.+?)\s*</div>', r, re.DOTALL)
175
+            desc = m.group(1) if m else title
176
+            m = re.search('<meta property="og:image" content="([^"]+)" />', r, re.DOTALL)
177
+            img = m.group(1) if m else ""
178
+            rr = []
179
+            for m in re.finditer("(RU|ENG|LAT|LAT SUB)<BR( /)*>.*?>?<BR( /)*>.*?<iframe", r, re.IGNORECASE | re.DOTALL):
180
+                if len(rr)>0:
181
+                    rr[-1]["end"] = m.start()
182
+                rr.append({"lang":m.group(1),"start":m.start(),"end":len(r)})
183
+            streams = []
184
+            for m in re.finditer(r'src="(https*://(goo\.gl|songs2dl|kodik|cdn\.kapnob|hqq|openload|sv1.servkino|vidwatch|online\.kinozz).+?)"', r, re.IGNORECASE | re.DOTALL):
185
+                url = m.group(1)
186
+                lang = "?"
187
+                for rrr in rr:
188
+                    if m.start()>rrr["start"] and m.start()<rrr["end"]:
189
+                        lang = rrr["lang"]
190
+                        break
191
+                for s in resolver.resolve(url):
192
+                    s["name"] = title
193
+                    s["desc"] = desc
194
+                    s["img"] = img
195
+                    s["type"] = self.stream_type(s["url"])
196
+                    s["lang"] = lang
197
+                    streams.append(s)
198
+            return streams
199
+
200
+
201
+        elif clist=="dir" and "e" in qs: # serialā sērija
202
+            #TODO
203
+            result = re.findall(r'<summary>([^<]+)</summary><iframe src="([^"]+)"', r, re.DOTALL)
204
+            i = int(qs["s"])-1
205
+            url0 = result[i][1]
206
+            title = title + " - " + result[i][0]
207
+        else:
208
+            #iframe src="https://openload.co/embed/wlw6Vl9zwL0/"
209
+            result = re.findall(r'<iframe src="([^"]+)"', r, re.DOTALL)
210
+            if not result:
211
+                return []
212
+            url0 = result[0]
213
+        # resolve the collected embedded-player URL into playable streams
+        streams = []
+        for s in resolver.resolve(url0):
+            s["type"] = self.stream_type(s["url"])
+            streams.append(s)
+        return streams
214
+
215
+if __name__ == "__main__":
216
+    country= "lv"
217
+    c = Source(country)
218
+    if len(sys.argv)>1:
219
+        data= sys.argv[1]
220
+    else:
221
+        data = "home"
222
+    content = c.get_content(data)
223
+    for item in content:
224
+        print item
225
+    #cat = api.get_categories(country)
226
+    #chan = api.get_channels("lv")
227
+    #prog = api.get_programs(channel=6400)
228
+    #prog = api.get_programs(category=55)
229
+    #seas = api.get_seasons(program=6453)
230
+    #str = api.get_streams(660243)
231
+    #res = api.get_videos(802)
232
+    #formats = api.getAllFormats()
233
+    #det = api.detailed("1516")
234
+    #vid = api.getVideos("13170")
235
+    pass

+ 283
- 0
sources/mtgplay.py Прегледај датотеку

@@ -0,0 +1,283 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+
9
+
10
+try:
11
+    import json
12
+except:
13
+    import simplejson as json
14
+#!/usr/bin/env python
15
+# coding=utf8
16
+import urllib2, urllib
17
+import datetime, re, sys
18
+import ssl
19
+if "_create_unverified_context" in dir(ssl):
20
+    ssl._create_default_https_context = ssl._create_unverified_context
21
+
22
+from SourceBase import SourceBase
23
+
24
+API_URL = 'http://playapi.mtgx.tv/v3/'
25
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
26
+headers0 = headers2dict("""
27
+User-Agent: Mozilla/5.0 (Linux; U; Android 4.4.4; Nexus 5 Build/KTU84P) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30
28
+""")
29
+
30
+REGIONS = [
31
+    ("Latvia",None,"lv",""),
32
+    ("Estonia",None,"ee",""),
33
+    ("Lituania",None,"lt",""),
34
+    ("Sweden",None,"se",""),
35
+    ("Denmark",None,"dk",""),
36
+    ("Norway",None,"no",""),
37
+    ("Bulgaria",None,"bg","")
38
+]
39
+
40
+
41
+class Source(SourceBase):
42
+
43
+    def __init__(self,country="lv",cfg_path=None):
44
+        self.name = "mtgplay"
45
+        self.title = "Skaties.lv (TV3)"
46
+        self.img = "http://skaties.lv/touch-icon-192x192.png"
47
+        self.desc = "MTG skaties.lv satura skatīšanās (LNT,TV3, TV6 u.c.)"
48
+
49
+        self.country=country
50
+        self.pic_size = "327x250" #"1000x765"
51
+
52
+    def get_content(self, data):
53
+        print "[mtgplay] get_content:", data
54
+        if "::" in data:
55
+            data = data.split("::")[1]
56
+        if "/" in data:
57
+            citem,cid = data.split("/")
58
+            clist = ""
59
+        else:
60
+            clist = data.split("?")[0]
61
+            qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",data)))
62
+            citem,cid = ("","")
63
+            self.country = qs["country"] if "country" in qs else "lv"
64
+
65
+        content=[]
66
+        content.append(("..return", "back","","Return back"))
67
+
68
+        if clist=="home":
69
+            content.extend([
70
+                #("Search", "mtgplay::meklet?country=%s&term={0}"%self.country,"","Search videos"), ### TODO
71
+                ("TV Live", "mtgplay::videos?country=%s&order=title&type=live"%self.country,"","TV live streams(not always available)"),
72
+                ("Last videos", "mtgplay::videos?country=%s&order=-airdate"%self.country,"","Last aired videos"),
73
+                ("Categories", "mtgplay::categories?country=%s&order=name"%self.country,"","Categories"),
74
+                ("Channels", "mtgplay::channels?country=%s&order=id"%self.country,"","TV channels"),
75
+                ("Programs by name", "mtgplay::formats?country=%s&order=-title"%self.country,"","Programs by name"),
76
+                ("Programs by popularity", "mtgplay::formats?country=%s&order=-popularity"%self.country,"","Programs by popularity")
77
+            ])
78
+            return content
79
+
80
+        r = self.call(data)
81
+        if not r:
82
+            content.append(("Error", "","","Error reading '%s'"%data))
83
+            return content
84
+
85
+        if clist:
86
+            if r["_links"].has_key("prev"):
87
+                data2 = r["_links"]["prev"]["href"].replace(API_URL,"")
88
+                content.append(("Previous page", self.name+"::"+data2.encode("utf8"),"", "Goto previous page"))
89
+
90
+            if "_embedded" in r:
91
+                for item in r["_embedded"][clist]:
92
+                    if "title" in item:
93
+                        title = item["title"]
94
+                    elif "name" in item:
95
+                        title = item["name"]
96
+                    #data2 = self.name+"::"+"%s/%s"%(clist,item["id"])
97
+                    img = item["_links"]["image"]["href"].replace("{size}",self.pic_size) if "image" in item["_links"] else ""
98
+                    desc = item["summary"] if "summary" in item and item["summary"] else ""
99
+
100
+                    ### Video ###
101
+                    if clist=="videos":
102
+                        data2 = "videos/%s"%item["id"]
103
+                        summary = item["summary"] if item["summary"] else ""
104
+                        air_at = item["broadcasts"][0]["air_at"] if "broadcasts" in item and len(item["broadcasts"])>0 and "air_at" in item["broadcasts"][0] else ""
105
+                        if not air_at:
106
+                            air_at = item["publish_at"] if "publish_at" in item else ""
107
+                        air_at = air_at[0:16].replace("T"," ") if air_at else ""
108
+                        try: playable_to = item["broadcasts"][0]["playable_to"]
109
+                        except: playable_to =""
110
+                        playable_to = "(till "+playable_to[0:10].replace("T"," ")+")" if playable_to else ""
111
+                        duration = item["duration"] if "duration" in item else ""
112
+                        duration = str(datetime.timedelta(seconds=int(duration))) if duration else ""
113
+                        try:
114
+                            views = item["views"]["total"] if "views" in item and "total" in item["views"] else ""
115
+                            views = views+" views"
116
+                        except: views = ""
117
+                        desc = "Aired: %s %s\nDuration: %s %s\n\n%s"%(air_at, playable_to,duration,views,summary)
118
+
119
+                    ### Categories ###
120
+                    elif clist == "categories":
121
+                        #data2 = item["_links"]["formats"]["href"].replace(API_URL,"")
122
+                        data2 = "formats?category=%s"%item["id"]
123
+                        if "country" in qs: data2 += "&country="+qs["country"]
124
+                        if "category" in qs: data2 += "&category="+qs["category"]
125
+                        if "channel" in qs: data2 += "&channel="+qs["channel"]
126
+                        data2 += "&order=title"
127
+
128
+                    ### Channels ###
129
+                    elif clist == "channels":
130
+                        #data2 = item["_links"]["categories"]["href"].replace(API_URL,"")
131
+                        data2 = "categories?channel=%s"%item["id"]
132
+                        if "country" in qs: data2 += "&country="+qs["country"]
133
+                        if "category" in qs: data2 += "&category="+qs["category"]
134
+                        if "channel" in qs: data2 += "&channel="+qs["channel"]
135
+                        data2 += "&order=name"
136
+
137
+                    ### Formats (programs) ###
138
+                    elif clist == "formats":
139
+                        #data2 = item["_links"]["videos"]["href"].replace(API_URL,"")
140
+                        data2 = "seasons?format=%s"%item["id"]
141
+                        #if "country" in qs: data2 += "&country="+qs["country"]
142
+                        #if "category" in qs: data2 += "&category="+qs["category"]
143
+                        #if "channel" in qs: data2 += "&channel="+qs["channel"]
144
+                        data2 += "&order=title"
145
+                        air_at = item["latest_video"]["publish_at"] if "publish_at" in item["latest_video"] else ""
146
+                        air_at = air_at[0:16].replace("T"," ") if air_at else ""
147
+                        if air_at:
148
+                            desc = "Last video: %s\n"%air_at + desc
149
+
150
+                    ### Seasons ###
151
+                    elif clist == "seasons":
152
+                        #data2 = item["_links"]["videos"]["href"].replace(API_URL,"")
153
+                        data2 = "videos?season=%s"%item["id"]
154
+                        #if "country" in qs: data2 += "&country="+qs["country"]
155
+                        #if "category" in qs: data2 += "&category="+qs["category"]
156
+                        #if "channel" in qs: data2 += "&channel="+qs["channel"]
157
+                        data2 += "&order=title"
158
+
159
+                        summary = item["summary"] if "summary" in item and item["summary"] else ""
160
+                        try:
161
+                            latest_video = item["latest_video"]["publish_at"]
162
+                            latest_video = latest_video[0:16].replace("T"," ")
163
+                        except: latest_video = ""
164
+                        desc = ("%s\nLatest video: %s"%(summary,latest_video))
165
+
166
+                    content.append((title.encode("utf8"),self.name+"::"+data2.encode("utf8"),img.encode("utf8"),desc.encode("utf8")))
167
+
168
+            if r["_links"].has_key("next"):
169
+                data2 = r["_links"]["next"]["href"].replace(API_URL,"").encode("utf8")
170
+                content.append(("Next page", self.name+"::"+data2.encode("utf8"),"","Goto next page"))
171
+
172
+        elif citem:
173
+            item = r
174
+            if "title" in item:
175
+                title = item["title"]
176
+            elif "name" in item:
177
+                title = r["name"]
178
+            #data2 = self.name+"::"+"%s/%s"%(clist,item["id"])
179
+            img = item["_links"]["image"]["href"].replace("{size}",self.pic_size) if "image" in item["_links"] else ""
180
+            desc = item["summary"] if "summary" in item and item["summary"] else ""
181
+
182
+            dd = "videos/stream/%s"%cid
183
+            r2 = self.call(dd)
184
+            if "streams" in r2 and "hls" in r2["streams"]:
185
+                data2 = r2["streams"]["hls"]
186
+                content = (title.encode("utf8"),data2.encode("utf8"),img.encode("utf8"),desc.encode("utf8"))
187
+            elif "msg" in r2:
188
+                content = (r2["msg"].encode("utf8"),"","","")
189
+            else:
190
+                content = ("Error getting stream","","","")
191
+
192
+        else:
193
+            pass
194
+        return content
195
+
196
+    def is_video(self,data):
197
+        if "::" in data:
198
+            data = data.split("::")[1]
199
+        cmd = data.split("/")
200
+        if cmd[0]=="videos":
201
+            return True
202
+        else:
203
+            return False
204
+
205
+    def get_stream(self,id):
206
+        dd = "videos/stream/%s"%id
207
+        r2 = self.call(dd)
208
+        if "streams" in r2 and "hls" in r2["streams"]:
209
+            data2 = r2["streams"]["hls"]
210
+        else:
211
+            data2 = ""
212
+        return data2.encode("utf8")
213
+
214
+    def call_all(self, endpoint, params = None):
215
+        url = API_URL + endpoint
216
+        if params:
217
+            url += '?' + params
218
+        print "[TVPlay Api] url: ",url
219
+        result = []
220
+        while True:
221
+            content = self._http_request(url)
222
+            if content:
223
+                try:
224
+                    content = json.loads(content)
225
+                except Exception, ex:
226
+                    return {" Error " : "in call_api: %s" % ex}
227
+            else: break
228
+            if content.has_key("_embedded") and content["_embedded"].has_key(endpoint):
229
+                result.extend(content["_embedded"][endpoint])
230
+                pass
231
+            else: break
232
+            if content.has_key("_links") and content["_links"].has_key("next"):
233
+                url = content["_links"]["next"]["href"]
234
+            else: break
235
+        return result
236
+
237
+    def call(self, data,headers=headers0):
238
+        url = API_URL + data
239
+        #print "[TVPlay Api] url: ",url
240
+        result = []
241
+        content = self._http_request(url)
242
+        if content:
243
+            try:
244
+                result = json.loads(content)
245
+            except Exception, ex:
246
+                return None
247
+        return result
248
+
249
+    def _http_request0(self, url,headers=headers0):
250
+        try:
251
+            r = urllib2.Request(url, headers=headers)
252
+            u = urllib2.urlopen(r)
253
+            content = u.read()
254
+            u.close()
255
+            return content
256
+        except Exception as ex:
257
+            if "read" in ex:
258
+                content = ex.read()
259
+            else:
260
+                content = None
261
+            return content
262
+
263
+if __name__ == "__main__":
264
+    country= "lv"
265
+    c = Source(country)
266
+    if len(sys.argv)>1:
267
+        data= sys.argv[1]
268
+    else:
269
+        data = "home"
270
+    content = c.get_content(data)
271
+    for item in content:
272
+        print item
273
+    #cat = api.get_categories(country)
274
+    #chan = api.get_channels("lv")
275
+    #prog = api.get_programs(channel=6400)
276
+    #prog = api.get_programs(category=55)
277
+    #seas = api.get_seasons(program=6453)
278
+    #str = api.get_streams(660243)
279
+    #res = api.get_videos(802)
280
+    #formats = api.getAllFormats()
281
+    #det = api.detailed("1516")
282
+    #vid = api.getVideos("13170")
283
+    pass
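
The Source above walks the playapi responses HAL-style: items sit under "_embedded" and the paging link under "_links" (see call_all()). A toy illustration with made-up data, not an actual API response:

    page = {
        "_embedded": {"videos": [{"id": 1, "title": "a"}, {"id": 2, "title": "b"}]},
        "_links": {"next": {"href": "http://playapi.mtgx.tv/v3/videos?page=2"}},
    }
    items = page.get("_embedded", {}).get("videos", [])       # current page of items
    next_url = page.get("_links", {}).get("next", {}).get("href")  # follow until missing
    print len(items), next_url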

+ 216
- 0
sources/play24.py Прегледај датотеку

@@ -0,0 +1,216 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+
13
+import urllib2, urllib
14
+import datetime, re, sys
15
+import ssl
16
+if "_create_unverified_context" in dir(ssl):
17
+    ssl._create_default_https_context = ssl._create_unverified_context
18
+
19
+from SourceBase import SourceBase
20
+
21
+API_URL = 'http://replay.lsm.lv/'
22
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
23
+headers0 = headers2dict("""
24
+User-Agent: Mozilla/5.0 (Linux; U; Android 4.4.4; Nexus 5 Build/KTU84P) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30
25
+""")
26
+import HTMLParser
27
+h = HTMLParser.HTMLParser()
28
+
29
+class Source(SourceBase):
30
+
31
+    def __init__(self,country="lv",cfg_path=None):
32
+        self.name = "play24"
33
+        self.title = "Play24.lv"
34
+        self.img = "http://play24.lv/images/play24-logo-black.png"
35
+        self.desc = "play24.lv (Riga24TV) satura skatīšanās"
36
+
37
+        self.country=country
38
+
39
+    def get_content(self, data):
40
+        print "[play24] get_content:", data
41
+        if "::" in data:
42
+            data = data.split("::")[1]
43
+        path = data.split("?")[0]
44
+        clist = path.split("/")[0]
45
+        params = data[data.find("?"):] if "?" in data else ""
46
+        qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",params)))
47
+        lang = qs["lang"] if "lang" in qs else self.country
48
+
49
+        content=[]
50
+        content.append(("..return", "back","","Return back"))
51
+
52
+        if clist=="home":
53
+            content.extend([
54
+                ("Live stream", "play24::tiesraide","","TV live streams"),
55
+                ("Last videos", "play24::jaunakie","","Last videos"),
56
+                ("Categories", "play24::kategorijas","","Categories"),
57
+                ("Programs", "play24::raidijumi","","Programs"),
58
+             ])
59
+            return content
60
+
61
+        ### Latest videos ###
62
+        elif clist=="jaunakie":
63
+            url = "http://play24.lv/"
64
+            r = self._http_request(url)
65
+            for item in re.findall(' <div class="top-article__image">.*?<a class="top-article__image-link" href="([^"]+)">.*?<img.+?src="([^"]+)".+?alt="([^"]+)" />.+?</picture>', r, re.DOTALL):
66
+                title = item[2]
67
+                title =  h.unescape(title.decode("utf8")).encode("utf8")
68
+                img = item[1]
69
+                data2 = item[0].replace("http://play24.lv/","")
70
+                desc = title
71
+                content.append((title,self.name+"::"+data2,img,desc))
72
+            return content
73
+
74
+        ### Categories ###
75
+        elif clist=="kategorijas":
76
+            url = "http://play24.lv/"
77
+            r = self._http_request(url)
78
+            r2 = r[r.find('<div class="footer-navigation">'):]
79
+            for item in re.findall('<a href="http://play24.lv/(kategorija/[^"]+)" class="navigation__link">([^<]+)</a>', r2, re.DOTALL):
80
+                title = item[1]
81
+                data2 = item[0]
82
+                img = ""
83
+                desc = title
84
+                content.append((title,self.name+"::"+data2,img,desc))
85
+            return content
86
+
87
+        elif clist=="kategorija":
88
+            url = "http://play24.lv/"+data
89
+            r = self._http_request(url)
90
+            for article in re.findall(r"<article\b[^>]*>(.+?)</article>", r, re.DOTALL):
91
+                m = re.search('<a class="masonry-item__link" href="http://play24\.lv/([^"]+)">', article, re.DOTALL)
92
+                data2 = m.group(1) if m else ""
93
+                m = re.search('<img src="([^"]+)" alt="([^"]+)" />', article, re.DOTALL)
94
+                if m:
95
+                    img = m.group(1)
96
+                    title = m.group(2)
97
+                    title =  h.unescape(title.decode("utf8")).encode("utf8")
98
+                else:
99
+                    img = ""
100
+                    title = ""
101
+                m = re.search(r'<span class="masonry-item__tags">\s+<a href="([^"]+)">([^<]+)</a>.*?</span>', article, re.DOTALL)
102
+                progr = m.group(2) if m else ""
103
+                m = re.search('<span class="masonry-item__date">([^<]+)</span>', article, re.DOTALL)
104
+                date = m.group(1).strip() if m else ""
105
+
106
+                if date:
107
+                    title = title + " (%s %s)"%(date,progr)
108
+                desc = title + "\n%s - %s"%(progr,date)
109
+                content.append((title,self.name+"::"+data2,img,desc))
110
+            m = re.search(r'<li><a href="http://play24\.lv/([^"]+)" rel="next">&raquo;</a></li>', r, re.DOTALL)
111
+            if m:
112
+                data2 = m.group(1)
113
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
114
+            return content
115
+
116
+        ### Shows (programs)
117
+        elif clist=="raidijumi":
118
+            url = "http://play24.lv/"
119
+            r = self._http_request(url)
120
+            for item in re.findall(r'<li class="tag-box__item">.*?<a href="http://play24\.lv/(birka/[^"]+)">([^<]+)</a>.*?</li>', r, re.DOTALL):
121
+                title = item[1]
122
+                title =  h.unescape(title.decode("utf8")).encode("utf8")
123
+                data2 = item[0]
124
+                img = ""
125
+                desc = title
126
+                content.append((title,self.name+"::"+data2,img,desc))
127
+            return content
128
+
129
+        ### Programs (video list)
130
+        elif clist=="birka":
131
+            url = "http://play24.lv/"+data
132
+            r = self._http_request(url)
133
+            for item in re.findall(r'<article\b[^>]*>.+?<a class="masonry-item__link" href="http://play24.lv/([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)" />.*?<span class="masonry-item__tags">.+?<a href="([^"]+)">([^<]+)</a>.*?<span class="masonry-item__date">([^<]+)</span>.*?</article>', r, re.DOTALL):
134
+                title = item[2]
135
+                title =  h.unescape(title.decode("utf8")).encode("utf8")
136
+                title = title + " (%s)"%item[5].strip()
137
+                img = item[1]
138
+                data2 = item[0]
139
+                desc = title + "\n%s - %s"%(item[4],item[5].strip())
140
+                content.append((title,self.name+"::"+data2,img,desc))
141
+            m = re.search(r'<li><a href="http://play24\.lv/([^"]+)" rel="next">&raquo;</a></li>', r, re.DOTALL)
142
+            if m:
143
+                data2 = m.group(1)
144
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
145
+            return content
146
+
147
+        elif clist == "video" or clist == "tiesraide":
148
+            if clist == "video":
149
+                url = "http://play24.lv/"+data
150
+                r = self._http_request(url)
151
+                # var ov_video_id = '59422';
152
+                m = re.search(r"var ov_video_id = '(\d+)';", r, re.DOTALL)
153
+                if m:
154
+                    id = m.group(1)
155
+                else:
156
+                    return ("No stream found %s"%data,"","","No stream found")
157
+                m = re.search('<meta name="description" content="([^"]+)" />', r, re.DOTALL)
158
+                desc = m.group(1) if m else ""
159
+                desc = h.unescape(desc.decode("utf8")).encode("utf8")
160
+
161
+                url = "http://player.tvnet.lv/v/%s"%id
162
+            else:
163
+                url = "http://player.tvnet.lv/l/11"
164
+                desc = ""
165
+            r = self._http_request(url)
166
+            m = re.search('<h1 class="static title">.+?<a href="[^"]+">([^<]+)</a>', r, re.DOTALL)
167
+            title = m.group(1) if m else ""
168
+            s = {}
169
+            for item in re.findall('source src="([^"]+)" data-stream="([^"]+)" data-quality="([^"]+)"', r, re.DOTALL):
170
+                s[item[1]] = (item[0],item[2])
171
+            data2 = ""
172
+            for t in ("hls","http","rtmp"):
173
+                if t in s:
174
+                    data2 = s[t][0]
175
+                    break
176
+            return (title,data2,"",desc)
177
+
178
+
179
+    def is_video(self,data):
180
+        if "::" in data:
181
+            data = data.split("::")[1]
182
+        cmd = data.split("/")
183
+        if cmd[0] in ("video","tiesraide"):
184
+            return True
185
+        else:
186
+            return False
187
+
188
+    def call(self, data,headers=headers0,lang=""):
189
+        if not lang: lang = self.country
190
+        url = API_URL + data
191
+        #print "[TVPlay Api] url: ",url
192
+        result = []
193
+        content = self._http_request(url)
194
+        return content
195
+
196
+if __name__ == "__main__":
197
+    country= "lv"
198
+    c = Source(country)
199
+    if len(sys.argv)>1:
200
+        data= sys.argv[1]
201
+    else:
202
+        data = "home"
203
+    content = c.get_content(data)
204
+    for item in content:
205
+        print item
206
+    #cat = api.get_categories(country)
207
+    #chan = api.get_channels("lv")
208
+    #prog = api.get_programs(channel=6400)
209
+    #prog = api.get_programs(category=55)
210
+    #seas = api.get_seasons(program=6453)
211
+    #str = api.get_streams(660243)
212
+    #res = api.get_videos(802)
213
+    #formats = api.getAllFormats()
214
+    #det = api.detailed("1516")
215
+    #vid = api.getVideos("13170")
216
+    pass
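
Note: play24.py addresses every menu entry as "play24::<path>", where <path> is one of home, jaunakie, kategorijas, raidijumi, kategorija/..., birka/..., video/... or tiesraide, and is_video() decides playability from the first path segment. A minimal sketch of walking the top-level menu from a script, assuming the module is importable as sources.play24:

from sources.play24 import Source

src = Source()
for title, data, img, desc in src.get_content("play24::home"):
    # the "home" menu is built locally, so this runs without network access
    marker = "[video]" if src.is_video(data) else ""
    print title, "->", data, marker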

+ 310
- 0
sources/replay.py Прегледај датотеку

@@ -0,0 +1,310 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+
13
+import urllib2, urllib
14
+import datetime, re, sys
15
+import ssl
16
+if "_create_unverified_context" in dir(ssl):
17
+    ssl._create_default_https_context = ssl._create_unverified_context
18
+
19
+from SourceBase import SourceBase
20
+import util
21
+
22
+API_URL = 'http://replay.lsm.lv/%s/'
23
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
24
+headers0 = headers2dict("""
25
+User-Agent: Mozilla/5.0 (Linux; U; Android 4.4.4; Nexus 5 Build/KTU84P) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30
26
+""")
27
+import HTMLParser
28
+h = HTMLParser.HTMLParser()
29
+from YouTubeVideoUrl import YouTubeVideoUrl
30
+
31
+class Source(SourceBase):
32
+
33
+    def __init__(self,country="lv",cfg_path=None):
34
+        self.name = "replay"
35
+        self.title = "Replay.lv (LTV)"
36
+        self.img = "http://replay.lsm.lv/apple-touch-icon.png"
37
+        self.desc = "LSM replay.lv satura skatīšanās"
38
+
39
+        self.country=country
40
+        self.pic_size = "327x250" #"1000x765"
41
+
42
+    def get_content(self, data):
43
+        print "[replay] get_content:", data
44
+        if "::" in data:
45
+            data = data.split("::")[1]
46
+        path = data.split("?")[0]
47
+        clist = path.split("/")[0]
48
+        params = data[data.find("?"):] if "?" in data else ""
49
+        qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",params)))
50
+        lang = qs["lang"] if "lang" in qs else self.country
51
+
52
+        content=[]
53
+        content.append(("..return", "back","","Return back"))
54
+
55
+        if clist=="home":
56
+            content.extend([
57
+                ("Live streams", "replay::tiesraide","","TV live streams"),
58
+                ("Search LV", "replay::search/?term={0}&lang=lv","","Search content LV"),
59
+                ("Last videos LV", "replay::visi/jaunakie/?source=ltv&lang=lv","","Last aired videos LV"),
60
+                ("Last videos by categories LV", "replay::kategorijas/?lang=lv","","Last videos by categories LV"),
61
+                ("All programs LV", "replay::raidijumi/?type=video","","All programs by name LV"),
62
+                ("Programs by categories LV", "replay::categories?lang=lv","","All programs by categories LV"),
63
+                #("Channels", "replay::channels?language=%s"%self.country,"","TV channels"),
64
+                ("Videos by popularity LV", "replay::visi/popularie/?source=ltv&lang=lv","","Programs by popularity"),
65
+
66
+                ("Search RU", "replay::search/?term={0}&lang=ru","","Search content RU"),
67
+                ("Last videos RU", "replay::vse/novie/?source=ltv&lang=ru","","Last aired videos RU"),
68
+                ("Last videos by categories RU", "replay::kategorijas/?lang=ru","","Last videos by categories RU"),
69
+                ("All programs RU", "replay::peredachi/?lang=ru&type=video","","All programs by name"),
70
+                ("Programs by categories RU", "replay::categories?lang=ru","","Programs by categories RU")
71
+            ])
72
+            return content
73
+
74
+        ### program categories ###
75
+        elif clist=="categories":
76
+            url = "http://replay.lsm.lv/lv/raidijumi/?lang=lv&type=video" if lang =="lv" else "http://replay.lsm.lv/ru/peredachi/?lang=ru&type=video"
77
+            r = self._http_request(url)
78
+            for item in re.findall(r'<a .+href="(\?lang=\w+&type=video&theme=\d+)">([^<]+)</a>\t', r):
79
+                title = item[1]
80
+                data2 = url.split("?")[0]+item[0]
81
+                data2 = data2.replace(API_URL%lang,"")
82
+                img = ""
83
+                desc = title
84
+                content.append((title,self.name+"::"+data2,img,desc))
85
+            return content
86
+
87
+        ### categories for the latest shows ###
88
+        elif clist=="kategorijas":
89
+            url = "http://replay.lsm.lv/lv/" if lang =="lv" else "http://replay.lsm.lv/ru/"
90
+            r = self._http_request(url)
91
+            for item in re.findall(r'<a href="/(lv|ru)/kategorija/(\w+)/">.+?<i class="[^"]+"></i>.+?<span>([^<]+)</span>', r, re.DOTALL):
92
+                title = item[2]
93
+                data2 = "kategorija/%s/?lang=%s"%(item[1],item[0])
94
+                img = ""
95
+                desc = title
96
+                content.append((title,self.name+"::"+data2,img,desc))
97
+            return content
98
+
99
+        ### Live-stream channel list
100
+        elif path=="tiesraide":
101
+            url = "http://replay.lsm.lv/styles/main.css"
102
+            r= self._http_request(url)
103
+            for item in re.findall(r'channel-logo--(\w+)\{background-image:url\("([^"]+\.png)"', r):
104
+                ch = item[0]
105
+                title = ch.upper()
106
+                data2 = "tiesraide/%s/"%ch
107
+                img = "http://replay.lsm.lv"+item[1]
108
+                veids = "video "if "tv" in ch else "audio"
109
+                desc = title+" tiesraide (%s)"%veids
110
+                content.append((title,self.name+"::"+data2,img,desc))
111
+            return content
112
+
113
+        ### Channel live stream
114
+        elif clist == "tiesraide" and "/" in data:
115
+            ch = data.split('/')[1]
116
+            veids = "video" if "tv" in ch else "audio"
117
+            #url = "http://replay.lsm.lv/lv/tiesraide/ltv7/"
118
+            url = "http://replay.lsm.lv/lv/tiesraide/%s/"%ch
119
+            r= self._http_request(url)
120
+
121
+            m = re.search('%s/">.+?<h5>([^<]+)+</h5>.*?<time>([^<]+)</time>'%ch, r, re.DOTALL)
122
+            tagad = m.group(1).strip() if m else ""
123
+            laiks = m.group(2).strip() if m else ""
124
+            laiks = h.unescape(laiks).encode("utf8")
125
+            m = re.search("<h1>([^<]+)</h1>", r)
126
+            title = m.group(1).strip() if m else path.split("/")[1].upper()
127
+            title = "%s - %s (%s)"%(title,tagad,laiks)
128
+
129
+            if veids == "video":
130
+                m = re.search('<div class="video"><iframe.+src="([^"]+)"', r)
131
+                if not m:
132
+                    content=("No stream found %s"%data,"","","No stream found")
133
+                    return content
134
+                url = m.group(1)
135
+                headers = headers2dict("""
136
+            User-Agent: Mozilla/5.0 (Linux; U; Android 4.4.4; Nexus 5 Build/KTU84P) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30
137
+            Referer: http://replay.lsm.lv/lv/ieraksts/ltv/70398/tiesa-runa.-lielbritanija-gatavojas-referendumam-par-tu/
138
+                    """)
139
+                r = self._http_request(url,headers=headers)
140
+
141
+                m = re.search('<div class="video-player"><iframe.+src="([^"]+)"', r)
142
+                if not m:
143
+                    content=("No stream found %s"%data,"","","No stream found")
144
+                    return content
145
+                url = m.group(1)
146
+
147
+                r = self._http_request(url,headers=headers)
148
+                m = re.search('"([^"]+m3u8[^"]+)"', r)
149
+                if not m:
150
+                    content=("No stream found %s"%data,"","","No stream found")
151
+                    return content
152
+                data2 = m.group(1).replace("\\","")
153
+                #r = self._http_request(data2, headers=headers)
154
+
155
+            else: # audio
156
+                lrn = ch.replace("lr","")
157
+                url = "http://www.latvijasradio.lsm.lv/lv/tiesraide/?channel=%s"%lrn
158
+                r = self._http_request(url)
159
+                m = re.search('"file":"([^"]+?m3u8.*?)"', r)
160
+                if not m:
161
+                    content=("No stream found %s"%data,"","","No stream found")
162
+                    return content
163
+                data2 = m.group(1).replace("\\","")
164
+
165
+            img = ""
166
+            desc = ""
167
+            content =(title,data2,img,desc)
168
+            return content
169
+
170
+        #m = re.search(r'(\?page=\d+)" class=" paging__prev', r, re.IGNORECASE)
171
+        #if m:
172
+        #    data = re.sub("\?page=\d+", "", data)
173
+        #    data2 = data+m.group(1)
174
+        #    content.append(("Previous page",self.name+"::"+data2,"","Previous page"))
175
+
176
+        r = self.call(data, lang=lang)
177
+        if not r:
178
+            return content
179
+
180
+        if clist == "search":
181
+            #for r2 in re.findall('<article itemtype="http://schema.org/Article" itemscope class="thumbnail thumbnail--default ">(.+?)</article>', r2, re.DOTALL):
182
+            for item in re.findall('itemprop="image" data-image="([^"]+)".+?<figcaption><h5 itemprop="name"><a itemprop="url" href="([^<]+)">([^<]+)</a></h5></figcaption>', r):
183
+                title = item[2]
184
+                data2 = item[1].replace("/%s/"%lang,"")+"?lang=%s"%lang
185
+                img = "http://replay.lsm.lv" + item[0]
186
+                desc  = title
187
+                content.append((title,self.name+"::"+data2,img,desc))
188
+
189
+            #for item in re.findall('itemprop="image" data-image="([^"]+)".+?<figcaption><h4 itemprop="about"><a href="([^"]+)">([^<]+)</a></h4>.*?<h5 itemprop="name"><a itemprop="url" href="([^"]+)">([^<]+)</a></h5>.+?datetime="([^"]+)" class="thumbnail__date ">([^<]+)</time>', r2):
190
+            for item in re.findall('itemprop="image" data-image="([^"]+)".+? class="icon-(ltv|lr).+?<figcaption><h4 itemprop="about"><a href="([^"]+)">([^<]+)</a></h4>.*?<h5 itemprop="name"><a itemprop="url" href="([^"]+)">([^<]+)</a></h5>.+?datetime="([^"]+)" class="thumbnail__date ">([^<]+)</time>', r):
191
+                if item[1]=="lr":continue
192
+                title = "%s - %s (%s)"%(item[3],item[5],item[7])
193
+                data2 = item[4].replace("/%s/"%lang,"")+"?lang=%s"%lang
194
+                img = item[0].replace("https:","http:")
195
+                desc = title
196
+                content.append((title,self.name+"::"+data2,img,desc))
197
+
198
+        ### Shows (programs) ###
199
+        elif clist in ( "raidijumi","peredachi"):
200
+            for item in re.findall('<li itemprop="name"><a href="([^"]+)" itemprop="url">([^<]+)', r):
201
+            #for item in re.findall('<li itemprop="name"><a href="([^"]+)" itemprop="url">([^<]+)</a></li>', r):
202
+                title = item[1]
203
+                data2 = item[0].replace("/%s/"%lang,"")+"?lang=%s"%lang
204
+                img = ""
205
+                desc  = ""
206
+                content.append((title,self.name+"::"+data2,img,desc))
207
+
208
+        ### Show recordings, special lists ###
209
+        elif clist in ( "visi","vse",):
210
+            for item in re.findall('(?i)<figure><a href="([^"]+)" itemprop="image" data-image="([^"]+)".+class="thumbnail__duration">([^<]+)</time></figure><figcaption><h4 itemprop="about"><a href="[^"]+">([^<]+)</a></h4>.+>([^<]+).*</h5>.+>([^<]+)</time></figcaption>', r):
211
+                title = item[3]
212
+                data2 = item[0].replace("/%s/"%lang,"")+"?lang=%s"%lang
213
+                img = item[1].replace("https:","http:")
214
+                desc  = "%s - %s\n%s"%(item[5],item[2],item[4])
215
+                content.append((title,self.name+"::"+data2,img,desc))
216
+
217
+        ### Show recordings (videos)
218
+        elif clist in ("raidijums","peredacha","kategorija"):
219
+            for item in re.findall('<article .+ href="([^"]+)".+image="([^"]+)".+class="thumbnail__duration">([^<]+).+">([^<]+).+class="thumbnail__date ">([^"]+)</time></figcaption></article>', r):
220
+                title = item[3]
221
+                data2 = item[0].replace("/%s/"%lang,"")+"?lang=%s"%lang
222
+                img = item[1].replace("https:","http:")
223
+                desc = "%s - %s"%(item[4],item[2])
224
+                content.append((title,self.name+"::"+data2,img,desc))
225
+
226
+        ### Recording (video) ###
227
+        elif clist in ("ieraksts","statja"):
228
+            m = re.search('src="([^"]+)"></iframe>', r)
229
+            if m:
230
+                url2 = m.group(1)
231
+                headers = headers2dict("""
232
+User-Agent: Mozilla/5.0 (Linux; U; Android 4.4.4; Nexus 5 Build/KTU84P) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30
233
+Referer: http://replay.lsm.lv/lv/ieraksts/ltv/70398/tiesa-runa.-lielbritanija-gatavojas-referendumam-par-tu/
234
+            """)
235
+                r2 = self._http_request(url2,headers=headers)
236
+                m = re.search('"file":"([^"]+)', r2)
237
+                if m:
238
+                    data2 = m.group(1).replace("\\","")
239
+                    m = re.search('"idstring":"([^"]+)', r2)
240
+                    title = m.group(1) if m else ""
241
+                    title = title.decode("unicode-escape").encode("utf8")
242
+                    title = title.replace("\n","")
243
+                    img = ""
244
+                    desc = ""
245
+                    if "youtube" in data2:
246
+                        video_id = re.search(r"/watch\?v=([^&]+)",data2).group(1)
247
+                        data2 = YouTubeVideoUrl().extract(video_id)
248
+                        if not data2:
249
+                            content=("No stream found %s"%data,"","","No stream found")
250
+                            return content
251
+                    content =(title,data2,img,desc)
252
+                    return content
253
+            content=("No stream found %s"%data,"","","No stream found")
254
+            return content
255
+
256
+        m = re.search(r'href="\?([^"]+)" class=" paging__next', r)
257
+        if m:
258
+            page = int(re.search("page=(\d+)",m.group(1)).group(1))
259
+            if "page="in data:
260
+                data2 = re.sub("page=\d+","page=%i"%page,data)
261
+            else:
262
+                if "?" in data:
263
+                    data2 =data+"&page=%i"%page
264
+                else:
265
+                    data2 =data+"?page=%i"%page
266
+            content.append(("Next page",self.name+"::"+data2,"","Next page"))
267
+
268
+        return content
269
+
270
+    def is_video(self,data):
271
+        if "::" in data:
272
+            data = data.split("::")[1]
273
+        cmd = data.split("/")
274
+        if cmd[0] in ("ieraksts","statja"):
275
+            return True
276
+        elif cmd[0]=="tiesraide" and len(cmd)>1:
277
+            return True
278
+        else:
279
+            return False
280
+
281
+    def call(self, data,headers=headers0,lang=""):
282
+        if not lang: lang = self.country
283
+        url = API_URL%lang + data
284
+        #print "[TVPlay Api] url: ",url
285
+        result = []
286
+        content = self._http_request(url, headers=headers)
287
+        return content
288
+
289
+
290
+if __name__ == "__main__":
291
+    country= "lv"
292
+    c = Source(country)
293
+    if len(sys.argv)>1:
294
+        data= sys.argv[1]
295
+    else:
296
+        data = "home"
297
+    content = c.get_content(data)
298
+    for item in content:
299
+        print item
300
+    #cat = api.get_categories(country)
301
+    #chan = api.get_channels("lv")
302
+    #prog = api.get_programs(channel=6400)
303
+    #prog = api.get_programs(category=55)
304
+    #seas = api.get_seasons(program=6453)
305
+    #str = api.get_streams(660243)
306
+    #res = api.get_videos(802)
307
+    #formats = api.getAllFormats()
308
+    #det = api.detailed("1516")
309
+    #vid = api.getVideos("13170")
310
+    pass
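
Note: replay.py keeps the language inside the data string as a lang= query parameter, and call() expands API_URL with it before appending the path. A minimal standalone sketch of that mapping (the example path is illustrative):

import re

API_URL = 'http://replay.lsm.lv/%s/'

def build_url(data, default_lang="lv"):
    # data looks like "visi/jaunakie/?source=ltv&lang=lv"
    m = re.search(r"lang=(\w+)", data)
    lang = m.group(1) if m else default_lang
    return API_URL % lang + data

#print build_url("visi/jaunakie/?source=ltv&lang=lv")
# -> http://replay.lsm.lv/lv/visi/jaunakie/?source=ltv&lang=lv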

+ 235
- 0
sources/serialguru.py Прегледај датотеку

@@ -0,0 +1,235 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+
13
+import urllib2, urllib
14
+import datetime, re, sys,os
15
+import ConfigParser
16
+import ssl
17
+if "_create_unverified_context" in dir(ssl):
18
+    ssl._create_default_https_context = ssl._create_unverified_context
19
+
20
+from SourceBase import SourceBase
21
+
22
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
23
+import HTMLParser
24
+h = HTMLParser.HTMLParser()
25
+
26
+class Source(SourceBase):
27
+
28
+    def __init__(self,country="",cfg_path=None):
29
+        self.hidden = True  # hide non-working sources from the menu
30
+        self.name = "serialguru"
31
+        self.title = "SerialGURU.ru"
32
+        self.img = "http://serialguru.ru/images/xlogo_new.png.pagespeed.ic.0sre2_2OJN.png"
33
+        self.desc = "Serialguru.ru portāla satura skatīšanās"
34
+        self.country=country
35
+        self.headers = headers2dict("""
36
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36
37
+Referer: http://serialguru.ru/
38
+""")
39
+        self.headers2 = headers2dict("""
40
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36
41
+X-Requested-With: XMLHttpRequest
42
+Content-Type: application/x-www-form-urlencoded; charset=UTF-8
43
+Referer: http://serialguru.ru/
44
+""")
45
+        self.url = "http://serialguru.ru/"
46
+        #self.login()
47
+
48
+    def login(self,user="",password=""):
49
+        return True
50
+
51
+    def get_content(self, data):
52
+        print "[tvdom] get_content:", data
53
+        if "::" in data:
54
+            data = data.split("::")[1]
55
+        path = data.split("?")[0]
56
+        clist = path.split("/")[0]
57
+        params = data[data.find("?"):] if "?" in data else ""
58
+        qs = dict(map(lambda x:x.split("="),re.findall("[%\w]+=\w+",params)))
59
+        lang = qs["lang"] if "lang" in qs else self.country
60
+
61
+        content=[]
62
+        content.append(("..return", "back","","Return back"))
63
+
64
+        if clist=="home":
65
+            content.extend([
66
+                ("Search", "serialguru::search/{0}","","Search content"),
67
+                ("Last", "serialguru::last","","Last series"),
68
+                ("Series", "serialguru::serials","","TV Series"),
69
+                ("Shows", "serialguru::tv","","TV Shows"),
70
+                ("Animations", "serialguru::mult","","Animation series"),
71
+
72
+                #("Archive - all", "tvdom::arhivs_all","","Video archive all"),
73
+            ])
74
+            return content
75
+
76
+        elif data == "last":
77
+            r = self.call("")
78
+            for item in re.findall(r'<li><a href="(http://serialguru\.ru/[^"]+)"><i>([^<]+)</i>  <i>([^<]+)</i> <b>([^<]+)</b></a></li>', r, re.DOTALL):
79
+                title = item[1] + " - " + item[2]+"/"+item[3]
80
+                img = ""
81
+                data2 = item[0].replace(self.url, "")
82
+                desc = title
83
+                content.append((title, self.name+"::"+data2, img, desc))
84
+            return content
85
+
86
+        elif data=="serials":
87
+            content.extend([
88
+                ("All", "serialguru::serials?o=0&t=S","","All series"),
89
+                ("Russian", "serialguru::serials?c%5B%5D=53&c%5B%5D=61&c%5B%5D=33&c%5B%5D=42&c%5B%5D=31&o=0&t=S","","Russian countries series"),
90
+                ("English", "serialguru::serials?c%5B%5D=27&c%5B%5D=26&c%5B%5D=43&c%5B%5D=30&c%5B%5D=34&c%5B%5D=25&o=0&t=S","","English countries series"),
91
+                ("Europe", "serialguru::serials?c%5B%5D=29&c%5B%5D=66&c%5B%5D=44&c%5B%5D=28&c%5B%5D=51&c%5B%5D=65&c%5B%5D=62&c%5B%5D=40&c%5B%5D=45&c%5B%5D=68&c%5B%5D=59&c%5B%5D=39&c%5B%5D=35&c%5B%5D=47&o=0&t=S","","European countries series"),
92
+                ("Other", "serialguru::serials?c%5B%5D=36&c%5B%5D=32&c%5B%5D=67&c%5B%5D=63&c%5B%5D=60&c%5B%5D=64&c%5B%5D=38&c%5B%5D=52&c%5B%5D=41&c%5B%5D=58&c%5B%5D=57&c%5B%5D=37&c%5B%5D=50&c%5B%5D=46&o=0&t=S","","Other countries series"),
93
+                #("Archive - all", "tvdom::arhivs_all","","Video archive all"),
94
+            ])
95
+            return content
96
+
97
+        elif data=="tv":
98
+            content.extend([
99
+                ("All", "serialguru::tv?o=0&t=S","","All series"),
100
+                ("Russian", "serialguru::tv?c%5B%5D=53&c%5B%5D=61&c%5B%5D=33&c%5B%5D=42&c%5B%5D=31&o=0&t=P","","Russian countries TV shows"),
101
+                ("English", "serialguru::tv?c%5B%5D=27&c%5B%5D=26&c%5B%5D=43&c%5B%5D=30&c%5B%5D=34&c%5B%5D=25&o=0&t=P","","English countries TV shows"),
102
+                ("Europe", "serialguru::tv?c%5B%5D=29&c%5B%5D=66&c%5B%5D=44&c%5B%5D=28&c%5B%5D=51&c%5B%5D=65&c%5B%5D=62&c%5B%5D=40&c%5B%5D=45&c%5B%5D=68&c%5B%5D=59&c%5B%5D=39&c%5B%5D=35&c%5B%5D=47&o=0&t=P","","European countries TV shows series"),
103
+                ("Other", "serialguru::tv?c%5B%5D=36&c%5B%5D=32&c%5B%5D=67&c%5B%5D=63&c%5B%5D=60&c%5B%5D=64&c%5B%5D=38&c%5B%5D=52&c%5B%5D=41&c%5B%5D=58&c%5B%5D=57&c%5B%5D=37&c%5B%5D=50&c%5B%5D=46&o=0&t=P","","Other countries TV shows"),
104
+                #("Archive - all", "tvdom::arhivs_all","","Video archive all"),
105
+            ])
106
+            return content
107
+
108
+        elif data=="mult":
109
+            content.extend([
110
+                ("All", "serialguru::mult?o=0&t=S","","All series"),
111
+                ("Russian", "serialguru::mult?c%5B%5D=53&c%5B%5D=61&c%5B%5D=33&c%5B%5D=42&c%5B%5D=31&o=0&t=M","","Russian countries animantions"),
112
+                ("English", "serialguru::mult?c%5B%5D=27&c%5B%5D=26&c%5B%5D=43&c%5B%5D=30&c%5B%5D=34&c%5B%5D=25&o=0&t=M","","English countries animantions"),
113
+                ("Europe", "serialguru::mult?c%5B%5D=29&c%5B%5D=66&c%5B%5D=44&c%5B%5D=28&c%5B%5D=51&c%5B%5D=65&c%5B%5D=62&c%5B%5D=40&c%5B%5D=45&c%5B%5D=68&c%5B%5D=59&c%5B%5D=39&c%5B%5D=35&c%5B%5D=47&o=0&t=M","","European countries animantions"),
114
+                ("Other", "serialguru::mult?c%5B%5D=36&c%5B%5D=32&c%5B%5D=67&c%5B%5D=63&c%5B%5D=60&c%5B%5D=64&c%5B%5D=38&c%5B%5D=52&c%5B%5D=41&c%5B%5D=58&c%5B%5D=57&c%5B%5D=37&c%5B%5D=50&c%5B%5D=46&o=0&t=M","","Other countries animantions"),
115
+                #("Archive - all", "tvdom::arhivs_all","","Video archive all"),
116
+            ])
117
+            return content
118
+
119
+        elif clist=="search":
120
+            if data.split("/")>1:
121
+                term = data.split("/")[1]
122
+            else:
123
+                return content
124
+            r = self.call("main/autocomplete?term=%s"%(term))
125
+            if r=="null":
126
+                return content
127
+            js = json.loads(r)
128
+            for item in js:
129
+                title = item["name"].encode("utf8")
130
+                data2 = item["url"].encode("utf8")
131
+                img = "http://serialguru.ru/uploads/cover/"+item["image_s"].replace("_s","")+".jpg"
132
+                rating = item["rating"].encode("utf8") if item["rating"] else ""
133
+                desc = title +"\nRating:%s (%s+/%s-)"%(rating,item["plus_cnt"].encode("utf8"),item["minus_cnt"].encode("utf8"))
134
+                content.append((title,self.name+"::"+data2,img,desc))
135
+            return content
136
+
137
+        elif path=="serials" or path=="tv" or path=="mult":
138
+            if path=="serials" and not "cat%5B%5D" in data:
139
+                #content.append(("All", "serialguru::"+data+"&cat%5B%5D=","","All series"))
140
+                categories = self.get_categories(path)
141
+                for c in categories:
142
+                    content.append((c[1], "serialguru::"+data+"&cat%5B%5D="+c[0],"",c[1]))
143
+                return content
144
+            else:
145
+                r = self.call("main/load", params[1:], headers=self.headers2)
146
+                for item in re.findall('<li><a href="([^"]+)"><div>.*?<img src="([^"]+)" alt="([^"]+)"[^<]*?><p>([^<]+)<i><span class="r">([^<]+)</span> <span class="plus">([^<]+)</span> <span class="minus">([^<]+)</span></i></p></div>([^<]+)</a></li>', r, re.DOTALL):
147
+                    title = "%s (%s)"%(item[2],item[3])
148
+                    img = item[1].replace("_s.jpg","_l.jpg")
149
+                    data2 = item[0].replace(self.url,"")
150
+                    desc = title +"\nRating:%s (%s+/%s-)"%(item[4],item[5],item[6])
151
+                    content.append((title,self.name+"::"+data2,img,desc))
152
+                page=int(re.search("o=(\d+)",data).group(1))
153
+                data2 = re.sub("o=(\d+)","o=%s"%(page+15),data)
154
+                content.append(("Next page",self.name+"::"+data2,"","Go to next page"))
155
+                return content
156
+
157
+
158
+        ### Show (seasons and episodes)
159
+        else:
160
+            r = self.call(clist)
161
+            title0=re.search('<h2>(.+?)</h2>',r,re.DOTALL).group(1)
162
+            m=re.search('<div class="description">(.+?)</div>',r,re.DOTALL)
163
+            desc0=m.group(1) if m else ""
164
+            desc0=desc0.replace("<p>","").replace("</p>","\n").replace('<a href="#">ПОКАЗАТЬ ПОЛНОСТЬЮ</a>',"")
165
+            desc0=title0+"\n"+desc0.strip()
166
+            img0=""
167
+            m = re.search("http://serialguru.ru/main/playlist/\d+",r)
168
+            if m:
169
+                url = m.group()
170
+            else:
171
+                raise Exception ("No stream found")
172
+            r = self._http_request(url)
173
+            js = json.loads(r,"utf8")
174
+            if not "/" in data: # sezonas
175
+                for i,item in enumerate(js["playlist"]):
176
+                    title = title0 + " - " + item["comment"].encode("utf8")
177
+                    img = img0
178
+                    data2 = "%s/%s"%(data,i)
179
+                    desc = desc0
180
+                    content.append((title,self.name+"::"+data2,img,desc))
181
+            else:
182
+                snum = int(data.split("/")[1])
183
+                title1 = js["playlist"][snum]["comment"].encode('utf8')
184
+                for i,item in enumerate(js["playlist"][snum]["playlist"]):
185
+                    title = title0 + " - " + title1+"/"+item["comment"].encode("utf8")
186
+                    img = img0
187
+                    data2 = item["file"].encode("utf8")
188
+                    desc = desc0
189
+                    content.append((title,data2,img,desc))
190
+            return content
191
+
192
+
193
+    def is_video(self,data):
194
+        if "::" in data:
195
+            data = data.split("::")[1]
196
+        if "live/view" in data:
197
+            return True
198
+        else:
199
+            return False
200
+
201
+    def get_categories(self,data):
202
+        r = self.call(data)
203
+        r2 = re.search('<td class="category">(.+?)</td>', r, re.DOTALL).group(1)
204
+        items = re.findall(r'<a href="#" data-id="(\d+)">([^<]+)</a>', r2, re.DOTALL)
205
+        return items
206
+
207
+
208
+    def call(self, data,params = None, headers=None):
209
+        if not headers: headers = self.headers
210
+        #if not lang: lang = self.country
211
+        url = self.url + data
212
+        content = self._http_request(url,params, headers)
213
+        return content
214
+
215
+if __name__ == "__main__":
216
+    country= "lv"
217
+    c = Source(country)
218
+    if len(sys.argv)>1:
219
+        data= sys.argv[1]
220
+    else:
221
+        data = "home"
222
+    content = c.get_content(data)
223
+    for item in content:
224
+        print item
225
+    #cat = api.get_categories(country)
226
+    #chan = api.get_channels("lv")
227
+    #prog = api.get_programs(channel=6400)
228
+    #prog = api.get_programs(category=55)
229
+    #seas = api.get_seasons(program=6453)
230
+    #str = api.get_streams(660243)
231
+    #res = api.get_videos(802)
232
+    #formats = api.getAllFormats()
233
+    #det = api.detailed("1516")
234
+    #vid = api.getVideos("13170")
235
+    pass
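
Note: the search branch of serialguru.py queries main/autocomplete and parses a JSON array of shows. A minimal sketch of that parsing, with a made-up payload standing in for the real serialguru.ru response:

import json

sample = ('[{"name": "Example show", "url": "example-show", "image_s": "42_s",'
          ' "rating": "8.1", "plus_cnt": "10", "minus_cnt": "2"}]')
for item in json.loads(sample):
    title = item["name"].encode("utf8")
    data2 = item["url"].encode("utf8")
    img = "http://serialguru.ru/uploads/cover/" + item["image_s"].replace("_s", "") + ".jpg"
    desc = title + "\nRating:%s (%s+/%s-)" % (item["rating"].encode("utf8"),
                                              item["plus_cnt"].encode("utf8"),
                                              item["minus_cnt"].encode("utf8"))
    print title, "->", data2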

+ 828
- 0
sources/swfinterp.py Прегледај датотеку

@@ -0,0 +1,828 @@
1
+# This code comes from youtube-dl: https://github.com/rg3/youtube-dl/blob/master/youtube_dl/swfinterp.py
2
+
3
+from __future__ import unicode_literals
4
+
5
+import collections
6
+import io
7
+import struct
8
+import zlib
9
+
10
+
11
+def _extract_tags(file_contents):
12
+    if file_contents[1:3] != b'WS':
13
+        print '[SWFInterpreter] Not an SWF file; header is %r' % file_contents[:3]
14
+    if file_contents[:1] == b'C':
15
+        content = zlib.decompress(file_contents[8:])
16
+    else:
17
+        raise NotImplementedError(
18
+            'Unsupported compression format %r' %
19
+            file_contents[:1])
20
+
21
+    # Determine number of bits in framesize rectangle
22
+    framesize_nbits = struct.unpack('!B', content[:1])[0] >> 3
23
+    framesize_len = (5 + 4 * framesize_nbits + 7) // 8
24
+
25
+    pos = framesize_len + 2 + 2
26
+    while pos < len(content):
27
+        header16 = struct.unpack('<H', content[pos:pos + 2])[0]
28
+        pos += 2
29
+        tag_code = header16 >> 6
30
+        tag_len = header16 & 0x3f
31
+        if tag_len == 0x3f:
32
+            tag_len = struct.unpack('<I', content[pos:pos + 4])[0]
33
+            pos += 4
34
+        assert pos + tag_len <= len(content), \
35
+            ('Tag %d ends at %d+%d - that\'s longer than the file (%d)'
36
+                % (tag_code, pos, tag_len, len(content)))
37
+        yield (tag_code, content[pos:pos + tag_len])
38
+        pos += tag_len
39
+
40
+
41
+class _AVMClass_Object(object):
42
+    def __init__(self, avm_class):
43
+        self.avm_class = avm_class
44
+
45
+    def __repr__(self):
46
+        return '%s#%x' % (self.avm_class.name, id(self))
47
+
48
+
49
+class _ScopeDict(dict):
50
+    def __init__(self, avm_class):
51
+        super(_ScopeDict, self).__init__()
52
+        self.avm_class = avm_class
53
+
54
+    def __repr__(self):
55
+        return '%s__Scope(%s)' % (
56
+            self.avm_class.name,
57
+            super(_ScopeDict, self).__repr__())
58
+
59
+
60
+class _AVMClass(object):
61
+    def __init__(self, name_idx, name, static_properties=None):
62
+        self.name_idx = name_idx
63
+        self.name = name
64
+        self.method_names = {}
65
+        self.method_idxs = {}
66
+        self.methods = {}
67
+        self.method_pyfunctions = {}
68
+        self.static_properties = static_properties if static_properties else {}
69
+
70
+        self.variables = _ScopeDict(self)
71
+        self.constants = {}
72
+
73
+    def make_object(self):
74
+        return _AVMClass_Object(self)
75
+
76
+    def __repr__(self):
77
+        return '_AVMClass(%s)' % (self.name)
78
+
79
+    def register_methods(self, methods):
80
+        self.method_names.update(methods.items())
81
+        self.method_idxs.update(dict(
82
+            (idx, name)
83
+            for name, idx in methods.items()))
84
+
85
+
86
+class _Multiname(object):
87
+    def __init__(self, kind):
88
+        self.kind = kind
89
+
90
+    def __repr__(self):
91
+        return '[MULTINAME kind: 0x%x]' % self.kind
92
+
93
+
94
+def _read_int(reader):
95
+    res = 0
96
+    shift = 0
97
+    for _ in range(5):
98
+        buf = reader.read(1)
99
+        assert len(buf) == 1
100
+        b = struct.unpack('<B', buf)[0]
101
+        res = res | ((b & 0x7f) << shift)
102
+        if b & 0x80 == 0:
103
+            break
104
+        shift += 7
105
+    return res
106
+
107
+
108
+def _u30(reader):
109
+    res = _read_int(reader)
110
+    assert res & 0xf0000000 == 0
111
+    return res
112
+_u32 = _read_int
113
+
114
+
115
+def _s32(reader):
116
+    v = _read_int(reader)
117
+    if v & 0x80000000 != 0:
118
+        v = - ((v ^ 0xffffffff) + 1)
119
+    return v
120
+
121
+
122
+def _s24(reader):
123
+    bs = reader.read(3)
124
+    assert len(bs) == 3
125
+    last_byte = b'\xff' if (ord(bs[2:3]) >= 0x80) else b'\x00'
126
+    return struct.unpack('<i', bs + last_byte)[0]
127
+
128
+
129
+def _read_string(reader):
130
+    slen = _u30(reader)
131
+    resb = reader.read(slen)
132
+    assert len(resb) == slen
133
+    return resb.decode('utf-8')
134
+
135
+
136
+def _read_bytes(count, reader):
137
+    assert count >= 0
138
+    resb = reader.read(count)
139
+    assert len(resb) == count
140
+    return resb
141
+
142
+
143
+def _read_byte(reader):
144
+    resb = _read_bytes(1, reader=reader)
145
+    res = struct.unpack('<B', resb)[0]
146
+    return res
147
+
148
+
149
+StringClass = _AVMClass('(no name idx)', 'String')
150
+ByteArrayClass = _AVMClass('(no name idx)', 'ByteArray')
151
+TimerClass = _AVMClass('(no name idx)', 'Timer')
152
+TimerEventClass = _AVMClass('(no name idx)', 'TimerEvent', {'TIMER': 'timer'})
153
+_builtin_classes = {
154
+    StringClass.name: StringClass,
155
+    ByteArrayClass.name: ByteArrayClass,
156
+    TimerClass.name: TimerClass,
157
+    TimerEventClass.name: TimerEventClass,
158
+}
159
+
160
+
161
+class _Undefined(object):
162
+    def __bool__(self):
163
+        return False
164
+    __nonzero__ = __bool__
165
+
166
+    def __hash__(self):
167
+        return 0
168
+
169
+    def __str__(self):
170
+        return 'undefined'
171
+    __repr__ = __str__
172
+
173
+undefined = _Undefined()
174
+
175
+
176
+class SWFInterpreter(object):
177
+    def __init__(self, file_contents):
178
+        self._patched_functions = {
179
+            (TimerClass, 'addEventListener'): lambda params: undefined,
180
+        }
181
+        code_tag = next(tag
182
+                        for tag_code, tag in _extract_tags(file_contents)
183
+                        if tag_code == 82)
184
+        p = code_tag.index(b'\0', 4) + 1
185
+        code_reader = io.BytesIO(code_tag[p:])
186
+
187
+        # Parse ABC (AVM2 ByteCode)
188
+
189
+        # Define a couple convenience methods
190
+        u30 = lambda *args: _u30(*args, reader=code_reader)
191
+        s32 = lambda *args: _s32(*args, reader=code_reader)
192
+        u32 = lambda *args: _u32(*args, reader=code_reader)
193
+        read_bytes = lambda *args: _read_bytes(*args, reader=code_reader)
194
+        read_byte = lambda *args: _read_byte(*args, reader=code_reader)
195
+
196
+        # minor_version + major_version
197
+        read_bytes(2 + 2)
198
+
199
+        # Constant pool
200
+        int_count = u30()
201
+        self.constant_ints = [0]
202
+        for _c in range(1, int_count):
203
+            self.constant_ints.append(s32())
204
+        self.constant_uints = [0]
205
+        uint_count = u30()
206
+        for _c in range(1, uint_count):
207
+            self.constant_uints.append(u32())
208
+        double_count = u30()
209
+        read_bytes(max(0, (double_count - 1)) * 8)
210
+        string_count = u30()
211
+        self.constant_strings = ['']
212
+        for _c in range(1, string_count):
213
+            s = _read_string(code_reader)
214
+            self.constant_strings.append(s)
215
+        namespace_count = u30()
216
+        for _c in range(1, namespace_count):
217
+            read_bytes(1)  # kind
218
+            u30()  # name
219
+        ns_set_count = u30()
220
+        for _c in range(1, ns_set_count):
221
+            count = u30()
222
+            for _c2 in range(count):
223
+                u30()
224
+        multiname_count = u30()
225
+        MULTINAME_SIZES = {
226
+            0x07: 2,  # QName
227
+            0x0d: 2,  # QNameA
228
+            0x0f: 1,  # RTQName
229
+            0x10: 1,  # RTQNameA
230
+            0x11: 0,  # RTQNameL
231
+            0x12: 0,  # RTQNameLA
232
+            0x09: 2,  # Multiname
233
+            0x0e: 2,  # MultinameA
234
+            0x1b: 1,  # MultinameL
235
+            0x1c: 1,  # MultinameLA
236
+        }
237
+        self.multinames = ['']
238
+        for _c in range(1, multiname_count):
239
+            kind = u30()
240
+            assert kind in MULTINAME_SIZES, 'Invalid multiname kind %r' % kind
241
+            if kind == 0x07:
242
+                u30()  # namespace_idx
243
+                name_idx = u30()
244
+                self.multinames.append(self.constant_strings[name_idx])
245
+            elif kind == 0x09:
246
+                name_idx = u30()
247
+                u30()
248
+                self.multinames.append(self.constant_strings[name_idx])
249
+            else:
250
+                self.multinames.append(_Multiname(kind))
251
+                for _c2 in range(MULTINAME_SIZES[kind]):
252
+                    u30()
253
+
254
+        # Methods
255
+        method_count = u30()
256
+        MethodInfo = collections.namedtuple(
257
+            'MethodInfo',
258
+            ['NEED_ARGUMENTS', 'NEED_REST'])
259
+        method_infos = []
260
+        for method_id in range(method_count):
261
+            param_count = u30()
262
+            u30()  # return type
263
+            for _ in range(param_count):
264
+                u30()  # param type
265
+            u30()  # name index (always 0 for youtube)
266
+            flags = read_byte()
267
+            if flags & 0x08 != 0:
268
+                # Options present
269
+                option_count = u30()
270
+                for c in range(option_count):
271
+                    u30()  # val
272
+                    read_bytes(1)  # kind
273
+            if flags & 0x80 != 0:
274
+                # Param names present
275
+                for _ in range(param_count):
276
+                    u30()  # param name
277
+            mi = MethodInfo(flags & 0x01 != 0, flags & 0x04 != 0)
278
+            method_infos.append(mi)
279
+
280
+        # Metadata
281
+        metadata_count = u30()
282
+        for _c in range(metadata_count):
283
+            u30()  # name
284
+            item_count = u30()
285
+            for _c2 in range(item_count):
286
+                u30()  # key
287
+                u30()  # value
288
+
289
+        def parse_traits_info():
290
+            trait_name_idx = u30()
291
+            kind_full = read_byte()
292
+            kind = kind_full & 0x0f
293
+            attrs = kind_full >> 4
294
+            methods = {}
295
+            constants = None
296
+            if kind == 0x00:  # Slot
297
+                u30()  # Slot id
298
+                u30()  # type_name_idx
299
+                vindex = u30()
300
+                if vindex != 0:
301
+                    read_byte()  # vkind
302
+            elif kind == 0x06:  # Const
303
+                u30()  # Slot id
304
+                u30()  # type_name_idx
305
+                vindex = u30()
306
+                vkind = 'any'
307
+                if vindex != 0:
308
+                    vkind = read_byte()
309
+                if vkind == 0x03:  # Constant_Int
310
+                    value = self.constant_ints[vindex]
311
+                elif vkind == 0x04:  # Constant_UInt
312
+                    value = self.constant_uints[vindex]
313
+                else:
314
+                    return {}, None  # Ignore silently for now
315
+                constants = {self.multinames[trait_name_idx]: value}
316
+            elif kind in (0x01, 0x02, 0x03):  # Method / Getter / Setter
317
+                u30()  # disp_id
318
+                method_idx = u30()
319
+                methods[self.multinames[trait_name_idx]] = method_idx
320
+            elif kind == 0x04:  # Class
321
+                u30()  # slot_id
322
+                u30()  # classi
323
+            elif kind == 0x05:  # Function
324
+                u30()  # slot_id
325
+                function_idx = u30()
326
+                methods[function_idx] = self.multinames[trait_name_idx]
327
+            else:
328
+                print '[SWFInterpreter] Unsupported trait kind %d' % kind
329
+                return {}, None
330
+
331
+            if attrs & 0x4 != 0:  # Metadata present
332
+                metadata_count = u30()
333
+                for _c3 in range(metadata_count):
334
+                    u30()  # metadata index
335
+
336
+            return methods, constants
337
+
338
+        # Classes
339
+        class_count = u30()
340
+        classes = []
341
+        for class_id in range(class_count):
342
+            name_idx = u30()
343
+
344
+            cname = self.multinames[name_idx]
345
+            avm_class = _AVMClass(name_idx, cname)
346
+            classes.append(avm_class)
347
+
348
+            u30()  # super_name idx
349
+            flags = read_byte()
350
+            if flags & 0x08 != 0:  # Protected namespace is present
351
+                u30()  # protected_ns_idx
352
+            intrf_count = u30()
353
+            for _c2 in range(intrf_count):
354
+                u30()
355
+            u30()  # iinit
356
+            trait_count = u30()
357
+            for _c2 in range(trait_count):
358
+                trait_methods, trait_constants = parse_traits_info()
359
+                avm_class.register_methods(trait_methods)
360
+                if trait_constants:
361
+                    avm_class.constants.update(trait_constants)
362
+
363
+        assert len(classes) == class_count
364
+        self._classes_by_name = dict((c.name, c) for c in classes)
365
+
366
+        for avm_class in classes:
367
+            avm_class.cinit_idx = u30()
368
+            trait_count = u30()
369
+            for _c2 in range(trait_count):
370
+                trait_methods, trait_constants = parse_traits_info()
371
+                avm_class.register_methods(trait_methods)
372
+                if trait_constants:
373
+                    avm_class.constants.update(trait_constants)
374
+
375
+        # Scripts
376
+        script_count = u30()
377
+        for _c in range(script_count):
378
+            u30()  # init
379
+            trait_count = u30()
380
+            for _c2 in range(trait_count):
381
+                parse_traits_info()
382
+
383
+        # Method bodies
384
+        method_body_count = u30()
385
+        Method = collections.namedtuple('Method', ['code', 'local_count'])
386
+        self._all_methods = []
387
+        for _c in range(method_body_count):
388
+            method_idx = u30()
389
+            u30()  # max_stack
390
+            local_count = u30()
391
+            u30()  # init_scope_depth
392
+            u30()  # max_scope_depth
393
+            code_length = u30()
394
+            code = read_bytes(code_length)
395
+            m = Method(code, local_count)
396
+            self._all_methods.append(m)
397
+            for avm_class in classes:
398
+                if method_idx in avm_class.method_idxs:
399
+                    avm_class.methods[avm_class.method_idxs[method_idx]] = m
400
+            exception_count = u30()
401
+            for _c2 in range(exception_count):
402
+                u30()  # from
403
+                u30()  # to
404
+                u30()  # target
405
+                u30()  # exc_type
406
+                u30()  # var_name
407
+            trait_count = u30()
408
+            for _c2 in range(trait_count):
409
+                parse_traits_info()
410
+
411
+        assert p + code_reader.tell() == len(code_tag)
412
+
413
+    def patch_function(self, avm_class, func_name, f):
414
+        self._patched_functions[(avm_class, func_name)] = f
415
+
416
+    def extract_class(self, class_name, call_cinit=True):
417
+        try:
418
+            res = self._classes_by_name[class_name]
419
+        except KeyError:
420
+            print '[SWFInterpreter] Class %r not found' % class_name
421
+            return None
422
+
423
+        if call_cinit and hasattr(res, 'cinit_idx'):
424
+            res.register_methods({'$cinit': res.cinit_idx})
425
+            res.methods['$cinit'] = self._all_methods[res.cinit_idx]
426
+            cinit = self.extract_function(res, '$cinit')
427
+            cinit([])
428
+
429
+        return res
430
+
431
+    def extract_function(self, avm_class, func_name):
432
+        p = self._patched_functions.get((avm_class, func_name))
433
+        if p:
434
+            return p
435
+        if func_name in avm_class.method_pyfunctions:
436
+            return avm_class.method_pyfunctions[func_name]
437
+        if func_name in self._classes_by_name:
438
+            return self._classes_by_name[func_name].make_object()
439
+        if func_name not in avm_class.methods:
440
+            print '[SWFInterpreter] Cannot find function %s.%s' % (
441
+                avm_class.name, func_name)
442
+            return None
443
+        m = avm_class.methods[func_name]
444
+
445
+        def resfunc(args):
446
+            # Helper functions
447
+            coder = io.BytesIO(m.code)
448
+            s24 = lambda: _s24(coder)
449
+            u30 = lambda: _u30(coder)
450
+
451
+            registers = [avm_class.variables] + list(args) + [None] * m.local_count
452
+            stack = []
453
+            scopes = collections.deque([
454
+                self._classes_by_name, avm_class.constants, avm_class.variables])
455
+            while True:
456
+                opcode = _read_byte(coder)
457
+                if opcode == 9:  # label
458
+                    pass  # Spec says: "Do nothing."
459
+                elif opcode == 16:  # jump
460
+                    offset = s24()
461
+                    coder.seek(coder.tell() + offset)
462
+                elif opcode == 17:  # iftrue
463
+                    offset = s24()
464
+                    value = stack.pop()
465
+                    if value:
466
+                        coder.seek(coder.tell() + offset)
467
+                elif opcode == 18:  # iffalse
468
+                    offset = s24()
469
+                    value = stack.pop()
470
+                    if not value:
471
+                        coder.seek(coder.tell() + offset)
472
+                elif opcode == 19:  # ifeq
473
+                    offset = s24()
474
+                    value2 = stack.pop()
475
+                    value1 = stack.pop()
476
+                    if value2 == value1:
477
+                        coder.seek(coder.tell() + offset)
478
+                elif opcode == 20:  # ifne
479
+                    offset = s24()
480
+                    value2 = stack.pop()
481
+                    value1 = stack.pop()
482
+                    if value2 != value1:
483
+                        coder.seek(coder.tell() + offset)
484
+                elif opcode == 21:  # iflt
485
+                    offset = s24()
486
+                    value2 = stack.pop()
487
+                    value1 = stack.pop()
488
+                    if value1 < value2:
489
+                        coder.seek(coder.tell() + offset)
490
+                elif opcode == 32:  # pushnull
491
+                    stack.append(None)
492
+                elif opcode == 33:  # pushundefined
493
+                    stack.append(undefined)
494
+                elif opcode == 36:  # pushbyte
495
+                    v = _read_byte(coder)
496
+                    stack.append(v)
497
+                elif opcode == 37:  # pushshort
498
+                    v = u30()
499
+                    stack.append(v)
500
+                elif opcode == 38:  # pushtrue
501
+                    stack.append(True)
502
+                elif opcode == 39:  # pushfalse
503
+                    stack.append(False)
504
+                elif opcode == 40:  # pushnan
505
+                    stack.append(float('NaN'))
506
+                elif opcode == 42:  # dup
507
+                    value = stack[-1]
508
+                    stack.append(value)
509
+                elif opcode == 44:  # pushstring
510
+                    idx = u30()
511
+                    stack.append(self.constant_strings[idx])
512
+                elif opcode == 48:  # pushscope
513
+                    new_scope = stack.pop()
514
+                    scopes.append(new_scope)
515
+                elif opcode == 66:  # construct
516
+                    arg_count = u30()
517
+                    args = list(reversed(
518
+                        [stack.pop() for _ in range(arg_count)]))
519
+                    obj = stack.pop()
520
+                    res = obj.avm_class.make_object()
521
+                    stack.append(res)
522
+                elif opcode == 70:  # callproperty
523
+                    index = u30()
524
+                    mname = self.multinames[index]
525
+                    arg_count = u30()
526
+                    args = list(reversed(
527
+                        [stack.pop() for _ in range(arg_count)]))
528
+                    obj = stack.pop()
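+                    # dispatch the call on the receiver type: the built-in String class,
+                    # AVM classes/objects, scope dicts, plain strings and lists are handled below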
529
+
530
+                    if obj == StringClass:
531
+                        if mname == 'String':
532
+                            assert len(args) == 1
533
+                            assert isinstance(args[0], (
534
+                                int, unicode, _Undefined))
535
+                            if args[0] == undefined:
536
+                                res = 'undefined'
537
+                            else:
538
+                                res = unicode(args[0])
539
+                            stack.append(res)
540
+                            continue
541
+                        else:
542
+                            raise NotImplementedError(
543
+                                'Function String.%s is not yet implemented'
544
+                                % mname)
545
+                    elif isinstance(obj, _AVMClass_Object):
546
+                        func = self.extract_function(obj.avm_class, mname)
547
+                        res = func(args)
548
+                        stack.append(res)
549
+                        continue
550
+                    elif isinstance(obj, _AVMClass):
551
+                        func = self.extract_function(obj, mname)
552
+                        res = func(args)
553
+                        stack.append(res)
554
+                        continue
555
+                    elif isinstance(obj, _ScopeDict):
556
+                        if mname in obj.avm_class.method_names:
557
+                            func = self.extract_function(obj.avm_class, mname)
558
+                            res = func(args)
559
+                        else:
560
+                            res = obj[mname]
561
+                        stack.append(res)
562
+                        continue
563
+                    elif isinstance(obj, unicode):
564
+                        if mname == 'split':
565
+                            assert len(args) == 1
566
+                            assert isinstance(args[0], unicode)
567
+                            if args[0] == '':
568
+                                res = list(obj)
569
+                            else:
570
+                                res = obj.split(args[0])
571
+                            stack.append(res)
572
+                            continue
573
+                        elif mname == 'charCodeAt':
574
+                            assert len(args) <= 1
575
+                            idx = 0 if len(args) == 0 else args[0]
576
+                            assert isinstance(idx, int)
577
+                            res = ord(obj[idx])
578
+                            stack.append(res)
579
+                            continue
580
+                    elif isinstance(obj, list):
581
+                        if mname == 'slice':
582
+                            assert len(args) == 1
583
+                            assert isinstance(args[0], int)
584
+                            res = obj[args[0]:]
585
+                            stack.append(res)
586
+                            continue
587
+                        elif mname == 'join':
588
+                            assert len(args) == 1
589
+                            assert isinstance(args[0], unicode)
590
+                            res = args[0].join(obj)
591
+                            stack.append(res)
592
+                            continue
593
+                    raise NotImplementedError(
594
+                        'Unsupported property %r on %r'
595
+                        % (mname, obj))
596
+                elif opcode == 71:  # returnvoid
597
+                    res = undefined
598
+                    return res
599
+                elif opcode == 72:  # returnvalue
600
+                    res = stack.pop()
601
+                    return res
602
+                elif opcode == 73:  # constructsuper
603
+                    # Not yet implemented, just hope it works without it
604
+                    arg_count = u30()
605
+                    args = list(reversed(
606
+                        [stack.pop() for _ in range(arg_count)]))
607
+                    obj = stack.pop()
608
+                elif opcode == 74:  # constructproperty
609
+                    index = u30()
610
+                    arg_count = u30()
611
+                    args = list(reversed(
612
+                        [stack.pop() for _ in range(arg_count)]))
613
+                    obj = stack.pop()
614
+
615
+                    mname = self.multinames[index]
616
+                    assert isinstance(obj, _AVMClass)
617
+
618
+                    # We do not actually call the constructor for now;
619
+                    # we just pretend it does nothing
620
+                    stack.append(obj.make_object())
621
+                elif opcode == 79:  # callpropvoid
622
+                    index = u30()
623
+                    mname = self.multinames[index]
624
+                    arg_count = u30()
625
+                    args = list(reversed(
626
+                        [stack.pop() for _ in range(arg_count)]))
627
+                    obj = stack.pop()
628
+                    if isinstance(obj, _AVMClass_Object):
629
+                        func = self.extract_function(obj.avm_class, mname)
630
+                        res = func(args)
631
+                        assert res is undefined
632
+                        continue
633
+                    if isinstance(obj, _ScopeDict):
634
+                        assert mname in obj.avm_class.method_names
635
+                        func = self.extract_function(obj.avm_class, mname)
636
+                        res = func(args)
637
+                        assert res is undefined
638
+                        continue
639
+                    if mname == 'reverse':
640
+                        assert isinstance(obj, list)
641
+                        obj.reverse()
642
+                    else:
643
+                        raise NotImplementedError(
644
+                            'Unsupported (void) property %r on %r'
645
+                            % (mname, obj))
646
+                elif opcode == 86:  # newarray
647
+                    arg_count = u30()
648
+                    arr = []
649
+                    for i in range(arg_count):
650
+                        arr.append(stack.pop())
651
+                    arr = arr[::-1]
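+                    # the operands were pushed left to right, so reverse the popped values to restore order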
652
+                    stack.append(arr)
653
+                elif opcode == 93:  # findpropstrict
654
+                    index = u30()
655
+                    mname = self.multinames[index]
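+                    # walk the scope stack from innermost to outermost; fall back to the
+                    # outermost scope or, failing that, to a built-in class of the same name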
656
+                    for s in reversed(scopes):
657
+                        if mname in s:
658
+                            res = s
659
+                            break
660
+                    else:
661
+                        res = scopes[0]
662
+                    if mname not in res and mname in _builtin_classes:
663
+                        stack.append(_builtin_classes[mname])
664
+                    else:
665
+                        stack.append(res[mname])
666
+                elif opcode == 94:  # findproperty
667
+                    index = u30()
668
+                    mname = self.multinames[index]
669
+                    for s in reversed(scopes):
670
+                        if mname in s:
671
+                            res = s
672
+                            break
673
+                    else:
674
+                        res = avm_class.variables
675
+                    stack.append(res)
676
+                elif opcode == 96:  # getlex
677
+                    index = u30()
678
+                    mname = self.multinames[index]
679
+                    for s in reversed(scopes):
680
+                        if mname in s:
681
+                            scope = s
682
+                            break
683
+                    else:
684
+                        scope = avm_class.variables
685
+
686
+                    if mname in scope:
687
+                        res = scope[mname]
688
+                    elif mname in _builtin_classes:
689
+                        res = _builtin_classes[mname]
690
+                    else:
691
+                        # Assume uninitialized
692
+                        # TODO warn here
693
+                        res = undefined
694
+                    stack.append(res)
695
+                elif opcode == 97:  # setproperty
696
+                    index = u30()
697
+                    value = stack.pop()
698
+                    idx = self.multinames[index]
699
+                    if isinstance(idx, _Multiname):
700
+                        idx = stack.pop()
701
+                    obj = stack.pop()
702
+                    obj[idx] = value
703
+                elif opcode == 98:  # getlocal
704
+                    index = u30()
705
+                    stack.append(registers[index])
706
+                elif opcode == 99:  # setlocal
707
+                    index = u30()
708
+                    value = stack.pop()
709
+                    registers[index] = value
710
+                elif opcode == 102:  # getproperty
711
+                    index = u30()
712
+                    pname = self.multinames[index]
713
+                    if pname == 'length':
714
+                        obj = stack.pop()
715
+                        assert isinstance(obj, (unicode, list))
716
+                        stack.append(len(obj))
717
+                    elif isinstance(pname, unicode):  # Member access
718
+                        obj = stack.pop()
719
+                        if isinstance(obj, _AVMClass):
720
+                            res = obj.static_properties[pname]
721
+                            stack.append(res)
722
+                            continue
723
+
724
+                        assert isinstance(obj, (dict, _ScopeDict)),\
725
+                            'Accessing member %r on %r' % (pname, obj)
726
+                        res = obj.get(pname, undefined)
727
+                        stack.append(res)
728
+                    else:  # Assume attribute access
729
+                        idx = stack.pop()
730
+                        assert isinstance(idx, int)
731
+                        obj = stack.pop()
732
+                        assert isinstance(obj, list)
733
+                        stack.append(obj[idx])
734
+                elif opcode == 104:  # initproperty
735
+                    index = u30()
736
+                    value = stack.pop()
737
+                    idx = self.multinames[index]
738
+                    if isinstance(idx, _Multiname):
739
+                        idx = stack.pop()
740
+                    obj = stack.pop()
741
+                    obj[idx] = value
742
+                elif opcode == 115:  # convert_i
743
+                    value = stack.pop()
744
+                    intvalue = int(value)
745
+                    stack.append(intvalue)
746
+                elif opcode == 128:  # coerce
747
+                    u30()
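+                    # the multiname operand naming the target type is read and ignored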
748
+                elif opcode == 130:  # coerce_a
749
+                    value = stack.pop()
750
+                    # um, yes, it's any value
751
+                    stack.append(value)
752
+                elif opcode == 133:  # coerce_s
753
+                    assert isinstance(stack[-1], (type(None), unicode))
754
+                elif opcode == 147:  # decrement
755
+                    value = stack.pop()
756
+                    assert isinstance(value, int)
757
+                    stack.append(value - 1)
758
+                elif opcode == 149:  # typeof
759
+                    value = stack.pop()
760
+                    return {
761
+                        _Undefined: 'undefined',
762
+                        unicode: 'String',
763
+                        int: 'Number',
764
+                        float: 'Number',
765
+                    }[type(value)]
766
+                elif opcode == 160:  # add
767
+                    value2 = stack.pop()
768
+                    value1 = stack.pop()
769
+                    res = value1 + value2
770
+                    stack.append(res)
771
+                elif opcode == 161:  # subtract
772
+                    value2 = stack.pop()
773
+                    value1 = stack.pop()
774
+                    res = value1 - value2
775
+                    stack.append(res)
776
+                elif opcode == 162:  # multiply
777
+                    value2 = stack.pop()
778
+                    value1 = stack.pop()
779
+                    res = value1 * value2
780
+                    stack.append(res)
781
+                elif opcode == 164:  # modulo
782
+                    value2 = stack.pop()
783
+                    value1 = stack.pop()
784
+                    res = value1 % value2
785
+                    stack.append(res)
786
+                elif opcode == 168:  # bitand
787
+                    value2 = stack.pop()
788
+                    value1 = stack.pop()
789
+                    assert isinstance(value1, int)
790
+                    assert isinstance(value2, int)
791
+                    res = value1 & value2
792
+                    stack.append(res)
793
+                elif opcode == 171:  # equals
794
+                    value2 = stack.pop()
795
+                    value1 = stack.pop()
796
+                    result = value1 == value2
797
+                    stack.append(result)
798
+                elif opcode == 175:  # greaterequals
799
+                    value2 = stack.pop()
800
+                    value1 = stack.pop()
801
+                    result = value1 >= value2
802
+                    stack.append(result)
803
+                elif opcode == 192:  # increment_i
804
+                    value = stack.pop()
805
+                    assert isinstance(value, int)
806
+                    stack.append(value + 1)
807
+                elif opcode == 208:  # getlocal_0
808
+                    stack.append(registers[0])
809
+                elif opcode == 209:  # getlocal_1
810
+                    stack.append(registers[1])
811
+                elif opcode == 210:  # getlocal_2
812
+                    stack.append(registers[2])
813
+                elif opcode == 211:  # getlocal_3
814
+                    stack.append(registers[3])
815
+                elif opcode == 212:  # setlocal_0
816
+                    registers[0] = stack.pop()
817
+                elif opcode == 213:  # setlocal_1
818
+                    registers[1] = stack.pop()
819
+                elif opcode == 214:  # setlocal_2
820
+                    registers[2] = stack.pop()
821
+                elif opcode == 215:  # setlocal_3
822
+                    registers[3] = stack.pop()
823
+                else:
824
+                    raise NotImplementedError(
825
+                        'Unsupported opcode %d' % opcode)
826
+
827
+        avm_class.method_pyfunctions[func_name] = resfunc
828
+        return resfunc

+ 345
- 0
sources/tvdom.py View file

@@ -0,0 +1,345 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+import urllib2, urllib
13
+import datetime, re, sys,os
14
+from collections import OrderedDict
15
+import ssl
16
+if "_create_unverified_context" in dir(ssl):
17
+    ssl._create_default_https_context = ssl._create_unverified_context
18
+
19
+from SourceBase import SourceBase
20
+import util
21
+
22
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
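+# headers2dict turns a copied block of raw HTTP headers into a dict usable with urllib2/requests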
23
+headers0 = headers2dict("""
24
+User-Agent: Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.04
25
+Accept: application/json, text/javascript, */*; q=0.01
26
+Accept-Language: en-US,en;q=0.5
27
+Accept-Encoding: gzip, deflate, br
28
+Content-Type: application/x-www-form-urlencoded; charset=UTF-8
29
+X-Requested-With: XMLHttpRequest""")
30
+import HTMLParser
31
+h = HTMLParser.HTMLParser()
32
+
33
+class Source(SourceBase):
34
+
35
+    def __init__(self,country="lv",cfg_path=None):
36
+        self.name = "tvdom"
37
+        self.title = "TVDom.tv"
38
+        self.img = "https://tvdom.tv/front/assets/images/logo.png"
39
+        self.desc = "TVDom.tv portāla satura skatīšanās"
40
+        self.headers = headers0
41
+        self.url = "https://tvdom.tv/"
42
+
43
+        self.country=country
44
+        self.session = None
45
+        self.token = None
46
+
47
+        cur_directory = os.path.dirname(os.path.abspath(__file__))
48
+        if not cfg_path: cfg_path = cur_directory
49
+        self.config_file = os.path.join(cfg_path,self.name+".cfg")
50
+        self.options = OrderedDict([("user","lietotajs"),("password","parole"), ("region", "lv"), ("lang", "lv")])
51
+        self.options_read()
52
+
53
+    def login(self,user="",password=""):
54
+        self.options_read()
55
+        if not user: user=self.options["user"]
56
+        if not password: password = self.options["password"]
57
+        headers = headers2dict("""
58
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0
59
+Accept: */*
60
+Accept-Language: en-US,en;q=0.5
61
+Accept-Encoding: gzip, deflate, br
62
+Content-Type: application/x-www-form-urlencoded; charset=UTF-8
63
+X-Requested-With: XMLHttpRequest
64
+Referer: https://tvdom.tv/
65
+        """)
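+        # POST the credentials to the login endpoint; a successful JSON response
+        # carries an access_token, and the PHPSESSID cookie identifies the session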
66
+        url = "https://tvdom.tv/infinity/on_register_user"
67
+        params = "email=%s&password=%s&remember=false&auth_type=login"%(user,password)
68
+        import requests
69
+        try:
70
+            from requests.packages.urllib3.exceptions import InsecureRequestWarning
71
+            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
72
+        except:
73
+            pass
74
+        r = requests.post(url, data=params, headers=headers)
75
+        js = json.loads(r.content)
76
+        if 'success' in r.content:
77
+            self.token = js["access_token"]
78
+            if 'PHPSESSID' in r.cookies:
79
+                self.session = r.cookies["PHPSESSID"]
80
+            return True
81
+        else:
82
+            raise Exception(js["error"])
83
+
84
+
85
+    def get_content(self, data):
86
+        print "[tvdom] get_content:", data
87
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
88
+        lang = self.options["lang"]
89
+        region = self.options["region"]
90
+        content=[]
91
+        content.append(("..return", "back","","Return back"))
92
+
93
+        if clist=="home":
94
+            content.extend([
95
+                ("Live stream", "tvdom::tiesraides","","TV live streams"),
96
+                ("Archive - last", "tvdom::arhivs?filter=new&page=0&limit=30&region=%s&language=%s" % (region, lang),"","Video archive - last videos"),
97
+                ("Archive - popular", "tvdom::arhivs?filter=new&page=0&limit=30&region=%s&language=%s"% (region, lang),"","Video archive - popular video"),
98
+                ("Archive - categories", "tvdom::arhivs","","Video archive by categories"),
99
+                ("Search", "tvdom::search/?srch-term={0}","","Search archive"),
100
+
101
+                #("Archive - all", "tvdom::arhivs_all","","Video archive all"),
102
+            ])
103
+            return content
104
+
105
+        ### Live channel list ###
106
+        elif data=="tiesraides":
107
+            data1 = "infinity/on_front_filter_channels_list.json"
108
+            params = "hd=&language=%s&region=%s" % (self.options["lang"], self.options["region"])
109
+            r = self.call_json(data1, params)
110
+            for genre in  r["data"]:
111
+                for item in r["data"][genre]:
112
+                    ch = item["channel_name"]
113
+                    title = u"%s - %s" % (item["channel_name"], item["program_title"])
114
+                    img = "https://tvdom.tv"+item["channel_logo"]
115
+                    data2 = item["program_url"][1:]
116
+                    desc = u"%s\n%s-%s"%(title,item["time_start"],item["time_stop"])
117
+                    content.append((title,self.name+"::"+data2,img,desc))
118
+            return content
119
+
120
+#         elif clist == "tiesraides":
121
+#             if not self.session:
122
+#                 self.login()
123
+#             url = "https://tvdom.tv/" + data
124
+#             headers = self.headers
125
+#             headers["Cookie"] = "PHPSESSID=%s; neverending_story=1;"%self.session
126
+#             r = self._http_request(url,headers=headers)
127
+#             m = re.search("var streamConnectionUrl = '([^']+)'", r, re.DOTALL)
128
+#             if m:
129
+#                 data2 = m.group(1)
130
+#             else:
131
+#                 return ("No stream found %s"%data,"","","No stream found")
132
+#             m = re.search('title: "([^"]+)"', r, re.DOTALL)
133
+#             title = m.group(1) if m else data2
134
+#             m = re.search('<div id="panel">([^<]+)<', r, re.DOTALL)
135
+#             desc = m.group(1) if m else title
136
+#             m = re.search('<div id="panel">([^<]+)<', r, re.DOTALL)
137
+#             desc = m.group(1) if m else title
138
+#             m = re.search('var promo_image *= "([^"]+)', r, re.DOTALL)
139
+#             img = m.group(1) if m else ""
140
+#             return (title,data2,img,desc)
141
+
142
+        ### Search ###
143
+        elif clist=="search":
144
+            url = "https://tvdom.tv/" + data
145
+            r = self._http_request(url)
146
+            result = re.findall(r'<div class="film-block_img">\s*<img src="([^"]+).+?<a href="/([^"]+)">([^<]+)</a>\s*</div>\s*<ul class="film-block_descr_tag">\s*<li><a href="">([^<]+)</a>', r, re.DOTALL)
147
+            for item in result:
148
+                title = item[2] + " ("+ item[3] + ")"
149
+                title =  h.unescape(title.decode("utf8")).encode("utf8")
150
+                img = "https://tvdom.tv" + item[0]
151
+                data2 = item[1]
152
+                desc = title
153
+                content.append((title,self.name+"::"+data2,img,desc))
154
+            return content
155
+
156
+        ### Archive categories ###
157
+        elif data=="arhivs":
158
+            url = "https://tvdom.tv/"+data
159
+            headers = self.headers
160
+            headers["Cookie"] = "neverending_story=1; user_selected_language=%s"% (self.options["lang"])
161
+            r = self._http_request(url)
162
+            result = re.findall(r'data-text-mobile="(.+?)" data-id="(\d+)" data-filter-type="archive_genre">', r)
163
+            for item in result:
164
+                title = item[0]
165
+                img = ""
166
+                data2 = "arhivs?" + "genre1=%s&filter=new&region=%s&lang=%s&page=0&limit=30" % (item[1], region, lang)
167
+                desc = title
168
+                content.append((title,self.name+"::"+data2,img,desc))
169
+            return content
170
+
171
+        ### Programmes in an archive category ###
172
+        elif clist=="arhivs":
173
+            data1 = "infinity/on_front_archive_get_events2.json"
174
+            r = self.call_json(data1, params[1:])
175
+
176
+            for item in r["data"]:
177
+                title = item["title"] + "- " + item["online_time"]
178
+                img = "https://tvdom.tv"+item["image"]
179
+                data2 = item["url"][1:]
180
+                desc = u"%s\nlast: %s\npopularity: %s\n%s" % (title, item["online_time"], item["popularity"], item["channel_code"])
181
+                content.append((title,self.name+"::"+data2,img,desc))
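+            # a full page (30 items) suggests there are more results, so add a
+            # "Next page" entry with the page parameter incremented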
182
+            if len(r["data"]) == 30:
183
+                page = int(qs["page"]) + 1
184
+                data2 = re.sub("page=\d+", "page=%s" % page, data)
185
+                content.append(("Next page",self.name+"::"+data2,"","Go to next page"))
186
+            return content
187
+
188
+        ### Video list of an archive programme ###
189
+        elif clist=="play_arhivs" and len(data.split("/"))==3 and not re.search("_\d+",plist[2]):
190
+            url = "https://tvdom.tv/"+data
191
+            r = self._http_request(url)
192
+            vid=re.search('materialIdentifier : "(\d+)', r, re.DOTALL).group(1)
193
+            pname = re.split("[_\.\-]", plist[-1])[0]
194
+            data2 = data+"_"+vid
195
+            m = re.search(r'var program_title\s*= "(.+?)"', r)
196
+            title = title0 = m.group(1) if m else data2
197
+            m = re.search('<p class="content-info__descr-item">(.+?)</p>', r, re.DOTALL)
198
+            datums = m.group(1).strip() if m else ""
199
+            title = title + " " + datums
200
+            m = re.search('<p class="video-player__descr-text" style="height: 100px;">(.+?)<span', r, re.DOTALL)
201
+            desc = m.group(1).strip() if m else title
202
+            m = re.search('var share_image\s*= "([^"]+)', r, re.DOTALL)
203
+            img = m.group(1) if m else ""
204
+            content.append((title,self.name+"::"+data2,img,desc))
205
+            i2 = r.find("content  content--indent-pb")
206
+            r2 = r if i2 == -1 else r[:i2]
207
+            result = re.findall(r'<img src="([^"]+)" class="content-item__image" alt=""><a href="/([^"]+)" class="content-item__hide-info">.+?<h3 class="content-item__name">\s*<a href="#">\s*([^<]*)\s*<br>\s*([^<]*)\s*</a>\s*</h3>', r2, re.DOTALL)
208
+            for item in result:
209
+                if pname not in item[1]:
210
+                    continue
211
+                title = item[2] + " " + item[3].strip()
212
+                title =  h.unescape(title.decode("utf8")).encode("utf8")
213
+                img = "https://tvdom.tv"+item[0]
214
+                data2 = item[1]
215
+                desc = title # TODO
216
+                content.append((title,self.name+"::"+data2,img,desc))
217
+            return content
218
+
219
+        ### Archive video
220
+#         elif clist=="play_arhivs" and len(data.split("/"))==3 and re.search("_\d+",plist[2]):
221
+#             url = "https://tvdom.tv/" + data
222
+#             headers = self.headers
223
+#             headers["Cookie"] = "PHPSESSID=%s; neverending_story=1;"%self.session
224
+#             r = self._http_request(url,headers=headers)
225
+#             m = re.search('var streamConnectionUrl  = "([^"]+)"', r, re.DOTALL)
226
+#             if m:
227
+#                 data2 = m.group(1)
228
+#             else:
229
+#                 return ("No stream found %s"%data,"","","No stream found")
230
+#             m = re.search('program_title        = "([^"]+)"', r, re.DOTALL)
231
+#             title = m.group(1) if m else data2
232
+#             m = re.search('<a class="episode">Pārraides laiks ēterā: <span>([^<]+)</span></a>', r, re.DOTALL)
233
+#             datums = m.group(1) if m else ""
234
+#             title = title + " " + datums
235
+#             m = re.search('<div id="panel">([^<]+)<', r, re.DOTALL)
236
+#             desc = m.group(1) if m else title
237
+#             m = re.search('<div id="panel">([^<]+)<', r, re.DOTALL)
238
+#             desc = m.group(1) if m else title
239
+#             m = re.search('var share_image *= "([^"]+)', r, re.DOTALL)
240
+#             img = m.group(1) if m else ""
241
+#             return (title,data2,img,desc)
242
+
243
+    def get_streams(self,data):
244
+        print "[tvdom] get_streams:", data
245
+        if not self.is_video(data):
246
+            return []
247
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
248
+        if not self.session:
249
+            self.login()
250
+        url = "https://tvdom.tv/" + data
251
+        headers = self.headers
252
+        headers["Cookie"] = "PHPSESSID=%s; neverending_story=1;"%self.session
253
+        r = self._http_request(url,headers=headers)
254
+
255
+        if clist == "play_arhivs":
256
+            m = re.search('program_title        = "([^"]+)"', r, re.DOTALL)
257
+            title = m.group(1) if m else data  # fall back to the request path (data2 is not yet defined here)
258
+            m = re.search('<a class="episode">Pārraides laiks ēterā: <span>([^<]+)</span></a>', r, re.DOTALL)
259
+            datums = m.group(1) if m else ""
260
+            title = title + " " + datums
261
+            m = re.search('<div id="panel">([^<]+)<', r, re.DOTALL)
262
+            desc = m.group(1) if m else title
263
+            m = re.search('<div id="panel">([^<]+)<', r, re.DOTALL)
264
+            desc = m.group(1) if m else title
265
+            m = re.search('var share_image *= "([^"]+)', r, re.DOTALL)
266
+            img = m.group(1) if m else ""
267
+
268
+            m = re.search(r"var streams\s*= ({[^;]+);", r, re.DOTALL)
269
+            if not m:
270
+                raise Exception("No stream found")
271
+            js = json.loads(m.group(1))
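+            # the page embeds a JSON map of quality label -> stream info; build one
+            # stream entry per quality and sort by the numeric quality (e.g. "720p") descending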
272
+            streams = []
273
+            for st in js:
274
+                if st == "qualities": continue
275
+                s = js[st]
276
+                data2 = s["url"]
277
+                stream = util.item()
278
+                stream["name"] = title
279
+                stream["url"] = data2
280
+                stream["img"] = img
281
+                stream["desc"] = desc
282
+                stream["resolver"] = "tvdom"
283
+                stream["quality"] = st
284
+                stream["order"] = int(st[:-1])
285
+                streams.append(stream)
286
+
287
+            streams = sorted(streams,key=lambda item: item["order"],reverse=True)
288
+            return streams
289
+
290
+        # Live stream video
291
+        else:
292
+            m = re.search("var streamConnectionUrl = '([^']+)'", r, re.DOTALL)
293
+            if m:
294
+                data2 = m.group(1)
295
+            else:
296
+                raise Exception("No stream found")
297
+            m = re.search('title: "([^"]+)"', r, re.DOTALL)
298
+            title = m.group(1) if m else data2
299
+            m = re.search('<div id="panel">([^<]+)<', r, re.DOTALL)
300
+            desc = m.group(1) if m else title
301
+            m = re.search('<div id="panel">([^<]+)<', r, re.DOTALL)
302
+            desc = m.group(1) if m else title
303
+            m = re.search('var promo_image *= "([^"]+)', r, re.DOTALL)
304
+            img = m.group(1) if m else ""
305
+            stream = util.item()
306
+            stream["name"] = title
307
+            stream["url"] = data2
308
+            stream["img"] = img
309
+            stream["desc"] = desc
310
+            stream["resolver"] = "tvdom"
311
+            return [stream]
312
+
313
+
314
+    def is_video(self,data):
315
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
316
+        cmd = data.split("/")
317
+        if cmd[0] in ("tiesraides",) and len(cmd)>1:
318
+            return True
319
+        elif cmd[0]=="play_arhivs" and len(cmd)==3 and re.search("_\d+",plist[2]):
320
+            return True
321
+        else:
322
+            return False
323
+
324
+
325
+if __name__ == "__main__":
326
+    country= "lv"
327
+    c = Source(country)
328
+    if len(sys.argv)>1:
329
+        data= sys.argv[1]
330
+    else:
331
+        data = "home"
332
+    content = c.get_content(data)
333
+    for item in content:
334
+        print item
335
+    #cat = api.get_categories(country)
336
+    #chan = api.get_channels("lv")
337
+    #prog = api.get_programs(channel=6400)
338
+    #prog = api.get_programs(category=55)
339
+    #seas = api.get_seasons(program=6453)
340
+    #str = api.get_streams(660243)
341
+    #res = api.get_videos(802)
342
+    #formats = api.getAllFormats()
343
+    #det = api.detailed("1516")
344
+    #vid = api.getVideos("13170")
345
+    pass

+ 193
- 0
sources/ustvnow.py View file

@@ -0,0 +1,193 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+import urllib2, urllib
13
+import datetime, re, sys,os
14
+import traceback
15
+from collections import OrderedDict
16
+import ssl
17
+if "_create_unverified_context" in dir(ssl):
18
+    ssl._create_default_https_context = ssl._create_unverified_context
19
+
20
+from SourceBase import SourceBase
21
+
22
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
23
+headers0 = headers2dict("""
24
+Host: m-api.ustvnow.com
25
+User-Agent: Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46
26
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
27
+DNT: 1
28
+Connection: keep-alive
29
+""")
30
+import HTMLParser
31
+h = HTMLParser.HTMLParser()
32
+
33
+class Source(SourceBase):
34
+
35
+    def __init__(self,country="lv",cfg_path=None):
36
+        self.hidden = True
37
+        self.name = "ustvnow"
38
+        self.title = "USTVNow"
39
+        self.img = "http://watch.ustvnow.com/assets/ustvnow/img/ustvnow_og_image.png"
40
+        self.desc = "USTVNow kanālu tiešraide"
41
+        self.headers = headers0
42
+
43
+        self.country=country
44
+        self.token = ""
45
+        cur_directory = os.path.dirname(os.path.abspath(__file__))
46
+        if not cfg_path: cfg_path = cur_directory
47
+        self.config_file = os.path.join(cfg_path,self.name+".cfg")
48
+        self.options = OrderedDict([("user","lietotajs"),("password","parole")])
49
+        self.options_read()
50
+
51
+    def login(self,user="",password=""):
52
+        self.options_read()
53
+        if not user: user=self.options["user"]
54
+        if not password: password = self.options["password"]
55
+        headers = headers2dict("""
56
+        Host: m-api.ustvnow.com
57
+        Accept-Language: en-US,en;q=0.5
58
+        User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
59
+        Accept: text/html,application/xhtml+xml,application/xml
60
+        Connection: keep-alive
61
+        """)
62
+
63
+        url = "http://m-api.ustvnow.com/iphone/1/live/login?username=%s&password=%s&device=gtv&redir=0"%(user,password)
64
+        #url = "http://m-api.ustvnow.com/gtv/1/live/login?username=%s&password=%s&device=gtv&redir=0"%(user,password)
65
+        r = self._http_request(url,headers=headers)
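+        # the login endpoint answers with JSON; on success extract the token that
+        # later channel guide and stream requests pass along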
66
+        if 'success' in r:
67
+            self.token = re.search('"token":"([^"]+)',r).group(1)
68
+            return True
69
+        else:
70
+            return False
71
+
72
+    def get_content(self, data):
73
+        print "[ustvnow] get_content:", data
74
+        if "::" in data:
75
+            data = data.split("::")[1]
76
+        path = data.split("?")[0]
77
+        clist = path.split("/")[0]
78
+        params = data[data.find("?"):] if "?" in data else ""
79
+        qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",params)))
80
+        lang = qs["lang"] if "lang" in qs else self.country
81
+
82
+        content=[]
83
+        content.append(("..return", "back","","Return back"))
84
+
85
+        if clist=="home":
86
+            content.extend([
87
+                ("TV live streams", "ustvnow::tvlive","","TV live streams"),
88
+                ("Movies", "ustvnow::movies","","Movies (not implemented yet)"),
89
+                ("Recordings", "ustvnow::recordings","","Recordings (not implemented yet)"),
90
+            ])
91
+            return content
92
+
93
+        if clist=="movies":
94
+            return content
95
+
96
+        if clist=="recordings":
97
+            return content
98
+
99
+        ### Live channel list ###
100
+        elif data=="tvlive":
101
+            if not self.token:
102
+                if not self.login():
103
+                    raise Exception("Can not login\nPlease check USTVNow username/password in\n/usr/lib/enigma2/python/Plugins/Extensions/sources/ustvnow.cfg file")
104
+            data = "live/channelguide?token=%s"%self.token
105
+            self.r = self.call(data)
106
+            if not self.r:
107
+                return content
108
+            for item in self.r["results"]:
109
+                if item["order"] == 1:
110
+                    title = item["stream_code"]
111
+                    title =  h.unescape(title.decode("utf8")).encode("utf8")
112
+                    img = "http://m-api.ustvnow.com/"+item["prg_img"] #item["img"]
113
+                    data2 = "live/view?scode=%s&token=%s"%(item["scode"],self.token)
114
+                    desc = "%s\n%s (+%s')\n%s"%(item["title"],item["event_time"],int(item["actualremainingtime"])/60,item["description"])
115
+                    content.append((title,self.name+"::"+data2,img,desc))
116
+            return content
117
+
118
+        ### Live channel ###
119
+        elif path == "live/view":
120
+            url = "http://m-api.ustvnow.com/stream/1/%s"%data
121
+            #url = "http://m.ustvnow.com/stream/1/live/view?scode=whphd&token=o7oxits4dcjd8hbxusf9d9cgcyad&br_n=Chrome&br_v=60&br_d=desktop"
122
+            url =  "http://m.ustvnow.com/stream/1/live/%s"%data
123
+            headers = headers2dict("""
124
+Accept: application/json, text/javascript, */*; q=0.01
125
+User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36
126
+Referer: http://watch.ustvnow.com/guide
127
+                """)
128
+            r = self._http_request(url, headers=headers )
129
+            if not r:
130
+                return ("No stream found %s"%data,"","","No stream found")
131
+            r = json.loads(r)
132
+            if not "r" in dir(self):
133
+                if not self.token:
134
+                    self.login()
135
+                self.r = self.call("live/channelguide?token=%s"%self.token)
136
+            if self.r:
137
+                ch = qs["scode"]
138
+                for item in self.r["results"]:
139
+                    if item["order"] == 1 and item["scode"] == ch:
140
+                        title = item["stream_code"]
141
+                        title = "%s - %s (%s)"%(item["stream_code"],item["title"],item["event_time"])
142
+                        img = "http://m-api.ustvnow.com/"+item["prg_img"]
143
+                        data2 = "live/view?scode=%s&token=%s"%(item["scode"],self.token)
144
+                        desc = "%s\n%s (+%s')\n%s"%(item["title"],item["event_time"],int(item["actualremainingtime"])/60,item["description"])
145
+            else:
146
+                title = data
147
+            data2 = r["stream"]
148
+            desc = title
149
+            img = "" # img TODO
150
+            return (title,data2,img,desc)
151
+
152
+    def is_video(self,data):
153
+        if "::" in data:
154
+            data = data.split("::")[1]
155
+        if "live/view" in data:
156
+            return True
157
+        else:
158
+            return False
159
+
160
+    def call(self, data,headers=headers0,lang=""):
161
+        if not lang: lang = self.country
162
+        url = "http://m-api.ustvnow.com/gtv/1/"+data
163
+        content = self._http_request(url)
164
+        result = None
165
+        if content:
166
+            try:
167
+                result = json.loads(content)
168
+            except Exception, ex:
169
+                return None
170
+        return result
171
+
172
+
173
+if __name__ == "__main__":
174
+    country= "lv"
175
+    c = Source(country)
176
+    if len(sys.argv)>1:
177
+        data= sys.argv[1]
178
+    else:
179
+        data = "home"
180
+    content = c.get_content(data)
181
+    for item in content:
182
+        print item
183
+    #cat = api.get_categories(country)
184
+    #chan = api.get_channels("lv")
185
+    #prog = api.get_programs(channel=6400)
186
+    #prog = api.get_programs(category=55)
187
+    #seas = api.get_seasons(program=6453)
188
+    #str = api.get_streams(660243)
189
+    #res = api.get_videos(802)
190
+    #formats = api.getAllFormats()
191
+    #det = api.detailed("1516")
192
+    #vid = api.getVideos("13170")
193
+    pass

+ 496
- 0
sources/viaplay.py View file

@@ -0,0 +1,496 @@
1
+#!/usr/bin/env python
2
+# coding=utf8
3
+#
4
+# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
5
+# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
6
+# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
7
+#
8
+try:
9
+    import json
10
+except:
11
+    import simplejson as json
12
+
13
+import requests
14
+try:
15
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
16
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
17
+except:
18
+    pass
19
+import urlparse, urllib
20
+import datetime, time,re, sys,os
21
+from collections import OrderedDict
22
+import ssl
23
+if "_create_unverified_context" in dir(ssl):
24
+    ssl._create_default_https_context = ssl._create_unverified_context
25
+
26
+from SourceBase import SourceBase
27
+try:
28
+    import util
29
+except:
30
+    parent = os.path.dirname(os.path.abspath(__file__))
31
+    parent = os.sep.join(parent.split(os.sep)[:-1])
32
+    sys.path.insert(0,parent)
33
+    import util
34
+
35
+headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
36
+
37
+class Source(SourceBase):
38
+
39
+    def __init__(self,language="en",cfg_path=None):
40
+        self.hidden = True  # do not show non-working sources in the menu
41
+        self.name = "viaplay"
42
+        self.title = "viaplay.lv"
43
+        self.img = "https://yt3.ggpht.com/-noVdjbNR-V8/AAAAAAAAAAI/AAAAAAAAAAA/yZ9XNP5urLY/s900-c-k-no-mo-rj-c0xffffff/photo.jpg"
44
+        self.desc = "Viaplay.lv saturs"
45
+        self.url = "https://viaplay.lv/"
46
+        self.headers = headers2dict("""
47
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36
48
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
49
+Connection: keep-alive
50
+Upgrade-Insecure-Requests: 1
51
+        """)
52
+        #self.language=language
53
+        cur_directory = os.path.dirname(os.path.abspath(__file__))
54
+        if not cfg_path: cfg_path = cur_directory
55
+        self.config_file = os.path.join(cfg_path,self.name+".cfg")
56
+        self.options = OrderedDict([("user","change_user"),("password","change_password"),("device","")])
57
+        self.options_read()
58
+        self.device = self.options["device"]
59
+        self.r = None # requests
60
+        self.play_session = None
61
+        self.s = None
62
+
63
+    def login(self,user="",password=""):
64
+        self.options_read()
65
+        if not user: user=self.options["user"]
66
+        if not password: password = self.options["password"]
67
+        self.s = requests.Session()
68
+
69
+        ### Get a session ID ###
70
+        headers = headers2dict("""
71
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36
72
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
73
+Accept-Language: en-US,en;q=0.5
74
+Accept-Encoding: gzip, deflate, br
75
+Referer: https://viaplay.lv/
76
+Cookie: ott_cookies_confirmed=1;
77
+DNT: 1
78
+Connection: keep-alive
79
+Upgrade-Insecure-Requests: 1
80
+""")
81
+        r = requests.get(self.url,headers=headers)
82
+        if not "PLAY_SESSION" in r.cookies:
83
+            return False
84
+        self.play_session = r.cookies["PLAY_SESSION"]
85
+        self.csrfToken = re.search("csrfToken=(.+)",self.play_session).group(1)
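+        # the csrfToken needed for the login form POST is embedded in the PLAY_SESSION cookie value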
86
+
87
+        ### Log in ###
88
+        headers = headers2dict("""
89
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36
90
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
91
+Accept-Language: en-US,en;q=0.5
92
+Accept-Encoding: gzip, deflate, br
93
+Referer: https://viaplay.lv/
94
+Cookie: ott_cookies_confirmed=1; PLAY_SESSION=e618c42b377a65021298ff63309d5a907988ed1b-PSESSIONID=b010ea1b-fc5e-4a18-aa15-ebbe8b57b3f0&csrfToken=b4eb35263d9be16ef9f7b2f5d10a8ee99dfe75a8-1478051634814-63682b20f1e7e5579de6d056
95
+DNT: 1
96
+Connection: keep-alive
97
+Upgrade-Insecure-Requests: 1
98
+Content-Type: application/x-www-form-urlencoded
99
+""")
100
+        url = "https://viaplay.lv/tdi/login/nav/form?csrfToken=%s"%self.csrfToken
101
+        #      https://viaplay.lv/tdi/login/nav/form?_infuse=1&_ts=1490554674901&csrfToken=
102
+        params = "nav_redirectUri=https%3A%2F%2Fviaplay.lv%2F&nav_email={}&nav_password={}".format(urllib.quote(user),urllib.quote(password))
103
+        #         nav_redirectUri=https%3A%2F%2Fviaplay.lv%2F&nav_email=ivars777%40gmail.com&nav_password=kaskade7&nav_remember=true
104
+        headers["Cookie"] = "ott_cookies_confirmed=1; PLAY_SESSION=%s;"%self.play_session
105
+        if self.device:
106
+            headers["Cookie"] += "ott_dids=%s"%self.device
107
+        #cookie = dict(PLAY_SESSION=self.play_session,_hjIncludedInSample=1, mobileAppPromo="shown")
108
+        r = requests.post(url,params,headers=headers,allow_redirects=False)
109
+        if not "Set-Cookie" in r.headers:
110
+            self.play_session = None
111
+            return False
112
+        if not "ott_web_sac" in r.cookies:
113
+            self.play_session = None
114
+            return False
115
+        self.ott = r.cookies["ott_web_sac"]
116
+
117
+        ### Get a device ID ###
118
+        if not self.device:
119
+            headers = headers2dict("""
120
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36
121
+Accept: application/xml, text/xml, */*; q=0.01
122
+Accept-Language: en-US,en;q=0.5
123
+Accept-Encoding: gzip, deflate, br
124
+Content-Type: application/x-www-form-urlencoded; charset=UTF-8
125
+X-Requested-With: XMLHttpRequest
126
+Referer: https://viaplay.lv/movies/me-and-earl-and-the-dying-girl
127
+DNT: 1
128
+Connection: keep-alive    """)
129
+            url = "https://viaplay.lv/tdi/account/device/create?_infuse=1&csrfToken=%s"%self.csrfToken
130
+            params = "successRedirectUri=https%3A%2F%2Fviaplay.lv%2Fmovies%2F&slotId=&title=Enigma2"
131
+            headers["Cookie"] = "PLAY_SESSION=%s; ott_cookies_confirmed=1; ott_web_sac=%s;"%(self.play_session,self.ott)
132
+            #cookie = dict(PLAY_SESSION=self.play_session,_hjIncludedInSample=1, mobileAppPromo="shown")
133
+            r = requests.post(url,params,headers=headers,allow_redirects=False)
134
+            if not ("Set-Cookie" in r.headers and "ott_dids" in r.headers["Set-Cookie"]):
135
+                self.play_session = None
136
+                return False
137
+            self.device =  r.cookies["ott_dids"]
138
+            self.options["device"] = self.device
139
+            self.options_write(self.options)
140
+        return True
141
+
142
+    def logout(self):
143
+        return True
144
+
145
+    def is_logedin(self):
146
+        if self.play_session:
147
+            return True
148
+        else:
149
+            return False
150
+
151
+    def get_video_info(self,vid):
152
+        import demjson
153
+        ### Get the streams ###
154
+        headers = headers2dict("""
155
+Host: viaplay.lv
156
+User-Agent: Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46
157
+Accept: application/xml, text/xml, */*; q=0.01
158
+Accept-Language: en-US,en;q=0.5
159
+Accept-Encoding: gzip, deflate, br
160
+X-Requested-With: XMLHttpRequest
161
+DNT: 1
162
+Connection: keep-alive
163
+Referer: https://viaplay.lv/
164
+""")
165
+        url = "https://viaplay.lv/prehravac/init?_infuse=1&productId=%s"%vid #t110623
166
+        headers["Cookie"] = "ott_cookies_confirmed=1; ott_dids=%s; PLAY_SESSION=%s"%(self.device,self.play_session)
167
+        r = requests.get(url,headers=headers,allow_redirects=False)
168
+        statuss = re.search("<status>(.+?)</status>", r.content).group(1)
169
+        if statuss.lower() <> "ok":
170
+            raise Exception(statuss)
171
+        #print r.content
172
+        m = re.search(r"<!\[CDATA\[\s+var TDIPlayerOptions = (.+?);[\n\t\s]+\]\]>\s+</script>", r.content, re.DOTALL)
173
+        if not m:
174
+            raise Exception("Can not find stream info")
175
+        txt = m.group(1)
176
+        txt = re.sub("// .+$", "", txt, flags=re.MULTILINE)
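+        # TDIPlayerOptions is a JavaScript object literal with // comments; strip the
+        # comments and parse it with demjson, which tolerates non-strict JSON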
177
+        #print txt
178
+        #for m in re.finditer("// .+$", txt, re.MULTILINE):
179
+        #    txt = txt[:m.start()] + txt[m.end():]
180
+        #print txt
181
+        js = demjson.decode(txt)
182
+        return js
183
+        #return txt
184
+
185
+
186
+    def get_content(self, data):
187
+        print "[%s] get_content:"%self.name, data
188
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
189
+        content=[]
190
+        content.append(("..return", "back","","Return back"))
191
+
192
+        if clist=="home":
193
+            content.extend([
194
+                ("Search", "viaplay::search-results-all?query={0}",self.img,"Meklēt"),
195
+                ("Filmas", "viaplay::movies",self.img,"Filmas"),
196
+                ("Seriāli", "viaplay::series",self.img,"Seriāli"),
197
+                ("Bērniem", "viaplay::kids",self.img,"Bērniem"),
198
+                ("Dokumentalās filmas", "viaplay::documentary",self.img,"Dokumentalās filmas"),
199
+                ("Sports", "viaplay::live",self.img,"Sports"),
200
+             ])
201
+            return content
202
+
203
+        ### Search ###
204
+        elif clist=="search-results-all":
205
+            url = "https://viaplay.lv/"+data
206
+            r = self._http_request(url)
207
+            result = re.findall(r'<div id="product-(\w+)".+?<a href="([^"]+)">.+?<img data-srcset="([^ ]+).+?alt="([^"]+)">.+?<h3 class="is-size-h6">([^<]+)</h3>.*?<p>([^<]+).+?</p>.+?<p class="promo-notice">([^<]+)</p>.+?<p class="info">([^<]+)</p>', r, re.DOTALL)
208
+            for item in result:
209
+                vid = item[0]
210
+                data2 = item[1].replace("https://viaplay.lv/","")
211
+                img = item[2]
212
+                ep = item[3]
213
+                title= item[4]
214
+                seas = item[5].replace("\n","").replace("\t","")
215
+                desc = item[6]
216
+                desc2 = item[7]
217
+                if ep==title:
218
+                    title = "%s (%s)"%(title,seas)
219
+                else:
220
+                    title = "%s - %s%s"%(title,seas,ep)
221
+                desc = "%s\n%s\n%s"%(title,desc2,desc)
222
+                content.append((title,self.name+"::"+data2,img,desc))
223
+            return content
224
+
225
+        ### Sections ###
226
+        elif data in ["movies","series","kids","documentary"]:
227
+            r = self._http_request(self.url+data)
228
+            # https://viaplay.lv/tdi/movies/next?sections[]=MOVIES&genres[]=a3591&sort[]=latest&offset=0
229
+            # https://viaplay.lv/tdi/series/next?sections[]=SERIES&sort[]=latest&offset=0
230
+            # https://viaplay.lv/tdi/kids/next?sections[]=KIDS&cat[]=SERIES&cat[]=MOVIE&sort[]=latest&offset=18
231
+            # https://viaplay.lv/kids?sections[]=KIDS&cat[]=SERIES&sort[]=latest
232
+            sections =  {"movies":"MOVIES","series":"SERIES","kids":"KIDS","documentary":"DOCUMENTS"}
233
+            nosaukums = {"movies":"Flmas","series":"Seriāli","kids":"Bērnu","documentary":"Dokumentalās"}
234
+            nosaukums = {"movies":"Filmas","series":"Seriāli","kids":"Bērnu","documentary":"Dokumentalās"}
235
+            sort = OrderedDict([("latest","jaunākais"),("title","pēc nosaukuma"),("popular","pēc popularitātes"),("year","pēc gada")])
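+            # build one menu entry per sort order (and per category for kids/documentary);
+            # each entry points at the corresponding /tdi/<section>/next query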
236
+            for s in sort:
237
+                if data in ("movies","series"):
238
+                    title = "%s - %s"%(nosaukums[data],sort[s])
239
+                    data2 = "%s/next?sections[]=%s&sort[]=%s"%(data,sections[data],s)
240
+                    content.append((title,self.name+"::"+data2,self.img,title))
241
+                else:
242
+                    title = "%s filmas - %s"%(nosaukums[data],sort[s])
243
+                    data2 = "%s/next?sections[]=%s&cat[]=MOVIE&sort[]=%s"%(data,sections[data],s)
244
+                    content.append((title,self.name+"::"+data2,self.img,title))
245
+                    title = "%s seriāli - %s"%(nosaukums[data],sort[s])
246
+                    data2 = "%s/next?sections[]=%s&cat[]=SERIES&sort[]=%s"%(data,sections[data],s)
247
+                    content.append((title,self.name+"::"+data2,self.img,title))
248
+
249
+            # Add the genre list
250
+            result = re.findall(r'name="genres\[\]" value="([^"]+)">.+?class="">([^<]+)</label>', r, re.DOTALL)
251
+            for item in result:
252
+                s = "latest"
253
+                genre = item[1].replace("&amp;","&")
254
+                title = "%s: %s"%(nosaukums[data],genre)
255
+                data2 = "%s/next?sections[]=%s&genres[]=%s&sort[]=%s"%(data,sections[data],item[0],s)
256
+                content.append((title,self.name+"::"+data2,self.img,title))
257
+
258
+            return content
259
+
260
+        ### Movie/series/episode list ###
261
+        elif clist in ("movies","series","kids","documentary") and plist[1] == "next":
262
+            url = "https://viaplay.lv/tdi/"+data
263
+            r = self._http_request(url)
264
+            if clist == "series" and "season" in qs:
265
+                result = re.findall(r'<div id="product-(\w+)".+?<a href="([^"]+)">.+?<img data-srcset="([^ ]+).+?alt="([^"]+)">.+?<h3 class="is-size-h6">([^<]+)</h3>.*?<p>([^<]+).+?</p>.+?<p class="promo-notice">([^<]+)</p>.+?<p class="info">([^<]+)</p>', r, re.DOTALL)
266
+                for item in result:
267
+                    vid = item[0]
268
+                    data2 = item[1].replace("https://viaplay.lv/","")
269
+                    img = item[2]
270
+                    ep = item[3]
271
+                    title= item[4]
272
+                    seas = item[5]
273
+                    desc = item[6]
274
+                    desc2 = item[7]
275
+                    title = "%s - %s%s"%(title,seas,ep)
276
+                    desc = "%s\n%s\n%s"%(title,desc2,desc)
277
+                    content.append((title,self.name+"::"+data2,img,desc))
278
+            else: # filmas
279
+                result = re.findall(r'<div id="product-(\w+)".+?<a href="([^"]+)">.+?<img data-srcset="([^ ]+).+?alt="([^"]+)">.+?<p>([^<]+)</p>.+?<p class="promo-notice">([^<]+).+?<p class="is-strong detail">(.+?)</p>.+?<p class="info">([^<]+)</p>', r, re.DOTALL)
280
+                for item in result:
281
+                    vid = item[0]
282
+                    data2 = item[1].replace("https://viaplay.lv/","")
283
+                    img = item[2]
284
+                    title = item[3]
285
+                    year = item[4]
286
+                    year  = year.replace("\n","").replace("\t","")
287
+                    title = title +"(%s)"%year
288
+                    desc= item[5]
289
+                    genre = re.findall(">([^<]+)<", item[6], re.DOTALL)
290
+                    genre = ("".join(genre)).replace("&amp;","&")
291
+                    desc2 = item[7]
292
+                    desc = "%s\n%s\n%s"%(genre,desc2,desc)
293
+                    content.append((title,self.name+"::"+data2, img,desc))
294
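+            # the next-page URL is taken from the data('href', ...) call in the returned markup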
+            m = re.search(r"data\('href', 'https://viaplay\.lv/tdi/([^']+)'\)", r, re.DOTALL)
295
+            if m:
296
+                data2 = m.group(1)
297
+                content.append(("Next page",self.name+"::"+data2,img,"Next page"))
298
+            return content
299
+
300
+        ### Series ###
301
+        elif clist == "series" and len(plist)==2:
302
+            url = "https://viaplay.lv/"+data
303
+            r = self._http_request(url)
304
+            result = re.findall(r'<li>.*?<a class="tdi" href="https://viaplay\.lv/([^"]+)" data-related-ancestor="\.js-tdi-items-filter-and-items">([^<]+)</a>.*?</li>', r, re.DOTALL)
305
+            for item in result:
306
+                title = item[1]
307
+                data2 = item[0]
308
+                data2 = data2.replace("series/","series/next/")
309
+                data2 = data2+"&sort[]=ord"
310
+                #series/littlest-pet-shop?season=t6821
311
+                #series/next/peppa-pig?season=t8430
312
+                # &sort[]=ord
313
+                if "availability=" in data2: continue
314
+                content.append((title,self.name+"::"+data2,self.img,title)) #TODO image
315
+            return content
316
+
317
+    def is_video(self,data):
318
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
319
+        if clist in ("movies","documentary","kids") and len(plist)>1 and plist[1]<>"next":
320
+            return True
321
+        elif clist == "series"  and len(plist)>1 and plist[1] == "episode":
322
+            return True
323
+        else:
324
+            return False
325
+
326
+    def get_streams(self, data):
327
+        print "[viaplay] get_streams:", data
328
+        if not self.is_video(data):
329
+            return []
330
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
331
+        if not self.is_logedin():
332
+            self.login()
333
+        if not self.is_logedin():
334
+            raise Exception("Could not login to viaplay.lv, check username/password in options")
335
+
336
+        streams = []
337
+        url = "https://viaplay.lv/"+data
338
+        r = self._http_request(url)
339
+        if clist in ("series","livestream"): # TODO nerāda overtime u.c.
340
+            m = re.search(r'<h1 class="is-bottom-sticked is-size-h2">(.+?)</h1>.*?<h2 class="is-size-h4">.*?<p class="is-size-h6 is-strong is-bottom-sticked">(.+?)<div class="toggler-content">\s+<p>(.+?)</p>', r, re.DOTALL)
341
+            if not m:
342
+                raise Exception("Problem getting video information")
343
+            title = m.group(1).replace("\n"," ").replace("\t","").strip()
344
+            title = re.sub("<[^>]+>","",title).strip()
345
+            desc2 = m.group(2).replace("\n"," ").replace("\t","").strip()
346
+            desc2 = re.sub("<[^>]+>","",desc2).strip()
347
+            desc = m.group(3)
348
+            desc = "%s\n%s"%(desc2,desc)
349
+            vid = re.search('data-productid="(\w+)"',r).group(1)
350
+        else:
351
+            m = re.search(r'<h1 class="is-strong is-bottom-sticked is-size-h2" jnp-id="(\w+)">([^<]+)</h1>.*?<h2 class="is-strong is-size-h4">([^<]+)</h2>.*?<p class="is-size-h6 is-strong is-bottom-sticked">(.+?)<div class="toggler-content">\s+<p>(.+?)</p>', r, re.DOTALL)
352
+            if not m:
353
+                raise Exception("Problem getting video information")
354
+            title = m.group(2).strip()
355
+            title2 = m.group(3).strip()
356
+            title = "%s | %s"%(title,title2)
357
+            desc = m.group(5).strip()
358
+            desc2 = m.group(4).strip()
359
+            desc2 = re.sub("<[^>]+>","",desc2)
360
+            desc2 = desc2.replace("\n"," ").replace("\t","")
361
+            desc = "%s\n%s"%(desc2,desc)
362
+            vid = m.group(1)
363
+
364
+        js = self.get_video_info(vid)
365
+        #for m in re.finditer(r"lang: '(?P<lang>\w+)',\s+src: '(?P<url>[^']+)',\s+type: '(?P<mime>[^']+)',\s+drm: \[(?P<drm>.+?)\]\s*\}", r, re.DOTALL):
366
+        if not js:
367
+            return []
368
+        tracks = js["tracks"]
369
+        #if not tracks["HLS"]:
370
+        #    raise Exception("Encrypted DASH playing not yet implemented")
371
+
372
+        captions = []
373
+        llist = ["fr","en","ru","lv"]
374
+        for st in js["plugins"]["settings"]["subtitles"]:
375
+            sub = {}
376
+            sub["url"] = st["src"]
377
+            sub["lang"] = st["srclang"]
378
+            sub["name"] = st["label"]
379
+            sub["type"] = "vtt"
380
+            sub["order"] = llist.index(sub["lang"])*10 if sub["lang"] in llist else 0
381
+            captions.append(sub)
382
+        captions = sorted(captions,key=lambda item: item["order"],reverse=True)
383
+
384
+        for s in tracks["HLS"] if tracks["HLS"] else tracks["DASH"]:
385
+            stype = "DASH" if "dash" in s["type"] else "HLS"
386
+            if "drm" in s: ###
387
+                # TODO, encrypted stream
388
+                raise Exception("Can not play DRM protected stream!\nOnly local and Russian content available without DRM")
389
+                #continue  # unreachable: the raise above already exits get_streams
390
+            url = s["src"]
391
+            #urlp = util.streamproxy_encode(s["src"])
392
+            stream = util.item()
393
+            stream["url"]=url
394
+            stream["resolver"] = "viaplay"
395
+            stream["lang"]=s["lang"]
396
+            stream["quality"]="variant"
397
+            stream["bitrate"]= "1000000"
398
+            stream["name"]= title
399
+            stream["desc"]=desc
400
+            stream["type"]=stype
401
+            stream["subs"] = captions
402
+            print url
403
+            if stype=="DASH": streams.append(stream)
404
+
405
+            if stype == "HLS": # izvelkam individuālos strimus
406
+                r = requests.get(url)
407
+                result = re.findall("#EXT-X-STREAM-INF:BANDWIDTH=(\d+),RESOLUTION=(\d+x\d+)\n(\w+.m3u8)", r.content)
408
+                if not result:
409
+                    continue
410
+                for s2 in result:
411
+                    ### TODO should use the cookie with the token from the playlist request
412
+                    if "set-cookie" in r.headers:
413
+                        headers = {"Cookie":r.headers["set-cookie"]}
414
+                    else:
415
+                        headers={}
416
+                    #url2 = re.sub(r"(http.*://.+/)\w+.m3u8", r"\1"+s2[2], url)
417
+                    url2 = "/".join(url.split("/")[:-1])+"/"+s2[2]
418
+                    #r2 = requests.get(url2,headers=headers)
419
+                    #if "set-cookie" in r2.headers:
420
+                        #headers = {"Cookie":r2.headers["set-cookie"]}
421
+                    #else:
422
+                        #headers={}
423
+                    #url2p=util.streamproxy_encode(url2,headers)
424
+                    stream = util.item()
425
+                    stream["url"]=url2
426
+                    stream["lang"]=s["lang"]
427
+                    stream["quality"]="%s"%(s2[1])
428
+                    stream["name"]= title
429
+                    stream["desc"]=desc
430
+                    stream["bitrate"]=s2[0]
431
+                    stream["type"]="DASH" if "dash" in s["type"] else "HLS"
432
+                    streams.append(stream)
433
+
434
+        ### TODO - sort the list so that the best video comes first
435
+        qlist = ["","512","640","758","1024","variant"]
436
+        llist = ["lt","et","fr","en","ru","lv"]
437
+        for s in streams:
438
+            lv = llist.index(s["lang"])*10000000 if s["lang"] in llist else 0
439
+            #qv=qlist.index(s["quality"]) if s["quality"] in qlist else 0
440
+            qv = int(s["bitrate"]) if s["bitrate"] else 0
441
+            s["order"] = lv+qv
442
+            #print s["lang"],s["quality"],s["bitrate"],s["order"]
443
+
444
+        streams = sorted(streams,key=lambda item: item["order"],reverse=True)
445
+        return streams
446
+
447
+    def call(self, data,params = None, headers=None):
448
+        if not headers: headers = self.headers
449
+        #if not lang: lang = self.country
450
+        url = "https://viaplay.lv/tdi/" + data
451
+        content = self._http_request(url, params, headers)
452
+        return content
453
+
454
+if __name__ == "__main__":
455
+    if len(sys.argv)>1:
456
+        data= sys.argv[1]
457
+    else:
458
+        data = "kids/child-and-karlson"
459
+    c = Source()
460
+    print "login: %s"%c.login()
461
+    if "/" in data:
462
+        streams = c.get_streams(data)
463
+        util.play_video(streams)
464
+    else:
465
+        vinfo = c.get_video_info(data)
466
+        if "HLS" in vinfo["tracks"] and vinfo["tracks"]["HLS"]:
467
+            url = vinfo["tracks"]["HLS"][0]["src"]
468
+            urlp = util.streamproxy_encode(url)
469
+            util.player(urlp)
470
+        else:
471
+            print "No HLS stream"
472
+    sys.exit()
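+    # NOTE: the code below never runs (sys.exit() above); apparently kept as manual test scaffolding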
473
+
474
+    r = requests.get("https://viaplay.lv/movies?sections[]=MOVIES")
475
+    result = re.findall(r'<div id="product-(\w+)".+?<a href="([^"]+)">.+?<img data-srcset="([^ ]+).+?alt="([^"]+)">.+?<p class="promo-notice">([^<]+)<', r.content, re.DOTALL)
476
+    for item in result:
477
+        vid = item[0]
478
+        url = item[1]
479
+        img = item[2]
480
+        title = item[3]
481
+        desc= item[4]
482
+        print "\n%s (%s):"%(title,vid)
483
+        vinfo = c.get_video_info(vid)
484
+        if "HLS" in vinfo["tracks"]:
485
+            for s in vinfo["tracks"]["HLS"]:
486
+                print "HLS %s: \n%s"%(s["lang"],s["src"])
487
+
488
+        if "DASH" in vinfo["tracks"]:
489
+            for s in vinfo["tracks"]["DASH"]:
490
+                print "DASH %s: \n%s"%(s["lang"],s["src"])
491
+        #except Exception,ex:
492
+            #print ex.message
493
+    #content = c.get_content(data)
494
+    #for item in content:
495
+    #    print item
496
+    pass

+ 741
- 0
util.py Прегледај датотеку

@@ -0,0 +1,741 @@
1
+# -*- coding: UTF-8 -*-
2
+# /*
3
+# *      Copyright (C) 2011 Libor Zoubek,ivars777
4
+# *
5
+# *
6
+# *  This Program is free software; you can redistribute it and/or modify
7
+# *  it under the terms of the GNU General Public License as published by
8
+# *  the Free Software Foundation; either version 2, or (at your option)
9
+# *  any later version.
10
+# *
11
+# *  This Program is distributed in the hope that it will be useful,
12
+# *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+# *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
+# *  GNU General Public License for more details.
15
+# *
16
+# *  You should have received a copy of the GNU General Public License
17
+# *  along with this program; see the file COPYING.  If not, write to
18
+# *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19
+# *  http://www.gnu.org/copyleft/gpl.html
20
+# *
21
+# */
22
+import os, sys, re
23
+import urllib, urllib2
24
+import datetime
25
+import traceback
26
+import cookielib
27
+import requests
28
+try:
29
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
30
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
31
+except:
32
+    pass
33
+from htmlentitydefs import name2codepoint as n2cp
34
+import HTMLParser
35
+import StringIO
36
+
37
+#import threading
38
+#import Queue
39
+import pickle
40
+import string
41
+import simplejson as json
42
+#from demjson import demjson
43
+#import demjson
44
+import json
45
+#from bs4 import BeautifulSoup
46
+
47
+UA = 'Mozilla/6.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.5) Gecko/2008092417 Firefox/3.0.3'
48
+LOG = 2
49
+
50
+_cookie_jar = None
51
+CACHE_COOKIES = 'cookies'
52
+
53
+def system():
54
+    if "kodi" in sys.executable.lower():
55
+        return "kodi"
56
+    elif sys.platform == "win32":
57
+        return "windows"
58
+    elif sys.platform == "linux2":
59
+        return "enigma2"
60
+    else:
61
+        return "unknown"
62
+
63
+def nfo2xml(nfo_dict):
64
+    nfo_type,nfo = next(nfo_dict.iteritems())
65
+    s= "<%s>\n"%nfo_type.encode("utf8")
66
+    for k,v in nfo.iteritems():
67
+        if isinstance(v,list):
68
+            for v2 in v:
69
+                if isinstance(v2,unicode): v2 = v2.encode("utf8")
70
+                s += "    <%s>%s</%s>\n"%(k.encode("utf8"), v2, k.encode("utf8"))
71
+        else:
72
+            if isinstance(v,unicode): v = v.encode("utf8")
73
+            s += "    <%s>%s</%s>\n"%(k.encode("utf8"), v, k.encode("utf8"))
74
+    s += "</%s>\n"%nfo_type.encode("utf8")
75
+    return s
76
+
77
+def nfo2desc(nfo):
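+    # e.g. nfo2desc({"title": "X", "plot": "Y"}) -> "X\nY" (illustrative)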
78
+
79
+    if not "title" in nfo:
80
+        nfo_type, nfo = next(nfo.iteritems())
81
+    desc = nfo2title(nfo)
82
+    dd = lambda t: "\n" + nfo[t] if t in nfo and nfo[t] else ""
83
+    dd2 = lambda t: "\n" + ",".join(nfo[t]) if t in nfo and nfo[t] else ""
84
+
85
+    def ddd(t,title=""):
86
+        if title:
87
+            title = title + ": "
88
+        if t in nfo and nfo[t]:
89
+            if isinstance(nfo[t],list):
90
+                return "\n" + title + ",".join(nfo[t])
91
+            else:
92
+                return "\n" + title + nfo[t]
93
+        else:
94
+            return ""
95
+
96
+    desc += ddd("tagline")
97
+    if "plot" in nfo and "tagline" in nfo and nfo["tagline"] <> nfo["plot"]:
98
+        desc += ddd("plot")
99
+    elif "plot" in nfo and not "tagline" in nfo:
100
+        desc += ddd("plot")
101
+    desc += ddd("genre","Genre")
102
+    desc += ddd("runtime","Length")
103
+    desc += ddd("director","Director")
104
+    desc += ddd("actor","Actors")
105
+    desc += ddd("language","Languages")
106
+    desc += ddd("quality","Quality")
107
+    return desc.encode("utf8") if isinstance(desc,unicode) else desc
108
+
109
+def nfo2title(nfo):
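+    # e.g. nfo2title({"title": "Vertigo", "year": "1958"}) -> "Vertigo (1958)" (illustrative)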
110
+    if not "title" in nfo:
111
+        nfo_type, nfo = next(nfo.iteritems())
112
+    title = nfo["title"]
113
+    if "originaltitle" in nfo and nfo["originaltitle"] and nfo["originaltitle"]<>nfo["title"]:
114
+        title +=" ~ "+nfo["originaltitle"]
115
+    if "year" in nfo and nfo["year"]:
116
+        title += " (%s)"%nfo["year"]
117
+    return title.encode("utf8") if isinstance(title,unicode) else title
118
+
119
+def play_video(streams):
120
+    if len(streams)>1:
121
+        for i,s in enumerate(streams):
122
+
123
+            print "%s: [%s,%s,%s] %s"%(i,s["quality"],s["lang"],s["type"],s["name"])
124
+        a = raw_input("Select stram to play: ")
125
+        try:
126
+            n = int(a)
127
+        except:
128
+            n = 0
129
+        if n>=len(streams):
130
+            stream = streams[-1]
131
+        else:
132
+            stream = streams[n]
133
+    else:
134
+        stream = streams[0]
135
+
136
+    stream = stream_change(stream)
137
+    title = stream["name"] if not "nfo" in stream or not stream["nfo"] else nfo2title(stream["nfo"])
138
+    desc = stream["desc"] if not "nfo" in stream or not stream["nfo"] else nfo2desc(stream["nfo"])
139
+    img = stream["img"]
140
+    url = stream["url"]
141
+    suburl = ""
142
+    print url
143
+    if "subs" in stream and stream["subs"]:
144
+        suburl = stream["subs"][0]["url"]
145
+        print "\n**Download subtitles %s - %s"%(title,suburl)
146
+        subs = urllib2.urlopen(suburl).read()
147
+        if subs:
148
+            fname0 = re.sub("[/\n\r\t,:\?]","_",title)
149
+            subext = ".srt"
150
+            subfile = os.path.join("",fname0+subext)
151
+            if ".xml" in suburl:
152
+                subs = ttaf2srt(subs)
153
+            with open(subfile,"w") as f:
154
+                f.write(subs)
155
+        else:
156
+            print "\n Error downloading subtitle %s"%suburl
157
+    print "\n**Play stream %s\n%s" % (title, url.encode("utf8"))
158
+    return player(url,title,suburl,stream["headers"])
159
+
160
+def player(url, title = "", suburl= "",headers={}):
161
+    from subprocess import call
162
+    cmd1 = [r"c:\Program Files\VideoLAN\VLC\vlc.exe",url,
163
+           "--meta-title",title.decode("utf8").encode(sys.getfilesystemencoding()),
164
+           "--http-user-agent","Enigma2"
165
+    ]
166
+    # gst-launch-1.0 -v souphttpsrc ssl-strict=false proxy=127.0.0.1:8888 extra-headers="Origin:adadadasd"  location="http://bitdash-a.akamaihd.net/content/sintel/sintel.mpd" ! decodebin! autovideosink
167
+    cmd2 = [
168
+        r"C:\gstreamer\1.0\x86_64\bin\gst-launch-1.0","-v",
169
+        "playbin", 'uri="%s"'%url,
170
+        #"souphttpsrc", "ssl-strict=false",
171
+        #"proxy=127.0.0.1:8888",
172
+        #'location="%s"'%url,
173
+        #'!decodebin!autovideosink'
174
+    ]
175
+    cmd3 = ["ffplay.exe",url]
176
+    cmd = cmd3 if url.startswith("https") else cmd2
177
+    ret = call(cmd)
178
+    #if ret:
179
+        #a = raw_input("*** Error, continue")
180
+    return
181
+
182
+def stream_type(data):
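+    # e.g. stream_type("https://host/play.m3u8") -> "hls"; stream_type("rtmp://host/live") -> "rtmp" (illustrative)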
183
+    data = data.lower()
184
+    m = re.search(r"^(\w+)://", data)
185
+    prefix = m.group(1) if m else ""
186
+    if prefix in ("http","https"):
187
+        if ".m3u8" in data:
188
+            return "hls"
189
+        elif ".mpd" in data:
190
+            return "dash"
191
+        else:
192
+            return "http"
193
+    else:
194
+        return prefix
195
+
196
+def check_version(package,url="http://feed.blue.lv/Packages"):
197
+    "Return current package version from OPKG feed"
198
+    url = "http://feed.blue.lv/Packages"
199
+    r = requests.get(url)
200
+    if not r.ok:
201
+        return ""
202
+    m = re.search("Package: %s\nVersion: (.+?)\n"%package, r.content)
203
+    if not m:
204
+        return ""
205
+    return m.group(1)
206
+
207
+SPLIT_CHAR = "~"
208
+SPLIT_CODE = urllib.quote(SPLIT_CHAR)
209
+EQ_CODE = urllib.quote("=")
210
+COL_CODE = urllib.quote(":")
211
+SPACE_CODE = urllib.quote(" ")
212
+
213
+def make_fname(title):
214
+    "Make file name from title"
215
+    title = title.strip()
216
+    fname0 = re.sub("[/\n\r\t,:]"," ",title)
217
+    fname0 = re.sub("['""]","",fname0)
218
+    return fname0
219
+
220
+def hls_base(url):
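+    # e.g. hls_base("http://host/a/b/index.m3u8?token=1") -> "http://host/a/b/" (illustrative)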
221
+    url2 = url.split("?")[0]
222
+    url2 = "/".join(url2.split("/")[0:-1])+ "/"
223
+    return url2
224
+
225
+def stream_change(stream):
226
+    #return stream # TODO
227
+    if "resolver" in stream and stream["resolver"] in ("viaplay","hqq","filmas") or \
228
+        "surl" in stream and re.search("https*://(hqq|goo\.gl)",stream["surl"]):
229
+        stream["url"] = streamproxy_encode(stream["url"],stream["headers"])
230
+        stream["headers"] = {}
231
+        return stream
232
+    else:
233
+        return stream
234
+
235
+def streamproxy_encode(url,headers=None,proxy_url=None):
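+    # e.g. streamproxy_encode("http://example.com/a.m3u8", {"User-Agent": "Enigma2"}) ->
+    #   "http://localhost:8880/http%3A//example.com/a.m3u8?~User-Agent=Enigma2" (illustrative)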
236
+    PROXY_URL = "http://localhost:8880/"
237
+    if not "?" in url:
238
+        url = url+"?"
239
+    url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
240
+    if not proxy_url:
241
+        proxy_url = PROXY_URL
242
+    url2 = proxy_url + url2
243
+    if headers:
244
+        headers2 = []
245
+        for h in headers:
246
+            headers2.append("%s=%s"%(h,headers[h].replace("=",EQ_CODE).replace(SPLIT_CHAR,SPLIT_CODE).replace(" ",SPACE_CODE)))
247
+        headers2 = SPLIT_CHAR.join(headers2)
248
+        url2 = url2+SPLIT_CHAR+headers2
249
+    #return url2.encode("utf8") if isinstance(url2,unicode) else url2
250
+    return url2
251
+
252
+def streamproxy_decode(urlp):
253
+    import urlparse
254
+    path = urlp.replace(re.search("http://[^/]+",urlp).group(0),"")
255
+    p = path.split(SPLIT_CHAR)
256
+    url = urllib.unquote(p[0][1:])
257
+    #headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
258
+    headers={}
259
+    if len(p)>1:
260
+        for h in p[1:]:
261
+            #h = urllib.unquote()
262
+            headers[h.split("=")[0]]=urllib.unquote(h.split("=")[1])
263
+    return url,headers
264
+
265
+def streamproxy_encode2(url,headers=None,proxy_url=None):
266
+    PROXY_URL = "http://localhost:8880/"
267
+    #url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
268
+    url2 = urllib.quote_plus(url)
269
+    if not proxy_url:
270
+        proxy_url = PROXY_URL
271
+    url2 = proxy_url + url2+"/?"
272
+    if headers:
273
+        headers2 = []
274
+        for h in headers:
275
+            headers2.append("%s=%s"%(h,headers[h].replace("=",EQ_CODE).replace(SPLIT_CHAR,SPLIT_CODE).replace(" ",SPACE_CODE)))
276
+        headers2 = SPLIT_CHAR.join(headers2)
277
+        url2 = url2+SPLIT_CHAR+headers2
278
+    return url2
279
+
280
+def streamproxy_decode2(urlp):
281
+    path = urlp.replace(re.search("http://[^/]+",urlp).group(0),"")
282
+    p = path.split(SPLIT_CHAR)
283
+    url = urllib.unquote_plus(p[0][1:-2])
284
+    #headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
285
+    headers={}
286
+    if len(p)>1:
287
+        for h in p[1:]:
288
+            #h = urllib.unquote()
289
+            headers[h.split("=")[0]]=urllib.unquote(h.split("=")[1])
290
+    return url,headers
291
+
292
+class Captions(object):
293
+    def __init__(self,uri):
294
+        self.uri = uri
295
+        self.subs = []
296
+        self.styles = {}
297
+        if uri.startswith("http"):
298
+            r = requests.get(uri)
299
+            if r.status_code == 200:
300
+                self.loads(r.content)
301
+
302
+    def loads(self,s):
303
+        if "WEBVTT" in s[:s.find("\n")]: # vtt captions
304
+            self.load_vtt(s)
305
+        elif "<?xml" in s[:s.find("\n")]:
306
+            self.load_ttaf(s)
307
+        else:
308
+            self.load_vtt(s) # TODO
309
+
310
+
311
+    def load_ttaf(self,s):
312
+        for r2 in re.findall("<style .+?/>", s):
313
+            st = {}
314
+            for a in re.findall(r'(\w+)="([^ "]+)"', r2):
315
+                st[a[0]] = a[1]
316
+                if a[0] == "id":
317
+                    sid = a[1]
318
+            self.styles[sid] = st
319
+        for r2 in re.findall("<p .+?</p>", s):
320
+            sub = {}
321
+            sub["begin"] = str2sec(re.search('begin="([^"]+)"', r2).group(1)) if re.search('begin="([^"]+)"', r2) else -1
322
+            sub["end"] = str2sec(re.search('end="([^"]+)"', r2).group(1)) if re.search('end="([^"]+)"', r2) else -1
323
+            sub["style"] = re.search('style="([^"]+)"', r2).group(1) if re.search('style="([^"]+)"', r2) else None
324
+            sub["text"] = re.search("<p[^>]+>(.+)</p>", r2).group(1).replace("\n","")
325
+            sub["text"] = re.sub("<br\s*?/>","\n",sub["text"])
326
+            sub["text"] = re.sub("<.+?>"," ",sub["text"])
327
+            self.subs.append(sub)
328
+        pass
329
+
330
+    def load_vtt(self,s):
331
+        f = StringIO.StringIO(s)
332
+        while True:
333
+            line = f.readline()
334
+            if not line:
335
+                break
336
+            m = re.search(r"([\d\.\,:]+)\s*-->\s*([\d\.\,\:]+)",line)
337
+            if m:
338
+                sub = {}
339
+                sub["begin"] = str2sec(m.group(1))
340
+                sub["end"] = str2sec(m.group(2))
341
+                sub["style"] = None
342
+                sub["text"] = []
343
+                line = f.readline()
344
+                while line.strip():
345
+                    txt = line.strip()
346
+                    if isinstance(txt,unicode):
347
+                        txt = txt.encode("utf8")
348
+                    sub["text"].append(txt)
349
+                    line = f.readline()
350
+                sub["text"] = "\n".join(sub["text"])
351
+                self.subs.append(sub)
352
+            else:
353
+                continue
354
+        pass
355
+
356
+    def get_srt(self):
357
+        out = ""
358
+        i = 0
359
+        for sub in self.subs:
360
+            i +=1
361
+            begin = sub["begin"]
362
+            begin = "%s,%03i"%(str(datetime.timedelta(seconds=begin/1000)),begin%1000)
363
+            end = sub["end"]
364
+            end = "%s,%03i"%(str(datetime.timedelta(seconds=end/1000)),end%1000)
365
+            txt2 = sub["text"]
366
+            out += "%s\n%s --> %s\n%s\n\n\n"%(i,begin,end,txt2)
367
+        return out
368
+
369
+def str2sec(r):
370
+    # Convert time string to milliseconds
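+    # e.g. str2sec("01:02:03.500") -> 3723500 (illustrative)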
371
+    r= r.replace(",",".")
372
+    m = re.search(r"(\d+\:)*(\d+)\:(\d+\.\d+)", r)
373
+    if m:
374
+        sec = int(m.group(1)[:-1])*60*60*1000 if m.group(1) else 0
375
+        sec += int(m.group(2))*60*1000 + int(float(m.group(3))*1000)
376
+        return sec
377
+    else:
378
+        return -1
379
+
380
+
381
+#c = Captions("http://195.13.216.2/mobile-vod/mp4:lb_barbecue_fr_lq.mp4/lb_barbecue_lv.vtt")
382
+#c = Captions("http://www.bbc.co.uk/iplayer/subtitles/ng/modav/bUnknown-0edd6227-0f38-411c-8d46-fa033c4c61c1_b05ql1s3_1479853893356.xml")
383
+#url = "http://195.13.216.2/mobile-vod/mp4:ac_now_you_see_me_2_en_lq.mp4/ac_now_you_see_me_2_lv.vtt"
384
+#c = Captions(url)
385
+
386
+#pass
387
+
388
+
389
+def ttaf2srt(s):
390
+    out = u""
391
+    i = 0
392
+    for p,txt in re.findall("<p ([^>]+)>(.+?)</p>", s, re.DOTALL):
393
+        i +=1
394
+        begin = re.search('begin="(.+?)"',p).group(1)
395
+        begin = begin.replace(".",",")
396
+        end = re.search('end="(.+?)"',p).group(1)
397
+        end = end.replace(".",",")
398
+        txt2 = re.sub("<br */>","\n",txt)
399
+        out += "%s\n%s --> %s\n%s\n\n"%(i,begin,end,txt2)
400
+    return out
401
+
402
+
403
+def item():
404
+    """Default item content"""
405
+    stream0 = {
406
+        'name': '', #
407
+        'url': '',
408
+        'quality': '?',
409
+        'surl': '',
410
+        'subs': [],
411
+        'headers': {},
412
+        "desc":"",
413
+        "img":"",
414
+        "lang":"",
415
+        "type":"",
416
+        "resolver":"",
417
+        "order":0,
418
+        "live":False
419
+        }
420
+    return stream0
421
+
422
+class _StringCookieJar(cookielib.LWPCookieJar):
423
+
424
+    def __init__(self, string=None, filename=None, delayload=False, policy=None):
425
+        cookielib.LWPCookieJar.__init__(self, filename, delayload, policy)
426
+        if string and len(string) > 0:
427
+            self._cookies = pickle.loads(str(string))
428
+
429
+    def dump(self):
430
+        return pickle.dumps(self._cookies)
431
+
432
+
433
+def init_urllib(cache=None):
434
+    """
435
+    Initializes urllib cookie handler
436
+    """
437
+    global _cookie_jar
438
+    data = None
439
+    if cache is not None:
440
+        data = cache.get(CACHE_COOKIES)
441
+    _cookie_jar = _StringCookieJar(data)
442
+    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(_cookie_jar))
443
+    urllib2.install_opener(opener)
444
+
445
+
446
+def cache_cookies(cache):
447
+    """
448
+    Saves cookies to cache
449
+    """
450
+    global _cookie_jar
451
+    if _cookie_jar:
452
+        cache.set(CACHE_COOKIES, _cookie_jar.dump())
453
+
454
+
455
+def request0(url, headers={}):
456
+    debug('request: %s' % url)
457
+    req = urllib2.Request(url, headers=headers)
458
+    req.add_header('User-Agent', UA)
459
+    try:
460
+        response = urllib2.urlopen(req)
461
+        data = response.read()
462
+        response.close()
463
+    except urllib2.HTTPError, error:
464
+        data = error.read()
465
+
466
+    debug('len(data) %s' % len(data))
467
+    return data
468
+
469
+def request(url, headers={}):
470
+    debug('request: %s' % url)
471
+    #req = urllib2.Request(url, headers=headers)
472
+    #req.add_header('User-Agent', UA)
473
+    if 'User-Agent' not in headers:
474
+        headers['User-Agent']= UA
475
+    try:
476
+        r = requests.get(url, headers=headers)
477
+        data = r.content
478
+    except:
479
+        data = ""  # request failed; r may be unbound here
480
+
481
+    debug('len(data) %s' % len(data))
482
+    return data
483
+
484
+def post(url, data, headers={}):
485
+    postdata = urllib.urlencode(data)
486
+    #req = urllib2.Request(url, postdata, headers)
487
+    #req.add_header('User-Agent', UA)
488
+    import requests
489
+    if 'User-Agent' not in headers:
490
+        headers['User-Agent']= UA
491
+    try:
492
+        r = requests.post(url, data=postdata,headers=headers)
493
+        data = r.content
494
+    except requests.RequestException:
495
+        data = ""  # request failed; r may be unbound here
496
+    return data
497
+
498
+def post0(url, data, headers={}):
499
+    postdata = urllib.urlencode(data)
500
+    req = urllib2.Request(url, postdata, headers)
501
+    req.add_header('User-Agent', UA)
502
+    try:
503
+        response = urllib2.urlopen(req)
504
+        data = response.read()
505
+        response.close()
506
+    except urllib2.HTTPError, error:
507
+        data = error.read()
508
+    return data
509
+
510
+
511
+def post_json(url, data, headers={}):
512
+    postdata = json.dumps(data)
513
+    headers['Content-Type'] = 'application/json'
514
+    req = urllib2.Request(url, postdata, headers)
515
+    req.add_header('User-Agent', UA)
516
+    response = urllib2.urlopen(req)
517
+    data = response.read()
518
+    response.close()
519
+    return data
520
+
521
+
522
+#def run_parallel_in_threads(target, args_list):
523
+    #result = Queue.Queue()
524
+    ## wrapper to collect return value in a Queue
525
+
526
+    #def task_wrapper(*args):
527
+        #result.put(target(*args))
528
+
529
+    #threads = [threading.Thread(target=task_wrapper, args=args) for args in args_list]
530
+    #for t in threads:
531
+        #t.start()
532
+    #for t in threads:
533
+        #t.join()
534
+    #return result
535
+
536
+
537
+def substr(data, start, end):
538
+    i1 = data.find(start)
539
+    i2 = data.find(end, i1)
540
+    return data[i1:i2]
541
+
542
+
543
+def save_to_file(url, file):
544
+    try:
545
+        return save_data_to_file(request(url), file)
546
+    except:
547
+        traceback.print_exc()
548
+
549
+
550
+def save_data_to_file(data, file):
551
+    try:
552
+        f = open(file, 'wb')
553
+        f.write(data)
554
+        f.close()
555
+        info('File %s saved' % file)
556
+        return True
557
+    except:
558
+        traceback.print_exc()
559
+
560
+
561
+def read_file(file):
562
+    if not os.path.exists(file):
563
+        return ''
564
+    f = open(file, 'r')
565
+    data = f.read()
566
+    f.close()
567
+    return data
568
+
569
+
570
+def _substitute_entity(match):
571
+    ent = match.group(3)
572
+    if match.group(1) == '#':
573
+        # decoding by number
574
+        if match.group(2) == '':
575
+            # number is in decimal
576
+            return unichr(int(ent))
577
+        elif match.group(2) == 'x':
578
+            # number is in hex
579
+            return unichr(int('0x' + ent, 16))
580
+    else:
581
+        # they were using a name
582
+        cp = n2cp.get(ent)
583
+        if cp:
584
+            return unichr(cp)
585
+        else:
586
+            return match.group()
587
+
588
+
589
+def decode_html(data):
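+    # e.g. decode_html("&#65;") -> u"A" (illustrative)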
590
+    if not type(data) == str:
591
+        return data
592
+    try:
593
+        if not type(data) == unicode:
594
+            data = unicode(data, 'utf-8', errors='ignore')
595
+        entity_re = re.compile(r'&(#?)(x?)(\w+);')
596
+        return entity_re.subn(_substitute_entity, data)[0]
597
+    except:
598
+        traceback.print_exc()
599
+        print [data]
600
+        return data
601
+
602
+def unescape(s0):
603
+    #s2 = re.sub("&#\w+;",HTMLParser.HTMLParser().unescape("\1"),s)
604
+    s0 = s0.replace("&amp;","&")
605
+    for s in re.findall("&#\w+;",s0):
606
+        s2 = HTMLParser.HTMLParser().unescape(s)
607
+        if isinstance(s0,str):
608
+            s2 = s2.encode("utf8")
609
+        s0 = s0.replace(s,s2)
610
+        pass
611
+    return s0
612
+
613
+def debug(text):
614
+    if LOG > 1:
615
+        print('[DEBUG] ' + str([text]))
616
+
617
+def info(text):
618
+    if LOG > 0:
619
+        print('[INFO] ' + str([text]))
620
+
621
+def error(text):
622
+    print('[ERROR] ' + str([text]))
623
+
624
+_diacritic_replace = {u'\u00f3': 'o',
625
+                      u'\u0213': '-',
626
+                      u'\u00e1': 'a',
627
+                      u'\u010d': 'c',
628
+                      u'\u010c': 'C',
629
+                      u'\u010f': 'd',
630
+                      u'\u010e': 'D',
631
+                      u'\u00e9': 'e',
632
+                      u'\u011b': 'e',
633
+                      u'\u00ed': 'i',
634
+                      u'\u0148': 'n',
635
+                      u'\u0159': 'r',
636
+                      u'\u0161': 's',
637
+                      u'\u0165': 't',
638
+                      u'\u016f': 'u',
639
+                      u'\u00fd': 'y',
640
+                      u'\u017e': 'z',
641
+                      u'\xed': 'i',
642
+                      u'\xe9': 'e',
643
+                      u'\xe1': 'a',
644
+                      }
645
+
646
+
647
+def replace_diacritic(string):
648
+    ret = []
649
+    for char in string:
650
+        if char in _diacritic_replace:
651
+            ret.append(_diacritic_replace[char])
652
+        else:
653
+            ret.append(char)
654
+    return ''.join(ret)
655
+
656
+
657
+def params(url=None):
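+    # e.g. params("?name=74657374") -> {"name": "test"} - values are hex-encoded (illustrative)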
658
+    if not url:
659
+        url = sys.argv[2]
660
+    param = {}
661
+    paramstring = url
662
+    if len(paramstring) >= 2:
663
+        params = url
664
+        cleanedparams = params.replace('?', '')
665
+        if (params[len(params) - 1] == '/'):
666
+            params = params[0:len(params) - 2]
667
+        pairsofparams = cleanedparams.split('&')
668
+        param = {}
669
+        for i in range(len(pairsofparams)):
670
+            splitparams = {}
671
+            splitparams = pairsofparams[i].split('=')
672
+            if (len(splitparams)) == 2:
673
+                param[splitparams[0]] = splitparams[1]
674
+    for p in param.keys():
675
+        param[p] = param[p].decode('hex')
676
+    return param
677
+
678
+
679
+def int_to_base(number, base):
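+    # e.g. int_to_base(255, 16) -> 'ff'; extract_jwplayer_setup() uses base 36 to undo the packed-script encoding (illustrative)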
680
+    digs = string.digits + string.letters
681
+    if number < 0:
682
+        sign = -1
683
+    elif number == 0:
684
+        return digs[0]
685
+    else:
686
+        sign = 1
687
+    number *= sign
688
+    digits = []
689
+    while number:
690
+        digits.append(digs[number % base])
691
+        number /= base
692
+    if sign < 0:
693
+        digits.append('-')
694
+    digits.reverse()
695
+    return ''.join(digits)
696
+
697
+
698
+def extract_jwplayer_setup(data):
699
+    """
700
+    Extracts jwplayer setup configuration and returns it as a dictionary.
701
+
702
+    :param data: A string to extract the setup from
703
+    :return: A dictionary containing the setup configuration
704
+    """
705
+    data = re.search(r'<script.+?}\(\'(.+)\',\d+,\d+,\'([\w\|]+)\'.*</script>', data, re.I | re.S)
706
+    if data:
707
+        replacements = data.group(2).split('|')
708
+        data = data.group(1)
709
+        for i in reversed(range(len(replacements))):
710
+            if len(replacements[i]) > 0:
711
+                data = re.sub(r'\b%s\b' % int_to_base(i, 36), replacements[i], data)
712
+        data = re.search(r'\.setup\(([^\)]+?)\);', data)
713
+        if data:
714
+            return json.loads(data.group(1).decode('string_escape'))
715
+        #return demjson.decode(data.group(1).decode('string_escape')) ### III
716
+    return None
717
+
718
+
719
+#def parse_html(url):
720
+#    return BeautifulSoup(request(url), 'html5lib', from_encoding='utf-8')
721
+
722
+if __name__ == "__main__":
723
+    s = 'B\xc4\x93thovena D\xc4\x81rgumu Taka (2014)/Beethoven&#x27;s Treasure [LV]'
724
+    #s = s.decode("utf8")
725
+    #s=unescape(s)
726
+    #url = "http://localhost:88/https://walterebert.com/playground/video/hls/ts/480x270.m3u8?token=xxxx~User-Agent=Enigma2~Cookie=xxxxx"
727
+    url = "http://hyt4d6.vkcache.com/secip/0/UMQ3q2gNjTlOPnEVm3iTiA/ODAuMjMyLjI0MC42/1479610800/hls-vod-s3/flv/api/files/videos/2015/09/11/144197748923a22.mp4.m3u8http://hyt4d6.vkcache.com/secip/0/Y-ZA1qRm8toplc0dN_L6_w/ODAuMjMyLjI0MC42/1479654000/hls-vod-s3/flv/api/files/videos/2015/09/11/144197748923a22.mp4.m3u8"
728
+    headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
729
+    url = "http://str1e.lattelecom.tv/mobile-vod/mp4:sf_fantastic_beasts_and_where_to_find_them_en_hd.mp4/playlist.m3u8?resource_id=fantastic_beasts_and_where_to_find_them&auth_token=6NAvMFDG+rYTAc4hb5JeL2bmsaRR7bAE23M6KDmhKYOGyXoo0gDpJUE9scYy+nQmfbgk03cWMe9MuXWSH1GqwolEk2jOQ/8Mrg7tOdbwrA8zM7nmkfCZPqQkwajZN4mfSJQVKHqXqJ8="
730
+    headers={}
731
+    print url
732
+    url = "replay::tiesraide/ltv1/"
733
+    url = "ltc::content/live-streams/103?include=quality"
734
+    urlp = streamproxy_encode2(url,headers)
735
+    print urlp
736
+    url2,headers2 = streamproxy_decode2(urlp)
737
+    print url2
738
+    player(urlp)
739
+    pass
740
+
741
+