
urllib2 SSL error fix

Ivars, 7 years ago
Commit 57720f256f
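This commit works around "urllib2 SSL: CERTIFICATE_VERIFY_FAILED" errors when opening HTTPS pages. The fix, repeated in several sources/*.py modules below, makes the unverified SSL context the default so urllib2/urlopen skips certificate checks. A minimal sketch of the pattern as it appears in the diff (the example URL is illustrative only):

    import ssl
    import urllib2

    # Python 2.7.9+ validates HTTPS certificates by default; set-top boxes with
    # stale CA bundles then fail with "SSL: CERTIFICATE_VERIFY_FAILED".
    # The workaround installs the unverified context as the default, so
    # certificate checks are skipped for all urllib2/urlopen HTTPS requests.
    ssl._create_default_https_context = ssl._create_unverified_context

    # After the patch a plain urlopen() call succeeds even with an untrusted chain:
    print urllib2.urlopen("https://cinemalive.tv/").getcode()  # illustrative URL

Note the trade-off: this disables certificate verification process-wide, which is tolerable for a media plugin on a closed device but not for general use.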

ContentSources.py (+14, -2)

@@ -210,7 +210,15 @@ if __name__ == "__main__":
210 210
                 streams = [stream]
211 211
             else:
212 212
                 try:
213
-                    streams = sources.get_streams(cur2[1])
213
+                    if not download:
214
+                        streams = sources.get_streams(cur2[1])
215
+                    else:
216
+                        stream = util.item()
217
+                        stream["url"] = cur2[1]
218
+                        stream["name"] = cur2[0]
219
+                        stream["url"] = util.streamproxy_encode2(stream["url"])
220
+                        print stream["url"]
221
+                        streams = [stream]
214 222
                 except Exception as e:
215 223
                     print unicode(e)
216 224
                     traceback.print_exc()
@@ -219,7 +227,11 @@ if __name__ == "__main__":
219 227
                 if not download:
220 228
                     util.play_video(streams)
221 229
                 else:
222
-                    Downloader.download_video(streams)
230
+                    #urlp = util.streamproxy_encode2(streams[0]["url"])
231
+                    #print urlp
232
+                    #util.player(urlp)
233
+                    #Downloader.download_video(streams)
234
+                    pass
223 235
             else:
224 236
                 print "**No stream to play - %s "%(
225 237
                     cur2[1])

Downloader.py (+2, -2)

@@ -120,7 +120,7 @@ class DownloadWithProgressFragmented:
120 120
     def download_fragment(self):
121 121
         if self.ts_num>=len(self.ts_list):
122 122
             pass
123
-            print "Call later"
123
+            #print "Call later"
124 124
             reactor.callLater(10,self.update_manifest)
125 125
             reactor.callLater(10, self.download_fragment)
126 126
         else:
@@ -239,7 +239,7 @@ def stop():
239 239
     reactor.stop()
240 240
 ###############################################
241 241
 
242
-def download_vide(stream):
242
+def download_video(stream):
243 243
     stream = stream[0]
244 244
     url = stream["url"]
245 245
     headers = stream["headers"]

PlayStream.py (+1, -1)

@@ -6,7 +6,7 @@
6 6
 # Used fragments of code from enigma2-plugin-tv3play by Taapat (https://github.com/Taapat/enigma2-plugin-tv3play)
7 7
 #
8 8
 
9
-__version__ = "0.6r"
9
+__version__ = "0.6t"
10 10
 __id__ = "playstream"
11 11
 __title__ = "PlayStream"
12 12
 __author__ = "ivars777@gmail.com"

PlayStream.wpr (+486, -479): diff not shown (file diff too large)


changelog.md (+7, -0)

@@ -1,5 +1,12 @@
1
+**0.6t** (17.06.2017)
2
+- [bugfix] kļūda verot vaļā HTTPS lapas (urllib2 SSL: CERTIFICATE_VERIFY_FAILED)
3
+
4
+**0.6s** (13.04.2017)
5
+- [bugfix] LMT straume google tiešraides kļūda novēsta
6
+
1 7
 **0.6r** (01.04.2017)
2 8
 - [feature] LMT Straume video (bez TV, kas strādā tikai LMT tīklā)
9
+- [bugfix] proxy serveris piefrizēts, cerams ka strādā stabilāk (aktuāls viaplay ejošajai daļai)
3 10
 
4 11
 **0.6p** (31.03.2017)
5 12
 - [bugfix] filmix sērijas

playstreamproxy.py (+208, -149)

@@ -1,4 +1,5 @@
1 1
 #!/usr/bin/python
2
+# -*- coding: utf-8 -*-
2 3
 """
3 4
 StreamProxy daemon (based on Livestream daemon)
4 5
 Ensures persistent cookies, User-Agents and others tricks to play protected HLS/DASH streams
@@ -18,6 +19,7 @@ from urllib import unquote, quote
18 19
 import urllib,urlparse
19 20
 #import cookielib,urllib2
20 21
 import requests
22
+
21 23
 try:
22 24
     from requests.packages.urllib3.exceptions import InsecureRequestWarning
23 25
     requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
@@ -25,7 +27,7 @@ except:
25 27
     pass
26 28
 
27 29
 HOST_NAME = ""
28
-PORT_NUMBER = 88
30
+PORT_NUMBER = 8880
29 31
 DEBUG = True
30 32
 DEBUG2 = False
31 33
 
@@ -36,17 +38,18 @@ COL_CODE = "%3A"
36 38
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
37 39
 headers0 = headers2dict("""
38 40
 icy-metadata: 1
39
-Cache-Control: max-age=0
40
-Accept-Encoding: gzip, deflate
41 41
 User-Agent: GStreamer souphttpsrc libsoup/2.52.2
42
-Connection: Keep-Alive
43 42
 """)
44 43
 sessions = {}
44
+cur_directory = os.path.dirname(os.path.realpath(__file__))
45
+sources = None
46
+slinks = {}
47
+
45 48
 
46 49
 class StreamHandler(BaseHTTPRequestHandler):
47 50
 
48 51
     def do_HEAD(self):
49
-        print "**head"
52
+        print "**get_head"
50 53
         self.send_response(200)
51 54
         self.send_header("Server", "playstreamproxy")
52 55
         if ".m3u8" in self.path.lower():
@@ -62,14 +65,14 @@ class StreamHandler(BaseHTTPRequestHandler):
62 65
 
63 66
     def do_GET(self):
64 67
         """Respond to a GET request"""
65
-        self.log_message("\n\n"+40*"#"+"\nget_url: \n%s", self.path)
68
+        print "\n\n"+40*"#"+"\nget_url: \n%s", self.path
66 69
         p = self.path.split("~")
67
-        #url = urllib.unquote(p[0][1:])
68
-        url = p[0][1:]
69
-        url = url.replace(COL_CODE, ":")
70
-        headers = self.headers.dict
71
-        headers = {} # TODO
72
-        headers["host"] = urlparse.urlparse(url).hostname
70
+        #url = urllib.unquote(p[0][1:]) # TODO - vajag nocekot vai visi urli strādā
71
+        urlp = p[0][1:]
72
+        url = urlp.replace(COL_CODE, ":")
73
+        #headers = self.headers.dict
74
+        headers = {} # TODO izmanto saņemtos headerus, var aizvietot ar defaultajiem
75
+        #headers["host"] = urlparse.urlparse(url).hostname
73 76
         if len(p)>1:
74 77
             for h in p[1:]:
75 78
                 k = h.split("=")[0].lower()
@@ -78,18 +81,18 @@ class StreamHandler(BaseHTTPRequestHandler):
78 81
         if DEBUG:
79 82
             print "url=%s"%url
80 83
             print "Original request headers + url headers:"
81
-            print_headers(headers)
82
-
84
+            print_headers(self.headers.dict)
83 85
         self.protocol_version = 'HTTP/1.1'
84 86
 
85
-        # TODO fetch selection
86 87
         try:
87
-            if ".lattelecom.tv/" in url: # lattelecom.tv hack
88
-                self.fetch_ltc(self.wfile, url, headers)
88
+            if "::" in url: # encoded source link
89
+                self.fetch_source(urlp, headers)
90
+            elif ".lattelecom.tv/" in url: # lattelecom.tv hack
91
+                self.fetch_ltc( url, headers)
89 92
             elif "filmas.lv" in url or "viaplay" in url: #  HLS session/decode filmas.lv in url:
90
-                self.fetch_url2(self.wfile, url, headers)
93
+                self.fetch_url2(url, headers)
91 94
             else: # plain fetch
92
-                self.fetch_url(self.wfile, url, headers)
95
+                self.fetch_url( url, headers)
93 96
         except Exception as e:
94 97
             print "Got Exception: ", str(e)
95 98
             import traceback
@@ -97,7 +100,7 @@ class StreamHandler(BaseHTTPRequestHandler):
97 100
 
98 101
     ### Remote server request procedures ###
99 102
 
100
-    def fetch_offline(self,wfile):
103
+    def fetch_offline(self):
101 104
         print "** Fetch offline"
102 105
         self.send_response(200)
103 106
         self.send_header("Server", "playstreamproxy")
@@ -106,172 +109,189 @@ class StreamHandler(BaseHTTPRequestHandler):
106 109
         self.wfile.write(open("offline.mp4", "rb").read())
107 110
         #self.wfile.close()
108 111
 
109
-    def fetch_url(self,wfile,url,headers):
112
+    def fetch_source(self, urlp, headers):
110 113
         if DEBUG:
111 114
             print "\n***********************************************************"
112
-            print "fetch_url: \n%s"%url
113
-            print "**Server request headers: "
114
-            print_headers(headers)
115
-        #if ".lattelecom.tv/" in url and EQ_CODE in url:
116
-        #    url = url.replace(EQ_CODE,"=")
117
-        r = requests.get(url,headers = headers)
115
+            print "fetch_source: \n%s"%urlp
116
+        base_data = hls_base(urlp)
117
+        data = urllib.unquote_plus(base_data)[:-1]
118
+        if DEBUG: print "base_data=", base_data
119
+        if DEBUG: print "data=", data
120
+        if not base_data in slinks :
121
+            streams = sources.get_streams(data)
122
+            if not streams:
123
+                self.write_error(500)  # TODO
124
+                return
125
+            url = streams[0]["url"]
126
+            base_url = hls_base(url)
127
+            if DEBUG: print "New link, base_url=",base_url
128
+            ses = requests.Session()
129
+            ses.trust_env = False
130
+            slinks[base_data] = {"data": data, "urlp":urlp,"url": url, "base_url": base_url,"session":ses}
131
+        else:
132
+            ses = slinks[base_data]["session"]
133
+            if urlp == slinks[base_data]["urlp"]:
134
+                url = slinks[base_data]["url"]
135
+                if DEBUG: print "Existing base link", url
136
+            else:
137
+                url = urlp.replace(base_data, slinks[base_data]["base_url"])
138
+                if DEBUG: print "Existing new link", url
139
+        r = self.get_page_ses(url,ses,True,headers = headers)
118 140
         code = r.status_code
141
+        if not code in (200,206): # TODO mēģina vēlreiz get_streams
142
+            self.write_error(code)
143
+            return
144
+        self.send_response(code)
145
+        self.send_headers(r.headers)
146
+        CHUNK_SIZE = 1024 *4
147
+        for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
148
+            try:
149
+                self.wfile.write(chunk)
150
+            except Exception as e:
151
+                print "Exception: ", str(e)
152
+                self.wfile.close()
153
+                return
154
+        if DEBUG: print "**File downloaded"
155
+        if "connection" in r.headers and r.headers["connection"] <> "keep-alive":
156
+            self.wfile.close()
157
+        return
158
+
159
+
160
+    def fetch_url(self, url,headers):
119 161
         if DEBUG:
120
-            print "** Server/proxy response, code = %s"%code
121
-            print_headers(r.headers)
162
+            print "\n***********************************************************"
163
+            print "fetch_url: \n%s"%url
164
+        r = self.get_page(url,headers = headers)
165
+        code = r.status_code
122 166
         if not code in (200,206):
123
-            print "***Error, code=%s",code
124
-            self.send_response(code)
125
-            self.send_headers(r.headers)
126
-            wfile.close()
167
+            self.write_error(code)
127 168
             return
128 169
         self.send_response(code)
129 170
         self.send_headers(r.headers)
130 171
         CHUNK_SIZE = 1024*4
131
-        for chunk in r.iter_content(CHUNK_SIZE):
172
+        for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
132 173
             try:
133
-                wfile.write(chunk)
174
+                self.wfile.write(chunk)
134 175
             except Exception as e:
135 176
                 print "Exception: ", str(e)
136
-                wfile.close()
177
+                self.wfile.close()
137 178
                 return
138 179
         if DEBUG: print "**File downloaded"
139
-        wfile.close()
140
-        # time.sleep(1)
180
+        if "connection" in r.headers and r.headers["connection"] <> "keep-alive":
181
+            self.wfile.close()
141 182
         return
142 183
 
143
-    def fetch_ltc(self, wfile, url, headers):
184
+    def fetch_ltc(self, url, headers):
185
+        "lattelecom.tv hack (have to update chunklist after each 6 min"
144 186
         if DEBUG:
145
-            print "\n***********************************************************"
146
-            print "fetch_url2: \n%s"%url
147
-        #self.log_message("fetch_filmas: \n%s", url)
148
-        #self.log_message("headers: %s", headers)
187
+            print "\n\n***********************************************************"
188
+            print "fetch_ltc: \n%s"%url
149 189
         base_url = hls_base(url)
150 190
         if DEBUG: print "base_url=",base_url
151 191
         if base_url not in sessions:
152 192
             if DEBUG: print "New session"
153 193
             sessions[base_url] = {}
154 194
             sessions[base_url]["session"] = requests.Session()
155
-            #sessions[base_url]["session"].headers = {}
156
-            sessions[base_url]["key"] = binascii.a2b_hex(headers["key"]) if "key" in headers and headers["key"] else None
195
+            sessions[base_url]["session"].trust_env = False
196
+            sessions[base_url]["session"].headers.update(headers0)
197
+            sessions[base_url]["playlist"] = ""
198
+            sessions[base_url]["chunklist"] = []
199
+
200
+        #  change ts file to valid one media_w215689190_33.ts?
201
+        tsfile = re.search("media_\w+_(\d+)\.ts", url, re.IGNORECASE)
202
+        if tsfile and sessions[base_url]["chunklist"]:
203
+            tnum = int(tsfile.group(1))
204
+            url2 = sessions[base_url]["chunklist"][tnum]
205
+            if not url2.startswith("http"):
206
+                url2 = base_url + url2
207
+            url = url2
208
+            if DEBUG: print "[playstreamproxy] url changed to ", url
209
+
210
+        ### get_page ###
157 211
         ses = sessions[base_url]["session"]
158
-        key = sessions[base_url]["key"]
159
-        ses.headers.clear()
160
-        ses.headers.update(headers0)
212
+        #ses.headers.update(headers0)
161 213
         ses.headers.update(headers)
162
-        ses.headers["Connection"]="Keep-Alive"
163
-        if DEBUG:
164
-            print "**Server request headers: "
165
-            print_headers(ses.headers)
166
-        for t in range(3):
167
-            r = ses.get(url, stream=True, verify=False)
168
-            code = r.status_code #r.status_code
169
-            if DEBUG:
170
-                print "\n\n=====================================\n**Server response:", code #r.status_code
171
-                print "**Server response headers: "
172
-                print_headers(r.headers)
173
-            if code in (200,2016): break
174
-        if not (code in (200,206)):
175
-            print "***Error, code=%s"%code
176
-            self.send_response(code)
177
-            self.send_headers(r.headers)
178
-            wfile.close()
179
-            #self.fetch_offline(wfile)
214
+        # ses.headers["Connection"]="Keep-Alive"
215
+        r = self.get_page_ses(url,ses)
216
+        code = r.status_code #r.status_code
217
+
218
+        if not (code in (200,206)) and tsfile:
219
+            # update chunklist
220
+            r2 = self.get_page(sessions[base_url]["playlist"])
221
+            streams = re.findall(r"#EXT-X-STREAM-INF:.*?BANDWIDTH=(\d+).*?\n(.+?)$", r2.content, re.IGNORECASE | re.MULTILINE)
222
+            if streams:
223
+                sorted(streams, key=lambda item: int(item[0]), reverse=True)
224
+                chunklist = streams[0][1]
225
+                if not chunklist.startswith("http"):
226
+                    chunklist = base_url + chunklist
227
+            else:
228
+                self.write_error(r.status_code)
229
+                return
230
+            print "[playstreamproxy] trying to update chunklist", chunklist
231
+            r3 = self.get_page_ses(chunklist,ses,True)
232
+            ts_list = re.findall(r"#EXTINF:.*?\n(.+?)$", r3.content, re.IGNORECASE | re.MULTILINE)
233
+            sessions[base_url]["chunklist"]= ts_list
234
+            tnum = int(tsfile.group(1))
235
+            url2 = sessions[base_url]["chunklist"][tnum]
236
+            if not url2.startswith("http"):
237
+                url2 = base_url + url2
238
+            r = self.get_page_ses(url2,ses,True)
239
+            if not r.status_code in (200,206):
240
+                self.write_error(r.status_code)
241
+                return
242
+        elif not r.status_code in (200,206):
243
+            self.write_error(r.status_code)
180 244
             return
181 245
 
246
+        if "playlist.m3u8" in url:
247
+            sessions[base_url]["playlist"] = url
248
+
182 249
         ### Start of return formin and sending
183 250
         self.send_response(200)
184 251
         #headers2 = del_headers(r.headers,["Content-Encoding",'Transfer-Encoding',"Connection",'content-range',"range"])
185 252
         headers2  = {"server":"playstreamproxy", "content-type":"text/html"}
186 253
 
187
-        # Content-Type: application/vnd.apple.mpegurl (encrypted)
188
-        if r.headers["content-type"] == "application/vnd.apple.mpegurl":
189
-            content = r.content
190
-            content = r.content.replace(base_url,"")
191
-            content = re.sub("#EXT-X-KEY:METHOD=AES-128.+\n", "", content, 0, re.IGNORECASE | re.MULTILINE)
192
-            headers2["content-type"] = "application/vnd.apple.mpegurl"
193
-            headers2["content-length"] = "%s"%len(content)
194
-            r.headers["content-length"] = "%s"%len(content)
195
-            #headers2['content-range'] = 'bytes 0-%s/%s'%(len(content)-1,len(content))
196
-            #self.send_headers(headers2)
197
-            self.send_headers(r.headers)
198
-            wfile.write(content)
199
-            wfile.close()
200
-
201
-        # Content-Type: video/MP2T (encrypted)
202
-        elif r.headers["content-type"] == "video/MP2T" and key:
203
-            print "Decode video/MP2T"
204
-            content = r.content
205
-            from Crypto.Cipher import AES
206
-            iv = content[:16]
207
-            d = AES.new(key, AES.MODE_CBC, iv)
208
-            content = d.decrypt(content[16:])
209
-            headers2["content-type"] = "video/MP2T"
210
-            headers2["content-length"] = "%s"% (len(content))
211
-            #headers2['content-range'] = 'bytes 0-%s/%s' % (len(content) - 1, len(content))
212
-            print content[0:16]
213
-            print "Finish decode"
214
-            self.send_headers(headers2)
215
-            wfile.write(content)
216
-            wfile.close()
217
-
218
-        else:
219
-            print "Return regular content"
220
-            headers2["content-type"]  = r.headers["content-type"]
221
-            if "content-length" in r.headers:
222
-                headers2["content-length"] = r.headers["content-length"]
223
-            self.send_headers(r.headers)
224
-            CHUNK_SIZE = 4 * 1024
225
-            for chunk in r.iter_content(CHUNK_SIZE):
226
-                try:
227
-                    #print "#",
228
-                    wfile.write(chunk)
229
-                except Exception as e:
230
-                    print "Exception: ", str(e)
231
-                    return
232
-            if DEBUG: print "File downloaded = "
233
-            wfile.close()
234
-            #time.sleep(1)
235
-            return
254
+        if DEBUG: print "\n** Return  content"
255
+        headers2["content-type"]  = r.headers["content-type"]
256
+        if "content-length" in r.headers:
257
+            headers2["content-length"] = r.headers["content-length"]
258
+        self.send_headers(r.headers)
259
+        CHUNK_SIZE = 4 * 1024
260
+        for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
261
+            try:
262
+                #print "#",
263
+                self.wfile.write(chunk)
264
+            except Exception as e:
265
+                print "Exception: ", str(e)
266
+                return
267
+        if DEBUG: print "File downloaded = "
268
+        self.wfile.close()
269
+        #time.sleep(1)
270
+        return
236 271
 
237 272
 
238
-    def fetch_url2(self, wfile, url, headers):
273
+    def fetch_url2(self, url, headers):
239 274
         if DEBUG:
240 275
             print "\n***********************************************************"
241 276
             print "fetch_url2: \n%s"%url
242
-        #self.log_message("fetch_filmas: \n%s", url)
243
-        #self.log_message("headers: %s", headers)
244 277
         base_url = hls_base(url)
245 278
         if DEBUG: print "base_url=",base_url
246 279
         if base_url not in sessions:
247 280
             if DEBUG: print "New session"
248 281
             sessions[base_url] = {}
249 282
             sessions[base_url]["session"] = requests.Session()
250
-            #sessions[base_url]["session"].headers = {}
283
+            sessions[base_url]["session"].trust_env = False
284
+            sessions[base_url]["session"].headers.update(headers0)
251 285
             sessions[base_url]["key"] = binascii.a2b_hex(headers["key"]) if "key" in headers and headers["key"] else None
252 286
         ses = sessions[base_url]["session"]
287
+        ses.trust_env = False
253 288
         key = sessions[base_url]["key"]
254
-        ses.headers.clear()
255
-        ses.headers.update(headers0)
289
+        #ses.headers.clear()
256 290
         ses.headers.update(headers)
257
-        ses.headers["Connection"]="Keep-Alive"
258
-        if DEBUG:
259
-            print "**Server request headers: "
260
-            print_headers(ses.headers)
261
-        for t in range(3):
262
-            r = ses.get(url, stream=True, verify=False)
263
-            code = r.status_code #r.status_code
264
-            if DEBUG:
265
-                print "\n\n=====================================\n**Server response:", code #r.status_code
266
-                print "**Server response headers: "
267
-                print_headers(r.headers)
268
-            if code in (200,2016): break
291
+        r = self.get_page_ses(url, ses,stream=False)
292
+        code = r.status_code #r.status_code
269 293
         if not (code in (200,206)):
270
-            print "***Error, code=%s"%code
271
-            self.send_response(code)
272
-            self.send_headers(r.headers)
273
-            wfile.close()
274
-            #self.fetch_offline(wfile)
294
+            self.write_error(r.status_code)
275 295
             return
276 296
 
277 297
         ### Start of return formin and sending
@@ -280,7 +300,7 @@ class StreamHandler(BaseHTTPRequestHandler):
280 300
         headers2  = {"server":"playstreamproxy", "content-type":"text/html"}
281 301
 
282 302
         # Content-Type: application/vnd.apple.mpegurl (encrypted)
283
-        if r.headers["content-type"] == "application/vnd.apple.mpegurl":
303
+        if r.headers["content-type"] == "application/vnd.apple.mpegurl" and key:
284 304
             content = r.content
285 305
             content = r.content.replace(base_url,"")
286 306
             content = re.sub("#EXT-X-KEY:METHOD=AES-128.+\n", "", content, 0, re.IGNORECASE | re.MULTILINE)
@@ -288,10 +308,10 @@ class StreamHandler(BaseHTTPRequestHandler):
288 308
             headers2["content-length"] = "%s"%len(content)
289 309
             r.headers["content-length"] = "%s"%len(content)
290 310
             #headers2['content-range'] = 'bytes 0-%s/%s'%(len(content)-1,len(content))
291
-            #self.send_headers(headers2)
292
-            self.send_headers(r.headers)
293
-            wfile.write(content)
294
-            wfile.close()
311
+            self.send_headers(headers2)
312
+            #self.send_headers(r.headers)
313
+            self.wfile.write(content)
314
+            self.wfile.close()
295 315
 
296 316
         # Content-Type: video/MP2T (encrypted)
297 317
         elif r.headers["content-type"] == "video/MP2T" and key:
@@ -307,25 +327,27 @@ class StreamHandler(BaseHTTPRequestHandler):
307 327
             print content[0:16]
308 328
             print "Finish decode"
309 329
             self.send_headers(headers2)
310
-            wfile.write(content)
311
-            wfile.close()
330
+            self.wfile.write(content)
331
+            self.wfile.close()
312 332
 
313 333
         else:
314
-            print "Return regular content"
334
+            if DEBUG: print "Return regular content"
315 335
             headers2["content-type"]  = r.headers["content-type"]
316 336
             if "content-length" in r.headers:
317 337
                 headers2["content-length"] = r.headers["content-length"]
318 338
             self.send_headers(r.headers)
339
+            #self.send_headers(headers2)
319 340
             CHUNK_SIZE = 4 * 1024
320
-            for chunk in r.iter_content(CHUNK_SIZE):
341
+            for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
321 342
                 try:
322 343
                     #print "#",
323
-                    wfile.write(chunk)
344
+                    self.wfile.write(chunk)
324 345
                 except Exception as e:
325 346
                     print "Exception: ", str(e)
326 347
                     return
327 348
             if DEBUG: print "File downloaded = "
328
-            wfile.close()
349
+            if "connection" in r.headers and r.headers["connection"]<>"keep-alive":
350
+                self.wfile.close()
329 351
             #time.sleep(1)
330 352
             return
331 353
 
@@ -337,11 +359,48 @@ class StreamHandler(BaseHTTPRequestHandler):
337 359
             self.send_header(h, headers[h])
338 360
         self.end_headers()
339 361
 
362
+    def write_error(self,code):
363
+        print "***Error, code=%s" % code
364
+        self.send_response(code)
365
+        #self.send_headers(r.headers)
366
+        self.wfile.close() # TODO?
367
+        # self.fetch_offline()
368
+
369
+    def get_page_ses(self,url,ses,stream=True, headers=None):
370
+        headers= headers if headers else headers0
371
+        ses.headers.update(headers)
372
+        if DEBUG:
373
+            print "\n\n====================================================\n**get_page_ses\n%s"%url
374
+            print "**Server request headers: "
375
+            print_headers(ses.headers)
376
+        r = ses.get(url, stream=stream, verify=False)
377
+        if DEBUG:
378
+            print "**Server response:", r.status_code
379
+            print "**Server response headers: "
380
+            print_headers(r.headers)
381
+        return r
382
+
383
+    def get_page(self,url,headers=None):
384
+        if not headers:
385
+            headers = headers0
386
+        if DEBUG:
387
+            print "\n\n====================================================\n**get_page\n%s"%url
388
+            print "**Server request headers: "
389
+            print_headers(headers)
390
+        r = requests.get(url, headers=headers,stream=True)
391
+        if DEBUG:
392
+            print "**Server response:", r.status_code
393
+            print "**Server response headers: "
394
+            print_headers(r.headers)
395
+        return r
340 396
 
341 397
 class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
342 398
     """Handle requests in a separate thread."""
343 399
 
344 400
 def start(host = HOST_NAME, port = PORT_NUMBER):
401
+    import ContentSources, util
402
+    global sources
403
+    sources = ContentSources.ContentSources(os.path.join(cur_directory, "sources"))
345 404
     httpd = ThreadedHTTPServer((host, port), StreamHandler)
346 405
     print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
347 406
     try:
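For reference, StreamHandler.do_GET above splits the request path on "~", takes the first segment (minus the leading "/") as the target URL with ":" encoded as %3A (COL_CODE), and treats any remaining segments as name=value header overrides; the default port is now 8880. A rough client-side sketch of building such a proxy URL under those assumptions (make_proxy_url and the example stream URL are illustrative; util.streamproxy_encode2 in the repo presumably produces an equivalent form, and encoded source links containing "::" are handled separately by fetch_source):

    PROXY = "http://127.0.0.1:8880/"  # PORT_NUMBER after this commit

    def make_proxy_url(url, headers=None):
        # ":" is sent as %3A so do_GET can restore it via COL_CODE
        parts = [url.replace(":", "%3A")]
        for name, value in (headers or {}).items():
            # each extra "~name=value" segment becomes a request header in do_GET
            parts.append("%s=%s" % (name, value))
        return PROXY + "~".join(parts)

    # "key" (hex) feeds the AES-128 decrypt path used by fetch_url2
    print make_proxy_url("https://example.com/stream/playlist.m3u8",
                         {"key": "00112233445566778899aabbccddeeff"})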

release/enigma2-plugin-extensions-playstream_0.6s.ipk (binary)


release/enigma2-plugin-extensions-playstream_0.6t.ipk (binary)


sources/SourceBase.py (+3, -0)

@@ -9,6 +9,9 @@
9 9
 import urllib2, urllib
10 10
 import datetime, re, sys,os
11 11
 import requests
12
+import ssl
13
+ssl._create_default_https_context = ssl._create_unverified_context
14
+
12 15
 try:
13 16
     from requests.packages.urllib3.exceptions import InsecureRequestWarning
14 17
     requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

sources/YouTubeVideoUrl.py (+3, -1)

@@ -8,6 +8,8 @@ import re
8 8
 from urllib import urlencode
9 9
 from urllib2 import urlopen, URLError
10 10
 import sys
11
+import ssl
12
+ssl._create_default_https_context = ssl._create_unverified_context
11 13
 
12 14
 #from Components.config import config
13 15
 
@@ -18,7 +20,7 @@ if sys.version_info >= (2, 7, 9):
18 20
 		import ssl
19 21
 		sslContext = ssl._create_unverified_context()
20 22
 	except:
21
-		pass 
23
+		pass
22 24
 from jsinterp import JSInterpreter
23 25
 from swfinterp import SWFInterpreter
24 26
 

sources/cinemalive.py (+31, -28)

@@ -15,8 +15,11 @@ import ConfigParser
15 15
 from SourceBase import SourceBase
16 16
 #from collections import OrderedDict
17 17
 import os
18
+import ssl
19
+ssl._create_default_https_context = ssl._create_unverified_context
18 20
 
19
-#sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 
21
+
22
+#sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
20 23
 from resolver import resolve
21 24
 import util
22 25
 
@@ -24,9 +27,9 @@ import util
24 27
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
25 28
 import HTMLParser
26 29
 h = HTMLParser.HTMLParser()
27
-    
30
+
28 31
 class Source(SourceBase):
29
-    
32
+
30 33
     def __init__(self,country="",cfg_path=None):
31 34
         self.name = "cinemalive"
32 35
         self.title = "cinemalive.tv"
@@ -44,18 +47,18 @@ Accept-Language: en-US,en;q=0.8
44 47
 """)
45 48
         self.url = "https://cinemalive.tv/"
46 49
         #self.login()
47
-        
48
-            
50
+
51
+
49 52
     ######### Entry point ########
50 53
     def get_content(self, data):
51 54
         print "[cinemalive] get_content:", data
52
-        source,data,path,plist,clist,params,qs = self.parse_data(data)     
55
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
53 56
         content=[]
54 57
         content.append(("..return", "back","","Return back"))
55
-        
58
+
56 59
         if clist=="home":
57 60
             content.extend([
58
-                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),            
61
+                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),
59 62
                 ("Filmas latviski - visas", "cinemalive::filmaslatviski/visas/lapa/1","","Filmas latviski - visas"),
60 63
                 ("Filmas angliski", "cinemalive::home_en","","Filmas angliski"),
61 64
                 ("Filmas latviski - jaunākās", "cinemalive::filmaslatviski/jaunakas/lapa/1","","Filmas latviski - jaunākās"),
@@ -68,12 +71,12 @@ Accept-Language: en-US,en;q=0.8
68 71
                 data2 = item[0]+"/lapa/1"
69 72
                 img = self.img
70 73
                 desc = title
71
-                content.append((title,self.name+"::"+data2,img,desc))      
74
+                content.append((title,self.name+"::"+data2,img,desc))
72 75
             return content
73 76
 
74 77
         elif clist=="home_en":
75 78
             content.extend([
76
-                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),            
79
+                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),
77 80
                 ("Movies English - all", "cinemalive::moviesenglish/all/page/1","","Movies English - all"),
78 81
                 ("Movies Latvian", "cinemalive::home","","Filmas latviski"),
79 82
                 ("Movies English - newest", "cinemalive::moviesenglish/newestmovies/page/1","","Movies English - newest"),
@@ -86,14 +89,14 @@ Accept-Language: en-US,en;q=0.8
86 89
                 data2 = item[0]+"/page/1"
87 90
                 img = self.img
88 91
                 desc = title
89
-                content.append((title,self.name+"::"+data2,img,desc))      
92
+                content.append((title,self.name+"::"+data2,img,desc))
90 93
             return content
91 94
 
92
-    
95
+
93 96
         elif "search.php" in data:
94
-            
97
+
95 98
             r=self.call(path,params=params[1:],headers=self.headers2)
96
-            result = re.findall(r'<div class="results.+?<a href="https://cinemalive\.tv/(.+?)">.+?<img src="(.+?)".+?<span style="color:#bcbcbc">([^<]+)</span> <span style="color:#5a606d;font-size:12px;">([^<]+)</span><br/>.+?<p class="dec" style="font-size:12px; color:#777;line-height:14px;">([^<]+)</p>', r, re.DOTALL)            
99
+            result = re.findall(r'<div class="results.+?<a href="https://cinemalive\.tv/(.+?)">.+?<img src="(.+?)".+?<span style="color:#bcbcbc">([^<]+)</span> <span style="color:#5a606d;font-size:12px;">([^<]+)</span><br/>.+?<p class="dec" style="font-size:12px; color:#777;line-height:14px;">([^<]+)</p>', r, re.DOTALL)
97 100
             for item in result:
98 101
                 title = item[2]
99 102
                 title0 = re.sub(" \(\d+\)","",title)
@@ -105,7 +108,7 @@ Accept-Language: en-US,en;q=0.8
105 108
                 data2 = item[0]
106 109
                 img = item[1].replace("xs.","sm.")
107 110
                 desc = util.unescape(item[4])
108
-                content.append((title,self.name+"::"+data2,img,desc))            
111
+                content.append((title,self.name+"::"+data2,img,desc))
109 112
             return content
110 113
 
111 114
         elif clist in ("filmaslatviski","moviesenglish"):
@@ -119,23 +122,23 @@ Accept-Language: en-US,en;q=0.8
119 122
                 img = "https://cinemalive.tv/"+item[1]
120 123
                 title = util.unescape(title)
121 124
                 desc = title
122
-                content.append((title,self.name+"::"+data2,img,desc)) 
125
+                content.append((title,self.name+"::"+data2,img,desc))
123 126
             m = re.search(r"""<a href='https://cinemalive\.tv/([^']+)' style="border-right:none;">»</a>""", r, re.DOTALL)
124 127
             if m:
125 128
                 data2 = m.group(1)
126
-                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))                                  
127
-            return content      
128
-         
129
+                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
130
+            return content
131
+
129 132
         else:
130
-            return content                            
131
-              
133
+            return content
134
+
132 135
     def is_video(self,data):
133
-        source,data,path,plist,clist,params,qs = self.parse_data(data)        
136
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
134 137
         if clist=="movie":
135 138
             return True
136 139
         else:
137 140
             return False
138
-                        
141
+
139 142
     def get_streams(self, data):
140 143
         print "[cinemalive] get_streams:", data
141 144
         source,data,path,plist,clist,params,qs = self.parse_data(data)
@@ -148,7 +151,7 @@ Accept-Language: en-US,en;q=0.8
148 151
         title = title0.replace(" - Filma Online Latviski","").replace(" - Movie Online English HD","")
149 152
         desc = re.search('<p class="plot">(.+?)</p>', r).group(1)
150 153
         img = "http://cinemalive.tv"+re.search('<img src="(.+?)" class="img-thumbnail"', r).group(1)
151
-        
154
+
152 155
         m = re.search(r'<video id=.+?<source src="([^"]+\.mp4)"', r, re.DOTALL)
153 156
         if m:
154 157
             s = util.item()
@@ -157,9 +160,9 @@ Accept-Language: en-US,en;q=0.8
157 160
             s["desc"] = util.unescape(desc)
158 161
             s["img"] = img
159 162
             s["type"] = self.stream_type(s["url"])
160
-            s["lang"] = lang 
163
+            s["lang"] = lang
161 164
             return [s]
162
-        
165
+
163 166
         #m = re.search('<div class="viboom-overroll"><iframe src="([^"]+)"', r)
164 167
         #if m:
165 168
         result = re.findall('<div id="video_container"><iframe.+?src="(.+?)"', r)
@@ -180,12 +183,12 @@ Accept-Language: en-US,en;q=0.8
180 183
                     s["img"] = img
181 184
                     s["type"]= self.stream_type(s["url"])
182 185
                     s["lang"] = lang2
183
-                    streams.append(s)  
186
+                    streams.append(s)
184 187
             return streams
185 188
         else:
186 189
             return []
187 190
 
188
-                    
191
+
189 192
 if __name__ == "__main__":
190 193
     country= "lv"
191 194
     c = Source(country)

sources/config.py (+1, -0)

@@ -23,6 +23,7 @@ class Source(SourceBase):
23 23
         self.read_streams()
24 24
 
25 25
     def get_content(self, data):
26
+        print "[config] get_content",data
26 27
         self.read_streams()
27 28
         if "::" in data:
28 29
             data = data.split("::")[1]

sources/config.pyc (binary)


sources/euronews.py (+3, -0)

@@ -15,6 +15,9 @@ import datetime, time,re, sys,os
15 15
 from collections import OrderedDict
16 16
 from SourceBase import SourceBase
17 17
 import util
18
+import ssl
19
+ssl._create_default_https_context = ssl._create_unverified_context
20
+
18 21
 
19 22
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
20 23
 import HTMLParser

sources/filmix.py (+21, -7)

@@ -17,6 +17,9 @@ from SourceBase import SourceBase
17 17
 import base64
18 18
 from collections import OrderedDict
19 19
 import sys
20
+import ssl
21
+ssl._create_default_https_context = ssl._create_unverified_context
22
+
20 23
 try:
21 24
     import util
22 25
 except:
@@ -135,7 +138,10 @@ Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
135 138
                 for i,ep in enumerate(js["playlist"]):
136 139
                     title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
137 140
                     serie = js["playlist"][i]["comment"].encode("utf8")
138
-                    data2 = data+"?s=%s"%(i+1)
141
+                    if "file" in ep and ep["file"]:
142
+                        data2 = data+"?e=%s"%(i+1)
143
+                    else:
144
+                        data2 = data+"?s=%s"%(i+1)
139 145
                     desc = serie +"\n"+desc0
140 146
                     content.append((title,self.name+"::"+data2,img,desc))
141 147
             return content
@@ -190,7 +196,7 @@ Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
190 196
 
191 197
     def is_video(self,data):
192 198
         source,data,path,plist,clist,params,qs = self.parse_data(data)
193
-        if clist == "play" and "s=" in data and "e=" in data:
199
+        if clist == "play" and "e=" in data:
194 200
             return True
195 201
         elif clist=="play" and not params:
196 202
             r = self.call(path)
@@ -263,12 +269,20 @@ Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
263 269
             js = self._http_request(pl_link)
264 270
             js = self.decode_uppod_text(js)
265 271
             js = json.loads(js)
266
-            if "s" in qs and "e" in qs:
267
-                s = int(qs["s"])
272
+            if "e" in qs:
273
+                if "s" in qs:
274
+                    s = int(qs["s"])
275
+                else:
276
+                    s = None
268 277
                 e = int(qs["e"])
269
-                serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
270
-                title = title0+" - "+ serie
271
-                url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
278
+                if s: # sezona + epizode
279
+                    serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
280
+                    title = title0+" - "+ serie
281
+                    url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
282
+                else: # tikai epizode, nav sezonas
283
+                    title = title0 +" - "+js["playlist"][e-1]["comment"].encode("utf8")
284
+                    serie = js["playlist"][e-1]["comment"].encode("utf8")
285
+                    url0 = js["playlist"][e-1]["file"].encode("utf8")
272 286
                 streams2 = self.get_streams2(url0)
273 287
                 for st in streams2:
274 288
                     stream = util.item()

sources/filmix.pyc (binary)


sources/filmon.py (+8, -5)

@@ -13,6 +13,9 @@ except:
13 13
 import urllib2, urllib
14 14
 import datetime, re, sys
15 15
 from SourceBase import SourceBase
16
+import ssl
17
+ssl._create_default_https_context = ssl._create_unverified_context
18
+
16 19
 
17 20
 API_URL = 'http://www.filmon.com/'
18 21
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
@@ -44,7 +47,7 @@ class Source(SourceBase):
44 47
     def get_content(self, data):
45 48
         print "[filmon] get_content:", data
46 49
         if "::" in data:
47
-            data = data.split("::")[1] 
50
+            data = data.split("::")[1]
48 51
         path = data.split("?")[0]
49 52
         clist = path.split("/")[0]
50 53
         params = data[data.find("?"):] if "?" in data else ""
@@ -81,7 +84,7 @@ class Source(SourceBase):
81 84
         ### TV group channels ###
82 85
         elif clist=="group":
83 86
             if "id" in qs:
84
-                group_id = qs["id"] 
87
+                group_id = qs["id"]
85 88
             else:
86 89
                 return content
87 90
             group = None
@@ -102,7 +105,7 @@ class Source(SourceBase):
102 105
         ### TV Channel ###
103 106
         elif clist == "channel" or clist == "video":
104 107
             if "id" in qs:
105
-                ch_id = qs["id"] 
108
+                ch_id = qs["id"]
106 109
             else:
107 110
                 return ("No stream found %s"%data,"","","No stream found")
108 111
             ch = self.get_tv_channel_info(ch_id)
@@ -135,7 +138,7 @@ class Source(SourceBase):
135 138
                 img = gr["images"][0]["url"].encode("utf8")
136 139
                 desc = gr["description"].encode("utf8") if gr["description"] else title
137 140
                 content.append((title,self.name+"::"+data2,img,desc))
138
-            return content           
141
+            return content
139 142
 
140 143
         ### VOD genre videos ###
141 144
         elif path == "vod/search":
@@ -152,7 +155,7 @@ class Source(SourceBase):
152 155
             if start_index+js["total"]<js["total_found"]:
153 156
                 start_index += 30
154 157
                 data2 = re.sub("start_index=\d+","start_index=%s"%start_index,data) if "start_index" in qs else data +"&start_index=30"
155
-                content.append(("Next page",self.name+"::"+data2,"","Next page"))                                            
158
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
156 159
             return content
157 160
 
158 161
         ### VOD video sigle/series ###

sources/iplayer.py (+4, -1)

@@ -16,6 +16,9 @@ import datetime,time
16 16
 from SourceBase import SourceBase, stream_type
17 17
 import util
18 18
 from collections import OrderedDict
19
+import ssl
20
+ssl._create_default_https_context = ssl._create_unverified_context
21
+
19 22
 
20 23
 API_URL = 'https://m.lattelecom.tv/'
21 24
 user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
@@ -498,7 +501,7 @@ Connection: Keep-Alive
498 501
             return r.content
499 502
 
500 503
         except Exception as ex:
501
-            if ex.code==403:
504
+            if "code" in dir(ex) and ex.code==403:
502 505
                 return ex.read()
503 506
             else:
504 507
                 return None

sources/iplayer.pyc (binary)


sources/lmt.py (+7, -3)

@@ -12,8 +12,12 @@ except:
12 12
 import requests
13 13
 import datetime, re, sys,os
14 14
 import ConfigParser
15
-from collections import OrderedDict
15
+import ssl
16
+ssl._create_default_https_context = ssl._create_unverified_context
17
+
18
+
16 19
 from SourceBase import SourceBase
20
+
17 21
 import resolver
18 22
 try:
19 23
     import util
@@ -152,8 +156,8 @@ Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
152 156
             stream["desc"] = desc
153 157
             stream["resolver"] = "lmt"
154 158
             return [stream]
155
-        elif re.search('src="http*://www.youtube.com/embed/(\w+).*"',r):
156
-            m = re.search('src="http*://www.youtube.com/embed/(\w+).*"',r)
159
+        elif re.search('src="http*://www.youtube.com/embed/([\w-]+).*"',r):
160
+            m = re.search('src="http*://www.youtube.com/embed/([\w-]+).*"',r)
157 161
             video_id = m.group(1)
158 162
             #http://www.youtube.com/embed/RUyQ_JJ6A84?rel=0&fs=1&wmode=transparent
159 163
             data2 = YouTubeVideoUrl().extract(video_id)

sources/ltc.py (+31, -17)

@@ -16,6 +16,8 @@ import datetime
16 16
 from SourceBase import SourceBase, stream_type
17 17
 import util
18 18
 from collections import OrderedDict
19
+import ssl
20
+ssl._create_default_https_context = ssl._create_unverified_context
19 21
 
20 22
 API_URL = 'https://m.lattelecom.tv/'
21 23
 user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
@@ -72,13 +74,7 @@ Accept-Language: en-US,en;q=0.8
72 74
 
73 75
     def get_content(self, data):
74 76
         print "[ltc] get_content:", data
75
-        if "::" in data:
76
-            data = data.split("::")[1]
77
-        path = data.split("?")[0]
78
-        clist = path.split("/")[0]
79
-        params = data[data.find("?"):] if "?" in data else ""
80
-        qs = dict(map(lambda x:x.split("="),re.findall("\w+=[\w-]+",params)))
81
-        #lang = qs["lang"] if "lang" in qs else self.country
77
+        source, data, path, plist, clist, params, qs = self.parse_data(data)
82 78
 
83 79
         content=[]
84 80
         content.append(("..return", "back","","Return back"))
@@ -333,7 +329,7 @@ Accept-Language: en-US,en;q=0.8
333 329
                 data2 = item["url"][1:].encode("utf8")
334 330
                 if data2[-1]=="/": data2=data2[:-1]
335 331
                 if "/raidijumi/" in data2:
336
-                    data2 += "?series"
332
+                    data2 += "?series" # TODO
337 333
                 img = "https://www.lattelecom.tv"+item["image"].encode("utf8")
338 334
                 desc = "%s\n%s"%(title,item["genre"].encode("utf8"))
339 335
                 content.append((title,self.name+"::"+data2,img,desc))
@@ -345,16 +341,31 @@ Accept-Language: en-US,en;q=0.8
345 341
             return content
346 342
 
347 343
         ### Sērijas
348
-        elif clist=="videonoma" and  params=="?series":
344
+        elif clist=="videonoma" and  (params=="?series" or "season_nr" in qs):
349 345
             url = "https://www.lattelecom.tv/"+path
350 346
             r = self._http_request(url,headers=self.headers2)
351 347
             if not r:
352 348
                 return content
353 349
             m = re.search('<div class="movie_details"><div class="movie_titles"><div class="en">([^<]+?)</div>', r, re.DOTALL | re.IGNORECASE)
354 350
             raidijums = m.group(1) + " - " if m else ""
351
+            img0 = re.search('<meta name="og:image" content="([^"]+)">', r).group(1) if re.search('<meta name="dr:say:img" content="([^"]+)">', r) else ""
352
+
353
+            m = re.search('<ul class="episode_choiser".+?(<li>[^<]+</li>)+</ul>', r)
354
+            if m and not "season_nr" in qs: # ir sezonas
355
+                for s,it in enumerate(re.findall('<li>([^<]+)</li>', m.group())):
356
+                    title = "%s%s" % (raidijums, it)
357
+                    se = re.search("(\d+)",it).group(1)
358
+                    data2 = path+"?season_nr=%s"%(se)
359
+                    img = img0
360
+                    desc = title
361
+                    content.append((title, self.name + "::" + data2, img, desc))
362
+                return content
363
+
355 364
             m = re.search('data-bid="([^"]+)',r)
356 365
             if m:
357
-                url = "https://www.lattelecom.tv/episode-loader.json?bcast_ids=%s"%(m.group(1))
366
+                if not "season_nr" in qs:
367
+                    qs["season_nr"]="0"
368
+                url = "https://www.lattelecom.tv/episode-loader.json?bcast_ids=%s&season_nr=%s"%(m.group(1),qs["season_nr"])
358 369
                 r = self._http_request(url,headers=self.headers2)
359 370
                 i1 = r.find('{"total":')
360 371
                 i2 = r.find('}<div class=')
@@ -439,12 +450,15 @@ Accept-Language: en-US,en;q=0.8
439 450
                     desc = title
440 451
             elif vtype == "record-streams":
441 452
                 epg = self.get_epg_id(vid)
442
-                title = epg["title"].encode("utf8")
443
-                t1 = datetime.datetime.fromtimestamp(int(epg["unix_start"])).strftime('%H:%M')
444
-                t2 = datetime.datetime.fromtimestamp(int(epg["unix_stop"])).strftime('%H:%M')
445
-                date = epg["date"]
446
-                title = "%s (%s %s-%s)"%(title,date,t1,t2)
447
-                desc = epg["description"]
453
+                if epg:
454
+                    title = epg["title"].encode("utf8")
455
+                    t1 = datetime.datetime.fromtimestamp(int(epg["unix_start"])).strftime('%H:%M')
456
+                    t2 = datetime.datetime.fromtimestamp(int(epg["unix_stop"])).strftime('%H:%M')
457
+                    date = epg["date"]
458
+                    title = "%s (%s %s-%s)"%(title,date,t1,t2)
459
+                    desc = epg["description"]
460
+                else:
461
+                    title = desc = data
448 462
 
449 463
             streams = []
450 464
             for s in r["data"]:
@@ -551,7 +565,7 @@ Accept-Language: en-US,en;q=0.8
551 565
             self.epg_id={}
552 566
             self.epg_id2={}
553 567
             self.epg_date={}
554
-            slef.epg_ch={}
568
+            self.epg_ch={}
555 569
             self.epg_cat={}
556 570
         if not date in self.epgdates:
557 571
             r=self.call("tv/epg/?daynight=%s"%date)

sources/ltc.pyc (binary)


sources/movieplace.py (+3, -0)

@@ -14,6 +14,9 @@ import datetime, re, sys,os
14 14
 import ConfigParser
15 15
 from collections import OrderedDict
16 16
 from SourceBase import SourceBase
17
+import ssl
18
+ssl._create_default_https_context = ssl._create_unverified_context
19
+
17 20
 import resolver
18 21
 try:
19 22
     import util

sources/mtgplay.py (+41, -38)

@@ -15,6 +15,9 @@ except:
15 15
 # coding=utf8
16 16
 import urllib2, urllib
17 17
 import datetime, re, sys
18
+import ssl
19
+ssl._create_default_https_context = ssl._create_unverified_context
20
+
18 21
 from SourceBase import SourceBase
19 22
 
20 23
 API_URL = 'http://playapi.mtgx.tv/v3/'
@@ -35,20 +38,20 @@ REGIONS = [
35 38
 
36 39
 
37 40
 class Source(SourceBase):
38
-    
41
+
39 42
     def __init__(self,country="lv",cfg_path=None):
40 43
         self.name = "mtgplay"
41 44
         self.title = "Skaties.lv (TV3)"
42 45
         self.img = "http://skaties.lv/touch-icon-192x192.png"
43 46
         self.desc = "MTG skaties.lv satura skatīšanās (LNT,TV3, TV6 u.c.)"
44
-        
47
+
45 48
         self.country=country
46 49
         self.pic_size = "327x250" #"1000x765"
47
-        
50
+
48 51
     def get_content(self, data):
49 52
         print "[mtgplay] get_content:", data
50 53
         if "::" in data:
51
-            data = data.split("::")[1]        
54
+            data = data.split("::")[1]
52 55
         if "/" in data:
53 56
             citem,cid = data.split("/")
54 57
             clist = ""
@@ -57,45 +60,45 @@ class Source(SourceBase):
57 60
             qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",data)))
58 61
             citem,cid = ("","")
59 62
             self.country = qs["country"] if "country" in qs else "lv"
60
-        
63
+
61 64
         content=[]
62 65
         content.append(("..return", "back","","Return back"))
63
-        
66
+
64 67
         if clist=="home":
65 68
             content.extend([
66
-                #("Search", "mtgplay::meklet?country=%s&term={0}"%self.country,"","Search videos"), ### TODO                
69
+                #("Search", "mtgplay::meklet?country=%s&term={0}"%self.country,"","Search videos"), ### TODO
67 70
                 ("TV Live", "mtgplay::videos?country=%s&order=title&type=live"%self.country,"","TV live streams(not always available)"),
68 71
                 ("Last videos", "mtgplay::videos?country=%s&order=-airdate"%self.country,"","Last aired videos"),
69 72
                 ("Categories", "mtgplay::categories?country=%s&order=name"%self.country,"","Categories"),
70 73
                 ("Channels", "mtgplay::channels?country=%s&order=id"%self.country,"","TV channels"),
71
-                ("Programs by name", "mtgplay::formats?country=%s&order=-title"%self.country,"","Programs by name"),             
72
-                ("Programs by popularity", "mtgplay::formats?country=%s&order=-popularity"%self.country,"","Programs by popularity")
+                ("Programs by name", "mtgplay::formats?country=%s&order=-title"%self.country,"","Programs by name"),
+                ("Programs by popularity", "mtgplay::formats?country=%s&order=-popularity"%self.country,"","Programs by popularity")
             ])
             return content
-
+
         r = self.call(data)
         if not r:
             content.append(("Error", "","","Error reading '%s'"%data))
             return content
-
+
         if clist:
             if r["_links"].has_key("prev"):
                 data2 = r["_links"]["prev"]["href"].replace(API_URL,"")
                 content.append(("Previous page", self.name+"::"+data2.encode("utf8"),"", "Goto previous page"))
-
+
             if "_embedded" in r:
                 for item in r["_embedded"][clist]:
                     if "title" in item:
                         title = item["title"]
                     elif "name" in item:
                         title = item["name"]
-                    #data2 = self.name+"::"+"%s/%s"%(clist,item["id"])
+                    #data2 = self.name+"::"+"%s/%s"%(clist,item["id"])
                     img = item["_links"]["image"]["href"].replace("{size}",self.pic_size) if "image" in item["_links"] else ""
                     desc = item["summary"] if "summary" in item and item["summary"] else ""
-
+
                     ### Video ###
                     if clist=="videos":
-                        data2 = "videos/%s"%item["id"]
+                        data2 = "videos/%s"%item["id"]
                         summary = item["summary"] if item["summary"] else ""
                         air_at = item["broadcasts"][0]["air_at"] if "broadcasts" in item and len(item["broadcasts"])>0 and "air_at" in item["broadcasts"][0] else ""
                         if not air_at:
@@ -111,8 +114,8 @@ class Source(SourceBase):
                             views = views+" views"
                         except: views = ""
                         desc = "Aired: %s %s\nDuration: %s %s\n\n%s"%(air_at, playable_to,duration,views,summary)
-
-                    ### Categories ###
+
+                    ### Categories ###
                     elif clist == "categories":
                         #data2 = item["_links"]["formats"]["href"].replace(API_URL,"")
                         data2 = "formats?category=%s"%item["id"]
@@ -120,8 +123,8 @@ class Source(SourceBase):
                         if "category" in qs: data2 += "&category="+qs["category"]
                         if "channel" in qs: data2 += "&channel="+qs["channel"]
                         data2 += "&order=title"
-
-                    ### Channels ###
+
+                    ### Channels ###
                     elif clist == "channels":
                         #data2 = item["_links"]["categories"]["href"].replace(API_URL,"")
                         data2 = "categories?channel=%s"%item["id"]
@@ -129,8 +132,8 @@ class Source(SourceBase):
                         if "category" in qs: data2 += "&category="+qs["category"]
                         if "channel" in qs: data2 += "&channel="+qs["channel"]
                         data2 += "&order=name"
-
-                    ### Formats (programs) ###
+
+                    ### Formats (programs) ###
                     elif clist == "formats":
                         #data2 = item["_links"]["videos"]["href"].replace(API_URL,"")
                         data2 = "seasons?format=%s"%item["id"]
@@ -141,9 +144,9 @@ class Source(SourceBase):
                         air_at = item["latest_video"]["publish_at"] if "publish_at" in item["latest_video"] else ""
                         air_at = air_at[0:16].replace("T"," ") if air_at else ""
                         if air_at:
-                            desc = "Last video: %s\n"%air_at + desc
-
-                    ### Seasons ###
+                            desc = "Last video: %s\n"%air_at + desc
+
+                    ### Seasons ###
                     elif clist == "seasons":
                         #data2 = item["_links"]["videos"]["href"].replace(API_URL,"")
                         data2 = "videos?season=%s"%item["id"]
@@ -151,30 +154,30 @@ class Source(SourceBase):
                         #if "category" in qs: data2 += "&category="+qs["category"]
                         #if "channel" in qs: data2 += "&channel="+qs["channel"]
                         data2 += "&order=title"
-
+
                         summary = item["summary"] if "summary" in item and item["summary"] else ""
                         try:
                             latest_video = item["latest_video"]["publish_at"]
                             latest_video = latest_video[0:16].replace("T"," ")
                         except: latest_video = ""
                         desc = ("%s\nLatest video: %s"%(summary,latest_video))
-
+
                     content.append((title.encode("utf8"),self.name+"::"+data2.encode("utf8"),img.encode("utf8"),desc.encode("utf8")))
-
+
             if r["_links"].has_key("next"):
                 data2 = r["_links"]["next"]["href"].replace(API_URL,"").encode("utf8")
                 content.append(("Next page", self.name+"::"+data2.encode("utf8"),"","Goto next page"))
-
+
         elif citem:
             item = r
             if "title" in item:
                 title = item["title"]
             elif "name" in item:
                 title = r["name"]
-            #data2 = self.name+"::"+"%s/%s"%(clist,item["id"])
+            #data2 = self.name+"::"+"%s/%s"%(clist,item["id"])
            img = item["_links"]["image"]["href"].replace("{size}",self.pic_size) if "image" in item["_links"] else ""
            desc = item["summary"] if "summary" in item and item["summary"] else ""
-
+
            dd = "videos/stream/%s"%cid
            r2 = self.call(dd)
            if "streams" in r2 and "hls" in r2["streams"]:
@@ -182,13 +185,13 @@ class Source(SourceBase):
                content = (title.encode("utf8"),data2.encode("utf8"),img.encode("utf8"),desc.encode("utf8"))
            elif "msg" in r2:
                content = (r2["msg"].encode("utf8"),"","","")
-            else:
-                content = ("Error getting stream","","","")
-
+            else:
+                content = ("Error getting stream","","","")
+
        else:
            pass
        return content
-
+
    def is_video(self,data):
        if "::" in data:
            data = data.split("::")[1]
@@ -197,8 +200,8 @@ class Source(SourceBase):
            return True
        else:
            return False
-
-    def get_stream(self,id):
+
+    def get_stream(self,id):
        dd = "videos/stream/%s"%id
        r2 = self.call(dd)
        if "streams" in r2 and "hls" in r2["streams"]:
@@ -206,7 +209,7 @@ class Source(SourceBase):
        else:
            data2 = ""
        return data2.encode("utf8")
-
+
    def call_all(self, endpoint, params = None):
        url = API_URL % (endpoint)
        if params:
@@ -229,7 +232,7 @@ class Source(SourceBase):
                url = content["_links"]["next"]["href"]
            else: break
        return result
-
+
    def call(self, data,headers=headers0):
        url = API_URL + data
        #print "[TVPlay Api] url: ",url
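For orientation when reading the menu-building code above: each source module returns menu entries as 4-tuples of (title, data, image URL, description), where data is a "<source>::<path>" address that the next get_content() call resolves. A minimal sketch of that convention follows; the function name and entries are illustrative only, not part of the commit.

# Illustrative sketch of the (title, data, img, desc) menu convention
def example_menu(country="lv"):
    content = []
    content.append(("..return", "back", "", "Return back"))
    content.append(("Programs by name",
                    "mtgplay::formats?country=%s&order=-title" % country,
                    "", "Programs by name"))
    content.append(("Programs by popularity",
                    "mtgplay::formats?country=%s&order=-popularity" % country,
                    "", "Programs by popularity"))
    return content

for title, data, img, desc in example_menu():
    print "%s -> %s" % (title, data)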

+ 24 - 21  sources/play24.py  View File

@@ -12,6 +12,9 @@ except:
 
 import urllib2, urllib
 import datetime, re, sys
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+
 from SourceBase import SourceBase
 
 API_URL = 'http://replay.lsm.lv/'
@@ -21,39 +24,39 @@ User-Agent: Mozilla/5.0 (Linux; U; Android 4.4.4; Nexus 5 Build/KTU84P) AppleWeb
 """)
 import HTMLParser
 h = HTMLParser.HTMLParser()
-
+
 class Source(SourceBase):
-
+
    def __init__(self,country="lv",cfg_path=None):
        self.name = "play24"
        self.title = "Play24.lv"
        self.img = "http://play24.lv/images/play24-logo-black.png"
        self.desc = "play24.lv (Riga24TV) satura skatīšanās"
-
+
        self.country=country
-
+
    def get_content(self, data):
        print "[play24] get_content:", data
        if "::" in data:
-            data = data.split("::")[1]
+            data = data.split("::")[1]
        path = data.split("?")[0]
        clist = path.split("/")[0]
        params = data[data.find("?"):] if "?" in data else ""
        qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",params)))
        lang = qs["lang"] if "lang" in qs else self.country
-
+
        content=[]
        content.append(("..return", "back","","Return back"))
-
+
        if clist=="home":
            content.extend([
                ("Live stream", "play24::tiesraide","","TV live streams"),
                ("Last videos", "play24::jaunakie","","Last videos"),
                ("Categories", "play24::kategorijas","","Categories"),
-                ("Programs", "play24::raidijumi","","Programs"),
+                ("Programs", "play24::raidijumi","","Programs"),
             ])
            return content
-
+
        ### Jaunākie video ###
        elif clist=="jaunakie":
            url = "http://play24.lv/"
@@ -66,7 +69,7 @@ class Source(SourceBase):
                desc = title
                content.append((title,self.name+"::"+data2,img,desc))
            return content
-
+
        ### Kategorijas ###
        elif clist=="kategorijas":
            url = "http://play24.lv/"
@@ -79,7 +82,7 @@ class Source(SourceBase):
                desc = title
                content.append((title,self.name+"::"+data2,img,desc))
            return content
-
+
        elif clist=="kategorija":
            url = "http://play24.lv/"+data
            r = self._http_request(url)
@@ -98,7 +101,7 @@ class Source(SourceBase):
                progr = m.group(2) if m else ""
                m = re.search('<span class="masonry-item__date">([^<]+)</span>', article, re.DOTALL)
                date = m.group(1).strip() if m else ""
-
+
                if date:
                    title = title + " (%s %s)"%(date,progr)
                desc = title + "\n%s - %s"%(progr,date)
@@ -106,9 +109,9 @@ class Source(SourceBase):
            m = re.search(r'<li><a href="http://play24\.lv/([^"]+)" rel="next">&raquo;</a></li>', r, re.DOTALL)
            if m:
                data2 = m.group(1)
-                content.append(("Next page",self.name+"::"+data2,"","Next page"))
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
            return content
-
+
        ### Raidijumi (programmas)
        elif clist=="raidijumi":
            url = "http://play24.lv/"
@@ -137,11 +140,11 @@ class Source(SourceBase):
            m = re.search(r'<li><a href="http://play24\.lv/([^"]+)" rel="next">&raquo;</a></li>', r, re.DOTALL)
            if m:
                data2 = m.group(1)
-                content.append(("Next page",self.name+"::"+data2,"","Next page"))
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
            return content
 
        elif clist == "video" or clist == "tiesraide":
-            if clist == "video":
+            if clist == "video":
                url = "http://play24.lv/"+data
                r = self._http_request(url)
                # var ov_video_id = '59422';
@@ -153,14 +156,14 @@ class Source(SourceBase):
                m = re.search('<meta name="description" content="([^"]+)" />', r, re.DOTALL)
                desc = m.group(1) if m else ""
                desc = h.unescape(desc.decode("utf8")).encode("utf8")
-
+
                url = "http://player.tvnet.lv/v/%s"%id
            else:
                url = "http://player.tvnet.lv/l/11"
                desc = ""
            r = self._http_request(url)
            m = re.search('<h1 class="static title">.+?<a href="[^"]+">([^<]+)</a>', r, re.DOTALL)
-            title = m.group(1) if m else ""
+            title = m.group(1) if m else ""
            s = {}
            for item in re.findall('source src="([^"]+)" data-stream="([^"]+)" data-quality="([^"]+)"', r, re.DOTALL):
                s[item[1]] = (item[0],item[2])
@@ -170,8 +173,8 @@ class Source(SourceBase):
                    data2 = s[t][0]
                    break
            return (title,data2,"",desc)
-
-
+
+
    def is_video(self,data):
        if "::" in data:
            data = data.split("::")[1]
@@ -180,7 +183,7 @@ class Source(SourceBase):
            return True
        else:
            return False
-
+
    def call(self, data,headers=headers0,lang=""):
        if not lang: lang = self.country
        url = API_URL%lang + data
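The ssl._create_default_https_context = ssl._create_unverified_context line added to sources/play24.py above, and repeated in the modules that follow, is the actual urllib2 SSL fix from this commit: it swaps the default HTTPS context for an unverified one process-wide, so urllib2.urlopen() stops raising "SSL: CERTIFICATE_VERIFY_FAILED" on HTTPS pages, at the cost of skipping certificate checks. A standalone sketch of the same idea (the URL is only an example):

# Process-wide opt-out of certificate verification (Python 2.7.9+),
# mirroring the patch added to the source modules in this commit.
import ssl
import urllib2

ssl._create_default_https_context = ssl._create_unverified_context

# Any HTTPS request made through urllib2 now skips certificate validation.
r = urllib2.urlopen("https://self-signed.badssl.com/")  # example URL only
print r.getcode()

A narrower alternative (not what the commit does) is to pass context=ssl._create_unverified_context() to individual urllib2.urlopen() calls instead of patching the module-level default.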

+ 4 - 0  sources/replay.py  View File

@@ -12,6 +12,9 @@ except:
 
 import urllib2, urllib
 import datetime, re, sys
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+
 from SourceBase import SourceBase
 import util
 
@@ -146,6 +149,7 @@ class Source(SourceBase):
                     content=("No stream found %s"%data,"","","No stream found")
                     return content
                 data2 = m.group(1).replace("\\","")
+                #r = self._http_request(data2, headers=headers)
 
             else: # audio
                 lrn = ch.replace("lr","")

Binary  sources/replay.pyc  View File


+ 3 - 0  sources/serialguru.py  View File

@@ -13,6 +13,9 @@ except:
 import urllib2, urllib
 import datetime, re, sys,os
 import ConfigParser
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+
 from SourceBase import SourceBase
 
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])

+ 1 - 2  sources/streams.cfg  View File

@@ -10,7 +10,6 @@ Skaties.lv (TV3)|mtgplay::home|http://skaties.lv/touch-icon-192x192.png|MTG kan
 Shortcut (lattelecom.tv)|ltc::home|https://kursors.lv/wp-content/uploads/2016/07/Shortcut-logo.png|lattelecom TV, arhīves un video
 Play24.lv (Riga24TV)|play24::home|http://play24.lv/images/play24-logo-black.png|play24.lv (Riga24TV)tiešraide un arhīvs
 viaplay.lv|viaplay::home|https://yt3.ggpht.com/-noVdjbNR-V8/AAAAAAAAAAI/AAAAAAAAAAA/yZ9XNP5urLY/s900-c-k-no-mo-rj-c0xffffff/photo.jpg|Viaplay.lv - filmas latviešu, krievu u.c. valodās
-LMT straume|lmt::home|http://www.lob.lv/images/logo/lmt_straume_vert_rgb.png|LMT straume - dažādi video latviesu valodā
 TVDom.tv|tvdom::home|https://tvdom.tv/front/assets/images/logo.png|PBK tiešraides un arhīvs
 BBC iPlayer|iplayer::home|http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png|BBC live streams and arhive
 Euronews|euronews::home|http://pbs.twimg.com/profile_images/732665354242150400/tZsCnjuh_400x400.jpg|Euronews live streams and archive
@@ -22,7 +21,7 @@ USTVNow|ustvnow::home|http://watch.ustvnow.com/assets/ustvnow/img/ustvnow_og_ima
 FilmOn|filmon::home|http://behindthegloves.com/wp-content/uploads/2016/01/FilmOn-logo1.jpg|FilmOn - tiešraides un video (dažādās valodās)
 MTGPlay|config::mtg|https://www.mtg.com/wp-content/uploads/2015/11/MTG-Logo-Medium-Red-PNG.png|Other countries MTG media portals content
 Filmas.lv|filmas::home|https://www.filmas.lv/wp-content/uploads/2013/06/LVfilmas-logo-jauns21.png|Filmas.lv - Latvijas filmas
-
+LMT straume|lmt::home|http://www.lob.lv/images/logo/lmt_straume_vert_rgb.png|LMT straume - dažādi video latviesu valodā
 
 [my_tv]
 My Tv
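For reference, each streams.cfg row above is a pipe-separated record: title, "<source>::<data>" address, icon URL, and description; the change itself only moves the "LMT straume" entry to the end of the list. A small parsing sketch (hypothetical helper, not part of the repository):

# Hypothetical helper: split one streams.cfg row into its four fields.
def parse_stream_row(row):
    title, data, img, desc = row.split("|", 3)
    return {"title": title, "data": data, "img": img, "desc": desc}

row = "TVDom.tv|tvdom::home|https://tvdom.tv/front/assets/images/logo.png|PBK live streams and archive"
print parse_stream_row(row)["data"]   # -> tvdom::home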

+ 3 - 1  sources/tvdom.py  View File

@@ -9,10 +9,12 @@ try:
     import json
 except:
     import simplejson as json
-
 import urllib2, urllib
 import datetime, re, sys,os
 from collections import OrderedDict
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+
 from SourceBase import SourceBase
 
 API_URL = 'http://replay.lsm.lv/'

+ 3 - 1  sources/ustvnow.py  View File

@@ -9,11 +9,13 @@ try:
     import json
 except:
     import simplejson as json
-
 import urllib2, urllib
 import datetime, re, sys,os
 import traceback
 from collections import OrderedDict
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+
 from SourceBase import SourceBase
 
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])

+ 5 - 2  sources/viaplay.py  View File

@@ -19,6 +19,9 @@ except:
 import urlparse, urllib
 import datetime, time,re, sys,os
 from collections import OrderedDict
+import ssl
+ssl._create_default_https_context = ssl._create_unverified_context
+
 from SourceBase import SourceBase
 try:
     import util
@@ -415,9 +418,9 @@ Referer: https://viaplay.lv/
                         #headers = {"Cookie":r2.headers["set-cookie"]}
                     #else:
                         #headers={}
-                    url2p=util.streamproxy_encode(url2,headers)
+                    #url2p=util.streamproxy_encode(url2,headers)
                     stream = util.item()
-                    stream["url"]=url2p
+                    stream["url"]=url2
                     stream["lang"]=s["lang"]
                     stream["quality"]="%s"%(s2[1])
                     stream["name"]= title

Binary  sources/viaplay.pyc  View File


+ 39 - 9  util.py  View File

@@ -195,8 +195,6 @@ SPLIT_CODE = urllib.quote(SPLIT_CHAR)
 EQ_CODE = urllib.quote("=")
 COL_CODE = urllib.quote(":")
 SPACE_CODE = urllib.quote(" ")
-PROXY_URL = "http://localhost:88/"
-
 
 def make_fname(title):
     "Make file name from title"
@@ -205,7 +203,6 @@ def make_fname(title):
     fname0 = re.sub("['""]","",fname0)
     return fname0
 
-
 def hls_base(url):
     url2 = url.split("?")[0]
     url2 = "/".join(url2.split("/")[0:-1])+ "/"
@@ -214,24 +211,28 @@ def hls_base(url):
 def stream_change(stream):
     #return stream # TODO
     if "resolver" in stream and stream["resolver"] in ("viaplay","hqq","filmas") or \
-        "surl" in stream and re.search("https*://(hqq|goo.\gl)",stream["surl"]):
+        "surl" in stream and re.search("https*://(hqq|goo\.gl)",stream["surl"]):
         stream["url"] = streamproxy_encode(stream["url"],stream["headers"])
         stream["headers"] = {}
         return stream
     else:
         return stream
 
-def streamproxy_encode(url,headers=[]):
+def streamproxy_encode(url,headers=[],proxy_url=None):
+    PROXY_URL = "http://localhost:8880/"
     if not "?" in url:
         url = url+"?"
     url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
-    url2 = PROXY_URL + url2
+    if not proxy_url:
+        proxy_url = PROXY_URL
+    url2 = proxy_url + url2
     if headers:
         headers2 = []
         for h in headers:
             headers2.append("%s=%s"%(h,headers[h].replace("=",EQ_CODE).replace(SPLIT_CHAR,SPLIT_CODE).replace(" ",SPACE_CODE)))
         headers2 = SPLIT_CHAR.join(headers2)
         url2 = url2+SPLIT_CHAR+headers2
+    #return url2.encode("utf8") if isinstance(url2,unicode) else url2
     return url2
 
 def streamproxy_decode(urlp):
@@ -247,6 +248,33 @@ def streamproxy_decode(urlp):
             headers[h.split("=")[0]]=urllib.unquote(h.split("=")[1])
     return url,headers
 
+def streamproxy_encode2(url,headers=[],proxy_url=None):
+    PROXY_URL = "http://localhost:8880/"
+    #url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
+    url2 = urllib.quote_plus(url)
+    if not proxy_url:
+        proxy_url = PROXY_URL
+    url2 = proxy_url + url2+"/?"
+    if headers:
+        headers2 = []
+        for h in headers:
+            headers2.append("%s=%s"%(h,headers[h].replace("=",EQ_CODE).replace(SPLIT_CHAR,SPLIT_CODE).replace(" ",SPACE_CODE)))
+        headers2 = SPLIT_CHAR.join(headers2)
+        url2 = url2+SPLIT_CHAR+headers2
+    return url2
+
+def streamproxy_decode2(urlp):
+    path = urlp.replace(re.search("http://[^/]+",urlp).group(0),"")
+    p = path.split(SPLIT_CHAR)
+    url = urllib.unquote_plus(p[0][1:-2])
+    #headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
+    headers={}
+    if len(p)>1:
+        for h in p[1:]:
+            #h = urllib.unquote()
+            headers[h.split("=")[0]]=urllib.unquote(h.split("=")[1])
+    return url,headers
+
 class Captions(object):
     def __init__(self,uri):
         self.uri = uri
@@ -687,10 +715,12 @@ if __name__ == "__main__":
     url = "http://str1e.lattelecom.tv/mobile-vod/mp4:sf_fantastic_beasts_and_where_to_find_them_en_hd.mp4/playlist.m3u8?resource_id=fantastic_beasts_and_where_to_find_them&auth_token=6NAvMFDG+rYTAc4hb5JeL2bmsaRR7bAE23M6KDmhKYOGyXoo0gDpJUE9scYy+nQmfbgk03cWMe9MuXWSH1GqwolEk2jOQ/8Mrg7tOdbwrA8zM7nmkfCZPqQkwajZN4mfSJQVKHqXqJ8="
     headers={}
     print url
-    urlp = streamproxy_encode(url,headers)
+    url = "replay::tiesraide/ltv1/"
+    url = "ltc::content/live-streams/103?include=quality"
+    urlp = streamproxy_encode2(url,headers)
     print urlp
-    url2,headers2 = streamproxy_decode(urlp)
-    #print url2 - 2
+    url2,headers2 = streamproxy_decode2(urlp)
+    print url2
     player(urlp)
     pass
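The new streamproxy_encode2()/streamproxy_decode2() pair above quote_plus-encodes the target URL into the path of the local proxy (http://localhost:8880/ unless proxy_url is given) and appends optional headers after the split character; decode2 reverses the transformation on the proxy side. A round-trip sketch, assuming this repository's util.py is importable and its module constants keep their current values:

# Round-trip sketch for the new proxy URL helpers (assumes this repo's util.py).
import util

url = "http://example.com/playlist.m3u8?token=abc"
headers = {"User-Agent": "Mozilla/5.0", "Referer": "http://example.com/"}

urlp = util.streamproxy_encode2(url, headers)
print urlp        # e.g. http://localhost:8880/http%3A%2F%2Fexample.com%2F...%3Ftoken%3Dabc/?<encoded headers>

url2, headers2 = util.streamproxy_decode2(urlp)
print url2        # expected to match the original url
print headers2    # expected to match the original headers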