Browse Source

urllib2 SSL kļūdas labojums

Ivars 7 years ago
parent
commit
57720f256f

+ 14
- 2
ContentSources.py View File

210
                 streams = [stream]
210
                 streams = [stream]
211
             else:
211
             else:
212
                 try:
212
                 try:
213
-                    streams = sources.get_streams(cur2[1])
213
+                    if not download:
214
+                        streams = sources.get_streams(cur2[1])
215
+                    else:
216
+                        stream = util.item()
217
+                        stream["url"] = cur2[1]
218
+                        stream["name"] = cur2[0]
219
+                        stream["url"] = util.streamproxy_encode2(stream["url"])
220
+                        print stream["url"]
221
+                        streams = [stream]
214
                 except Exception as e:
222
                 except Exception as e:
215
                     print unicode(e)
223
                     print unicode(e)
216
                     traceback.print_exc()
224
                     traceback.print_exc()
219
                 if not download:
227
                 if not download:
220
                     util.play_video(streams)
228
                     util.play_video(streams)
221
                 else:
229
                 else:
222
-                    Downloader.download_video(streams)
230
+                    #urlp = util.streamproxy_encode2(streams[0]["url"])
231
+                    #print urlp
232
+                    #util.player(urlp)
233
+                    #Downloader.download_video(streams)
234
+                    pass
223
             else:
235
             else:
224
                 print "**No stream to play - %s "%(
236
                 print "**No stream to play - %s "%(
225
                     cur2[1])
237
                     cur2[1])

+ 2
- 2
Downloader.py View File

120
     def download_fragment(self):
120
     def download_fragment(self):
121
         if self.ts_num>=len(self.ts_list):
121
         if self.ts_num>=len(self.ts_list):
122
             pass
122
             pass
123
-            print "Call later"
123
+            #print "Call later"
124
             reactor.callLater(10,self.update_manifest)
124
             reactor.callLater(10,self.update_manifest)
125
             reactor.callLater(10, self.download_fragment)
125
             reactor.callLater(10, self.download_fragment)
126
         else:
126
         else:
239
     reactor.stop()
239
     reactor.stop()
240
 ###############################################
240
 ###############################################
241
 
241
 
242
-def download_vide(stream):
242
+def download_video(stream):
243
     stream = stream[0]
243
     stream = stream[0]
244
     url = stream["url"]
244
     url = stream["url"]
245
     headers = stream["headers"]
245
     headers = stream["headers"]

+ 1
- 1
PlayStream.py View File

6
 # Used fragments of code from enigma2-plugin-tv3play by Taapat (https://github.com/Taapat/enigma2-plugin-tv3play)
6
 # Used fragments of code from enigma2-plugin-tv3play by Taapat (https://github.com/Taapat/enigma2-plugin-tv3play)
7
 #
7
 #
8
 
8
 
9
-__version__ = "0.6r"
9
+__version__ = "0.6t"
10
 __id__ = "playstream"
10
 __id__ = "playstream"
11
 __title__ = "PlayStream"
11
 __title__ = "PlayStream"
12
 __author__ = "ivars777@gmail.com"
12
 __author__ = "ivars777@gmail.com"

+ 486
- 479
PlayStream.wpr
File diff suppressed because it is too large
View File


+ 7
- 0
changelog.md View File

1
+**0.6t** (17.06.2017)
2
+- [bugfix] kļūda verot vaļā HTTPS lapas (urllib2 SSL: CERTIFICATE_VERIFY_FAILED)
3
+
4
+**0.6s** (13.04.2017)
5
+- [bugfix] LMT straume google tiešraides kļūda novērsta
6
+
1
 **0.6r** (01.04.2017)
7
 **0.6r** (01.04.2017)
2
 - [feature] LMT Straume video (bez TV, kas strādā tikai LMT tīklā)
8
 - [feature] LMT Straume video (bez TV, kas strādā tikai LMT tīklā)
9
+- [bugfix] proxy serveris piefrizēts, cerams ka strādā stabilāk (aktuāls viaplay ejošajai daļai)
3
 
10
 
4
 **0.6p** (31.03.2017)
11
 **0.6p** (31.03.2017)
5
 - [bugfix] filmix sērijas
12
 - [bugfix] filmix sērijas

+ 208
- 149
playstreamproxy.py View File

1
 #!/usr/bin/python
1
 #!/usr/bin/python
2
+# -*- coding: utf-8 -*-
2
 """
3
 """
3
 StreamProxy daemon (based on Livestream daemon)
4
 StreamProxy daemon (based on Livestream daemon)
4
 Ensures persistent cookies, User-Agents and others tricks to play protected HLS/DASH streams
5
 Ensures persistent cookies, User-Agents and others tricks to play protected HLS/DASH streams
18
 import urllib,urlparse
19
 import urllib,urlparse
19
 #import cookielib,urllib2
20
 #import cookielib,urllib2
20
 import requests
21
 import requests
22
+
21
 try:
23
 try:
22
     from requests.packages.urllib3.exceptions import InsecureRequestWarning
24
     from requests.packages.urllib3.exceptions import InsecureRequestWarning
23
     requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
25
     requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
25
     pass
27
     pass
26
 
28
 
27
 HOST_NAME = ""
29
 HOST_NAME = ""
28
-PORT_NUMBER = 88
30
+PORT_NUMBER = 8880
29
 DEBUG = True
31
 DEBUG = True
30
 DEBUG2 = False
32
 DEBUG2 = False
31
 
33
 
36
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
38
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
37
 headers0 = headers2dict("""
39
 headers0 = headers2dict("""
38
 icy-metadata: 1
40
 icy-metadata: 1
39
-Cache-Control: max-age=0
40
-Accept-Encoding: gzip, deflate
41
 User-Agent: GStreamer souphttpsrc libsoup/2.52.2
41
 User-Agent: GStreamer souphttpsrc libsoup/2.52.2
42
-Connection: Keep-Alive
43
 """)
42
 """)
44
 sessions = {}
43
 sessions = {}
44
+cur_directory = os.path.dirname(os.path.realpath(__file__))
45
+sources = None
46
+slinks = {}
47
+
45
 
48
 
46
 class StreamHandler(BaseHTTPRequestHandler):
49
 class StreamHandler(BaseHTTPRequestHandler):
47
 
50
 
48
     def do_HEAD(self):
51
     def do_HEAD(self):
49
-        print "**head"
52
+        print "**get_head"
50
         self.send_response(200)
53
         self.send_response(200)
51
         self.send_header("Server", "playstreamproxy")
54
         self.send_header("Server", "playstreamproxy")
52
         if ".m3u8" in self.path.lower():
55
         if ".m3u8" in self.path.lower():
62
 
65
 
63
     def do_GET(self):
66
     def do_GET(self):
64
         """Respond to a GET request"""
67
         """Respond to a GET request"""
65
-        self.log_message("\n\n"+40*"#"+"\nget_url: \n%s", self.path)
68
+        print "\n\n"+40*"#"+"\nget_url: \n%s", self.path
66
         p = self.path.split("~")
69
         p = self.path.split("~")
67
-        #url = urllib.unquote(p[0][1:])
68
-        url = p[0][1:]
69
-        url = url.replace(COL_CODE, ":")
70
-        headers = self.headers.dict
71
-        headers = {} # TODO
72
-        headers["host"] = urlparse.urlparse(url).hostname
70
+        #url = urllib.unquote(p[0][1:]) # TODO - vajag nocekot vai visi urli strādā
71
+        urlp = p[0][1:]
72
+        url = urlp.replace(COL_CODE, ":")
73
+        #headers = self.headers.dict
74
+        headers = {} # TODO izmanto saņemtos headerus, var aizvietot ar defaultajiem
75
+        #headers["host"] = urlparse.urlparse(url).hostname
73
         if len(p)>1:
76
         if len(p)>1:
74
             for h in p[1:]:
77
             for h in p[1:]:
75
                 k = h.split("=")[0].lower()
78
                 k = h.split("=")[0].lower()
78
         if DEBUG:
81
         if DEBUG:
79
             print "url=%s"%url
82
             print "url=%s"%url
80
             print "Original request headers + url headers:"
83
             print "Original request headers + url headers:"
81
-            print_headers(headers)
82
-
84
+            print_headers(self.headers.dict)
83
         self.protocol_version = 'HTTP/1.1'
85
         self.protocol_version = 'HTTP/1.1'
84
 
86
 
85
-        # TODO fetch selection
86
         try:
87
         try:
87
-            if ".lattelecom.tv/" in url: # lattelecom.tv hack
88
-                self.fetch_ltc(self.wfile, url, headers)
88
+            if "::" in url: # encoded source link
89
+                self.fetch_source(urlp, headers)
90
+            elif ".lattelecom.tv/" in url: # lattelecom.tv hack
91
+                self.fetch_ltc( url, headers)
89
             elif "filmas.lv" in url or "viaplay" in url: #  HLS session/decode filmas.lv in url:
92
             elif "filmas.lv" in url or "viaplay" in url: #  HLS session/decode filmas.lv in url:
90
-                self.fetch_url2(self.wfile, url, headers)
93
+                self.fetch_url2(url, headers)
91
             else: # plain fetch
94
             else: # plain fetch
92
-                self.fetch_url(self.wfile, url, headers)
95
+                self.fetch_url( url, headers)
93
         except Exception as e:
96
         except Exception as e:
94
             print "Got Exception: ", str(e)
97
             print "Got Exception: ", str(e)
95
             import traceback
98
             import traceback
97
 
100
 
98
     ### Remote server request procedures ###
101
     ### Remote server request procedures ###
99
 
102
 
100
-    def fetch_offline(self,wfile):
103
+    def fetch_offline(self):
101
         print "** Fetch offline"
104
         print "** Fetch offline"
102
         self.send_response(200)
105
         self.send_response(200)
103
         self.send_header("Server", "playstreamproxy")
106
         self.send_header("Server", "playstreamproxy")
106
         self.wfile.write(open("offline.mp4", "rb").read())
109
         self.wfile.write(open("offline.mp4", "rb").read())
107
         #self.wfile.close()
110
         #self.wfile.close()
108
 
111
 
109
-    def fetch_url(self,wfile,url,headers):
112
+    def fetch_source(self, urlp, headers):
110
         if DEBUG:
113
         if DEBUG:
111
             print "\n***********************************************************"
114
             print "\n***********************************************************"
112
-            print "fetch_url: \n%s"%url
113
-            print "**Server request headers: "
114
-            print_headers(headers)
115
-        #if ".lattelecom.tv/" in url and EQ_CODE in url:
116
-        #    url = url.replace(EQ_CODE,"=")
117
-        r = requests.get(url,headers = headers)
115
+            print "fetch_source: \n%s"%urlp
116
+        base_data = hls_base(urlp)
117
+        data = urllib.unquote_plus(base_data)[:-1]
118
+        if DEBUG: print "base_data=", base_data
119
+        if DEBUG: print "data=", data
120
+        if not base_data in slinks :
121
+            streams = sources.get_streams(data)
122
+            if not streams:
123
+                self.write_error(500)  # TODO
124
+                return
125
+            url = streams[0]["url"]
126
+            base_url = hls_base(url)
127
+            if DEBUG: print "New link, base_url=",base_url
128
+            ses = requests.Session()
129
+            ses.trust_env = False
130
+            slinks[base_data] = {"data": data, "urlp":urlp,"url": url, "base_url": base_url,"session":ses}
131
+        else:
132
+            ses = slinks[base_data]["session"]
133
+            if urlp == slinks[base_data]["urlp"]:
134
+                url = slinks[base_data]["url"]
135
+                if DEBUG: print "Existing base link", url
136
+            else:
137
+                url = urlp.replace(base_data, slinks[base_data]["base_url"])
138
+                if DEBUG: print "Existing new link", url
139
+        r = self.get_page_ses(url,ses,True,headers = headers)
118
         code = r.status_code
140
         code = r.status_code
141
+        if not code in (200,206): # TODO mēģina vēlreiz get_streams
142
+            self.write_error(code)
143
+            return
144
+        self.send_response(code)
145
+        self.send_headers(r.headers)
146
+        CHUNK_SIZE = 1024 *4
147
+        for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
148
+            try:
149
+                self.wfile.write(chunk)
150
+            except Exception as e:
151
+                print "Exception: ", str(e)
152
+                self.wfile.close()
153
+                return
154
+        if DEBUG: print "**File downloaded"
155
+        if "connection" in r.headers and r.headers["connection"] <> "keep-alive":
156
+            self.wfile.close()
157
+        return
158
+
159
+
160
+    def fetch_url(self, url,headers):
119
         if DEBUG:
161
         if DEBUG:
120
-            print "** Server/proxy response, code = %s"%code
121
-            print_headers(r.headers)
162
+            print "\n***********************************************************"
163
+            print "fetch_url: \n%s"%url
164
+        r = self.get_page(url,headers = headers)
165
+        code = r.status_code
122
         if not code in (200,206):
166
         if not code in (200,206):
123
-            print "***Error, code=%s",code
124
-            self.send_response(code)
125
-            self.send_headers(r.headers)
126
-            wfile.close()
167
+            self.write_error(code)
127
             return
168
             return
128
         self.send_response(code)
169
         self.send_response(code)
129
         self.send_headers(r.headers)
170
         self.send_headers(r.headers)
130
         CHUNK_SIZE = 1024*4
171
         CHUNK_SIZE = 1024*4
131
-        for chunk in r.iter_content(CHUNK_SIZE):
172
+        for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
132
             try:
173
             try:
133
-                wfile.write(chunk)
174
+                self.wfile.write(chunk)
134
             except Exception as e:
175
             except Exception as e:
135
                 print "Exception: ", str(e)
176
                 print "Exception: ", str(e)
136
-                wfile.close()
177
+                self.wfile.close()
137
                 return
178
                 return
138
         if DEBUG: print "**File downloaded"
179
         if DEBUG: print "**File downloaded"
139
-        wfile.close()
140
-        # time.sleep(1)
180
+        if "connection" in r.headers and r.headers["connection"] <> "keep-alive":
181
+            self.wfile.close()
141
         return
182
         return
142
 
183
 
143
-    def fetch_ltc(self, wfile, url, headers):
184
+    def fetch_ltc(self, url, headers):
185
+        "lattelecom.tv hack (have to update chunklist after each 6 min)"
144
         if DEBUG:
186
         if DEBUG:
145
-            print "\n***********************************************************"
146
-            print "fetch_url2: \n%s"%url
147
-        #self.log_message("fetch_filmas: \n%s", url)
148
-        #self.log_message("headers: %s", headers)
187
+            print "\n\n***********************************************************"
188
+            print "fetch_ltc: \n%s"%url
149
         base_url = hls_base(url)
189
         base_url = hls_base(url)
150
         if DEBUG: print "base_url=",base_url
190
         if DEBUG: print "base_url=",base_url
151
         if base_url not in sessions:
191
         if base_url not in sessions:
152
             if DEBUG: print "New session"
192
             if DEBUG: print "New session"
153
             sessions[base_url] = {}
193
             sessions[base_url] = {}
154
             sessions[base_url]["session"] = requests.Session()
194
             sessions[base_url]["session"] = requests.Session()
155
-            #sessions[base_url]["session"].headers = {}
156
-            sessions[base_url]["key"] = binascii.a2b_hex(headers["key"]) if "key" in headers and headers["key"] else None
195
+            sessions[base_url]["session"].trust_env = False
196
+            sessions[base_url]["session"].headers.update(headers0)
197
+            sessions[base_url]["playlist"] = ""
198
+            sessions[base_url]["chunklist"] = []
199
+
200
+        #  change ts file to valid one media_w215689190_33.ts?
201
+        tsfile = re.search("media_\w+_(\d+)\.ts", url, re.IGNORECASE)
202
+        if tsfile and sessions[base_url]["chunklist"]:
203
+            tnum = int(tsfile.group(1))
204
+            url2 = sessions[base_url]["chunklist"][tnum]
205
+            if not url2.startswith("http"):
206
+                url2 = base_url + url2
207
+            url = url2
208
+            if DEBUG: print "[playstreamproxy] url changed to ", url
209
+
210
+        ### get_page ###
157
         ses = sessions[base_url]["session"]
211
         ses = sessions[base_url]["session"]
158
-        key = sessions[base_url]["key"]
159
-        ses.headers.clear()
160
-        ses.headers.update(headers0)
212
+        #ses.headers.update(headers0)
161
         ses.headers.update(headers)
213
         ses.headers.update(headers)
162
-        ses.headers["Connection"]="Keep-Alive"
163
-        if DEBUG:
164
-            print "**Server request headers: "
165
-            print_headers(ses.headers)
166
-        for t in range(3):
167
-            r = ses.get(url, stream=True, verify=False)
168
-            code = r.status_code #r.status_code
169
-            if DEBUG:
170
-                print "\n\n=====================================\n**Server response:", code #r.status_code
171
-                print "**Server response headers: "
172
-                print_headers(r.headers)
173
-            if code in (200,2016): break
174
-        if not (code in (200,206)):
175
-            print "***Error, code=%s"%code
176
-            self.send_response(code)
177
-            self.send_headers(r.headers)
178
-            wfile.close()
179
-            #self.fetch_offline(wfile)
214
+        # ses.headers["Connection"]="Keep-Alive"
215
+        r = self.get_page_ses(url,ses)
216
+        code = r.status_code #r.status_code
217
+
218
+        if not (code in (200,206)) and tsfile:
219
+            # update chunklist
220
+            r2 = self.get_page(sessions[base_url]["playlist"])
221
+            streams = re.findall(r"#EXT-X-STREAM-INF:.*?BANDWIDTH=(\d+).*?\n(.+?)$", r2.content, re.IGNORECASE | re.MULTILINE)
222
+            if streams:
223
+                sorted(streams, key=lambda item: int(item[0]), reverse=True)
224
+                chunklist = streams[0][1]
225
+                if not chunklist.startswith("http"):
226
+                    chunklist = base_url + chunklist
227
+            else:
228
+                self.write_error(r.status_code)
229
+                return
230
+            print "[playstreamproxy] trying to update chunklist", chunklist
231
+            r3 = self.get_page_ses(chunklist,ses,True)
232
+            ts_list = re.findall(r"#EXTINF:.*?\n(.+?)$", r3.content, re.IGNORECASE | re.MULTILINE)
233
+            sessions[base_url]["chunklist"]= ts_list
234
+            tnum = int(tsfile.group(1))
235
+            url2 = sessions[base_url]["chunklist"][tnum]
236
+            if not url2.startswith("http"):
237
+                url2 = base_url + url2
238
+            r = self.get_page_ses(url2,ses,True)
239
+            if not r.status_code in (200,206):
240
+                self.write_error(r.status_code)
241
+                return
242
+        elif not r.status_code in (200,206):
243
+            self.write_error(r.status_code)
180
             return
244
             return
181
 
245
 
246
+        if "playlist.m3u8" in url:
247
+            sessions[base_url]["playlist"] = url
248
+
182
         ### Start of return formin and sending
249
         ### Start of return formin and sending
183
         self.send_response(200)
250
         self.send_response(200)
184
         #headers2 = del_headers(r.headers,["Content-Encoding",'Transfer-Encoding',"Connection",'content-range',"range"])
251
         #headers2 = del_headers(r.headers,["Content-Encoding",'Transfer-Encoding',"Connection",'content-range',"range"])
185
         headers2  = {"server":"playstreamproxy", "content-type":"text/html"}
252
         headers2  = {"server":"playstreamproxy", "content-type":"text/html"}
186
 
253
 
187
-        # Content-Type: application/vnd.apple.mpegurl (encrypted)
188
-        if r.headers["content-type"] == "application/vnd.apple.mpegurl":
189
-            content = r.content
190
-            content = r.content.replace(base_url,"")
191
-            content = re.sub("#EXT-X-KEY:METHOD=AES-128.+\n", "", content, 0, re.IGNORECASE | re.MULTILINE)
192
-            headers2["content-type"] = "application/vnd.apple.mpegurl"
193
-            headers2["content-length"] = "%s"%len(content)
194
-            r.headers["content-length"] = "%s"%len(content)
195
-            #headers2['content-range'] = 'bytes 0-%s/%s'%(len(content)-1,len(content))
196
-            #self.send_headers(headers2)
197
-            self.send_headers(r.headers)
198
-            wfile.write(content)
199
-            wfile.close()
200
-
201
-        # Content-Type: video/MP2T (encrypted)
202
-        elif r.headers["content-type"] == "video/MP2T" and key:
203
-            print "Decode video/MP2T"
204
-            content = r.content
205
-            from Crypto.Cipher import AES
206
-            iv = content[:16]
207
-            d = AES.new(key, AES.MODE_CBC, iv)
208
-            content = d.decrypt(content[16:])
209
-            headers2["content-type"] = "video/MP2T"
210
-            headers2["content-length"] = "%s"% (len(content))
211
-            #headers2['content-range'] = 'bytes 0-%s/%s' % (len(content) - 1, len(content))
212
-            print content[0:16]
213
-            print "Finish decode"
214
-            self.send_headers(headers2)
215
-            wfile.write(content)
216
-            wfile.close()
217
-
218
-        else:
219
-            print "Return regular content"
220
-            headers2["content-type"]  = r.headers["content-type"]
221
-            if "content-length" in r.headers:
222
-                headers2["content-length"] = r.headers["content-length"]
223
-            self.send_headers(r.headers)
224
-            CHUNK_SIZE = 4 * 1024
225
-            for chunk in r.iter_content(CHUNK_SIZE):
226
-                try:
227
-                    #print "#",
228
-                    wfile.write(chunk)
229
-                except Exception as e:
230
-                    print "Exception: ", str(e)
231
-                    return
232
-            if DEBUG: print "File downloaded = "
233
-            wfile.close()
234
-            #time.sleep(1)
235
-            return
254
+        if DEBUG: print "\n** Return  content"
255
+        headers2["content-type"]  = r.headers["content-type"]
256
+        if "content-length" in r.headers:
257
+            headers2["content-length"] = r.headers["content-length"]
258
+        self.send_headers(r.headers)
259
+        CHUNK_SIZE = 4 * 1024
260
+        for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
261
+            try:
262
+                #print "#",
263
+                self.wfile.write(chunk)
264
+            except Exception as e:
265
+                print "Exception: ", str(e)
266
+                return
267
+        if DEBUG: print "File downloaded = "
268
+        self.wfile.close()
269
+        #time.sleep(1)
270
+        return
236
 
271
 
237
 
272
 
238
-    def fetch_url2(self, wfile, url, headers):
273
+    def fetch_url2(self, url, headers):
239
         if DEBUG:
274
         if DEBUG:
240
             print "\n***********************************************************"
275
             print "\n***********************************************************"
241
             print "fetch_url2: \n%s"%url
276
             print "fetch_url2: \n%s"%url
242
-        #self.log_message("fetch_filmas: \n%s", url)
243
-        #self.log_message("headers: %s", headers)
244
         base_url = hls_base(url)
277
         base_url = hls_base(url)
245
         if DEBUG: print "base_url=",base_url
278
         if DEBUG: print "base_url=",base_url
246
         if base_url not in sessions:
279
         if base_url not in sessions:
247
             if DEBUG: print "New session"
280
             if DEBUG: print "New session"
248
             sessions[base_url] = {}
281
             sessions[base_url] = {}
249
             sessions[base_url]["session"] = requests.Session()
282
             sessions[base_url]["session"] = requests.Session()
250
-            #sessions[base_url]["session"].headers = {}
283
+            sessions[base_url]["session"].trust_env = False
284
+            sessions[base_url]["session"].headers.update(headers0)
251
             sessions[base_url]["key"] = binascii.a2b_hex(headers["key"]) if "key" in headers and headers["key"] else None
285
             sessions[base_url]["key"] = binascii.a2b_hex(headers["key"]) if "key" in headers and headers["key"] else None
252
         ses = sessions[base_url]["session"]
286
         ses = sessions[base_url]["session"]
287
+        ses.trust_env = False
253
         key = sessions[base_url]["key"]
288
         key = sessions[base_url]["key"]
254
-        ses.headers.clear()
255
-        ses.headers.update(headers0)
289
+        #ses.headers.clear()
256
         ses.headers.update(headers)
290
         ses.headers.update(headers)
257
-        ses.headers["Connection"]="Keep-Alive"
258
-        if DEBUG:
259
-            print "**Server request headers: "
260
-            print_headers(ses.headers)
261
-        for t in range(3):
262
-            r = ses.get(url, stream=True, verify=False)
263
-            code = r.status_code #r.status_code
264
-            if DEBUG:
265
-                print "\n\n=====================================\n**Server response:", code #r.status_code
266
-                print "**Server response headers: "
267
-                print_headers(r.headers)
268
-            if code in (200,2016): break
291
+        r = self.get_page_ses(url, ses,stream=False)
292
+        code = r.status_code #r.status_code
269
         if not (code in (200,206)):
293
         if not (code in (200,206)):
270
-            print "***Error, code=%s"%code
271
-            self.send_response(code)
272
-            self.send_headers(r.headers)
273
-            wfile.close()
274
-            #self.fetch_offline(wfile)
294
+            self.write_error(r.status_code)
275
             return
295
             return
276
 
296
 
277
         ### Start of return formin and sending
297
         ### Start of return formin and sending
280
         headers2  = {"server":"playstreamproxy", "content-type":"text/html"}
300
         headers2  = {"server":"playstreamproxy", "content-type":"text/html"}
281
 
301
 
282
         # Content-Type: application/vnd.apple.mpegurl (encrypted)
302
         # Content-Type: application/vnd.apple.mpegurl (encrypted)
283
-        if r.headers["content-type"] == "application/vnd.apple.mpegurl":
303
+        if r.headers["content-type"] == "application/vnd.apple.mpegurl" and key:
284
             content = r.content
304
             content = r.content
285
             content = r.content.replace(base_url,"")
305
             content = r.content.replace(base_url,"")
286
             content = re.sub("#EXT-X-KEY:METHOD=AES-128.+\n", "", content, 0, re.IGNORECASE | re.MULTILINE)
306
             content = re.sub("#EXT-X-KEY:METHOD=AES-128.+\n", "", content, 0, re.IGNORECASE | re.MULTILINE)
288
             headers2["content-length"] = "%s"%len(content)
308
             headers2["content-length"] = "%s"%len(content)
289
             r.headers["content-length"] = "%s"%len(content)
309
             r.headers["content-length"] = "%s"%len(content)
290
             #headers2['content-range'] = 'bytes 0-%s/%s'%(len(content)-1,len(content))
310
             #headers2['content-range'] = 'bytes 0-%s/%s'%(len(content)-1,len(content))
291
-            #self.send_headers(headers2)
292
-            self.send_headers(r.headers)
293
-            wfile.write(content)
294
-            wfile.close()
311
+            self.send_headers(headers2)
312
+            #self.send_headers(r.headers)
313
+            self.wfile.write(content)
314
+            self.wfile.close()
295
 
315
 
296
         # Content-Type: video/MP2T (encrypted)
316
         # Content-Type: video/MP2T (encrypted)
297
         elif r.headers["content-type"] == "video/MP2T" and key:
317
         elif r.headers["content-type"] == "video/MP2T" and key:
307
             print content[0:16]
327
             print content[0:16]
308
             print "Finish decode"
328
             print "Finish decode"
309
             self.send_headers(headers2)
329
             self.send_headers(headers2)
310
-            wfile.write(content)
311
-            wfile.close()
330
+            self.wfile.write(content)
331
+            self.wfile.close()
312
 
332
 
313
         else:
333
         else:
314
-            print "Return regular content"
334
+            if DEBUG: print "Return regular content"
315
             headers2["content-type"]  = r.headers["content-type"]
335
             headers2["content-type"]  = r.headers["content-type"]
316
             if "content-length" in r.headers:
336
             if "content-length" in r.headers:
317
                 headers2["content-length"] = r.headers["content-length"]
337
                 headers2["content-length"] = r.headers["content-length"]
318
             self.send_headers(r.headers)
338
             self.send_headers(r.headers)
339
+            #self.send_headers(headers2)
319
             CHUNK_SIZE = 4 * 1024
340
             CHUNK_SIZE = 4 * 1024
320
-            for chunk in r.iter_content(CHUNK_SIZE):
341
+            for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
321
                 try:
342
                 try:
322
                     #print "#",
343
                     #print "#",
323
-                    wfile.write(chunk)
344
+                    self.wfile.write(chunk)
324
                 except Exception as e:
345
                 except Exception as e:
325
                     print "Exception: ", str(e)
346
                     print "Exception: ", str(e)
326
                     return
347
                     return
327
             if DEBUG: print "File downloaded = "
348
             if DEBUG: print "File downloaded = "
328
-            wfile.close()
349
+            if "connection" in r.headers and r.headers["connection"]<>"keep-alive":
350
+                self.wfile.close()
329
             #time.sleep(1)
351
             #time.sleep(1)
330
             return
352
             return
331
 
353
 
337
             self.send_header(h, headers[h])
359
             self.send_header(h, headers[h])
338
         self.end_headers()
360
         self.end_headers()
339
 
361
 
362
+    def write_error(self,code):
363
+        print "***Error, code=%s" % code
364
+        self.send_response(code)
365
+        #self.send_headers(r.headers)
366
+        self.wfile.close() # TODO?
367
+        # self.fetch_offline()
368
+
369
+    def get_page_ses(self,url,ses,stream=True, headers=None):
370
+        headers= headers if headers else headers0
371
+        ses.headers.update(headers)
372
+        if DEBUG:
373
+            print "\n\n====================================================\n**get_page_ses\n%s"%url
374
+            print "**Server request headers: "
375
+            print_headers(ses.headers)
376
+        r = ses.get(url, stream=stream, verify=False)
377
+        if DEBUG:
378
+            print "**Server response:", r.status_code
379
+            print "**Server response headers: "
380
+            print_headers(r.headers)
381
+        return r
382
+
383
+    def get_page(self,url,headers=None):
384
+        if not headers:
385
+            headers = headers0
386
+        if DEBUG:
387
+            print "\n\n====================================================\n**get_page\n%s"%url
388
+            print "**Server request headers: "
389
+            print_headers(headers)
390
+        r = requests.get(url, headers=headers,stream=True)
391
+        if DEBUG:
392
+            print "**Server response:", r.status_code
393
+            print "**Server response headers: "
394
+            print_headers(r.headers)
395
+        return r
340
 
396
 
341
 class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
397
 class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
342
     """Handle requests in a separate thread."""
398
     """Handle requests in a separate thread."""
343
 
399
 
344
 def start(host = HOST_NAME, port = PORT_NUMBER):
400
 def start(host = HOST_NAME, port = PORT_NUMBER):
401
+    import ContentSources, util
402
+    global sources
403
+    sources = ContentSources.ContentSources(os.path.join(cur_directory, "sources"))
345
     httpd = ThreadedHTTPServer((host, port), StreamHandler)
404
     httpd = ThreadedHTTPServer((host, port), StreamHandler)
346
     print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
405
     print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
347
     try:
406
     try:

BIN
release/enigma2-plugin-extensions-playstream_0.6s.ipk View File


BIN
release/enigma2-plugin-extensions-playstream_0.6t.ipk View File


+ 3
- 0
sources/SourceBase.py View File

9
 import urllib2, urllib
9
 import urllib2, urllib
10
 import datetime, re, sys,os
10
 import datetime, re, sys,os
11
 import requests
11
 import requests
12
+import ssl
13
+ssl._create_default_https_context = ssl._create_unverified_context
14
+
12
 try:
15
 try:
13
     from requests.packages.urllib3.exceptions import InsecureRequestWarning
16
     from requests.packages.urllib3.exceptions import InsecureRequestWarning
14
     requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
17
     requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

+ 3
- 1
sources/YouTubeVideoUrl.py View File

8
 from urllib import urlencode
8
 from urllib import urlencode
9
 from urllib2 import urlopen, URLError
9
 from urllib2 import urlopen, URLError
10
 import sys
10
 import sys
11
+import ssl
12
+ssl._create_default_https_context = ssl._create_unverified_context
11
 
13
 
12
 #from Components.config import config
14
 #from Components.config import config
13
 
15
 
18
 		import ssl
20
 		import ssl
19
 		sslContext = ssl._create_unverified_context()
21
 		sslContext = ssl._create_unverified_context()
20
 	except:
22
 	except:
21
-		pass 
23
+		pass
22
 from jsinterp import JSInterpreter
24
 from jsinterp import JSInterpreter
23
 from swfinterp import SWFInterpreter
25
 from swfinterp import SWFInterpreter
24
 
26
 

+ 31
- 28
sources/cinemalive.py View File

15
 from SourceBase import SourceBase
15
 from SourceBase import SourceBase
16
 #from collections import OrderedDict
16
 #from collections import OrderedDict
17
 import os
17
 import os
18
+import ssl
19
+ssl._create_default_https_context = ssl._create_unverified_context
18
 
20
 
19
-#sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 
21
+
22
+#sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
20
 from resolver import resolve
23
 from resolver import resolve
21
 import util
24
 import util
22
 
25
 
24
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
27
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
25
 import HTMLParser
28
 import HTMLParser
26
 h = HTMLParser.HTMLParser()
29
 h = HTMLParser.HTMLParser()
27
-    
30
+
28
 class Source(SourceBase):
31
 class Source(SourceBase):
29
-    
32
+
30
     def __init__(self,country="",cfg_path=None):
33
     def __init__(self,country="",cfg_path=None):
31
         self.name = "cinemalive"
34
         self.name = "cinemalive"
32
         self.title = "cinemalive.tv"
35
         self.title = "cinemalive.tv"
44
 """)
47
 """)
45
         self.url = "https://cinemalive.tv/"
48
         self.url = "https://cinemalive.tv/"
46
         #self.login()
49
         #self.login()
47
-        
48
-            
50
+
51
+
49
     ######### Entry point ########
52
     ######### Entry point ########
50
     def get_content(self, data):
53
     def get_content(self, data):
51
         print "[cinemalive] get_content:", data
54
         print "[cinemalive] get_content:", data
52
-        source,data,path,plist,clist,params,qs = self.parse_data(data)     
55
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
53
         content=[]
56
         content=[]
54
         content.append(("..return", "back","","Return back"))
57
         content.append(("..return", "back","","Return back"))
55
-        
58
+
56
         if clist=="home":
59
         if clist=="home":
57
             content.extend([
60
             content.extend([
58
-                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),            
61
+                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),
59
                 ("Filmas latviski - visas", "cinemalive::filmaslatviski/visas/lapa/1","","Filmas latviski - visas"),
62
                 ("Filmas latviski - visas", "cinemalive::filmaslatviski/visas/lapa/1","","Filmas latviski - visas"),
60
                 ("Filmas angliski", "cinemalive::home_en","","Filmas angliski"),
63
                 ("Filmas angliski", "cinemalive::home_en","","Filmas angliski"),
61
                 ("Filmas latviski - jaunākās", "cinemalive::filmaslatviski/jaunakas/lapa/1","","Filmas latviski - jaunākās"),
64
                 ("Filmas latviski - jaunākās", "cinemalive::filmaslatviski/jaunakas/lapa/1","","Filmas latviski - jaunākās"),
68
                 data2 = item[0]+"/lapa/1"
71
                 data2 = item[0]+"/lapa/1"
69
                 img = self.img
72
                 img = self.img
70
                 desc = title
73
                 desc = title
71
-                content.append((title,self.name+"::"+data2,img,desc))      
74
+                content.append((title,self.name+"::"+data2,img,desc))
72
             return content
75
             return content
73
 
76
 
74
         elif clist=="home_en":
77
         elif clist=="home_en":
75
             content.extend([
78
             content.extend([
76
-                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),            
79
+                ("Search", "cinemalive::scripts/search.php?search={0}","","Search"),
77
                 ("Movies English - all", "cinemalive::moviesenglish/all/page/1","","Movies English - all"),
80
                 ("Movies English - all", "cinemalive::moviesenglish/all/page/1","","Movies English - all"),
78
                 ("Movies Latvian", "cinemalive::home","","Filmas latviski"),
81
                 ("Movies Latvian", "cinemalive::home","","Filmas latviski"),
79
                 ("Movies English - newest", "cinemalive::moviesenglish/newestmovies/page/1","","Movies English - newest"),
82
                 ("Movies English - newest", "cinemalive::moviesenglish/newestmovies/page/1","","Movies English - newest"),
86
                 data2 = item[0]+"/page/1"
89
                 data2 = item[0]+"/page/1"
87
                 img = self.img
90
                 img = self.img
88
                 desc = title
91
                 desc = title
89
-                content.append((title,self.name+"::"+data2,img,desc))      
92
+                content.append((title,self.name+"::"+data2,img,desc))
90
             return content
93
             return content
91
 
94
 
92
-    
95
+
93
         elif "search.php" in data:
96
         elif "search.php" in data:
94
-            
97
+
95
             r=self.call(path,params=params[1:],headers=self.headers2)
98
             r=self.call(path,params=params[1:],headers=self.headers2)
96
-            result = re.findall(r'<div class="results.+?<a href="https://cinemalive\.tv/(.+?)">.+?<img src="(.+?)".+?<span style="color:#bcbcbc">([^<]+)</span> <span style="color:#5a606d;font-size:12px;">([^<]+)</span><br/>.+?<p class="dec" style="font-size:12px; color:#777;line-height:14px;">([^<]+)</p>', r, re.DOTALL)            
99
+            result = re.findall(r'<div class="results.+?<a href="https://cinemalive\.tv/(.+?)">.+?<img src="(.+?)".+?<span style="color:#bcbcbc">([^<]+)</span> <span style="color:#5a606d;font-size:12px;">([^<]+)</span><br/>.+?<p class="dec" style="font-size:12px; color:#777;line-height:14px;">([^<]+)</p>', r, re.DOTALL)
97
             for item in result:
100
             for item in result:
98
                 title = item[2]
101
                 title = item[2]
99
                 title0 = re.sub(" \(\d+\)","",title)
102
                 title0 = re.sub(" \(\d+\)","",title)
105
                 data2 = item[0]
108
                 data2 = item[0]
106
                 img = item[1].replace("xs.","sm.")
109
                 img = item[1].replace("xs.","sm.")
107
                 desc = util.unescape(item[4])
110
                 desc = util.unescape(item[4])
108
-                content.append((title,self.name+"::"+data2,img,desc))            
111
+                content.append((title,self.name+"::"+data2,img,desc))
109
             return content
112
             return content
110
 
113
 
111
         elif clist in ("filmaslatviski","moviesenglish"):
114
         elif clist in ("filmaslatviski","moviesenglish"):
119
                 img = "https://cinemalive.tv/"+item[1]
122
                 img = "https://cinemalive.tv/"+item[1]
120
                 title = util.unescape(title)
123
                 title = util.unescape(title)
121
                 desc = title
124
                 desc = title
122
-                content.append((title,self.name+"::"+data2,img,desc)) 
125
+                content.append((title,self.name+"::"+data2,img,desc))
123
             m = re.search(r"""<a href='https://cinemalive\.tv/([^']+)' style="border-right:none;">»</a>""", r, re.DOTALL)
126
             m = re.search(r"""<a href='https://cinemalive\.tv/([^']+)' style="border-right:none;">»</a>""", r, re.DOTALL)
124
             if m:
127
             if m:
125
                 data2 = m.group(1)
128
                 data2 = m.group(1)
126
-                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))                                  
127
-            return content      
128
-         
129
+                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
130
+            return content
131
+
129
         else:
132
         else:
130
-            return content                            
131
-              
133
+            return content
134
+
132
     def is_video(self,data):
135
     def is_video(self,data):
133
-        source,data,path,plist,clist,params,qs = self.parse_data(data)        
136
+        source,data,path,plist,clist,params,qs = self.parse_data(data)
134
         if clist=="movie":
137
         if clist=="movie":
135
             return True
138
             return True
136
         else:
139
         else:
137
             return False
140
             return False
138
-                        
141
+
139
     def get_streams(self, data):
142
     def get_streams(self, data):
140
         print "[cinemalive] get_streams:", data
143
         print "[cinemalive] get_streams:", data
141
         source,data,path,plist,clist,params,qs = self.parse_data(data)
144
         source,data,path,plist,clist,params,qs = self.parse_data(data)
148
         title = title0.replace(" - Filma Online Latviski","").replace(" - Movie Online English HD","")
151
         title = title0.replace(" - Filma Online Latviski","").replace(" - Movie Online English HD","")
149
         desc = re.search('<p class="plot">(.+?)</p>', r).group(1)
152
         desc = re.search('<p class="plot">(.+?)</p>', r).group(1)
150
         img = "http://cinemalive.tv"+re.search('<img src="(.+?)" class="img-thumbnail"', r).group(1)
153
         img = "http://cinemalive.tv"+re.search('<img src="(.+?)" class="img-thumbnail"', r).group(1)
151
-        
154
+
152
         m = re.search(r'<video id=.+?<source src="([^"]+\.mp4)"', r, re.DOTALL)
155
         m = re.search(r'<video id=.+?<source src="([^"]+\.mp4)"', r, re.DOTALL)
153
         if m:
156
         if m:
154
             s = util.item()
157
             s = util.item()
157
             s["desc"] = util.unescape(desc)
160
             s["desc"] = util.unescape(desc)
158
             s["img"] = img
161
             s["img"] = img
159
             s["type"] = self.stream_type(s["url"])
162
             s["type"] = self.stream_type(s["url"])
160
-            s["lang"] = lang 
163
+            s["lang"] = lang
161
             return [s]
164
             return [s]
162
-        
165
+
163
         #m = re.search('<div class="viboom-overroll"><iframe src="([^"]+)"', r)
166
         #m = re.search('<div class="viboom-overroll"><iframe src="([^"]+)"', r)
164
         #if m:
167
         #if m:
165
         result = re.findall('<div id="video_container"><iframe.+?src="(.+?)"', r)
168
         result = re.findall('<div id="video_container"><iframe.+?src="(.+?)"', r)
180
                     s["img"] = img
183
                     s["img"] = img
181
                     s["type"]= self.stream_type(s["url"])
184
                     s["type"]= self.stream_type(s["url"])
182
                     s["lang"] = lang2
185
                     s["lang"] = lang2
183
-                    streams.append(s)  
186
+                    streams.append(s)
184
             return streams
187
             return streams
185
         else:
188
         else:
186
             return []
189
             return []
187
 
190
 
188
-                    
191
+
189
 if __name__ == "__main__":
192
 if __name__ == "__main__":
190
     country= "lv"
193
     country= "lv"
191
     c = Source(country)
194
     c = Source(country)

+ 1
- 0
sources/config.py View File

23
         self.read_streams()
23
         self.read_streams()
24
 
24
 
25
     def get_content(self, data):
25
     def get_content(self, data):
26
+        print "[config] get_content",data
26
         self.read_streams()
27
         self.read_streams()
27
         if "::" in data:
28
         if "::" in data:
28
             data = data.split("::")[1]
29
             data = data.split("::")[1]

BIN
sources/config.pyc View File


+ 3
- 0
sources/euronews.py View File

15
 from collections import OrderedDict
15
 from collections import OrderedDict
16
 from SourceBase import SourceBase
16
 from SourceBase import SourceBase
17
 import util
17
 import util
18
+import ssl
19
+ssl._create_default_https_context = ssl._create_unverified_context
20
+
18
 
21
 
19
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
22
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
20
 import HTMLParser
23
 import HTMLParser

+ 21
- 7
sources/filmix.py View File

17
 import base64
17
 import base64
18
 from collections import OrderedDict
18
 from collections import OrderedDict
19
 import sys
19
 import sys
20
+import ssl
21
+ssl._create_default_https_context = ssl._create_unverified_context
22
+
20
 try:
23
 try:
21
     import util
24
     import util
22
 except:
25
 except:
135
                 for i,ep in enumerate(js["playlist"]):
138
                 for i,ep in enumerate(js["playlist"]):
136
                     title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
139
                     title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
137
                     serie = js["playlist"][i]["comment"].encode("utf8")
140
                     serie = js["playlist"][i]["comment"].encode("utf8")
138
-                    data2 = data+"?s=%s"%(i+1)
141
+                    if "file" in ep and ep["file"]:
142
+                        data2 = data+"?e=%s"%(i+1)
143
+                    else:
144
+                        data2 = data+"?s=%s"%(i+1)
139
                     desc = serie +"\n"+desc0
145
                     desc = serie +"\n"+desc0
140
                     content.append((title,self.name+"::"+data2,img,desc))
146
                     content.append((title,self.name+"::"+data2,img,desc))
141
             return content
147
             return content
190
 
196
 
191
     def is_video(self,data):
197
     def is_video(self,data):
192
         source,data,path,plist,clist,params,qs = self.parse_data(data)
198
         source,data,path,plist,clist,params,qs = self.parse_data(data)
193
-        if clist == "play" and "s=" in data and "e=" in data:
199
+        if clist == "play" and "e=" in data:
194
             return True
200
             return True
195
         elif clist=="play" and not params:
201
         elif clist=="play" and not params:
196
             r = self.call(path)
202
             r = self.call(path)
263
             js = self._http_request(pl_link)
269
             js = self._http_request(pl_link)
264
             js = self.decode_uppod_text(js)
270
             js = self.decode_uppod_text(js)
265
             js = json.loads(js)
271
             js = json.loads(js)
266
-            if "s" in qs and "e" in qs:
267
-                s = int(qs["s"])
272
+            if "e" in qs:
273
+                if "s" in qs:
274
+                    s = int(qs["s"])
275
+                else:
276
+                    s = None
268
                 e = int(qs["e"])
277
                 e = int(qs["e"])
269
-                serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
270
-                title = title0+" - "+ serie
271
-                url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
278
+                if s: # sezona + epizode
279
+                    serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
280
+                    title = title0+" - "+ serie
281
+                    url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
282
+                else: # tikai epizode, nav sezonas
283
+                    title = title0 +" - "+js["playlist"][e-1]["comment"].encode("utf8")
284
+                    serie = js["playlist"][e-1]["comment"].encode("utf8")
285
+                    url0 = js["playlist"][e-1]["file"].encode("utf8")
272
                 streams2 = self.get_streams2(url0)
286
                 streams2 = self.get_streams2(url0)
273
                 for st in streams2:
287
                 for st in streams2:
274
                     stream = util.item()
288
                     stream = util.item()

BIN
sources/filmix.pyc View File


+ 8
- 5
sources/filmon.py View File

13
 import urllib2, urllib
13
 import urllib2, urllib
14
 import datetime, re, sys
14
 import datetime, re, sys
15
 from SourceBase import SourceBase
15
 from SourceBase import SourceBase
16
+import ssl
17
+ssl._create_default_https_context = ssl._create_unverified_context
18
+
16
 
19
 
17
 API_URL = 'http://www.filmon.com/'
20
 API_URL = 'http://www.filmon.com/'
18
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
21
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
44
     def get_content(self, data):
47
     def get_content(self, data):
45
         print "[filmon] get_content:", data
48
         print "[filmon] get_content:", data
46
         if "::" in data:
49
         if "::" in data:
47
-            data = data.split("::")[1] 
50
+            data = data.split("::")[1]
48
         path = data.split("?")[0]
51
         path = data.split("?")[0]
49
         clist = path.split("/")[0]
52
         clist = path.split("/")[0]
50
         params = data[data.find("?"):] if "?" in data else ""
53
         params = data[data.find("?"):] if "?" in data else ""
81
         ### TV group channels ###
84
         ### TV group channels ###
82
         elif clist=="group":
85
         elif clist=="group":
83
             if "id" in qs:
86
             if "id" in qs:
84
-                group_id = qs["id"] 
87
+                group_id = qs["id"]
85
             else:
88
             else:
86
                 return content
89
                 return content
87
             group = None
90
             group = None
102
         ### TV Channel ###
105
         ### TV Channel ###
103
         elif clist == "channel" or clist == "video":
106
         elif clist == "channel" or clist == "video":
104
             if "id" in qs:
107
             if "id" in qs:
105
-                ch_id = qs["id"] 
108
+                ch_id = qs["id"]
106
             else:
109
             else:
107
                 return ("No stream found %s"%data,"","","No stream found")
110
                 return ("No stream found %s"%data,"","","No stream found")
108
             ch = self.get_tv_channel_info(ch_id)
111
             ch = self.get_tv_channel_info(ch_id)
135
                 img = gr["images"][0]["url"].encode("utf8")
138
                 img = gr["images"][0]["url"].encode("utf8")
136
                 desc = gr["description"].encode("utf8") if gr["description"] else title
139
                 desc = gr["description"].encode("utf8") if gr["description"] else title
137
                 content.append((title,self.name+"::"+data2,img,desc))
140
                 content.append((title,self.name+"::"+data2,img,desc))
138
-            return content           
141
+            return content
139
 
142
 
140
         ### VOD genre videos ###
143
         ### VOD genre videos ###
141
         elif path == "vod/search":
144
         elif path == "vod/search":
152
             if start_index+js["total"]<js["total_found"]:
155
             if start_index+js["total"]<js["total_found"]:
153
                 start_index += 30
156
                 start_index += 30
154
                 data2 = re.sub("start_index=\d+","start_index=%s"%start_index,data) if "start_index" in qs else data +"&start_index=30"
157
                 data2 = re.sub("start_index=\d+","start_index=%s"%start_index,data) if "start_index" in qs else data +"&start_index=30"
155
-                content.append(("Next page",self.name+"::"+data2,"","Next page"))                                            
158
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
156
             return content
159
             return content
157
 
160
 
158
         ### VOD video sigle/series ###
161
         ### VOD video sigle/series ###

+ 4
- 1
sources/iplayer.py View File

16
 from SourceBase import SourceBase, stream_type
16
 from SourceBase import SourceBase, stream_type
17
 import util
17
 import util
18
 from collections import OrderedDict
18
 from collections import OrderedDict
19
+import ssl
20
+ssl._create_default_https_context = ssl._create_unverified_context
21
+
19
 
22
 
20
 API_URL = 'https://m.lattelecom.tv/'
23
 API_URL = 'https://m.lattelecom.tv/'
21
 user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
24
 user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
498
             return r.content
501
             return r.content
499
 
502
 
500
         except Exception as ex:
503
         except Exception as ex:
501
-            if ex.code==403:
504
+            if "code" in dir(ex) and ex.code==403:
502
                 return ex.read()
505
                 return ex.read()
503
             else:
506
             else:
504
                 return None
507
                 return None

BIN
sources/iplayer.pyc View File


+ 7
- 3
sources/lmt.py View File

12
 import requests
12
 import requests
13
 import datetime, re, sys,os
13
 import datetime, re, sys,os
14
 import ConfigParser
14
 import ConfigParser
15
-from collections import OrderedDict
15
+import ssl
16
+ssl._create_default_https_context = ssl._create_unverified_context
17
+
18
+
16
 from SourceBase import SourceBase
19
 from SourceBase import SourceBase
20
+
17
 import resolver
21
 import resolver
18
 try:
22
 try:
19
     import util
23
     import util
152
             stream["desc"] = desc
156
             stream["desc"] = desc
153
             stream["resolver"] = "lmt"
157
             stream["resolver"] = "lmt"
154
             return [stream]
158
             return [stream]
155
-        elif re.search('src="http*://www.youtube.com/embed/(\w+).*"',r):
156
-            m = re.search('src="http*://www.youtube.com/embed/(\w+).*"',r)
159
+        elif re.search('src="http*://www.youtube.com/embed/([\w-]+).*"',r):
160
+            m = re.search('src="http*://www.youtube.com/embed/([\w-]+).*"',r)
157
             video_id = m.group(1)
161
             video_id = m.group(1)
158
             #http://www.youtube.com/embed/RUyQ_JJ6A84?rel=0&fs=1&wmode=transparent
162
             #http://www.youtube.com/embed/RUyQ_JJ6A84?rel=0&fs=1&wmode=transparent
159
             data2 = YouTubeVideoUrl().extract(video_id)
163
             data2 = YouTubeVideoUrl().extract(video_id)

+ 31
- 17
sources/ltc.py View File

16
 from SourceBase import SourceBase, stream_type
16
 from SourceBase import SourceBase, stream_type
17
 import util
17
 import util
18
 from collections import OrderedDict
18
 from collections import OrderedDict
19
+import ssl
20
+ssl._create_default_https_context = ssl._create_unverified_context
19
 
21
 
20
 API_URL = 'https://m.lattelecom.tv/'
22
 API_URL = 'https://m.lattelecom.tv/'
21
 user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
23
 user_agent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3"
72
 
74
 
73
     def get_content(self, data):
75
     def get_content(self, data):
74
         print "[ltc] get_content:", data
76
         print "[ltc] get_content:", data
75
-        if "::" in data:
76
-            data = data.split("::")[1]
77
-        path = data.split("?")[0]
78
-        clist = path.split("/")[0]
79
-        params = data[data.find("?"):] if "?" in data else ""
80
-        qs = dict(map(lambda x:x.split("="),re.findall("\w+=[\w-]+",params)))
81
-        #lang = qs["lang"] if "lang" in qs else self.country
77
+        source, data, path, plist, clist, params, qs = self.parse_data(data)
82
 
78
 
83
         content=[]
79
         content=[]
84
         content.append(("..return", "back","","Return back"))
80
         content.append(("..return", "back","","Return back"))
333
                 data2 = item["url"][1:].encode("utf8")
329
                 data2 = item["url"][1:].encode("utf8")
334
                 if data2[-1]=="/": data2=data2[:-1]
330
                 if data2[-1]=="/": data2=data2[:-1]
335
                 if "/raidijumi/" in data2:
331
                 if "/raidijumi/" in data2:
336
-                    data2 += "?series"
332
+                    data2 += "?series" # TODO
337
                 img = "https://www.lattelecom.tv"+item["image"].encode("utf8")
333
                 img = "https://www.lattelecom.tv"+item["image"].encode("utf8")
338
                 desc = "%s\n%s"%(title,item["genre"].encode("utf8"))
334
                 desc = "%s\n%s"%(title,item["genre"].encode("utf8"))
339
                 content.append((title,self.name+"::"+data2,img,desc))
335
                 content.append((title,self.name+"::"+data2,img,desc))
345
             return content
341
             return content
346
 
342
 
347
         ### Sērijas
343
         ### Sērijas
348
-        elif clist=="videonoma" and  params=="?series":
344
+        elif clist=="videonoma" and  (params=="?series" or "season_nr" in qs):
349
             url = "https://www.lattelecom.tv/"+path
345
             url = "https://www.lattelecom.tv/"+path
350
             r = self._http_request(url,headers=self.headers2)
346
             r = self._http_request(url,headers=self.headers2)
351
             if not r:
347
             if not r:
352
                 return content
348
                 return content
353
             m = re.search('<div class="movie_details"><div class="movie_titles"><div class="en">([^<]+?)</div>', r, re.DOTALL | re.IGNORECASE)
349
             m = re.search('<div class="movie_details"><div class="movie_titles"><div class="en">([^<]+?)</div>', r, re.DOTALL | re.IGNORECASE)
354
             raidijums = m.group(1) + " - " if m else ""
350
             raidijums = m.group(1) + " - " if m else ""
351
+            img0 = re.search('<meta name="og:image" content="([^"]+)">', r).group(1) if re.search('<meta name="dr:say:img" content="([^"]+)">', r) else ""
352
+
353
+            m = re.search('<ul class="episode_choiser".+?(<li>[^<]+</li>)+</ul>', r)
354
+            if m and not "season_nr" in qs: # ir sezonas
355
+                for s,it in enumerate(re.findall('<li>([^<]+)</li>', m.group())):
356
+                    title = "%s%s" % (raidijums, it)
357
+                    se = re.search("(\d+)",it).group(1)
358
+                    data2 = path+"?season_nr=%s"%(se)
359
+                    img = img0
360
+                    desc = title
361
+                    content.append((title, self.name + "::" + data2, img, desc))
362
+                return content
363
+
355
             m = re.search('data-bid="([^"]+)',r)
364
             m = re.search('data-bid="([^"]+)',r)
356
             if m:
365
             if m:
357
-                url = "https://www.lattelecom.tv/episode-loader.json?bcast_ids=%s"%(m.group(1))
366
+                if not "season_nr" in qs:
367
+                    qs["season_nr"]="0"
368
+                url = "https://www.lattelecom.tv/episode-loader.json?bcast_ids=%s&season_nr=%s"%(m.group(1),qs["season_nr"])
358
                 r = self._http_request(url,headers=self.headers2)
369
                 r = self._http_request(url,headers=self.headers2)
359
                 i1 = r.find('{"total":')
370
                 i1 = r.find('{"total":')
360
                 i2 = r.find('}<div class=')
371
                 i2 = r.find('}<div class=')
439
                     desc = title
450
                     desc = title
440
             elif vtype == "record-streams":
451
             elif vtype == "record-streams":
441
                 epg = self.get_epg_id(vid)
452
                 epg = self.get_epg_id(vid)
442
-                title = epg["title"].encode("utf8")
443
-                t1 = datetime.datetime.fromtimestamp(int(epg["unix_start"])).strftime('%H:%M')
444
-                t2 = datetime.datetime.fromtimestamp(int(epg["unix_stop"])).strftime('%H:%M')
445
-                date = epg["date"]
446
-                title = "%s (%s %s-%s)"%(title,date,t1,t2)
447
-                desc = epg["description"]
453
+                if epg:
454
+                    title = epg["title"].encode("utf8")
455
+                    t1 = datetime.datetime.fromtimestamp(int(epg["unix_start"])).strftime('%H:%M')
456
+                    t2 = datetime.datetime.fromtimestamp(int(epg["unix_stop"])).strftime('%H:%M')
457
+                    date = epg["date"]
458
+                    title = "%s (%s %s-%s)"%(title,date,t1,t2)
459
+                    desc = epg["description"]
460
+                else:
461
+                    title = desc = data
448
 
462
 
449
             streams = []
463
             streams = []
450
             for s in r["data"]:
464
             for s in r["data"]:
551
             self.epg_id={}
565
             self.epg_id={}
552
             self.epg_id2={}
566
             self.epg_id2={}
553
             self.epg_date={}
567
             self.epg_date={}
554
-            slef.epg_ch={}
568
+            self.epg_ch={}
555
             self.epg_cat={}
569
             self.epg_cat={}
556
         if not date in self.epgdates:
570
         if not date in self.epgdates:
557
             r=self.call("tv/epg/?daynight=%s"%date)
571
             r=self.call("tv/epg/?daynight=%s"%date)

BIN
sources/ltc.pyc View File


+ 3
- 0
sources/movieplace.py View File

14
 import ConfigParser
14
 import ConfigParser
15
 from collections import OrderedDict
15
 from collections import OrderedDict
16
 from SourceBase import SourceBase
16
 from SourceBase import SourceBase
17
+import ssl
18
+ssl._create_default_https_context = ssl._create_unverified_context
19
+
17
 import resolver
20
 import resolver
18
 try:
21
 try:
19
     import util
22
     import util

+ 41
- 38
sources/mtgplay.py View File

15
 # coding=utf8
15
 # coding=utf8
16
 import urllib2, urllib
16
 import urllib2, urllib
17
 import datetime, re, sys
17
 import datetime, re, sys
18
+import ssl
19
+ssl._create_default_https_context = ssl._create_unverified_context
20
+
18
 from SourceBase import SourceBase
21
 from SourceBase import SourceBase
19
 
22
 
20
 API_URL = 'http://playapi.mtgx.tv/v3/'
23
 API_URL = 'http://playapi.mtgx.tv/v3/'
35
 
38
 
36
 
39
 
37
 class Source(SourceBase):
40
 class Source(SourceBase):
38
-    
41
+
39
     def __init__(self,country="lv",cfg_path=None):
42
     def __init__(self,country="lv",cfg_path=None):
40
         self.name = "mtgplay"
43
         self.name = "mtgplay"
41
         self.title = "Skaties.lv (TV3)"
44
         self.title = "Skaties.lv (TV3)"
42
         self.img = "http://skaties.lv/touch-icon-192x192.png"
45
         self.img = "http://skaties.lv/touch-icon-192x192.png"
43
         self.desc = "MTG skaties.lv satura skatīšanās (LNT,TV3, TV6 u.c.)"
46
         self.desc = "MTG skaties.lv satura skatīšanās (LNT,TV3, TV6 u.c.)"
44
-        
47
+
45
         self.country=country
48
         self.country=country
46
         self.pic_size = "327x250" #"1000x765"
49
         self.pic_size = "327x250" #"1000x765"
47
-        
50
+
48
     def get_content(self, data):
51
     def get_content(self, data):
49
         print "[mtgplay] get_content:", data
52
         print "[mtgplay] get_content:", data
50
         if "::" in data:
53
         if "::" in data:
51
-            data = data.split("::")[1]        
54
+            data = data.split("::")[1]
52
         if "/" in data:
55
         if "/" in data:
53
             citem,cid = data.split("/")
56
             citem,cid = data.split("/")
54
             clist = ""
57
             clist = ""
57
             qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",data)))
60
             qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",data)))
58
             citem,cid = ("","")
61
             citem,cid = ("","")
59
             self.country = qs["country"] if "country" in qs else "lv"
62
             self.country = qs["country"] if "country" in qs else "lv"
60
-        
63
+
61
         content=[]
64
         content=[]
62
         content.append(("..return", "back","","Return back"))
65
         content.append(("..return", "back","","Return back"))
63
-        
66
+
64
         if clist=="home":
67
         if clist=="home":
65
             content.extend([
68
             content.extend([
66
-                #("Search", "mtgplay::meklet?country=%s&term={0}"%self.country,"","Search videos"), ### TODO                
69
+                #("Search", "mtgplay::meklet?country=%s&term={0}"%self.country,"","Search videos"), ### TODO
67
                 ("TV Live", "mtgplay::videos?country=%s&order=title&type=live"%self.country,"","TV live streams(not always available)"),
70
                 ("TV Live", "mtgplay::videos?country=%s&order=title&type=live"%self.country,"","TV live streams(not always available)"),
68
                 ("Last videos", "mtgplay::videos?country=%s&order=-airdate"%self.country,"","Last aired videos"),
71
                 ("Last videos", "mtgplay::videos?country=%s&order=-airdate"%self.country,"","Last aired videos"),
69
                 ("Categories", "mtgplay::categories?country=%s&order=name"%self.country,"","Categories"),
72
                 ("Categories", "mtgplay::categories?country=%s&order=name"%self.country,"","Categories"),
70
                 ("Channels", "mtgplay::channels?country=%s&order=id"%self.country,"","TV channels"),
73
                 ("Channels", "mtgplay::channels?country=%s&order=id"%self.country,"","TV channels"),
71
-                ("Programs by name", "mtgplay::formats?country=%s&order=-title"%self.country,"","Programs by name"),             
72
-                ("Programs by popularity", "mtgplay::formats?country=%s&order=-popularity"%self.country,"","Programs by popularity")             
74
+                ("Programs by name", "mtgplay::formats?country=%s&order=-title"%self.country,"","Programs by name"),
75
+                ("Programs by popularity", "mtgplay::formats?country=%s&order=-popularity"%self.country,"","Programs by popularity")
73
             ])
76
             ])
74
             return content
77
             return content
75
-        
78
+
76
         r = self.call(data)
79
         r = self.call(data)
77
         if not r:
80
         if not r:
78
             content.append(("Error", "","","Error reading '%s'"%data))
81
             content.append(("Error", "","","Error reading '%s'"%data))
79
             return content
82
             return content
80
-        
83
+
81
         if clist:
84
         if clist:
82
             if r["_links"].has_key("prev"):
85
             if r["_links"].has_key("prev"):
83
                 data2 = r["_links"]["prev"]["href"].replace(API_URL,"")
86
                 data2 = r["_links"]["prev"]["href"].replace(API_URL,"")
84
                 content.append(("Previous page", self.name+"::"+data2.encode("utf8"),"", "Goto previous page"))
87
                 content.append(("Previous page", self.name+"::"+data2.encode("utf8"),"", "Goto previous page"))
85
-                
88
+
86
             if "_embedded" in r:
89
             if "_embedded" in r:
87
                 for item in r["_embedded"][clist]:
90
                 for item in r["_embedded"][clist]:
88
                     if "title" in item:
91
                     if "title" in item:
89
                         title = item["title"]
92
                         title = item["title"]
90
                     elif "name" in item:
93
                     elif "name" in item:
91
                         title = item["name"]
94
                         title = item["name"]
92
-                    #data2 = self.name+"::"+"%s/%s"%(clist,item["id"]) 
95
+                    #data2 = self.name+"::"+"%s/%s"%(clist,item["id"])
93
                     img = item["_links"]["image"]["href"].replace("{size}",self.pic_size) if "image" in item["_links"] else ""
96
                     img = item["_links"]["image"]["href"].replace("{size}",self.pic_size) if "image" in item["_links"] else ""
94
                     desc = item["summary"] if "summary" in item and item["summary"] else ""
97
                     desc = item["summary"] if "summary" in item and item["summary"] else ""
95
-                    
98
+
96
                     ### Video ###
99
                     ### Video ###
97
                     if clist=="videos":
100
                     if clist=="videos":
98
-                        data2 = "videos/%s"%item["id"]                            
101
+                        data2 = "videos/%s"%item["id"]
99
                         summary = item["summary"] if item["summary"] else ""
102
                         summary = item["summary"] if item["summary"] else ""
100
                         air_at = item["broadcasts"][0]["air_at"] if "broadcasts" in item and len(item["broadcasts"])>0 and "air_at" in item["broadcasts"][0] else ""
103
                         air_at = item["broadcasts"][0]["air_at"] if "broadcasts" in item and len(item["broadcasts"])>0 and "air_at" in item["broadcasts"][0] else ""
101
                         if not air_at:
104
                         if not air_at:
111
                             views = views+" views"
114
                             views = views+" views"
112
                         except: views = ""
115
                         except: views = ""
113
                         desc = "Aired: %s %s\nDuration: %s %s\n\n%s"%(air_at, playable_to,duration,views,summary)
116
                         desc = "Aired: %s %s\nDuration: %s %s\n\n%s"%(air_at, playable_to,duration,views,summary)
114
-                        
115
-                    ### Categories ###     
117
+
118
+                    ### Categories ###
116
                     elif clist == "categories":
119
                     elif clist == "categories":
117
                         #data2 = item["_links"]["formats"]["href"].replace(API_URL,"")
120
                         #data2 = item["_links"]["formats"]["href"].replace(API_URL,"")
118
                         data2 = "formats?category=%s"%item["id"]
121
                         data2 = "formats?category=%s"%item["id"]
120
                         if "category" in qs: data2 += "&category="+qs["category"]
123
                         if "category" in qs: data2 += "&category="+qs["category"]
121
                         if "channel" in qs: data2 += "&channel="+qs["channel"]
124
                         if "channel" in qs: data2 += "&channel="+qs["channel"]
122
                         data2 += "&order=title"
125
                         data2 += "&order=title"
123
-                        
124
-                    ### Channels ###     
126
+
127
+                    ### Channels ###
125
                     elif clist == "channels":
128
                     elif clist == "channels":
126
                         #data2 = item["_links"]["categories"]["href"].replace(API_URL,"")
129
                         #data2 = item["_links"]["categories"]["href"].replace(API_URL,"")
127
                         data2 = "categories?channel=%s"%item["id"]
130
                         data2 = "categories?channel=%s"%item["id"]
129
                         if "category" in qs: data2 += "&category="+qs["category"]
132
                         if "category" in qs: data2 += "&category="+qs["category"]
130
                         if "channel" in qs: data2 += "&channel="+qs["channel"]
133
                         if "channel" in qs: data2 += "&channel="+qs["channel"]
131
                         data2 += "&order=name"
134
                         data2 += "&order=name"
132
-                        
133
-                    ### Formats (programs) ###     
135
+
136
+                    ### Formats (programs) ###
134
                     elif clist == "formats":
137
                     elif clist == "formats":
135
                         #data2 = item["_links"]["videos"]["href"].replace(API_URL,"")
138
                         #data2 = item["_links"]["videos"]["href"].replace(API_URL,"")
136
                         data2 = "seasons?format=%s"%item["id"]
139
                         data2 = "seasons?format=%s"%item["id"]
141
                         air_at = item["latest_video"]["publish_at"] if "publish_at" in item["latest_video"] else ""
144
                         air_at = item["latest_video"]["publish_at"] if "publish_at" in item["latest_video"] else ""
142
                         air_at = air_at[0:16].replace("T"," ") if air_at else ""
145
                         air_at = air_at[0:16].replace("T"," ") if air_at else ""
143
                         if air_at:
146
                         if air_at:
144
-                            desc = "Last video: %s\n"%air_at + desc                        
145
-                        
146
-                    ### Seasons ###     
147
+                            desc = "Last video: %s\n"%air_at + desc
148
+
149
+                    ### Seasons ###
147
                     elif clist == "seasons":
150
                     elif clist == "seasons":
148
                         #data2 = item["_links"]["videos"]["href"].replace(API_URL,"")
151
                         #data2 = item["_links"]["videos"]["href"].replace(API_URL,"")
149
                         data2 = "videos?season=%s"%item["id"]
152
                         data2 = "videos?season=%s"%item["id"]
151
                         #if "category" in qs: data2 += "&category="+qs["category"]
154
                         #if "category" in qs: data2 += "&category="+qs["category"]
152
                         #if "channel" in qs: data2 += "&channel="+qs["channel"]
155
                         #if "channel" in qs: data2 += "&channel="+qs["channel"]
153
                         data2 += "&order=title"
156
                         data2 += "&order=title"
154
-                        
157
+
155
                         summary = item["summary"] if "summary" in item and item["summary"] else ""
158
                         summary = item["summary"] if "summary" in item and item["summary"] else ""
156
                         try:
159
                         try:
157
                             latest_video = item["latest_video"]["publish_at"]
160
                             latest_video = item["latest_video"]["publish_at"]
158
                             latest_video = latest_video[0:16].replace("T"," ")
161
                             latest_video = latest_video[0:16].replace("T"," ")
159
                         except: latest_video = ""
162
                         except: latest_video = ""
160
                         desc = ("%s\nLatest video: %s"%(summary,latest_video))
163
                         desc = ("%s\nLatest video: %s"%(summary,latest_video))
161
-                                      
164
+
162
                     content.append((title.encode("utf8"),self.name+"::"+data2.encode("utf8"),img.encode("utf8"),desc.encode("utf8")))
165
                     content.append((title.encode("utf8"),self.name+"::"+data2.encode("utf8"),img.encode("utf8"),desc.encode("utf8")))
163
-                    
166
+
164
             if r["_links"].has_key("next"):
167
             if r["_links"].has_key("next"):
165
                 data2 = r["_links"]["next"]["href"].replace(API_URL,"").encode("utf8")
168
                 data2 = r["_links"]["next"]["href"].replace(API_URL,"").encode("utf8")
166
                 content.append(("Next page", self.name+"::"+data2.encode("utf8"),"","Goto next page"))
169
                 content.append(("Next page", self.name+"::"+data2.encode("utf8"),"","Goto next page"))
167
-                
170
+
168
         elif citem:
171
         elif citem:
169
             item = r
172
             item = r
170
             if "title" in item:
173
             if "title" in item:
171
                 title = item["title"]
174
                 title = item["title"]
172
             elif "name" in item:
175
             elif "name" in item:
173
                 title = r["name"]
176
                 title = r["name"]
174
-            #data2 = self.name+"::"+"%s/%s"%(clist,item["id"]) 
177
+            #data2 = self.name+"::"+"%s/%s"%(clist,item["id"])
175
             img = item["_links"]["image"]["href"].replace("{size}",self.pic_size) if "image" in item["_links"] else ""
178
             img = item["_links"]["image"]["href"].replace("{size}",self.pic_size) if "image" in item["_links"] else ""
176
             desc = item["summary"] if "summary" in item and item["summary"] else ""
179
             desc = item["summary"] if "summary" in item and item["summary"] else ""
177
-            
180
+
178
             dd = "videos/stream/%s"%cid
181
             dd = "videos/stream/%s"%cid
179
             r2 = self.call(dd)
182
             r2 = self.call(dd)
180
             if "streams" in r2 and "hls" in r2["streams"]:
183
             if "streams" in r2 and "hls" in r2["streams"]:
182
                 content = (title.encode("utf8"),data2.encode("utf8"),img.encode("utf8"),desc.encode("utf8"))
185
                 content = (title.encode("utf8"),data2.encode("utf8"),img.encode("utf8"),desc.encode("utf8"))
183
             elif "msg" in r2:
186
             elif "msg" in r2:
184
                 content = (r2["msg"].encode("utf8"),"","","")
187
                 content = (r2["msg"].encode("utf8"),"","","")
185
-            else: 
186
-                content = ("Error getting stream","","","")            
187
-            
188
+            else:
189
+                content = ("Error getting stream","","","")
190
+
188
         else:
191
         else:
189
             pass
192
             pass
190
         return content
193
         return content
191
-    
194
+
192
     def is_video(self,data):
195
     def is_video(self,data):
193
         if "::" in data:
196
         if "::" in data:
194
             data = data.split("::")[1]
197
             data = data.split("::")[1]
197
             return True
200
             return True
198
         else:
201
         else:
199
             return False
202
             return False
200
-    
201
-    def get_stream(self,id):   
203
+
204
+    def get_stream(self,id):
202
         dd = "videos/stream/%s"%id
205
         dd = "videos/stream/%s"%id
203
         r2 = self.call(dd)
206
         r2 = self.call(dd)
204
         if "streams" in r2 and "hls" in r2["streams"]:
207
         if "streams" in r2 and "hls" in r2["streams"]:
206
         else:
209
         else:
207
             data2 = ""
210
             data2 = ""
208
         return data2.encode("utf8")
211
         return data2.encode("utf8")
209
-        
212
+
210
     def call_all(self, endpoint, params = None):
213
     def call_all(self, endpoint, params = None):
211
         url = API_URL % (endpoint)
214
         url = API_URL % (endpoint)
212
         if params:
215
         if params:
229
                 url = content["_links"]["next"]["href"]
232
                 url = content["_links"]["next"]["href"]
230
             else: break
233
             else: break
231
         return result
234
         return result
232
-    
235
+
233
     def call(self, data,headers=headers0):
236
     def call(self, data,headers=headers0):
234
         url = API_URL + data
237
         url = API_URL + data
235
         #print "[TVPlay Api] url: ",url
238
         #print "[TVPlay Api] url: ",url

+ 24
- 21
sources/play24.py View File

12
 
12
 
13
 import urllib2, urllib
13
 import urllib2, urllib
14
 import datetime, re, sys
14
 import datetime, re, sys
15
+import ssl
16
+ssl._create_default_https_context = ssl._create_unverified_context
17
+
15
 from SourceBase import SourceBase
18
 from SourceBase import SourceBase
16
 
19
 
17
 API_URL = 'http://replay.lsm.lv/'
20
 API_URL = 'http://replay.lsm.lv/'
21
 """)
24
 """)
22
 import HTMLParser
25
 import HTMLParser
23
 h = HTMLParser.HTMLParser()
26
 h = HTMLParser.HTMLParser()
24
-    
27
+
25
 class Source(SourceBase):
28
 class Source(SourceBase):
26
-    
29
+
27
     def __init__(self,country="lv",cfg_path=None):
30
     def __init__(self,country="lv",cfg_path=None):
28
         self.name = "play24"
31
         self.name = "play24"
29
         self.title = "Play24.lv"
32
         self.title = "Play24.lv"
30
         self.img = "http://play24.lv/images/play24-logo-black.png"
33
         self.img = "http://play24.lv/images/play24-logo-black.png"
31
         self.desc = "play24.lv (Riga24TV) satura skatīšanās"
34
         self.desc = "play24.lv (Riga24TV) satura skatīšanās"
32
-        
35
+
33
         self.country=country
36
         self.country=country
34
-        
37
+
35
     def get_content(self, data):
38
     def get_content(self, data):
36
         print "[play24] get_content:", data
39
         print "[play24] get_content:", data
37
         if "::" in data:
40
         if "::" in data:
38
-            data = data.split("::")[1] 
41
+            data = data.split("::")[1]
39
         path = data.split("?")[0]
42
         path = data.split("?")[0]
40
         clist = path.split("/")[0]
43
         clist = path.split("/")[0]
41
         params = data[data.find("?"):] if "?" in data else ""
44
         params = data[data.find("?"):] if "?" in data else ""
42
         qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",params)))
45
         qs = dict(map(lambda x:x.split("="),re.findall("\w+=\w+",params)))
43
         lang = qs["lang"] if "lang" in qs else self.country
46
         lang = qs["lang"] if "lang" in qs else self.country
44
-    
47
+
45
         content=[]
48
         content=[]
46
         content.append(("..return", "back","","Return back"))
49
         content.append(("..return", "back","","Return back"))
47
-        
50
+
48
         if clist=="home":
51
         if clist=="home":
49
             content.extend([
52
             content.extend([
50
                 ("Live stream", "play24::tiesraide","","TV live streams"),
53
                 ("Live stream", "play24::tiesraide","","TV live streams"),
51
                 ("Last videos", "play24::jaunakie","","Last videos"),
54
                 ("Last videos", "play24::jaunakie","","Last videos"),
52
                 ("Categories", "play24::kategorijas","","Categories"),
55
                 ("Categories", "play24::kategorijas","","Categories"),
53
-                ("Programs", "play24::raidijumi","","Programs"),             
56
+                ("Programs", "play24::raidijumi","","Programs"),
54
              ])
57
              ])
55
             return content
58
             return content
56
-  
59
+
57
         ### Jaunākie video ###
60
         ### Jaunākie video ###
58
         elif clist=="jaunakie":
61
         elif clist=="jaunakie":
59
             url = "http://play24.lv/"
62
             url = "http://play24.lv/"
66
                 desc = title
69
                 desc = title
67
                 content.append((title,self.name+"::"+data2,img,desc))
70
                 content.append((title,self.name+"::"+data2,img,desc))
68
             return content
71
             return content
69
-                
72
+
70
         ### Kategorijas ###
73
         ### Kategorijas ###
71
         elif clist=="kategorijas":
74
         elif clist=="kategorijas":
72
             url = "http://play24.lv/"
75
             url = "http://play24.lv/"
79
                 desc = title
82
                 desc = title
80
                 content.append((title,self.name+"::"+data2,img,desc))
83
                 content.append((title,self.name+"::"+data2,img,desc))
81
             return content
84
             return content
82
-       
85
+
83
         elif clist=="kategorija":
86
         elif clist=="kategorija":
84
             url = "http://play24.lv/"+data
87
             url = "http://play24.lv/"+data
85
             r = self._http_request(url)
88
             r = self._http_request(url)
98
                 progr = m.group(2) if m else ""
101
                 progr = m.group(2) if m else ""
99
                 m = re.search('<span class="masonry-item__date">([^<]+)</span>', article, re.DOTALL)
102
                 m = re.search('<span class="masonry-item__date">([^<]+)</span>', article, re.DOTALL)
100
                 date = m.group(1).strip() if m else ""
103
                 date = m.group(1).strip() if m else ""
101
-                         
104
+
102
                 if date:
105
                 if date:
103
                     title = title + " (%s %s)"%(date,progr)
106
                     title = title + " (%s %s)"%(date,progr)
104
                 desc = title + "\n%s - %s"%(progr,date)
107
                 desc = title + "\n%s - %s"%(progr,date)
106
             m = re.search(r'<li><a href="http://play24\.lv/([^"]+)" rel="next">&raquo;</a></li>', r, re.DOTALL)
109
             m = re.search(r'<li><a href="http://play24\.lv/([^"]+)" rel="next">&raquo;</a></li>', r, re.DOTALL)
107
             if m:
110
             if m:
108
                 data2 = m.group(1)
111
                 data2 = m.group(1)
109
-                content.append(("Next page",self.name+"::"+data2,"","Next page"))                                            
112
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
110
             return content
113
             return content
111
-        
114
+
112
         ### Raidijumi (programmas)
115
         ### Raidijumi (programmas)
113
         elif clist=="raidijumi":
116
         elif clist=="raidijumi":
114
             url = "http://play24.lv/"
117
             url = "http://play24.lv/"
137
             m = re.search(r'<li><a href="http://play24\.lv/([^"]+)" rel="next">&raquo;</a></li>', r, re.DOTALL)
140
             m = re.search(r'<li><a href="http://play24\.lv/([^"]+)" rel="next">&raquo;</a></li>', r, re.DOTALL)
138
             if m:
141
             if m:
139
                 data2 = m.group(1)
142
                 data2 = m.group(1)
140
-                content.append(("Next page",self.name+"::"+data2,"","Next page"))                                            
143
+                content.append(("Next page",self.name+"::"+data2,"","Next page"))
141
             return content
144
             return content
142
 
145
 
143
         elif clist == "video" or clist == "tiesraide":
146
         elif clist == "video" or clist == "tiesraide":
144
-            if clist == "video":  
147
+            if clist == "video":
145
                 url = "http://play24.lv/"+data
148
                 url = "http://play24.lv/"+data
146
                 r = self._http_request(url)
149
                 r = self._http_request(url)
147
                 # var ov_video_id = '59422';
150
                 # var ov_video_id = '59422';
153
                 m = re.search('<meta name="description" content="([^"]+)" />', r, re.DOTALL)
156
                 m = re.search('<meta name="description" content="([^"]+)" />', r, re.DOTALL)
154
                 desc = m.group(1) if m else ""
157
                 desc = m.group(1) if m else ""
155
                 desc = h.unescape(desc.decode("utf8")).encode("utf8")
158
                 desc = h.unescape(desc.decode("utf8")).encode("utf8")
156
-                
159
+
157
                 url = "http://player.tvnet.lv/v/%s"%id
160
                 url = "http://player.tvnet.lv/v/%s"%id
158
             else:
161
             else:
159
                 url = "http://player.tvnet.lv/l/11"
162
                 url = "http://player.tvnet.lv/l/11"
160
                 desc = ""
163
                 desc = ""
161
             r = self._http_request(url)
164
             r = self._http_request(url)
162
             m = re.search('<h1 class="static title">.+?<a href="[^"]+">([^<]+)</a>', r, re.DOTALL)
165
             m = re.search('<h1 class="static title">.+?<a href="[^"]+">([^<]+)</a>', r, re.DOTALL)
163
-            title = m.group(1) if m else ""   
166
+            title = m.group(1) if m else ""
164
             s = {}
167
             s = {}
165
             for item in re.findall('source src="([^"]+)" data-stream="([^"]+)" data-quality="([^"]+)"', r, re.DOTALL):
168
             for item in re.findall('source src="([^"]+)" data-stream="([^"]+)" data-quality="([^"]+)"', r, re.DOTALL):
166
                 s[item[1]] = (item[0],item[2])
169
                 s[item[1]] = (item[0],item[2])
170
                     data2 = s[t][0]
173
                     data2 = s[t][0]
171
                     break
174
                     break
172
             return (title,data2,"",desc)
175
             return (title,data2,"",desc)
173
-               
174
-    
176
+
177
+
175
     def is_video(self,data):
178
     def is_video(self,data):
176
         if "::" in data:
179
         if "::" in data:
177
             data = data.split("::")[1]
180
             data = data.split("::")[1]
180
             return True
183
             return True
181
         else:
184
         else:
182
             return False
185
             return False
183
-    
186
+
184
     def call(self, data,headers=headers0,lang=""):
187
     def call(self, data,headers=headers0,lang=""):
185
         if not lang: lang = self.country
188
         if not lang: lang = self.country
186
         url = API_URL%lang + data
189
         url = API_URL%lang + data

+ 4
- 0
sources/replay.py View File

12
 
12
 
13
 import urllib2, urllib
13
 import urllib2, urllib
14
 import datetime, re, sys
14
 import datetime, re, sys
15
+import ssl
16
+ssl._create_default_https_context = ssl._create_unverified_context
17
+
15
 from SourceBase import SourceBase
18
 from SourceBase import SourceBase
16
 import util
19
 import util
17
 
20
 
146
                     content=("No stream found %s"%data,"","","No stream found")
149
                     content=("No stream found %s"%data,"","","No stream found")
147
                     return content
150
                     return content
148
                 data2 = m.group(1).replace("\\","")
151
                 data2 = m.group(1).replace("\\","")
152
+                #r = self._http_request(data2, headers=headers)
149
 
153
 
150
             else: # audio
154
             else: # audio
151
                 lrn = ch.replace("lr","")
155
                 lrn = ch.replace("lr","")

BIN
sources/replay.pyc View File


+ 3
- 0
sources/serialguru.py View File

13
 import urllib2, urllib
13
 import urllib2, urllib
14
 import datetime, re, sys,os
14
 import datetime, re, sys,os
15
 import ConfigParser
15
 import ConfigParser
16
+import ssl
17
+ssl._create_default_https_context = ssl._create_unverified_context
18
+
16
 from SourceBase import SourceBase
19
 from SourceBase import SourceBase
17
 
20
 
18
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
21
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])

+ 1
- 2
sources/streams.cfg View File

10
 Shortcut (lattelecom.tv)|ltc::home|https://kursors.lv/wp-content/uploads/2016/07/Shortcut-logo.png|lattelecom TV, arhīves un video
10
 Shortcut (lattelecom.tv)|ltc::home|https://kursors.lv/wp-content/uploads/2016/07/Shortcut-logo.png|lattelecom TV, arhīves un video
11
 Play24.lv (Riga24TV)|play24::home|http://play24.lv/images/play24-logo-black.png|play24.lv (Riga24TV)tiešraide un arhīvs
11
 Play24.lv (Riga24TV)|play24::home|http://play24.lv/images/play24-logo-black.png|play24.lv (Riga24TV)tiešraide un arhīvs
12
 viaplay.lv|viaplay::home|https://yt3.ggpht.com/-noVdjbNR-V8/AAAAAAAAAAI/AAAAAAAAAAA/yZ9XNP5urLY/s900-c-k-no-mo-rj-c0xffffff/photo.jpg|Viaplay.lv - filmas latviešu, krievu u.c. valodās
12
 viaplay.lv|viaplay::home|https://yt3.ggpht.com/-noVdjbNR-V8/AAAAAAAAAAI/AAAAAAAAAAA/yZ9XNP5urLY/s900-c-k-no-mo-rj-c0xffffff/photo.jpg|Viaplay.lv - filmas latviešu, krievu u.c. valodās
13
-LMT straume|lmt::home|http://www.lob.lv/images/logo/lmt_straume_vert_rgb.png|LMT straume - dažādi video latviesu valodā
14
 TVDom.tv|tvdom::home|https://tvdom.tv/front/assets/images/logo.png|PBK tiešraides un arhīvs
13
 TVDom.tv|tvdom::home|https://tvdom.tv/front/assets/images/logo.png|PBK tiešraides un arhīvs
15
 BBC iPlayer|iplayer::home|http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png|BBC live streams and arhive
14
 BBC iPlayer|iplayer::home|http://www.userlogos.org/files/logos/inductiveload/BBC_iPlayer_logo.png|BBC live streams and arhive
16
 Euronews|euronews::home|http://pbs.twimg.com/profile_images/732665354242150400/tZsCnjuh_400x400.jpg|Euronews live streams and archive
15
 Euronews|euronews::home|http://pbs.twimg.com/profile_images/732665354242150400/tZsCnjuh_400x400.jpg|Euronews live streams and archive
22
 FilmOn|filmon::home|http://behindthegloves.com/wp-content/uploads/2016/01/FilmOn-logo1.jpg|FilmOn - tiešraides un video (dažādās valodās)
21
 FilmOn|filmon::home|http://behindthegloves.com/wp-content/uploads/2016/01/FilmOn-logo1.jpg|FilmOn - tiešraides un video (dažādās valodās)
23
 MTGPlay|config::mtg|https://www.mtg.com/wp-content/uploads/2015/11/MTG-Logo-Medium-Red-PNG.png|Other countries MTG media portals content
22
 MTGPlay|config::mtg|https://www.mtg.com/wp-content/uploads/2015/11/MTG-Logo-Medium-Red-PNG.png|Other countries MTG media portals content
24
 Filmas.lv|filmas::home|https://www.filmas.lv/wp-content/uploads/2013/06/LVfilmas-logo-jauns21.png|Filmas.lv - Latvijas filmas
23
 Filmas.lv|filmas::home|https://www.filmas.lv/wp-content/uploads/2013/06/LVfilmas-logo-jauns21.png|Filmas.lv - Latvijas filmas
25
-
24
+LMT straume|lmt::home|http://www.lob.lv/images/logo/lmt_straume_vert_rgb.png|LMT straume - dažādi video latviesu valodā
26
 
25
 
27
 [my_tv]
26
 [my_tv]
28
 My Tv
27
 My Tv

+ 3
- 1
sources/tvdom.py View File

9
     import json
9
     import json
10
 except:
10
 except:
11
     import simplejson as json
11
     import simplejson as json
12
-
13
 import urllib2, urllib
12
 import urllib2, urllib
14
 import datetime, re, sys,os
13
 import datetime, re, sys,os
15
 from collections import OrderedDict
14
 from collections import OrderedDict
15
+import ssl
16
+ssl._create_default_https_context = ssl._create_unverified_context
17
+
16
 from SourceBase import SourceBase
18
 from SourceBase import SourceBase
17
 
19
 
18
 API_URL = 'http://replay.lsm.lv/'
20
 API_URL = 'http://replay.lsm.lv/'

+ 3
- 1
sources/ustvnow.py View File

9
     import json
9
     import json
10
 except:
10
 except:
11
     import simplejson as json
11
     import simplejson as json
12
-
13
 import urllib2, urllib
12
 import urllib2, urllib
14
 import datetime, re, sys,os
13
 import datetime, re, sys,os
15
 import traceback
14
 import traceback
16
 from collections import OrderedDict
15
 from collections import OrderedDict
16
+import ssl
17
+ssl._create_default_https_context = ssl._create_unverified_context
18
+
17
 from SourceBase import SourceBase
19
 from SourceBase import SourceBase
18
 
20
 
19
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
21
 headers2dict = lambda  h: dict([l.strip().split(": ") for l in h.strip().splitlines()])

+ 5
- 2
sources/viaplay.py View File

19
 import urlparse, urllib
19
 import urlparse, urllib
20
 import datetime, time,re, sys,os
20
 import datetime, time,re, sys,os
21
 from collections import OrderedDict
21
 from collections import OrderedDict
22
+import ssl
23
+ssl._create_default_https_context = ssl._create_unverified_context
24
+
22
 from SourceBase import SourceBase
25
 from SourceBase import SourceBase
23
 try:
26
 try:
24
     import util
27
     import util
415
                         #headers = {"Cookie":r2.headers["set-cookie"]}
418
                         #headers = {"Cookie":r2.headers["set-cookie"]}
416
                     #else:
419
                     #else:
417
                         #headers={}
420
                         #headers={}
418
-                    url2p=util.streamproxy_encode(url2,headers)
421
+                    #url2p=util.streamproxy_encode(url2,headers)
419
                     stream = util.item()
422
                     stream = util.item()
420
-                    stream["url"]=url2p
423
+                    stream["url"]=url2
421
                     stream["lang"]=s["lang"]
424
                     stream["lang"]=s["lang"]
422
                     stream["quality"]="%s"%(s2[1])
425
                     stream["quality"]="%s"%(s2[1])
423
                     stream["name"]= title
426
                     stream["name"]= title

BIN
sources/viaplay.pyc View File


+ 39
- 9
util.py View File

195
 EQ_CODE = urllib.quote("=")
195
 EQ_CODE = urllib.quote("=")
196
 COL_CODE = urllib.quote(":")
196
 COL_CODE = urllib.quote(":")
197
 SPACE_CODE = urllib.quote(" ")
197
 SPACE_CODE = urllib.quote(" ")
198
-PROXY_URL = "http://localhost:88/"
199
-
200
 
198
 
201
 def make_fname(title):
199
 def make_fname(title):
202
     "Make file name from title"
200
     "Make file name from title"
205
     fname0 = re.sub("['""]","",fname0)
203
     fname0 = re.sub("['""]","",fname0)
206
     return fname0
204
     return fname0
207
 
205
 
208
-
209
 def hls_base(url):
206
 def hls_base(url):
210
     url2 = url.split("?")[0]
207
     url2 = url.split("?")[0]
211
     url2 = "/".join(url2.split("/")[0:-1])+ "/"
208
     url2 = "/".join(url2.split("/")[0:-1])+ "/"
214
 def stream_change(stream):
211
 def stream_change(stream):
215
     #return stream # TODO
212
     #return stream # TODO
216
     if "resolver" in stream and stream["resolver"] in ("viaplay","hqq","filmas") or \
213
     if "resolver" in stream and stream["resolver"] in ("viaplay","hqq","filmas") or \
217
-        "surl" in stream and re.search("https*://(hqq|goo.\gl)",stream["surl"]):
214
+        "surl" in stream and re.search("https*://(hqq|goo\.gl)",stream["surl"]):
218
         stream["url"] = streamproxy_encode(stream["url"],stream["headers"])
215
         stream["url"] = streamproxy_encode(stream["url"],stream["headers"])
219
         stream["headers"] = {}
216
         stream["headers"] = {}
220
         return stream
217
         return stream
221
     else:
218
     else:
222
         return stream
219
         return stream
223
 
220
 
224
-def streamproxy_encode(url,headers=[]):
221
+def streamproxy_encode(url,headers=[],proxy_url=None):
222
+    PROXY_URL = "http://localhost:8880/"
225
     if not "?" in url:
223
     if not "?" in url:
226
         url = url+"?"
224
         url = url+"?"
227
     url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
225
     url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
228
-    url2 = PROXY_URL + url2
226
+    if not proxy_url:
227
+        proxy_url = PROXY_URL
228
+    url2 = proxy_url + url2
229
     if headers:
229
     if headers:
230
         headers2 = []
230
         headers2 = []
231
         for h in headers:
231
         for h in headers:
232
             headers2.append("%s=%s"%(h,headers[h].replace("=",EQ_CODE).replace(SPLIT_CHAR,SPLIT_CODE).replace(" ",SPACE_CODE)))
232
             headers2.append("%s=%s"%(h,headers[h].replace("=",EQ_CODE).replace(SPLIT_CHAR,SPLIT_CODE).replace(" ",SPACE_CODE)))
233
         headers2 = SPLIT_CHAR.join(headers2)
233
         headers2 = SPLIT_CHAR.join(headers2)
234
         url2 = url2+SPLIT_CHAR+headers2
234
         url2 = url2+SPLIT_CHAR+headers2
235
+    #return url2.encode("utf8") if isinstance(url2,unicode) else url2
235
     return url2
236
     return url2
236
 
237
 
237
 def streamproxy_decode(urlp):
238
 def streamproxy_decode(urlp):
247
             headers[h.split("=")[0]]=urllib.unquote(h.split("=")[1])
248
             headers[h.split("=")[0]]=urllib.unquote(h.split("=")[1])
248
     return url,headers
249
     return url,headers
249
 
250
 
251
+def streamproxy_encode2(url,headers=[],proxy_url=None):
252
+    PROXY_URL = "http://localhost:8880/"
253
+    #url2 = url.replace(SPLIT_CHAR,SPLIT_CODE).replace(":",COL_CODE).replace(" ",SPACE_CODE)
254
+    url2 = urllib.quote_plus(url)
255
+    if not proxy_url:
256
+        proxy_url = PROXY_URL
257
+    url2 = proxy_url + url2+"/?"
258
+    if headers:
259
+        headers2 = []
260
+        for h in headers:
261
+            headers2.append("%s=%s"%(h,headers[h].replace("=",EQ_CODE).replace(SPLIT_CHAR,SPLIT_CODE).replace(" ",SPACE_CODE)))
262
+        headers2 = SPLIT_CHAR.join(headers2)
263
+        url2 = url2+SPLIT_CHAR+headers2
264
+    return url2
265
+
266
+def streamproxy_decode2(urlp):
267
+    path = urlp.replace(re.search("http://[^/]+",urlp).group(0),"")
268
+    p = path.split(SPLIT_CHAR)
269
+    url = urllib.unquote_plus(p[0][1:-2])
270
+    #headers = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C71 Safari/601.1.46"}
271
+    headers={}
272
+    if len(p)>1:
273
+        for h in p[1:]:
274
+            #h = urllib.unquote()
275
+            headers[h.split("=")[0]]=urllib.unquote(h.split("=")[1])
276
+    return url,headers
277
+
250
 class Captions(object):
278
 class Captions(object):
251
     def __init__(self,uri):
279
     def __init__(self,uri):
252
         self.uri = uri
280
         self.uri = uri
687
     url = "http://str1e.lattelecom.tv/mobile-vod/mp4:sf_fantastic_beasts_and_where_to_find_them_en_hd.mp4/playlist.m3u8?resource_id=fantastic_beasts_and_where_to_find_them&auth_token=6NAvMFDG+rYTAc4hb5JeL2bmsaRR7bAE23M6KDmhKYOGyXoo0gDpJUE9scYy+nQmfbgk03cWMe9MuXWSH1GqwolEk2jOQ/8Mrg7tOdbwrA8zM7nmkfCZPqQkwajZN4mfSJQVKHqXqJ8="
715
     url = "http://str1e.lattelecom.tv/mobile-vod/mp4:sf_fantastic_beasts_and_where_to_find_them_en_hd.mp4/playlist.m3u8?resource_id=fantastic_beasts_and_where_to_find_them&auth_token=6NAvMFDG+rYTAc4hb5JeL2bmsaRR7bAE23M6KDmhKYOGyXoo0gDpJUE9scYy+nQmfbgk03cWMe9MuXWSH1GqwolEk2jOQ/8Mrg7tOdbwrA8zM7nmkfCZPqQkwajZN4mfSJQVKHqXqJ8="
688
     headers={}
716
     headers={}
689
     print url
717
     print url
690
-    urlp = streamproxy_encode(url,headers)
718
+    url = "replay::tiesraide/ltv1/"
719
+    url = "ltc::content/live-streams/103?include=quality"
720
+    urlp = streamproxy_encode2(url,headers)
691
     print urlp
721
     print urlp
692
-    url2,headers2 = streamproxy_decode(urlp)
693
-    #print url2 - 2
722
+    url2,headers2 = streamproxy_decode2(urlp)
723
+    print url2
694
     player(urlp)
724
     player(urlp)
695
     pass
725
     pass
696
 
726