
playstreamproxy2 (WSGI)

ltc2
Ivars, 5 years ago
parent commit 390409a769
17 files changed, 11003 additions and 707 deletions
1. bottle.py (+4418, -0)
2. daemonize.py (+287, -0)
3. diskcache/__init__.py (+36, -0)
4. diskcache/cli.py (+1, -0)
5. diskcache/core.py (+1947, -0)
6. diskcache/fanout.py (+595, -0)
7. diskcache/memo.py (+105, -0)
8. diskcache/persistent.py (+1348, -0)
9. diskcache/stampede.py (+78, -0)
10. mtwsgi.py (+60, -0)
11. playstreamproxy.py (+45, -9)
12. playstreamproxy2.py (+152, -0)
13. project.wpr (+633, -695)
14. sources/ltc2.py (+1270, -0)
15. sources/streams.cfg (+1, -0)
16. test_url.py (+23, -0)
17. util.py (+4, -3)

+4418 -0  bottle.py
File diff suppressed because it is too large.


+287 -0  daemonize.py

+#!/bin/env python
+'''
+***
+Modified generic daemon class
+***
+
+Author:
+                https://web.archive.org/web/20160305151936/http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
+
+Modified by ivars777@gmail.com
+
+License:        http://creativecommons.org/licenses/by-sa/3.0/
+
+'''
+
+# Core modules
+from __future__ import print_function
+import atexit
+import errno
+import os
+import sys
+import time
+import signal
+
+
+class Daemon(object):
+    """
+    A generic daemon class.
+
+    Usage: subclass the Daemon class and override the run() method
+    """
+    def __init__(self, app, pidfile, stdin=os.devnull,
+                 stdout=os.devnull, stderr=os.devnull,
+                 home_dir='.', umask=0o22, verbose=1,
+                 use_gevent=False, use_eventlet=False):
+        self.app = app
+        self.stdin = stdin
+        self.stdout = stdout
+        self.stderr = stderr
+        self.pidfile = pidfile
+        self.home_dir = home_dir
+        self.verbose = verbose
+        self.umask = umask
+        self.daemon_alive = True
+        self.use_gevent = use_gevent
+        self.use_eventlet = use_eventlet
+
+    def log(self, *args):
+        if self.verbose >= 1:
+            print(*args)
+
+    def daemonize(self):
+        """
+        Do the UNIX double-fork magic, see Stevens' "Advanced
+        Programming in the UNIX Environment" for details (ISBN 0201563177)
+        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
+        """
+        if self.use_eventlet:
+            import eventlet.tpool
+            eventlet.tpool.killall()
+        try:
+            pid = os.fork()
+            if pid > 0:
+                # Exit first parent
+                sys.exit(0)
+        except OSError as e:
+            sys.stderr.write(
+                "fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
+            sys.exit(1)
+
+        # Decouple from parent environment
+        os.chdir(self.home_dir)
+        os.setsid()
+        os.umask(self.umask)
+
+        # Do second fork
+        try:
+            pid = os.fork()
+            if pid > 0:
+                # Exit from second parent
+                sys.exit(0)
+        except OSError as e:
+            sys.stderr.write(
+                "fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
+            sys.exit(1)
+
+        if sys.platform != 'darwin':  # This block breaks on OS X
+            # Redirect standard file descriptors
+            sys.stdout.flush()
+            sys.stderr.flush()
+            si = open(self.stdin, 'r')
+            so = open(self.stdout, 'a+')
+            if self.stderr:
+                try:
+                    se = open(self.stderr, 'a+', 0)
+                except ValueError:
+                    # Python 3 can't have unbuffered text I/O
+                    se = open(self.stderr, 'a+', 1)
+            else:
+                se = so
+            os.dup2(si.fileno(), sys.stdin.fileno())
+            os.dup2(so.fileno(), sys.stdout.fileno())
+            os.dup2(se.fileno(), sys.stderr.fileno())
+
+        def sigtermhandler(signum, frame):
+            self.daemon_alive = False
+            sys.exit()
+
+        if self.use_gevent:
+            import gevent
+            gevent.reinit()
+            gevent.signal(signal.SIGTERM, sigtermhandler, signal.SIGTERM, None)
+            gevent.signal(signal.SIGINT, sigtermhandler, signal.SIGINT, None)
+        else:
+            signal.signal(signal.SIGTERM, sigtermhandler)
+            signal.signal(signal.SIGINT, sigtermhandler)
+
+        self.log("Started")
+
+        # Write pidfile
+        atexit.register(
+            self.delpid)  # Make sure pid file is removed if we quit
+        pid = str(os.getpid())
+        open(self.pidfile, 'w+').write("%s\n" % pid)
+
+    def delpid(self):
+        try:
+            # the process may fork itself again
+            pid = int(open(self.pidfile, 'r').read().strip())
+            if pid == os.getpid():
+                os.remove(self.pidfile)
+        except OSError as e:
+            if e.errno == errno.ENOENT:
+                pass
+            else:
+                raise
+
+    def start(self, *args, **kwargs):
+        """
+        Start the daemon
+        """
+
+        self.log("Starting...")
+
+        # Check for a pidfile to see if the daemon already runs
+        try:
+            pf = open(self.pidfile, 'r')
+            pid = int(pf.read().strip())
+            pf.close()
+        except IOError:
+            pid = None
+        except SystemExit:
+            pid = None
+
+        if pid:
+            message = "pidfile %s already exists. Is it already running?\n"
+            sys.stderr.write(message % self.pidfile)
+            sys.exit(1)
+
+        # Start the daemon
+        self.daemonize()
+        self.run(*args, **kwargs)
+
+    def stop(self):
+        """
+        Stop the daemon
+        """
+
+        if self.verbose >= 1:
+            self.log("Stopping...")
+
+        # Get the pid from the pidfile
+        pid = self.get_pid()
+
+        if not pid:
+            message = "pidfile %s does not exist. Not running?\n"
+            sys.stderr.write(message % self.pidfile)
+
+            # Just to be sure. A ValueError might occur if the PID file is
+            # empty but does actually exist
+            if os.path.exists(self.pidfile):
+                os.remove(self.pidfile)
+
+            return  # Not an error in a restart
+
+        # Try killing the daemon process
+        try:
+            i = 0
+            while 1:
+                os.kill(pid, signal.SIGTERM)
+                time.sleep(0.1)
+                i = i + 1
+                if i % 10 == 0:
+                    os.kill(pid, signal.SIGHUP)
+        except OSError as err:
+            if err.errno == errno.ESRCH:
+                if os.path.exists(self.pidfile):
+                    os.remove(self.pidfile)
+            else:
+                print(str(err))
+                sys.exit(1)
+
+        self.log("Stopped")
+
+    def restart(self):
+        """
+        Restart the daemon
+        """
+        self.stop()
+        self.start()
+
+    def get_pid(self):
+        try:
+            pf = open(self.pidfile, 'r')
+            pid = int(pf.read().strip())
+            pf.close()
+        except IOError:
+            pid = None
+        except SystemExit:
+            pid = None
+        return pid
+
+    def is_running(self):
+        pid = self.get_pid()
+
+        if pid is None:
+            self.log('Process is stopped')
+            return False
+        elif os.path.exists('/proc/%d' % pid):
+            self.log('Process (pid %d) is running...' % pid)
+            return True
+        else:
+            self.log('Process (pid %d) is killed' % pid)
+            return False
+
+    def run(self, *args, **kwargs):
+        """
+        Running app
+        """
+        self.app(*args, **kwargs)
+        #self.log("Starting foreground...")
+
+
+def main(cmd):
+    from datetime import datetime
+    from time import sleep
+    import subprocess
+    print("Starting cmd: ")
+    print(str(cmd))
+    subprocess.call(cmd)
+    #while True:
+    #    print(str(datetime.now()))
+    #    sleep(5)
+
+#def main2(cmd):
+#    from bottle import route, run
+
+#    @route('/')
+#    def index():
+#        return '<b>Hello </b>!'
+
+#    run(host='localhost', port=8080)
+
+
+if __name__ == "__main__":
+    if len(sys.argv) > 1:
+        cmd = sys.argv[2:]
+        app = cmd[0] if cmd else "daemonize2"
+        pid = "/var/run/%s.pid" % app
+        daemon = Daemon(main, pid)
+        if "start" == sys.argv[1]:
+            daemon.start(cmd)
+        elif "stop" == sys.argv[1]:
+            daemon.stop()
+        elif "restart" == sys.argv[1]:
+            daemon.restart()
+        elif "manualstart" == sys.argv[1]:
+            daemon.run(cmd)
+        elif "status" == sys.argv[1]:
+            daemon.is_running()
+        else:
+            print("Unknown command")
+            sys.exit(2)
+        sys.exit(0)
+    else:
+        print("usage: %s start|stop|restart|manualstart|status" % sys.argv[0])
+        sys.exit(2)
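
Since `run()` simply calls the `app` callable, the class can also be used without subclassing. A minimal usage sketch (the `serve` function and pid-file path below are illustrative, not part of the commit):

```python
from daemonize import Daemon

def serve():
    # Long-running work goes here; SIGTERM flips daemon_alive and exits.
    import time
    while True:
        time.sleep(5)

daemon = Daemon(serve, "/tmp/myservice.pid")
daemon.start()   # double-fork, write the pid file, then call serve()
# daemon.stop()  # later: SIGTERM the recorded pid and remove the pid file
```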

+36 -0  diskcache/__init__.py

+"DiskCache: disk and file backed cache."
+
+from .core import Cache, Disk, UnknownFileWarning, EmptyDirWarning, Timeout
+from .core import DEFAULT_SETTINGS, ENOVAL, EVICTION_POLICY, UNKNOWN
+from .fanout import FanoutCache
+from .persistent import Deque, Index
+
+__all__ = [
+    'Cache',
+    'Disk',
+    'UnknownFileWarning',
+    'EmptyDirWarning',
+    'Timeout',
+    'DEFAULT_SETTINGS',
+    'ENOVAL',
+    'EVICTION_POLICY',
+    'UNKNOWN',
+    'FanoutCache',
+    'Deque',
+    'Index',
+]
+
+try:
+    from .djangocache import DjangoCache  # pylint: disable=wrong-import-position
+    __all__.append('DjangoCache')
+except Exception:  # pylint: disable=broad-except
+    # Django not installed or not setup so ignore.
+    pass
+
+
+__title__ = 'diskcache'
+__version__ = '3.1.1'
+__build__ = 0x030101
+__author__ = 'Grant Jenks'
+__license__ = 'Apache 2.0'
+__copyright__ = 'Copyright 2016-2018 Grant Jenks'

+1 -0  diskcache/cli.py

+"Command line interface to disk cache."

+1947 -0  diskcache/core.py
File diff suppressed because it is too large.


+595 -0  diskcache/fanout.py

+"Fanout cache automatically shards keys and values."
+
+import itertools as it
+import os.path as op
+import sqlite3
+import time
+
+from .core import ENOVAL, DEFAULT_SETTINGS, Cache, Disk, Timeout
+from .memo import memoize
+from .persistent import Deque, Index
+
+
+class FanoutCache(object):
+    "Cache that shards keys and values."
+    def __init__(self, directory, shards=8, timeout=0.010, disk=Disk,
+                 **settings):
+        """Initialize cache instance.
+
+        :param str directory: cache directory
+        :param int shards: number of shards to distribute writes
+        :param float timeout: SQLite connection timeout
+        :param disk: `Disk` instance for serialization
+        :param settings: any of `DEFAULT_SETTINGS`
+
+        """
+        self._directory = directory
+        self._count = shards
+        default_size_limit = DEFAULT_SETTINGS['size_limit']
+        size_limit = settings.pop('size_limit', default_size_limit) / shards
+        self._shards = tuple(
+            Cache(
+                op.join(directory, '%03d' % num),
+                timeout=timeout,
+                disk=disk,
+                size_limit=size_limit,
+                **settings
+            )
+            for num in range(shards)
+        )
+        self._hash = self._shards[0].disk.hash
+        self._deques = {}
+        self._indexes = {}
+
+
+    @property
+    def directory(self):
+        """Cache directory."""
+        return self._directory
+
+
+    def __getattr__(self, name):
+        return getattr(self._shards[0], name)
+
+
+    def set(self, key, value, expire=None, read=False, tag=None, retry=False):
+        """Set `key` and `value` item in cache.
+
+        When `read` is `True`, `value` should be a file-like object opened
+        for reading in binary mode.
+
+        If database timeout occurs then fails silently unless `retry` is set to
+        `True` (default `False`).
+
+        :param key: key for item
+        :param value: value for item
+        :param float expire: seconds until the key expires
+            (default None, no expiry)
+        :param bool read: read value as raw bytes from file (default False)
+        :param str tag: text to associate with key (default None)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: True if item was set
+
+        """
+        index = self._hash(key) % self._count
+        set_func = self._shards[index].set
+
+        while True:
+            try:
+                return set_func(key, value, expire, read, tag)
+            except Timeout:
+                if retry:
+                    continue
+                else:
+                    return False
+
+
+    def __setitem__(self, key, value):
+        """Set `key` and `value` item in cache.
+
+        Calls :func:`FanoutCache.set` internally with `retry` set to `True`.
+
+        :param key: key for item
+        :param value: value for item
+
+        """
+        self.set(key, value, retry=True)
+
+
+    def add(self, key, value, expire=None, read=False, tag=None, retry=False):
+        """Add `key` and `value` item to cache.
+
+        Similar to `set`, but only add to cache if key not present.
+
+        This operation is atomic. Only one concurrent add operation for given
+        key from separate threads or processes will succeed.
+
+        When `read` is `True`, `value` should be a file-like object opened
+        for reading in binary mode.
+
+        :param key: key for item
+        :param value: value for item
+        :param float expire: seconds until the key expires
+            (default None, no expiry)
+        :param bool read: read value as bytes from file (default False)
+        :param str tag: text to associate with key (default None)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: True if item was added
+
+        """
+        index = self._hash(key) % self._count
+        add_func = self._shards[index].add
+
+        while True:
+            try:
+                return add_func(key, value, expire, read, tag)
+            except Timeout:
+                if retry:
+                    continue
+                else:
+                    return False
+
+
+    def incr(self, key, delta=1, default=0, retry=False):
+        """Increment value by delta for item with key.
+
+        If key is missing and default is None then raise KeyError. Else if key
+        is missing and default is not None then use default for value.
+
+        Operation is atomic. All concurrent increment operations will be
+        counted individually.
+
+        Assumes value may be stored in a SQLite column. Most builds that target
+        machines with 64-bit pointer widths will support 64-bit signed
+        integers.
+
+        :param key: key for item
+        :param int delta: amount to increment (default 1)
+        :param int default: value if key is missing (default 0)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: new value for item on success else None
+        :raises KeyError: if key is not found and default is None
+
+        """
+        index = self._hash(key) % self._count
+        incr_func = self._shards[index].incr
+
+        while True:
+            try:
+                return incr_func(key, delta, default)
+            except Timeout:
+                if retry:
+                    continue
+                else:
+                    return None
+
+
+    def decr(self, key, delta=1, default=0, retry=False):
+        """Decrement value by delta for item with key.
+
+        If key is missing and default is None then raise KeyError. Else if key
+        is missing and default is not None then use default for value.
+
+        Operation is atomic. All concurrent decrement operations will be
+        counted individually.
+
+        Unlike Memcached, negative values are supported. Value may be
+        decremented below zero.
+
+        Assumes value may be stored in a SQLite column. Most builds that target
+        machines with 64-bit pointer widths will support 64-bit signed
+        integers.
+
+        :param key: key for item
+        :param int delta: amount to decrement (default 1)
+        :param int default: value if key is missing (default 0)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: new value for item on success else None
+        :raises KeyError: if key is not found and default is None
+
+        """
+        return self.incr(key, -delta, default, retry)
+
+
+    def get(self, key, default=None, read=False, expire_time=False, tag=False,
+            retry=False):
+        """Retrieve value from cache. If `key` is missing, return `default`.
+
+        If database timeout occurs then returns `default` unless `retry` is set
+        to `True` (default `False`).
+
+        :param key: key for item
+        :param default: return value if key is missing (default None)
+        :param bool read: if True, return file handle to value
+            (default False)
+        :param float expire_time: if True, return expire_time in tuple
+            (default False)
+        :param tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: value for item if key is found else default
+
+        """
+        index = self._hash(key) % self._count
+        get_func = self._shards[index].get
+
+        while True:
+            try:
+                return get_func(
+                    key, default=default, read=read, expire_time=expire_time,
+                    tag=tag,
+                )
+            except (Timeout, sqlite3.OperationalError):
+                if retry:
+                    continue
+                else:
+                    return default
+
+
+    def __getitem__(self, key):
+        """Return corresponding value for `key` from cache.
+
+        Calls :func:`FanoutCache.get` internally with `retry` set to `True`.
+
+        :param key: key for item
+        :return: value for item
+        :raises KeyError: if key is not found
+
+        """
+        value = self.get(key, default=ENOVAL, retry=True)
+
+        if value is ENOVAL:
+            raise KeyError(key)
+
+        return value
+
+
+    def read(self, key):
+        """Return file handle corresponding to `key` from cache.
+
+        :param key: key for item
+        :return: file open for reading in binary mode
+        :raises KeyError: if key is not found
+
+        """
+        handle = self.get(key, default=ENOVAL, read=True, retry=True)
+        if handle is ENOVAL:
+            raise KeyError(key)
+        return handle
+
+
+    def __contains__(self, key):
+        """Return `True` if `key` matching item is found in cache.
+
+        :param key: key for item
+        :return: True if key is found
+
+        """
+        index = self._hash(key) % self._count
+        return key in self._shards[index]
+
+
+    def pop(self, key, default=None, expire_time=False, tag=False,
+            retry=False):
+        """Remove corresponding item for `key` from cache and return value.
+
+        If `key` is missing, return `default`.
+
+        Operation is atomic. Concurrent operations will be serialized.
+
+        :param key: key for item
+        :param default: return value if key is missing (default None)
+        :param float expire_time: if True, return expire_time in tuple
+            (default False)
+        :param tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout expires (default False)
+        :return: value for item if key is found else default
+
+        """
+        index = self._hash(key) % self._count
+        pop_func = self._shards[index].pop
+
+        while True:
+            try:
+                return pop_func(
+                    key, default=default, expire_time=expire_time, tag=tag,
+                )
+            except Timeout:
+                if retry:
+                    continue
+                else:
+                    return default
+
+
+    def delete(self, key, retry=False):
+        """Delete corresponding item for `key` from cache.
+
+        Missing keys are ignored.
+
+        If database timeout occurs then fails silently unless `retry` is set to
+        `True` (default `False`).
+
+        :param key: key for item
+        :param bool retry: retry if database timeout expires (default False)
+        :return: True if item was deleted
+
+        """
+        index = self._hash(key) % self._count
+        del_func = self._shards[index].__delitem__
+
+        while True:
+            try:
+                return del_func(key)
+            except Timeout:
+                if retry:
+                    continue
+                else:
+                    return False
+            except KeyError:
+                return False
+
+
+    def __delitem__(self, key):
+        """Delete corresponding item for `key` from cache.
+
+        Calls :func:`FanoutCache.delete` internally with `retry` set to `True`.
+
+        :param key: key for item
+        :raises KeyError: if key is not found
+
+        """
+        deleted = self.delete(key, retry=True)
+
+        if not deleted:
+            raise KeyError(key)
+
+
+    memoize = memoize
+
+
+    def check(self, fix=False):
+        """Check database and file system consistency.
+
+        Intended for use in testing and post-mortem error analysis.
+
+        While checking the cache table for consistency, a writer lock is held
+        on the database. The lock blocks other cache clients from writing to
+        the database. For caches with many file references, the lock may be
+        held for a long time. For example, local benchmarking shows that a
+        cache with 1,000 file references takes ~60ms to check.
+
+        :param bool fix: correct inconsistencies
+        :return: list of warnings
+        :raises Timeout: if database timeout expires
+
+        """
+        return sum((shard.check(fix=fix) for shard in self._shards), [])
+
+
+    def expire(self):
+        """Remove expired items from cache.
+
+        :return: count of items removed
+
+        """
+        return self._remove('expire', args=(time.time(),))
+
+
+    def create_tag_index(self):
+        """Create tag index on cache database.
+
+        It is better to initialize cache with `tag_index=True` than use this.
+
+        :raises Timeout: if database timeout expires
+
+        """
+        for shard in self._shards:
+            shard.create_tag_index()
+
+
+    def drop_tag_index(self):
+        """Drop tag index on cache database.
+
+        :raises Timeout: if database timeout expires
+
+        """
+        for shard in self._shards:
+            shard.drop_tag_index()
+
+
+    def evict(self, tag):
+        """Remove items with matching `tag` from cache.
+
+        :param str tag: tag identifying items
+        :return: count of items removed
+
+        """
+        return self._remove('evict', args=(tag,))
+
+
+    def cull(self):
+        """Cull items from cache until volume is less than size limit.
+
+        :return: count of items removed
+
+        """
+        return self._remove('cull')
+
+
+    def clear(self):
+        """Remove all items from cache.
+
+        :return: count of items removed
+
+        """
+        return self._remove('clear')
+
+
+    def _remove(self, name, args=()):
+        total = 0
+        for shard in self._shards:
+            method = getattr(shard, name)
+            while True:
+                try:
+                    count = method(*args)
+                    total += count
+                except Timeout as timeout:
+                    total += timeout.args[0]
+                else:
+                    break
+        return total
+
+
+    def stats(self, enable=True, reset=False):
+        """Return cache statistics hits and misses.
+
+        :param bool enable: enable collecting statistics (default True)
+        :param bool reset: reset hits and misses to 0 (default False)
+        :return: (hits, misses)
+
+        """
+        results = [shard.stats(enable, reset) for shard in self._shards]
+        return (sum(result[0] for result in results),
+                sum(result[1] for result in results))
+
+
+    def volume(self):
+        """Return estimated total size of cache on disk.
+
+        :return: size in bytes
+
+        """
+        return sum(shard.volume() for shard in self._shards)
+
+
+    def close(self):
+        "Close database connection."
+        for shard in self._shards:
+            shard.close()
+        self._deques.clear()
+        self._indexes.clear()
+
+
+    def __enter__(self):
+        return self
+
+
+    def __exit__(self, *exception):
+        self.close()
+
+
+    def __getstate__(self):
+        return (self._directory, self._count, self.timeout, type(self.disk))
+
+
+    def __setstate__(self, state):
+        self.__init__(*state)
+
+
+    def __iter__(self):
+        "Iterate keys in cache including expired items."
+        iterators = [iter(shard) for shard in self._shards]
+        return it.chain.from_iterable(iterators)
+
+
+    def __reversed__(self):
+        "Reverse iterate keys in cache including expired items."
+        iterators = [reversed(shard) for shard in self._shards]
+        return it.chain.from_iterable(reversed(iterators))
+
+
+    def __len__(self):
+        "Count of items in cache including expired items."
+        return sum(len(shard) for shard in self._shards)
+
+
+    def reset(self, key, value=ENOVAL):
+        """Reset `key` and `value` item from Settings table.
+
+        If `value` is not given, it is reloaded from the Settings
+        table. Otherwise, the Settings table is updated.
+
+        Settings attributes on cache objects are lazy-loaded and
+        read-only. Use `reset` to update the value.
+
+        Settings with the ``sqlite_`` prefix correspond to SQLite
+        pragmas. Updating the value will execute the corresponding PRAGMA
+        statement.
+
+        :param str key: Settings key for item
+        :param value: value for item (optional)
+        :return: updated value for item
+        :raises Timeout: if database timeout expires
+
+        """
+        for shard in self._shards:
+            while True:
+                try:
+                    result = shard.reset(key, value)
+                except Timeout:
+                    pass
+                else:
+                    break
+        return result
+
+
+    def deque(self, name):
+        """Return Deque with given `name` in subdirectory.
+
+        >>> cache = FanoutCache('/tmp/diskcache/fanoutcache')
+        >>> deque = cache.deque('test')
+        >>> deque.clear()
+        >>> deque.extend('abc')
+        >>> deque.popleft()
+        'a'
+        >>> deque.pop()
+        'c'
+        >>> len(deque)
+        1
+
+        :param str name: subdirectory name for Deque
+        :return: Deque with given name
+
+        """
+        _deques = self._deques
+
+        try:
+            return _deques[name]
+        except KeyError:
+            parts = name.split('/')
+            directory = op.join(self._directory, 'deque', *parts)
+            temp = Deque(directory=directory)
+            _deques[name] = temp
+            return temp
+
+
+    def index(self, name):
+        """Return Index with given `name` in subdirectory.
+
+        >>> cache = FanoutCache('/tmp/diskcache/fanoutcache')
+        >>> index = cache.index('test')
+        >>> index.clear()
+        >>> index['abc'] = 123
+        >>> index['def'] = 456
+        >>> index['ghi'] = 789
+        >>> index.popitem()
+        ('ghi', 789)
+        >>> del index['abc']
+        >>> len(index)
+        1
+        >>> index['def']
+        456
+
+        :param str name: subdirectory name for Index
+        :return: Index with given name
+
+        """
+        _indexes = self._indexes
+
+        try:
+            return _indexes[name]
+        except KeyError:
+            parts = name.split('/')
+            directory = op.join(self._directory, 'index', *parts)
+            temp = Index(directory)
+            _indexes[name] = temp
+            return temp
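
A short usage sketch of the sharding behaviour described above (the directory and keys are illustrative): each operation hashes the key to pick one shard, and database timeouts either fail softly or retry.

```python
from diskcache import FanoutCache

cache = FanoutCache("/tmp/diskcache/demo", shards=4)

# set()/get() swallow SQLite timeouts and signal failure through the
# return value; retry=True keeps retrying instead of giving up.
cache.set("stream-101", "http://example.com/a.m3u8", expire=60)
url = cache.get("stream-101", default=None, retry=True)

# The mapping interface wraps set/get with retry=True.
cache["hits"] = 0
cache.incr("hits")   # atomic, handled entirely by one shard
cache.close()
```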

+105 -0  diskcache/memo.py

+"""Memoization utilities.
+
+"""
+
+from functools import wraps
+
+from .core import ENOVAL
+
+def memoize(cache, name=None, typed=False, expire=None, tag=None):
+    """Memoizing cache decorator.
+
+    Decorator to wrap callable with memoizing function using cache. Repeated
+    calls with the same arguments will lookup result in cache and avoid
+    function evaluation.
+
+    If name is set to None (default), the callable name will be determined
+    automatically.
+
+    If typed is set to True, function arguments of different types will be
+    cached separately. For example, f(3) and f(3.0) will be treated as distinct
+    calls with distinct results.
+
+    The original underlying function is accessible through the __wrapped__
+    attribute. This is useful for introspection, for bypassing the cache, or
+    for rewrapping the function with a different cache.
+
+    >>> from diskcache import FanoutCache
+    >>> cache = FanoutCache('/tmp/diskcache/fanoutcache')
+    >>> @cache.memoize(typed=True, expire=1, tag='fib')
+    ... def fibonacci(number):
+    ...     if number == 0:
+    ...         return 0
+    ...     elif number == 1:
+    ...         return 1
+    ...     else:
+    ...         return fibonacci(number - 1) + fibonacci(number - 2)
+    >>> print(sum(fibonacci(number=value) for value in range(100)))
+    573147844013817084100
+
+    Remember to call memoize when decorating a callable. If you forget, then a
+    TypeError will occur. Note the lack of parentheses after memoize below:
+
+    >>> @cache.memoize
+    ... def test():
+    ...     pass
+    Traceback (most recent call last):
+        ...
+    TypeError: name cannot be callable
+
+    :param cache: cache to store callable arguments and return values
+    :param str name: name given for callable (default None, automatic)
+    :param bool typed: cache different types separately (default False)
+    :param float expire: seconds until arguments expire
+        (default None, no expiry)
+    :param str tag: text to associate with arguments (default None)
+    :return: callable decorator
+
+    """
+    if callable(name):
+        raise TypeError('name cannot be callable')
+
+    def decorator(function):
+        "Decorator created by memoize call for callable."
+        if name is None:
+            try:
+                reference = function.__qualname__
+            except AttributeError:
+                reference = function.__name__
+
+            reference = function.__module__ + reference
+        else:
+            reference = name
+
+        reference = (reference,)
+
+        @wraps(function)
+        def wrapper(*args, **kwargs):
+            "Wrapper for callable to cache arguments and return values."
+
+            key = reference + args
+
+            if kwargs:
+                key += (ENOVAL,)
+                sorted_items = sorted(kwargs.items())
+
+                for item in sorted_items:
+                    key += item
+
+            if typed:
+                key += tuple(type(arg) for arg in args)
+
+                if kwargs:
+                    key += tuple(type(value) for _, value in sorted_items)
+
+            result = cache.get(key, default=ENOVAL, retry=True)
+
+            if result is ENOVAL:
+                result = function(*args, **kwargs)
+                cache.set(key, result, expire=expire, tag=tag, retry=True)
+
+            return result
+
+        return wrapper
+
+    return decorator
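
The wrapper builds a flat tuple as the cache key. An illustration inferred from the code above (the module name `m` and function `f` are hypothetical):

```python
# Key layout built by wrapper() above for a call f(3, b=4), assuming f
# lives in module 'm' (note that __module__ and __qualname__ are simply
# concatenated, with no separator, by the code above):
#   ('mf', 3, ENOVAL, 'b', 4)            # kwargs sorted after the sentinel
#   ('mf', 3, ENOVAL, 'b', 4, int, int)  # with typed=True, types appended
```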

+1348 -0  diskcache/persistent.py
File diff suppressed because it is too large.


+78 -0  diskcache/stampede.py

+"Stampede barrier implementation."
+
+import functools as ft
+import math
+import random
+import tempfile
+import time
+
+from .core import Cache, ENOVAL
+
+
+class StampedeBarrier(object):
+    """Stampede barrier mitigates cache stampedes.
+
+    Cache stampedes are also known as dog-piling, cache miss storm, cache
+    choking, or the thundering herd problem.
+
+    Based on research by Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015),
+    Optimal Probabilistic Cache Stampede Prevention,
+    VLDB, pp. 886-897, ISSN 2150-8097
+
+    Example:
+
+    ```python
+    stampede_barrier = StampedeBarrier('/tmp/user_data', expire=3)
+
+    @stampede_barrier
+    def load_user_info(user_id):
+        return database.lookup_user_info_by_id(user_id)
+    ```
+
+    """
+    # pylint: disable=too-few-public-methods
+    def __init__(self, cache=None, expire=None):
+        if isinstance(cache, Cache):
+            pass
+        elif cache is None:
+            cache = Cache(tempfile.mkdtemp())
+        else:
+            cache = Cache(cache)
+
+        self._cache = cache
+        self._expire = expire
+
+    def __call__(self, func):
+        cache = self._cache
+        expire = self._expire
+
+        @ft.wraps(func)
+        def wrapper(*args, **kwargs):
+            "Wrapper function to cache function result."
+            key = (args, kwargs)
+
+            try:
+                result, expire_time, delta = cache.get(
+                    key, default=ENOVAL, expire_time=True, tag=True
+                )
+
+                if result is ENOVAL:
+                    raise KeyError
+
+                now = time.time()
+                ttl = expire_time - now
+
+                if (-delta * math.log(random.random())) < ttl:
+                    return result
+
+            except KeyError:
+                pass
+
+            now = time.time()
+            result = func(*args, **kwargs)
+            delta = time.time() - now
+            cache.set(key, result, expire=expire, tag=delta)
+
+            return result
+
+        return wrapper
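
The guard `-delta * math.log(random.random()) < ttl` implements probabilistic early recomputation: the function's own runtime (`delta`, stored as the tag) scales an exponential random variable, so a slow function near its expiry is increasingly likely to be recomputed by a single caller before every caller misses at once. A small sketch of that probability (the numbers are illustrative):

```python
import math, random

delta, ttl = 2.0, 0.5  # slow recompute (2 s) with 0.5 s of ttl remaining
trials = 10000
early = sum(-delta * math.log(random.random()) >= ttl
            for _ in range(trials))
# Analytically, P(early recompute) = exp(-ttl / delta) ~= 0.78 here.
print("%.2f of calls would recompute early" % (float(early) / trials))
```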

+60 -0  mtwsgi.py

+'''
+WSGI-compliant HTTP server.  Dispatches requests to a pool of threads.
+https://github.com/RonRothman/mtwsgi
+'''
+
+from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
+import multiprocessing.pool
+
+__all__ = ['ThreadPoolWSGIServer', 'make_server']
+
+import bottle
+
+class ThreadPoolWSGIServer(WSGIServer):
+    '''WSGI-compliant HTTP server.  Dispatches requests to a pool of threads.'''
+
+    def __init__(self, thread_count=None, *args, **kwargs):
+        '''If 'thread_count' == None, we'll use multiprocessing.cpu_count() threads.'''
+        WSGIServer.__init__(self, *args, **kwargs)
+        self.thread_count = thread_count
+        self.pool = multiprocessing.pool.ThreadPool(self.thread_count)
+
+    # Inspired by SocketServer.ThreadingMixIn.
+    def process_request_thread(self, request, client_address):
+        try:
+            self.finish_request(request, client_address)
+            self.shutdown_request(request)
+        except:
+            self.handle_error(request, client_address)
+            self.shutdown_request(request)
+
+    def process_request(self, request, client_address):
+        self.pool.apply_async(self.process_request_thread, args=(request, client_address))
+
+
+def make_server(host, port, app, thread_count=None, handler_class=WSGIRequestHandler):
+    '''Create a new WSGI server listening on `host` and `port` for `app`'''
+    httpd = ThreadPoolWSGIServer(thread_count, (host, port), handler_class)
+    httpd.set_app(app)
+    return httpd
+
+
+class MTServer(bottle.ServerAdapter):
+    def run(self, handler):
+        thread_count = self.options.pop('thread_count', None)
+        server = make_server(self.host, self.port, handler, thread_count, **self.options)
+        try:
+            server.serve_forever()
+        except KeyboardInterrupt:
+            server.server_close()  # Prevent ResourceWarning: unclosed socket
+            raise
+
+if __name__ == '__main__':
+    from wsgiref.simple_server import demo_app
+    httpd = make_server('', 8000, demo_app)
+    sa = httpd.socket.getsockname()
+    print "Serving HTTP on", sa[0], "port", sa[1], "..."
+    import webbrowser
+    webbrowser.open('http://localhost:8000/xyz?abc')
+    httpd.serve_forever()
+

+45 -9  playstreamproxy.py

@@ ... @@
 StreamProxy daemon (based on Livestream daemon)
 Provides API to ContetSources + stream serving to play via m3u8 playlists
 """
-import os
-import sys
-import time
+import os, sys, time, re, json
+import ConfigParser
 import atexit
-import re, json
-import binascii
-
 from signal import SIGTERM
 
 from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
 from SocketServer import ThreadingMixIn
+
 from urllib import unquote, quote
 import urllib,urlparse
-#import cookielib,urllib2
 import requests
+
+#import cookielib,urllib2
 from ContentSources import ContentSources
 from sources.SourceBase import stream_type
 import util
@@ ... @@
 
 HOST_NAME = ""
 PORT_NUMBER = 8880
-DEBUG = False
+DEBUG = True
 DEBUG2 = False
+REDIRECT = True
 
 SPLIT_CHAR = "~"
 SPLIT_CODE = "%7E"
@@ ... @@
 cfg_file = "streams.cfg"
 sources = ContentSources("", cfg_file)
 
+config = ConfigParser.ConfigParser()
+proxy_cfg_file = os.path.join(cur_directory, "playstreamproxy.cfg")
+if not os.path.exists(proxy_cfg_file):
+    # Python 2 ConfigParser has no mapping interface; use set() with string
+    # values and write via a real file handle (the original item-assignment
+    # and open.file() calls would raise at runtime).
+    config.add_section("default")
+    config.set("default", "port", "80")
+    config.set("default", "host", "localhost")
+    config.set("default", "redirect", "True")
+    with open(proxy_cfg_file, "w") as f:
+        config.write(f)
+else:
+    config.read(proxy_cfg_file)
+
+
 class StreamHandler(BaseHTTPRequestHandler):
 
     def do_HEAD(self):
@@ ... @@
         self.wfile.write(open("offline.mp4", "rb").read())
         #self.wfile.close()
 
+    def redirect_source(self, urlp):
+        cmd, data, headers, qs = streamproxy_decode3(urlp)
+        streams = sources.get_streams(data)
+        if not streams:
+            self.write_error(500)  # TODO
+            return
+        stream = streams[0]
+        url = stream["url"]
+        headers = stream["headers"] if "headers" in stream else headers0
+        self.send_response(307)
+        self.send_header("Location", url)
+        self.end_headers()
+
+
     def fetch_source(self, urlp):
         cmd, data, headers, qs = streamproxy_decode3(urlp)
         if DEBUG:
@@ ... @@
             else:
                 url = urlp.replace(base_data, slinks[base_data]["base_url"])
                 if DEBUG: print "Existing new link", url
+        if REDIRECT:
+            print "-->redirect to: " + url
+            self.send_response(307)
+            self.send_header("Location", url)
+            self.end_headers()
+            #self.wfile.close()
+            return
+
         headers2 = headers if headers else self.headers.dict
         headers2 = del_headers(headers2, ["host"])
         r = self.get_page_ses(url,ses,True,headers = headers2)
@@ ... @@
 class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
     """Handle requests in a separate thread."""
 
-def start(host = HOST_NAME, port = PORT_NUMBER):
+def start(host = HOST_NAME, port = PORT_NUMBER, redirect=None):
+    global REDIRECT
+    if redirect:
+        REDIRECT = redirect
     httpd = ThreadedHTTPServer((host, port), StreamHandler)
     print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
     try:
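
With the new `REDIRECT` flag enabled, `fetch_source` answers with a `307` pointing at the resolved upstream URL instead of proxying the bytes itself. A client-side sketch, assuming the proxy is running locally on the default port:

```python
import requests

# Example playstream URL taken from the comments in playstreamproxy2.py.
url = ("http://localhost:8880/playstream/"
       "ltc%3A%3Acontent%2Flive-streams%2F101%3Finclude%3Dquality/")
r = requests.get(url, allow_redirects=False)
print(r.status_code)              # 307 when REDIRECT is enabled
print(r.headers.get("Location"))  # the resolved upstream stream URL
```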

+152 -0  playstreamproxy2.py

+#!/bin/env python
+# -*- coding: utf-8 -*-
+"""
+PlayStreamProxy server
+Provides API to ContetSources + stream serving to play via m3u8 playlists
+"""
+
+import os, sys, time
+import urllib,urlparse
+from urllib import unquote, quote
+#import cookielib,urllib2
+import requests
+import re, json
+from diskcache import Cache
+
+import bottle
+from bottle import Bottle, hook, response, route, request, run
+
+#if not sys.platform == 'win32':
+import daemonize
+
+from ContentSources import ContentSources
+from sources.SourceBase import stream_type
+import util
+from util import streamproxy_decode3, streamproxy_encode3
+
+try:
+    from requests.packages.urllib3.exceptions import InsecureRequestWarning
+    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+except:
+    pass
+
+# Default arguments
+SERVER = "wsgiref"
+PORT_NUMBER = 8880
+DEBUG = False
+
+cunicode = lambda s: s.decode("utf8") if isinstance(s, str) else s
+cstr = lambda s: s.encode("utf8") if isinstance(s, unicode) else s
+headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
+
+headers0 = headers2dict("""
+User-Agent: GStreamer souphttpsrc libsoup/2.52.2
+icy-metadata: 1
+""")
+headers0_ = headers2dict("""
+Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
+User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36
+""")
+
+cur_directory = os.path.dirname(os.path.realpath(__file__))
+slinks = {}
+sessions = {}
+
+cfg_file = "streams.cfg"
+sources0 = ContentSources("", cfg_file)
+
+cache_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "cache")
+s0 = Cache(cache_dir)
+s0["n"] = 0
+s0["sources"] = sources0
+
+app = Bottle()
+
+@app.hook('before_request')
+def set_globals():
+    global s, sources, cmd, data, headers, qs
+    cmd, data, headers, qs = streamproxy_decode3(request.url)
+    cache_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "cache")
+    s = Cache(cache_dir)
+    sources = s["sources"]
+
+# http://localhost:8880/playstream/ltc%3A%3Acontent%2Flive-streams%2F101%3Finclude%3Dquality/
+#@app.route('/')
+@app.route('/playstream/<url:re:.*>')
+def fetch_source(url):
+    global s, sources, cmd, data, headers, qs
+    return 'cmd=%s, data=%s, qs=%s' % (cmd, data, str(qs))
+
+
+
+####################################################################
+# Run WSGI server
+def start(server, port):
+    options = {}
+    if server == "mtwsgi":
+        import mtwsgi
+        server = mtwsgi.MTServer
+        options = {"thread_count": 3,}
+
+    run(app=app, server=server, host='0.0.0.0',
+            port=port,
+            #reloader=True,
+            quiet=False,
+            plugins=None,
+            debug=True,
+            #thread_count=3,
+            config=None,
+            **options)
+
+def print_headers(headers):
+    for h in headers:
+        print "%s: %s" % (h, headers[h])
+
+def del_headers(headers0, tags):
+    headers = headers0.copy()
+    for t in tags:
+        if t in headers:
+            del headers[t]
+        if t.lower() in headers:
+            del headers[t.lower()]
+    return headers
+
+def hls_base(url):
+    base = url.split("?")[0]
+    base = "/".join(base.split("/")[0:3]) + "/"
+    rest = url.replace(base, "")
+    return base
+
+def hls_base2(url):
+    base = url.split("?")[0]
+    base = "/".join(base.split("/")[0:-1]) + "/"
+    rest = url.replace(base, "")
+    return base
+
+
+if __name__ == '__main__':
+    if len(sys.argv) > 1:
+        pid = "/var/run/playstreamproxy2.pid"
+        daemon = daemonize.Daemon(start, pid)
+        server = sys.argv[2] if len(sys.argv) > 2 else SERVER
+        port = sys.argv[3] if len(sys.argv) > 3 else PORT_NUMBER
+        if "start" == sys.argv[1]:
+            daemon.start(server, port)
+            daemon.is_running()
+        elif "stop" == sys.argv[1]:
+            daemon.stop()
+        elif "restart" == sys.argv[1]:
+            daemon.restart()
+            daemon.is_running()
+        elif "manualstart" == sys.argv[1]:
+            start(server, port)
+        elif "status" == sys.argv[1]:
+            daemon.is_running()
+        else:
+            print "Unknown command"
+            print "usage: %s start|stop|restart|manualstart|status" % sys.argv[0]
+            sys.exit(2)
+        sys.exit(0)
+    else:
+        print "usage: %s start|stop|restart|manualstart|status" % sys.argv[0]
+        sys.exit(2)
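
The `__main__` block reuses the generic `Daemon` from daemonize.py, passing `start` through as the `app` callable; `manualstart` runs in the foreground. A sketch of the equivalent direct call, assuming the project's dependencies and streams.cfg are in place (server name and port as defaulted in the source):

```python
# Equivalent of: python playstreamproxy2.py manualstart mtwsgi 8880
import playstreamproxy2

# Serves the Bottle app on 0.0.0.0:8880 using the thread-pool backend.
playstreamproxy2.start("mtwsgi", 8880)
```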

+633 -695  project.wpr
File diff suppressed because it is too large.


+1270 -0  sources/ltc2.py
File diff suppressed because it is too large.


+1 -0  sources/streams.cfg

@@ ... @@
 MoviePlace.lv|movieplace::home|movieplace.png|Movieplace.lv - filmas latviesu valodā
 Engima2|enigma2::home|enigma2.png|Get streams from engima2 sat receiver
 test|http://mfe1-iptv.baltcom.lv/dash/CH_7_50.ism/playlist.mpd||testa strīms
+Shortcut.lv (lattelecom.tv)|ltc2::home|shortcut.png|Shortcut.lv (lattelecom.tv) satura skatīšanās
 
 [my_tv]
 My Tv

+23 -0  test_url.py

+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import time, datetime, pytz
+from datetime import datetime as dt
+from util import streamproxy_encode3, streamproxy_decode3
+
+dt2ts = lambda dt: int(time.mktime(dt.timetuple()))
+ts2dt = lambda ts: datetime.datetime.fromtimestamp(ts)
+
+# "20190302203000 +0200" panorāma
+print dt2ts(dt.now())
+print dt2ts(dt(2019, 3, 2, 20, 30))
+#print urlp
+# ltc::content/live-streams/101?include=quality
+print streamproxy_encode3("playstream", "ltc::content/catchup/101?start=1551551400")
+# http://localhost:8880/playstream/ltc%3A%3Acontent%2Fcatchup%2F101%3Fstart%3D1551551400/
+
+print streamproxy_encode3("playstream", "replay::tiesraide/ltv1")
+print streamproxy_encode3("playstream", "tvdom::tiesraides/ltv1/")
+print streamproxy_encode3("playstream", "tvplay::asset/10311641")
+print streamproxy_encode3("playstream", "xtv::rigatv24/video/Zw4pVPqr7OX-festivals_mainam_pasauli_sakam_ar_sevi")
+print streamproxy_encode3("playstream", "iplayer::episodes/b094f49s")
+#cmd, data, headers, qs = streamproxy_decode3(urlp)

+4 -3  util.py

@@ ... @@
 
 def streamproxy_decode3(urlp):
     m = re.search("http://[^/]+",urlp)
-    path = urlp.replace(m.group(0),"") if m else urlp
+    urlp = urlp.replace(m.group(0),"") if m else urlp
+    path = urlp.split("?")[0]
     cmd = path.split("/")[1]
-    data = path.split("/")[2].split("?")[0]
+    data = "/".join(path.split("/")[2:])
     data = urllib.unquote_plus(data)
-    qs = path.split("?")[1] if "?" in path else ""
+    qs = urlp.split("?")[1] if "?" in urlp else ""
     qs2 = {}
     headers = {}
     if qs:
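
The effect of the change: the host prefix is stripped from `urlp` itself, `data` now keeps the whole path after the command segment (not just one segment), and the query string is taken from the full URL rather than the truncated path. A hedged illustration, with the expected values inferred from the visible lines:

```python
from util import streamproxy_decode3

urlp = ("http://localhost:8880/playstream/"
        "ltc%3A%3Acontent%2Flive-streams%2F101%3Finclude%3Dquality/")
cmd, data, headers, qs = streamproxy_decode3(urlp)
# cmd  -> 'playstream'
# data -> 'ltc::content/live-streams/101?include=quality/'  (full unquoted remainder)
# qs   -> ''  (the '?' here is percent-encoded, so there is no raw query string)
```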