Enigma2 plugin to play various online streams (mostly Latvian).
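
A quick way to exercise a source module like the one below from the command line (a sketch: the file name filmix.py is an assumption, and the module expects its SourceBase and util helpers to be importable and a Python 2 interpreter):

    python filmix.py                  # lists the "home" menu
    python filmix.py movies
    python filmix.py "filmix::series"

Each run prints the (title, data, img, desc) tuples returned by Source.get_content() for the given address.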

#!/usr/bin/env python
# coding=utf8
#
# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
#
try:
    import json
except ImportError:
    import simplejson as json
import urllib2, urllib
import datetime, re, sys, os
import ConfigParser
from SourceBase import SourceBase
import base64
from collections import OrderedDict

# Parse a raw header block (one "Name: value" pair per line) into a dict
headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
import util

class Source(SourceBase):

    def __init__(self, country=""):
        self.name = "filmix"
        self.title = "Filmix.net"
        self.img = "http://cs5324.vk.me/g33668783/a_903fcc63.jpg"
        self.desc = "Filmix.net satura skatīšanās"
        self.country = country
        self.headers = headers2dict("""
Host: filmix.net
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
""")
        self.headers2 = headers2dict("""
User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
X-Requested-With: XMLHttpRequest
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
""")
        self.url = "http://filmix.net/"
        #self.login()

    def login(self, user="", password=""):
        return True
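
    # Content items are addressed by strings of the form "filmix::<path>[?s=<season>&e=<episode>]",
    # e.g. "filmix::movies" or "filmix::play/<id>?s=1&e=2". get_content() returns either a list of
    # (title, data, img, desc) tuples (a menu or listing) or a single such tuple when the item is
    # directly playable, in which case data holds the stream URL.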
    def get_content(self, data):
        print "[filmix] get_content:", data
        if "::" in data:
            data = data.split("::")[1]
        path = data.split("?")[0]
        clist = path.split("/")[0]
        params = data[data.find("?"):] if "?" in data else ""
        qs = dict(map(lambda x: x.split("="), re.findall("[%\w]+=\w+", params)))
        lang = qs["lang"] if "lang" in qs else self.country

        content = []
        content.append(("..return", "back", "", "Return back"))

        if clist == "home":
            content.extend([
                ("Search", "filmix::search/{0}", "", "Search"),
                ("Movies", "filmix::movies", "", "Movies"),
                ("Series", "filmix::series", "", "TV Series"),
                ("Cartoons", "filmix::cartoons", "", "Cartoons"),
            ])
            return content

        #elif clist == "search":
        #    # TODO
        #    return content

        ### Sub-categories scraped from the site menu (movies / series / cartoons) ###
        elif data in ("movies", "series", "cartoons"):
            r = self.call("")
            r = r.decode("cp1251").encode("utf8")
            if data == "movies":
                sname = "Фильмы"
            elif data == "series":
                sname = "Сериалы"
            else:
                sname = "Мультфильмы"
            # <span class="menu-title">Фильмы</span>
            m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span' % sname, r, re.DOTALL|re.UNICODE)
            if not m:
                return content
            r2 = m.group(1)
            result = re.findall(r'<a href="http://filmix\.net/([^"]+)".*?>([^<]+)</a>', r2, re.DOTALL)
            for item in result:
                if "catalog" in item[0]:
                    continue
                title = item[1]
                data2 = item[0]
                img = self.img
                desc = title
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ## Play a video / series episode
        elif clist == "play":
            r = self.call(path)
            r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)', r, re.DOTALL)
            img = m.group(1) if m else self.img
            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
            duration = "(%s)" % m.group(1) if m else ""
            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
            #url2 = re.search('<div id="play-descr" class="title-page">.+?<a href="([^"]+)"', r, re.DOTALL).group(1)
            #r2 = self._http_request(url2)
            m = re.search("videoLink = '([^']+)'", r, re.DOTALL)
            if not m:
                raise Exception("Can not find video link")
            video_link = m.group(1)
            if video_link != '{video-link}':
                # Single movie - decode the obfuscated link and return one playable item
                video_link = self.decode_uppod_text(video_link)
                streams = self.get_streams2(video_link)
                data2 = streams[0][1]
                content = (title, data2, img, desc)
                return content
            else:
                # Series - fetch and decode the playlist, then drill down by season/episode
                m = re.search("plLink = '([^']+)'", r, re.DOTALL)
                if not m:
                    raise Exception("Can not find playlist link")
                pl_link = m.group(1)
                pl_link = self.decode_uppod_text(pl_link)
                js = self._http_request(pl_link)
                js = self.decode_uppod_text(js)
                js = json.loads(js)
                if "s" in qs and "e" in qs:
                    # Season and episode given - return the playable episode
                    s = int(qs["s"])
                    e = int(qs["e"])
                    serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
                    title = title0 + " - " + serie
                    data2 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
                    streams = self.get_streams2(data2)
                    data2 = streams[0][1]
                    desc = serie + "\n" + desc0
                    content = (title, data2, img, desc)
                    return content
                elif "s" in qs:
                    # Only the season given - list its episodes
                    s = int(qs["s"])
                    for i, ep in enumerate(js["playlist"][s-1]["playlist"]):
                        title = title0 + " - " + js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
                        serie = js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
                        data2 = data + "&e=%s" % (i + 1)
                        desc = serie + "\n" + desc0
                        content.append((title, self.name + "::" + data2, img, desc))
                else:
                    # No season given - list the seasons
                    for i, ep in enumerate(js["playlist"]):
                        title = title0 + " - " + js["playlist"][i]["comment"].encode("utf8")
                        serie = js["playlist"][i]["comment"].encode("utf8")
                        data2 = data + "?s=%s" % (i + 1)
                        desc = serie + "\n" + desc0
                        content.append((title, self.name + "::" + data2, img, desc))
                return content
            #r = self._http_request(url)

        ### Listing page ###
        else:
            r = self.call(data)
            r = r.decode("cp1251").encode("utf8")
            for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
                m = re.search(r'<a href="http://filmix\.net/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
                if not m:
                    continue
                vid = m.group(1)
                data2 = "play/%s" % vid
                #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
                title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
                m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
                if m:
                    title = title + "/" + m.group(1)
                m = re.search('<img src="([^"]+.jpg)"', r2, re.DOTALL)
                img = "http://filmix.net" + m.group(1) if m else self.img
                m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
                if m:
                    year = m.group(1)
                    title = "%s (%s)" % (title, year)
                title = util.unescape(title)
                genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
                genre = ",".join(genre)
                m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
                desc0 = util.unescape(m.group(1)) if m else ""
                m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
                quality = m.group(1) if m else ""
                actors = re.findall('itemprop="actor">([^<]+)<', r2, re.DOTALL)
                actors = ",".join(actors)
                desc = "%s\n%s\n%s\n%s\n%s" % (title, genre, desc0, actors, quality)
                content.append((title, self.name + "::" + data2, img, desc))
            # Pagination - add a "Next page" entry when the site shows navigation
            if '<div class="navigation">' in r:
                m = re.search(r'href="http://filmix\.net/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
                if m:
                    data2 = m.group(1)
                else:
                    m = re.search("/page/(\d)+", data)
                    if m:
                        page = int(m.group(1)) + 1
                        data2 = re.sub("/page/(\d)+", "/page/%s" % page, data)
                    else:
                        data2 = data + "/page/2"
                content.append(("Next page", self.name + "::" + data2, self.img, "Next page"))
            return content
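
    # A data string is a video (directly playable) when it points to a concrete episode
    # ("play/<id>?s=<season>&e=<episode>") or to a single movie whose page carries a real
    # videoLink; otherwise it is a container to be browsed via get_content().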
    def is_video(self, data):
        if "::" in data:
            data = data.split("::")[1]
        path = data.split("?")[0]
        clist = path.split("/")[0]
        params = data[data.find("?"):] if "?" in data else ""
        if clist == "play" and "s=" in data and "e=" in data:
            return True
        elif clist == "play" and not params:
            r = self.call(path)
            #r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search("videoLink = '([^']+)'", r, re.DOTALL)
            if not m:
                raise Exception("Can not find video link")
            video_link = m.group(1)
            if video_link == '{video-link}':
                return False
            else:
                return True
        else:
            return False

    def call(self, data, params=None, headers=None, lang=""):
        if not headers:
            headers = self.headers
        url = self.url + data
        result = self._http_request(url, params, headers=headers)
        return result
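
    # The uppod player obfuscates links with a simple substitution cipher over the
    # base64 alphabet: each character from Client_codec_b is swapped with the character
    # at the same index in Client_codec_a, after which the string is plain base64.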
    def decode_uppod_text(self, text):
        Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
        Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
        text = text.replace("\n", "").strip()
        for i in range(len(Client_codec_a)):
            char1 = Client_codec_b[i]
            char2 = Client_codec_a[i]
            text = text.replace(char1, "___")
            text = text.replace(char2, char1)
            text = text.replace("___", char2)
        result = base64.b64decode(text)
        print result
        return result
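
    # A decoded stream URL may contain a bracketed resolution list (for example a path
    # segment like "[480,720]"); expand it into one (resolution, url) pair per entry.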
    def get_streams2(self, url0):
        m = re.search("\[([\d,]+)]", url0)
        if not m:
            return [("?", url0)]
        res = m.group(1)
        streams = []
        for res in res.split(","):
            if not res:
                continue
            url = re.sub("\[[\d,]+]", res, url0)
            streams.append((res, url))
        return streams
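

# Simple command-line test harness: builds a Source and prints the content items
# returned for the address given as the first argument (defaults to "home").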
if __name__ == "__main__":
    country = "lv"
    c = Source(country)
    if len(sys.argv) > 1:
        data = sys.argv[1]
    else:
        data = "home"
    content = c.get_content(data)
    for item in content:
        print item
    #cat = api.get_categories(country)
    #chan = api.get_channels("lv")
    #prog = api.get_programs(channel=6400)
    #prog = api.get_programs(category=55)
    #seas = api.get_seasons(program=6453)
    #str = api.get_streams(660243)
    #res = api.get_videos(802)
    #formats = api.getAllFormats()
    #det = api.detailed("1516")
    #vid = api.getVideos("13170")
    pass