Enigma2 plugin to play various online streams (mostly Latvian).

filmix.py 12KB

#!/usr/bin/env python
# coding=utf8
#
# This file is part of PlayStream - enigma2 plugin to play video streams from various sources
# Copyright (c) 2016 ivars777 (ivars777@gmail.com)
# Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
#
try:
    import json
except ImportError:
    import simplejson as json
import urllib2, urllib
import datetime, re, sys, os
import ConfigParser
from SourceBase import SourceBase
import base64
from collections import OrderedDict

headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
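# headers2dict turns a raw header block into a dict, one "Name: value" pair per line,
# e.g. headers2dict("Host: filmix.net\nAccept-Language: en-US")
# -> {"Host": "filmix.net", "Accept-Language": "en-US"}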
import util

class Source(SourceBase):

    def __init__(self, country=""):
        self.name = "filmix"
        self.title = "Filmix.net"
        self.img = "http://cs5324.vk.me/g33668783/a_903fcc63.jpg"
        self.desc = "Filmix.net satura skatīšanās"
        self.country = country
        self.headers = headers2dict("""
            Host: filmix.net
            User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
            Accept-Language: en-US,en;q=0.5
        """)
        self.headers2 = headers2dict("""
            User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
            X-Requested-With: XMLHttpRequest
            Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
        """)
        self.url = "http://filmix.net/"
        #self.login()

    def login(self, user="", password=""):
        return True

    def get_content(self, data):
        print "[filmix] get_content:", data
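        # "data" is the plugin navigation string, e.g. "filmix::movies" or
        # "filmix::play/12345?s=1&e=2" (id illustrative); the optional
        # "filmix::" prefix is stripped and the rest split into path and query.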
        if "::" in data:
            data = data.split("::")[1]
        path = data.split("?")[0]
        clist = path.split("/")[0]
        params = data[data.find("?"):] if "?" in data else ""
        qs = dict(map(lambda x: x.split("="), re.findall("[%\w]+=\w+", params)))
        lang = qs["lang"] if "lang" in qs else self.country

        content = []
        content.append(("..return", "back", "", "Return back"))

        if clist == "home":
            content.extend([
                ("Search", "filmix::search/{0}", "", "Search"),
                ("Movies", "filmix::movies", "", "Movies"),
                ("Series", "filmix::series", "", "TV Series"),
                ("Cartoons", "filmix::cartoons", "", "Cartoons"),
            ])
            return content

        #elif clist=="search":
            # TODO
            #return content

        elif data in ("movies", "series", "cartoons"):
            r = self.call("")
            r = r.decode("cp1251").encode("utf8")
            if data == "movies":
                sname = "Фильмы"
            elif data == "series":
                sname = "Сериалы"
            else:
                sname = "Мультфильмы"
            m = re.search('<li><span class="menu-title">%s</span>(.+?)<li><span' % sname, r, re.DOTALL | re.UNICODE)
            if not m: return content
            r2 = m.group(1)
            result = re.findall(r'<a href="http://filmix\.net/([^"]+)".*?>([^<]+)</a>', r2, re.DOTALL)
            for item in result:
                if "catalog" in item[0]: continue
                title = item[1]
                data2 = item[0]
                img = self.img
                desc = title
                content.append((title, self.name + "::" + data2, img, desc))
            return content

        ## Playing a movie / series
        elif clist == "play":
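            # Player pages embed either a direct stream link ("videoLink") for a
            # single movie, or only the "{video-link}" placeholder plus an encoded
            # playlist link ("plLink") for series. For series the "s" (season) and
            # "e" (episode) query parameters pick a concrete item; without them a
            # season/episode menu is returned instead.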
            r = self.call(path)
            r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)', r, re.DOTALL)
            img = m.group(1) if m else self.img
            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
            duration = "(%s)" % m.group(1) if m else ""
            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
            #url2 = re.search('<div id="play-descr" class="title-page">.+?<a href="([^"]+)"', r, re.DOTALL).group(1)
            #r2 = self._http_request(url2)
            m = re.search("videoLink = '([^']+)'", r, re.DOTALL)
            if not m:
                raise Exception("Cannot find video link")
            video_link = m.group(1)

            if video_link != '{video-link}':  # single movie
                video_link = self.decode_uppod_text(video_link)
                streams = self.get_streams2(video_link)
                data2 = streams[0][1]
                content = (title, data2, img, desc)
                return content
            else:  # series - resolve the encoded playlist
                m = re.search("plLink = '([^']+)'", r, re.DOTALL)
                if not m:
                    raise Exception("Cannot find playlist link")
                pl_link = m.group(1)
                pl_link = self.decode_uppod_text(pl_link)
                js = self._http_request(pl_link)
                js = self.decode_uppod_text(js)
                js = json.loads(js)

                if "s" in qs and "e" in qs:  # concrete episode -> playable item
                    s = int(qs["s"])
                    e = int(qs["e"])
                    serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
                    title = title0 + " - " + serie
                    data2 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
                    streams = self.get_streams2(data2)
                    data2 = streams[0][1]
                    desc = serie + "\n" + desc0
                    content = (title, data2, img, desc)
                    return content
                elif "s" in qs:  # season selected -> list its episodes
                    s = int(qs["s"])
                    for i, ep in enumerate(js["playlist"][s-1]["playlist"]):
                        title = title0 + " - " + js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
                        serie = js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
                        data2 = data + "&e=%s" % (i+1)
                        desc = serie + "\n" + desc0
                        content.append((title, self.name + "::" + data2, img, desc))
                else:  # no season chosen yet -> list seasons
                    for i, ep in enumerate(js["playlist"]):
                        title = title0 + " - " + js["playlist"][i]["comment"].encode("utf8")
                        serie = js["playlist"][i]["comment"].encode("utf8")
                        data2 = data + "?s=%s" % (i+1)
                        desc = serie + "\n" + desc0
                        content.append((title, self.name + "::" + data2, img, desc))
                return content
            #r = self._http_request(url)

        ### listing ###
        else:
            r = self.call(data)
            r = r.decode("cp1251").encode("utf8")
            for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
                m = re.search(r'<a href="http://filmix\.net/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
                if not m: continue
                vid = m.group(1)
                data2 = "play/%s" % vid
                #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
                title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
                m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
                if m:
                    title = title + "/" + m.group(1)
                m = re.search('<img src="([^"]+.jpg)"', r2, re.DOTALL)
                img = "http://filmix.net" + m.group(1) if m else self.img
                m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
                if m:
                    year = m.group(1)
                    title = "%s (%s)" % (title, year)
                title = util.unescape(title)
                genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
                genre = ",".join(genre)
                m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
                desc0 = util.unescape(m.group(1)) if m else ""
                m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
                quality = m.group(1) if m else ""
                actors = re.findall('itemprop="actor">([^<]+)<', r2, re.DOTALL)
                actors = ",".join(actors)
                desc = "%s\n%s\n%s\n%s\n%s" % (title, genre, desc0, actors, quality)
                content.append((title, self.name + "::" + data2, img, desc))

            if '<div class="navigation">' in r:
                m = re.search(r'href="http://filmix\.net/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
                if m:
                    data2 = m.group(1)
                else:
                    # no explicit "next" link - bump the /page/N suffix manually
                    m = re.search(r"/page/(\d+)", data)
                    if m:
                        page = int(m.group(1)) + 1
                        data2 = re.sub(r"/page/\d+", "/page/%s" % page, data)
                    else:
                        data2 = data + "/page/2"
                content.append(("Next page", self.name + "::" + data2, self.img, "Next page"))

            return content

    def is_video(self, data):
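        # True only when "data" already resolves to something directly playable:
        # a concrete episode (both s= and e= present) or a single-movie page
        # whose videoLink is a real link rather than the "{video-link}" placeholder.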
        if "::" in data:
            data = data.split("::")[1]
        path = data.split("?")[0]
        clist = path.split("/")[0]
        params = data[data.find("?"):] if "?" in data else ""
        if clist == "play" and "s=" in data and "e=" in data:
            return True
        elif clist == "play" and not params:
            r = self.call(path)
            #r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search("videoLink = '([^']+)'", r, re.DOTALL)
            if not m:
                raise Exception("Cannot find video link")
            video_link = m.group(1)
            if video_link == '{video-link}':
                return False
            else:
                return True
        else:
            return False

    def call(self, data, params=None, headers=None, lang=""):
        if not headers: headers = self.headers
        url = self.url + data
        result = self._http_request(url, params, headers=headers)
        return result

    def decode_uppod_text(self, text):
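        # uppod player obfuscation: a substitution cipher over the base64
        # alphabet - each character of Client_codec_b is swapped with the
        # character at the same index in Client_codec_a, after which the
        # string is ordinary base64 and can be decoded.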
        Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
        Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
        text = text.replace("\n", "").strip()
        for i in range(len(Client_codec_a)):
            char1 = Client_codec_b[i]
            char2 = Client_codec_a[i]
            text = text.replace(char1, "___")
            text = text.replace(char2, char1)
            text = text.replace("___", char2)
        result = base64.b64decode(text)
        print result
        return result

    def get_streams2(self, url0):
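        # Expand a resolution-templated link into one (quality, url) pair per
        # resolution, e.g. (illustrative URL) "http://host/video_[480,720].mp4"
        # -> [("480", "http://host/video_480.mp4"), ("720", "http://host/video_720.mp4")]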
        res = re.search(r"\[([\d,]+)\]", url0).group(1)
        streams = []
        for res in res.split(","):
            if not res: continue
            url = re.sub(r"\[[\d,]+\]", res, url0)
            streams.append((res, url))
        return streams

if __name__ == "__main__":
    country = "lv"
    c = Source(country)
    if len(sys.argv) > 1:
        data = sys.argv[1]
    else:
        data = "home"
    content = c.get_content(data)
    for item in content:
        print item
    #cat = api.get_categories(country)
    #chan = api.get_channels("lv")
    #prog = api.get_programs(channel=6400)
    #prog = api.get_programs(category=55)
    #seas = api.get_seasons(program=6453)
    #str = api.get_streams(660243)
    #res = api.get_videos(802)
    #formats = api.getAllFormats()
    #det = api.detailed("1516")
    #vid = api.getVideos("13170")
    pass
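
For a quick standalone check outside Enigma2, the module can also be exercised from another Python 2 script. This is only a sketch: it assumes SourceBase and util are importable from the plugin directory, and the play id below is illustrative.

    from filmix import Source

    src = Source("lv")
    for title, data, img, desc in src.get_content("home"):
        print title, data                             # top-level menu entries
    print src.get_content("filmix::movies")[:3]       # follow a returned data string
    print src.is_video("filmix::play/12345?s=1&e=2")  # -> True (season+episode given)

get_content() returns (title, data, image, description) tuples; the data string carries the navigation state and is passed back in on the next call.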