Enigma2 plugin to play various online streams (mostly Latvian).

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346
  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. try:
  9. import json
  10. except:
  11. import simplejson as json
  12. import urllib2, urllib
  13. import datetime, re, sys,os
  14. import ConfigParser
  15. from SourceBase import SourceBase
  16. import base64
  17. from collections import OrderedDict
  18. import sys
  19. try:
  20. import util
  21. except:
  22. sys.path.insert(0,'..')
  23. import util
  24. headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
  25. class Source(SourceBase):
  26. def __init__(self,country=""):
  27. self.name = "filmix"
  28. self.title = "filmix.me"
  29. self.img = "http://cs5324.vk.me/g33668783/a_903fcc63.jpg"
  30. self.desc = "filmix.me satura skatīšanās"
  31. self.country=country
  32. self.headers = headers2dict("""
  33. Host: filmix.me
  34. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  35. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  36. Accept-Language: en-US,en;q=0.5
  37. """)
  38. self.headers2 = headers2dict("""
  39. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  40. X-Requested-With: XMLHttpRequest
  41. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  42. """)
  43. self.url = "https://filmix.me/"
  44. #self.login()
  45. def login(self,user="",password=""):
  46. return True
  47. def get_content(self, data):
  48. print "[filmix] get_content:", data
  49. source, data, path, plist, clist, params, qs = self.parse_data(data)
  50. content=[]
  51. content.append(("..return", "back","","Return back"))
  52. if clist=="home":
  53. content.extend([
  54. ("Search", "filmix::search/{0}","","Search"),
  55. ("Movies", "filmix::movies","","Movies"),
  56. ("Series", "filmix::series","","TV Series"),
  57. ("Cartoons", "filmix::cartoons","","Cartoons"),
  58. ])
  59. return content
  60. #elif clist=="search":
  61. # TODO
  62. #return content
  63. elif data in ("movies","series","cartoons"):
  64. r = self.call("")
  65. r = r.decode("cp1251").encode("utf8")
  66. if data == "movies":
  67. sname = "Фильмы"
  68. elif data=="series":
  69. sname = "Сериалы"
  70. else:
  71. sname = "Мультфильмы"
  72. # <span class="menu-title">Фильмы</span>
  73. m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span'%sname, r, re.DOTALL|re.UNICODE)
  74. if not m: return content
  75. r2 = m.group(1)
  76. result = re.findall(r'<a .*?href="https://filmix\.me/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
  77. for item in result:
  78. if "catalog" in item[0]: continue
  79. title = item[1]
  80. data2 = item[0]
  81. img = self.img
  82. desc = title
  83. content.append((title,self.name+"::"+data2,img,desc))
  84. return content
  85. ## Seriāls
  86. elif clist=="play":
  87. r = self.call(path)
  88. r = r.decode("cp1251").encode("utf8")
  89. title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
  90. m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
  91. img = m.group(1) if m else self.img
  92. m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
  93. duration = "(%s)"%m.group(1) if m else ""
  94. m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
  95. desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
  96. vid = plist[-1]
  97. js = self.get_movie_info(vid)
  98. translations = js["message"]["translations"]["flash"]
  99. for pl in translations:
  100. if translations[pl].startswith("http"):
  101. continue
  102. pl_link = translations[pl]
  103. break
  104. else:
  105. raise Exception("No episodes list found!")
  106. #pl_link = js["message"]["translations"]["flash"].values()[0]
  107. # TODO process several players, currently taking the first
  108. if not pl_link.startswith("http"):
  109. pl_link = self.decode_uppod_text(pl_link)
  110. js = self._http_request(pl_link)
  111. js = self.decode_uppod_text(js)
  112. js = json.loads(js)
  113. if "s" in qs:
  114. s = int(qs["s"])
  115. for i,ep in enumerate(js["playlist"][s-1]["playlist"]):
  116. title = title0+" - "+js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
  117. serie = js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
  118. data2 = data+"&e=%s"%(i+1)
  119. desc = serie +"\n"+desc0
  120. content.append((title,self.name+"::"+data2,img,desc))
  121. else:
  122. for i,ep in enumerate(js["playlist"]):
  123. title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
  124. serie = js["playlist"][i]["comment"].encode("utf8")
  125. data2 = data+"?s=%s"%(i+1)
  126. desc = serie +"\n"+desc0
  127. content.append((title,self.name+"::"+data2,img,desc))
  128. return content
  129. #r = self._http_request(url)
  130. ### saraksts ###
  131. else:
  132. r = self.call(data)
  133. r = r.decode("cp1251").encode("utf8")
  134. for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
  135. m = re.search(r'<a href="https://filmix\.me/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
  136. if not m: continue
  137. vid = m.group(1)
  138. data2 = "play/%s"%vid
  139. #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
  140. title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
  141. m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
  142. if m:
  143. title = title + "/"+m.group(1)
  144. m = re.search('<img src="([^"]+.jpg)"', r2, re.DOTALL)
  145. img = m.group(1) if m else self.img
  146. m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
  147. if m:
  148. year = m.group(1) if m else ""
  149. title = "%s (%s)"%(title,year)
  150. title = util.unescape(title)
  151. genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
  152. genre = ",".join(genre)
  153. m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
  154. desc0 = util.unescape(m.group(1)) if m else ""
  155. m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
  156. quality = m.group(1) if m else ""
  157. actors = re.findall('itemprop="actor">([^<]+)<', r2, re.DOTALL)
  158. actors = ",".join(actors)
  159. desc="%s\n%s\n%s\n%s\n%s"%(title,genre,desc0,actors,quality)
  160. content.append((title,self.name+"::"+data2,img,desc))
  161. if '<div class="navigation">' in r:
  162. m = re.search(r'href="https://filmix\.me/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
  163. if m:
  164. data2 = m.group(1)
  165. else:
  166. m = re.search("/page/(\d)+",data)
  167. if m:
  168. page = int(m.group(1))+1
  169. data2 = re.sub("/page/(\d)+", "/page/%s"%page, data)
  170. else:
  171. data2 = data + "/page/2"
  172. content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
  173. return content
  174. def is_video(self,data):
  175. source,data,path,plist,clist,params,qs = self.parse_data(data)
  176. if clist == "play" and "s=" in data and "e=" in data:
  177. return True
  178. elif clist=="play" and not params:
  179. r = self.call(path)
  180. #r = r.decode("cp1251").encode("utf8")
  181. m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
  182. if not m:
  183. raise Exception("Can not find video link")
  184. #return False
  185. video_link = m.group(1)
  186. if video_link=='{video-link}':
  187. return False
  188. else:
  189. return True
  190. else:
  191. return False
  192. def get_streams(self, data):
  193. print "[filmix] get_streams:", data
  194. source,data,path,plist,clist,params,qs = self.parse_data(data)
  195. r = self.call(path)
  196. if not r:
  197. return []
  198. streams = []
  199. r = r.decode("cp1251").encode("utf8")
  200. title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
  201. m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
  202. img = m.group(1) if m else self.img
  203. m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
  204. duration = "(%s)"%m.group(1) if m else ""
  205. m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
  206. desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
  207. m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
  208. if not m:
  209. # #raise Exception("Can not find video link")
  210. return []
  211. video_link = m.group(1)
  212. series = True if video_link == '{video-link}' else False
  213. vid = plist[1]
  214. js = self.get_movie_info(vid)
  215. if js["message"]["translations"]["flash"]:
  216. video_link = js["message"]["translations"]["flash"].values()[0].encode("utf8")
  217. video_link = self.decode_uppod_text(video_link)
  218. lang = js["message"]["translations"]["flash"].keys()[0].encode("utf8") # TODO process several players/streams
  219. else:
  220. return []
  221. if not series : # Filma
  222. url0 = video_link
  223. streams2 = self.get_streams2(url0)
  224. for st in streams2:
  225. stream = util.item()
  226. stream["url"]=st[1]
  227. stream["lang"]=lang
  228. stream["quality"]=st[0]
  229. stream["name"]= title
  230. stream["desc"]=desc
  231. streams.append(stream)
  232. return streams
  233. else: # Seriāls
  234. pl_link = video_link
  235. js = self._http_request(pl_link)
  236. js = self.decode_uppod_text(js)
  237. js = json.loads(js)
  238. if "s" in qs and "e" in qs:
  239. s = int(qs["s"])
  240. e = int(qs["e"])
  241. serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
  242. title = title0+" - "+ serie
  243. url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
  244. streams2 = self.get_streams2(url0)
  245. for st in streams2:
  246. stream = util.item()
  247. stream["url"]=st[1]
  248. stream["lang"]=lang
  249. stream["quality"]=st[0]
  250. stream["name"]= title
  251. stream["desc"]=desc
  252. streams.append(stream)
  253. return streams
  254. def call(self, data,params=None,headers=None,lang=""):
  255. if not headers: headers = self.headers
  256. url = self.url+data
  257. result = self._http_request(url,params,headers=headers)
  258. return result
  259. def get_movie_info(self,vid):
  260. headers = headers2dict("""
  261. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
  262. Accept: application/json, text/javascript, */*; q=0.01
  263. Accept-Language: en-US,en;q=0.5
  264. Content-Type: application/x-www-form-urlencoded; charset=UTF-8
  265. X-Requested-With: XMLHttpRequest
  266. Referer: https://filmix.me/play/%s
  267. """%vid )
  268. post_data = {"post_id":vid}
  269. r = util.post("https://filmix.me/api/movies/player_data", data=post_data, headers = headers)
  270. if not r:
  271. raise Exception("Can not get movie info")
  272. #return []
  273. js = json.loads(r)
  274. return js
  275. def decode_uppod_text(self, text):
  276. Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
  277. Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
  278. text = text.replace("\n", "").strip()
  279. for i in range(len(Client_codec_a)):
  280. char1 = Client_codec_b[i]
  281. char2 = Client_codec_a[i]
  282. text = text.replace(char1, "___")
  283. text = text.replace(char2, char1)
  284. text = text.replace("___", char2)
  285. result = base64.b64decode(text)
  286. print result
  287. return result
  288. def get_streams2(self,url0):
  289. m = re.search("\[([\d,]+)]",url0)
  290. if not m:
  291. return [("?",url0)]
  292. res = m.group(1)
  293. streams=[]
  294. for res in res.split(","):
  295. if not res: continue
  296. url=re.sub("\[[\d,]+]",res,url0)
  297. streams.append((res,url))
  298. return streams
  299. if __name__ == "__main__":
  300. c = Source()
  301. #s = "ZnVuY3Rpb24gc2VuZE1lc3NhZ2U2MDc3ODkoZSl7dmFyIGg9bWdfd3M2MDc3ODkub25tZXNzYWdlOyBtZ193czYwNzc4OS5yZWFkeVN0YXRlPT1tZ193czYwNzc4OS5DTE9TRUQmJihtZ193czYwNzc4OT1uZXcgV2ViU29ja2V0KG1nX3dzNjA3Nzg5X2xvY2F0aW9uKSksbWdfd3M2MDc3ODkub25tZXNzYWdlPWgsd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkobWdfd3M2MDc3ODksZnVuY3Rpb24oKXttZ193czYwNzc4OS5zZW5kKGUpfSl9ZnVuY3Rpb24gd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KXtzZXRUaW1lb3V0KGZ1bmN0aW9uKCl7cmV0dXJuIDE9PT1lLnJlYWR5U3RhdGU/dm9pZChudWxsIT10JiZ0KCkpOnZvaWQgd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KX0sNSl9OyB2YXIgbWdfd3M2MDc3ODlfbG9jYXRpb24gPSAid3NzOi8vd3NwLm1hcmtldGdpZC5jb20vd3MiOyBtZ193czYwNzc4OSA9IG5ldyBXZWJTb2NrZXQobWdfd3M2MDc3ODlfbG9jYXRpb24pLCBtZ193czYwNzc4OS5vbm1lc3NhZ2UgPSBmdW5jdGlvbiAodCkge3Bvc3RNZXNzYWdlKHQuZGF0YSk7fSwgb25tZXNzYWdlID0gZnVuY3Rpb24oZSl7c2VuZE1lc3NhZ2U2MDc3ODkoZS5kYXRhKX0="
  302. #txt = c.decode_uppod_text(s)
  303. if len(sys.argv)>1:
  304. data= sys.argv[1]
  305. else:
  306. data = "home"
  307. content = c.get_content(data)
  308. for item in content:
  309. print item
  310. #cat = api.get_categories(country)
  311. #chan = api.get_channels("lv")
  312. #prog = api.get_programs(channel=6400)
  313. #prog = api.get_programs(category=55)
  314. #seas = api.get_seasons(program=6453)
  315. #str = api.get_streams(660243)
  316. #res = api.get_videos(802)
  317. #formats = api.getAllFormats()
  318. #det = api.detailed("1516")
  319. #vid = api.getVideos("13170")
  320. pass