Plugin to play various online streams (mostly Latvian)

filmix.py
  1. #!/usr/bin/env python
  2. # coding=utf8
  3. #
  4. # This file is part of PlayStream - enigma2 plugin to play video streams from various sources
  5. # Copyright (c) 2016 ivars777 (ivars777@gmail.com)
  6. # Distributed under the GNU GPL v3. For full terms see http://www.gnu.org/licenses/gpl-3.0.en.html
  7. #
  8. try:
  9. import json
  10. except:
  11. import simplejson as json
  12. import urllib2, urllib
  13. import datetime, re, sys,os
  14. import ConfigParser
  15. from SourceBase import SourceBase
  16. import base64
  17. from collections import OrderedDict
  18. import sys
  19. try:
  20. import util
  21. except:
  22. sys.path.insert(0,'..')
  23. import util
  24. headers2dict = lambda h: dict([l.strip().split(": ") for l in h.strip().splitlines()])
  25. class Source(SourceBase):
  26. def __init__(self,country=""):
  27. self.name = "filmix"
  28. self.title = "filmix.me"
  29. self.img = "http://cs5324.vk.me/g33668783/a_903fcc63.jpg"
  30. self.desc = "filmix.me satura skatīšanās"
  31. self.country=country
  32. self.headers = headers2dict("""
  33. Host: filmix.me
  34. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  35. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  36. Accept-Language: en-US,en;q=0.5
  37. """)
  38. self.headers2 = headers2dict("""
  39. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0
  40. X-Requested-With: XMLHttpRequest
  41. Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
  42. """)
  43. self.url = "https://filmix.me/"
  44. #self.login()
  45. def login(self,user="",password=""):
  46. return True
    def get_content(self, data):
        # Build the directory listing for the virtual path in *data*.
        # Returns a list of (title, data, img, desc) tuples consumed by the UI.
        print "[filmix] get_content:", data
        source, data, path, plist, clist, params, qs = self.parse_data(data)
        content=[]
        content.append(("..return", "back","","Return back"))
        # Top-level menu.
        if clist=="home":
            content.extend([
                ("Search", "filmix::search/{0}","","Search"),
                ("Movies", "filmix::movies","","Movies"),
                ("Series", "filmix::series","","TV Series"),
                ("Cartoons", "filmix::cartoons","","Cartoons"),
            ])
            return content
        #elif clist=="search":
            # TODO
            #return content
        # Category menus: scrape the site menu for sub-category links.
        elif data in ("movies","series","cartoons"):
            r = self.call("")
            # Site pages are cp1251-encoded; normalize to utf8.
            r = r.decode("cp1251").encode("utf8")
            if data == "movies":
                sname = "Фильмы"
            elif data=="series":
                sname = "Сериалы"
            else:
                sname = "Мультфильмы"
            # <span class="menu-title">Фильмы</span>
            m = re.search('<span class="menu-title">%s</span>(.+?)<li>\s+?<span'%sname, r, re.DOTALL|re.UNICODE)
            if not m: return content
            r2 = m.group(1)
            result = re.findall(r'<a .*?href="https://filmix\.me/([^"]+)".*?>([^<]+)</', r2, re.DOTALL)
            for item in result:
                if "catalog" in item[0]: continue
                title = item[1]
                data2 = item[0]
                img = self.img
                desc = title
                content.append((title,self.name+"::"+data2,img,desc))
            return content
        ## TV series: list seasons/episodes of one title.
        elif clist=="play":
            r = self.call(path)
            r = r.decode("cp1251").encode("utf8")
            title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
            m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
            img = m.group(1) if m else self.img
            m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
            duration = "(%s)"%m.group(1) if m else ""
            m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
            desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
            vid = plist[-1]
            js = self.get_movie_info(vid)
            translations = js["message"]["translations"]["flash"]
            # Pick the first obfuscated (non-http) playlist entry.
            for pl in translations:
                if translations[pl].startswith("http"):
                    continue
                pl_link = translations[pl]
                lang = pl.encode("utf8")
                break
            else:
                raise Exception("No episodes list found!")
            #pl_link = js["message"]["translations"]["flash"].values()[0]
            # TODO process several players, currently taking the first
            if not pl_link.startswith("http"):
                pl_link = self.decode_uppod_text(pl_link)
            js = self._http_request(pl_link)
            js = self.decode_uppod_text(js)
            js = json.loads(js)
            if "s" in qs:
                # Season selected: list that season's episodes.
                s = int(qs["s"])
                for i,ep in enumerate(js["playlist"][s-1]["playlist"]):
                    title = title0+" - "+js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][s-1]["playlist"][i]["comment"].encode("utf8")
                    data2 = data+"&e=%s"%(i+1)
                    desc = serie +"\n"+desc0
                    content.append((title,self.name+"::"+data2,img,desc))
            else:
                # No season selected yet: list seasons.
                for i,ep in enumerate(js["playlist"]):
                    title = title0 +" - "+js["playlist"][i]["comment"].encode("utf8")
                    serie = js["playlist"][i]["comment"].encode("utf8")
                    data2 = data+"?s=%s"%(i+1)
                    desc = serie +"\n"+desc0
                    content.append((title,self.name+"::"+data2,img,desc))
            return content
            #r = self._http_request(url)
        ### Catalogue listing page ###
        else:
            r = self.call(data)
            r = r.decode("cp1251").encode("utf8")
            for r2 in re.findall('<article class="shortstory line".+?</article>', r, re.DOTALL):
                m = re.search(r'<a href="https://filmix\.me/play/(\d+)" class="watch icon-play">', r2, re.DOTALL)
                if not m: continue
                vid = m.group(1)
                data2 = "play/%s"%vid
                #title = re.search('itemprop="name">([^<]+)</div>', r2, re.DOTALL).group(1)
                title = re.search('itemprop="name" content="([^"]+)"', r2, re.DOTALL).group(1)
                m = re.search('itemprop="alternativeHeadline" content="([^"]+)"', r2, re.DOTALL)
                if m:
                    title = title + "/"+m.group(1)
                m = re.search('<img src="([^"]+.jpg)"', r2, re.DOTALL)
                img = m.group(1) if m else self.img
                m = re.search(r'<a itemprop="copyrightYear".+?>(\d+)<', r2, re.DOTALL)
                if m:
                    year = m.group(1) if m else ""
                    title = "%s (%s)"%(title,year)
                title = util.unescape(title)
                genre = re.findall('<a itemprop="genre"[^>]+?">([^<]+)</a>', r2, re.DOTALL)
                genre = ",".join(genre)
                m = re.search('<p itemprop="description">([^<]+)</p>', r2, re.DOTALL)
                desc0 = util.unescape(m.group(1)) if m else ""
                m = re.search('<div class="quality">([^<]+)</div>', r2, re.DOTALL)
                quality = m.group(1) if m else ""
                actors = re.findall('itemprop="actor">([^<]+)<', r2, re.DOTALL)
                actors = ",".join(actors)
                desc="%s\n%s\n%s\n%s\n%s"%(title,genre,desc0,actors,quality)
                content.append((title,self.name+"::"+data2,img,desc))
            # Pagination: prefer the site's "next" link, else synthesize one.
            if '<div class="navigation">' in r:
                m = re.search(r'href="https://filmix\.me/([^"]+)" class="next icon-arowRight btn-tooltip"', r, re.DOTALL)
                if m:
                    data2 = m.group(1)
                else:
                    m = re.search("/page/(\d)+",data)
                    if m:
                        page = int(m.group(1))+1
                        data2 = re.sub("/page/(\d)+", "/page/%s"%page, data)
                    else:
                        data2 = data + "/page/2"
                content.append(("Next page",self.name+"::"+data2,self.img,"Next page"))
            return content
  175. def is_video(self,data):
  176. source,data,path,plist,clist,params,qs = self.parse_data(data)
  177. if clist == "play" and "s=" in data and "e=" in data:
  178. return True
  179. elif clist=="play" and not params:
  180. r = self.call(path)
  181. #r = r.decode("cp1251").encode("utf8")
  182. m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
  183. if not m:
  184. raise Exception("Can not find video link")
  185. #return False
  186. video_link = m.group(1)
  187. if video_link=='{video-link}':
  188. return False
  189. else:
  190. return True
  191. else:
  192. return False
    def get_streams(self, data):
        # Resolve *data* (a play/... path) into a list of stream dicts
        # with url/lang/quality/name/desc keys.
        print "[filmix] get_streams:", data
        source,data,path,plist,clist,params,qs = self.parse_data(data)
        r = self.call(path)
        if not r:
            return []
        streams = []
        # Site pages are cp1251-encoded; normalize to utf8.
        r = r.decode("cp1251").encode("utf8")
        title = title0 = util.unescape(re.search("titlePlayer = '([^']+)'", r, re.DOTALL).group(1))
        m = re.search('<meta itemprop="thumbnailUrl" content="([^"]+)',r,re.DOTALL)
        img = m.group(1) if m else self.img
        m = re.search('<meta itemprop="duration" content="([^"]+)" />', r, re.DOTALL)
        duration = "(%s)"%m.group(1) if m else ""  # NOTE(review): computed but unused
        m = re.search('<p itemprop="description"[^>]+>([^<]+)<', r, re.DOTALL)
        desc = desc0 = util.unescape(m.group(1).strip()) if m else ""
        m = re.search('itemprop="contentUrl" content="(.+?)"', r, re.IGNORECASE | re.DOTALL)
        if not m:
            raise Exception("Can not find video link")
            #return []
        video_link = m.group(1)
        # '{video-link}' placeholder means this page is a series, not a movie.
        series = True if video_link == '{video-link}' else False
        vid = plist[1]
        js = self.get_movie_info(vid)
        translations = js["message"]["translations"]["flash"]
        # Pick the first obfuscated (non-http) playlist entry.
        for pl in translations:
            if translations[pl].startswith("http"):
                continue
            pl_link = translations[pl]
            lang = pl.encode("utf8")
            break
        else:
            raise Exception("No episodes list found!")
        if not pl_link.startswith("http"):
            pl_link = self.decode_uppod_text(pl_link)
        if not series :  # movie: the link itself is the stream template
            url0 = pl_link
            streams2 = self.get_streams2(url0)
            for st in streams2:
                stream = util.item()
                stream["url"]=st[1]
                stream["lang"]=lang
                stream["quality"]=st[0]
                stream["name"]= title
                stream["desc"]=desc
                streams.append(stream)
            return streams
        else:  # TV series: fetch and decode the episode playlist
            #pl_link = video_link
            js = self._http_request(pl_link)
            js = self.decode_uppod_text(js)
            js = json.loads(js)
            if "s" in qs and "e" in qs:
                s = int(qs["s"])
                e = int(qs["e"])
                serie = js["playlist"][s-1]["playlist"][e-1]["comment"].encode("utf8")
                title = title0+" - "+ serie
                url0 = js["playlist"][s-1]["playlist"][e-1]["file"].encode("utf8")
            # NOTE(review): url0 is only bound when both "s" and "e" are present;
            # is_video() is expected to gate series playback on s=/e= — confirm.
            streams2 = self.get_streams2(url0)
            for st in streams2:
                stream = util.item()
                stream["url"]=st[1]
                stream["lang"]=lang
                stream["quality"]=st[0]
                stream["name"]= title
                stream["desc"]=desc
                streams.append(stream)
            return streams
  260. def call(self, data,params=None,headers=None,lang=""):
  261. if not headers: headers = self.headers
  262. url = self.url+data
  263. result = self._http_request(url,params,headers=headers)
  264. return result
  265. def get_movie_info(self,vid):
  266. headers = headers2dict("""
  267. User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
  268. Accept: application/json, text/javascript, */*; q=0.01
  269. Accept-Language: en-US,en;q=0.5
  270. Content-Type: application/x-www-form-urlencoded; charset=UTF-8
  271. X-Requested-With: XMLHttpRequest
  272. Referer: https://filmix.me/play/%s
  273. """%vid )
  274. post_data = {"post_id":vid}
  275. r = util.post("https://filmix.me/api/movies/player_data", data=post_data, headers = headers)
  276. if not r:
  277. raise Exception("Can not get movie info")
  278. #return []
  279. js = json.loads(r)
  280. return js
  281. def decode_uppod_text(self, text):
  282. Client_codec_a = ["l", "u", "T", "D", "Q", "H", "0", "3", "G", "1", "f", "M", "p", "U", "a", "I", "6", "k", "d", "s", "b", "W", "5", "e", "y", "="]
  283. Client_codec_b = ["w", "g", "i", "Z", "c", "R", "z", "v", "x", "n", "N", "2", "8", "J", "X", "t", "9", "V", "7", "4", "B", "m", "Y", "o", "L", "h"]
  284. text = text.replace("\n", "").strip()
  285. for i in range(len(Client_codec_a)):
  286. char1 = Client_codec_b[i]
  287. char2 = Client_codec_a[i]
  288. text = text.replace(char1, "___")
  289. text = text.replace(char2, char1)
  290. text = text.replace("___", char2)
  291. result = base64.b64decode(text)
  292. print result
  293. return result
  294. def get_streams2(self,url0):
  295. m = re.search("\[([\d\w,]+)\]",url0)
  296. if not m:
  297. return [("?",url0)]
  298. res = m.group(1)
  299. streams=[]
  300. for res in res.split(","):
  301. if not res: continue
  302. if res in ["1080p"]: continue #TODO fullhd only in PRO+ version
  303. url=re.sub("\[[\d\w,]+\]",res,url0)
  304. streams.append((res,url))
  305. return streams
if __name__ == "__main__":
    # Command-line smoke test: list content for the path given as the
    # first argument (defaults to the "home" menu).
    c = Source()
    #s = "ZnVuY3Rpb24gc2VuZE1lc3NhZ2U2MDc3ODkoZSl7dmFyIGg9bWdfd3M2MDc3ODkub25tZXNzYWdlOyBtZ193czYwNzc4OS5yZWFkeVN0YXRlPT1tZ193czYwNzc4OS5DTE9TRUQmJihtZ193czYwNzc4OT1uZXcgV2ViU29ja2V0KG1nX3dzNjA3Nzg5X2xvY2F0aW9uKSksbWdfd3M2MDc3ODkub25tZXNzYWdlPWgsd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkobWdfd3M2MDc3ODksZnVuY3Rpb24oKXttZ193czYwNzc4OS5zZW5kKGUpfSl9ZnVuY3Rpb24gd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KXtzZXRUaW1lb3V0KGZ1bmN0aW9uKCl7cmV0dXJuIDE9PT1lLnJlYWR5U3RhdGU/dm9pZChudWxsIT10JiZ0KCkpOnZvaWQgd2FpdEZvclNvY2tldENvbm5lY3Rpb242MDc3ODkoZSx0KX0sNSl9OyB2YXIgbWdfd3M2MDc3ODlfbG9jYXRpb24gPSAid3NzOi8vd3NwLm1hcmtldGdpZC5jb20vd3MiOyBtZ193czYwNzc4OSA9IG5ldyBXZWJTb2NrZXQobWdfd3M2MDc3ODlfbG9jYXRpb24pLCBtZ193czYwNzc4OS5vbm1lc3NhZ2UgPSBmdW5jdGlvbiAodCkge3Bvc3RNZXNzYWdlKHQuZGF0YSk7fSwgb25tZXNzYWdlID0gZnVuY3Rpb24oZSl7c2VuZE1lc3NhZ2U2MDc3ODkoZS5kYXRhKX0="
    #txt = c.decode_uppod_text(s)
    if len(sys.argv)>1:
        data= sys.argv[1]
    else:
        data = "home"
    content = c.get_content(data)
    for item in content:
        print item
    # Leftover examples from other sources (kept for reference):
    #cat = api.get_categories(country)
    #chan = api.get_channels("lv")
    #prog = api.get_programs(channel=6400)
    #prog = api.get_programs(category=55)
    #seas = api.get_seasons(program=6453)
    #str = api.get_streams(660243)
    #res = api.get_videos(802)
    #formats = api.getAllFormats()
    #det = api.detailed("1516")
    #vid = api.getVideos("13170")
    pass