lambdascrapers
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

solarmoviez.py 14KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314
  1. # -*- coding: UTF-8 -*-
  2. #######################################################################
  3. # ----------------------------------------------------------------------------
  4. # "THE BEER-WARE LICENSE" (Revision 42):
  5. # @tantrumdev wrote this file. As long as you retain this notice you
  6. # can do whatever you want with this stuff. If we meet some day, and you think
  7. # this stuff is worth it, you can buy me a beer in return. - Muad'Dib
  8. # ----------------------------------------------------------------------------
  9. #######################################################################
  10. # -Cleaned and Checked on 10-27-2018 by JewBMX
  11. import re,urllib,urlparse,hashlib,random,string,json,base64,sys,time
  12. from resources.lib.modules import cleantitle
  13. from resources.lib.modules import client
  14. from resources.lib.modules import cache
  15. from resources.lib.modules import directstream
  16. from resources.lib.modules import jsunfuck
  17. from resources.lib.modules import source_utils
  18. from resources.lib.modules import cfscrape
  19. CODE = '''def retA():
  20. class Infix:
  21. def __init__(self, function):
  22. self.function = function
  23. def __ror__(self, other):
  24. return Infix(lambda x, self=self, other=other: self.function(other, x))
  25. def __or__(self, other):
  26. return self.function(other)
  27. def __rlshift__(self, other):
  28. return Infix(lambda x, self=self, other=other: self.function(other, x))
  29. def __rshift__(self, other):
  30. return self.function(other)
  31. def __call__(self, value1, value2):
  32. return self.function(value1, value2)
  33. def my_add(x, y):
  34. try: return x + y
  35. except Exception: return str(x) + str(y)
  36. x = Infix(my_add)
  37. return %s
  38. param = retA()'''
  39. class source:
  40. def __init__(self):
  41. self.priority = 1
  42. self.language = ['en']
  43. self.domains = ['solarmoviez.ru','solarmovie.mrunlock.pw']
  44. self.base_link = 'https://solarmoviez.ru'
  45. self.search_link = '/movie/search/%s.html'
  46. self.info_link = '/ajax/movie_get_info/%s.html'
  47. self.server_link = '/ajax/v4_movie_episodes/%s'
  48. self.embed_link = '/ajax/movie_embed/%s'
  49. self.token_link = '/ajax/movie_token?eid=%s&mid=%s&_=%s'
  50. self.source_link = '/ajax/movie_sources/%s?x=%s&y=%s'
  51. def matchAlias(self, title, aliases):
  52. try:
  53. for alias in aliases:
  54. if cleantitle.get(title) == cleantitle.get(alias['title']):
  55. return True
  56. except:
  57. return False
  58. def movie(self, imdb, title, localtitle, aliases, year):
  59. try:
  60. aliases.append({'country': 'us', 'title': title})
  61. url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
  62. url = urllib.urlencode(url)
  63. return url
  64. except:
  65. return
  66. def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
  67. try:
  68. aliases.append({'country': 'us', 'title': tvshowtitle})
  69. url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
  70. url = urllib.urlencode(url)
  71. return url
  72. except:
  73. return
  74. def episode(self, url, imdb, tvdb, title, premiered, season, episode):
  75. try:
  76. if url == None: return
  77. url = urlparse.parse_qs(url)
  78. url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
  79. url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
  80. url = urllib.urlencode(url)
  81. return url
  82. except:
  83. return
  84. def searchShow(self, title, season, aliases, headers):
  85. try:
  86. title = cleantitle.normalize(title)
  87. search = '%s Season %01d' % (title, int(season))
  88. url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
  89. r = self.s.get(url, headers=headers).content
  90. r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
  91. r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
  92. r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
  93. r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
  94. url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
  95. return url
  96. except:
  97. return
  98. def searchMovie(self, title, year, aliases, headers):
  99. try:
  100. title = cleantitle.normalize(title)
  101. url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
  102. r = self.s.get(url, headers=headers).content
  103. r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
  104. r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
  105. r = [(i[0], i[1], re.findall('(\d+)', i[0])[0]) for i in r]
  106. results = []
  107. for i in r:
  108. try:
  109. info = client.request(urlparse.urljoin(self.base_link, self.info_link % i[2]), headers=headers, timeout='15')
  110. y = re.findall('<div\s+class="jt-info">(\d{4})', info)[0]
  111. if self.matchAlias(i[1], aliases) and (year == y):
  112. url = i[0]
  113. break
  114. #results.append([i[0], i[1], re.findall('<div\s+class="jt-info">(\d{4})', info)[0]])
  115. except:
  116. url = None
  117. pass
  118. #try:
  119. # r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
  120. # url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
  121. #except:
  122. # url = None
  123. # pass
  124. if (url == None):
  125. url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
  126. return url
  127. except:
  128. return
  129. def sources(self, url, hostDict, hostprDict):
  130. try:
  131. sources = []
  132. if url is None: return sources
  133. data = urlparse.parse_qs(url)
  134. data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
  135. aliases = eval(data['aliases'])
  136. mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
  137. headers = mozhdr
  138. headers['X-Requested-With'] = 'XMLHttpRequest'
  139. self.s = cfscrape.create_scraper()
  140. if 'tvshowtitle' in data:
  141. episode = int(data['episode'])
  142. url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
  143. else:
  144. episode = 0
  145. url = self.searchMovie(data['title'], data['year'], aliases, headers)
  146. headers['Referer'] = url
  147. ref_url = url
  148. mid = re.findall('-(\d*)\.',url)[0]
  149. data = {'id':mid}
  150. r = self.s.post(url, headers=headers)
  151. try:
  152. u = urlparse.urljoin(self.base_link, self.server_link % mid)
  153. r = self.s.get(u, headers=mozhdr).content
  154. r = json.loads(r)['html']
  155. rl = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
  156. rh = client.parseDOM(r, 'div', attrs = {'class': 'pas-header'})
  157. ids = client.parseDOM(rl, 'li', ret='data-id')
  158. servers = client.parseDOM(rl, 'li', ret='data-server')
  159. labels = client.parseDOM(rl, 'a', ret='title')
  160. r = zip(ids, servers, labels)
  161. rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
  162. types = {}
  163. for rr in rrr:
  164. types[rr[0]] = rr[1]
  165. for eid in r:
  166. try:
  167. try:
  168. ep = re.findall('episode.*?(\d+).*?',eid[2].lower())[0]
  169. except:
  170. ep = 0
  171. if (episode == 0) or (int(ep) == episode):
  172. t = str(int(time.time()*1000))
  173. quali = source_utils.get_release_quality(eid[2])[0]
  174. if 'embed' in types[eid[1]]:
  175. url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
  176. xml = self.s.get(url, headers=headers).content
  177. url = json.loads(xml)['src']
  178. valid, hoster = source_utils.is_host_valid(url, hostDict)
  179. if not valid: continue
  180. q = source_utils.check_sd_url(url)
  181. q = q if q != 'SD' else quali
  182. sources.append({'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False })
  183. continue
  184. else:
  185. url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
  186. script = self.s.get(url, headers=headers).content
  187. if '$_$' in script:
  188. params = self.uncensored1(script)
  189. elif script.startswith('[]') and script.endswith('()'):
  190. params = self.uncensored2(script)
  191. elif '_x=' in script:
  192. x = re.search('''_x=['"]([^"']+)''', script).group(1)
  193. y = re.search('''_y=['"]([^"']+)''', script).group(1)
  194. params = {'x': x, 'y': y}
  195. else:
  196. raise Exception()
  197. u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
  198. length = 0
  199. count = 0
  200. while length == 0 and count < 11:
  201. r = self.s.get(u, headers=headers).text
  202. length = len(r)
  203. if length == 0: count += 1
  204. uri = None
  205. uri = json.loads(r)['playlist'][0]['sources']
  206. try:
  207. uri = [i['file'] for i in uri if 'file' in i]
  208. except:
  209. try:
  210. uri = [uri['file']]
  211. except:
  212. continue
  213. for url in uri:
  214. if 'googleapis' in url:
  215. q = source_utils.check_sd_url(url)
  216. sources.append({'source': 'gvideo', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
  217. continue
  218. valid, hoster = source_utils.is_host_valid(url, hostDict)
  219. #urls, host, direct = source_utils.check_directstreams(url, hoster)
  220. q = quali
  221. if valid:
  222. #for z in urls:
  223. if hoster == 'gvideo':
  224. direct = True
  225. try:
  226. q = directstream.googletag(url)[0]['quality']
  227. except:
  228. pass
  229. url = directstream.google(url, ref=ref_url)
  230. else: direct = False
  231. sources.append({'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': direct, 'debridonly': False})
  232. else:
  233. sources.append({'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
  234. except:
  235. pass
  236. except:
  237. pass
  238. return sources
  239. except:
  240. return sources
  241. def resolve(self, url):
  242. try:
  243. if not url.startswith('http'):
  244. url = 'http:' + url
  245. for i in range(3):
  246. if 'google' in url and not 'googleapis' in url:
  247. url = directstream.googlepass(url)
  248. if url:
  249. break
  250. return url
  251. except Exception:
  252. return
  253. def uncensored(a, b):
  254. x = '' ; i = 0
  255. for i, y in enumerate(a):
  256. z = b[i % len(b) - 1]
  257. y = int(ord(str(y)[0])) + int(ord(str(z)[0]))
  258. x += chr(y)
  259. x = base64.b64encode(x)
  260. return x
  261. def uncensored1(self, script):
  262. try:
  263. script = '(' + script.split("(_$$)) ('_');")[0].split("/* `$$` */")[-1].strip()
  264. script = script.replace('(__$)[$$$]', '\'"\'')
  265. script = script.replace('(__$)[_$]', '"\\\\"')
  266. script = script.replace('(o^_^o)', '3')
  267. script = script.replace('(c^_^o)', '0')
  268. script = script.replace('(_$$)', '1')
  269. script = script.replace('($$_)', '4')
  270. vGlobals = {"__builtins__": None, '__name__': __name__, 'str': str, 'Exception': Exception}
  271. vLocals = {'param': None}
  272. exec (CODE % script.replace('+', '|x|'), vGlobals, vLocals)
  273. data = vLocals['param'].decode('string_escape')
  274. x = re.search('''_x=['"]([^"']+)''', data).group(1)
  275. y = re.search('''_y=['"]([^"']+)''', data).group(1)
  276. return {'x': x, 'y': y}
  277. except:
  278. pass
  279. def uncensored2(self, script):
  280. try:
  281. js = jsunfuck.JSUnfuck(script).decode()
  282. x = re.search('''_x=['"]([^"']+)''', js).group(1)
  283. y = re.search('''_y=['"]([^"']+)''', js).group(1)
  284. return {'x': x, 'y': y}
  285. except:
  286. pass