lambdascrapers
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

allucen.py 6.2KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164
  1. # -*- coding: utf-8 -*-
  2. '''
  3. allucen scraper for Exodus forks.
  4. Nov 9 2018 - Checked
  5. Updated and refactored by someone.
  6. Originally created by others.
  7. '''
  8. import re,urllib,urlparse,json
  9. from resources.lib.modules import client
  10. from resources.lib.modules import control
  11. from resources.lib.modules import source_utils
  12. class source:
  13. def __init__(self):
  14. self.priority = 0
  15. self.language = ['en']
  16. self.domains = ['alluc.ee']
  17. self.base_link = 'https://www.alluc.ee'
  18. self.search_link = '/api/search/%s/?apikey=%s&getmeta=0&query=%s&count=%d&from=%d'
  19. self.types = ['stream']
  20. self.streamLimit = control.setting('alluc.limit')
  21. if self.streamLimit == '': self.streamLimit = 100
  22. self.streamLimit = int(self.streamLimit)
  23. self.streamIncrease = 100
  24. self.api = control.setting('alluc.api')
  25. self.debrid = control.setting('alluc.download')
  26. if self.debrid == 'true': self.types = ['stream', 'download']
  27. self.rlsFilter = ['FRENCH', 'LATINO', 'SELF', 'SAMPLE', 'EXTRA']
  28. def movie(self, imdb, title, localtitle, aliases, year):
  29. try:
  30. url = {'imdb': imdb, 'title': title, 'year': year}
  31. url = urllib.urlencode(url)
  32. return url
  33. except:
  34. return
  35. def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
  36. try:
  37. url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
  38. url = urllib.urlencode(url)
  39. return url
  40. except:
  41. return
  42. def episode(self, url, imdb, tvdb, title, premiered, season, episode):
  43. try:
  44. if url == None: return
  45. url = urlparse.parse_qs(url)
  46. url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
  47. url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
  48. url = urllib.urlencode(url)
  49. return url
  50. except:
  51. return
  52. def sources(self, url, hostDict, hostprDict):
  53. sources = []
  54. try:
  55. if url == None:
  56. raise Exception()
  57. if not (self.api and not self.api == ''):
  58. raise Exception()
  59. data = urlparse.parse_qs(url)
  60. data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
  61. hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
  62. title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
  63. year = int(data['year']) if 'year' in data and not data['year'] == None else None
  64. season = int(data['season']) if 'season' in data and not data['season'] == None else None
  65. episode = int(data['episode']) if 'episode' in data and not data['episode'] == None else None
  66. query = '%s S%02dE%02d' % (title, season, episode) if 'tvshowtitle' in data else '%s %d' % (title, year)
  67. query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
  68. query += ' lang:%s' % self.language[0]
  69. query = urllib.quote_plus(query)
  70. url = urlparse.urljoin(self.base_link, self.search_link)
  71. hostDict = hostprDict + hostDict
  72. iterations = self.streamLimit/self.streamIncrease
  73. last = self.streamLimit - (iterations * self.streamIncrease)
  74. if not last:
  75. iterations = iterations - 1
  76. last = self.streamIncrease
  77. iterations = iterations + 1
  78. seen_urls = set()
  79. for type in self.types:
  80. searchFrom = 0
  81. searchCount = self.streamIncrease
  82. for offset in range(iterations):
  83. if iterations == offset + 1: searchCount = last
  84. urlNew = url % (type, self.api, query, searchCount, searchFrom)
  85. searchFrom = searchFrom + self.streamIncrease
  86. results = client.request(urlNew)
  87. results = json.loads(results)
  88. apistatus = results['status']
  89. if apistatus != 'success': break
  90. results = results['result']
  91. added = False
  92. for result in results:
  93. jsonName = result['title']
  94. jsonSize = result['sizeinternal']
  95. jsonExtension = result['extension']
  96. jsonLanguage = result['lang']
  97. jsonHoster = result['hostername'].lower()
  98. jsonLink = result['hosterurls'][0]['url']
  99. if jsonLink in seen_urls: continue
  100. seen_urls.add(jsonLink)
  101. if not hdlr in jsonName.upper(): continue
  102. if not self.releaseValid(title, jsonName): continue # filter non en releases
  103. if not jsonHoster in hostDict: continue
  104. if jsonExtension == 'rar': continue
  105. quality, info = source_utils.get_release_quality(jsonName)
  106. info.append(self.formatSize(jsonSize))
  107. info.append(jsonName)
  108. info = '|'.join(info)
  109. sources.append({'source' : jsonHoster, 'quality': quality, 'language' : jsonLanguage, 'url' : jsonLink, 'info': info, 'direct' : False, 'debridonly' : False})
  110. added = True
  111. if not added:
  112. break
  113. return sources
  114. except:
  115. return sources
  116. def resolve(self, url):
  117. return url
  118. def formatSize(self, size):
  119. if size == 0 or size is None: return ''
  120. size = int(size) / (1024 * 1024)
  121. if size > 2000:
  122. size = size / 1024
  123. unit = 'GB'
  124. else:
  125. unit = 'MB'
  126. size = '[B][%s %s][/B]' % (size, unit)
  127. return size
  128. def releaseValid (self, title, release):
  129. for unw in self.rlsFilter:
  130. if not unw in title.upper() and unw in release.upper():
  131. return False
  132. return True