lambdascrapers
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

300mbfilms.py 6.5KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186
  1. # -*- coding: UTF-8 -*-
  2. #######################################################################
  3. # ----------------------------------------------------------------------------
  4. # "THE BEER-WARE LICENSE" (Revision 42):
  5. # @tantrumdev wrote this file. As long as you retain this notice you
  6. # can do whatever you want with this stuff. If we meet some day, and you think
  7. # this stuff is worth it, you can buy me a beer in return. - Muad'Dib
  8. # ----------------------------------------------------------------------------
  9. #######################################################################
  10. # -Cleaned and Checked on 11-13-2018 by JewBMX in Scrubs.
  11. import re,urllib,urlparse
  12. from resources.lib.modules import cleantitle
  13. from resources.lib.modules import client
  14. from resources.lib.modules import debrid
  15. from resources.lib.modules import source_utils
  16. from resources.lib.modules import dom_parser2
  17. class source:
  18. def __init__(self):
  19. self.priority = 1
  20. self.language = ['en']
  21. self.domains = ['300mbfilms.co']
  22. self.base_link = 'https://300mbfilms.co' #https://www.300mbfilms.co
  23. self.search_link = '/search/%s/feed/rss2/'
  24. def movie(self, imdb, title, localtitle, aliases, year):
  25. try:
  26. url = {'imdb': imdb, 'title': title, 'year': year}
  27. url = urllib.urlencode(url)
  28. return url
  29. except:
  30. return
  31. def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
  32. try:
  33. url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
  34. url = urllib.urlencode(url)
  35. return url
  36. except:
  37. return
  38. def episode(self, url, imdb, tvdb, title, premiered, season, episode):
  39. try:
  40. if url is None: return
  41. url = urlparse.parse_qs(url)
  42. url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
  43. url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
  44. url = urllib.urlencode(url)
  45. return url
  46. except:
  47. return
    def sources(self, url, hostDict, hostprDict):
        # Resolve the stored query into a list of source dicts by scraping
        # the site's RSS search feed. All results are debrid-only.
        try:
            sources = []
            if url is None: return sources
            # This site only yields premium file-host links, so bail out
            # unless a debrid service is enabled.
            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            # hdlr is the token expected in a matching release name:
            # SxxEyy for episodes, the year for movies.
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            # Strip characters the site's search chokes on.
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            posts = client.parseDOM(r, 'item')
            # Premium hosters first so debrid-capable hosts win validation.
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'link')[0]
                    # File size like "1.4 GB" / "700 MB" embedded in the title.
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', t)
                    s = s[0] if s else '0'
                    items += [(t, u, s) ]
                except:
                    pass
            urls = []
            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    # Drop everything from the year/episode token onward so
                    # only the bare title is compared.
                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    # Last year/episode token in the name must match hdlr.
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr: raise Exception()
                    quality, info = source_utils.get_release_quality(name, item[1])
                    # Skip low-quality rips.
                    if any(x in quality for x in ['CAM', 'SD']): continue
                    try:
                        # Normalize "GiB"/"MiB" to "GB"/"MB", then to a GB float.
                        size = re.sub('i', '', item[2])
                        div = 1 if size.endswith('GB') else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size))/div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass
                    info = ' | '.join(info)
                    url = item[1]
                    # Expand the post page into its individual file-host links.
                    links = self.links(url)
                    urls += [(i, quality, info) for i in links]
                except:
                    pass
            for item in urls:
                # Filter ad redirects and archive/disc downloads.
                if 'earn-money' in item[0]: continue
                if any(x in item[0] for x in ['.rar', '.zip', '.iso']): continue
                url = client.replaceHTMLCodes(item[0])
                url = url.encode('utf-8')
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                sources.append({'source': host, 'quality': item[1], 'language': 'en', 'url': url, 'info': item[2], 'direct': False, 'debridonly': True})
            return sources
        except:
            return sources
  112. def links(self, url):
  113. urls = []
  114. try:
  115. if url is None: return
  116. r = client.request(url)
  117. r = client.parseDOM(r, 'div', attrs={'class': 'entry'})
  118. r = client.parseDOM(r, 'a', ret='href')
  119. r1 = [(i) for i in r if 'money' in i][0]
  120. r = client.request(r1)
  121. r = client.parseDOM(r, 'div', attrs={'id': 'post-\d+'})[0]
  122. if 'enter the password' in r:
  123. plink= client.parseDOM(r, 'form', ret='action')[0]
  124. post = {'post_password': '300mbfilms', 'Submit': 'Submit'}
  125. send_post = client.request(plink, post=post, output='cookie')
  126. link = client.request(r1, cookie=send_post)
  127. else:
  128. link = client.request(r1)
  129. link = re.findall('<strong>Single(.+?)</tr', link, re.DOTALL)[0]
  130. link = client.parseDOM(link, 'a', ret='href')
  131. link = [(i.split('=')[-1]) for i in link]
  132. for i in link:
  133. urls.append(i)
  134. return urls
  135. except:
  136. pass
  137. def resolve(self, url):
  138. return url