lambdascrapers

l23movies.py 3.7KB

# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# -Cleaned and Checked on 10-27-2018 by JewBMX

import re
import urllib
import urlparse
import json
import base64

from resources.lib.modules import client, cleantitle, directstream, dom_parser2
from resources.lib.modules import debrid
from resources.lib.modules import cfscrape


class source:
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['l23movies.com']
        self.base_link = 'http://l23movies.com'
        self.movies_search_path = 'search-movies/%s.html'

    # Shared Cloudflare-aware session; the methods below also create
    # their own per-request sessions.
    scraper = cfscrape.create_scraper()

    def movie(self, imdb, title, localtitle, aliases, year):
        # Search the site for the title and return the page URL whose
        # listed release year matches the requested one.
        try:
            scraper = cfscrape.create_scraper()
            clean_title = cleantitle.geturl(title).replace('-', '+')
            url = urlparse.urljoin(self.base_link, self.movies_search_path % clean_title)
            r = scraper.get(url).content
            r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            r = [(i[0].attrs['href'], re.search(r'Release:\s*(\d+)', i[0].content)) for i in r if i]
            r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1]) for i in r if i[1] == year]
            # Guard against an empty result list rather than indexing r[0] blindly.
            if r:
                return r[0][0]
            return
        except Exception:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Movies-only scraper: TV shows and episodes are not supported.
        return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        return

    def sources(self, url, hostDict, hostprDict):
        # Collect hoster links from the movie page's "server_play" blocks.
        try:
            sources = []
            scraper = cfscrape.create_scraper()
            r = scraper.get(url).content
            r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            r = [(i[0].attrs['href'], re.search(r'/(\w+)\.html', i[0].attrs['href'])) for i in r if i]
            r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
            for i in r:
                try:
                    host = i[1]
                    if str(host) in str(hostDict):
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': i[0].replace('\\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                except Exception:
                    pass
            return sources
        except Exception:
            return

    def resolve(self, url):
        # The hoster page hides the stream URL inside a base64-encoded
        # document.write() payload; decode it and pull out the iframe src.
        try:
            scraper = cfscrape.create_scraper()
            r = scraper.get(url).content
            url = re.findall(r'document\.write.+?"([^"]*)', r)[0]
            url = base64.b64decode(url)
            url = re.findall(r'src="([^"]*)', url)[0]
            return url
        except Exception:
            return
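
For clarity, here is a minimal, self-contained sketch of the decoding step that resolve() performs. The payload and embed URL below are invented for illustration; on the live site the base64 blob comes from the hoster page fetched through cfscrape.

    import re
    import base64

    # Fake page body mimicking what resolve() sees: a document.write()
    # call whose quoted argument is a base64-encoded <iframe> tag.
    # (Payload and URL are made up for this example.)
    html = 'document.write("%s")' % base64.b64encode(
        b'<iframe src="http://example.com/embed/abc123"></iframe>').decode('ascii')

    # Step 1: grab the quoted base64 blob out of the document.write call.
    blob = re.findall(r'document\.write.+?"([^"]*)', html)[0]

    # Step 2: decode it and extract the iframe src, as resolve() does.
    decoded = base64.b64decode(blob).decode('utf-8')
    print(re.findall(r'src="([^"]*)', decoded)[0])
    # -> http://example.com/embed/abc123

The two findall() calls mirror the ones in resolve(); the only difference is that the sketch builds its own payload instead of fetching a hoster page, so it runs without the resources.lib.modules dependencies.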