lambdascrapers
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

moviesonline.py 5.9KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142
  1. # -*- coding: UTF-8 -*-
  2. #######################################################################
  3. # ----------------------------------------------------------------------------
  4. # "THE BEER-WARE LICENSE" (Revision 42):
  5. # @Daddy_Blamo wrote this file. As long as you retain this notice you
  6. # can do whatever you want with this stuff. If we meet some day, and you think
  7. # this stuff is worth it, you can buy me a beer in return. - Muad'Dib
  8. # ----------------------------------------------------------------------------
  9. #######################################################################
  10. # Addon Name: Placenta
  11. # Addon id: plugin.video.placenta
  12. # Addon Provider: Mr.Blamo
  13. # Scraper Checked and Fixed 11-08-2018 -JewBMX
  14. import re, urlparse, urllib, base64
  15. from resources.lib.modules import cleantitle
  16. from resources.lib.modules import client
  17. from resources.lib.modules import cache
  18. from resources.lib.modules import dom_parser2
  19. from resources.lib.modules import debrid
  20. class source:
  21. def __init__(self):
  22. self.priority = 1
  23. self.language = ['en']
  24. self.domains = ['moviesonline.gy','moviesonline.tl']
  25. self.base_link = 'http://moviesonline.gy'
  26. self.search_link = '/search-movies/%s.html'
  27. # moviesonline.mx is now ddos protected
  28. def movie(self, imdb, title, localtitle, aliases, year):
  29. try:
  30. clean_title = cleantitle.geturl(title)
  31. search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
  32. r = cache.get(client.request, 1, search_url)
  33. r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
  34. r = [(client.parseDOM(i, 'a', ret='href'),
  35. re.findall('.+?elease:\s*(\d{4})</', i),
  36. re.findall('<b><i>(.+?)</i>', i)) for i in r]
  37. r = [(i[0][0], i[1][0], i[2][0]) for i in r if
  38. (cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
  39. url = r[0][0]
  40. return url
  41. except Exception:
  42. return
  43. def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
  44. try:
  45. url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
  46. url = urllib.urlencode(url)
  47. return url
  48. except:
  49. return
  50. def episode(self, url, imdb, tvdb, title, premiered, season, episode):
  51. try:
  52. if url == None: return
  53. url = urlparse.parse_qs(url)
  54. url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
  55. url['premiered'], url['season'], url['episode'] = premiered, season, episode
  56. try:
  57. clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
  58. search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
  59. r = cache.get(client.request, 1, search_url)
  60. r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
  61. r = [(client.parseDOM(i, 'a', ret='href'),
  62. re.findall('<b><i>(.+?)</i>', i)) for i in r]
  63. r = [(i[0][0], i[1][0]) for i in r if
  64. cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
  65. url = r[0][0]
  66. except:
  67. pass
  68. data = client.request(url)
  69. data = client.parseDOM(data, 'div', attrs={'id': 'details'})
  70. data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
  71. url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
  72. return url[0][1]
  73. except:
  74. return
  75. def sources(self, url, hostDict, hostprDict):
  76. try:
  77. sources = []
  78. r = cache.get(client.request, 1, url)
  79. try:
  80. v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
  81. b64 = base64.b64decode(v)
  82. url = client.parseDOM(b64, 'iframe', ret='src')[0]
  83. try:
  84. host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
  85. host = client.replaceHTMLCodes(host)
  86. host = host.encode('utf-8')
  87. sources.append({
  88. 'source': host,
  89. 'quality': 'SD',
  90. 'language': 'en',
  91. 'url': url.replace('\/', '/'),
  92. 'direct': False,
  93. 'debridonly': True
  94. })
  95. except:
  96. pass
  97. except:
  98. pass
  99. r = client.parseDOM(r, 'div', {'class': 'server_line'})
  100. r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
  101. if r:
  102. for i in r:
  103. try:
  104. host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
  105. url = i[0]
  106. host = client.replaceHTMLCodes(host)
  107. host = host.encode('utf-8')
  108. if 'other'in host: continue
  109. sources.append({
  110. 'source': host,
  111. 'quality': 'SD',
  112. 'language': 'en',
  113. 'url': url.replace('\/', '/'),
  114. 'direct': False,
  115. 'debridonly': True
  116. })
  117. except:
  118. pass
  119. return sources
  120. except Exception:
  121. return
  122. def resolve(self, url):
  123. if self.base_link in url:
  124. url = client.request(url)
  125. v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
  126. b64 = base64.b64decode(v)
  127. url = client.parseDOM(b64, 'iframe', ret='src')[0]
  128. return url