
Rebased

master
I-A-C, 9 months ago
parent commit 74de3e5fa7
100 changed files with 2441 additions and 6584 deletions
  1. CleanupService.py  +0 -65
  2. License.txt  +0 -8
  3. addon.xml  +22 -17
  4. changelog.txt  +2 -25
  5. lib/default.py  +21 -22
  6. lib/lambdascrapers/modules/cache.py  +1 -1
  7. lib/lambdascrapers/modules/cfscrape.py  +9 -28
  8. lib/lambdascrapers/modules/cfscrape/__init__.py  +0 -171
  9. lib/lambdascrapers/modules/cfscrape/cfdecoder.py  +0 -171
  10. lib/lambdascrapers/modules/cleandate.py  +1 -1
  11. lib/lambdascrapers/modules/cleantitle.py  +1 -1
  12. lib/lambdascrapers/modules/client.py  +1 -1
  13. lib/lambdascrapers/modules/control.py  +1 -1
  14. lib/lambdascrapers/modules/debrid.py  +11 -4
  15. lib/lambdascrapers/modules/directstream.py  +1 -1
  16. lib/lambdascrapers/modules/dom_parser.py  +1 -0
  17. lib/lambdascrapers/modules/jsunfuck.py  +1 -1
  18. lib/lambdascrapers/modules/jsunpack.py  +1 -13
  19. lib/lambdascrapers/modules/log_utils.py  +1 -1
  20. lib/lambdascrapers/modules/proxy.py  +1 -1
  21. lib/lambdascrapers/modules/source_utils.py  +1 -1
  22. lib/lambdascrapers/modules/trakt.py  +2 -2
  23. lib/lambdascrapers/modules/utils.py  +1 -1
  24. lib/lambdascrapers/modules/workers.py  +1 -1
  25. lib/lambdascrapers/sources_ lambdascrapers/en/0123putlocker.py  +69 -0
  26. lib/lambdascrapers/sources_ lambdascrapers/en/1080P.py  +0 -63
  27. lib/lambdascrapers/sources_ lambdascrapers/en/123fox.py  +32 -40
  28. lib/lambdascrapers/sources_ lambdascrapers/en/123hbo.py  +15 -25
  29. lib/lambdascrapers/sources_ lambdascrapers/en/123hulu.py  +87 -38
  30. lib/lambdascrapers/sources_ lambdascrapers/en/123movieshubz.py  +126 -0
  31. lib/lambdascrapers/sources_ lambdascrapers/en/300mbdownload.py  +29 -28
  32. lib/lambdascrapers/sources_ lambdascrapers/en/Hdmto.py  +48 -47
  33. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/1080P.py  +0 -74
  34. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/123movies.py  +0 -196
  35. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/123moviesgold.py  +0 -139
  36. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/123netflix.py  +0 -139
  37. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/1movie.py  +0 -146
  38. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/afdah.py  +0 -83
  39. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/allrls.py  +0 -142
  40. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/allucen.py  +0 -164
  41. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/animeultima.py  +0 -156
  42. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/bmoviez.py  +0 -100
  43. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/bob.py  +0 -91
  44. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/cmovies.py  +0 -171
  45. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/cooltv.py  +0 -97
  46. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/dltube.py  +0 -91
  47. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/einthusan.py  +0 -91
  48. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/filmxy.py  +0 -97
  49. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/flenix.py  +0 -104
  50. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/flixanity.py  +0 -113
  51. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/genvideos.py  +0 -88
  52. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/gogoanime.py  +0 -114
  53. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/icefilms.py  +0 -247
  54. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/icouchtuner.py  +0 -97
  55. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/m4ufree.py  +0 -103
  56. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/megahd.py  +0 -103
  57. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/movie4uch.py  +0 -116
  58. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/myputlock.py  +0 -77
  59. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/projectfree.py  +0 -111
  60. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/pron.py  +0 -170
  61. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/rajahoki88.py  +0 -139
  62. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/sezonlukdizi.py  +0 -150
  63. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/showbox.py  +0 -187
  64. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/vodly.py  +0 -103
  65. lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/watch32.py  +0 -80
  66. lib/lambdascrapers/sources_ lambdascrapers/en/animetoon.py  +75 -0
  67. lib/lambdascrapers/sources_ lambdascrapers/en/azmovie.py  +65 -0
  68. lib/lambdascrapers/sources_ lambdascrapers/en/bobmovies.py  +0 -55
  69. lib/lambdascrapers/sources_ lambdascrapers/en/cartoonhd.py  +2 -2
  70. lib/lambdascrapers/sources_ lambdascrapers/en/cmovieshd.py  +85 -0
  71. lib/lambdascrapers/sources_ lambdascrapers/en/cmovieshdbz.py  +83 -0
  72. lib/lambdascrapers/sources_ lambdascrapers/en/coolmoviezone.py  +50 -53
  73. lib/lambdascrapers/sources_ lambdascrapers/en/darewatch.py  +0 -175
  74. lib/lambdascrapers/sources_ lambdascrapers/en/divxcrawler.py  +0 -101
  75. lib/lambdascrapers/sources_ lambdascrapers/en/downflix.py  +65 -0
  76. lib/lambdascrapers/sources_ lambdascrapers/en/extramovies.py  +132 -0
  77. lib/lambdascrapers/sources_ lambdascrapers/en/filmxy.py  +113 -0
  78. lib/lambdascrapers/sources_ lambdascrapers/en/fmovies.py  +30 -18
  79. lib/lambdascrapers/sources_ lambdascrapers/en/furk.py  +327 -58
  80. lib/lambdascrapers/sources_ lambdascrapers/en/gostream.py  +32 -14
  81. lib/lambdascrapers/sources_ lambdascrapers/en/hdpopcorns.py  +10 -7
  82. lib/lambdascrapers/sources_ lambdascrapers/en/iwaatch.py  +95 -0
  83. lib/lambdascrapers/sources_ lambdascrapers/en/iwatchflixxyz.py  +0 -68
  84. lib/lambdascrapers/sources_ lambdascrapers/en/kattv.py  +32 -37
  85. lib/lambdascrapers/sources_ lambdascrapers/en/l23movies.py  +4 -7
  86. lib/lambdascrapers/sources_ lambdascrapers/en/moviesonline.py  +1 -1
  87. lib/lambdascrapers/sources_ lambdascrapers/en/moviesplanet.py  +0 -188
  88. lib/lambdascrapers/sources_ lambdascrapers/en/movietoken.py  +73 -0
  89. lib/lambdascrapers/sources_ lambdascrapers/en/mywatchseries.py  +0 -141
  90. lib/lambdascrapers/sources_ lambdascrapers/en/odb.py  +85 -66
  91. lib/lambdascrapers/sources_ lambdascrapers/en/primewire.py  +248 -248
  92. lib/lambdascrapers/sources_ lambdascrapers/en/seehd.py  +72 -72
  93. lib/lambdascrapers/sources_ lambdascrapers/en/series9.py  +173 -173
  94. lib/lambdascrapers/sources_ lambdascrapers/en/seriesfree.py  +1 -1
  95. lib/lambdascrapers/sources_ lambdascrapers/en/seriesonline.py  +38 -39
  96. lib/lambdascrapers/sources_ lambdascrapers/en/sezonlukdizi.py  +20 -15
  97. lib/lambdascrapers/sources_ lambdascrapers/en/solarmoviez.py  +1 -1
  98. lib/lambdascrapers/sources_ lambdascrapers/en/streamlord.py  +0 -159
  99. lib/lambdascrapers/sources_ lambdascrapers/en/tvbox.py  +42 -27
  100. lib/lambdascrapers/sources_ lambdascrapers/en/vdonip.py  +0 -0

+0 -65  CleanupService.py

@@ -1,65 +0,0 @@
# -*- coding: utf-8 -*-
# LambdaScrapers Cleanup Service

import os
import re

import xbmc
import xbmcvfs
import xbmcaddon

from lib.lambdascrapers import getAllHosters
from lib.lambdascrapers import providerSources

'''
Temporary service to TRY to make some file changes, and then prevent itself from running again.
'''

ADDON = xbmcaddon.Addon()

# 1) Do the actual housekeeping changes.
try:
    profileFolderPath = xbmc.translatePath(ADDON.getAddonInfo('profile')).decode('utf-8')
    settingsPath = os.path.join(profileFolderPath, 'settings.xml')

    # We rewrite the user settings file while ignoring all obsolete providers.
    if xbmcvfs.exists(settingsPath):
        with open(settingsPath, 'r+') as settingsFile:
            # Parse an XML tree from the settings file.
            from xml.etree import ElementTree
            tree = ElementTree.fromstring(settingsFile.read())
            currentProviders = set(getAllHosters())
            if len(currentProviders) > 0:
                # Traverse the tree backwards so we can safely remove elements on the go.
                for element in reversed(tree):
                    id = element.get('id')
                    if id and id.startswith('provider.') and id.split('.', 1)[1] not in currentProviders:
                        tree.remove(element)
                # Dump the cleaned up XML tree back to the file.
                settingsFile.seek(0)
                settingsFile.write(ElementTree.tostring(tree))
                settingsFile.truncate()
    # Reset obsolete module providers to Lambdascrapers.
    if ADDON.getSetting('module.provider') not in providerSources():
        ADDON.setSetting('module.provider', ' Lambdascrapers')
except:
    pass


# 2) Disable the service in the 'addon.xml' file.
try:
    addonFolderPath = xbmc.translatePath(ADDON.getAddonInfo('path')).decode('utf-8')
    addonXMLPath = os.path.join(addonFolderPath, 'addon.xml')

    # Disabling is done by commenting out the XML line with the service extension so it doesn't run anymore.
    with open(addonXMLPath, 'r+') as addonXMLFile:
        xmlText = addonXMLFile.read()
        serviceFilename = 'CleanupService\.py'
        pattern = r'(<\s*?extension.*?' + serviceFilename + '.*?>)'
        updatedXML = re.sub(pattern, r'<!--\1-->', xmlText, count=1, flags=re.IGNORECASE)
        addonXMLFile.seek(0)
        addonXMLFile.write(updatedXML)
        addonXMLFile.truncate()
except:
    pass
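
A minimal standalone sketch (not part of the commit) of the comment-out step the removed service performed; the sample line mirrors the xbmc.service entry dropped from addon.xml in the diff below.

import re

# Sample input: the service entry the old addon.xml declared.
xml_text = '<extension point="xbmc.service" library="CleanupService.py" start="startup" />'

# Same pattern and substitution as the service code above.
pattern = r'(<\s*?extension.*?CleanupService\.py.*?>)'
print(re.sub(pattern, r'<!--\1-->', xml_text, count=1, flags=re.IGNORECASE))
# Prints: <!--<extension point="xbmc.service" library="CleanupService.py" start="startup" />-->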

+0 -8  License.txt

@@ -1,8 +0,0 @@
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# As long as you retain this notice you can do whatever you want with this
# stuff. If we meet some day, and you think this stuff is worth it, you can
# buy me a beer in return.
# ----------------------------------------------------------------------------
#######################################################################

+22 -17  addon.xml

@@ -1,19 +1,24 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="script.module.lambdascrapers" name="Lambdascrapers Module" version="1.8.0" provider-name="I-A-C">
<requires>
<import addon="xbmc.python" version="2.1.0" />
<import addon="script.module.beautifulsoup4"/>
</requires>
<extension point="xbmc.python.module" library="lib" />
<extension point="xbmc.python.pluginsource" library="lib/default.py">
<provides>executable</provides>
</extension>
<extension point="xbmc.service" library="CleanupService.py" start="startup" />
<extension point="xbmc.addon.metadata">
<platform>all</platform>
<summary lang="en">Lambdascrapers Module</summary>
<description>Scrape common video host for URL's to be playable in XBMC/Kodi, simplifying addon development of video plugins requiring multi video hosts.</description>
<disclaimer lang="en">The author is not responsible for the use of this addon. The author is not responsible for the content found using this addon. The author does not host or own any content found within this addon.[CR]The author is in no way affiliated with Kodi, Team Kodi, or the XBMC Foundation.[CR]This is a Non-profit resource, organized solely for educational purposes which is protected under the Fair-Use doctrine of the Copyright Act, Specifically section 107, which does promote freedom of expression, by permitting the unlicensed use of copyright-protected works.</disclaimer>
<license>Beerware rev. 42</license>
</extension>
<addon id="script.module.lambdascrapers" name="Lambdascrapers Module" version="2.0.0" provider-name="I-A-C">
<requires>
<import addon="xbmc.python" version="2.1.0" />
<import addon="script.module.beautifulsoup4"/>
</requires>
<extension point="xbmc.python.module" library="lib" />
<extension point="xbmc.python.pluginsource" library="lib/default.py">
<provides>executable</provides>
</extension>
<extension point="xbmc.addon.metadata">
<platform>all</platform>
<summary lang="en">Lambdascrapers Module</summary>
<description>Scrape common video host for URL's to be playable in XBMC/Kodi, simplifying addon development of video plugins requiring multi video hosts.</description>
<disclaimer lang="en">The author is not responsible for the use of this addon. The author is not responsible for the content found using this addon. The author does not host or own any content found within this addon.[CR]The author is in no way affiliated with Kodi, Team Kodi, or the XBMC Foundation.[CR]This is a Non-profit resource, organized solely for educational purposes which is protected under the Fair-Use doctrine of the Copyright Act, Specifically section 107, which does promote freedom of expression, by permitting the unlicensed use of copyright-protected works.</disclaimer>
<platform>all</platform>
<license>GNU GENERAL PUBLIC LICENSE. Version 3, 29 June 2007</license>
<assets>
<icon>icon.png</icon>
<fanart></fanart>
</assets>
<news></news>
</extension>
</addon>

+2 -25  changelog.txt

@@ -1,27 +1,4 @@
Lambdascrapers Module Changelog:
1.8.0
- Scraper updates
1.7.0
- Remove nonperforming scraper from defaults
- Lambdascrapers module as default provider
- Scraper fixes
1.6.0
- Module improvements
- Added German and Polish providers
- Updated Scrapers, Removed Duplicates
1.5.0
-Updated provider settings.
1.4.0
- Added settings. Providers can be be enabled/disabled.
1.3.0
- Updated Scrapers, Removed Duplicates
1.2.0
- Added some community submitted scrapers
- General Scraper Housekeeping
- Removed/Quarantined scrapers with duplicate self.baselinks
- Removed/Quarantined scrapers that returned 404s or 503s
- Added Modules directory
1.1.0
- Merged Scrapers
1.0.0
2.0.0
- CivitasScrapers-0.0.5.8 base
- Initial commit

+21 -22  lib/default.py

@@ -1,3 +1,5 @@
# -*- coding: utf-8 -*-

import urlparse
from lambdascrapers.modules import control
from lambdascrapers import providerSources, providerNames
@@ -49,17 +51,12 @@ if mode == "toggleAll":
toggleAll(params['setting'], open_id, sourceList=sourcelist)

if mode == "toggleAllDebrid":
sourcelist = ['2ddl','300mbfilms','bestmoviez','ddls','ddlvalley','directdl','gomovies','hevcfilm',
'myvideolink','phazeddl','power','releasebb','RLSB','rlsbb','rlsmovies','rlsscn',
'scenerls','sceper','seriescr','tvbmoviez','tvrelease','ultrahd','ultrahdindir','wrzcraft']
sourcelist = ['300mbfilms','bestmoviez','ddlvalley','ddlspot','directdl','invictus','myvideolink',
'playmovies','scenerls','ultrahdindir','wrzcraft','iwantmyshow','moviesleak']
toggleAll(params['setting'], params['open_id'], sourcelist)

if mode == "toggleAllGerman":
sourcelist = ['allucde','animebase','animeloads','bs','cine','cinenator','ddl',
'filmpalast','foxx','hdfilme','hdstreams','horrorkino','iload','kinodogs','kinoking',
'kinow','kinox','lichtspielhaus','movie2k-ac','movie2k-ag','movie2z','movie4k','moviesever',
'movietown','netzkino','proxer','pureanime','serienstream','seriesever','stream-to',
'streamdream','streamflix','streamit','tata','video4k','view4u']
sourcelist = ['gamatotv','liomenoi','tainiesonline','tainiomania','xrysoi']
toggleAll(params['setting'], params['open_id'], sourcelist)

if mode == "toggleAllPolish":
@@ -67,23 +64,25 @@ if mode == "toggleAllPolish":
'filmwebbooster','iitv','movieneo','openkatalog','paczamy','segos','szukajkatv','trt']
toggleAll(params['setting'], params['open_id'], sourcelist)

if mode == "toggleAllSpanish":
sourcelist = ['megapelistv','peliculasdk','pelisplustv','pepecine','seriespapaya']
toggleAll(params['setting'], params['open_id'], sourcelist)

if mode == "toggleAllForeign":
sourcelist = ['allucde','animebase','animeloads','bs','cine','cinenator','ddl',
'filmpalast','foxx','hdfilme','hdstreams','horrorkino','iload','kinodogs','kinoking',
'kinow','kinox','lichtspielhaus','movie2k-ac','movie2k-ag','movie2z','movie4k','moviesever',
'movietown','netzkino','proxer','pureanime','serienstream','seriesever','stream-to',
'streamdream','streamflix','streamit','tata','video4k','view4u',
sourcelist = ['gamatotv','liomenoi','tainiesonline','tainiomania','xrysoi',
'alltube','boxfilm','cdahd','cdax','ekinomaniak','ekinotv','filiser',
'filmwebbooster','iitv','movieneo','openkatalog','paczamy','segos','szukajkatv','trt']
'filmwebbooster','iitv','movieneo','openkatalog','paczamy','segos',
'szukajkatv','trt','megapelistv','peliculasdk','pelisplustv','pepecine','seriespapaya']
toggleAll(params['setting'], params['open_id'], sourcelist)

if mode == "toggleAllTorrent":
sourcelist = ['bitlord','torrentapi','yify','piratebay','eztv','zoogle','glodls','limetorrents','torrentdownloads']
toggleAll(params['setting'], params['open_id'], sourcelist)

if mode == "Defaults":
sourcelist = ['4kmovieto','1080P','bobmovies','bnwmovies',
'cartoonhd','coolmoviezone','darewatch','divxcrawler',
'fmovies','freefmovies','freeputlockers','furk','gostream',
'gowatchseries','Hdmto','hdpopcorns','kattv','library',
'moviesplanet','myprojectfreetv','odb','openloadmovie','ororo',
'plocker','primewire','putlocker','reddit','seehd','series9','seriesfree',
'seriesonline','streamlord','tvbox','videoscraper','vidics',
'watchonline','watchseries','xmovies','xwatchseries','ymovies']
sourcelist = ['123fox','123hbo','123movieshubz','animetoon','azmovies','bnwmovies','cartoonhd',
'extramovies','fmovies','freefmovies','freeputlockers','gostream','Hdmto','hdpopcorns',
'kattv','l23movies','iwaatch','openloadmovie','primewire','putlocker','reddit','rlsbb','scenerls',
'seehd','series9','seriesfree','seriesonline','solarmoviez','tvbox','vidics','watchseries',
'xwatchseries','vdonip','odb','downflix','ymovies','ddlspot','filmxy','kickass2','sezonlukdizi']
toggleAll(params['setting'], params['open_id'], sourcelist)

+1 -1  lib/lambdascrapers/modules/cache.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+9 -28  lib/lambdascrapers/modules/cfscrape.py

@@ -4,8 +4,8 @@ import re
import subprocess
from copy import deepcopy
from time import sleep
import requests #from Magicality
from resources.lib.modules import cfdecoder #from Magicality
import requests
from lambdascrapers.modules import cfdecoder

from requests.sessions import Session

@@ -72,43 +72,24 @@ class CloudflareScraper(Session):
return resp

def solve_cf_challenge(self, resp, **original_kwargs):
sleep(self.delay) # Cloudflare requires a delay before solving the challenge

sleep(5) # Cloudflare requires a delay before solving the challenge
body = resp.text
parsed_url = urlparse(resp.url)
domain = parsed_url.netloc
domain = urlparse(resp.url).netloc
submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (parsed_url.scheme, domain)

cloudflare_kwargs = deepcopy(original_kwargs)
params = cloudflare_kwargs.setdefault("params", {})
headers = cloudflare_kwargs.setdefault("headers", {})
headers["Referer"] = resp.url

try:
params["jschl_vc"] = re.search(r'name="jschl_vc" value="(\w+)"', body).group(1)
params["pass"] = re.search(r'name="pass" value="(.+?)"', body).group(1)

except Exception as e:
# Something is wrong with the page.
# This may indicate Cloudflare has changed their anti-bot
# technique. If you see this and are running the latest version,
# please open a GitHub issue so I can update the code accordingly.
raise ValueError("Unable to parse Cloudflare anti-bots page: %s %s" % (e.message, BUG_REPORT))

# Solve the Javascript challenge
params["jschl_answer"] = str(self.solve_challenge(body) + len(domain))

# Requests transforms any request into a GET after a redirect,
# so the redirect has to be handled manually here to allow for
# performing other types of requests even as the first request.
request = {}
request['data'] = body
request['url'] = resp.url
request['headers'] = resp.headers
submit_url = cfdecoder.Cloudflare(request).get_url()
method = resp.request.method
cloudflare_kwargs["allow_redirects"] = False
redirect = self.request(method, submit_url, **cloudflare_kwargs)

redirect_location = urlparse(redirect.headers["Location"])
if not redirect_location.netloc:
redirect_url = "%s://%s%s" % (parsed_url.scheme, domain, redirect_location.path)
return self.request(method, redirect_url, **original_kwargs)
return self.request(method, redirect.headers["Location"], **original_kwargs)

def solve_challenge(self, body):

+0 -171  lib/lambdascrapers/modules/cfscrape/__init__.py

@@ -1,171 +0,0 @@
from time import sleep
import logging
import random
import re
import os
from requests.sessions import Session
from resources.lib.modules.cfscrape import cfdecoder #change to just cfdecoder if doesn't work
from copy import deepcopy
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse

DEFAULT_USER_AGENTS = [
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0"
]

DEFAULT_USER_AGENT = random.choice(DEFAULT_USER_AGENTS)

BUG_REPORT = """\
Cloudflare may have changed their technique, or there may be a bug in the script.
Please read https://github.com/Anorov/cloudflare-scrape#updates, then file a \
bug report at https://github.com/Anorov/cloudflare-scrape/issues."\
"""

ANSWER_ACCEPT_ERROR = """\
The challenge answer was not properly accepted by Cloudflare. This can occur if \
the target website is under heavy load, or if Cloudflare is experiencing issues. You can
potentially resolve this by increasing the challenge answer delay (default: 5 seconds). \
For example: cfscrape.create_scraper(delay=10)
If increasing the delay does not help, please open a GitHub issue at \
https://github.com/Anorov/cloudflare-scrape/issues\
"""

class CloudflareScraper(Session):
def __init__(self, *args, **kwargs):
self.delay = kwargs.pop("delay", 0)
super(CloudflareScraper, self).__init__(*args, **kwargs)

if "requests" in self.headers["User-Agent"]:
# Set a random User-Agent if no custom User-Agent has been set
self.headers["User-Agent"] = DEFAULT_USER_AGENT

def is_cloudflare_challenge(self, resp):
return (
resp.status_code == 503
and resp.headers.get("Server", "").startswith("cloudflare")
and b"jschl_vc" in resp.content
and b"jschl_answer" in resp.content
)

def request(self, method, url, *args, **kwargs):
resp = super(CloudflareScraper, self).request(method, url, *args, **kwargs)

# Check if Cloudflare anti-bot is on
if self.is_cloudflare_challenge(resp):
resp = self.solve_cf_challenge(resp, **kwargs)
if self.is_cloudflare_challenge(resp):
raise ValueError(ANSWER_ACCEPT_ERROR)

return resp

def solve_cf_challenge(self, resp, **original_kwargs):
sleep(self.delay) # Cloudflare requires a delay before solving the challenge

body = resp.text
parsed_url = urlparse(resp.url)
domain = parsed_url.netloc
submit_url = "%s://%s/cdn-cgi/l/chk_jschl" % (parsed_url.scheme, domain)

cloudflare_kwargs = deepcopy(original_kwargs)
params = cloudflare_kwargs.setdefault("params", {})
headers = cloudflare_kwargs.setdefault("headers", {})
headers["Referer"] = resp.url

try:
params["jschl_vc"] = re.search(r'name="jschl_vc" value="(\w+)"', body).group(1)
params["pass"] = re.search(r'name="pass" value="(.+?)"', body).group(1)

except Exception as e:
# Something is wrong with the page.
# This may indicate Cloudflare has changed their anti-bot
# technique. If you see this and are running the latest version,
# please open a GitHub issue so I can update the code accordingly.
raise ValueError("Unable to parse Cloudflare anti-bots page: %s %s" % (e.message, BUG_REPORT))

# Solve the Javascript challenge

response = {'data': resp.text, 'url': resp.url, 'headers': resp.headers}
r = cfdecoder.Cloudflare(response)
r = r.get_url()

# Requests transforms any request into a GET after a redirect,
# so the redirect has to be handled manually here to allow for
# performing other types of requests even as the first request.
method = resp.request.method
cloudflare_kwargs["allow_redirects"] = False
redirect = self.request(method, r, **cloudflare_kwargs)

redirect_location = urlparse(redirect.headers["Location"])
if not redirect_location.netloc:
redirect_url = "%s://%s%s" % (parsed_url.scheme, domain, redirect_location.path)
return self.request(method, redirect_url, **original_kwargs)
return self.request(method, redirect.headers["Location"], **original_kwargs)

@classmethod
def create_scraper(cls, sess=None, **kwargs):
"""
Convenience function for creating a ready-to-go CloudflareScraper object.
"""
scraper = cls(**kwargs)

if sess:
attrs = ["auth", "cert", "cookies", "headers", "hooks", "params", "proxies", "data"]
for attr in attrs:
val = getattr(sess, attr, None)
if val:
setattr(scraper, attr, val)

return scraper


## Functions for integrating cloudflare-scrape with other applications and scripts

@classmethod
def get_tokens(cls, url, user_agent=None, **kwargs):
scraper = cls.create_scraper()
if user_agent:
scraper.headers["User-Agent"] = user_agent

try:
resp = scraper.get(url, **kwargs)
resp.raise_for_status()
except Exception as e:
logging.error("'%s' returned an error. Could not collect tokens." % url)
raise

domain = urlparse(resp.url).netloc
cookie_domain = None

for d in scraper.cookies.list_domains():
if d.startswith(".") and d in ("." + domain):
cookie_domain = d
break
else:
raise ValueError("Unable to find Cloudflare cookies. Does the site actually have Cloudflare IUAM (\"I'm Under Attack Mode\") enabled?")

return ({
"__cfduid": scraper.cookies.get("__cfduid", "", domain=cookie_domain),
"cf_clearance": scraper.cookies.get("cf_clearance", "", domain=cookie_domain)
},
scraper.headers["User-Agent"]
)

@classmethod
def get_cookie_string(cls, url, user_agent=None, **kwargs):
"""
Convenience function for building a Cookie HTTP header value.
"""
tokens, user_agent = cls.get_tokens(url, user_agent=user_agent, **kwargs)
return "; ".join("=".join(pair) for pair in tokens.items()), user_agent

create_scraper = CloudflareScraper.create_scraper
get_tokens = CloudflareScraper.get_tokens
get_cookie_string = CloudflareScraper.get_cookie_string

scraper = create_scraper()
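
The module-level helpers defined at the end of this removed file (create_scraper, get_tokens, get_cookie_string) are the entry points the scraper files call; a hedged usage sketch, with the import path assumed from this repo's layout and a placeholder URL:

from lambdascrapers.modules import cfscrape  # path assumed from this repo's layout; consuming add-ons may differ

scraper = cfscrape.create_scraper(delay=10)         # requests.Session subclass that solves the IUAM challenge
html = scraper.get('http://example.com').content    # then used like a normal Session

tokens, ua = cfscrape.get_tokens('http://example.com')         # ({'__cfduid': ..., 'cf_clearance': ...}, user_agent)
cookie_header, ua = cfscrape.get_cookie_string('http://example.com')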

+0 -171  lib/lambdascrapers/modules/cfscrape/cfdecoder.py

@@ -1,171 +0,0 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Cloudflare decoder
# --------------------------------------------------------------------------------

import re
import time
import urllib
import urlparse

from decimal import Decimal, ROUND_UP


class Cloudflare:
def __init__(self, response):
self.timeout = 5
self.domain = urlparse.urlparse(response["url"])[1]
self.protocol = urlparse.urlparse(response["url"])[0]
self.js_data = {}
self.header_data = {}

if not "var s,t,o,p,b,r,e,a,k,i,n,g,f" in response["data"] or "chk_jschl" in response["url"]:
return

try:
self.js_data["auth_url"] = \
re.compile('<form id="challenge-form" action="([^"]+)" method="get">').findall(response["data"])[0]
self.js_data["params"] = {}
self.js_data["params"]["jschl_vc"] = \
re.compile('<input type="hidden" name="jschl_vc" value="([^"]+)"/>').findall(response["data"])[0]
self.js_data["params"]["pass"] = \
re.compile('<input type="hidden" name="pass" value="([^"]+)"/>').findall(response["data"])[0]
var, self.js_data["value"] = \
re.compile('var s,t,o,p,b,r,e,a,k,i,n,g,f[^:]+"([^"]+)":([^\n]+)};', re.DOTALL).findall(
response["data"])[0]
self.js_data["op"] = re.compile(var + "([\+|\-|\*|\/])=([^;]+)", re.MULTILINE).findall(response["data"])
self.js_data["wait"] = int(re.compile("\}, ([\d]+)\);", re.MULTILINE).findall(response["data"])[0]) / 1000
except Exception as e:
print(e)
self.js_data = {}

if "refresh" in response["headers"]:
try:
self.header_data["wait"] = int(response["headers"]["refresh"].split(";")[0])
self.header_data["auth_url"] = response["headers"]["refresh"].split("=")[1].split("?")[0]
self.header_data["params"] = {}
self.header_data["params"]["pass"] = response["headers"]["refresh"].split("=")[2]
except Exception as e:
print(e)
self.header_data = {}

@property
def wait_time(self):
if self.js_data.get("wait", 0):
return self.js_data["wait"]
else:
return self.header_data.get("wait", 0)

@property
def is_cloudflare(self):
return self.header_data.get("wait", 0) > 0 or self.js_data.get("wait", 0) > 0

def get_url(self):
# Method #1 (javascript)
if self.js_data.get("wait", 0):
jschl_answer = self.decode2(self.js_data["value"])

for op, v in self.js_data["op"]:
# jschl_answer = eval(str(jschl_answer) + op + str(self.decode2(v)))
if op == '+':
jschl_answer = jschl_answer + self.decode2(v)
elif op == '-':
jschl_answer = jschl_answer - self.decode2(v)
elif op == '*':
jschl_answer = jschl_answer * self.decode2(v)
elif op == '/':
jschl_answer = jschl_answer / self.decode2(v)

self.js_data["params"]["jschl_answer"] = round(jschl_answer, 10) + len(self.domain)

response = "%s://%s%s?%s" % (
self.protocol, self.domain, self.js_data["auth_url"], urllib.urlencode(self.js_data["params"]))

time.sleep(self.js_data["wait"])

return response

# Method #2 (headers)
if self.header_data.get("wait", 0):
response = "%s://%s%s?%s" % (
self.protocol, self.domain, self.header_data["auth_url"], urllib.urlencode(self.header_data["params"]))

time.sleep(self.header_data["wait"])

return response

def decode2(self, data):
data = re.sub("\!\+\[\]", "1", data)
data = re.sub("\!\!\[\]", "1", data)
data = re.sub("\[\]", "0", data)

pos = data.find("/")
numerador = data[:pos]
denominador = data[pos + 1:]

aux = re.compile('\(([0-9\+]+)\)').findall(numerador)
num1 = ""
for n in aux:
num1 += str(eval(n))

aux = re.compile('\(([0-9\+]+)\)').findall(denominador)
num2 = ""
for n in aux:
num2 += str(eval(n))

# return float(num1) / float(num2)
# return Decimal(Decimal(num1) / Decimal(num2)).quantize(Decimal('.0000000000000001'), rounding=ROUND_UP)
return Decimal(Decimal(num1) / Decimal(num2)).quantize(Decimal('.0000000000000001'))

def decode(self, data):
t = time.time()
timeout = False

while not timeout:
data = re.sub("\[\]", "''", data)
data = re.sub("!\+''", "+1", data)
data = re.sub("!''", "0", data)
data = re.sub("!0", "1", data)

if "(" in data:
x, y = data.rfind("("), data.find(")", data.rfind("(")) + 1
part = data[x + 1:y - 1]
else:
x = 0
y = len(data)
part = data

val = ""

if not part.startswith("+"): part = "+" + part

for i, ch in enumerate(part):
if ch == "+":
if not part[i + 1] == "'":
if val == "": val = 0
if type(val) == str:
val = val + self.get_number(part, i + 1)
else:
val = val + int(self.get_number(part, i + 1))
else:
val = str(val)
val = val + self.get_number(part, i + 1) or "0"

if type(val) == str: val = "'%s'" % val
data = data[0:x] + str(val) + data[y:]

timeout = time.time() - t > self.timeout

if not "+" in data and not "(" in data and not ")" in data:
return int(self.get_number(data))

def get_number(self, str, start=0):
ret = ""
for chr in str[start:]:
try:
int(chr)
except:
if ret: break
else:
ret += chr
return ret

+1 -1  lib/lambdascrapers/modules/cleandate.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+1 -1  lib/lambdascrapers/modules/cleantitle.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+1 -1  lib/lambdascrapers/modules/client.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+1 -1  lib/lambdascrapers/modules/control.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+11 -4  lib/lambdascrapers/modules/debrid.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module

@@ -36,9 +36,16 @@ except:
debrid_resolvers = []


def status():
    return debrid_resolvers != []

def status(torrent=False):
    debrid_check = debrid_resolvers != []
    if debrid_check is True:
        if torrent:
            enabled = control.setting('torrent.enabled')
            if enabled == '' or enabled.lower() == 'true':
                return True
            else:
                return False
    return debrid_check

def resolver(url, debrid):
    try:
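
A short usage sketch of the reworked status() above; the import path is assumed from this repo's layout, and 'torrent.enabled' is the setting read in the new code.

from lambdascrapers.modules import debrid  # import path assumed

if debrid.status():               # True when at least one debrid resolver is configured
    pass
if debrid.status(torrent=True):   # additionally gated on the 'torrent.enabled' setting
    pass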

+1 -1  lib/lambdascrapers/modules/directstream.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+1 -0  lib/lambdascrapers/modules/dom_parser.py

@@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
"""
Based on Parsedom for XBMC plugins
Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen

+1 -1  lib/lambdascrapers/modules/jsunfuck.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+1 -13  lib/lambdascrapers/modules/jsunpack.py

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
"""
resolveurl XBMC Addon
Copyright (C) 2013 Bstrdsmkr

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -14,17 +13,6 @@

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

Adapted for use in xbmc from:
https://github.com/einars/js-beautify/blob/master/python/jsbeautifier/unpackers/packer.py
usage:

if detect(some_string):
unpacked = unpack(some_string)


Unpacker for Dean Edward's p.a.c.k.e.r
"""

import re

+1 -1  lib/lambdascrapers/modules/log_utils.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-


# Addon Name: LambdaScrapers Module

+1 -1  lib/lambdascrapers/modules/proxy.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+1 -1  lib/lambdascrapers/modules/source_utils.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+2 -2  lib/lambdascrapers/modules/trakt.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module

@@ -33,7 +33,7 @@ from lambdascrapers.modules import control
from lambdascrapers.modules import log_utils
from lambdascrapers.modules import utils

BASE_URL = 'http://api.trakt.tv'
BASE_URL = 'https://api.trakt.tv'
V2_API_KEY = '42740047aba33b1f04c1ba3893ce805a9ecfebd05de544a30fe0c99fabec972e'
CLIENT_SECRET = 'c7a3e7fdf5c3863872c8f45e1d3f33797b492ed574a00a01a3fadcb3d270f926'
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'

+1 -1  lib/lambdascrapers/modules/utils.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+1 -1  lib/lambdascrapers/modules/workers.py

@@ -1,4 +1,4 @@
# -*- coding: UTF-8 -*-
# -*- coding: utf-8 -*-
"""
LambdaScrapers Module


+69 -0  lib/lambdascrapers/sources_ lambdascrapers/en/0123putlocker.py

@@ -0,0 +1,69 @@
# -*- coding: UTF-8 -*-
# -Cleaned and Checked on 12-03-2018 by JewBMX in Scrubs.

import re,urllib,urlparse,base64
from resources.lib.modules import cleantitle,client,proxy,cfscrape


class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['0123putlocker.com']
self.base_link = 'http://0123putlocker.com'
self.search_link = '/search-movies/%s.html'
self.scraper = cfscrape.create_scraper()


def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = cleantitle.geturl(tvshowtitle)
url = url.replace('-','+')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url: return
query = url + '+season+' + season
find = query.replace('+','-')
url = self.base_link + self.search_link % query
r = self.scraper.get(url).content
match = re.compile('<a href="http://0123putlocker.com/watch/(.+?)-' + find + '.html"').findall(r)
for url_id in match:
url = 'http://0123putlocker.com/watch/' + url_id + '-' + find + '.html'
r = self.scraper.get(url).content
match = re.compile('<a class="episode episode_series_link" href="(.+?)">' + episode + '</a>').findall(r)
for url in match:
return url
except:
return


def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = self.scraper.get(url).content
try:
match = re.compile('<p class="server_version"><img src="http://0123putlocker.com/themes/movies/img/icon/server/(.+?).png" width="16" height="16" /> <a href="(.+?)">').findall(r)
for host, url in match:
if host == 'internet': pass
else: sources.append({'source': host,'quality': 'SD','language': 'en','url': url,'direct': False,'debridonly': False})
except:
return
except Exception:
return
return sources


def resolve(self, url):
r = self.scraper.get(url).content
match = re.compile('decode\("(.+?)"').findall(r)
for info in match:
info = base64.b64decode(info)
match = re.compile('src="(.+?)"').findall(info)
for url in match:
return url
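
A hedged walk-through of how a provider class like the one added here is typically driven; every argument value below is a placeholder, and hostDict is assumed to be a plain list of supported host names.

provider = source()
show_url = provider.tvshow('tt0000000', None, 'example show', 'example show', [], '2018')
ep_url = provider.episode(show_url, None, None, None, None, '1', '1')
links = provider.sources(ep_url, hostDict=['openload', 'streamango'], hostprDict=[])
stream = provider.resolve(links[0]['url']) if links else None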


+0 -63  lib/lambdascrapers/sources_ lambdascrapers/en/1080P.py

@@ -1,63 +0,0 @@
# -*- coding: UTF-8 -*-
# -Cleaned and Checked on 11-13-2018 by JewBMX in Scrubs.
# -Fixed by JewBMX thanks to Muad'Dib's youtube stream :) <3 that Kodi Guru.

import re,traceback,urllib,urlparse,json,base64,xbmcgui

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils

# Old 1080pmovie.com
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['watchhdmovie.net']
self.base_link = 'https://watchhdmovie.net'
self.search_link = '/?s=%s'

def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('1080PMovies - Exception: \n' + str(failure))
return

def sources(self, url, hostDict, hostprDict):
sources = []
try:
if url == None: return
urldata = urlparse.parse_qs(url)
urldata = dict((i, urldata[i][0]) for i in urldata)
title = urldata['title'].replace(':', ' ').lower()
year = urldata['year']
search_id = title.lower()
start_url = urlparse.urljoin(self.base_link, self.search_link % (search_id.replace(' ','+') + '+' + year))
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
html = client.request(start_url,headers=headers)
Links = re.compile('a href="(.+?)" title="(.+?)"',re.DOTALL).findall(html)
for link,name in Links:
if title.lower() in name.lower():
if year in name:
holder = client.request(link,headers=headers)
Alterjnates = re.compile('<button class="text-capitalize dropdown-item" value="(.+?)"',re.DOTALL).findall(holder)
for alt_link in Alterjnates:
alt_url = alt_link.split ("e=")[1]
valid, host = source_utils.is_host_valid(alt_url, hostDict)
sources.append({'source':host,'quality':'1080p','language': 'en','url':alt_url,'info':[],'direct':False,'debridonly':False})
return sources
except:
failure = traceback.format_exc()
log_utils.log('1080PMovies - Exception: \n' + str(failure))
return sources

def resolve(self, url):
return directstream.googlepass(url)


lib/lambdascrapers/sources_placenta/en_placenta-1.7.8/to_be_fixed/needsfixing/monm.py → lib/lambdascrapers/sources_ lambdascrapers/en/123fox.py

@@ -1,49 +1,39 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
# -Cleaned and Checked on 12-03-2018 by JewBMX in Scrubs.

import re,urlparse,urllib,base64
from resources.lib.modules import cleantitle,client,dom_parser2,cfscrape

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2

class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['hellopiccoli.com']
self.base_link = 'http://hellopiccoli.com/'
self.domains = ['123fox.net']
self.base_link = 'http://www1.123fox.net'
self.search_link = '/search-movies/%s.html'
self.scraper = cfscrape.create_scraper()


def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('.+?elease:\s*(\d{4})</', i),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
r = [(i[0][0], i[1][0], i[2][0]) for i in r if
(cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
url = r[0][0]

return url
clean_title = cleantitle.geturl(title).replace('-', '+')
url = urlparse.urljoin(self.base_link, (self.search_link % clean_title))
r = self.scraper.get(url).content
r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
r = [(i[0], i[1]) for i in r if i[1] == year]
if r[0]:
url = r[0][0]
return url
else:
return
except Exception:
return


def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
@@ -52,17 +42,17 @@ class source:
except:
return


def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return

url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['premiered'], url['season'], url['episode'] = premiered, season, episode
try:
clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = self.scraper.get(search_url).content
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
@@ -71,20 +61,19 @@ class source:
url = r[0][0]
except:
pass
data = client.request(url)
data = self.scraper.get(url).content
data = client.parseDOM(data, 'div', attrs={'id': 'details'})
data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

return url[0][1]
except:
return

def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = cache.get(client.request, 1, url)
r = self.scraper.get(url).content
try:
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
b64 = base64.b64decode(v)
@@ -106,7 +95,8 @@ class source:
except:
pass
r = client.parseDOM(r, 'div', {'class': 'server_line'})
r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
r = [(client.parseDOM(i, 'a', ret='href')[0],
client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
if r:
for i in r:
try:
@@ -114,7 +104,7 @@ class source:
url = i[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
if 'other'in host: continue
if 'other' in host: continue
sources.append({
'source': host,
'quality': 'SD',
@@ -129,10 +119,12 @@ class source:
except Exception:
return


def resolve(self, url):
if self.base_link in url:
url = client.request(url)
url = self.scraper.get(url).content
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
return url


lib/lambdascrapers/sources_placenta/en_placenta-1.7.8/123hbo.py → lib/lambdascrapers/sources_ lambdascrapers/en/123hbo.py

@@ -1,37 +1,25 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# -Cleaned and Checked on 12-03-2018 by JewBMX in Scrubs.

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re,urlparse,urllib,base64
from resources.lib.modules import cleantitle,client,cfscrape

import re, urlparse, urllib, base64

from resources.lib.modules import cleantitle
from resources.lib.modules import client

class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['123hbo.com']
self.base_link = 'http://123hbo.com'
self.base_link = 'http://www0.123hbo.com'
self.search_link = '/search-movies/%s.html'
self.scraper = cfscrape.create_scraper()


def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
search_results = client.request(search_url)

search_results = self.scraper.get(search_url).content
parsed = client.parseDOM(search_results, 'div', {'id': 'movie-featured'})
parsed = [(client.parseDOM(i, 'a', ret='href'), re.findall('.+?elease:\s*(\d{4})</', i), re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
parsed = [(i[0][0], i[1][0], i[2][0]) for i in parsed if (cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
@@ -40,6 +28,7 @@ class source:
except:
return


def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
@@ -48,38 +37,37 @@ class source:
except:
return


def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return

url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['premiered'], url['season'], url['episode'] = premiered, season, episode
try:
clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
search_results = client.request(search_url)
search_results = self.scraper.get(search_url).content
parsed = client.parseDOM(search_results, 'div', {'id': 'movie-featured'})
parsed = [(client.parseDOM(i, 'a', ret='href'), re.findall('<b><i>(.+?)</i>', i)) for i in parsed]
parsed = [(i[0][0], i[1][0]) for i in parsed if cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
url = parsed[0][0]
except:
pass
data = client.request(url)
data = self.scraper.get(url).content
data = client.parseDOM(data, 'div', attrs={'id': 'details'})
data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

return url[0][1]
except:
return


def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources

html = client.request(url)
html = self.scraper.get(url).content
try:
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', html)[0]
b64 = base64.b64decode(v)
@@ -110,10 +98,12 @@ class source:
except:
return


def resolve(self, url):
if self.base_link in url:
url = client.request(url)
url = self.scraper.get(url).content
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
return url


+87 -38  lib/lambdascrapers/sources_ lambdascrapers/en/123hulu.py

@@ -6,79 +6,128 @@
Updated and refactored by someone.
Originally created by others.
'''
import re
import urllib
import urlparse
import json
import base64
import re, urllib, urlparse, json, base64
from resources.lib.modules import client, cleantitle, directstream, dom_parser2, cfscrape

from resources.lib.modules import client, cleantitle, directstream, dom_parser2

class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['123hulu.com','123hulu.unblockall.org']
self.base_link = 'http://123hulu.unblockall.org/'
self.movies_search_path = ('search-movies/%s.html')
self.domains = ['123hulu.com']
self.base_link = 'http://123hulu.com'
self.search_link = '/search-movies/%s.html'

def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title).replace('-','+')
url = urlparse.urljoin(self.base_link, (self.movies_search_path % clean_title))
r = client.request(url)
scraper = cfscrape.create_scraper()
clean_title = cleantitle.geturl(title).replace('-', '+')
url = urlparse.urljoin(self.base_link, (self.search_link % clean_title))
r = scraper.get(url).content
r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
r = [(i[0], i[1]) for i in r if i[1] == year]
if r[0]:
if r[0]:
url = r[0][0]
return url
else: return
else:
return
except Exception:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
return
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
return
try:
if url == None: return
scraper = cfscrape.create_scraper()
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['premiered'], url['season'], url['episode'] = premiered, season, episode
try:
clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = scraper.get(search_url).content
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
r = [(i[0][0], i[1][0]) for i in r if
cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
url = r[0][0]
except:
pass
data = scraper.get(url).content
data = client.parseDOM(data, 'div', attrs={'id': 'details'})
data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

return url[0][1]
except:
return

def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = client.request(url)
r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
r = [(i[0].attrs['href'], re.search('/(\w+).html', i[0].attrs['href'])) for i in r if i]
r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
for i in r:
sources = []
scraper = cfscrape.create_scraper()
r = scraper.get(url).content
try:
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
try:
host = i[1]
if str(host) in str(hostDict):
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
except:
pass
r = client.parseDOM(r, 'div', {'class': 'server_line'})
r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
if r:
for i in r:
try:
host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
url = i[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
if 'other'in host: continue
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': i[0].replace('\/','/'),
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except: pass
except:
pass
return sources
except Exception:
return
def resolve(self, url):
try:
r = client.request(url)
url = re.findall('document.write.+?"([^"]*)', r)[0]
url = base64.b64decode(url)
url = re.findall('src="([^"]*)', url)[0]
return url
except Exception:
return
if self.base_link in url:
scraper = cfscrape.create_scraper()
url = scraper.get(url).content
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
return url

+126 -0  lib/lambdascrapers/sources_ lambdascrapers/en/123movieshubz.py

@@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-

'''
#:'######::'####:'##::::'##:'####:'########::::'###:::::'######::
#'##... ##:. ##:: ##:::: ##:. ##::... ##..::::'## ##:::'##... ##:
# ##:::..::: ##:: ##:::: ##:: ##::::: ##:::::'##:. ##:: ##:::..::
# ##:::::::: ##:: ##:::: ##:: ##::::: ##::::'##:::. ##:. ######::
# ##:::::::: ##::. ##:: ##::: ##::::: ##:::: #########::..... ##:
# ##::: ##:: ##:::. ## ##:::: ##::::: ##:::: ##.... ##:'##::: ##:
#. ######::'####:::. ###::::'####:::: ##:::: ##:::: ##:. ######::
#:......:::....:::::...:::::....:::::..:::::..:::::..:::......:::

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''


import re
import urllib
import urlparse
import json
from resources.lib.modules import cleantitle
from resources.lib.modules import dom_parser2
from resources.lib.modules import client
from resources.lib.modules import cfscrape


class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['123movieshubz.com']
self.base_link = 'http://www1.123movieshubz.com'
self.search_link = '/watch/%s-%s-online-123movies.html'
self.scraper = cfscrape.create_scraper()

def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, year)))
return url
except:
return

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
r = self.scraper.get(url).content
r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
for i in r[0]:
if i.content == 'Episode %s' % episode:
url = i.attrs['href']
return url
except:
return

def sources(self, url, hostDict, hostprDict):
try:
sources = []

if url == None: return sources

r = self.scraper.get(url).content
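# The watch page carries a quality badge as the first <p> tag ('HD' is mapped
# to 720p, anything else to SD) and lists the host links inside #servers-list.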
quality = re.findall(">(\w+)<\/p", r)
if quality[0] == "HD":
quality = "720p"
else:
quality = "SD"
r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

for i in r[0]:
url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'],
'data-name': i.attrs['data-name']}
url = urllib.urlencode(url)
sources.append({'source': i.content, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

return sources
except:
return sources

def resolve(self, url):
try:
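# Host handshake: POST the film/server/name ids to ipplugins.php, pass the
# returned token to ipplayer.php for a hash, then check that hash via api.php;
# if the status comes back false, retry ipplayer.php with n=1. The player URL
# in p2['data'] is protocol-relative, so 'https:' is prepended.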
urldata = urlparse.parse_qs(url)
urldata = dict((i, urldata[i][0]) for i in urldata)
post = {'ipplugins': 1, 'ip_film': urldata['data-film'], 'ip_server': urldata['data-server'], 'ip_name': urldata['data-name'], 'fix': "0"}
p1 = self.scraper.get('http://123movieshubz.com/ip.file/swf/plugins/ipplugins.php', post=post, referer=urldata['url'], XHR=True).content
p1 = json.loads(p1)
p2 = self.scraper.get('http://123movieshubz.com/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' % (
p1['s'], urldata['data-server'])).content
p2 = json.loads(p2)
p3 = self.scraper.get('http://123movieshubz.com/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash'])).content
p3 = json.loads(p3)
n = p3['status']
if n == False:
p2 = self.scraper.get('http://123movieshubz.com/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' % (
p1['s'], urldata['data-server'])).content
p2 = json.loads(p2)
url = "https:%s" % p2["data"].replace("\/", "/")
return url
except:
return
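
For reference, the urlencoded payload each source item carries is exactly the dict that resolve() rebuilds before the handshake above; a minimal round-trip sketch (the attribute values are hypothetical placeholders, not taken from the site):

import urllib, urlparse

link = {'url': 'http://www1.123movieshubz.com/watch/example-2018-online-123movies.html',
'data-film': '1234', 'data-server': '14', 'data-name': '5678'}
payload = urllib.urlencode(link)            # what sources() stores in the item's 'url'
qs = urlparse.parse_qs(payload)
restored = dict((i, qs[i][0]) for i in qs)  # what resolve() starts from
assert restored == link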

lib/lambdascrapers/sources_ lambdascrapers/en_DebridOnly/2ddl.py → lib/lambdascrapers/sources_ lambdascrapers/en/300mbdownload.py

@@ -1,28 +1,37 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# -Cleaned and Checked on 10-10-2018 by JewBMX in Yoda.

import re,traceback,urllib,urlparse
# -*- coding: utf-8 -*-

'''
Eggman Add-on

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''


import re,urllib,urlparse

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import source_utils
from resources.lib.modules import log_utils

class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['2ddl.ws']
self.base_link = 'http://2ddl.ws'
self.domains = ['300mbdownload']
self.base_link = 'https://www.300mbdownload.club'
self.search_link = '/search/%s/feed/rss2/'

def movie(self, imdb, title, localtitle, aliases, year):
@@ -31,19 +40,14 @@ class source:
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('2DDL - Exception: \n' + str(failure))
return


def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('2DDL - Exception: \n' + str(failure))
return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
@@ -56,8 +60,6 @@ class source:
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('2DDL - Exception: \n' + str(failure))
return

def sources(self, url, hostDict, hostprDict):
@@ -76,8 +78,8 @@ class source:
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

query = '%s S%02dE%02d' % (
data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

url = self.search_link % urllib.quote_plus(query)
@@ -97,6 +99,7 @@ class source:
s = re.search('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', post)
s = s.groups()[0] if s else '0'
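# s holds the release size advertised in the post (e.g. '1.4 GB'), defaulting to '0'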
items += [(t, i, s) for i in u]

except:
pass

@@ -147,9 +150,7 @@ class source:

return sources
except:
failure = traceback.format_exc()
log_utils.log('2DDL - Exception: \n' + str(failure))
return sources
return

def resolve(self, url):
return url

+ 48  - 47
lib/lambdascrapers/sources_ lambdascrapers/en/Hdmto.py

@@ -1,47 +1,48 @@
# -*- coding: UTF-8 -*-
'''
hdmto scraper for Exodus forks.
Nov 9 2018 - Checked
Updated and refactored by someone.
Originally created by others.
'''
import re
import urllib
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['hdm.to']
self.base_link = 'https://hdm.to'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = cleantitle.geturl(title)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
url = '%s/%s/' % (self.base_link,url)
r = client.request(url)
try:
match = re.compile('<iframe.+?src="(.+?)"').findall(r)
for url in match:
sources.append({'source': 'Openload.co','quality': '1080p','language': 'en','url': url,'direct': False,'debridonly': False})
except:
return
except Exception:
return
return sources
def resolve(self, url):
return url
# -*- coding: UTF-8 -*-
'''
hdmto scraper for Exodus forks.
Nov 9 2018 - Checked

Updated and refactored by someone.
Originally created by others.
'''
import re
import urllib
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
from resources.lib.modules import cfscrape

class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['hdm.to']
self.base_link = 'https://hdm.to'
self.scraper = cfscrape.create_scraper()

def movie(self, imdb, title, localtitle, aliases, year):
try:
url = cleantitle.geturl(title)
return url
except:
return

def sources(self, url, hostDict, hostprDict):
try:
sources = []
url = '%s/%s/' % (self.base_link,url)
r = self.scraper.get(url).content
try:
match = re.compile('<iframe.+?src="(.+?)"').findall(r)
for url in match:
sources.append({'source': 'Openload.co','quality': '1080p','language': 'en','url': url,'direct': False,'debridonly': False})
except:
return
except Exception:
return
return sources

def resolve(self, url):
return url
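
The functional change here is swapping client.request for a cfscrape session so requests clear Cloudflare's challenge on hdm.to; a minimal standalone sketch of that pattern (the title slug is a hypothetical placeholder):

import re
import cfscrape

scraper = cfscrape.create_scraper()  # requests-style session that solves the Cloudflare JS challenge
html = scraper.get('https://hdm.to/example-title/').content
links = re.compile('<iframe.+?src="(.+?)"').findall(html)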

+ 0  - 74
lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/1080P.py

@@ -1,74 +0,0 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import re,traceback,urllib,urlparse,json,base64

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils

class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['1080pmovie.com', 'watchhdmovie.net']
self.base_link = 'https://watchhdmovie.net'
self.search_link = '/?s=%s'

def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('1080PMovies - Exception: \n' + str(failure))
return

def sources(self, url, hostDict, hostprDict):
sources = []
try:
if url == None: return
urldata = urlparse.parse_qs(url)
urldata = dict((i, urldata[i][0]) for i in urldata)
title = urldata['title'].replace(':', ' ').lower()
year = urldata['year']

search_id = title.lower()
start_url = self.search_link % (self.base_link, search_id.replace(' ','%20'))

headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
html = client.request(start_url,headers=headers)
Links = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"',re.DOTALL).findall(html)
for link,name in Links:
link = link.replace('\\','')
if title.lower() in name.lower():
if year in name:
holder = client.request(link,headers=headers)
new = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(holder)[0]
end = client.request(new,headers=headers)
final_url = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(end)[0]
valid, host = source_utils.is_host_valid(final_url, hostDict)
sources.append({'source':host,'quality':'1080p','language': 'en','url':final_url,'info':[],'direct':False,'debridonly':False})
return sources
except:
failure = traceback.format_exc()
log_utils.log('1080PMovies - Exception: \n' + str(failure))
return sources

def resolve(self, url):
return directstream.googlepass(url)


+ 0  - 196
lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/123movies.py

@@ -1,196 +0,0 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import urlparse,traceback,urllib,re,json,xbmc

from resources.lib.modules import client
from resources.lib.modules import cleantitle
from resources.lib.modules import directstream
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils

class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['123movies.ph']
self.base_link = 'https://123movies.ph/'
self.source_link = 'https://123movies.ph/'
self.episode_path = '/episodes/%s-%sx%s/'
self.movie_path0 = '/movies/%s-watch-online-free-123movies-%s/'
self.movie_path = '/movies/%s/'
# self.decode_file = '/decoding_v2.php'
# self.decode_file = '/decoding_v3.php'
self.decode_file = 'https://gomostream.com/decoding_v3.php'
# self.grabber_file = '/get.php'
# self.grabber_file = '/getv2.php'
self.grabber_file = 'https://gomostream.com/getv2.php'
# $.ajax({ type: "POST", url: "https://gomostream.com/decoding_v3.php" .....
# $.ajax({ type: "POST", url: "https://gomostream.com/getv2.php" .....
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'title': title, 'year': year}
return urllib.urlencode(url)

except Exception:
return

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
data = {'tvshowtitle': tvshowtitle, 'year': year, 'imdb': imdb}
return urllib.urlencode(data)

except Exception:
return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
data = urlparse.parse_qs(url)
data = dict((i, data[i][0]) for i in data)
data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})

return urllib.urlencode(data)

except Exception:
return

def sources(self, url, hostDict, hostprDict):
try:
sources = []

data = urlparse.parse_qs(url)
data = dict((i, data[i][0]) for i in data)

if 'episode' in data:
url = self.__get_episode_url(data)
get_body = 'type=episode&%s=%s&imd_id=%s&seasonsNo=%02d&episodesNo=%02d'
else:
url = self.__get_movie_url(data)

response = client.request(url)
url = re.findall('<iframe .+? src="(.+?)"', response)[0]

response = client.request(url)

token = re.findall('var tc = \'(.+?)\'', response)[0]
# _tsd_tsd_ds(s) ~~~ .slice(3,29) ~~~~ "29"+"341404"; <----- seeds phrase has changed
# seeds = re.findall('_tsd_tsd\(s\) .+\.slice\((.+?),(.+?)\).+ return .+? \+ \"(.+?)\"\+\"(.+?)";', response)[0]
seeds = re.findall('_tsd_tsd_ds\(s\) .+\.slice\((.+?),(.+?)\).+ return .+? \+ \"(.+?)\"\+\"(.+?)\";', response)[0]
pair = re.findall('\'type\': \'.+\',\s*\'(.+?)\': \'(.+?)\'', response)[0]

header_token = self.__xtoken(token, seeds)
body = 'tokenCode=' + token

headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'x-token': header_token
}

url = self.decode_file
response = client.request(url, XHR=True, post=body, headers=headers)

sources_dict = json.loads(response)

# [u'https://video.xx.fbcdn.net/v/t42.9040-2/10000000_226259417967008_8033841240334139392_n.mp4?_nc_cat=0&efg=eyJybHIiOjE1MDAsInJsYSI6NDA5NiwidmVuY29kZV90YWciOiJzdmVfaGQifQ%3D%3D&rl=1500&vabr=616&oh=27f4d11aec3aa54dbe1ca72c81fbaa03&oe=5B4C6DF5', u'https://movienightplayer.com/tt0253754', u'https://openload.co/embed/ALXqqto-fQI', u'https://streamango.com/embed/pndcsolkpnooffdk']
for source in sources_dict:
try:
# if 'vidushare.com' in source:
if '.mp4' in source:
sources.append({
'source': 'CDN',
'quality': 'HD',
'language': 'en',
'url': source,
'direct': True,
'debridonly': False
})
except Exception:
pass

body = get_body % (pair[0], pair[1], data['imdb'], int(data['season']), int(data['episode']))

url = urlparse.urljoin(self.source_link, self.grabber_file)
response = client.request(url, XHR=True, post=body, headers=headers)

sources_dict = json.loads(response)

for source in sources_dict:
try:
quality = source_utils.label_to_quality(source['label'])
link = source['file']

if 'lh3.googleusercontent' in link:
link = directstream.googleredirect(link)

sources.append({
'source': 'gvideo',
'quality': quality,
'language': 'en',
'url': link,
'direct': True,
'debridonly': False
})

except Exception:
pass


return sources

except Exception:
return sources

def resolve(self, url):
return url

def __get_episode_url(self, data):
try:
clean_title = cleantitle.geturl(data['tvshowtitle'])
query = self.episode_path % (clean_title, data['season'], data['episode'])

url = urlparse.urljoin(self.base_link, query)
html = client.request(url)

token = re.findall('\/?watch-token=(.*?)\"', html)[0]

return url + ('?watch-token=%s' % token)

except Exception:
return

def __get_movie_url(self, data):
clean_title = cleantitle.geturl(data['title'])
query0 = self.movie_path0 % (clean_title, data['year']) # the "long" version appears to use year (and it's optional)
query = self.movie_path % clean_title # no fancy stuff should work fine (at least almost always)
url = urlparse.urljoin(self.base_link, query)
html = client.request(url)

token = re.findall('\/?watch-token=(.*?)\"', html)[0]

return url + ('?watch-token=%s' % token)
def __xtoken(self, token, seeds):
try: