Преглед изворни кода

Merge pull request #43 from host505/master

Updated scrapers by Jewbmx
pull/51/head
I-A-C пре 1 година
родитељ
комит
6c03a4b7b7
No account linked to committer's email address

+ 5
- 5
lib/default.py Прегледај датотеку

@@ -50,7 +50,7 @@ if mode == "toggleAll":

if mode == "toggleAllDebrid":
sourcelist = ['2ddl','300mbfilms','bestmoviez','ddls','ddlvalley','directdl','gomovies','hevcfilm',
'moviesonline','myvideolink','phazeddl','power','releasebb','RLSB','rlsbb','rlsmovies','rlsscn',
'myvideolink','phazeddl','power','releasebb','RLSB','rlsbb','rlsmovies','rlsscn',
'scenerls','sceper','seriescr','tvbmoviez','tvrelease','ultrahd','ultrahdindir','wrzcraft']
toggleAll(params['setting'], params['open_id'], sourcelist)

@@ -78,11 +78,11 @@ if mode == "toggleAllForeign":
toggleAll(params['setting'], params['open_id'], sourcelist)

if mode == "Defaults":
sourcelist = ['4kmovieto','Hdmto','bnwmovies',
sourcelist = ['4kmovieto','1080P','bobmovies','bnwmovies',
'cartoonhd','coolmoviezone','darewatch','divxcrawler',
'fmovies','freefmovies','freeputlockers','furk',
'gowatchseries','hdpopcorns','kattv','library',
'moviesplanet','odb','openloadmovie','ororo',
'fmovies','freefmovies','freeputlockers','furk','gostream',
'gowatchseries','Hdmto','hdpopcorns','kattv','library',
'moviesplanet','myprojectfreetv','odb','openloadmovie','ororo',
'plocker','primewire','putlocker','reddit','seehd','series9','seriesfree',
'seriesonline','streamlord','tvbox','videoscraper','vidics',
'watchonline','watchseries','xmovies','xwatchseries','ymovies']

lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/1080P.py → lib/lambdascrapers/sources_ lambdascrapers/en/1080P.py Прегледај датотеку

@@ -1,18 +1,8 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# -Cleaned and Checked on 11-13-2018 by JewBMX in Scrubs.
# -Fixed by JewBMX thanks to Muad'Dib's youtube stream :) <3 that Kodi Guru.

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import re,traceback,urllib,urlparse,json,base64
import re,traceback,urllib,urlparse,json,base64,xbmcgui

from resources.lib.modules import cleantitle
from resources.lib.modules import client
@@ -20,11 +10,12 @@ from resources.lib.modules import directstream
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils

# Old 1080pmovie.com
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['1080pmovie.com', 'watchhdmovie.net']
self.domains = ['watchhdmovie.net']
self.base_link = 'https://watchhdmovie.net'
self.search_link = '/?s=%s'

@@ -46,23 +37,21 @@ class source:
urldata = dict((i, urldata[i][0]) for i in urldata)
title = urldata['title'].replace(':', ' ').lower()
year = urldata['year']

search_id = title.lower()
start_url = self.search_link % (self.base_link, search_id.replace(' ','%20'))

start_url = urlparse.urljoin(self.base_link, self.search_link % (search_id.replace(' ','+') + '+' + year))
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
html = client.request(start_url,headers=headers)
Links = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"',re.DOTALL).findall(html)
Links = re.compile('a href="(.+?)" title="(.+?)"',re.DOTALL).findall(html)
for link,name in Links:
link = link.replace('\\','')
if title.lower() in name.lower():
if year in name:
holder = client.request(link,headers=headers)
new = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(holder)[0]
end = client.request(new,headers=headers)
final_url = re.compile('<iframe src="(.+?)"',re.DOTALL).findall(end)[0]
valid, host = source_utils.is_host_valid(final_url, hostDict)
sources.append({'source':host,'quality':'1080p','language': 'en','url':final_url,'info':[],'direct':False,'debridonly':False})
Alterjnates = re.compile('<button class="text-capitalize dropdown-item" value="(.+?)"',re.DOTALL).findall(holder)
for alt_link in Alterjnates:
alt_url = alt_link.split ("e=")[1]
valid, host = source_utils.is_host_valid(alt_url, hostDict)
sources.append({'source':host,'quality':'1080p','language': 'en','url':alt_url,'info':[],'direct':False,'debridonly':False})
return sources
except:
failure = traceback.format_exc()

lib/lambdascrapers/sources_ lambdascrapers/en/allucen.py → lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/allucen.py Прегледај датотеку


+ 0
- 141
lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/mywatchseries.py Прегледај датотеку

@@ -1,141 +0,0 @@
# -*- coding: utf-8 -*-

'''
Yoda Add-on

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''


import re,urllib,urlparse,json

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy


class source:
    """Scraper for mywatchseries.to (TV shows only; pages fetched via the proxy module)."""

    def __init__(self):
        # priority 0: tried before the priority-1 scrapers
        self.priority = 0
        self.language = ['en']
        self.domains = ['onwatchseries.to','mywatchseries.to']
        self.base_link = 'http://mywatchseries.to'
        # JSON autocomplete endpoint (POSTed {'term': <query>})
        self.search_link = 'http://mywatchseries.to/show/search-shows-json'
        # HTML search page, used as a fallback when the JSON endpoint fails
        self.search_link_2 = 'http://mywatchseries.to/search/%s'


    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Resolve a show to its site slug (last path component of its show URL).

        Returns the utf-8 encoded slug, or None on any failure.
        """
        try:
            t = cleantitle.get(tvshowtitle)

            q = urllib.quote_plus(cleantitle.query(tvshowtitle))
            p = urllib.urlencode({'term': q})

            # Primary path: JSON autocomplete search.
            r = client.request(self.search_link, post=p, XHR=True)
            try: r = json.loads(r)
            except: r = None

            if r:
                r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
            else:
                # Fallback: scrape the HTML search results through the proxy.
                r = proxy.request(self.search_link_2 % q, 'tv shows')
                r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
                r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
                r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]

            # Keep entries whose label contains a 4-digit year, then require
            # an exact cleaned-title match AND a matching year.
            r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
            r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
            r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]

            url = r[0][0]
            url = proxy.parse(url)

            url = url.strip('/').split('/')[-1]
            url = url.encode('utf-8')
            return url
        except:
            return


    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve an episode to its site-relative path from the show slug in *url*.

        Matching order: title+airdate, then title only, then airdate only;
        ambiguity (0 or >1 candidates) aborts and returns None.
        """
        try:
            if url == None: return

            url = '%s/serie/%s' % (self.base_link, url)

            r = proxy.request(url, 'tv shows')
            r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})

            t = cleantitle.get(title)

            # (href, episode-name, air-date) per listing row; the two
            # follow-up lines keep rows even when name or date is missing.
            r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
            r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
            r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
            r = [(i[0][0], i[1], i[2]) for i in r if i[0]]

            url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
            if not url: url = [i for i in r if t == cleantitle.get(i[1])]
            if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
            if len(url) > 1 or not url: raise Exception()

            url = url[0][0]
            url = proxy.parse(url)

            # Reduce to the site-relative path (strip any scheme/host).
            url = re.findall('(?://.+?|)(/.+)', url)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return


    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from an episode page.

        Each outbound link hides the real target base64-encoded in its
        'r' query parameter; only hosts present in hostDict are kept.
        """
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = proxy.request(url, 'tv shows')

            links = client.parseDOM(r, 'a', ret='href', attrs = {'target': '.+?'})
            # De-duplicate while preserving first-seen order.
            links = [x for y,x in enumerate(links) if x not in links[:y]]

            for i in links:
                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    # Host = last two dot-separated labels of the netloc.
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if not host in hostDict: raise Exception()
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources


    def resolve(self, url):
        # URLs are already final; nothing to resolve.
        return url



+ 0
- 274
lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/solarmoviez.py Прегледај датотеку

@@ -1,274 +0,0 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: MuadDib
import re,traceback,urllib,urlparse,hashlib,random,string,json,base64,sys,xbmc
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
from resources.lib.modules import jsunfuck
from resources.lib.modules import log_utils
CODE = '''def retA():
class Infix:
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
def my_add(x, y):
try: return x + y
except Exception: return str(x) + str(y)
x = Infix(my_add)
return %s
param = retA()'''
class source:
    """Scraper for solarmoviez.ru (movies and TV shows)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['solarmoviez.to', 'solarmoviez.ru']
        self.base_link = 'https://solarmoviez.ru'
        self.search_link = '/movie/search/%s.html'
        self.info_link = '/ajax/movie_info/%s.html?is_login=false'
        self.server_link = '/ajax/v4_movie_episodes/%s'
        self.embed_link = '/ajax/movie_embed/%s'
        # token endpoint returns an obfuscated script yielding x/y params
        self.token_link = '/ajax/movie_token?eid=%s&mid=%s'
        self.source_link = '/ajax/movie_sources/%s?x=%s&y=%s'

    def matchAlias(self, title, aliases):
        # True when title equals any alias after cleantitle normalisation.
        # Returns None (falsy) when nothing matches without error.
        try:
            for alias in aliases:
                if cleantitle.get(title) == cleantitle.get(alias['title']):
                    return True
        except:
            return False

    def movie(self, imdb, title, localtitle, aliases, year):
        """Pack movie identity (plus a 'us' alias) into a urlencoded token."""
        try:
            aliases.append({'country': 'us', 'title': title})
            url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
            url = urllib.urlencode(url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Pack show identity (plus a 'us' alias) into a urlencoded token."""
        try:
            aliases.append({'country': 'us', 'title': tvshowtitle})
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
            url = urllib.urlencode(url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend the show token from tvshow() with episode fields."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return

    def searchShow(self, title, season, aliases, headers):
        """Find the page URL of '<title> Season <season>' via site search."""
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %s' % (title, season)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
            r = client.request(url)
            url = re.findall('<a href=\"(.+?\/movie\/%s-season-%s-.+?\.html)\"' % (cleantitle.geturl(title), season), r)[0]
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return

    def searchMovie(self, title, year, aliases, headers):
        """Find a movie page URL; prefer an alias+year match, fall back to alias only."""
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = client.request(url, headers=headers, timeout='15')
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
            try:
                # Strict pass: title alias AND year must both match.
                r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
                url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            except:
                url = None
                pass
            if (url == None):
                # Relaxed pass: first alias match regardless of year.
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return

    def sources(self, url, hostDict, hostprDict):
        """Enumerate the page's server list, de-obfuscate each server's x/y
        token, then fetch and classify its stream sources.

        Inner failures are swallowed per-server so one bad server does not
        abort the rest.
        """
        try:
            sources = []
            if url is None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # NOTE(review): eval of a field that round-tripped through the
            # url token built by movie()/tvshow() — trusted-ish, but still
            # eval; confirm no external path can feed this.
            aliases = eval(data['aliases'])
            headers = {}
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)
            # Numeric media id is the last '-<digits>' group in the page URL.
            mid = re.findall('-(\d+)', url)[-1]
            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                for eid in r:
                    try:
                        try:
                            # Episode number from the server label, if any.
                            ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                            script = client.request(url)
                            # Three known obfuscation flavours of the token script.
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            r = client.request(u, XHR=True)
                            json_sources = json.loads(r)['playlist'][0]['sources']
                            try:
                                # dict-shaped payload: single google-video file.
                                if 'google' in json_sources['file']:
                                    quality = 'HD'
                                    if 'bluray' in json_sources['file'].lower():
                                        quality = '1080p'
                                    sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en',
                                                    'url': json_sources['file'], 'direct': True, 'debridonly': False})
                            except Exception:
                                # list-shaped payload: blogspot or CDN entries.
                                if 'blogspot' in json_sources[0]['file']:
                                    url = [i['file'] for i in json_sources if 'file' in i]
                                    url = [directstream.googletag(i) for i in url]
                                    url = [i[0] for i in url if i]
                                    for s in url:
                                        sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en',
                                                        'url': s['url'], 'direct': True, 'debridonly': False})
                                elif 'lemonstream' in json_sources[0]['file']:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': 'HD',
                                        'language': 'en',
                                        'url': json_sources[0]['file'] + '|Referer=' + self.base_link,
                                        'direct': True,
                                        'debridonly': False})
                    except:
                        pass
            except:
                pass
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('SolarMoviez - Exception: \n' + str(failure))
            return sources

    def resolve(self, url):
        # URLs are already final; nothing to resolve.
        return url

    def uncensored(a, b):
        # NOTE(review): takes (a, b) with no conventional 'self'; not called
        # anywhere in this file — looks like dead/legacy code. Confirm before
        # removing. Char-wise combines a and b, then base64-encodes.
        x = '' ; i = 0
        for i, y in enumerate(a):
            z = b[i % len(b) - 1]
            y = int(ord(str(y)[0])) + int(ord(str(z)[0]))
            x += chr(y)
        x = base64.b64encode(x)
        return x

    def uncensored1(self, script):
        """De-obfuscate the '$_$' (aaencode-style) token script via the CODE
        template and exec, returning {'x': ..., 'y': ...} or None."""
        try:
            script = '(' + script.split("(_$$)) ('_');")[0].split("/* `$$` */")[-1].strip()
            script = script.replace('(__$)[$$$]', '\'"\'')
            script = script.replace('(__$)[_$]', '"\\\\"')
            script = script.replace('(o^_^o)', '3')
            script = script.replace('(c^_^o)', '0')
            script = script.replace('(_$$)', '1')
            script = script.replace('($$_)', '4')
            # exec with stripped builtins — still exec of remote-derived text.
            vGlobals = {"__builtins__": None, '__name__': __name__, 'str': str, 'Exception': Exception}
            vLocals = {'param': None}
            exec (CODE % script.replace('+', '|x|'), vGlobals, vLocals)
            data = vLocals['param'].decode('string_escape')
            x = re.search('''_x=['"]([^"']+)''', data).group(1)
            y = re.search('''_y=['"]([^"']+)''', data).group(1)
            return {'x': x, 'y': y}
        except:
            pass

    def uncensored2(self, script):
        """De-obfuscate the JSFuck-style token script via jsunfuck,
        returning {'x': ..., 'y': ...} or None."""
        try:
            js = jsunfuck.JSUnfuck(script).decode()
            x = re.search('''_x=['"]([^"']+)''', js).group(1)
            y = re.search('''_y=['"]([^"']+)''', js).group(1)
            return {'x': x, 'y': y}
        except:
            pass

+ 55
- 0
lib/lambdascrapers/sources_ lambdascrapers/en/bobmovies.py Прегледај датотеку

@@ -0,0 +1,55 @@
# -*- coding: UTF-8 -*-

import re,urllib,urlparse

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils

class source:
    """Scraper for bobmovies.site (movies only)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['bobmovies.site']
        self.base_link = 'http://bobmovies.site'
        self.search_link = '/?s=%s'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Search the site for *title*; return the first matching page URL or None."""
        try:
            search_id = title.lower()
            url = urlparse.urljoin(self.base_link, self.search_link)
            # Manual escaping of the few characters the site's search chokes on.
            url = url % (search_id.replace(':', '%3A').replace('&', '%26').replace("'", '%27').replace(' ', '+').replace('...', ' '))
            search_results = client.request(url)
            match = re.compile('<div id="post.+?href="(.+?)".+?title="(.+?)"',re.DOTALL).findall(search_results)
            clean_title = cleantitle.get(title)  # loop-invariant, hoisted out of the loop
            for movie_url, movie_title in match:
                # Strip HTML entity fragments before comparing titles.
                movie_title = movie_title.replace('&#8230', ' ').replace('&#038', ' ').replace('&#8217', ' ').replace('...', ' ')
                clean_movie_title = cleantitle.get(movie_title)
                # NOTE(review): substring direction (result in query) looks
                # inverted but is kept as originally written — confirm.
                if clean_movie_title in clean_title:
                    return movie_url
            return
        except:
            import traceback  # fix: traceback was never imported at file level
            failure = traceback.format_exc()
            log_utils.log('MyBobMovies - movie - Exception: \n' + str(failure))
            return

    def sources(self, url, hostDict, hostprDict):
        """Collect iframe embeds from a movie page as non-direct sources."""
        sources = []
        try:
            if url == None: return sources
            html = client.request(url)
            links = re.compile('<iframe width=".+?src="(.+?)"',re.DOTALL).findall(html)
            for link in links:
                quality, info = source_utils.get_release_quality(link, url)
                # Host label = first domain component, e.g. 'Openload'.
                host = link.split('//')[1].replace('www.','')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})
            return sources
        except:
            import traceback  # fix: traceback was never imported at file level
            failure = traceback.format_exc()
            log_utils.log('MyBobMovies - sources - Exception: \n' + str(failure))
            return sources

    def resolve(self, url):
        """URLs are already final; return them unchanged."""
        return url

+ 93
- 0
lib/lambdascrapers/sources_ lambdascrapers/en/gostream.py Прегледај датотеку

@@ -0,0 +1,93 @@
# -*- coding: UTF-8 -*-
# -Cleaned and Checked on 11-13-2018 by JewBMX in Scrubs.

import re,traceback,urllib,urlparse,json,base64,time

from resources.lib.modules import cleantitle
from resources.lib.modules import dom_parser2
from resources.lib.modules import client
from resources.lib.modules import debrid

class source:
    """Scraper for gostream.sc (movies and TV shows)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['gostream.sc']
        self.base_link = 'http://gostream.sc'
        # Watch-page pattern: /watch/<slug>-<year>-gostream.html
        self.search_link = '/watch/%s-%s-gostream.html'

    def movie(self, imdb, title, localtitle, aliases, year):
        # The movie URL is fully predictable from slug + year; no search needed.
        try:
            clean_title = cleantitle.geturl(title)
            url = urlparse.urljoin(self.base_link, (self.search_link %(clean_title,year)))
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Defer URL construction to episode(); pack identity into a token.
        try:
            aliases.append({'country': 'us', 'title': tvshowtitle})
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Open the '<slug>-sNN' season page and return the 'Episode N' link."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            clean_title = cleantitle.geturl(url['tvshowtitle'])+'-s%02d' % int(season)
            url = urlparse.urljoin(self.base_link, (self.search_link %(clean_title,url['year'])))
            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            for i in r[0]:
                # Anchor text is exactly 'Episode <n>'.
                if i.content == 'Episode %s'%episode:
                    url = i.attrs['href']
                    return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """List the page's server entries; the returned 'url' packs the
        data-* attributes resolve() needs for the player handshake."""
        try:
            sources = []
            if url == None: return sources
            r = client.request(url)
            # First '>word</p' on the page is the site's quality badge.
            quality = re.findall(">(\w+)<\/p",r)
            if quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'], 'data-name' : i.attrs['data-name']}
                url = urllib.urlencode(url)
                sources.append({'source': i.content, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources

    def resolve(self, url):
        """Run the site's 3-step ipplugins/ipplayer/api handshake and return
        the final stream URL (scheme added), or None on failure."""
        try:
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            post = {'ipplugins': 1,'ip_film': urldata['data-film'], 'ip_server': urldata['data-server'], 'ip_name': urldata['data-name'],'fix': "0"}
            p1 = client.request('http://gostream.sc/ip.file/swf/plugins/ipplugins.php', post=post, referer=urldata['url'], XHR=True)
            p1 = json.loads(p1)
            p2 = client.request('http://gostream.sc/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' %(p1['s'],urldata['data-server']))
            p2 = json.loads(p2)
            p3 = client.request('http://gostream.sc/ip.file/swf/ipplayer/api.php?hash=%s' %(p2['hash']))
            p3 = json.loads(p3)
            n = p3['status']
            # On failure, retry the player call with n=1 (alternate mode).
            # Note: '== False' also matches 0 here — kept as written.
            if n == False:
                p2 = client.request('http://gostream.sc/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' %(p1['s'],urldata['data-server']))
                p2 = json.loads(p2)
            # Response is protocol-relative with escaped slashes.
            url = "https:%s" %p2["data"].replace("\/","/")
            return url
        except:
            return

lib/lambdascrapers/sources_ lambdascrapers/en/NotWorking_11-9/l23movies.py → lib/lambdascrapers/sources_ lambdascrapers/en/l23movies.py Прегледај датотеку

@@ -8,9 +8,7 @@
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy
# -Cleaned and Checked on 10-27-2018 by JewBMX

import re
import urllib
@@ -20,6 +18,7 @@ import base64

from resources.lib.modules import client, cleantitle, directstream, dom_parser2
from resources.lib.modules import debrid
from resources.lib.modules import cfscrape

class source:
def __init__(self):
@@ -28,12 +27,14 @@ class source:
self.domains = ['l23movies.com']
self.base_link = 'http://l23movies.com'
self.movies_search_path = ('search-movies/%s.html')
scraper = cfscrape.create_scraper()

def movie(self, imdb, title, localtitle, aliases, year):
try:
scraper = cfscrape.create_scraper()
clean_title = cleantitle.geturl(title).replace('-','+')
url = urlparse.urljoin(self.base_link, (self.movies_search_path % clean_title))
r = client.request(url)
r = scraper.get(url).content

r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
@@ -55,8 +56,9 @@ class source:

def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = client.request(url)
sources = []
scraper = cfscrape.create_scraper()
r = scraper.get(url).content
r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
r = [(i[0].attrs['href'], re.search('/(\w+).html', i[0].attrs['href'])) for i in r if i]
@@ -82,7 +84,8 @@ class source:
def resolve(self, url):
try:
r = client.request(url)
scraper = cfscrape.create_scraper()
r = scraper.get(url).content
url = re.findall('document.write.+?"([^"]*)', r)[0]
url = base64.b64decode(url)
url = re.findall('src="([^"]*)', url)[0]

lib/lambdascrapers/sources_ lambdascrapers/en_DebridOnly/moviesonline.py → lib/lambdascrapers/sources_ lambdascrapers/en/moviesonline.py Прегледај датотеку

@@ -12,6 +12,8 @@
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

# Scraper Checked and Fixed 11-08-2018 -JewBMX

import re, urlparse, urllib, base64

from resources.lib.modules import cleantitle
@@ -24,9 +26,10 @@ class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['moviesonline.mx']
self.base_link = 'http://moviesonline.mx'
self.domains = ['moviesonline.gy','moviesonline.tl']
self.base_link = 'http://moviesonline.gy'
self.search_link = '/search-movies/%s.html'
# moviesonline.mx is now ddos protected


def movie(self, imdb, title, localtitle, aliases, year):

+ 59
- 0
lib/lambdascrapers/sources_ lambdascrapers/en/myprojectfreetv.py Прегледај датотеку

@@ -0,0 +1,59 @@
# -*- coding: UTF-8 -*-
# -Cleaned and Checked on 10-10-2018 by JewBMX in Yoda.

import re,urllib,urlparse

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy

class source:
    """Scraper for my-project-free.tv (TV episodes only; no movie() method)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['my-project-free.tv']
        self.base_link = 'https://my-project-free.tv' #https://www8.project-free-tv.ag
        self.search_link = '/episode/%s-season-%s-episode-%s'

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return the slugified show title; it is all episode() needs."""
        try:
            clean_title = cleantitle.geturl(tvshowtitle)
            url = clean_title
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Build the episode page URL directly from slug, season and episode."""
        try:
            if not url: return
            tvshowtitle = url
            url = self.base_link + self.search_link % (tvshowtitle, int(season), int(episode))
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Scrape host links from the page's callvalue('..','..','<url>') calls.

        Always returns a list (fix: the original returned None from the
        outer except, which breaks callers that iterate the result).
        """
        sources = []
        try:
            if not url: return sources  # fix: guard against a missing url
            r = client.request(url)
            # callvalue('<label>','<quality>','<scheme>://<host>/<path>')
            data = re.compile("callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)",re.DOTALL).findall(r)
            for http, host, path in data:
                sources.append({
                    'source': host,
                    'quality': 'SD',
                    'language': 'en',
                    'url': '%s://%s/%s' % (http, host, path),
                    'direct': False,
                    'debridonly': False
                })
        except Exception:
            pass
        return sources

    def resolve(self, url):
        """URLs are already final; return them unchanged."""
        return url

lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/mywatchseries.py → lib/lambdascrapers/sources_ lambdascrapers/en/mywatchseries.py Прегледај датотеку

@@ -29,10 +29,10 @@ class source:
def __init__(self):
self.priority = 0
self.language = ['en']
self.domains = ['onwatchseries.to','mywatchseries.to']
self.base_link = 'http://mywatchseries.to'
self.search_link = 'http://mywatchseries.to/show/search-shows-json'
self.search_link_2 = 'http://mywatchseries.to/search/%s'
self.domains = ['swatchseries.to']
self.base_link = 'http://www1.swatchseries.to'
self.search_link = 'http://www1.swatchseries.to/show/search-shows-json'
self.search_link_2 = 'http://www1.swatchseries.to/search/%s'


def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):

lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/solarmoviez.py → lib/lambdascrapers/sources_ lambdascrapers/en/solarmoviez.py Прегледај датотеку

@@ -1,274 +1,314 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: MuadDib
import re,traceback,urllib,urlparse,hashlib,random,string,json,base64,sys,xbmc
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
from resources.lib.modules import jsunfuck
from resources.lib.modules import log_utils
CODE = '''def retA():
class Infix:
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
def my_add(x, y):
try: return x + y
except Exception: return str(x) + str(y)
x = Infix(my_add)
return %s
param = retA()'''
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['solarmoviez.to', 'solarmoviez.ru']
self.base_link = 'https://solarmoviez.ru'
self.search_link = '/movie/search/%s.html'
self.info_link = '/ajax/movie_info/%s.html?is_login=false'
self.server_link = '/ajax/v4_movie_episodes/%s'
self.embed_link = '/ajax/movie_embed/%s'
self.token_link = '/ajax/movie_token?eid=%s&mid=%s'
self.source_link = '/ajax/movie_sources/%s?x=%s&y=%s'
def matchAlias(self, title, aliases):
try:
for alias in aliases:
if cleantitle.get(title) == cleantitle.get(alias['title']):
return True
except:
return False
def movie(self, imdb, title, localtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': title})
url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('SolarMoviez - Exception: \n' + str(failure))
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'us', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('SolarMoviez - Exception: \n' + str(failure))
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('SolarMoviez - Exception: \n' + str(failure))
return
def searchShow(self, title, season, aliases, headers):
try:
title = cleantitle.normalize(title)
search = '%s Season %s' % (title, season)
url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
r = client.request(url)
url = re.findall('<a href=\"(.+?\/movie\/%s-season-%s-.+?\.html)\"' % (cleantitle.geturl(title), season), r)[0]
return url
except:
failure = traceback.format_exc()
log_utils.log('SolarMoviez - Exception: \n' + str(failure))
return
def searchMovie(self, title, year, aliases, headers):
    """Locate the site URL for a movie, preferring an alias + year match."""
    try:
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
        html = client.request(url, headers=headers, timeout='15')
        items = client.parseDOM(html, 'div', attrs={'class': 'ml-item'})
        items = zip(client.parseDOM(items, 'a', ret='href'),
                    client.parseDOM(items, 'a', ret='title'))
        # Attach any "(YYYY)" year found in each result's title text.
        results = [(href, label, re.findall('\((\d{4})', label)) for href, label in items]
        url = None
        try:
            # First pass: require both an alias match and the exact year.
            dated = [(href, label, years[0]) for href, label, years in results if len(years) > 0]
            url = [href for href, label, y in dated
                   if self.matchAlias(label, aliases) and (year == y)][0]
        except:
            url = None
        if (url == None):
            # Fallback: alias match alone, year ignored.
            url = [href for href, label, years in results if self.matchAlias(label, aliases)][0]
        return url
    except:
        log_utils.log('SolarMoviez - Exception: \n' + str(traceback.format_exc()))
        return
def sources(self, url, hostDict, hostprDict):
    # Resolve playable stream entries for the movie/episode described by
    # `url` (the query string built by movie()/episode()).  Returns a list
    # of source dicts; on any failure returns whatever was gathered so far.
    try:
        sources = []
        if url is None: return sources
        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
        # NOTE(review): eval() of the serialized alias list -- input comes
        # from this scraper's own movie()/tvshow() payloads, but eval is
        # still fragile; confirm nothing external can feed this url.
        aliases = eval(data['aliases'])
        headers = {}
        if 'tvshowtitle' in data:
            episode = int(data['episode'])
            url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
        else:
            # episode == 0 acts as a sentinel meaning "movie: accept all".
            episode = 0
            url = self.searchMovie(data['title'], data['year'], aliases, headers)
        # Media id is the last "-<digits>" group in the page URL.
        mid = re.findall('-(\d+)', url)[-1]
        try:
            headers = {'Referer': url}
            # Fetch the server/episode listing for this media id (AJAX).
            u = urlparse.urljoin(self.base_link, self.server_link % mid)
            r = client.request(u, headers=headers, XHR=True)
            r = json.loads(r)['html']
            r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
            ids = client.parseDOM(r, 'li', ret='data-id')
            servers = client.parseDOM(r, 'li', ret='data-server')
            labels = client.parseDOM(r, 'a', ret='title')
            r = zip(ids, servers, labels)
            for eid in r:
                try:
                    try:
                        # Episode number from the label, e.g. "Episode 03".
                        ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                    except:
                        ep = 0
                    if (episode == 0) or (int(ep) == episode):
                        # Obtain the obfuscated token script and extract the
                        # x/y params by whichever obfuscation style is used.
                        url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                        script = client.request(url)
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script:
                            x = re.search('''_x=['"]([^"']+)''', script).group(1)
                            y = re.search('''_y=['"]([^"']+)''', script).group(1)
                            params = {'x': x, 'y': y}
                        else:
                            # Unknown obfuscation -- skip this server entry.
                            raise Exception()
                        u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                        r = client.request(u, XHR=True)
                        json_sources = json.loads(r)['playlist'][0]['sources']
                        try:
                            # Dict-shaped payload: a single google-hosted file.
                            if 'google' in json_sources['file']:
                                quality = 'HD'
                                if 'bluray' in json_sources['file'].lower():
                                    quality = '1080p'
                                sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en',
                                                'url': json_sources['file'], 'direct': True, 'debridonly': False})
                        except Exception:
                            # List-shaped payload: blogspot or CDN entries.
                            if 'blogspot' in json_sources[0]['file']:
                                url = [i['file'] for i in json_sources if 'file' in i]
                                url = [directstream.googletag(i) for i in url]
                                url = [i[0] for i in url if i]
                                for s in url:
                                    sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en',
                                                    'url': s['url'], 'direct': True, 'debridonly': False})
                            elif 'lemonstream' in json_sources[0]['file']:
                                sources.append({
                                    'source': 'CDN',
                                    'quality': 'HD',
                                    'language': 'en',
                                    # Referer is required by the CDN; appended Kodi-style.
                                    'url': json_sources[0]['file'] + '|Referer=' + self.base_link,
                                    'direct': True,
                                    'debridonly': False})
                except:
                    pass
        except:
            pass
        return sources
    except:
        failure = traceback.format_exc()
        log_utils.log('SolarMoviez - Exception: \n' + str(failure))
        return sources
def resolve(self, url):
    """Pass-through: links from this scraper need no further resolving."""
    return url
def uncensored(a, b):
    # NOTE(review): declared without `self`, so when invoked as an instance
    # method `a` is bound to the instance and `b` to the first argument --
    # this looks broken and is not called anywhere in this file; confirm
    # against external call sites before relying on or changing it.
    # Intended behavior (from the code): combine char codes of `a` and a
    # cycled `b`, then base64-encode the result (Python 2 str semantics).
    x = '' ; i = 0
    for i, y in enumerate(a):
        z = b[i % len(b) - 1]
        y = int(ord(str(y)[0])) + int(ord(str(z)[0]))
        x += chr(y)
    x = base64.b64encode(x)
    return x
def uncensored1(self, script):
    # Deobfuscate the site's "aaencode"-style ($_$) token script and return
    # the {'x': ..., 'y': ...} params needed by source_link.  Returns None
    # (implicitly) on any failure.
    try:
        # Cut the script down to the obfuscated expression and substitute
        # the symbol aliases with their literal values.
        script = '(' + script.split("(_$$)) ('_');")[0].split("/* `$$` */")[-1].strip()
        script = script.replace('(__$)[$$$]', '\'"\'')
        script = script.replace('(__$)[_$]', '"\\\\"')
        script = script.replace('(o^_^o)', '3')
        script = script.replace('(c^_^o)', '0')
        script = script.replace('(_$$)', '1')
        script = script.replace('($$_)', '4')
        # Evaluate the expression inside the CODE template with a restricted
        # namespace; '+' is rewritten to the template's '|x|' infix operator
        # so mixed str/int concatenation works like JavaScript's '+'.
        vGlobals = {"__builtins__": None, '__name__': __name__, 'str': str, 'Exception': Exception}
        vLocals = {'param': None}
        exec (CODE % script.replace('+', '|x|'), vGlobals, vLocals)
        # Python 2 only: decode backslash escapes in the recovered string.
        data = vLocals['param'].decode('string_escape')
        x = re.search('''_x=['"]([^"']+)''', data).group(1)
        y = re.search('''_y=['"]([^"']+)''', data).group(1)
        return {'x': x, 'y': y}
    except:
        pass
def uncensored2(self, script):
    """Decode a JSFuck-obfuscated token script and extract the x/y params."""
    try:
        decoded = jsunfuck.JSUnfuck(script).decode()
        params = {}
        for name in ('x', 'y'):
            params[name] = re.search('''_%s=['"]([^"']+)''' % name, decoded).group(1)
        return params
    except:
        pass
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# -Cleaned and Checked on 10-27-2018 by JewBMX

import re,urllib,urlparse,hashlib,random,string,json,base64,sys,time

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
from resources.lib.modules import jsunfuck
from resources.lib.modules import source_utils
from resources.lib.modules import cfscrape

CODE = '''def retA():
class Infix:
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
def my_add(x, y):
try: return x + y
except Exception: return str(x) + str(y)
x = Infix(my_add)
return %s
param = retA()'''

class source:
    """SolarMoviez scraper (newer revision): searches solarmoviez.ru via a
    cloudflare-capable session and yields hoster/CDN stream sources."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['solarmoviez.ru','solarmovie.mrunlock.pw']
        self.base_link = 'https://solarmoviez.ru'
        self.search_link = '/movie/search/%s.html'
        self.info_link = '/ajax/movie_get_info/%s.html'
        self.server_link = '/ajax/v4_movie_episodes/%s'
        self.embed_link = '/ajax/movie_embed/%s'
        # token endpoint takes (episode id, media id, ms timestamp for cache-busting)
        self.token_link = '/ajax/movie_token?eid=%s&mid=%s&_=%s'
        self.source_link = '/ajax/movie_sources/%s?x=%s&y=%s'

    def matchAlias(self, title, aliases):
        # True when `title` matches any alias after cleantitle normalisation;
        # implicitly returns None on no match, False on error (both falsy).
        try:
            for alias in aliases:
                if cleantitle.get(title) == cleantitle.get(alias['title']):
                    return True
        except:
            return False

    def movie(self, imdb, title, localtitle, aliases, year):
        # Build the url-encoded payload identifying a movie for sources().
        try:
            aliases.append({'country': 'us', 'title': title})
            url = {'imdb': imdb, 'title': title, 'year': year, 'aliases': aliases}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # Build the url-encoded payload identifying a TV show for episode().
        try:
            aliases.append({'country': 'us', 'title': tvshowtitle})
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
            url = urllib.urlencode(url)
            return url
        except:
            return


    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        # Extend a tvshow() payload with episode-specific fields.
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

    def searchShow(self, title, season, aliases, headers):
        # Locate the site URL for a show season via the search result grid.
        # NOTE(review): relies on self.s, which is created in sources();
        # calling this before sources() would fail (caught by the except).
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(search)))
            r = self.s.get(url, headers=headers).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            # Split each result label into (title, season-number).
            r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
            return url
        except:
            return

    def searchMovie(self, title, year, aliases, headers):
        # Locate the site URL for a movie, confirming the release year via
        # the per-item AJAX info endpoint.  Uses self.s (see searchShow note).
        try:
            title = cleantitle.normalize(title)
            url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
            r = self.s.get(url, headers=headers).content
            r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall('(\d+)', i[0])[0]) for i in r]
            results = []
            for i in r:
                try:
                    info = client.request(urlparse.urljoin(self.base_link, self.info_link % i[2]), headers=headers, timeout='15')
                    y = re.findall('<div\s+class="jt-info">(\d{4})', info)[0]
                    if self.matchAlias(i[1], aliases) and (year == y):
                        url = i[0]
                        break
                    #results.append([i[0], i[1], re.findall('<div\s+class="jt-info">(\d{4})', info)[0]])
                except:
                    url = None
                    pass

            #try:
            #    r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            #    url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
            #except:
            #    url = None
            #    pass

            # NOTE(review): `results` is never populated (the append above is
            # commented out), so this fallback always raises IndexError and is
            # swallowed by the outer except -- effectively dead code.
            if (url == None):
                url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        # Resolve playable stream entries for the movie/episode described by
        # `url`.  Creates the cfscrape session used by the search helpers.
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # NOTE(review): eval() of the serialized alias list -- input comes
            # from this scraper's own payloads, but eval is still fragile.
            aliases = eval(data['aliases'])
            mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'
            # Cloudflare-aware session shared with searchShow/searchMovie.
            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                # episode == 0 acts as a sentinel meaning "movie: accept all".
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            headers['Referer'] = url
            ref_url = url
            # Media id: digits between the last '-' and the '.html' suffix.
            mid = re.findall('-(\d*)\.',url)[0]
            # NOTE(review): `data` below is rebuilt but never sent with the
            # POST -- looks like a leftover; confirm before removing.
            data = {'id':mid}
            r = self.s.post(url, headers=headers)
            try:
                # Fetch the server/episode listing and the server-type header.
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs = {'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                # Map server id -> css class ("embed" servers are iframes).
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time()*1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                # Embed server: resolve the iframe src directly.
                                url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append({'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False })
                                continue
                            else:
                                # Token server: deobfuscate x/y then query sources.
                                url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                                script = self.s.get(url, headers=headers).content
                                if '$_$' in script:
                                    params = self.uncensored1(script)
                                elif script.startswith('[]') and script.endswith('()'):
                                    params = self.uncensored2(script)
                                elif '_x=' in script:
                                    x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                    y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                    params = {'x': x, 'y': y}
                                else:
                                    # Unknown obfuscation -- skip this entry.
                                    raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            # Retry empty responses up to 10 times.
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                # List-of-dicts payload.
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    # Single-dict payload.
                                    uri = [uri['file']]
                                except:
                                    continue
                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({'source': 'gvideo', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                                    continue

                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                #urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    #for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url, ref=ref_url)
                                    else: direct = False
                                    sources.append({'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': direct, 'debridonly': False})
                                else:
                                    # Unknown host: expose as a direct CDN link.
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources

    def resolve(self, url):
        # Normalise protocol-relative links and resolve google-hosted URLs
        # through directstream.googlepass (up to 3 attempts).
        try:
            if not url.startswith('http'):
                url = 'http:' + url
            for i in range(3):
                if 'google' in url and not 'googleapis' in url:
                    url = directstream.googlepass(url)
                    if url:
                        break
            return url
        except Exception:
            return

    def uncensored(a, b):
        # NOTE(review): declared without `self` and not called in this file;
        # when invoked as a method `a` would be the instance.  Confirm call
        # sites before relying on or changing it.
        x = '' ; i = 0
        for i, y in enumerate(a):
            z = b[i % len(b) - 1]
            y = int(ord(str(y)[0])) + int(ord(str(z)[0]))
            x += chr(y)
        x = base64.b64encode(x)
        return x

    def uncensored1(self, script):
        # Deobfuscate the "$_$"-style token script; returns {'x':..,'y':..}
        # or None on failure.  See CODE template above for the exec target.
        try:
            script = '(' + script.split("(_$$)) ('_');")[0].split("/* `$$` */")[-1].strip()
            script = script.replace('(__$)[$$$]', '\'"\'')
            script = script.replace('(__$)[_$]', '"\\\\"')
            script = script.replace('(o^_^o)', '3')
            script = script.replace('(c^_^o)', '0')
            script = script.replace('(_$$)', '1')
            script = script.replace('($$_)', '4')

            vGlobals = {"__builtins__": None, '__name__': __name__, 'str': str, 'Exception': Exception}
            vLocals = {'param': None}
            exec (CODE % script.replace('+', '|x|'), vGlobals, vLocals)
            # Python 2 only: decode backslash escapes in the recovered string.
            data = vLocals['param'].decode('string_escape')
            x = re.search('''_x=['"]([^"']+)''', data).group(1)
            y = re.search('''_y=['"]([^"']+)''', data).group(1)
            return {'x': x, 'y': y}
        except:
            pass

    def uncensored2(self, script):
        # Deobfuscate a JSFuck-style token script via jsunfuck; returns
        # {'x':..,'y':..} or None on failure.
        try:
            js = jsunfuck.JSUnfuck(script).decode()
            x = re.search('''_x=['"]([^"']+)''', js).group(1)
            y = re.search('''_y=['"]([^"']+)''', js).group(1)
            return {'x': x, 'y': y}
        except:
            pass

+ 2
- 6
lib/lambdascrapers/sources_ lambdascrapers/en_DebridOnly/300mbfilms.py Прегледај датотеку

@@ -8,11 +8,7 @@
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy


# -Cleaned and Checked on 11-13-2018 by JewBMX in Scrubs.

import re,urllib,urlparse

@@ -27,7 +23,7 @@ class source:
self.priority = 1
self.language = ['en']
self.domains = ['300mbfilms.co']
self.base_link = 'https://www.300mbfilms.co/'
self.base_link = 'https://300mbfilms.co' #https://www.300mbfilms.co
self.search_link = '/search/%s/feed/rss2/'

def movie(self, imdb, title, localtitle, aliases, year):

+ 4
- 4
lib/lambdascrapers/sources_ lambdascrapers/en_DebridOnly/bestmoviez.py Прегледај датотеку

@@ -8,9 +8,7 @@
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
# Scraper Checked and Fixed 11-08-2018 -JewBMX

import re,urllib,urlparse

@@ -19,6 +17,7 @@ from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils
from resources.lib.modules import cfscrape

# Working: https://www.best-moviez.ws/deadpool-2-2018-1080p-web-dl-dd5-1-h264-cmrg/
# Working: https://www.best-moviez.ws/deadpool-2-2018
@@ -66,6 +65,7 @@ class source:
def sources(self, url, hostDict, hostprDict):
try:
sources = []
scraper = cfscrape.create_scraper()

if url == None: return sources

@@ -84,7 +84,7 @@ class source:
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
#log_utils.log('\n\n\n\n\n\nquery, url: %s, %s' % (query,url))
r = client.request(url)
r = scraper.get(url).content

# grab the (only?) relevant div and cut off the footer

+ 6
- 3
resources/settings.xml Прегледај датотеку

@@ -28,7 +28,7 @@
<setting id="provider.123moviesgold" type="bool" label="$NUMBER[123]MOVIESGOLD" default="false" /> <!--Lambdascrapers-->
<setting id="provider.123netflix" type="bool" label="$NUMBER[123]NETFLIX" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.321movies" type="bool" label="$NUMBER[321]MOVIES" default="false" /> <!--Placenta-->
<setting id="provider.1080P" type="bool" label="$NUMBER[1080]P" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.1080P" type="bool" label="$NUMBER[1080]P" default="true" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.1123movies" type="bool" label="$NUMBER[1123]MOVIES" default="false" /> <!--Placenta-->
<setting id="provider.afdah" type="bool" label="AFDAH" default="false" /> <!--Lambdascrapers-->
<setting id="provider.allrls" type="bool" label="ALLRLS" default="false" /> <!--Incursion--> <!--Placenta--> <!--Lambdascrapers-->
@@ -37,6 +37,7 @@
<setting id="provider.bmoviez" type="bool" label="BMOVIEZ" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.bnwmovies" type="bool" label="BNWMOVIES" default="true" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.bob" type="bool" label="BOB" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.bobmovies" type="bool" label="BOBMOVIES" default="true" />
<setting id="provider.cafehulu" type="bool" label="CAFEHULU" default="false" /> <!--Placenta-->
<setting id="provider.cartoonhd" type="bool" label="CARTOONHD" default="true" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.cinemamega" type="bool" label="CINEMAMEGA" default="false" /> <!--Placenta-->
@@ -59,6 +60,7 @@
<setting id="provider.furk" type="bool" label="FURK" default="true" /> <!--Incursion--> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.genvideos" type="bool" label="GENVIDEOS" default="false" /> <!--Lambdascrapers-->
<setting id="provider.gogoanime" type="bool" label="GOGOANIME" default="false" /> <!--Incursion--> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.gostream" type="bool" label="GOSTREAM" default="true" />
<setting id="provider.gowatchseries" type="bool" label="GOWATCHSERIES" default="true" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.hd" type="bool" label="HD" default="false" /> <!--Placenta-->
<setting id="provider.hdmega" type="bool" label="HDMEGA" default="false" /> <!--Placenta-->
@@ -81,8 +83,10 @@
<setting id="provider.movie4kto" type="bool" label="MOVIE4KTO" default="false" /> <!--Placenta-->
<setting id="provider.movie4uch" type="bool" label="MOVIE4UCH" default="false" /> <!--Incursion--> <!--Lambdascrapers-->
<setting id="provider.moviesgold" type="bool" label="MOVIESGOLD" default="false" /> <!--Placenta-->
<setting id="provider.moviesonline" type="bool" label="MOVIESONLINE" default="false" />
<setting id="provider.moviesplanet" type="bool" label="MOVIESPLANET" default="true" /> <!--Incursion--> <!--Lambdascrapers-->
<setting id="provider.moviexk" type="bool" label="MOVIEXK" default="false" /> <!--Placenta-->
<setting id="provider.myprojectfreetv" type="bool" label="MYPROJECTFREETV" default="true" />
<setting id="provider.myputlock" type="bool" label="MYPUTLOCK" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.mywatchseries" type="bool" label="MYWATCHSERIES" default="false" /> <!--Incursion--> <!--Lambdascrapers-->
<!-- <setting id="provider.mzmovies" type="bool" label="MZMOVIES" default="false" />--> <!--Incursion-->
@@ -105,7 +109,7 @@
<setting id="provider.sezonlukdizi" type="bool" label="SEZONLUKDIZI" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.showbox" type="bool" label="SHOWBOX" default="false" /> <!--Lambdascrapers-->
<setting id="provider.solarmovie" type="bool" label="SOLARMOVIE" default="false" /> <!--Incursion--> <!--Placenta-->
<setting id="provider.solarmoviez" type="bool" label="SOLARMOVIEZ" default="false" /> <!--Lambdascrapers-->
<setting id="provider.solarmoviez" type="bool" label="SOLARMOVIEZ" default="true" /> <!--Lambdascrapers-->
<setting id="provider.streamlord" type="bool" label="STREAMLORD" default="true" /> <!--Incursion--> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.sunmovies" type="bool" label="SUNMOVIES" default="false" /> <!--Placenta--> <!--Placenta-->
<setting id="provider.timewatch" type="bool" label="TIMETOWATCH" default="false" /> <!--Placenta--> <!--Placenta-->
@@ -192,7 +196,6 @@
<setting id="provider.directdl" type="bool" label="DIRECTDL" default="false" /> <!--Incursion--> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.gomovies" type="bool" label="GOMOVIES" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<!-- <setting id="provider.hevcfilm" type="bool" label="HEVCFILM" default="false" />--> <!--Incursion--> <!--Placenta-->
<setting id="provider.moviesonline" type="bool" label="MOVIESONLINE" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.myvideolink" type="bool" label="MYVIDEOLINK" default="false" /> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.phazeddl" type="bool" label="PHAZEDLL" default="false" /> <!--Incursion--> <!--Placenta--> <!--Lambdascrapers-->
<setting id="provider.power" type="bool" label="POWER" default="false" /> <!--Lambdascrapers-->

Loading…
Откажи
Сачувај