Browse Source

Move the non-working providers into their own directory

pull/43/head
I-A-C 1 year ago
parent
commit
c7a4540290
35 changed files with 4421 additions and 0 deletions
  1. 74
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/1080P.py
  2. 196
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/123movies.py
  3. 139
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/123moviesgold.py
  4. 139
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/123netflix.py
  5. 146
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/1movie.py
  6. 83
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/afdah.py
  7. 142
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/allrls.py
  8. 156
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/animeultima.py
  9. 100
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/bmoviez.py
  10. 91
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/bob.py
  11. 171
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/cmovies.py
  12. 97
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/cooltv.py
  13. 91
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/dltube.py
  14. 91
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/einthusan.py
  15. 97
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/filmxy.py
  16. 104
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/flenix.py
  17. 113
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/flixanity.py
  18. 88
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/genvideos.py
  19. 114
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/gogoanime.py
  20. 247
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/icefilms.py
  21. 97
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/icouchtuner.py
  22. 91
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/l23movies.py
  23. 103
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/m4ufree.py
  24. 103
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/megahd.py
  25. 116
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/movie4uch.py
  26. 77
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/myputlock.py
  27. 141
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/mywatchseries.py
  28. 111
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/projectfree.py
  29. 170
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/pron.py
  30. 139
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/rajahoki88.py
  31. 150
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/sezonlukdizi.py
  32. 187
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/showbox.py
  33. 274
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/solarmoviez.py
  34. 103
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/vodly.py
  35. 80
    0
      lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/watch32.py

+ 74
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/1080P.py View File

@@ -0,0 +1,74 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import re,traceback,urllib,urlparse,json,base64

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils

class source:
    """Scraper for 1080pmovie.com / watchhdmovie.net (movies only).

    Searches the site, matches a result by title and year, then follows
    two levels of iframes down to the final hoster URL.
    """

    def __init__(self):
        # Lower number = higher priority among scrapers.
        self.priority = 1
        self.language = ['en']
        self.domains = ['1080pmovie.com', 'watchhdmovie.net']
        self.base_link = 'https://watchhdmovie.net'
        self.search_link = '/?s=%s'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return an urlencoded {imdb, title, year} query string, or None."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('1080PMovies - Exception: \n' + str(failure))
            return

    def sources(self, url, hostDict, hostprDict):
        """Search the site and collect hoster links for the given movie."""
        sources = []
        try:
            if url == None: return
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            title = urldata['title'].replace(':', ' ').lower()
            year = urldata['year']

            search_id = title.lower()
            # BUGFIX: self.search_link ('/?s=%s') has a single placeholder,
            # but the old code formatted it with (base_link, query), which
            # raised TypeError on every call. Join base URL + search path.
            start_url = urlparse.urljoin(self.base_link, self.search_link % search_id.replace(' ', '%20'))

            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(start_url, headers=headers)
            # Pull (link, title) pairs out of the JSON-ish search response.
            Links = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"', re.DOTALL).findall(html)
            for link, name in Links:
                link = link.replace('\\', '')
                if title.lower() in name.lower():
                    if year in name:
                        # Follow movie page -> player iframe -> hoster iframe.
                        holder = client.request(link, headers=headers)
                        new = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(holder)[0]
                        end = client.request(new, headers=headers)
                        final_url = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(end)[0]
                        valid, host = source_utils.is_host_valid(final_url, hostDict)
                        sources.append({'source': host, 'quality': '1080p', 'language': 'en', 'url': final_url, 'info': [], 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('1080PMovies - Exception: \n' + str(failure))
            return sources

    def resolve(self, url):
        # Hand off to the shared google-video resolver.
        return directstream.googlepass(url)


+ 196
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/123movies.py View File

@@ -0,0 +1,196 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import urlparse,traceback,urllib,re,json,xbmc

from resources.lib.modules import client
from resources.lib.modules import cleantitle
from resources.lib.modules import directstream
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils

class source:
    """Scraper for 123movies.ph, backed by the gomostream.com token API."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['123movies.ph']
        self.base_link = 'https://123movies.ph/'
        self.source_link = 'https://123movies.ph/'
        self.episode_path = '/episodes/%s-%sx%s/'
        self.movie_path0 = '/movies/%s-watch-online-free-123movies-%s/'
        self.movie_path = '/movies/%s/'
        # Decode/grabber endpoints moved to gomostream.com; older relative
        # paths kept for reference:
        # self.decode_file = '/decoding_v2.php'
        # self.decode_file = '/decoding_v3.php'
        self.decode_file = 'https://gomostream.com/decoding_v3.php'
        # self.grabber_file = '/get.php'
        # self.grabber_file = '/getv2.php'
        self.grabber_file = 'https://gomostream.com/getv2.php'
        # Observed on-site: $.ajax({ type: "POST", url: ".../decoding_v3.php" ... })
        #                   $.ajax({ type: "POST", url: ".../getv2.php" ... })

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return an urlencoded {title, year} query string, or None."""
        try:
            url = {'title': title, 'year': year}
            return urllib.urlencode(url)

        except Exception:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return an urlencoded {tvshowtitle, year, imdb} query string, or None."""
        try:
            data = {'tvshowtitle': tvshowtitle, 'year': year, 'imdb': imdb}
            return urllib.urlencode(data)

        except Exception:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Extend the show query with season/episode info; return urlencoded."""
        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})

            return urllib.urlencode(data)

        except Exception:
            return

    def sources(self, url, hostDict, hostprDict):
        """Resolve playable links for a movie or episode query.

        Always returns a list (possibly empty).
        """
        sources = []
        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)

            is_episode = 'episode' in data
            if is_episode:
                url = self.__get_episode_url(data)
                get_body = 'type=episode&%s=%s&imd_id=%s&seasonsNo=%02d&episodesNo=%02d'
            else:
                url = self.__get_movie_url(data)

            response = client.request(url)
            url = re.findall('<iframe .+? src="(.+?)"', response)[0]

            response = client.request(url)

            # 'tc' is the obfuscated token; slice bounds and seed suffixes are
            # read out of the page's _tsd_tsd_ds() javascript.
            token = re.findall('var tc = \'(.+?)\'', response)[0]
            seeds = re.findall('_tsd_tsd_ds\(s\) .+\.slice\((.+?),(.+?)\).+ return .+? \+ \"(.+?)\"\+\"(.+?)\";', response)[0]
            pair = re.findall('\'type\': \'.+\',\s*\'(.+?)\': \'(.+?)\'', response)[0]

            header_token = self.__xtoken(token, seeds)
            body = 'tokenCode=' + token

            headers = {
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                'x-token': header_token
            }

            url = self.decode_file
            response = client.request(url, XHR=True, post=body, headers=headers)

            sources_dict = json.loads(response)

            for source in sources_dict:
                try:
                    # Only direct .mp4 CDN links are useful from this endpoint.
                    if '.mp4' in source:
                        sources.append({
                            'source': 'CDN',
                            'quality': 'HD',
                            'language': 'en',
                            'url': source,
                            'direct': True,
                            'debridonly': False
                        })
                except Exception:
                    pass

            # BUGFIX: the grabber request is episode-only — get_body,
            # data['season'] and data['episode'] are undefined for movies.
            # The old code always ran it and relied on the outer except to
            # bail out (losing nothing but hiding a NameError). Guard it.
            if is_episode:
                body = get_body % (pair[0], pair[1], data['imdb'], int(data['season']), int(data['episode']))

                url = urlparse.urljoin(self.source_link, self.grabber_file)
                response = client.request(url, XHR=True, post=body, headers=headers)

                sources_dict = json.loads(response)

                for source in sources_dict:
                    try:
                        quality = source_utils.label_to_quality(source['label'])
                        link = source['file']

                        if 'lh3.googleusercontent' in link:
                            link = directstream.googleredirect(link)

                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'direct': True,
                            'debridonly': False
                        })

                    except Exception:
                        pass

            return sources

        except Exception:
            return sources

    def resolve(self, url):
        return url

    def __get_episode_url(self, data):
        """Build the episode page URL and append its watch-token."""
        try:
            clean_title = cleantitle.geturl(data['tvshowtitle'])
            query = self.episode_path % (clean_title, data['season'], data['episode'])

            url = urlparse.urljoin(self.base_link, query)
            html = client.request(url)

            token = re.findall('\/?watch-token=(.*?)\"', html)[0]

            return url + ('?watch-token=%s' % token)

        except Exception:
            return

    def __get_movie_url(self, data):
        """Build the movie page URL and append its watch-token."""
        clean_title = cleantitle.geturl(data['title'])
        # NOTE: the site also serves a longer slug including the year
        # (self.movie_path0 % (clean_title, data['year'])); the short slug
        # works at least almost always, so only it is used.
        query = self.movie_path % clean_title
        url = urlparse.urljoin(self.base_link, query)
        html = client.request(url)

        token = re.findall('\/?watch-token=(.*?)\"', html)[0]

        return url + ('?watch-token=%s' % token)

    def __xtoken(self, token, seeds):
        """Mirror the site's JS: slice the token, reverse it, append seeds."""
        try:
            xtoken = token[int(seeds[0]):int(seeds[1])]
            xtoken = list(xtoken)
            xtoken.reverse()

            return ''.join(xtoken) + seeds[2] + seeds[3]

        except Exception:
            return

+ 139
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/123moviesgold.py View File

@@ -0,0 +1,139 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy

import re, urlparse, urllib, base64

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2


class source:
    """Scraper for 123movies.gold; primary link is base64-hidden in the page."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['123movies.gold']
        self.base_link = 'http://123movies.gold'
        self.search_link = '/search-movies/%s.html'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Search by title and return the movie page URL matched by year."""
        try:
            clean_title = cleantitle.geturl(title)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            r = cache.get(client.request, 1, search_url)
            r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  re.findall('.+?elease:\s*(\d{4})</', i),
                  re.findall('<b><i>(.+?)</i>', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if
                 (cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
            url = r[0][0]

            return url
        except Exception:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return an urlencoded show descriptor, or None."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find the season page, then the link for the requested episode."""
        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = cache.get(client.request, 1, search_url)
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from the page. Always returns a list."""
        sources = []
        try:
            r = cache.get(client.request, 1, url)
            try:
                # Primary link is hidden in document.write(Base64.decode(...)).
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
                b64 = base64.b64decode(v)
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url.replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass
            except:
                pass
            # Alternate server rows.
            r = client.parseDOM(r, 'div', {'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0]
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        if 'other' in host: continue
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass
            return sources
        except Exception:
            # BUGFIX: previously returned None here; callers expect a list.
            return sources

    def resolve(self, url):
        # On-site links need the embedded base64 iframe decoded first.
        if self.base_link in url:
            url = client.request(url)
            v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
            b64 = base64.b64decode(v)
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
        return url

+ 139
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/123netflix.py View File

@@ -0,0 +1,139 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy
import re, urlparse, urllib, base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2
class source:
    """Scraper for 123netflix (unblock mirror); near-twin of 123moviesgold."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['123netflix.com']
        self.base_link = 'http://123netflix.unblockall.org'
        self.search_link = '/search-movies/%s.html'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Search by title and return the movie page URL matched by year."""
        try:
            clean_title = cleantitle.geturl(title)
            search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
            r = cache.get(client.request, 1, search_url)
            r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
            r = [(client.parseDOM(i, 'a', ret='href'),
                  re.findall('.+?elease:\s*(\d{4})</', i),
                  re.findall('<b><i>(.+?)</i>', i)) for i in r]
            r = [(i[0][0], i[1][0], i[2][0]) for i in r if
                 (cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
            url = r[0][0]
            return url
        except Exception:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return an urlencoded show descriptor, or None."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find the season page, then the link for the requested episode."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = cache.get(client.request, 1, search_url)
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = client.request(url)
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
            return url[0][1]
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from the page. Always returns a list."""
        sources = []
        try:
            r = cache.get(client.request, 1, url)
            try:
                # Primary link is hidden in document.write(Base64.decode(...)).
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
                b64 = base64.b64decode(v)
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url.replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass
            except:
                pass
            # Alternate server rows.
            r = client.parseDOM(r, 'div', {'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0]
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        if 'other' in host: continue
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass
            return sources
        except Exception:
            # BUGFIX: previously returned None here; callers expect a list.
            return sources

    def resolve(self, url):
        # On-site links need the embedded base64 iframe decoded first.
        if self.base_link in url:
            url = client.request(url)
            v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
            b64 = base64.b64decode(v)
            url = client.parseDOM(b64, 'iframe', ret='src')[0]
        return url

+ 146
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/1movie.py View File

@@ -0,0 +1,146 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy
import json,re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
    """Scraper for 1movies.se / 1movies.to."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['1movies.se', '1movies.to']
        self.base_link = 'https://1movies.se'
        self.search_link = '/search_all/%s'
        self.player_link = '/ajax/movie/load_player_v3'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Resolve a movie to its site-relative URL via __search."""
        try:
            return self.__search([title] + source_utils.aliases_to_array(aliases), year)
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return an urlencoded show descriptor, or None."""
        try:
            url = {'tvshowtitle': tvshowtitle, 'aliases': aliases, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Locate the season page, then the link for the requested episode."""
        try:
            if not url:
                return
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            url = self.__search([data['tvshowtitle']] + source_utils.aliases_to_array(eval(data['aliases'])), data['year'], season)
            if not url: return
            r = client.request(urlparse.urljoin(self.base_link, url))
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'ep_link'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content) for i in r if i]
            r = [(i[0], re.findall("^(?:episode)\s*(\d+)$", i[1], re.I)) for i in r]
            r = [(i[0], i[1][0] if i[1] else '0') for i in r]
            r = [i[0] for i in r if int(i[1]) == int(episode)][0]
            return source_utils.strip_domain(r)
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Build the source list via the site's ajax player endpoint.

        Always returns a list (possibly empty).
        """
        sources = []
        try:
            if not url:
                return sources
            ref = urlparse.urljoin(self.base_link, url)
            r = client.request(ref)
            p = re.findall('load_player\((\d+)\)', r)
            r = client.request(urlparse.urljoin(self.base_link, self.player_link), post={'id': p[0]}, referer=ref, XHR=True)
            url = json.loads(r).get('value')
            link = client.request(url, XHR=True, output='geturl', referer=ref)
            if '1movies.' in link:
                # Self-hosted player: pull file/label pairs and bucket by height.
                r = client.request(link, XHR=True, referer=ref)
                r = [(match[1], match[0]) for match in re.findall('''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''', r, re.DOTALL)]
                r = [(re.sub('[^\d]+', '', x[0]), x[1].replace('\/', '/')) for x in r]
                r = [x for x in r if x[0]]
                links = [(x[1], '4K') for x in r if int(x[0]) >= 2160]
                links += [(x[1], '1440p') for x in r if int(x[0]) >= 1440]
                links += [(x[1], '1080p') for x in r if int(x[0]) >= 1080]
                links += [(x[1], 'HD') for x in r if 720 <= int(x[0]) < 1080]
                links += [(x[1], 'SD') for x in r if int(x[0]) < 720]
                for url, quality in links:
                    sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            else:
                valid, host = source_utils.is_host_valid(link, hostDict)
                # BUGFIX: previously bare 'return' (None); callers expect a list.
                if not valid: return sources
                urls = []
                # NOTE(review): when 'google' matches AND urls are found, the
                # second chain's else still runs and overwrites urls with one
                # non-direct HD entry — looks unintended, preserved as-is.
                if 'google' in link: host = 'gvideo'; direct = True; urls = directstream.google(link);
                if 'google' in link and not urls and directstream.googletag(link): host = 'gvideo'; direct = True; urls = [{'quality': directstream.googletag(link)[0]['quality'], 'url': link}]
                elif 'ok.ru' in link: host = 'vk'; direct = True; urls = directstream.odnoklassniki(link)
                elif 'vk.com' in link: host = 'vk'; direct = True; urls = directstream.vk(link)
                else: direct = False; urls = [{'quality': 'HD', 'url': link}]
                for x in urls: sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
            return sources
        except:
            return sources

    def resolve(self, url):
        return url

    def __search(self, titles, year, season='0'):
        """Search the site; match cleaned title, year (+/-1) and season."""
        try:
            query = self.search_link % (urllib.quote_plus(titles[0]))
            query = urlparse.urljoin(self.base_link, query)
            t = [cleantitle.get(i) for i in set(titles) if i]
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'list_movies'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'item_movie'})
            r = dom_parser.parse_dom(r, 'h2', attrs={'class': 'tit'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content.lower()) for i in r if i]
            r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
            r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:\s*-?\s*(?:season|s))\s*(\d+)', i[1])) for i in r]
            r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
            r = [(i[0], i[1], i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
            r = sorted(r, key=lambda i: int(i[2]), reverse=True)  # with year > no year
            r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
            return source_utils.strip_domain(r)
        except:
            return

+ 83
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/afdah.py View File

@@ -0,0 +1,83 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: MuadDib
import re,traceback,json,urllib,urlparse,base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import source_utils
class source:
    """Scraper for afdah.to; discovers pages via a Google site search."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['afdah.to']
        self.base_link = 'http://afdah.to/'
        self.search_link = '%s/search?q=afdah.to+%s+%s'
        self.goog = 'https://www.google.co.uk'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return an urlencoded {imdb, title, year} query string, or None."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            # BUGFIX: log_utils is not imported at module level in this file,
            # so the old handler raised NameError. Import locally before use.
            from resources.lib.modules import log_utils
            failure = traceback.format_exc()
            log_utils.log('AFDAH - Exception: \n' + str(failure))
            return

    def sources(self, url, hostDict, hostprDict):
        """Google-search for the movie page, scrape its Alternate Servers tab."""
        sources = []
        try:
            if url == None: return
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            title = urldata['title']
            year = urldata['year']
            scrape = title.lower().replace(' ', '+').replace(':', '')
            start_url = self.search_link % (self.goog, scrape, year)
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(start_url, headers=headers)
            results = re.compile('href="(.+?)"', re.DOTALL).findall(html)
            for url in results:
                if self.base_link in url:
                    # Skip Google's cached copies of the page.
                    if 'webcache' in url:
                        continue
                    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
                    html = client.request(url, headers=headers)
                    chktitle = re.compile('property="og:title" content="(.+?)" ', re.DOTALL).findall(html)[0]
                    if cleantitle.get(title) in cleantitle.get(chktitle):
                        # pulls all the links from Alternate Servers tab
                        alt_server_cont = re.compile('<div id="cont_5" class="tabContent" style=".+?">(.+?)</div>', re.DOTALL).findall(html)[0]
                        alt_links = re.compile('<a rel="nofollow" href="(.+?)"', re.DOTALL).findall(alt_server_cont)
                        for vid_url in alt_links:
                            host = vid_url.split('//')[1].replace('www.', '')
                            host = host.split('/')[0].lower()
                            sources.append({'source': host, 'quality': 'HD', 'language': 'en', 'url': vid_url, 'info': [], 'direct': False, 'debridonly': False})
                        return sources
            return sources
        except:
            # BUGFIX: see movie() — log_utils must be imported locally.
            from resources.lib.modules import log_utils
            failure = traceback.format_exc()
            log_utils.log('AFDAH - Exception: \n' + str(failure))
            return sources

    def resolve(self, url):
        return url

+ 142
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/allrls.py View File

@@ -0,0 +1,142 @@
# -*- coding: UTF-8 -*-
"""
LambdaScrapers Module

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

# Addon Name: LambdaScrapers Module
# Addon id: script.module.lambdascrapers
# Provider: throwaway-scraper
# Rev.1: Sep 25 2018

import re,traceback,urllib,urlparse,json

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import control
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils

class source:
def __init__(self):
    # Scraper priority (lower runs earlier) and supported language.
    self.priority = 1
    self.language = ['en']
    # Domain(s) this scraper handles.
    self.domains = ['oorls.xyz']
    self.base_link = 'http://oorls.xyz'
    # Search query template: title + year (movies) or title + SxxExx (episodes).
    self.search_link = '?s=%s+%s&go=Search'

def movie(self, imdb, title, localtitle, aliases, year):
    """Search the site for a movie and return the matching result-page URLs."""
    matches = []
    try:
        query = cleantitle.geturl(title).replace('-', '+')
        search_url = urlparse.urljoin(self.base_link, self.search_link % (query, year))

        page = client.request(search_url)
        for entry in client.parseDOM(page, 'h2', attrs={'class': 'entry-title'}):
            candidate = client.parseDOM(entry, 'a', ret='href')[0]
            # Keep only on-site links whose slug contains both title and year.
            if self.base_link not in candidate:
                continue
            if cleantitle.get(title) in cleantitle.get(candidate) and year in candidate:
                matches.append(candidate)
        return matches
    except:
        log_utils.log('ALLRLS - Exception: \n' + str(traceback.format_exc()))
        return matches

def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
    """Pack the show title and year into an urlencoded query string."""
    try:
        return urllib.urlencode({'tvshowtitle': tvshowtitle, 'year': year})
    except:
        log_utils.log('ALLRLS - Exception: \n' + str(traceback.format_exc()))
        return

def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
pages = []
data = urlparse.parse_qs(url)
data = dict((i, data[i][0]) for i in data)
data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})

season_base = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', season_base)
tvshowtitle = data['tvshowtitle']
tvshowtitle = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', tvshowtitle)

query = query.replace("&", "and")
query = query.replace(" ", " ")
query = query.replace(" ", "+")
tvshowtitle = tvshowtitle.replace("&", "and")
tvshowtitle = tvshowtitle.replace(" ", " ")
tvshowtitle = tvshowtitle.replace(" ", "+")

start_url = urlparse.urljoin(self.base_link, self.search_link % (tvshowtitle, query))

html = client.request(start_url)
results = client.parseDOM(html, 'h2', attrs={'class':'entry-title'})
for content in results:
found_link = client.parseDOM(content, 'a', ret='href')[0]
if self.base_link in found_link:
if cleantitle.get(data['tvshowtitle']) in cleantitle.get(found_link):
if cleantitle.get(season_base) in cleantitle.get(found_link):
pages.append(found_link)
return pages
except:
failure = traceback.format_exc()
log_utils.log('ALLRLS - Exception: \n' + str(failure))
return pages

def sources(self, url, hostDict, hostprDict):
try:
sources = []

if url == None: return sources

hostDict = hostprDict + hostDict
pages = url
for page_url in pages:
r = client.request(page_url)
urls = client.parseDOM(r, 'a', ret = 'href')
for url in urls:
try:
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()

if any(x in url for x in ['.rar', '.zip', '.iso']): continue

quality, info = source_utils.get_release_quality(url)

info = []

if any(x in url.upper() for x in ['HEVC', 'X265', 'H265']): info.append('HEVC')

info = ' | '.join(info)

host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
except:
pass
return sources
except:
failure = traceback.format_exc()
log_utils.log('ALLRLS - Exception: \n' + str(failure))
return sources

def resolve(self, url):
return url

+ 156
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/animeultima.py View File

@@ -0,0 +1,156 @@
# -*- coding: utf-8 -*-

'''
Yoda Add-on

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''


import re,urllib,urlparse,json

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import tvmaze

class source:
    """Scraper for animeultima.io; episodes are mapped via TVMaze absolute numbers."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        # Only consulted for shows carrying these genres.
        self.genre_filter = ['animation', 'anime']
        self.domains = ['animeultima.io']
        self.base_link = 'http://www.animeultima.io'
        self.search_link = '/search.html?searchquery=%s'

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Look the show up by TVDB id and return its site-relative URL."""
        try:
            # TVMaze supplies the canonical name the site indexes under.
            show_name = tvmaze.tvMaze().showLookup('thetvdb', tvdb)['name']
            wanted = cleantitle.get(show_name)

            search_url = urlparse.urljoin(self.base_link, self.search_link % (urllib.quote_plus(show_name)))
            html = client.request(search_url)

            listing = client.parseDOM(html, 'ol', attrs={'id': 'searchresult'})[0]
            rows = []
            for heading in client.parseDOM(listing, 'h2'):
                hrefs = client.parseDOM(heading, 'a', ret='href')
                labels = client.parseDOM(heading, 'a')
                if len(hrefs) > 0 and len(labels) > 0:
                    rows.append((hrefs[0], re.sub('<.+?>|</.+?>', '', labels[0])))
            # Keep only exact title matches; the last one mirrors the original pick.
            matches = [href for href, label in rows if wanted == cleantitle.get(label)]
            link = matches[-1]

            url = re.findall('(?://.+?|)(/.+)', link)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Translate (season, episode) into the site's absolute-numbered episode URL."""
        try:
            if url == None: return

            # The site lists episodes by absolute number, not by season/episode.
            absolute = str(tvmaze.tvMaze().episodeAbsoluteNumber(tvdb, int(season), int(episode)))

            html = client.request(urlparse.urljoin(self.base_link, url))

            candidates = []
            for row in client.parseDOM(html, 'tr', attrs={'class': ''}):
                hrefs = client.parseDOM(row, 'a', ret='href')
                nums = client.parseDOM(row, 'td', attrs={'class': 'epnum'})
                if len(hrefs) > 0 and len(nums) > 0:
                    candidates.append((hrefs[0], nums[0]))
            link = [href for href, num in candidates if absolute == num][0]

            url = re.findall('(?://.+?|)(/.+)', link)[0]
            url = client.replaceHTMLCodes(url)
            return url.encode('utf-8')
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Gather the embedded player plus any alternate mirrors on the page."""
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            # Pair each hoster's bare name with its full domain for lookups below.
            hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
            locDict = [i[0] for i in hostDict]

            result = client.request(url)

            links = []

            try:
                embed = client.parseDOM(result, 'div', attrs={'class': 'player-embed'})[0]
                links.append((client.parseDOM(embed, 'iframe', ret='src')[0], url))
            except:
                pass

            try:
                for item in client.parseDOM(result, 'div', attrs={'class': 'generic-video-item'}):
                    name = item.split('</div>', 1)[-1].split()[0]
                    hrefs = client.parseDOM(item, 'a', ret='href', attrs={'rel': '.+?'})
                    if hrefs:
                        links.append((name, hrefs[0]))
            except:
                pass

            for name, href in links:
                try:
                    try:
                        host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(name.strip().lower()).netloc)[0]
                    except:
                        host = name.lower()
                    host = host.rsplit('.', 1)[0]
                    if not host in locDict: raise Exception()
                    host = [full for bare, full in hostDict if bare == host][0]
                    host = host.encode('utf-8')

                    link = urlparse.urljoin(self.base_link, href)
                    link = client.replaceHTMLCodes(link)
                    link = link.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources

    def resolve(self, url):
        """Pull the iframe target out of the player page."""
        try:
            result = client.request(url)
            result = result.decode('iso-8859-1').encode('utf-8')

            frame = client.parseDOM(result, 'div', attrs={'class': 'player-embed'})[0]
            return client.parseDOM(frame, 'iframe', ret='src')[0]
        except:
            return



+ 100
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/bmoviez.py View File

@@ -0,0 +1,100 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: MuadDib
import re,traceback,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils
class source:
    """RSS-feed scraper for best-moviez.ws."""

    def __init__(self):
        self.priority = 0
        self.language = ['en']
        self.domains = ['best-moviez.ws']
        self.base_link = 'http://www.best-moviez.ws'
        self.search_link = '/search/%s/feed/rss2/'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Encode movie identifiers; the actual lookup happens in sources()."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Encode show identifiers; episode() adds the episode specifics."""
        try:
            data = {'tvshowtitle': tvshowtitle, 'year': year}
            return urllib.urlencode(data)
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Merge episode details into the querystring built by tvshow()."""
        try:
            data = urlparse.parse_qs(url)
            data = dict((i, data[i][0]) for i in data)
            data.update({'season': season, 'episode': episode, 'title': title, 'premiered': premiered})
            return urllib.urlencode(data)
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return

    def sources(self, url, hostDict, hostprDict):
        """Query the site's RSS search feed and return its enclosure links."""
        try:
            sources = []
            if url == None: return sources
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            # Build "Title SxxEyy" for shows, "Title year" for movies.
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            posts = client.parseDOM(r, 'item')
            for post in posts:
                Links = client.parseDOM(post, 'enclosure', ret='url')
                # BUGFIX: was `if not len(Links) == None:` — len() is never None,
                # so the guard was always true; iterate only when links exist.
                if Links:
                    for vid_url in Links:
                        quality, info = source_utils.get_release_quality(url, vid_url)
                        host = vid_url.split('//')[1].replace('www.', '')
                        host = host.split('/')[0].lower()
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': vid_url, 'info': info, 'direct': False, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('Best-Moviez - Exception: \n' + str(failure))
            return sources

    def resolve(self, url):
        # Enclosure URLs are already direct hoster links.
        return url

+ 91
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/bob.py View File

@@ -0,0 +1,91 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import re,traceback,urllib,urlparse,base64
import requests

from resources.lib.modules import client
from resources.lib.modules import cleantitle
from resources.lib.modules import log_utils

class source:
    """Scraper for bobmovies — the movie page is located via a Google site search."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['bobmovies.net','bobmovies.online']
        # Historical alternates: bobmovies.mrunlock.trade, mrunlock.bid,
        # mrunlock.stream, gomov.download, MRUNLOCK.INFO.
        self.base_link = 'https://bobmovies.online/'
        self.goog = 'https://www.google.com/search?q=bobmovies.online+'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Find the movie's page by scanning Google results limited to the site."""
        try:
            wanted = cleantitle.get_simple(title)
            query_url = '%s%s' % (self.goog, wanted.replace(' ', '+'))
            serp = requests.get(query_url).content
            log_utils.log('Scraper bobmovies - Movie - title: ' + str(title))
            log_utils.log('Scraper bobmovies - Movie - search_id: ' + str(wanted))

            for hit in re.compile('<a href="(.+?)"', re.DOTALL).findall(serp):
                if '/url?q=' not in hit:
                    continue
                # Only follow redirect links that point at the site itself.
                if self.base_link not in hit or 'google' in hit:
                    continue
                target = hit.split('/url?q=')[1]
                target = target.split('&amp')[0]
                headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
                html = requests.get(target, headers=headers, timeout=5).content
                details = re.compile('<div class="page_film_top full_film_top">.+?<h1>(.+?)</h1>.+?<td class="name">Quality:</td><td><a href=.+?">(.+?)</a>.+?<td class="name">Year:</td><td><a href=.+?">(.+?)</a>', re.DOTALL).findall(html)
                for found_title, qual, date in details:
                    if wanted != cleantitle.get_simple(found_title):
                        continue
                    if year not in date:
                        continue
                    log_utils.log('Scraper bobmovies - Movie - url2: ' + str(target))
                    return target
            return
        except:
            log_utils.log('BobMovies - Exception: \n' + str(traceback.format_exc()))
            return

    def sources(self, url, hostDict, hostprDict):
        """Scrape the movie page for its direct player file and mirror links."""
        try:
            if url == None: return
            sources = []

            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(url, headers=headers)

            for link in re.compile('id="tab-movie".+?data-file="(.+?)"', re.DOTALL).findall(html):
                if 'trailer' in link.lower():
                    continue
                link = urlparse.urljoin(self.base_link, link)
                sources.append({'source': 'DirectLink', 'quality': 'SD', 'language': 'en', 'url': link, 'info': [], 'direct': True, 'debridonly': False})

            for link in re.findall('data-url="(.+?)"', html):
                # Scheme-relative mirrors get an explicit http: prefix.
                if link.startswith('//'):
                    link = 'http:' + link
                sources.append({'source': 'DirectLink', 'quality': 'SD', 'language': 'en', 'url': link, 'info': [], 'direct': False, 'debridonly': False})

            return sources
        except:
            log_utils.log('BobMovies - Exception: \n' + str(traceback.format_exc()))
            return

    def resolve(self, url):
        return url

+ 171
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/cmovies.py View File

@@ -0,0 +1,171 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo


import re,base64, requests, sys, urllib
from resources.lib.modules import jsunpack
from resources.lib.modules import cleantitle
from bs4 import BeautifulSoup

from resources.lib.modules import cfscrape

def streamdor(html, src, olod):
    """Resolve a streamdor.co embed found in *html* into a source dict.

    html -- page text containing a streamdor.co/video/<id> reference
    src  -- the page URL, sent as the Referer header
    olod -- when True, fall back to an openload.co entry (empty url, filled in
            by the caller) if no streamango link is found; when False return ''

    Returns a source dict on success, or '' when extraction fails.
    """
    # BUGFIX: `details` was only assigned deep inside the try, so the except
    # handler's `return details` raised NameError on any early failure.
    details = ''
    try:
        with requests.Session() as s:
            episodeId = re.findall('.*streamdor.co/video/(\d+)', html)[0]
            p = s.get('https://embed.streamdor.co/video/' + episodeId, headers={'referer': src})
            # The player hides its config as JuicyCodes.Run(base64(packed js)).
            p = re.findall(r'JuicyCodes.Run\(([^\)]+)', p.text, re.IGNORECASE)[0]
            p = re.sub(r'\"\s*\+\s*\"', '', p)
            p = re.sub(r'[^A-Za-z0-9+\\/=]', '', p)
            p = base64.b64decode(p)
            p = jsunpack.unpack(p.decode('utf-8'))
            qual = 'SD'
            try:
                qual = re.findall(r'label:"(.*?)"', p)[0]
            except:
                pass
            try:
                url = re.findall(r'(https://streamango.com/embed/.*?)"', p, re.IGNORECASE)[0]
                details = {'source': 'streamango.com', 'quality': qual, 'language': "en", 'url': url,
                           'info': '', 'direct': False, 'debridonly': False}
            except:
                if olod == True:
                    details = {'source': 'openload.co', 'quality': qual, 'language': "en", 'url': '',
                               'info': '', 'direct': False, 'debridonly': False}
                else:
                    return ''
        return details
    except:
        print("Unexpected error in CMOVIES STREAMDOR Script:")
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print(exc_type, exc_tb.tb_lineno)
        return details


class source:
    """Scraper for cmovies — movie()/episode() return fully-built source dicts
    (via the module-level streamdor helper), so sources() just filters them."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['cmovieshd.io']
        self.base_link = 'https://www3.cmovies.io/'
        self.tv_link = 'https://www3.cmovies.io/tv-series/'
        self.movie_link = 'https://www3.cmovies.io/movie/'
        self.search_link = 'https://www3.cmovies.io/search/?q='

    def movie(self, imdb, title, localtitle, aliases, year):
        """Search the site for the movie and return source dicts for each mirror."""
        sources = []
        with requests.Session() as s:
            p = s.get(self.search_link + title + "+" + year)
            soup = BeautifulSoup(p.text, 'html.parser').find_all('div', {'class': 'movies-list'})[0]
            soup = soup.find_all('a', {'class': 'ml-mask'})
            movie_link = ''
            for i in soup:
                # Accept the exact title or a "title year" variant.
                if i['title'].lower() == title.lower() or i['title'].lower() == title.lower() + " " + year:
                    movie_link = i['href']
            p = s.get(movie_link + "watch")
            soup = BeautifulSoup(p.text, 'html.parser').find_all('a', {'class': 'btn-eps'})
            movie_links = [i['href'] for i in soup]
            for i in movie_links:
                p = s.get(i)
                if re.findall(r'http.+://openload.co/embed/.+\"', p.text):
                    openload_link = re.findall(r'http.+://openload.co/embed/.+\"', p.text)[0].strip('"')
                    olo_source = streamdor(p.text, i, True)
                    olo_source['url'] = openload_link
                    sources.append(olo_source)
                else:
                    sources.append(streamdor(p.text, i, False))
        return sources

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Pass the show title and aliases through to episode()."""
        try:
            url = {'tvshowtitle': tvshowtitle, 'aliases': aliases}
            return url
        except:
            print("Unexpected error in CMOVIES TV Script:")
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return url

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find the episode via alias search and return source dicts."""
        if not url:
            return url
        # BUGFIX: sources was only bound inside the try, so an early failure
        # made the except handler itself raise NameError.
        sources = []
        try:
            aliases = url['aliases']
            aliases.append({'title': url['tvshowtitle']})
            if len(episode) == 1:
                episode = "0" + episode
            with requests.Session() as s:
                for i in aliases:
                    search_text = i['title'] + ' season ' + season
                    p = s.get(self.search_link + search_text)
                    soup = BeautifulSoup(p.text, 'html.parser')
                    soup = soup.find_all('div', {'class': 'ml-item'})[0].find_all('a', href=True)[0]
                    if re.sub(r'\W+', '', soup['title'].lower()) \
                            == re.sub(r'\W+', '', ((i['title'] + " - season " + season).lower())):
                        break
                else:
                    # No alias matched the site's "<title> - season N" form.
                    soup = None
                if soup == None:
                    return sources
                p = s.get(soup['href'] + 'watch')
                soup = BeautifulSoup(p.text, 'html.parser').find_all('a', {'class': 'btn-eps'})
                episode_links = []
                for i in soup:
                    if re.sub(r'\W+', '', title.lower()) in re.sub(r'\W+', '', i.text.lower()):
                        episode_links.append(i['href'])
                for i in episode_links:
                    p = s.get(i)
                    if re.findall(r'http.+://openload.co/embed/.+\"', p.text):
                        openload_link = re.findall(r'http.+://openload.co/embed/.+\"', p.text)[0].strip('"')
                        olo_source = streamdor(p.text, i, True)
                        olo_source['url'] = openload_link
                        sources.append(olo_source)
                    else:
                        sources.append(streamdor(p.text, i, False))
            return sources
        except:
            print("Unexpected error in CMOVIES EPISODE Script:")
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources

    def sources(self, url, hostDict, hostprDict):
        # movie()/episode() already built the dicts; drop failed entries ('').
        sources = filter(None, url)
        return sources

    def resolve(self, url):
        return url

#url = source.tvshow(source(), '', '', 'Vikings','',[],'2016')
#uurl = source.episode(source(),url,'', '', 'A Good Treason', '', '4', '1')
#url = source.sources(source(),url,'','')

+ 97
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/cooltv.py View File

@@ -0,0 +1,97 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import re,traceback,urllib,urlparse,base64
import requests

from resources.lib.modules import client
from resources.lib.modules import cleantitle
from resources.lib.modules import source_utils
from resources.lib.modules import log_utils

session = requests.Session()

class source:
    """Scraper for cooltvseries.com episode download pages."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['cooltvseries.com']
        self.base_link = 'https://cooltvseries.com/'
        # BUGFIX: was '/%s/%s/season-%s/', which — when formatted with
        # base_link — put a slash in front of the scheme ('/https://...')
        # and produced an invalid request URL.
        self.show_link = '%s%s/season-%s/'

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Encode show identifiers; episode() builds the real page URL."""
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolTV - Exception: \n' + str(failure))
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Open the show's season page and return the matching episode link."""
        try:
            if url == None: return
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            tvshowtitle = urldata['tvshowtitle'].replace(' ', '-').lower()
            start_url = self.show_link % (self.base_link, tvshowtitle, season)

            html = client.request(start_url)
            # The second 'dwn-box' holds the per-episode links.
            container = client.parseDOM(html, 'div', attrs={'class': 'dwn-box'})[1]
            Links = client.parseDOM(container, 'a', ret='href')

            for epi_url in Links:
                if cleantitle.get(title) in cleantitle.get(epi_url):
                    return epi_url
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolTV - Exception: \n' + str(failure))
            return

    def sources(self, url, hostDict, hostprDict):
        """Collect the embedded player (or legacy flash player) plus any
        direct-download links from the episode page."""
        try:
            sources = []
            if url == None: return sources

            html = client.request(url)
            try:
                iframe = client.parseDOM(html, 'iframe', attrs={'class': 'embed-responsive-item'}, ret='src')[0]
                host = iframe.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': iframe, 'direct': False, 'debridonly': False})
            except:
                # Older pages embed a flash player; its file= param holds the link.
                flashvar = client.parseDOM(html, 'param', attrs={'name': 'flashvars'}, ret='value')[0]
                link = flashvar.split('file=')[1]
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': link, 'direct': False, 'debridonly': False})

            containers = client.parseDOM(html, 'div', attrs={'class': 'dwn-box'})

            for list in containers:
                link = client.parseDOM(list, 'a', attrs={'rel': 'nofollow'}, ret='href')[0]
                # Follow the redirect to the real file before judging quality.
                redirect = client.request(link, output='geturl')
                quality, info = source_utils.get_release_quality(redirect)
                sources.append({'source': 'DirectLink', 'quality': quality, 'language': 'en', 'url': redirect, 'info': info, 'direct': True, 'debridonly': False})
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('CoolTV - Exception: \n' + str(failure))
            return

    def resolve(self, url):
        return url



+ 91
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/dltube.py View File

@@ -0,0 +1,91 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: MuadDib
import re,traceback,base64,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import log_utils
class source:
    """Scraper for mydownloadtube.to (direct files plus iframe mirrors)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['mydownloadtube.com','mydownloadtube.to']
        self.base_link = 'https://www.mydownloadtube.to/'
        self.search_link = '%ssearch/%s'
        self.download_link = '/movies/play_online'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Search the carousel listing for a title/year match."""
        try:
            query = self.search_link % (self.base_link, urllib.quote_plus(title).replace('+', '-'))
            html = client.request(query, XHR=True)
            carousel = re.compile('<ul id=first-carousel1(.+?)</ul>', re.DOTALL).findall(html)
            entries = re.compile('alt="(.+?)".+?<h2><a href="(.+?)".+?</h2>.+?>(.+?)</p>', re.DOTALL).findall(str(carousel))
            for found_title, href, date in entries:
                candidate = self.base_link + href
                if cleantitle.get(title) in cleantitle.get(found_title):
                    if year in date:
                        return candidate
        except:
            log_utils.log('DLTube - Exception: \n' + str(traceback.format_exc()))
            return

    def sources(self, url, hostDict, hostprDict):
        """POST the movie id to the play_online endpoint, then parse both the
        base64-encoded direct files and any iframe-hosted mirrors."""
        try:
            sources = []
            if url == None: return sources
            html = client.request(url)
            mov_id = re.compile('id=movie value=(.+?)/>', re.DOTALL).findall(html)[0]
            mov_id = mov_id.rstrip()
            headers = {'Origin': 'https://mydownloadtube.to', 'Referer': url,
                'X-Requested-With': 'XMLHttpRequest', 'User_Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
            links_page = client.request('https://mydownloadtube.to/movies/play_online', headers=headers, post={'movie': mov_id})

            blocks = re.compile("sources:(.+?)controlbar", re.DOTALL).findall(links_page)
            for link, res in re.compile("file:window.atob.+?'(.+?)'.+?label:\"(.+?)\"", re.DOTALL).findall(str(blocks)):
                # Files come base64-encoded; decode and escape embedded spaces.
                vid_url = base64.b64decode(link).replace(' ', '%20')
                res = res.replace('3Dp', '3D').replace(' HD', '')
                sources.append({'source': 'DirectLink', 'quality': res, 'language': 'en', 'url': vid_url, 'info': '', 'direct': True, 'debridonly': False})

            for link in re.compile('<[iI][fF][rR][aA][mM][eE].+?[sS][rR][cC]="(.+?)"', re.DOTALL).findall(links_page):
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].split('.')[0].title()
                if '1080' in link:
                    res = '1080p'
                elif '720' in link:
                    res = '720p'
                else:
                    res = 'SD'
                if 'flashx' not in link:
                    sources.append({'source': host, 'quality': res, 'language': 'en', 'url': link, 'info': 'AC3', 'direct': False, 'debridonly': False})
            return sources
        except:
            log_utils.log('DLTube - Exception: \n' + str(traceback.format_exc()))
            return sources

    def resolve(self, url):
        """Follow redirects and hand back the final URL."""
        try:
            return client.request(url, output='geturl')
        except:
            return

+ 91
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/einthusan.py View File

@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-

'''
Yoda Add-on

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''


import re,urllib,urllib2,urlparse,json

from resources.lib.modules import cleantitle
from resources.lib.modules import client


class source:
    """Scraper for einthusan.tv (Indian-language films)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['einthusan.com', 'einthusan.tv']
        self.base_link = 'https://einthusan.tv'
        self.search_link = '/movie/results/?lang=%s&query=%s'
        self.movie_link = '/movie/watch/%s/'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Find the site's numeric id for the film, keyed by its IMDb language."""
        try:
            langMap = {'hi':'hindi', 'ta':'tamil', 'te':'telugu', 'ml':'malayalam', 'kn':'kannada', 'bn':'bengali', 'mr':'marathi', 'pa':'punjabi'}

            # The site indexes by language, so read it off the IMDb page first.
            imdb_page = client.request('http://www.imdb.com/title/%s/' % imdb)
            hrefs = re.findall('href\s*=\s*[\'|\"](.+?)[\'|\"]', imdb_page)
            hrefs = [h for h in hrefs if 'primary_language' in h]
            queries = [urlparse.parse_qs(urlparse.urlparse(h).query) for h in hrefs]
            langs = [q['primary_language'] for q in queries if 'primary_language' in q]
            lang = langMap[langs[0][0]]

            search_url = urlparse.urljoin(self.base_link, self.search_link % (lang, urllib.quote_plus(title)))

            wanted = cleantitle.get(title)

            html = client.request(search_url)

            rows = []
            for li in client.parseDOM(html, 'li'):
                hrefs = client.parseDOM(li, 'a', ret='href')
                names = client.parseDOM(li, 'h3')
                infos = client.parseDOM(li, 'div', attrs={'class': 'info'})
                if hrefs and names and infos:
                    rows.append((hrefs[0], names[0], infos[0]))
            # Pull the numeric id from the href and the 4-digit year from the info.
            parsed = [(re.findall('(\d+)', h), n, re.findall('(\d{4})', inf)) for h, n, inf in rows]
            parsed = [(ids[0], n, yrs[0]) for ids, n, yrs in parsed if ids and yrs]
            match = [mid for mid, n, yr in parsed if wanted == cleantitle.get(n) and year == yr][0]

            return str(match)
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Would return a direct source; currently disabled (early return)."""
        try:
            sources = []

            if url == None: return sources

            url = self.movie_link % url
            url = urlparse.urljoin(self.base_link, url)

            # NOTE: the provider deliberately short-circuits here — everything
            # below is unreachable and no sources are ever produced.
            return sources

            r = client.request(url)

            sources.append({'source': 'einthusan', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
            return sources
        except:
            return sources

    def resolve(self, url):
        return url



+ 97
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/filmxy.py View File

@@ -0,0 +1,97 @@
# NEEDS FIXING

# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo


import re,urllib,urlparse,json,base64

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import source_utils

class source:
    """Scraper for filmxy via the site's WordPress REST search API."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        # BUGFIX: was a single comma-joined string ['filmxy.cc,filmxy.me'];
        # domain matching expects one domain per list entry.
        self.domains = ['filmxy.cc', 'filmxy.me']
        self.base_link = 'https://www.filmxy.me/'
        # BUGFIX: was '/%s/wp-json/...', which — when formatted with
        # base_link — produced '/https://www.filmxy.me//wp-json/...',
        # an invalid request URL.
        self.search_link = '%swp-json/wp/v2/posts?search=%s'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Encode movie identifiers; the actual lookup happens in sources()."""
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Search the REST API for the title, open the matching post, and
        scrape its download page for hoster links."""
        sources = []
        try:
            if url == None: return
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            title = urldata['title'].replace(':', ' ').replace('-', ' ').lower()
            year = urldata['year']

            search_id = title.lower()
            start_url = self.search_link % (self.base_link, search_id.replace(' ', '%20'))

            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(start_url, headers=headers)
            Links = re.compile('"post","link":"(.+?)","title".+?"rendered":"(.+?)"', re.DOTALL).findall(html)
            for link, name in Links:
                link = link.replace('\\', '')
                name = name.replace('&#038;', '')
                if title.lower() in name.lower():
                    if year in name:
                        holder = client.request(link, headers=headers)
                        # Follow the "main-down" button to the real links page.
                        dpage = re.compile('id="main-down".+?href="(.+?)"', re.DOTALL).findall(holder)[0]
                        sources = self.scrape_results(dpage, title, year)
                        return sources
            return sources
        except:
            return sources

    def scrape_results(self, url, title, year):
        """Collect 720p and 1080p hoster links from a download page."""
        sources = []
        try:
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
            html = client.request(url, headers=headers)
            # One pass per quality block; the markup differs only by class name.
            for css_class, quality in (('links_720p', '720p'), ('links_1080p', '1080p')):
                block = re.compile('class="%s"(.+?)</ul>' % css_class, re.DOTALL).findall(html)
                for link in re.compile('href="(.+?)"', re.DOTALL).findall(str(block)):
                    host = link.split('//')[1].replace('www.', '')
                    host = host.split('/')[0].lower()
                    # These mirrors are consistently dead; skip them.
                    if host not in ['upload.af', 'upload.mn', 'uploadx.org']:
                        sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': link, 'info': [], 'direct': False, 'debridonly': False})

            return sources
        except:
            return sources

    def resolve(self, url):
        return url

+ 104
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/flenix.py View File

@@ -0,0 +1,104 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import re,traceback,urllib,urlparse,json,base64,time

from resources.lib.modules import cleantitle
from resources.lib.modules import dom_parser2
from resources.lib.modules import client
from resources.lib.modules import debrid

class source:
    """Flenix scraper: resolves movie/episode watch pages and extracts the
    site's server-list links, which resolve() later exchanges for a stream
    URL via the ipplugins/ipplayer endpoints (Python 2 / Kodi addon code)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['flenix.org']
        self.base_link = 'http://flenix.org'
        self.search_link = '/watch/%s-%s-online-flenix.html'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the movie's watch-page URL, or None on failure."""
        try:
            clean_title = cleantitle.geturl(title)
            return urlparse.urljoin(self.base_link,
                                    (self.search_link % (clean_title, year)))
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return an urlencoded query identifying the show, or None."""
        try:
            aliases.append({'country': 'us', 'title': tvshowtitle})
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle,
                   'year': year, 'aliases': aliases}
            return urllib.urlencode(url)
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find the episode link inside the season watch page, or None."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
            url = urlparse.urljoin(self.base_link,
                                   (self.search_link % (clean_title, url['year'])))
            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            for i in r[0]:
                if i.content == 'Episode %s' % episode:
                    return i.attrs['href']
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Collect the page's server-list links as unresolved source dicts."""
        try:
            sources = []
            if url == None: return sources
            r = client.request(url)
            quality = re.findall(">(\w+)<\/p", r)
            # ROBUSTNESS: previously quality[0] raised IndexError into the
            # bare except (losing all sources) when the regex found nothing.
            quality = '720p' if quality and quality[0] == 'HD' else 'SD'
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                # resolve() needs all three data-* attributes, so pack them
                # into the url field.
                url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'],
                       'data-server': i.attrs['data-server'],
                       'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                sources.append({'source': i.content, 'quality': quality,
                                'language': 'en', 'url': url,
                                'direct': False, 'debridonly': False})
            return sources
        except:
            return sources

    def resolve(self, url):
        """Exchange the packed data-* attributes for a playable https URL."""
        try:
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            post = {'ipplugins': 1, 'ip_film': urldata['data-film'],
                    'ip_server': urldata['data-server'],
                    'ip_name': urldata['data-name'], 'fix': "0"}
            # CONSISTENCY: endpoints derived from self.base_link instead of
            # repeating the hardcoded hostname four times.
            p1 = client.request('%s/ip.file/swf/plugins/ipplugins.php' % self.base_link,
                                post=post, referer=urldata['url'], XHR=True)
            p1 = json.loads(p1)
            p2 = client.request('%s/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0'
                                % (self.base_link, p1['s'], urldata['data-server']))
            p2 = json.loads(p2)
            p3 = client.request('%s/ip.file/swf/ipplayer/api.php?hash=%s'
                                % (self.base_link, p2['hash']))
            p3 = json.loads(p3)
            if p3['status'] == False:
                # First mirror failed; retry with n=1.
                p2 = client.request('%s/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1'
                                    % (self.base_link, p1['s'], urldata['data-server']))
                p2 = json.loads(p2)
            return "https:%s" % p2["data"].replace("\\/", "/")
        except:
            return

+ 113
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/flixanity.py View File

@@ -0,0 +1,113 @@
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################

# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo

import re,traceback,urllib,urlparse,json,base64,time

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import dom_parser2
from resources.lib.modules import log_utils

class source:
    """Flixanity/123movieser scraper: resolves movie/episode watch pages,
    extracts the server-list links, and exchanges them for a stream URL via
    the ipplugins/ipplayer endpoints (Python 2 / Kodi addon code)."""

    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['movieshd.tv', 'movieshd.is', 'movieshd.watch', 'flixanity.is',
                        'flixanity.me', 'istream.is', 'flixanity.online', 'flixanity.cc',
                        '123movies.it']
        self.base_link = 'http://123movieser.com'
        self.search_link = '/watch/%s-%s-online-free-123movies.html'

    def _log_failure(self):
        """Log the current exception's traceback.

        BUG FIX: the original called log_utils.log() without ever importing
        log_utils, so every handled exception became a NameError that
        propagated to the caller; the import is now declared at module level."""
        log_utils.log('Flixanity - Exception: \n' + str(traceback.format_exc()))

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the movie's watch-page URL, or None on failure."""
        try:
            clean_title = cleantitle.geturl(title)
            return urlparse.urljoin(self.base_link,
                                    (self.search_link % (clean_title, year)))
        except:
            self._log_failure()
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Return an urlencoded query identifying the show, or None."""
        try:
            aliases.append({'country': 'us', 'title': tvshowtitle})
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle,
                   'year': year, 'aliases': aliases}
            return urllib.urlencode(url)
        except:
            self._log_failure()
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Find the episode link inside the season watch page, or None."""
        try:
            if url == None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
            url = urlparse.urljoin(self.base_link,
                                   (self.search_link % (clean_title, url['year'])))
            r = client.request(url)
            r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            for i in r[0]:
                if i.content == 'Episode %s' % episode:
                    return i.attrs['href']
        except:
            self._log_failure()
            return

    def sources(self, url, hostDict, hostprDict):
        """Collect the page's server-list links as unresolved source dicts."""
        try:
            sources = []
            if url == None: return sources
            r = client.request(url)
            quality = re.findall(">(\w+)<\/p", r)
            # ROBUSTNESS: previously quality[0] raised IndexError into the
            # except handler when the regex found nothing.
            quality = '720p' if quality and quality[0] == 'HD' else 'SD'
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                # resolve() needs all three data-* attributes, so pack them
                # into the url field.
                url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'],
                       'data-server': i.attrs['data-server'],
                       'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                sources.append({'source': i.content, 'quality': quality,
                                'language': 'en', 'url': url,
                                'direct': False, 'debridonly': False})
            return sources
        except:
            self._log_failure()
            return

    def resolve(self, url):
        """Exchange the packed data-* attributes for a playable https URL."""
        try:
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            post = {'ipplugins': 1, 'ip_film': urldata['data-film'],
                    'ip_server': urldata['data-server'],
                    'ip_name': urldata['data-name'], 'fix': "0"}
            # CONSISTENCY: endpoints derived from self.base_link instead of
            # repeating the hardcoded hostname four times.
            p1 = client.request('%s/ip.file/swf/plugins/ipplugins.php' % self.base_link,
                                post=post, referer=urldata['url'], XHR=True)
            p1 = json.loads(p1)
            p2 = client.request('%s/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0'
                                % (self.base_link, p1['s'], urldata['data-server']))
            p2 = json.loads(p2)
            p3 = client.request('%s/ip.file/swf/ipplayer/api.php?hash=%s'
                                % (self.base_link, p2['hash']))
            p3 = json.loads(p3)
            if p3['status'] == False:
                # First mirror failed; retry with n=1.
                p2 = client.request('%s/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1'
                                    % (self.base_link, p1['s'], urldata['data-server']))
                p2 = json.loads(p2)
            return "https:%s" % p2["data"].replace("\\/", "/")
        except:
            self._log_failure()
            return

+ 88
- 0
lib/lambdascrapers/sources_notworking/lambdascrapers(11_9)/genvideos.py View File

@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-

'''
Yoda Add-on

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''


import re,json,urllib,urlparse

from resources.lib.modules import cleantitle