lambdascrapers
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

mywatchseries.py 5.1KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141
  1. # -*- coding: utf-8 -*-
  2. '''
  3. Yoda Add-on
  4. This program is free software: you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation, either version 3 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with this program. If not, see <http://www.gnu.org/licenses/>.
  14. '''
  15. import re,urllib,urlparse,json
  16. from resources.lib.modules import cleantitle
  17. from resources.lib.modules import client
  18. from resources.lib.modules import proxy
  19. class source:
  20. def __init__(self):
  21. self.priority = 0
  22. self.language = ['en']
  23. self.domains = ['onwatchseries.to','mywatchseries.to']
  24. self.base_link = 'http://mywatchseries.to'
  25. self.search_link = 'http://mywatchseries.to/show/search-shows-json'
  26. self.search_link_2 = 'http://mywatchseries.to/search/%s'
  27. def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
  28. try:
  29. t = cleantitle.get(tvshowtitle)
  30. q = urllib.quote_plus(cleantitle.query(tvshowtitle))
  31. p = urllib.urlencode({'term': q})
  32. r = client.request(self.search_link, post=p, XHR=True)
  33. try: r = json.loads(r)
  34. except: r = None
  35. if r:
  36. r = [(i['seo_url'], i['value'], i['label']) for i in r if 'value' in i and 'label' in i and 'seo_url' in i]
  37. else:
  38. r = proxy.request(self.search_link_2 % q, 'tv shows')
  39. r = client.parseDOM(r, 'div', attrs = {'valign': '.+?'})
  40. r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title'), client.parseDOM(i, 'a')) for i in r]
  41. r = [(i[0][0], i[1][0], i[2][0]) for i in r if i[0] and i[1] and i[2]]
  42. r = [(i[0], i[1], re.findall('(\d{4})', i[2])) for i in r]
  43. r = [(i[0], i[1], i[2][-1]) for i in r if i[2]]
  44. r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
  45. url = r[0][0]
  46. url = proxy.parse(url)
  47. url = url.strip('/').split('/')[-1]
  48. url = url.encode('utf-8')
  49. return url
  50. except:
  51. return
  52. def episode(self, url, imdb, tvdb, title, premiered, season, episode):
  53. try:
  54. if url == None: return
  55. url = '%s/serie/%s' % (self.base_link, url)
  56. r = proxy.request(url, 'tv shows')
  57. r = client.parseDOM(r, 'li', attrs = {'itemprop': 'episode'})
  58. t = cleantitle.get(title)
  59. r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'itemprop': 'name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in r]
  60. r = [(i[0], i[1][0].split('&nbsp;')[-1], i[2]) for i in r if i[1]] + [(i[0], None, i[2]) for i in r if not i[1]]
  61. r = [(i[0], i[1], i[2][0]) for i in r if i[2]] + [(i[0], i[1], None) for i in r if not i[2]]
  62. r = [(i[0][0], i[1], i[2]) for i in r if i[0]]
  63. url = [i for i in r if t == cleantitle.get(i[1]) and premiered == i[2]][:1]
  64. if not url: url = [i for i in r if t == cleantitle.get(i[1])]
  65. if len(url) > 1 or not url: url = [i for i in r if premiered == i[2]]
  66. if len(url) > 1 or not url: raise Exception()
  67. url = url[0][0]
  68. url = proxy.parse(url)
  69. url = re.findall('(?://.+?|)(/.+)', url)[0]
  70. url = client.replaceHTMLCodes(url)
  71. url = url.encode('utf-8')
  72. return url
  73. except:
  74. return
  75. def sources(self, url, hostDict, hostprDict):
  76. try:
  77. sources = []
  78. if url == None: return sources
  79. url = urlparse.urljoin(self.base_link, url)
  80. r = proxy.request(url, 'tv shows')
  81. links = client.parseDOM(r, 'a', ret='href', attrs = {'target': '.+?'})
  82. links = [x for y,x in enumerate(links) if x not in links[:y]]
  83. for i in links:
  84. try:
  85. url = i
  86. url = proxy.parse(url)
  87. url = urlparse.parse_qs(urlparse.urlparse(url).query)['r'][0]
  88. url = url.decode('base64')
  89. url = client.replaceHTMLCodes(url)
  90. url = url.encode('utf-8')
  91. host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
  92. if not host in hostDict: raise Exception()
  93. host = host.encode('utf-8')
  94. sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
  95. except:
  96. pass
  97. return sources
  98. except:
  99. return sources
  100. def resolve(self, url):
  101. return url