A command-line (CLI) program for monitoring and downloading 8chan threads. Licensed under the MIT License.

web_methods.py 3.2KB

import json
import re

import requests

import constants
import time_methods
from file_io import *
from file_io import generate_json_path
from json_methods import extract_filenames


def fetch_and_parse_thread_json(board, thread_no):
    url = generate_thread_json_url(board, thread_no)
    p = generate_json_path(board, thread_no)
    if not download_file(url, p):
        return None
    with open(p) as f:
        return json.load(f)


def dump_thread_html(board, thread_no):
    print('Downloading HTML for /{}/{}...'.format(board, thread_no))
    url = generate_thread_html_url(board, thread_no)
    p = generate_html_path(board, thread_no)
    print('Downloading html to {}'.format(p))
    download_file(url, p)


def dump_thread_files(board, thread_no, thread_json):
    """ Downloads the files referenced in the supplied JSON. """
    # Make a list of the files in the thread
    filenames = extract_filenames(thread_json)
    # Filter out files that already exist in the cache
    filtered_filenames = [f for f in filenames
                          if not file_exists_in_cache(board, thread_no, normalize_filename(f))]
    # Download the files
    print('Downloading {} files:'.format(len(filtered_filenames)))
    for f in filtered_filenames:
        normalized = normalize_filename(f)
        print('\t{}'.format(normalized))
        file_url = generate_file_url(f['hashed_name'])
        p = path_to_cached_file(board, thread_no, normalized)
        if not download_file(file_url, p):
            print('\t\tGot a 404, trying alternate link.')
            # Try alternate link
            alternate_url = generate_alternate_file_url(board, f['hashed_name'])
            download_file(alternate_url, p)


def download_file(file_url, file_path):
    """ If the request succeeds, downloads the file and returns True.
    On a 404, returns False. On other responses, raises an exception. """
    r = requests.get(file_url, headers={'user-agent': constants.user_agent})
    if r.status_code == requests.codes.ok:
        save_file(file_path, r.content)
        return True
    elif r.status_code == 404:
        return False
    else:
        # Raising a bare string is invalid in Python 3; wrap the message in an exception.
        raise RuntimeError(
            "Unexpected status code {} while trying to fetch {} - try opening it in the browser; "
            "if that doesn't work, submit an issue to the tracker.".format(r.status_code, file_url))


def generate_thread_json_url(board, thread_no):
    url = 'https://8ch.net/{}/res/{}.json'.format(board, thread_no)
    return url


def generate_thread_html_url(board, thread_no):
    url = 'https://8ch.net/{}/res/{}.html'.format(board, thread_no)
    return url


def generate_file_url(filename):
    url = 'https://media.8ch.net/file_store/{}'.format(filename)
    return url


def generate_alternate_file_url(board, filename):
    """ Some images, like the OP pic of the /tech/ sticky, use an alternate media2 URL. """
    url = 'https://media2.8ch.net/{}/src/{}'.format(board, filename)
    return url


def parse_url(url):
    """ Extracts the board name, thread number, and anchored reply (if any) from a URL. """
    parts = url.split('#', 1)
    board, thread_no = re.findall(r'(\w+)/res/(\d+)', parts[0])[0]
    anchored_reply = '' if len(parts) < 2 else re.findall(r'q?(\d+)$', parts[1])[0]
    return [board, thread_no, anchored_reply]
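
For orientation, here is a minimal sketch of how these functions might be driven together. It assumes `web_methods` is importable, that the star-imported `file_io` helpers it relies on (`save_file`, `generate_json_path`, `generate_html_path`, `path_to_cached_file`, and so on) are available, and that the cache directories they write to exist. The URL, board, and thread number below are placeholders, not real threads.

import web_methods

# Placeholder thread URL with an anchored reply.
url = 'https://8ch.net/tech/res/123456.html#q123500'
board, thread_no, anchored_reply = web_methods.parse_url(url)

# Fetch and cache the thread JSON; returns None on a 404 (e.g. the thread was deleted).
thread_json = web_methods.fetch_and_parse_thread_json(board, thread_no)
if thread_json is not None:
    web_methods.dump_thread_html(board, thread_no)                 # save the raw HTML page
    web_methods.dump_thread_files(board, thread_no, thread_json)   # save the attached files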