diff --git a/api/api.py b/api/api.py
index 7ec0755..ba0fee9 100644
--- a/api/api.py
+++ b/api/api.py
@@ -14,29 +14,35 @@ class Client:
         self.session.headers.update({
             'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
         })
-        self.base = 'https://read-api.marvel.com/'
+        self.base = 'https://bifrost.marvel.com/'
+
     def set_cookies(self, cookies):
         self.session.cookies.update(cookies)
 
     def get_id(self, url):
         r = self.session.get(url)
-        regex = r'digital_comic_id : "(([0-9]+))"'
+        regex = r'"digitalComicID":(([0-9]+))'
         return re.search(regex, r.text).group(1)
 
     def make_call(self, epoint, params=None):
         r = self.session.get(self.base+epoint, params=params)
         r.raise_for_status()
         return r
+
+    def get_next_comic(self, id):
+        self.session.headers.update({'Referer': 'https://read.marvel.com/'})
+        r = self.make_call(f'v1/catalog/digital-comics/metadata/{id}?')
+        return r.json()['data']['results'][0].get('prev_next_issue',{}).get('next_issue_meta',{}).get('id')
 
     def get_comic_meta(self, id):
         self.session.headers.update({'Referer': 'https://read.marvel.com/'})
-        r = self.make_call('issue/v1/digitalcomics/'+id+'?')
+        r = self.make_call(f'v1/catalog/digital-comics/metadata/{id}?')
        return r.json()['data']['results'][0]['issue_meta']
 
     def get_comic(self, id):
         params={'rand': randint(10000, 99999)}
-        r = self.make_call('asset/v1/digitalcomics/'+id+'?', params=params)
+        r = self.make_call(f'v1/catalog/digital-comics/web/assets/{id}?', params=params)
         j = r.json()['data']['results'][0]
         if not j['auth_state']['subscriber']:
             raise IneligibleError('Marvel Unlimited subscription required.')
diff --git a/mur.py b/mur.py
index 298ba16..961c404 100644
--- a/mur.py
+++ b/mur.py
@@ -64,6 +64,12 @@ def parse_args():
         help="Write comic's metadata to JSON file.",
         action='store_true'
     )
+    parser.add_argument(
+        '-a', '--all',
+        help="Download all issues in series",
+        required=False,
+        action='store_true'
+    )
return parser.parse_args() def parse_cookies(cd, out_cookies={}): @@ -94,6 +100,7 @@ def check_url(url): for regex in regexes: match = re.match(regex, url) if match: + print(match.groups()) return match.group(1), match.group(2) def download(urls, tmp_dir, cur=0): @@ -131,6 +138,8 @@ def err(e, cur, tot): print(e) if cur == tot: sys.exit(1) + +# def download_and_save(id): def main(): if hasattr(sys, 'frozen'): @@ -144,23 +153,35 @@ def main(): args = parse_args() tot = len(args.url) cur = 0 - for url in args.url: + urls = [] + args.url + + for url in urls: cur += 1 try: print("Comic {} of {}:".format(cur, tot)) - try: - type, id = check_url(url) - except TypeError: - err('Invalid URL: '+str(url), cur, tot) - continue - if type == "www": - id = client.get_id(url) + if isinstance(url, str): + try: + type, id = check_url(url) + except TypeError: + err('Invalid URL: '+str(url), cur, tot) + continue + if type == "www": + id = client.get_id(url) + else: + id = url fmt = args.format meta = client.get_comic_meta(id) + if args.all: + next_id = client.get_next_comic(id) + if next_id: + urls.append(next_id) title = meta['title'] title_s = sanitize(title) print(str(title) + "\n") - abs = os.path.join(dl_dir, '{}.{}'.format(title_s, fmt)) + book_dir = f"{dl_dir}/{title_s.split(' #')[0]}" + if not os.path.isdir(book_dir): + os.makedirs(book_dir) + abs = os.path.join(book_dir, '{}.{}'.format(title_s, fmt)) if exist_check(abs): err('Comic already exists locally.', cur, tot) continue @@ -189,7 +210,7 @@ def main(): make_cbz(abs, images) if args.meta: print("Writing metadata to JSON file...") - meta_abs = os.path.join(dl_dir, '{}_meta.json'.format(title_s)) + meta_abs = os.path.join(book_dir, '{}_meta.json'.format(title_s)) write_meta(meta_abs, meta) for i in images: os.remove(i)