- # -*- coding: UTF-8 -*-
import ast
import os
import threading
import time
from random import randint
from urllib.parse import unquote

import httpx
- text = ''
- headers = {
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
- }
- def get_jpg(file_name, urls):
- jpg_path = os.path.join(save_path, file_name)
- if not os.path.exists(jpg_path):
- os.makedirs(jpg_path)
- for url in urls:
- jpg_name = url.split('/')[-1]
- jpg_save_path = os.path.join(jpg_path, jpg_name)
- if not os.path.exists(jpg_save_path):
- time.sleep(randint(3, 5))
- jpg = httpx.get(url, headers=headers)
- if jpg.status_code == 200:
- print(f'正在保存 {url} 到 {file_name}')
- with open(jpg_save_path, 'wb') as f:
- f.write(jpg.content)
- else:
- print(f'{jpg_name} 已存在, 跳过')
- with open('test.txt', 'r') as file:
- lines = file.readlines()
- for line in lines:
- text += unquote(line.strip())
- save_path = os.path.join(os.getcwd(), 'fantasy')
- if not os.path.exists(save_path):
- os.mkdir(save_path)
- threads = []
- for file_name, urls in eval(text).items():
- t = threading.Thread(target=get_jpg, args=(file_name, urls,))
- t.start()
- threads.append(t)
- for t in threads:
- t.join()
- print("all done")