looks great

phillychi3 2023-03-12 14:39:48 +08:00
parent 1ca169aef8
commit 861b29a142


@@ -27,6 +27,25 @@ now = 1
 allnumbers = []
 allnames = []
 alltags = []
+ua = fake_useragent.UserAgent()
+useragent = ua.random
+
+# request helper: send requests with browser-like headers so Cloudflare
+# is less likely to block the scraper
+def wtfcloudflare(url, method="get", data=None):
+    session = requests.Session()
+    session.headers = {
+        'Referer': "https://nhentai.net/login/",
+        'User-Agent': useragent,
+        'Cookie': cookie,
+        'Accept-Language': 'en-US,en;q=0.9',
+        'Accept-Encoding': 'gzip, deflate, br',
+    }
+    if method == "get":
+        r = session.get(url)
+    elif method == "post":
+        r = session.post(url, data=data)
+    return r
 class gettagonline(threading.Thread):
@@ -35,22 +54,22 @@ class gettagonline(threading.Thread):
         self.number = number
         self.queue = queue
-    def run(self):
-        while self.queue.qsize() > 0:
-            num = self.queue.get()
-            # print("get %d: %s" % (self.number, num))
-            ua = fake_useragent.UserAgent()
-            useragent = ua.random
-            headers = {
-                'user-agent': useragent
-            }
-            r = requests.get(apiurl + num, headers=headers)
-            data = r.json()
-            ctag = []
-            for i in enumerate(data['tags']):
-                ctag.append(i[1]['name'])
-            alltags.append(ctag)
-            time.sleep(random.uniform(0.5, 1))
+    # def run(self):
+    #     while self.queue.qsize() > 0:
+    #         num = self.queue.get()
+    #         # print("get %d: %s" % (self.number, num))
+    #         ua = fake_useragent.UserAgent()
+    #         useragent = ua.random
+    #         headers = {
+    #             'user-agent': useragent
+    #         }
+    #         r = requests.get(apiurl + num, headers=headers)
+    #         data = r.json()
+    #         ctag = []
+    #         for i in enumerate(data['tags']):
+    #             ctag.append(i[1]['name'])
+    #         alltags.append(ctag)
+    #         time.sleep(random.uniform(0.5, 1))

 set1 = input("請問要使用離線資料嗎?(y/n)(默認為否)")
@@ -75,13 +94,7 @@ else:
    spinner = PixelSpinner('抓取資料中...')
    while True:
-        ua = fake_useragent.UserAgent()
-        useragent = ua.random
-        headers = {
-            'user-agent': useragent,
-            'cookie': cookie
-        }
-        data = requests.get(f"{url}?page={now}", headers=headers)
+        data = wtfcloudflare(f"{url}?page={now}")
         soup = BeautifulSoup(data.text, 'html.parser')
         book = soup.find_all("div", class_='gallery-favorite')
         if book == []:
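
For context, a self-contained sketch of the request pattern this commit introduces. It is an illustration, not the repository's exact code: the `cookie` value and the favorites URL below are hypothetical placeholders, since the real ones are defined elsewhere in the script and do not appear in this diff.

# Standalone sketch of the wtfcloudflare() pattern added above.
# `cookie` and the URL are hypothetical placeholders.
import fake_useragent
import requests

cookie = "csrftoken=...; sessionid=..."  # placeholder; set elsewhere in the real script
ua = fake_useragent.UserAgent()
useragent = ua.random  # one random browser User-Agent, reused for every request

def wtfcloudflare(url, method="get", data=None):
    # Browser-like headers (Referer, User-Agent, cookies) make Cloudflare
    # less likely to challenge the scraper than a bare requests.get() would.
    session = requests.Session()
    session.headers = {
        'Referer': "https://nhentai.net/login/",
        'User-Agent': useragent,
        'Cookie': cookie,
        'Accept-Language': 'en-US,en;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br',
    }
    if method == "get":
        return session.get(url)
    elif method == "post":
        return session.post(url, data=data)
    raise ValueError(f"unsupported method: {method}")

# Usage, mirroring the scraping loop above:
page = wtfcloudflare("https://nhentai.net/favorites/?page=1")
print(page.status_code)

Centralizing the headers in one helper means the scraping loop and any future POST calls share the same browser-like fingerprint, instead of rebuilding a fake_useragent header dict at every call site as the removed code did.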