This commit is contained in:
phillychi3 2024-02-22 18:55:21 +08:00
parent 55d27d0402
commit bbfab783b5
3 changed files with 197 additions and 195 deletions

File diff suppressed because one or more lines are too long

View File

@ -1,62 +1,64 @@
from bs4 import BeautifulSoup
import requests
import json
import yaml
URL = "https://nhentai.net/tags/"
def wtfcloudflare(url, method="get", useragent=None, cookie=None, data=None):
    """Perform an HTTP request with browser-like headers.

    Sending a Referer, User-Agent and session Cookie makes the request look
    like an ordinary browser visit, which is needed to get past Cloudflare.

    :param url: target URL.
    :param method: "get" or "post".
    :param useragent: value for the User-Agent header.
    :param cookie: raw Cookie header value.
    :param data: form payload for POST requests.
    :return: the requests.Response object.
    """
    sess = requests.Session()
    # Replace (not update) the default headers so only these are sent.
    sess.headers = {
        'Referer': "https://nhentai.net/login/",
        'User-Agent': useragent,
        'Cookie': cookie,
        'Accept-Language': 'en-US,en;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br',
    }
    if method == "post":
        resp = sess.post(url, data=data)
    elif method == "get":
        resp = sess.get(url)
    return resp
def get_tags():
    """Scrape every tag id/name pair from nhentai and write them to tag.json.

    Reads the session cookie and user agent from set.yaml, pages through
    https://nhentai.net/tags/ until an empty page is returned, and dumps a
    {tag_id: tag_name} mapping to tag.json.
    """
    with open('set.yaml', 'r') as f:
        # BUG FIX: the file was previously parsed twice with yaml.load();
        # the second call saw an exhausted file handle and returned None,
        # so indexing it with ["useragent"] raised TypeError. Parse once.
        config = yaml.load(f, Loader=yaml.CLoader)
        cookie = config["cookid"]  # NOTE: the config key is spelled "cookid"
        useragent = config["useragent"]
    if cookie == "":
        print("Please edit set.yaml")
        exit()
    now = 1
    tagjson = {}
    while True:
        data = wtfcloudflare(f"{URL}?page={now}",
                             useragent=useragent, cookie=cookie)
        # BUG FIX: removed leftover debug print(data.text) that dumped the
        # entire HTML of every page to stdout.
        soup = BeautifulSoup(data.text, 'html.parser')
        tags = soup.find_all("a", class_='tag')
        if not tags:  # an empty page means we are past the last one
            break
        # Each anchor's class list is ['tag', 'tag-<id>']; strip the prefix
        # to recover the numeric id and pair it with the visible name.
        for t in tags:
            tagid = t.get('class')[1].replace('tag-', '')
            tagjson[tagid] = t.find('span', class_='name').get_text()
        now += 1
    if not tagjson:
        print("something wrong with your cookie or useragent")
        exit()
    with open('tag.json', 'w') as f:
        json.dump(tagjson, f)
    return
# Allow running this module directly as a script (no effect on import).
if __name__ == '__main__':
    get_tags()
from bs4 import BeautifulSoup
import requests
import json
import yaml
URL = "https://nhentai.net/tags/"
def wtfcloudflare(url, method="get", useragent=None, cookie=None, data=None):
    """Fetch *url* while impersonating a regular browser session.

    The Referer/User-Agent/Cookie trio is what lets the request through
    Cloudflare's bot filtering.

    :param url: target URL.
    :param method: "get" or "post".
    :param useragent: value for the User-Agent header.
    :param cookie: raw Cookie header value.
    :param data: form payload for POST requests.
    :return: the requests.Response object.
    """
    browser = requests.Session()
    # Assigning .headers wholesale discards requests' defaults on purpose.
    browser.headers = {
        'Referer': "https://nhentai.net/login/",
        'User-Agent': useragent,
        'Cookie': cookie,
        'Accept-Language': 'en-US,en;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br',
    }
    if method == "get":
        result = browser.get(url)
    elif method == "post":
        result = browser.post(url, data=data)
    return result
def get_tags():
    """Scrape all tag id/name pairs from nhentai and save them to tag.json.

    Pages through https://nhentai.net/tags/ with the cookie/user-agent from
    set.yaml until an empty page comes back, then writes a
    {tag_id: tag_name} mapping to tag.json.
    """
    with open('set.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.CLoader)
        cookie = config["cookid"]  # NOTE: the config key is spelled "cookid"
        useragent = config["useragent"]
    if cookie == "":
        print("Please edit set.yaml")
        exit()
    now = 1
    tagjson = {}
    while True:
        data = wtfcloudflare(f"{URL}?page={now}",
                             useragent=useragent, cookie=cookie)
        soup = BeautifulSoup(data.text, 'html.parser')
        tags = soup.find_all("a", class_='tag')
        if not tags:  # an empty page means we are past the last one
            break
        # Each anchor's class list is ['tag', 'tag-<id>']; strip the prefix
        # to recover the numeric id and pair it directly with the visible
        # name (replaces the old parallel-lists + enumerate index dance).
        for t in tags:
            tagid = t.get('class')[1].replace('tag-', '')
            tagjson[tagid] = t.find('span', class_='name').get_text()
        print(f"page {now} done")
        now += 1
    if not tagjson:
        print("something wrong with your cookie or useragent")
        exit()
    with open('tag.json', 'w') as f:
        json.dump(tagjson, f)
    print("tag.json saved")
    return
# Allow running this module directly as a script (no effect on import).
if __name__ == '__main__':
    get_tags()

View File

@ -1,132 +1,132 @@
from gettags import get_tags
from progress.spinner import PixelSpinner
from bs4 import BeautifulSoup
import yaml
import requests
import locale
import os
import json
import csv
# First run: write a template config and exit so the user can fill it in.
if not os.path.isfile("set.yaml"):
    with open('set.yaml', 'w') as f:
        yaml.dump({"cookid": "", "useragent": ""}, f)
    print("Please edit set.yaml")
    exit()
# Load the nhentai session cookie and user agent from the config.
with open('set.yaml', 'r') as f:
    data = yaml.load(f, Loader=yaml.CLoader)
    cookie = data["cookid"]  # NOTE: the config key is spelled "cookid"
    useragent = data["useragent"]
if cookie == "":
    print("Please edit set.yaml")
    exit()
# setting
URL = "https://nhentai.net/favorites/"
APIURL = "https://nhentai.net/api/gallery/"
# Output rows for output.csv; first row is the header.
table = [
    ["id", "name", "tags"]
]
now = 1          # current favorites page number
allnumbers = []  # gallery ids across all pages
allnames = []    # gallery titles across all pages
alltags = []     # per-gallery lists of tag ids
# Choose UI strings from the OS locale (Traditional Chinese vs. English).
# NOTE(review): locale.getdefaultlocale() is deprecated since Python 3.11 —
# consider locale.getlocale(); left unchanged here.
locate = locale.getdefaultlocale()[0]
if locate == "zh_TW":
    language = {
        "nodata": "沒有發現離線資料 抓取中請稍後...",
        "nodata2": "抓取完畢",
        "usedata": "使用離線資料",
        "getdata": "抓取資料中...",
        "403": "403 錯誤,可能被 cloudflare 阻擋,請檢查 cookie 是否正確",
    }
else:
    language = {
        "nodata": "No offline data found, please wait a moment...",
        "nodata2": "Done",
        "usedata": "Use offline data",
        "getdata": "Getting data...",
        "403": "403 error, maby block by cloudflare , please check if the cookie is correct",
    }
def banner():
    """Print the ASCII-art startup banner."""
    # NOTE(review): the art's internal spacing appears mangled in this copy
    # of the file — purely cosmetic either way.
    data = """ _ _ _ ___ _
_ __ ___| |__ _ __ | |_ __ _(_) / __\/_\/\ /\
| '_ \ / _ \ '_ \| '_ \| __/ _` | |_____ / _\ //_\\ \ / /
| | | | __/ | | | | | | || (_| | |_____/ / / _ \ V /
|_| |_|\___|_| |_|_| |_|\__\__,_|_| \/ \_/ \_/\_/
"""
    print(data)
# request
def wtfcloudflare(url, method="get", data=None):
    """Issue an HTTP request that passes Cloudflare's bot filtering.

    Uses the module-level ``cookie`` and ``useragent`` loaded from set.yaml.

    :param url: target URL.
    :param method: "get" or "post".
    :param data: form payload for POST requests.
    :return: the requests.Response object.
    """
    sess = requests.Session()
    # Assigning .headers wholesale discards requests' defaults on purpose.
    sess.headers = {
        'Referer': "https://nhentai.net/login/",
        'User-Agent': useragent,
        'Cookie': cookie,
        'Accept-Language': 'en-US,en;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br',
    }
    if method == "post":
        resp = sess.post(url, data=data)
    elif method == "get":
        resp = sess.get(url)
    return resp
def check_pass():
    """Verify the saved cookie still gets us past Cloudflare.

    Requests the site root; a 403 status means Cloudflare rejected the
    session, so print a localized hint and abort the program.
    """
    probe = wtfcloudflare("https://nhentai.net/")
    if probe.status_code != 403:
        return
    print(language["403"])
    exit()
# --- main ---
banner()
check_pass()
# Build tag.json (tag id -> name lookup) on first run; reuse it afterwards.
if not os.path.isfile("tag.json"):
    print(language["nodata"])
    get_tags()
    print(language["nodata2"])
print(language["usedata"])
spinner = PixelSpinner(language["getdata"])
# Walk the favorites pages until an empty page signals the end.
while True:
    data = wtfcloudflare(f"{URL}?page={now}")
    soup = BeautifulSoup(data.text, 'html.parser')
    book = soup.find_all("div", class_='gallery-favorite')
    if book == []:
        break
    numbers = [t.get('data-id') for t in book]
    names = [t.find('div', class_="caption").get_text() for t in book]
    tags_ = [t.find('div', class_="gallery").get('data-tags') for t in book]
    tags = []
    for i in tags_:
        # data-tags is a space-separated list of numeric tag ids.
        tags__ = i.split(' ')
        tags.append(tags__)
    allnumbers.extend(numbers)
    allnames.extend(names)
    alltags.extend(tags)
    now += 1
    spinner.next()
# Translate tag ids to names via tag.json and emit one CSV row per favorite.
with open('tag.json', 'r') as f:
    tagjson = json.load(f)
for i in enumerate(allnumbers):
    tagstr = ""
    for j in alltags[i[0]]:
        if j in tagjson:
            tagstr += tagjson[j] + ", "
    table.append([i[1], allnames[i[0]], tagstr])
# utf_8_sig writes a BOM so Excel auto-detects UTF-8.
with open('output.csv', 'w', newline='', encoding="utf_8_sig") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(table)
from gettags import get_tags
from progress.spinner import PixelSpinner
from bs4 import BeautifulSoup
import yaml
import requests
import locale
import os
import json
import csv
# First run: write a template config and exit so the user can fill it in.
if not os.path.isfile("set.yaml"):
    with open('set.yaml', 'w') as f:
        yaml.dump({"cookid": "", "useragent": ""}, f)
    print("Please edit set.yaml")
    exit()
# Load the nhentai session cookie and user agent from the config.
with open('set.yaml', 'r') as f:
    data = yaml.load(f, Loader=yaml.CLoader)
    cookie = data["cookid"]  # NOTE: the config key is spelled "cookid"
    useragent = data["useragent"]
if cookie == "":
    print("Please edit set.yaml")
    exit()
# setting
URL = "https://nhentai.net/favorites/"
APIURL = "https://nhentai.net/api/gallery/"
# Output rows for output.csv; first row is the header.
table = [
    ["id", "name", "tags"]
]
now = 1          # current favorites page number
allnumbers = []  # gallery ids across all pages
allnames = []    # gallery titles across all pages
alltags = []     # per-gallery lists of tag ids
# Choose UI strings from the OS locale (Traditional Chinese vs. English).
# NOTE(review): locale.getdefaultlocale() is deprecated since Python 3.11 —
# consider locale.getlocale(); left unchanged here.
locate = locale.getdefaultlocale()[0]
if locate == "zh_TW":
    language = {
        "nodata": "沒有發現離線資料 抓取中請稍後...",
        "nodata2": "抓取完畢",
        "usedata": "使用離線資料",
        "getdata": "抓取資料中...",
        "403": "403 錯誤,可能被 cloudflare 阻擋,請檢查 cookie 是否正確",
    }
else:
    language = {
        "nodata": "No offline data found, please wait a moment...",
        "nodata2": "Done",
        "usedata": "Use offline data",
        "getdata": "Getting data...",
        "403": "403 error, maby block by cloudflare , please check if the cookie is correct",
    }
def banner():
    """Print the ASCII-art startup banner."""
    # NOTE(review): the art's internal spacing appears mangled in this copy
    # of the file — purely cosmetic either way.
    data = """ _ _ _ ___ _
_ __ ___| |__ _ __ | |_ __ _(_) / __\/_\/\ /\
| '_ \ / _ \ '_ \| '_ \| __/ _` | |_____ / _\ //_\\ \ / /
| | | | __/ | | | | | | || (_| | |_____/ / / _ \ V /
|_| |_|\___|_| |_|_| |_|\__\__,_|_| \/ \_/ \_/\_/
"""
    print(data)
# request
def wtfcloudflare(url, method="get", data=None):
    """Make a browser-looking HTTP request so Cloudflare lets it through.

    Relies on the module-level ``cookie`` and ``useragent`` read from
    set.yaml at import time.

    :param url: target URL.
    :param method: "get" or "post".
    :param data: form payload for POST requests.
    :return: the requests.Response object.
    """
    browser = requests.Session()
    # Replace (not update) the default headers so only these are sent.
    browser.headers = {
        'Referer': "https://nhentai.net/login/",
        'User-Agent': useragent,
        'Cookie': cookie,
        'Accept-Language': 'en-US,en;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br',
    }
    if method == "get":
        reply = browser.get(url)
    elif method == "post":
        reply = browser.post(url, data=data)
    return reply
def check_pass():
    """Abort with a localized message when the site root answers 403.

    A 403 on https://nhentai.net/ indicates Cloudflare rejected the saved
    session cookie / user agent.
    """
    response = wtfcloudflare("https://nhentai.net/")
    blocked = response.status_code == 403
    if blocked:
        print(language["403"])
        exit()
# --- main ---
banner()
check_pass()
# Build tag.json (tag id -> name lookup) on first run; reuse it afterwards.
if not os.path.isfile("tag.json"):
    print(language["nodata"])
    get_tags()
    print(language["nodata2"])
print(language["usedata"])
spinner = PixelSpinner(language["getdata"])
# Walk the favorites pages until an empty page signals the end.
while True:
    data = wtfcloudflare(f"{URL}?page={now}")
    soup = BeautifulSoup(data.text, 'html.parser')
    book = soup.find_all("div", class_='gallery-favorite')
    if book == []:
        break
    numbers = [t.get('data-id') for t in book]
    names = [t.find('div', class_="caption").get_text() for t in book]
    tags_ = [t.find('div', class_="gallery").get('data-tags') for t in book]
    tags = []
    for i in tags_:
        # data-tags is a space-separated list of numeric tag ids.
        tags__ = i.split(' ')
        tags.append(tags__)
    allnumbers.extend(numbers)
    allnames.extend(names)
    alltags.extend(tags)
    now += 1
    spinner.next()
# Translate tag ids to names via tag.json and emit one CSV row per favorite.
with open('tag.json', 'r') as f:
    tagjson = json.load(f)
for i in enumerate(allnumbers):
    tagstr = ""
    for j in alltags[i[0]]:
        if j in tagjson:
            tagstr += tagjson[j] + ", "
    table.append([i[1], allnames[i[0]], tagstr])
# utf_8_sig writes a BOM so Excel auto-detects UTF-8.
with open('output.csv', 'w', newline='', encoding="utf_8_sig") as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(table)