You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
pixiv/PixivSearch/pixiv/pixiv.py

150 lines
5.0 KiB

#!/usr/bin/env python
# coding:utf-8
import json
import os
import sys
import threading
from concurrent import futures
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import root
from PixivSearch.model import config
from PixivSearch.settings import logger
from PixivSearch.util import Util
# Base HTTP headers sent with every request; a per-image 'referer' header is
# added later by get_Img.
headers = {
    'X-Requested-With': 'XMLHttpRequest',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
    'Chrome/56.0.2924.87 Safari/537.36'
}
lock = threading.Lock()  # global lock guarding the shared `nodes` list across worker threads
total = 1  # NOTE(review): declared global in crawl() but never modified or read — confirm before removing
address = '127.0.0.1:1080'  # local proxy endpoint used for all requests
# proxies = {"http": "socks5://{0}".format(address), "https": "socks5://{0}".format(address)}
proxies={"http": address,"https": address}  # plain HTTP proxy; SOCKS5 variant kept above for reference
def get_cookies():
    """Parse the stored 'pixiv_cookie' config value into a cookie dict.

    The config value is a browser-style 'k1=v1; k2=v2' string; each pair is
    split on the first '=' so cookie values may themselves contain '='.
    """
    raw = config.param.objects.get(param_name='pixiv_cookie').param_value
    return dict(pair.strip().split('=', 1) for pair in raw.split(';'))
def getReq(url, **kwargs):
    """GET *url* through the configured proxy, attaching the shared headers
    and the user's Pixiv cookies. Extra keyword arguments are forwarded to
    requests.get (e.g. allow_redirects)."""
    return requests.get(
        url,
        headers=headers,
        cookies=get_cookies(),
        proxies=proxies,
        **kwargs,
    )
def crawl(url):
    """Fetch one search-result page and append its image nodes to the shared
    module-level `nodes` list (created by get_nodes before the pool starts)."""
    global total  # NOTE(review): declared but never assigned here — kept for fidelity
    response = getReq(url)
    soup = BeautifulSoup(response.content, 'html.parser')
    # The search results are embedded as JSON in the 'data-items' attribute
    # of the mount-point element.
    data_items = soup.select('#js-mount-point-search-result-list')[0].attrs['data-items']
    for node in json.loads(data_items):
        with lock:
            nodes.append(node)
def get_urls(search, page):
    """Build the Pixiv search URLs for *search*, pages 1 through page-1,
    newest first (order=date_d). Returns an empty list when page <= 1."""
    fmt = 'https://www.pixiv.net/search.php?word={}&order=date_d&p={}'
    urls = []
    for page_no in range(1, page):
        urls.append(fmt.format(search, page_no))
    return urls
def get_Img(params):
    """Download one illustration's original-size image into imgPath.

    params is a (rank, node) tuple: rank is the 1-based position after
    sorting by bookmark count; node is the image dict parsed from the search
    page. Tries the .jpg original first and falls back to .png; on success
    records the saved file name in node['localName'], on double 404 logs an
    error and returns without saving.
    """
    rank, node = params
    # Reconstruct the original-size URL from the fixed-position slice of the
    # thumbnail URL (assumes Pixiv's thumbnail URL layout — TODO confirm).
    node['imgUrl'] = 'https://i.pximg.net/img-original/img/' + node['url'][-46:-15]
    # NOTE(review): mutating the shared module-level headers dict is racy when
    # this runs in a thread pool — concurrent downloads see each other's referer.
    headers['referer'] = 'https://www.pixiv.net/member_illust.php?mode=medium&illust_id=' + node['illustId']
    suffix = ".jpg"
    logger.info('开始下载图片:%s%s' % (node['imgUrl'], suffix))
    s = getReq(node['imgUrl'] + suffix)
    if s.status_code == 404:
        # The original may be a PNG; retry with the alternate extension.
        suffix = '.png'
        s = getReq(node['imgUrl'] + suffix)
    if s.status_code == 404:
        logger.error('无法下载图片:%s' % (node['illustTitle']))
        return
    logger.info('下载图片:"%s"%s' % (
        node['illustTitle'], os.getcwd().replace('\\', '/') + '/' + imgPath + node['illustId'] + suffix))
    # Bug fix: use a context manager so the handle is closed even if the
    # write raises (the original open/close pair leaked on failure).
    with open(imgPath + node['illustId'] + suffix, 'wb') as f:
        f.write(s.content)
    node['localName'] = node['illustId'] + suffix
    logger.info('排行第%d名,收藏数%d,标题:%s,标签:%s,(%s)前投稿,链接:%s' % (
        rank, node['bookmarkCount'], node['illustTitle'], ','.join(node['tags']), '',
        node['imgUrl']))
# Human-readable size of the generated zip ('<n>MB'); set by get_nodes when
# image packaging is requested, otherwise returned as this empty default.
fsize = ''
def get_nodes(param):
    """Crawl Pixiv search results and return the most-bookmarked images.

    param mirrors sys.argv:
      param[1] - search word
      param[2] - number of result pages to crawl (digits)
      param[3] - how many top-bookmarked images to keep (digits)
      param[4] - 'img' to also download the images and zip them

    Returns [nodes, tip, fsize] where nodes is the sorted/truncated image
    list, tip a human-readable summary and fsize the zip size ('' when no
    download was requested). Raises Exception when the stored cookie is
    stale (pixiv.net answers with a 302 redirect to the login page).
    """
    global nodes, fsize, imgPath
    nodes = []
    start = datetime.now()
    urls = get_urls(param[1], int(param[2]) + 1)
    logger.info('开始从P站获取图片数据')
    # An expired cookie makes pixiv.net redirect to login; detect it early.
    res = getReq("https://www.pixiv.net", allow_redirects=False)
    if res.status_code == 302:
        raise Exception("更新Cookie")
    with futures.ThreadPoolExecutor(32) as executor:
        executor.map(crawl, urls)
    length = len(nodes)
    logger.info('获取到%d张图片' % (length))
    logger.info('对图片收藏数进行排序')
    # Keep only the param[3] most-bookmarked images.
    nodes = sorted(nodes, key=lambda v: v.get('bookmarkCount'), reverse=True)[:int(param[3])]
    # Fix: the original `param[4] != None and param[4] == 'img'` is redundant —
    # the equality test alone is equivalent.
    if param[4] == 'img':
        imgPath = root.getConfigValue('imgPath')
        if not os.path.exists(imgPath):
            os.makedirs(imgPath)
        # Remove leftovers from a previous run so the zip only holds this batch.
        for file in os.listdir(imgPath):
            os.remove(imgPath + file)
        start_d = datetime.now()
        # Pair each image with its 1-based rank for get_Img's log output.
        nodes_tup = [(index + 1, img) for index, img in enumerate(nodes)]
        with futures.ThreadPoolExecutor(32) as executor:
            executor.map(get_Img, nodes_tup)
        # Consistency fix: the original used print() here while everything
        # else goes through the logger.
        logger.info('下载图片花费时间:%s' % (datetime.now() - start_d))
        logger.info('%s张图片下载完毕' % (len(os.listdir(imgPath))))
        baseZipPath = root.getConfigValue('baseZipPath')
        if not os.path.exists(baseZipPath):
            os.makedirs(baseZipPath)
        zipPath = baseZipPath + param[1] + '.zip'
        logger.info('图片打包到:%s' % (zipPath))
        Util.zip(imgPath, zipPath)
        fsize = str(round(os.path.getsize(zipPath) / float(1024 * 1024), 2)) + 'MB'
        logger.info('图包大小:%s' % (fsize))
    tip = '%d张图片中筛选出收藏数前%s的图片,处理耗时:%s' % (length, param[3], datetime.now() - start)
    logger.info(tip)
    return [nodes, tip, fsize]
if __name__ == "__main__":
    # Usage: pixiv.py <search-word> <pages> <top-n> <mode>; argv must have
    # exactly 5 entries and the numeric arguments must be digits.
    if (len(sys.argv)) == 5 and sys.argv[2].isdigit() and sys.argv[3].isdigit():
        try:
            get_nodes(sys.argv)
        except BaseException as e:
            # Bug fix: the original evaluated repr(e) and discarded it,
            # silently swallowing every failure. Log the exception instead.
            logger.error(repr(e))
    else:
        logger.error('参数不合法')