docker打包

master
潘啟华 5 years ago
parent 22f6819e54
commit 7359aed30d
  1. 8
      Dockerfile
  2. 0
      PixivSearch/aliyun/__init__.py
  3. 140
      PixivSearch/aliyun/photo/AliyunPhoto.py
  4. 0
      PixivSearch/baidu/__init__.py
  5. 118
      PixivSearch/baidu/downLoadBduImg.py
  6. 119
      PixivSearch/dao/Comment.py
  7. 180
      PixivSearch/dao/bangumi.py
  8. 61
      PixivSearch/migu/GetVideoUrl.py
  9. 43
      PixivSearch/model/config.py
  10. 778
      PixivSearch/pac/CreatePacFile.py
  11. 16
      PixivSearch/pac/RuleManage.py
  12. 2
      PixivSearch/pixiv/pixiv.py
  13. 23
      PixivSearch/thrift/QueryComment/Client.py
  14. 131
      PixivSearch/thrift/QueryComment/QueryComment-remote
  15. 660
      PixivSearch/thrift/QueryComment/QueryComment.py
  16. 48
      PixivSearch/thrift/QueryComment/Server.py
  17. 1
      PixivSearch/thrift/QueryComment/__init__.py
  18. 14
      PixivSearch/thrift/QueryComment/constants.py
  19. 18
      PixivSearch/thrift/QueryComment/ttypes.py
  20. 0
      PixivSearch/thrift/__init__.py
  21. 22
      PixivSearch/thrift/task/Server.py
  22. 124
      PixivSearch/thrift/task/TSDM-remote
  23. 366
      PixivSearch/thrift/task/TSDM.py
  24. 1
      PixivSearch/thrift/task/__init__.py
  25. 14
      PixivSearch/thrift/task/constants.py
  26. 18
      PixivSearch/thrift/task/ttypes.py
  27. 7
      PixivSearch/urls.py
  28. 44
      PixivSearch/view.py
  29. 20890
      init/get-pip.py
  30. 18
      init/packages.txt
  31. BIN
      mydatabase
  32. 4
      privoxy_config
  33. 6
      run.sh
  34. 9
      sources.list

@@ -4,8 +4,10 @@ COPY pip.conf /root/.pip/
COPY . /usr/src/app/
RUN pip install --upgrade pip
RUN pip install -r /usr/src/app/requirements.txt
#更新apt-get源 使用163的源
COPY sources.list /etc/apt/
RUN apt-get update \
 && apt-get install privoxy shadowsocks-libev -y
COPY privoxy_config /etc/privoxy/config
WORKDIR /usr/src/app/
CMD ["sh", "./run.sh"]

@ -1,140 +0,0 @@
import hashlib
import json
import os
import time
import aliyunsdkcore
import oss2 as oss2
from aliyunsdkcloudphoto.request.v20170711 import ListPhotoStoresRequest, FetchLibrariesRequest, \
CreateTransactionRequest, CreatePhotoRequest, EditPhotosRequest, GetPublicAccessUrlsRequest, ListPhotosRequest, \
DeletePhotosRequest, InactivatePhotosRequest, GetLibraryRequest
from aliyunsdkcore.auth.credentials import RamRoleArnCredential
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.profile import region_provider
from aliyunsdkcore.request import CommonRequest
# Aliyun region and API credentials.
# SECURITY(review): the access key and secret are hard-coded in source and were
# committed to version control — they should be revoked and loaded from the
# environment or a config file instead.
region_id = 'cn-shanghai'
ak = 'LTAIWzPnmkJs2qpL'
secret = 'LIIq3HumctXPp0WT8c06yDiFbKKiVe'
# Register the regional endpoints for OSS and CloudPhoto, and force HTTPS for all requests.
region_provider.add_endpoint('Oss', region_id, 'oss-cn-shanghai.aliyuncs.com')
region_provider.add_endpoint('CloudPhoto', region_id, 'cloudphoto.cn-shanghai.aliyuncs.com')
aliyunsdkcore.request.set_default_protocol_type("https")
class MyClient(AcsClient):
    """Aliyun CloudPhoto client authenticated by assuming a RAM role.

    Wraps ``AcsClient`` with interactive helpers: listing photo stores,
    uploading a local file through OSS, and generating public share URLs.
    Several methods prompt on stdin, so this class is meant for CLI use.
    """

    def __init__(self, arn, sessionName):
        # Assume the RAM role `arn` using the module-level ak/secret pair.
        super().__init__(region_id=region_id,
                         credential=RamRoleArnCredential(ak, secret, arn, sessionName))
        # Name of the currently selected photo store; filled lazily by listPhotoStores().
        self.StoreName = None

    def get_md5_01(self, file_path):
        """Return the lowercase hex MD5 of the file at `file_path`, or None if it is not a regular file."""
        md5 = None
        if os.path.isfile(file_path):
            # `with` guarantees the handle is closed (the original leaked it on error).
            with open(file_path, 'rb') as f:
                md5_obj = hashlib.md5()
                md5_obj.update(f.read())
            md5 = str(md5_obj.hexdigest()).lower()
        return md5

    def do_action_with_exception(self, acs_request):
        """Execute the request and decode the raw byte response into a JSON object."""
        return json.loads(super().do_action_with_exception(acs_request).decode())

    def showOption(self, options, key, desc):
        """Print a 1-based menu of ``options[i][key]`` and return the value picked via stdin."""
        choose = []
        for index, option in enumerate(options):
            choose.append(option[key])
            print('{index}:{name}'.format(index=index + 1, name=option[key]))
        return choose[(int)(input('输入{desc}\n'.format(desc=desc))) - 1]

    def listPhotoStores(self):
        """List the account's photo stores and let the user pick one (sets self.StoreName)."""
        request = ListPhotoStoresRequest.ListPhotoStoresRequest()
        response = self.do_action_with_exception(request)
        print('PhotoStores:')
        photoStores = response['PhotoStores']
        self.StoreName = self.showOption(photoStores, 'Name', 'StoreName')

    def listLibraries(self):
        """Fetch the libraries of the selected store into self.Libraries."""
        request = FetchLibrariesRequest.FetchLibrariesRequest()
        request.set_StoreName(self.StoreName)
        response = self.do_action_with_exception(request)
        self.Libraries = response['Libraries']

    def uploadPhoto(self):
        """Interactively upload a local file: create a transaction, PUT the bytes to OSS, register the photo."""
        if self.StoreName is None:
            self.listPhotoStores()
        request = CreateTransactionRequest.CreateTransactionRequest()
        filePath = input('输入上传文件路径\n').replace('\\', '/')
        fileName = filePath.split('/')[-1]
        request.set_Size(os.path.getsize(filePath))
        # Extension = everything after the last '.' in the file name.
        request.set_Ext(fileName[-fileName[::-1].index('.'):])
        request.set_Md5(self.get_md5_01(filePath))
        request.set_StoreName(self.StoreName)
        response = self.do_action_with_exception(request)
        print(response)
        Upload = response['Transaction']['Upload']
        FileId = Upload['FileId']
        SessionId = Upload['SessionId']
        Bucket = Upload['Bucket']
        OssEndpoint = Upload['OssEndpoint']
        ObjectKey = Upload['ObjectKey']
        # Reuse the STS session credential held by the AcsClient signer for OSS auth.
        auth = oss2.StsAuth(self._signer._session_credential[0],
                            self._signer._session_credential[1],
                            self._signer._session_credential[2])
        bucket = oss2.Bucket(auth, OssEndpoint, Bucket)
        with open(filePath, 'rb') as fileobj:
            result = bucket.put_object(ObjectKey, fileobj)
            print('文件上传状态{status}'.format(status=result.status))
        # Register the uploaded object as a photo in the store.
        request = CreatePhotoRequest.CreatePhotoRequest()
        request.set_FileId(FileId)
        request.set_PhotoTitle(fileName)
        request.set_SessionId(SessionId)
        request.set_StoreName(self.StoreName)
        request.set_UploadType('manual')
        response = self.do_action_with_exception(request)
        print(response)

    def listPhotos(self):
        """Return all photos (any state) in the selected store."""
        if self.StoreName is None:  # original used `== None`
            self.listPhotoStores()
        request = ListPhotosRequest.ListPhotosRequest()
        request.set_StoreName(self.StoreName)
        request.set_State('all')
        response = self.do_action_with_exception(request)
        print(response)
        return response['Photos']

    def getPhoto(self):
        """Let the user pick one photo and return its IdStr."""
        return self.showOption(self.listPhotos(), 'IdStr', '照片Id')

    def sharePhoto(self):
        """Share the chosen photo for one hour and print its public OSS access URL."""
        IdStr = self.getPhoto()
        request = EditPhotosRequest.EditPhotosRequest()
        request.set_PhotoIds([IdStr])
        request.set_StoreName(self.StoreName)
        # Expiry is in milliseconds: (now + 1h seconds) * 1000.
        request.set_ShareExpireTime((int(round(time.time())) + 60 * 60) * 1000)
        response = self.do_action_with_exception(request)
        print(response)
        request = GetPublicAccessUrlsRequest.GetPublicAccessUrlsRequest()
        request.set_DomainType('OSS')
        request.set_PhotoIds([IdStr])
        request.set_StoreName(self.StoreName)
        request.set_ZoomType('style/2')
        response = self.do_action_with_exception(request)
        print(response)
def client(arn, sessionName):
    """Return a plain AcsClient that authenticates by assuming the given RAM role.

    NOTE(review): duplicates the module-level ak/secret literals instead of
    reusing the `ak`/`secret` constants — same hard-coded-credential issue.
    """
    ram_role_arn_credential = RamRoleArnCredential('LTAIWzPnmkJs2qpL', 'LIIq3HumctXPp0WT8c06yDiFbKKiVe',
                                                   arn, sessionName)
    return AcsClient(region_id='cn-shanghai', credential=ram_role_arn_credential)


if __name__ == '__main__':
    # Interactive demo: pick a store, then share a photo.
    myClient = MyClient('acs:ram::1098806312754985:role/aliyunosstokengeneratorrole', 'pqh001')
    myClient.listPhotoStores()
    myClient.sharePhoto()

@ -1,118 +0,0 @@
import json
import os
import re
from concurrent import futures
from datetime import datetime
import requests
def get_cookies():
    """Parse the hard-coded Baidu cookie string into a {name: value} dict.

    Cookie values may themselves contain '=' (e.g. BAIDUID ends in ':FG=1'),
    so each entry is split on the first '=' only.
    """
    _cookies = {}
    array = "BDqhfp=fate%26%26NaN%26%260%26%261; BIDUPSID=8689C23BFD1526702A4EF173F3A809DD; BDRCVFR[dG2JNJb_ajR]=mk3SLVN4HKm; userFrom=null; BAIDUID=8689C23BFD152670722FAAEB4DDC55FA:FG=1; BDRCVFR[-pGxjrCMryR]=mk3SLVN4HKm".split(
        ';')
    for row in array:
        k, v = row.strip().split('=', 1)
        _cookies[k] = v
    return _cookies
# Directory downloaded images are written to (set in __main__ / getBaiduImage).
savePath = None
# Worker-thread count; also used as the page size (`rn`) for the Baidu image API.
threadNum = 10
# Wall-clock start of the current download run.
startTime = None
def getBaiduImage(word):
    """Page through Baidu image search for `word`, collect image URLs, then download them concurrently.

    Fills the module-level `savePath` with a per-keyword subdirectory and
    maintains a marker file whose name shows queue-fill progress.
    """
    global startTime, savePath
    params = []  # (url, index) work items for downImage
    startTime = datetime.now()
    start = threadNum  # `pn` paging offset
    i = 0
    filepath = None  # current progress-marker file
    savePath = r'{savePath}/{word}'.format(savePath=savePath, word=word)
    if not os.path.exists(savePath):
        os.makedirs(savePath)
    while True:
        try:
            url = r"https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord={" \
                  r"queryWord}&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word={" \
                  r"word}&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn={pn}&rn={rn}&gsm=3c&1523890541764= "
            url = url.format(queryWord=word, word=word, pn=start, rn=threadNum)
            print('request url:%s' % url)
            req = requests.get(url)
            if req.status_code == 200:
                req.encoding = 'utf-8'
                # Strip escaped quotes that break json parsing of Baidu's response.
                obj = json.loads(req.text.replace('\\\'', ''))
                # A single-element `data` array marks the last (empty) page.
                if len(obj['data']) == 1:
                    break
                for img in obj['data']:
                    if 'fromPageTitle' in img:
                        print('图片:%s\t添加到下载队列' % img['fromPageTitleEnc'])
                    if 'replaceUrl' in img:
                        params.append((img['replaceUrl'][0]['ObjURL'], i))
                        i += 1
                # Replace the old progress-marker file with one showing the new count.
                if filepath is not None and os.path.exists(filepath):
                    os.remove(filepath)
                filepath = r'{savePath}/图片下载队列填充:{i}'.format(savePath=savePath, i=i)
                open(filepath, 'w').close()
                start += threadNum
        except BaseException as e:
            # Best-effort crawl: log and keep paging (matches the original behavior).
            print(repr(e))
    if filepath is not None and os.path.exists(filepath):
        os.remove(filepath)
    executors = futures.ThreadPoolExecutor(threadNum)
    try:
        with executors as executor:
            executor.map(downImage, params)
    except BaseException as e:
        print(repr(e))
def downImage(params):
    """Worker: download one image. `params` is a (url, index) tuple.

    The file is saved as `{savePath}/fate_{index}.{ext}` where ext is taken
    from the last '.' in the URL. All errors are logged and swallowed so one
    bad URL never kills the pool.
    """
    try:
        url, index = params
        print(r'开始下载图片{url}'.format(url=url))
        imgurl = requests.get(url, headers={"Referer": "image.baidu.com"})
        if imgurl.status_code == 200:
            # Renamed from `format` to avoid shadowing the builtin.
            ext = url[-url[::-1].index('.'):]
            imgPath = r'{savePath}/fate_{index}.{format}'.format(savePath=savePath,
                                                                 index=index,
                                                                 format=ext)
            # `with` closes the handle even if write() raises (original leaked it).
            with open(imgPath, 'wb') as f:
                f.write(imgurl.content)
            print(r'图片{url}成功下载到{imgPath}'.format(url=url, imgPath=imgPath))
    except BaseException as e:
        print(repr(e))
if __name__ == '__main__':
    # NOTE(review): the original interactive keyword/download loop is kept
    # commented out below the active code path, which currently just calls a
    # gif-maker web service and prints the resulting URL from the 'd' field.
    # str = ''
    # while True:
    #     str = input('输入要下载图片的关键字,输入 exit 退出程序\n')
    #     if not str == 'exit':
    #         while True:
    #             savePath = input('输入图片存放目录:例如 E:/图片,注意目录之间使用正斜杠隔开"/"\n')
    #             if re.fullmatch(r"[a-zA-z]:(/[\u4e00-\u9fa5_a-zA-Z0-9]+)+", savePath) is None:
    #                 print(r'图片目录{savePath}不合法请重新输入'.format(savePath=savePath))
    #             else:
    #                 break
    #         getBaiduImage(str)
    #     else:
    #         print('exit')
    #         break
    req = requests.post('https://gifmaker.develophelper.com/gif/make',
                        {'tplid': 1,
                         'content': '好啊##$@?$?@$##就算你是一流工程师##$@?$?@$##就算你出报告再完美##$@?$?@$##我叫你改报告你就要改##$@?$?@$##毕竟我是客户##$@?$?@$##客户了不起啊##$@?$?@$##sorry 客户真的了不起##$@?$?@$##以后叫他天天改报告##$@?$?@$##天天改 天天改'})
    if req.status_code == 200:
        response = json.loads(req.content.decode())
        if 'd' in response:
            print(response['d'])

@ -1,119 +0,0 @@
import json
import os
import shutil
import threading
from concurrent import futures
import requests
from lxml import etree
class Comment:
    """Accumulates bilibili danmaku (bullet-comment) frequencies across many cid snapshots."""

    lock = threading.Lock()  # guards the shared counter dict across worker threads

    def __init__(self, keywords_=None) -> None:
        super().__init__()
        # obj['data']: comment text -> occurrence count; obj['flag']: True once any keyword was seen.
        self.obj = {'data': {}, 'flag': False}
        self.keywords = keywords_

    # Build the danmaku ranking for a whole season (collection of cids).
    def getCommentSort(self, cids):
        """Count comments over every history snapshot of `cids` and print the top 50."""
        urls = []
        for cid in cids:
            urls.extend(getCidUrls(cid))
        with futures.ThreadPoolExecutor(32) as executor:
            executor.map(self.count, urls)
        ranking = sorted(self.obj["data"].items(), key=lambda d: d[1], reverse=True)[:50]
        for index, data in enumerate(ranking):
            print('{index}:{data}'.format(index=index + 1, data=data))

    def count(self, url, desc=None):
        """Fetch one danmaku XML page and add its comments to the shared counters."""
        content = requests.get(url).content  # renamed: original shadowed builtin `bytes`
        comment_selector = etree.HTML(content)
        if desc is not None:
            print(desc)
        print("url=%s" % url)
        for comment in comment_selector.xpath('//i//d/text()'):
            # Read-modify-write under one lock acquisition; the original checked
            # membership outside the lock (check-then-act race between threads).
            with self.lock:
                self.obj["data"][comment] = self.obj["data"].get(comment, 0) + 1
            if not self.obj["flag"]:
                # NOTE(review): assumes self.keywords is iterable — a Comment()
                # built without keywords_ would raise here, as in the original.
                for keyword in self.keywords:
                    if keyword in comment:
                        self.obj["flag"] = True
# Resolve a cid to the URLs of its historical danmaku snapshots.
def getCidUrls(cid):
    """Return all rolldate snapshot URLs for `cid`, or the live XML URL if no history exists."""
    urls = []
    url = "https://comment.bilibili.com/rolldate,%d" % cid
    req = requests.get(url)
    if len(req.text) > 0:
        for i in json.loads(req.text):
            urls.append("https://comment.bilibili.com/dmroll,%s,%d" % (i['timestamp'], cid))
    else:
        # Empty body means no history — fall back to the current danmaku pool.
        urls.append("https://comment.bilibili.com/%d.xml" % cid)
    return urls
# Download danmaku for one cid into an XML file.
def downloadXml(path, cid, size=None, histroy=True):
    """Download danmaku for `cid` into `{path}/{cid}.xml`.

    histroy=True walks every history snapshot; `size` (e.g. "80%") stops once
    the danmaku pool is that full. The output directory is recreated from
    scratch. (Parameter name `histroy` [sic] kept for caller compatibility.)
    """
    dlist = set()
    flag = None
    if histroy:
        flag = parseXml(getCidUrls(cid), dlist, size)
    else:
        parseXml("https://comment.bilibili.com/%d.xml" % cid, dlist, size)
    # Write only when no fill target was given, or the target was reached.
    if size is None or (histroy and size is not None and flag):
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)
        with open('{path}/{cid}.xml'.format(path=path, cid=cid), 'wb') as f:
            f.write(b'<?xml version="1.0" encoding="UTF-8"?><i>')
            for i in dlist:
                f.write(('\r\n' + i).encode())
            f.write(b'\r\n</i>')


def xml(url):
    """Fetch `url` and parse the response body with lxml's HTML parser."""
    content = requests.get(url).content  # renamed: original shadowed builtin `bytes`
    return etree.HTML(content)
def parseXml(urls, dlist, size=None):
    """Parse danmaku XML page(s) into the `dlist` set of `<d p="...">text</d>` strings.

    `urls` may be one URL or a list. `size` like "80%" sets a pool-fill target;
    returns True as soon as len(dlist)/maxlimit reaches it, else False.
    """
    if isinstance(urls, str):
        urls = [urls]
    if size is not None:
        size = float(size.strip('%')) / 100.0
    for url in urls:
        comment_selector = xml(url)
        texts = comment_selector.xpath('//i//d/text()')  # renamed: original shadowed builtin `list`
        maxlimit = int(comment_selector.xpath('//i//maxlimit/text()')[0])
        if len(texts) > 0:
            print('弹幕数:{list},最大弹幕数:{maxlimit},弹幕池填充:{p}'.format(list=len(texts), maxlimit=maxlimit,
                                                                   p='%.2f%%' % (len(texts) / maxlimit * 100)))
        for element in comment_selector.xpath('//i//d'):
            if len(element.xpath("text()")) > 0:
                fstr = '<d p="{p}">{content}</d>'.format(p=str(element.xpath("@p")[0]),
                                                         content=str(element.xpath("text()")[0]))
                dlist.add(fstr)
        currentSize = len(dlist) / maxlimit
        print('填充率:{l}'.format(l='%.2f%%' % (currentSize * 100)))
        if size is not None and currentSize >= size:
            return True
    return False
if __name__ == '__main__':
    # Sample season cids (unused by the call below — kept from the original).
    cids = [7636499, 7636501, 7636500, 7636503, 7636504, 7636502, 7636509, 7636508, 7636506, 7636507, 7636505]
    downloadXml('F:/ABC', 12026697, histroy=False)

@ -1,180 +0,0 @@
import _thread
import json
import math
from concurrent import futures
from queue import Queue
import requests
from bs4 import BeautifulSoup
from PixivSearch.model.config import mediaInfo, stat
from PixivSearch.settings import logger
# Most recently persisted mediaInfo row, exposed to callers via get_().
current_mediaInfo = None
# Cooperative stop flag read by the crawler loops; None until getIds() runs.
isStop = None


def stop_():
    """Request the running crawl to stop (checked by save() and getIds())."""
    global isStop
    isStop = True
def save(params):
    """Fetch one bangumi media page and persist its mediaInfo/stat rows.

    `params` is a (bangumi_id, season_id, media_id) tuple. On a network error
    the request is retried once via recursion.
    """
    if isStop:
        return
    logger.info(params)
    bangumi_id, season_id, media_id = params
    url = "https://www.bilibili.com/bangumi/media/md%d" % media_id
    try:
        req = requests.get(url, timeout=10)
    except BaseException as e:
        logger.error(repr(e))
        save(params)
        # Fix: the original fell through here and used the unbound `req`,
        # raising NameError after the retry completed.
        return
    logger.info("request_url=%s,status_code=%d" % (url, req.status_code))
    if req.status_code == 200:
        # The page embeds its data as JSON in the 4th <script> tag.
        json_obj = getJsonText(req, 3)
        try:
            if 'mediaInfo' in json_obj and 'stat' in json_obj['mediaInfo'] and 'chn_name' in json_obj['mediaInfo']:
                stat_info = json_obj['mediaInfo']['stat']
                print(stat_info)
                mediaInfo(bangumi_id=bangumi_id, season_id=season_id, media_id=media_id,
                          chn_name=json_obj['mediaInfo']['chn_name']).save()
                global current_mediaInfo
                current_mediaInfo = mediaInfo.objects.get(pk=season_id)
                stat(id=season_id, danmakus=int(stat_info['danmakus']), favorites=stat_info['favorites'],
                     views=stat_info['views']).save()
        except BaseException as e:
            logger.error(repr(e))
def getJsonText(req, index):
    """Extract the inline JSON object assigned in the `index`-th <script> tag of the page.

    Assumes the script has the shape `window.X = {...};(function...` — the
    slice between the first '=' and the trailing 'function' is the JSON body.
    """
    tag = BeautifulSoup(req.text, 'lxml')
    script = tag.select("script")[index].text
    json_str = script[script.index("=") + 1:script.index("function") - 2]
    return json.loads(json_str)
def get_():
    """Return the most recently saved mediaInfo row (module global)."""
    # The original declared `global current_mediaInfo` here; it is redundant
    # for a read-only access and has been dropped.
    return current_mediaInfo


# Page size for the season index API; the bounded queue hands id batches
# from getIds() to the background listen() thread.
page_size = 10
queue = Queue(page_size)
def listen():
    """Consumer loop: take id batches off the queue and crawl them with a thread pool."""
    while True:
        ids = queue.get()
        try:
            with futures.ThreadPoolExecutor(page_size) as executor:
                executor.map(save, ids)
            logger.info('结束爬虫')
        except BaseException as e:
            logger.error(repr(e))


# Start the consumer as a background thread at import time.
# NOTE(review): import-time side effect kept for compatibility with existing callers.
_thread.start_new_thread(listen, ())
# Walk the whole global season index, collecting stat info for every show.
def getIds():
    """Page through bilibili's global season index and enqueue (bangumi_id, season_id, media_id) batches.

    Runs until every page is consumed or stop_() flips `isStop`. Each page's
    tuples are put on the module queue for the listen() thread.
    """
    seasonIdList = []  # season ids already seen (dedup across pages)
    page = 1
    pages = None  # total page count, learned from the first successful response
    name = 'seasonListCallback'
    global isStop
    isStop = False
    while isStop == False and (pages is None or page <= pages):
        url = 'https://bangumi.bilibili.com/web_api/season/index_global?page=%d&page_size=%d' % (page, page_size)
        logger.info(url)
        try:
            # Fix: `ids` is initialized before the request so the logging and
            # queue.put below never hit an unbound name on a failed page.
            ids = []
            req = requests.get(url, timeout=10)
            if req.status_code == 200:
                json_obj = json.loads(req.text)
                if 'result' in json_obj and 'list' in json_obj['result']:
                    bangumiList = json_obj['result']['list']
                    for bangumi in bangumiList:
                        if isStop:
                            break
                        if 'season_id' in bangumi:
                            season_id = int(bangumi['season_id'])
                            if season_id in seasonIdList:
                                continue
                            url = 'https://bangumi.bilibili.com/jsonp/seasoninfo/%d.ver?callback=%s&jsonp=jsonp' % (
                                season_id, name)
                            logger.info(url)
                            req = requests.get(url, timeout=10)
                            if req.status_code == 200:
                                # Strip the JSONP wrapper before parsing.
                                child_json_obj = json.loads(
                                    req.text.replace('seasonListCallback(', '').replace(');', ''))
                                if 'result' in child_json_obj and 'bangumi_id' in child_json_obj['result']:
                                    bangumi_id = int(child_json_obj['result']['bangumi_id'])
                                    if 'media' in child_json_obj['result']:
                                        media_id = int(child_json_obj['result']['media']['media_id'])
                                        ids.append((bangumi_id, season_id, media_id))
                                        seasonIdList.append(season_id)
                if pages is None and 'result' in json_obj and 'count' in json_obj['result']:
                    pages = int(math.ceil(int(json_obj['result']['count']) / page_size))
            page = page + 1
            logger.info('获取id数量%d' % len(ids))
            queue.put(ids)
        except BaseException as e:
            logger.error(repr(e))
            continue
# Resolve an aid (video id) to the cid of its first page.
def getCid(aid, type=None):
    """Scan aids starting from `aid` until a valid video is found; return its first cid.

    type=None: try only `aid` itself; type=True: walk downwards (aid-1, ...);
    type=False: walk upwards. Returns None if nothing valid is found.
    """
    while aid > 0:  # original `while True and aid > 0` simplified
        url = "https://api.bilibili.com/x/web-interface/archive/stat?aid=%d" % aid
        print(url)
        req = requests.get(url)
        code = json.loads(req.text)["code"]
        if code == 0:
            req = requests.get("https://www.bilibili.com/video/av%d" % aid)
            if req.status_code == 200:
                # Video page embeds its data in the 10th <script> tag.
                json_obj = getJsonText(req, 9)
                if "videoData" in json_obj and "pages" in json_obj['videoData'] and len(
                        json_obj['videoData']['pages']) > 0 and "cid" in json_obj['videoData']['pages'][0]:
                    cid = json_obj['videoData']['pages'][0]['cid']
                    print('cid=%s' % cid)
                    return cid
        # NOTE(review): step logic placed at loop level (indentation was lost
        # in the source scrape) — confirm against the original file.
        if type is None:
            break
        elif type:
            aid = aid - 1
        else:
            aid = aid + 1


# Bracket an aid with the nearest valid cids below and above it.
def getCids(aid):
    """Return {'min': cid searching downward from aid, 'max': cid searching upward}."""
    return {"min": getCid(aid, True), "max": getCid(aid, False)}
# Collect every cid belonging to the season that contains a given episode.
def episodeIdToCid(episode_id):
    """Return the cids of all episodes listed on the play page for `episode_id`."""
    cids = []
    url = "https://www.bilibili.com/bangumi/play/ep%d" % episode_id
    print("url=%s" % url)
    req = requests.get(url)
    # Episode page embeds its data in the 9th <script> tag.
    json_obj = getJsonText(req, 8)
    if "epList" in json_obj:
        for i in json_obj["epList"]:
            cids.append(i['cid'])
    return cids


if __name__ == '__main__':
    # print(getCids(29416))
    req = requests.post('https://api.bilibili.com/x/v2/history/shadow/set', '')
    # obj = loadData([34807341], [])

@ -1,61 +0,0 @@
import json
import re
import requests
from bs4 import BeautifulSoup
def getUrl(playurl):
    """Resolve a miguvideo play-page URL into its real stream URLs.

    Returns {'name': title, 'video': [{'rateDesc', 'url', 'mediaSize'}, ...]}
    on success, or {'msg': <error text>} on any failure.

    NOTE(review): the nesting of the three status-code checks was inferred —
    the source scrape lost all indentation; verify against the original file.
    """
    # Request headers mimicking an XHR from a desktop Chrome.
    headers = {
        'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/56.0.2924.87 Safari/537.36'
    }
    # headers['referer']='http://www.miguvideo.com/wap/resource/pc/detail/miguplay.jsp?cid=650644142'
    result = {'msg': '网络异常'}
    try:
        if re.match(r"^(http|https):\/\/www\.miguvideo\.com\/wap\/resource\/pc\/detail\/miguplay\.jsp\?cid=\d+$",
                    playurl):
            req = requests.get(playurl, headers=headers)
            sessionID = 0
            playId = 0
            # The cid is the only digit run in a URL matching the pattern above.
            cid = re.findall(r'\d+', playurl)[0]
            if req.status_code == 200:
                # 1) Scrape the sessionID from the play page.
                document = BeautifulSoup(req.text, 'lxml')
                sessionID = document.select('#sessionID')[0].get('value')
                print('sessionID=%s' % sessionID)
                # 2) Fetch the playId for this cid.
                req = requests.get('http://www.miguvideo.com/wap/resource/pc/data/miguData.jsp?cid=%s' % cid,
                                   headers=headers)
                if req.status_code == 200:
                    miguData = json.loads(req.text)
                    playId = miguData[0]['playId']
                    print('playId=%s' % playId)
                    # 3) Ask the playurl API for the real stream URLs.
                    req = requests.get(
                        'http://www.miguvideo.com/playurl/v1/play/playurlh5?contId=%s&rateType=1,2,3&clientId=%s' % (
                            playId, sessionID))
                    if req.status_code == 200:
                        videoInfo = json.loads(req.text)
                        result = {'name': miguData[0]['Detail'], 'video': []}
                        print("视频信息=%s" % miguData[0]['Detail'])
                        if videoInfo['code'] == '200' and 'body' in videoInfo and 'urlInfos' in videoInfo['body']:
                            for info in videoInfo['body']['urlInfos']:
                                result['video'].append({'rateDesc': info['rateDesc'], 'url': info['url'],
                                                        'mediaSize': info['mediaSize']})
                                print('清晰度=%s' % info['rateDesc'])
                                print('真实地址=%s' % info['url'])
                                print('视频大小=%s字节' % info['mediaSize'])
        else:
            result = {'msg': '不是合法播放地址'}
    except BaseException as e:
        result = {'msg': '程序异常'}
    return result

@ -1,46 +1,5 @@
from django.db import models
class param(models.Model):
    """Generic key/value configuration row.

    (Reconstructed from a side-by-side diff view that duplicated each line.)
    """
    param_name = models.CharField(max_length=10, primary_key=True)
    param_value = models.CharField(max_length=128)
class stat(models.Model):
    """Per-season danmaku/favorite/view counters (pk is the season id)."""
    id = models.IntegerField(primary_key=True)
    danmakus = models.IntegerField()
    favorites = models.IntegerField()
    views = models.IntegerField()
    # def json(self):
    #     return bangumi_stat(self['danmakus'],self['favorites'],self['views'])


class mediaInfo(models.Model):
    """Identity row linking bangumi/season/media ids to the Chinese title."""
    bangumi_id = models.IntegerField()
    season_id = models.IntegerField(primary_key=True)
    media_id = models.IntegerField()
    chn_name = models.CharField(max_length=128)

    def __str__(self) -> str:
        # Fix: the original returned a dict (TypeError — __str__ must return
        # str) and read `self.id`, which does not exist on this model since
        # season_id is the primary key; media_id is what the dict labels it as.
        return str({'media_id': self.media_id, 'chn_name': self.chn_name})


class bangumi_list(models.Model):
    """Snapshot of one entry from bilibili's global season index."""
    season_id = models.IntegerField(primary_key=True)
    badge = models.CharField(max_length=128)
    brief = models.CharField(max_length=128)
    copyright = models.CharField(max_length=128)
    cover = models.CharField(max_length=128)
    favorites = models.IntegerField()
    is_finish = models.IntegerField()
    newest_ep_index = models.IntegerField()
    pub_time = models.DateTimeField()
    season_status = models.IntegerField()
    title = models.CharField(max_length=128)
    total_count = models.IntegerField()
    trailer_aid = models.IntegerField()


class rule(models.Model):
    """User-defined PAC proxy rule: an adblock-style pattern keyed by itself."""
    regex = models.CharField(primary_key=True, max_length=128)

@ -1,778 +0,0 @@
# from PixivSearch.model.config import rule
import base64
import requests
from PixivSearch.model.config import rule
def create():
response = requests.get('https://raw.githubusercontent.com/gfwlist/gfwlist/master/gfwlist.txt')
if response.status_code == 200:
gfwlistText = (str)(base64.b64decode(response.content).decode('utf-8'))
gfwlist=gfwlistText.split('\n')[1:]
pacScript= 'var proxy = "PROXY 127.0.0.1:1080;";\n' \
'var rules = [\n'
for line in gfwlist:
if len(line)>0 and not line.startswith('!'):
# print(line)
pacScript+='"'+line+'",\n'
for line in rule.objects.all():
pacScript+='"'+line.regex+'",\n'
# print(pacScript[:-2])
pacScript=pacScript[:-2]+'\n];\n' \
'function createDict()\n' \
'{\n' \
' var result = {};\n' \
' result.__proto__ = null;\n' \
' return result;\n' \
'}\n' \
'\n' \
'function getOwnPropertyDescriptor(obj, key)\n' \
'{\n' \
' if (obj.hasOwnProperty(key))\n' \
' {\n' \
' return obj[key];\n' \
' }\n' \
' return null;\n' \
'}\n' \
'\n' \
'function extend(subclass, superclass, definition)\n' \
'{\n' \
' if (Object.__proto__)\n' \
' {\n' \
' definition.__proto__ = superclass.prototype;\n' \
' subclass.prototype = definition;\n' \
' }\n' \
' else\n' \
' {\n' \
' var tmpclass = function(){}, ret;\n' \
' tmpclass.prototype = superclass.prototype;\n' \
' subclass.prototype = new tmpclass();\n' \
' subclass.prototype.constructor = superclass;\n' \
' for (var i in definition)\n' \
' {\n' \
' if (definition.hasOwnProperty(i))\n' \
' {\n' \
' subclass.prototype[i] = definition[i];\n' \
' }\n' \
' }\n' \
' }\n' \
'}\n' \
'\n' \
'function Filter(text)\n' \
'{\n' \
' this.text = text;\n' \
' this.subscriptions = [];\n' \
'}\n' \
'Filter.prototype = {\n' \
' text: null,\n' \
' subscriptions: null,\n' \
' toString: function()\n' \
' {\n' \
' return this.text;\n' \
' }\n' \
'};\n' \
'Filter.knownFilters = createDict();\n' \
'Filter.elemhideRegExp = /^([^\/\*\|\@"!]*?)#(\@)?(?:([\w\-]+|\*)((?:\([\w\-]+(?:[$^*]?=[^\(\)"]*)?\))*)|#([^{}]+))$/;\n' \
'Filter.regexpRegExp = /^(@@)?\/.*\/(?:\$~?[\w\-]+(?:=[^,\s]+)?(?:,~?[\w\-]+(?:=[^,\s]+)?)*)?$/;\n' \
'Filter.optionsRegExp = /\$(~?[\w\-]+(?:=[^,\s]+)?(?:,~?[\w\-]+(?:=[^,\s]+)?)*)$/;\n' \
'Filter.fromText = function(text)\n' \
'{\n' \
' if (text in Filter.knownFilters)\n' \
' {\n' \
' return Filter.knownFilters[text];\n' \
' }\n' \
' var ret;\n' \
' if (text.charAt(0) == "!")\n' \
' {\n' \
' ret = new CommentFilter(text);\n' \
' }\n' \
' else\n' \
' {\n' \
' ret = RegExpFilter.fromText(text);\n' \
' }\n' \
' Filter.knownFilters[ret.text] = ret;\n' \
' return ret;\n' \
'};\n' \
'\n' \
'function InvalidFilter(text, reason)\n' \
'{\n' \
' Filter.call(this, text);\n' \
' this.reason = reason;\n' \
'}\n' \
'extend(InvalidFilter, Filter, {\n' \
' reason: null\n' \
'});\n' \
'\n' \
'function CommentFilter(text)\n' \
'{\n' \
' Filter.call(this, text);\n' \
'}\n' \
'extend(CommentFilter, Filter, {\n' \
'});\n' \
'\n' \
'function ActiveFilter(text, domains)\n' \
'{\n' \
' Filter.call(this, text);\n' \
' this.domainSource = domains;\n' \
'}\n' \
'extend(ActiveFilter, Filter, {\n' \
' domainSource: null,\n' \
' domainSeparator: null,\n' \
' ignoreTrailingDot: true,\n' \
' domainSourceIsUpperCase: false,\n' \
' getDomains: function()\n' \
' {\n' \
' var prop = getOwnPropertyDescriptor(this, "domains");\n' \
' if (prop)\n' \
' {\n' \
' return prop;\n' \
' }\n' \
' var domains = null;\n' \
' if (this.domainSource)\n' \
' {\n' \
' var source = this.domainSource;\n' \
' if (!this.domainSourceIsUpperCase)\n' \
' {\n' \
' source = source.toUpperCase();\n' \
' }\n' \
' var list = source.split(this.domainSeparator);\n' \
' if (list.length == 1 && (list[0]).charAt(0) != "~")\n' \
' {\n' \
' domains = createDict();\n' \
' domains[""] = false;\n' \
' if (this.ignoreTrailingDot)\n' \
' {\n' \
' list[0] = list[0].replace(/\.+$/, "");\n' \
' }\n' \
' domains[list[0]] = true;\n' \
' }\n' \
' else\n' \
' {\n' \
' var hasIncludes = false;\n' \
' for (var i = 0; i < list.length; i++)\n' \
' {\n' \
' var domain = list[i];\n' \
' if (this.ignoreTrailingDot)\n' \
' {\n' \
' domain = domain.replace(/\.+$/, "");\n' \
' }\n' \
' if (domain == "")\n' \
' {\n' \
' continue;\n' \
' }\n' \
' var include;\n' \
' if (domain.charAt(0) == "~")\n' \
' {\n' \
' include = false;\n' \
' domain = domain.substr(1);\n' \
' }\n' \
' else\n' \
' {\n' \
' include = true;\n' \
' hasIncludes = true;\n' \
' }\n' \
' if (!domains)\n' \
' {\n' \
' domains = createDict();\n' \
' }\n' \
' domains[domain] = include;\n' \
' }\n' \
' domains[""] = !hasIncludes;\n' \
' }\n' \
' this.domainSource = null;\n' \
' }\n' \
' return this.domains;\n' \
' },\n' \
' sitekeys: null,\n' \
' isActiveOnDomain: function(docDomain, sitekey)\n' \
' {\n' \
' if (this.getSitekeys() && (!sitekey || this.getSitekeys().indexOf(sitekey.toUpperCase()) < 0))\n' \
' {\n' \
' return false;\n' \
' }\n' \
' if (!this.getDomains())\n' \
' {\n' \
' return true;\n' \
' }\n' \
' if (!docDomain)\n' \
' {\n' \
' return this.getDomains()[""];\n' \
' }\n' \
' if (this.ignoreTrailingDot)\n' \
' {\n' \
' docDomain = docDomain.replace(/\.+$/, "");\n' \
' }\n' \
' docDomain = docDomain.toUpperCase();\n' \
' while (true)\n' \
' {\n' \
' if (docDomain in this.getDomains())\n' \
' {\n' \
' return this.domains[docDomain];\n' \
' }\n' \
' var nextDot = docDomain.indexOf(".");\n' \
' if (nextDot < 0)\n' \
' {\n' \
' break;\n' \
' }\n' \
' docDomain = docDomain.substr(nextDot + 1);\n' \
' }\n' \
' return this.domains[""];\n' \
' },\n' \
' isActiveOnlyOnDomain: function(docDomain)\n' \
' {\n' \
' if (!docDomain || !this.getDomains() || this.getDomains()[""])\n' \
' {\n' \
' return false;\n' \
' }\n' \
' if (this.ignoreTrailingDot)\n' \
' {\n' \
' docDomain = docDomain.replace(/\.+$/, "");\n' \
' }\n' \
' docDomain = docDomain.toUpperCase();\n' \
' for (var domain in this.getDomains())\n' \
' {\n' \
' if (this.domains[domain] && domain != docDomain && (domain.length <= docDomain.length || domain.indexOf("." + docDomain) != domain.length - docDomain.length - 1))\n' \
' {\n' \
' return false;\n' \
' }\n' \
' }\n' \
' return true;\n' \
' }\n' \
'});\n' \
'\n' \
'function RegExpFilter(text, regexpSource, contentType, matchCase, domains, thirdParty, sitekeys)\n' \
'{\n' \
' ActiveFilter.call(this, text, domains, sitekeys);\n' \
' if (contentType != null)\n' \
' {\n' \
' this.contentType = contentType;\n' \
' }\n' \
' if (matchCase)\n' \
' {\n' \
' this.matchCase = matchCase;\n' \
' }\n' \
' if (thirdParty != null)\n' \
' {\n' \
' this.thirdParty = thirdParty;\n' \
' }\n' \
' if (sitekeys != null)\n' \
' {\n' \
' this.sitekeySource = sitekeys;\n' \
' }\n' \
' if (regexpSource.length >= 2 && regexpSource.charAt(0) == "/" && regexpSource.charAt(regexpSource.length - 1) == "/")\n' \
' {\n' \
' var regexp = new RegExp(regexpSource.substr(1, regexpSource.length - 2), this.matchCase ? "" : "i");\n' \
' this.regexp = regexp;\n' \
' }\n' \
' else\n' \
' {\n' \
' this.regexpSource = regexpSource;\n' \
' }\n' \
'}\n' \
'extend(RegExpFilter, ActiveFilter, {\n' \
' domainSourceIsUpperCase: true,\n' \
' length: 1,\n' \
' domainSeparator: "|",\n' \
' regexpSource: null,\n' \
' getRegexp: function()\n' \
' {\n' \
' var prop = getOwnPropertyDescriptor(this, "regexp");\n' \
' if (prop)\n' \
' {\n' \
' return prop;\n' \
' }\n' \
' var source = this.regexpSource.replace(/\*+/g, "*").replace(/\^\|$/, "^").replace(/\W/g, "\\$&").replace(/\\\*/g, ".*").replace(/\\\^/g, "(?:[\\x00-\\x24\\x26-\\x2C\\x2F\\x3A-\\x40\\x5B-\\x5E\\x60\\x7B-\\x7F]|$)").replace(/^\\\|\\\|/, "^[\\w\\-]+:\\/+(?!\\/)(?:[^\\/]+\\.)?").replace(/^\\\|/, "^").replace(/\\\|$/, "$").replace(/^(\.\*)/, "").replace(/(\.\*)$/, "");\n' \
' var regexp = new RegExp(source, this.matchCase ? "" : "i");\n' \
' this.regexp = regexp;\n' \
' return regexp;\n' \
' },\n' \
' contentType: 2147483647,\n' \
' matchCase: false,\n' \
' thirdParty: null,\n' \
' sitekeySource: null,\n' \
' getSitekeys: function()\n' \
' {\n' \
' var prop = getOwnPropertyDescriptor(this, "sitekeys");\n' \
' if (prop)\n' \
' {\n' \
' return prop;\n' \
' }\n' \
' var sitekeys = null;\n' \
' if (this.sitekeySource)\n' \
' {\n' \
' sitekeys = this.sitekeySource.split("|");\n' \
' this.sitekeySource = null;\n' \
' }\n' \
' this.sitekeys = sitekeys;\n' \
' return this.sitekeys;\n' \
' },\n' \
' matches: function(location, contentType, docDomain, thirdParty, sitekey)\n' \
' {\n' \
' if (this.getRegexp().test(location) && this.isActiveOnDomain(docDomain, sitekey))\n' \
' {\n' \
' return true;\n' \
' }\n' \
' return false;\n' \
' }\n' \
'});\n' \
'RegExpFilter.prototype["0"] = "#this";\n' \
'RegExpFilter.fromText = function(text)\n' \
'{\n' \
' var blocking = true;\n' \
' var origText = text;\n' \
' if (text.indexOf("@@") == 0)\n' \
' {\n' \
' blocking = false;\n' \
' text = text.substr(2);\n' \
' }\n' \
' var contentType = null;\n' \
' var matchCase = null;\n' \
' var domains = null;\n' \
' var sitekeys = null;\n' \
' var thirdParty = null;\n' \
' var collapse = null;\n' \
' var options;\n' \
' var match = text.indexOf("$") >= 0 ? Filter.optionsRegExp.exec(text) : null;\n' \
' if (match)\n' \
' {\n' \
' options = match[1].toUpperCase().split(",");\n' \
' text = match.input.substr(0, match.index);\n' \
' for (var _loopIndex6 = 0; _loopIndex6 < options.length; ++_loopIndex6)\n' \
' {\n' \
' var option = options[_loopIndex6];\n' \
' var value = null;\n' \
' var separatorIndex = option.indexOf("=");\n' \
' if (separatorIndex >= 0)\n' \
' {\n' \
' value = option.substr(separatorIndex + 1);\n' \
' option = option.substr(0, separatorIndex);\n' \
' }\n' \
' option = option.replace(/-/, "_");\n' \
' if (option in RegExpFilter.typeMap)\n' \
' {\n' \
' if (contentType == null)\n' \
' {\n' \
' contentType = 0;\n' \
' }\n' \
' contentType |= RegExpFilter.typeMap[option];\n' \
' }\n' \
' else if (option.charAt(0) == "~" && option.substr(1) in RegExpFilter.typeMap)\n' \
' {\n' \
' if (contentType == null)\n' \
' {\n' \
' contentType = RegExpFilter.prototype.contentType;\n' \
' }\n' \
' contentType &= ~RegExpFilter.typeMap[option.substr(1)];\n' \
' }\n' \
' else if (option == "MATCH_CASE")\n' \
' {\n' \
' matchCase = true;\n' \
' }\n' \
' else if (option == "~MATCH_CASE")\n' \
' {\n' \
' matchCase = false;\n' \
' }\n' \
' else if (option == "DOMAIN" && typeof value != "undefined")\n' \
' {\n' \
' domains = value;\n' \
' }\n' \
' else if (option == "THIRD_PARTY")\n' \
' {\n' \
' thirdParty = true;\n' \
' }\n' \
' else if (option == "~THIRD_PARTY")\n' \
' {\n' \
' thirdParty = false;\n' \
' }\n' \
' else if (option == "COLLAPSE")\n' \
' {\n' \
' collapse = true;\n' \
' }\n' \
' else if (option == "~COLLAPSE")\n' \
' {\n' \
' collapse = false;\n' \
' }\n' \
' else if (option == "SITEKEY" && typeof value != "undefined")\n' \
' {\n' \
' sitekeys = value;\n' \
' }\n' \
' else\n' \
' {\n' \
' return new InvalidFilter(origText, "Unknown option " + option.toLowerCase());\n' \
' }\n' \
' }\n' \
' }\n' \
' if (!blocking && (contentType == null || contentType & RegExpFilter.typeMap.DOCUMENT) && (!options || options.indexOf("DOCUMENT") < 0) && !/^\|?[\w\-]+:/.test(text))\n' \
' {\n' \
' if (contentType == null)\n' \
' {\n' \
' contentType = RegExpFilter.prototype.contentType;\n' \
' }\n' \
' contentType &= ~RegExpFilter.typeMap.DOCUMENT;\n' \
' }\n' \
' try\n' \
' {\n' \
' if (blocking)\n' \
' {\n' \
' return new BlockingFilter(origText, text, contentType, matchCase, domains, thirdParty, sitekeys, collapse);\n' \
' }\n' \
' else\n' \
' {\n' \
' return new WhitelistFilter(origText, text, contentType, matchCase, domains, thirdParty, sitekeys);\n' \
' }\n' \
' }\n' \
' catch (e)\n' \
' {\n' \
' return new InvalidFilter(origText, e);\n' \
' }\n' \
'};\n' \
'RegExpFilter.typeMap = {\n' \
' OTHER: 1,\n' \
' SCRIPT: 2,\n' \
' IMAGE: 4,\n' \
' STYLESHEET: 8,\n' \
' OBJECT: 16,\n' \
' SUBDOCUMENT: 32,\n' \
' DOCUMENT: 64,\n' \
' XBL: 1,\n' \
' PING: 1,\n' \
' XMLHTTPREQUEST: 2048,\n' \
' OBJECT_SUBREQUEST: 4096,\n' \
' DTD: 1,\n' \
' MEDIA: 16384,\n' \
' FONT: 32768,\n' \
' BACKGROUND: 4,\n' \
' POPUP: 268435456,\n' \
' ELEMHIDE: 1073741824\n' \
'};\n' \
'RegExpFilter.prototype.contentType &= ~ (RegExpFilter.typeMap.ELEMHIDE | RegExpFilter.typeMap.POPUP);\n' \
'\n' \
'function BlockingFilter(text, regexpSource, contentType, matchCase, domains, thirdParty, sitekeys, collapse)\n' \
'{\n' \
' RegExpFilter.call(this, text, regexpSource, contentType, matchCase, domains, thirdParty, sitekeys);\n' \
' this.collapse = collapse;\n' \
'}\n' \
'extend(BlockingFilter, RegExpFilter, {\n' \
' collapse: null\n' \
'});\n' \
'\n' \
'function WhitelistFilter(text, regexpSource, contentType, matchCase, domains, thirdParty, sitekeys)\n' \
'{\n' \
' RegExpFilter.call(this, text, regexpSource, contentType, matchCase, domains, thirdParty, sitekeys);\n' \
'}\n' \
'extend(WhitelistFilter, RegExpFilter, {\n' \
'});\n' \
'\n' \
'function Matcher()\n' \
'{\n' \
' this.clear();\n' \
'}\n' \
'Matcher.prototype = {\n' \
' filterByKeyword: null,\n' \
' keywordByFilter: null,\n' \
' clear: function()\n' \
' {\n' \
' this.filterByKeyword = createDict();\n' \
' this.keywordByFilter = createDict();\n' \
' },\n' \
' add: function(filter)\n' \
' {\n' \
' if (filter.text in this.keywordByFilter)\n' \
' {\n' \
' return;\n' \
' }\n' \
' var keyword = this.findKeyword(filter);\n' \
' var oldEntry = this.filterByKeyword[keyword];\n' \
' if (typeof oldEntry == "undefined")\n' \
' {\n' \
' this.filterByKeyword[keyword] = filter;\n' \
' }\n' \
' else if (oldEntry.length == 1)\n' \
' {\n' \
' this.filterByKeyword[keyword] = [oldEntry, filter];\n' \
' }\n' \
' else\n' \
' {\n' \
' oldEntry.push(filter);\n' \
' }\n' \
' this.keywordByFilter[filter.text] = keyword;\n' \
' },\n' \
' remove: function(filter)\n' \
' {\n' \
' if (!(filter.text in this.keywordByFilter))\n' \
' {\n' \
' return;\n' \
' }\n' \
' var keyword = this.keywordByFilter[filter.text];\n' \
' var list = this.filterByKeyword[keyword];\n' \
' if (list.length <= 1)\n' \
' {\n' \
' delete this.filterByKeyword[keyword];\n' \
' }\n' \
' else\n' \
' {\n' \
' var index = list.indexOf(filter);\n' \
' if (index >= 0)\n' \
' {\n' \
' list.splice(index, 1);\n' \
' if (list.length == 1)\n' \
' {\n' \
' this.filterByKeyword[keyword] = list[0];\n' \
' }\n' \
' }\n' \
' }\n' \
' delete this.keywordByFilter[filter.text];\n' \
' },\n' \
' findKeyword: function(filter)\n' \
' {\n' \
' var result = "";\n' \
' var text = filter.text;\n' \
' if (Filter.regexpRegExp.test(text))\n' \
' {\n' \
' return result;\n' \
' }\n' \
' var match = Filter.optionsRegExp.exec(text);\n' \
' if (match)\n' \
' {\n' \
' text = match.input.substr(0, match.index);\n' \
' }\n' \
' if (text.substr(0, 2) == "@@")\n' \
' {\n' \
' text = text.substr(2);\n' \
' }\n' \
' var candidates = text.toLowerCase().match(/[^a-z0-9%*][a-z0-9%]{3,}(?=[^a-z0-9%*])/g);\n' \
' if (!candidates)\n' \
' {\n' \
' return result;\n' \
' }\n' \
' var hash = this.filterByKeyword;\n' \
' var resultCount = 16777215;\n' \
' var resultLength = 0;\n' \
' for (var i = 0, l = candidates.length; i < l; i++)\n' \
' {\n' \
' var candidate = candidates[i].substr(1);\n' \
' var count = candidate in hash ? hash[candidate].length : 0;\n' \
' if (count < resultCount || count == resultCount && candidate.length > resultLength)\n' \
' {\n' \
' result = candidate;\n' \
' resultCount = count;\n' \
' resultLength = candidate.length;\n' \
' }\n' \
' }\n' \
' return result;\n' \
' },\n' \
' hasFilter: function(filter)\n' \
' {\n' \
' return filter.text in this.keywordByFilter;\n' \
' },\n' \
' getKeywordForFilter: function(filter)\n' \
' {\n' \
' if (filter.text in this.keywordByFilter)\n' \
' {\n' \
' return this.keywordByFilter[filter.text];\n' \
' }\n' \
' else\n' \
' {\n' \
' return null;\n' \
' }\n' \
' },\n' \
' _checkEntryMatch: function(keyword, location, contentType, docDomain, thirdParty, sitekey)\n' \
' {\n' \
' var list = this.filterByKeyword[keyword];\n' \
' for (var i = 0; i < list.length; i++)\n' \
' {\n' \
' var filter = list[i];\n' \
' if (filter == "#this")\n' \
' {\n' \
' filter = list;\n' \
' }\n' \
' if (filter.matches(location, contentType, docDomain, thirdParty, sitekey))\n' \
' {\n' \
' return filter;\n' \
' }\n' \
' }\n' \
' return null;\n' \
' },\n' \
' matchesAny: function(location, contentType, docDomain, thirdParty, sitekey)\n' \
' {\n' \
' var candidates = location.toLowerCase().match(/[a-z0-9%]{3,}/g);\n' \
' if (candidates === null)\n' \
' {\n' \
' candidates = [];\n' \
' }\n' \
' candidates.push("");\n' \
' for (var i = 0, l = candidates.length; i < l; i++)\n' \
' {\n' \
' var substr = candidates[i];\n' \
' if (substr in this.filterByKeyword)\n' \
' {\n' \
' var result = this._checkEntryMatch(substr, location, contentType, docDomain, thirdParty, sitekey);\n' \
' if (result)\n' \
' {\n' \
' return result;\n' \
' }\n' \
' }\n' \
' }\n' \
' return null;\n' \
' }\n' \
'};\n' \
'\n' \
'function CombinedMatcher()\n' \
'{\n' \
' this.blacklist = new Matcher();\n' \
' this.whitelist = new Matcher();\n' \
' this.resultCache = createDict();\n' \
'}\n' \
'CombinedMatcher.maxCacheEntries = 1000;\n' \
'CombinedMatcher.prototype = {\n' \
' blacklist: null,\n' \
' whitelist: null,\n' \
' resultCache: null,\n' \
' cacheEntries: 0,\n' \
' clear: function()\n' \
' {\n' \
' this.blacklist.clear();\n' \
' this.whitelist.clear();\n' \
' this.resultCache = createDict();\n' \
' this.cacheEntries = 0;\n' \
' },\n' \
' add: function(filter)\n' \
' {\n' \
' if (filter instanceof WhitelistFilter)\n' \
' {\n' \
' this.whitelist.add(filter);\n' \
' }\n' \
' else\n' \
' {\n' \
' this.blacklist.add(filter);\n' \
' }\n' \
' if (this.cacheEntries > 0)\n' \
' {\n' \
' this.resultCache = createDict();\n' \
' this.cacheEntries = 0;\n' \
' }\n' \
' },\n' \
' remove: function(filter)\n' \
' {\n' \
' if (filter instanceof WhitelistFilter)\n' \
' {\n' \
' this.whitelist.remove(filter);\n' \
' }\n' \
' else\n' \
' {\n' \
' this.blacklist.remove(filter);\n' \
' }\n' \
' if (this.cacheEntries > 0)\n' \
' {\n' \
' this.resultCache = createDict();\n' \
' this.cacheEntries = 0;\n' \
' }\n' \
' },\n' \
' findKeyword: function(filter)\n' \
' {\n' \
' if (filter instanceof WhitelistFilter)\n' \
' {\n' \
' return this.whitelist.findKeyword(filter);\n' \
' }\n' \
' else\n' \
' {\n' \
' return this.blacklist.findKeyword(filter);\n' \
' }\n' \
' },\n' \
' hasFilter: function(filter)\n' \
' {\n' \
' if (filter instanceof WhitelistFilter)\n' \
' {\n' \
' return this.whitelist.hasFilter(filter);\n' \
' }\n' \
' else\n' \
' {\n' \
' return this.blacklist.hasFilter(filter);\n' \
' }\n' \
' },\n' \
' getKeywordForFilter: function(filter)\n' \
' {\n' \
' if (filter instanceof WhitelistFilter)\n' \
' {\n' \
' return this.whitelist.getKeywordForFilter(filter);\n' \
' }\n' \
' else\n' \
' {\n' \
' return this.blacklist.getKeywordForFilter(filter);\n' \
' }\n' \
' },\n' \
' isSlowFilter: function(filter)\n' \
' {\n' \
' var matcher = filter instanceof WhitelistFilter ? this.whitelist : this.blacklist;\n' \
' if (matcher.hasFilter(filter))\n' \
' {\n' \
' return !matcher.getKeywordForFilter(filter);\n' \
' }\n' \
' else\n' \
' {\n' \
' return !matcher.findKeyword(filter);\n' \
' }\n' \
' },\n' \
' matchesAnyInternal: function(location, contentType, docDomain, thirdParty, sitekey)\n' \
' {\n' \
' var candidates = location.toLowerCase().match(/[a-z0-9%]{3,}/g);\n' \
' if (candidates === null)\n' \
' {\n' \
' candidates = [];\n' \
' }\n' \
' candidates.push("");\n' \
' var blacklistHit = null;\n' \
' for (var i = 0, l = candidates.length; i < l; i++)\n' \
' {\n' \
' var substr = candidates[i];\n' \
' if (substr in this.whitelist.filterByKeyword)\n' \
' {\n' \
' var result = this.whitelist._checkEntryMatch(substr, location, contentType, docDomain, thirdParty, sitekey);\n' \
' if (result)\n' \
' {\n' \
' return result;\n' \
' }\n' \
' }\n' \
' if (substr in this.blacklist.filterByKeyword && blacklistHit === null)\n' \
' {\n' \
' blacklistHit = this.blacklist._checkEntryMatch(substr, location, contentType, docDomain, thirdParty, sitekey);\n' \
' }\n' \
' }\n' \
' return blacklistHit;\n' \
' },\n' \
' matchesAny: function(location, docDomain)\n' \
' {\n' \
' var key = location + " " + docDomain + " ";\n' \
' if (key in this.resultCache)\n' \
' {\n' \
' return this.resultCache[key];\n' \
' }\n' \
' var result = this.matchesAnyInternal(location, 0, docDomain, null, null);\n' \
' if (this.cacheEntries >= CombinedMatcher.maxCacheEntries)\n' \
' {\n' \
' this.resultCache = createDict();\n' \
' this.cacheEntries = 0;\n' \
' }\n' \
' this.resultCache[key] = result;\n' \
' this.cacheEntries++;\n' \
' return result;\n' \
' }\n' \
'};\n' \
'var defaultMatcher = new CombinedMatcher();\n' \
'\n' \
'var direct = \'DIRECT;\';\n' \
'\n' \
'for (var i = 0; i < rules.length; i++) {\n' \
' defaultMatcher.add(Filter.fromText(rules[i]));\n' \
'}\n' \
'\n' \
'function FindProxyForURL(url, host) {\n' \
' if (defaultMatcher.matchesAny(url, host) instanceof BlockingFilter) {\n' \
' return proxy;\n' \
' }\n' \
' return direct;\n' \
'}\n'
return pacScript
if __name__ == '__main__':
    # Manual check: emit the generated PAC script to stdout for inspection.
    generated_pac = create()
    print(generated_pac)

@ -1,16 +0,0 @@
from PixivSearch.model.config import rule
def insert(value):
    """Persist *value* as a new proxy-rule regex row."""
    new_rule = rule(regex=value)
    new_rule.save()
def delete(value):
    """Remove every stored rule whose regex equals *value*."""
    matched = select(value)
    matched.delete()
def select(value=None):
    """Return rule rows.

    With no argument, return the full queryset of rules; otherwise return
    only the rules whose ``regex`` field equals *value*.
    """
    # PEP 8: comparisons to None must use identity, not equality.
    if value is None:
        return rule.objects.all()
    return rule.objects.filter(regex=value)

@ -24,7 +24,7 @@ headers = {
lock = threading.Lock() # 多线程全局资源锁 lock = threading.Lock() # 多线程全局资源锁
total = 1 total = 1
address = '192.168.0.21:8118' address = '127.0.0.1:8118'
# proxies = {"http": "socks5://{0}".format(address), "https": "socks5://{0}".format(address)} # proxies = {"http": "socks5://{0}".format(address), "https": "socks5://{0}".format(address)}
proxies = {"http": "http://{0}".format(address), "https": "https://{0}".format(address)} proxies = {"http": "http://{0}".format(address), "https": "https://{0}".format(address)}

@ -1,23 +0,0 @@
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket, TTransport
from PixivSearch.thrift.QueryComment.QueryComment import Client
if __name__ == '__main__':
    # Connect to the remote QueryComment thrift service.
    socket = TSocket.TSocket('sukura.top', 2233)
    # Framed transport is required to talk to a TNonblockingServer.
    transport = TTransport.TFramedTransport(socket)
    if not transport.isOpen():
        transport.open()
    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    client = Client(protocol)
    cids = [7636499, 7636501, 7636500, 7636503, 7636504, 7636502, 7636509, 7636508, 7636506, 7636507, 7636505]
    # Bug fix: the generated Client exposes commentSumList (batch counts);
    # there is no commentSumMap method, so the original call raised
    # AttributeError before any RPC was sent.
    print(client.commentSumList(cids))

@ -1,131 +0,0 @@
#!/usr/bin/env python
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
# Command-line driver for the QueryComment service: parses connection
# options from argv, opens a thrift transport, invokes one RPC named on
# the command line, and pretty-prints the result.

import sys
import pprint

if sys.version_info[0] > 2:
    from urllib.parse import urlparse
else:
    from urlparse import urlparse

from thrift.transport import TTransport, TSocket, TSSLSocket, THttpClient
from thrift.protocol.TBinaryProtocol import TBinaryProtocol

from QueryComment import QueryComment
from QueryComment.ttypes import *

# No arguments (or --help): print usage and the available RPC signatures.
if len(sys.argv) <= 1 or sys.argv[1] == '--help':
    print('')
    print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] [-novalidate] [-ca_certs certs] [-keyfile keyfile] [-certfile certfile] function [arg1 [arg2...]]')
    print('')
    print('Functions:')
    print(' i32 commentSum(i32 cid)')
    print(' commentSumList( cids)')
    print(' string download( cids, string fileName)')
    print('')
    sys.exit(0)

# Connection defaults; overridden by the flags parsed below.
pp = pprint.PrettyPrinter(indent=2)
host = 'localhost'
port = 9090
uri = ''
framed = False
ssl = False
validate = True
ca_certs = None
keyfile = None
certfile = None
http = False
argi = 1

if sys.argv[argi] == '-h':
    parts = sys.argv[argi + 1].split(':')
    host = parts[0]
    if len(parts) > 1:
        port = int(parts[1])
    argi += 2

if sys.argv[argi] == '-u':
    url = urlparse(sys.argv[argi + 1])
    parts = url[1].split(':')
    host = parts[0]
    if len(parts) > 1:
        port = int(parts[1])
    else:
        port = 80
    uri = url[2]
    if url[4]:
        uri += '?%s' % url[4]
    http = True
    argi += 2

if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
    framed = True
    argi += 1

if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':
    ssl = True
    argi += 1

if sys.argv[argi] == '-novalidate':
    validate = False
    argi += 1

if sys.argv[argi] == '-ca_certs':
    ca_certs = sys.argv[argi+1]
    argi += 2

if sys.argv[argi] == '-keyfile':
    keyfile = sys.argv[argi+1]
    argi += 2

if sys.argv[argi] == '-certfile':
    certfile = sys.argv[argi+1]
    argi += 2

# Everything after the flags: the RPC name and its literal arguments.
cmd = sys.argv[argi]
args = sys.argv[argi + 1:]

# Build the transport stack selected by the flags (HTTP / SSL / framed).
if http:
    transport = THttpClient.THttpClient(host, port, uri)
else:
    if ssl:
        socket = TSSLSocket.TSSLSocket(host, port, validate=validate, ca_certs=ca_certs, keyfile=keyfile, certfile=certfile)
    else:
        socket = TSocket.TSocket(host, port)
    if framed:
        transport = TTransport.TFramedTransport(socket)
    else:
        transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol(transport)
client = QueryComment.Client(protocol)
transport.open()

# Dispatch to the requested RPC; arguments are eval'd from the command line
# (generated code — safe only for trusted, interactive use).
if cmd == 'commentSum':
    if len(args) != 1:
        print('commentSum requires 1 args')
        sys.exit(1)
    pp.pprint(client.commentSum(eval(args[0]),))

elif cmd == 'commentSumList':
    if len(args) != 1:
        print('commentSumList requires 1 args')
        sys.exit(1)
    pp.pprint(client.commentSumList(eval(args[0]),))

elif cmd == 'download':
    if len(args) != 2:
        print('download requires 2 args')
        sys.exit(1)
    pp.pprint(client.download(eval(args[0]), args[1],))

else:
    print('Unrecognized method %s' % cmd)
    sys.exit(1)

transport.close()

@ -1,660 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
    """Abstract service interface for QueryComment (thrift-generated)."""

    def commentSum(self, cid):
        """
        Query the danmaku (comment) count for a single cid.

        Parameters:
         - cid
        """
        pass

    def commentSumList(self, cids):
        """
        Batch query: danmaku counts for several cids at once.

        Parameters:
         - cids
        """
        pass

    def download(self, cids, fileName):
        """
        Batch download danmaku files.

        Parameters:
         - cids
         - fileName
        """
        pass
class Client(Iface):
    """Client-side stub for the QueryComment service.

    Each RPC serializes its arguments (send_*), flushes the transport,
    then blocks until the matching result message is read back (recv_*).
    """

    def __init__(self, iprot, oprot=None):
        # A single protocol may serve both directions; oprot overrides output.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def commentSum(self, cid):
        """
        Query the danmaku (comment) count for a single cid.

        Parameters:
         - cid
        """
        self.send_commentSum(cid)
        return self.recv_commentSum()

    def send_commentSum(self, cid):
        self._oprot.writeMessageBegin('commentSum', TMessageType.CALL, self._seqid)
        args = commentSum_args()
        args.cid = cid
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_commentSum(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            # Server-side failure: re-raise it locally.
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = commentSum_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "commentSum failed: unknown result")

    def commentSumList(self, cids):
        """
        Batch query: danmaku counts for several cids at once.

        Parameters:
         - cids
        """
        self.send_commentSumList(cids)
        return self.recv_commentSumList()

    def send_commentSumList(self, cids):
        self._oprot.writeMessageBegin('commentSumList', TMessageType.CALL, self._seqid)
        args = commentSumList_args()
        args.cids = cids
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_commentSumList(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = commentSumList_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "commentSumList failed: unknown result")

    def download(self, cids, fileName):
        """
        Batch download danmaku files.

        Parameters:
         - cids
         - fileName
        """
        self.send_download(cids, fileName)
        return self.recv_download()

    def send_download(self, cids, fileName):
        self._oprot.writeMessageBegin('download', TMessageType.CALL, self._seqid)
        args = download_args()
        args.cids = cids
        args.fileName = fileName
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_download(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = download_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "download failed: unknown result")
class Processor(Iface, TProcessor):
    """Server-side dispatcher: reads a message, routes it by name to the
    handler, and writes back the result or a serialized exception."""

    def __init__(self, handler):
        self._handler = handler
        # Method-name -> unbound processing function.
        self._processMap = {}
        self._processMap["commentSum"] = Processor.process_commentSum
        self._processMap["commentSumList"] = Processor.process_commentSumList
        self._processMap["download"] = Processor.process_download

    def process(self, iprot, oprot):
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            # Unknown RPC: drain the payload and answer with an exception.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_commentSum(self, seqid, iprot, oprot):
        args = commentSum_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = commentSum_result()
        try:
            result.success = self._handler.commentSum(args.cid)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport problems must propagate; the connection is unusable.
            raise
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("commentSum", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_commentSumList(self, seqid, iprot, oprot):
        args = commentSumList_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = commentSumList_result()
        try:
            result.success = self._handler.commentSumList(args.cids)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("commentSumList", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_download(self, seqid, iprot, oprot):
        args = download_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = download_result()
        try:
            result.success = self._handler.download(args.cids, args.fileName)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("download", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
# Wire-format wrapper for the arguments of the commentSum RPC.
class commentSum_args(object):
    """
    Attributes:
     - cid
    """

    def __init__(self, cid=None,):
        self.cid = cid

    def read(self, iprot):
        # Fast path: C-accelerated decode when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I32:
                    self.cid = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('commentSum_args')
        if self.cid is not None:
            oprot.writeFieldBegin('cid', TType.I32, 1)
            oprot.writeI32(self.cid)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(commentSum_args)
commentSum_args.thrift_spec = (
    None,  # 0
    (1, TType.I32, 'cid', None, None, ),  # 1
)
# Wire-format wrapper for the return value of the commentSum RPC.
class commentSum_result(object):
    """
    Attributes:
     - success
    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.I32:
                    self.success = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('commentSum_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.I32, 0)
            oprot.writeI32(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(commentSum_result)
commentSum_result.thrift_spec = (
    (0, TType.I32, 'success', None, None, ),  # 0
)
# Wire-format wrapper for the arguments of the commentSumList RPC.
class commentSumList_args(object):
    """
    Attributes:
     - cids
    """

    def __init__(self, cids=None,):
        self.cids = cids

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    # Field 1: list<i32> of cids.
                    self.cids = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in range(_size0):
                        _elem5 = iprot.readI32()
                        self.cids.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('commentSumList_args')
        if self.cids is not None:
            oprot.writeFieldBegin('cids', TType.LIST, 1)
            oprot.writeListBegin(TType.I32, len(self.cids))
            for iter6 in self.cids:
                oprot.writeI32(iter6)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(commentSumList_args)
commentSumList_args.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'cids', (TType.I32, None, False), None, ),  # 1
)
# Wire-format wrapper for the return value of the commentSumList RPC.
class commentSumList_result(object):
    """
    Attributes:
     - success
    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    # Field 0: list<i32> of per-cid comment counts.
                    self.success = []
                    (_etype10, _size7) = iprot.readListBegin()
                    for _i11 in range(_size7):
                        _elem12 = iprot.readI32()
                        self.success.append(_elem12)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('commentSumList_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.I32, len(self.success))
            for iter13 in self.success:
                oprot.writeI32(iter13)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(commentSumList_result)
commentSumList_result.thrift_spec = (
    (0, TType.LIST, 'success', (TType.I32, None, False), None, ),  # 0
)
# Wire-format wrapper for the arguments of the download RPC.
class download_args(object):
    """
    Attributes:
     - cids
     - fileName
    """

    def __init__(self, cids=None, fileName=None,):
        self.cids = cids
        self.fileName = fileName

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.LIST:
                    # Field 1: list<i32> of cids to download.
                    self.cids = []
                    (_etype17, _size14) = iprot.readListBegin()
                    for _i18 in range(_size14):
                        _elem19 = iprot.readI32()
                        self.cids.append(_elem19)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    # Field 2: target file name (UTF-8 decoded on Python 2).
                    self.fileName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('download_args')
        if self.cids is not None:
            oprot.writeFieldBegin('cids', TType.LIST, 1)
            oprot.writeListBegin(TType.I32, len(self.cids))
            for iter20 in self.cids:
                oprot.writeI32(iter20)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.fileName is not None:
            oprot.writeFieldBegin('fileName', TType.STRING, 2)
            oprot.writeString(self.fileName.encode('utf-8') if sys.version_info[0] == 2 else self.fileName)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(download_args)
download_args.thrift_spec = (
    None,  # 0
    (1, TType.LIST, 'cids', (TType.I32, None, False), None, ),  # 1
    (2, TType.STRING, 'fileName', 'UTF8', None, ),  # 2
)
# Wire-format wrapper for the return value of the download RPC
# (the generated zip file path, or unset on failure).
class download_result(object):
    """
    Attributes:
     - success
    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRING:
                    self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('download_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRING, 0)
            oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


all_structs.append(download_result)
download_result.thrift_spec = (
    (0, TType.STRING, 'success', 'UTF8', None, ),  # 0
)
fix_spec(all_structs)  # resolve forward references inside the generated specs
del all_structs  # registration list is no longer needed after fix-up

@ -1,48 +0,0 @@
import os
import sys
from thrift.server.TNonblockingServer import TNonblockingServer
from thrift.transport import TSocket
sys.path.append('/root/PixivSearch')
from PixivSearch.util import Util
from PixivSearch.dao.Comment import xml, downloadXml
from PixivSearch.thrift.QueryComment import QueryComment
from PixivSearch.thrift.QueryComment.QueryComment import Iface
# Directory where downloaded danmaku XML files and zip bundles are written.
commentPath = '/root/PixivSearch/PixivSearch/thrift/tmpFile/comment'
# commentPath = '/home/hua/下载/comment'  # local development path
class Server(Iface):
    """Thrift handler implementing the QueryComment service RPCs."""

    def commentSumList(self, cids):
        """Return the danmaku (comment) count for each cid, in input order."""
        result = []
        for cid in cids:
            comment_selector = xml('https://comment.bilibili.com/{cid}.xml'.format(cid=cid))
            length = len(comment_selector.xpath('//i//d/text()'))
            print('cid:{cid},弹幕数:{length}'.format(cid=cid, length=length))
            result.append(length)
        return result

    def commentSum(self, cid):
        """Return the danmaku count for a single cid (delegates to the batch RPC)."""
        # Fixed: removed the stray trailing semicolon from the original.
        return self.commentSumList([cid])[0]

    def download(self, cids, fileName):
        """Download danmaku XML for every cid and bundle the files into a zip.

        Returns the zip file path on success, otherwise None.
        """
        path = '{commentPath}/{fileName}'.format(commentPath=commentPath, fileName=fileName)
        for cid in cids:
            # NOTE(review): 'histroy' is the (misspelled) keyword defined by the
            # downloadXml project helper; keep it until that API is renamed.
            downloadXml(path, cid, histroy=False)
        zipFile = '{commentPath}/{fileName}.zip'.format(commentPath=commentPath, fileName=fileName)
        print(zipFile)
        Util.zip(path, zipFile)
        if os.path.isfile(zipFile):
            print('压缩包成功生成到{zipFile}'.format(zipFile=zipFile))
            return zipFile
        else:
            return None
if __name__ == '__main__':
    # Expose the QueryComment service on port 2233 via the non-blocking server.
    socket = TSocket.TServerSocket(port=2233)
    processor = QueryComment.Processor(Server())
    server = TNonblockingServer(processor, socket)
    server.serve()

@ -1 +0,0 @@
__all__ = ['ttypes', 'constants', 'QueryComment']

@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

@ -1,18 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
# This service declares no structs; run fix_spec on the empty registry to keep
# the generated-module contract, then drop the temporary list.
all_structs = []
fix_spec(all_structs)
del all_structs

@ -1,22 +0,0 @@
import sys
from thrift.server.TNonblockingServer import TNonblockingServer
from thrift.transport import TSocket
sys.path.append('/root/PixivSearch')
from PixivSearch.thrift.task import TSDM
from PixivSearch.thrift.task.TSDM import Iface
class Server(Iface):
    """Stub TSDM handler: sign-in reports failure, word-post reports success."""

    def qiandao(self):
        """Daily sign-in task; currently a stub that always reports failure."""
        signed_in = False
        return signed_in

    def word(self):
        """Word/post task; currently a stub that always reports success."""
        posted = True
        return posted
if __name__ == '__main__':
    # NOTE(review): port 2233 is also used by the QueryComment server in this
    # project — confirm the two services are not meant to run at the same time.
    socket = TSocket.TServerSocket(port=2233)
    processor = TSDM.Processor(Server())
    server = TNonblockingServer(processor, socket)
    server.serve()

@ -1,124 +0,0 @@
#!/usr/bin/env python
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#

"""Command-line client for the TSDM thrift service (qiandao / word)."""

import sys
import pprint

if sys.version_info[0] > 2:
    from urllib.parse import urlparse
else:
    from urlparse import urlparse

from thrift.transport import TTransport, TSocket, TSSLSocket, THttpClient
from thrift.protocol.TBinaryProtocol import TBinaryProtocol

# Fixed: import from the package this project actually ships
# (PixivSearch.thrift.task, as used by the task Server), not 'core.thrift.task'.
from PixivSearch.thrift.task import TSDM
from PixivSearch.thrift.task.ttypes import *

if len(sys.argv) <= 1 or sys.argv[1] == '--help':
    print('')
    print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] [-novalidate] [-ca_certs certs] [-keyfile keyfile] [-certfile certfile] function [arg1 [arg2...]]')
    print('')
    print('Functions:')
    print('  bool qiandao()')
    print('  bool word()')
    print('')
    sys.exit(0)

pp = pprint.PrettyPrinter(indent=2)
host = 'localhost'
port = 9090
uri = ''
framed = False
ssl = False
validate = True
ca_certs = None
keyfile = None
certfile = None
http = False
argi = 1

# Positional option parsing: each flag, when present, must appear in this order.
if sys.argv[argi] == '-h':
    parts = sys.argv[argi + 1].split(':')
    host = parts[0]
    if len(parts) > 1:
        port = int(parts[1])
    argi += 2

if sys.argv[argi] == '-u':
    url = urlparse(sys.argv[argi + 1])
    parts = url[1].split(':')
    host = parts[0]
    if len(parts) > 1:
        port = int(parts[1])
    else:
        port = 80
    uri = url[2]
    if url[4]:
        uri += '?%s' % url[4]
    http = True
    argi += 2

if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
    framed = True
    argi += 1

if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':
    ssl = True
    argi += 1

if sys.argv[argi] == '-novalidate':
    validate = False
    argi += 1

if sys.argv[argi] == '-ca_certs':
    ca_certs = sys.argv[argi + 1]
    argi += 2

if sys.argv[argi] == '-keyfile':
    keyfile = sys.argv[argi + 1]
    argi += 2

if sys.argv[argi] == '-certfile':
    certfile = sys.argv[argi + 1]
    argi += 2

cmd = sys.argv[argi]
args = sys.argv[argi + 1:]

# Build the transport stack: HTTP, or (optionally SSL) socket with an
# optional framed wrapper, then the binary protocol on top.
if http:
    transport = THttpClient.THttpClient(host, port, uri)
else:
    if ssl:
        socket = TSSLSocket.TSSLSocket(host, port, validate=validate, ca_certs=ca_certs, keyfile=keyfile, certfile=certfile)
    else:
        socket = TSocket.TSocket(host, port)
    if framed:
        transport = TTransport.TFramedTransport(socket)
    else:
        transport = TTransport.TBufferedTransport(socket)

protocol = TBinaryProtocol(transport)
client = TSDM.Client(protocol)
transport.open()

if cmd == 'qiandao':
    if len(args) != 0:
        print('qiandao requires 0 args')
        sys.exit(1)
    pp.pprint(client.qiandao())

elif cmd == 'word':
    if len(args) != 0:
        print('word requires 0 args')
        sys.exit(1)
    pp.pprint(client.word())

else:
    print('Unrecognized method %s' % cmd)
    sys.exit(1)

transport.close()

@ -1,366 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
    """Service interface for TSDM; implementors supply the RPC methods.

    Both methods are declared to return a bool (the generated *_result
    structs carry a single BOOL 'success' field).
    """

    def qiandao(self):
        """Perform the daily sign-in task."""
        pass

    def word(self):
        """Perform the word/post task."""
        pass
class Client(Iface):
    """Synchronous client stub for the TSDM service.

    Each call writes a CALL message on the output protocol and blocks
    reading the matching result from the input protocol.
    """

    def __init__(self, iprot, oprot=None):
        # One protocol may serve both directions; an explicit oprot overrides output.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def qiandao(self):
        """Invoke the remote qiandao() and return its bool result."""
        self.send_qiandao()
        return self.recv_qiandao()

    def send_qiandao(self):
        self._oprot.writeMessageBegin('qiandao', TMessageType.CALL, self._seqid)
        args = qiandao_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_qiandao(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            # Server-side failure: decode and re-raise as TApplicationException.
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = qiandao_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "qiandao failed: unknown result")

    def word(self):
        """Invoke the remote word() and return its bool result."""
        self.send_word()
        return self.recv_word()

    def send_word(self):
        self._oprot.writeMessageBegin('word', TMessageType.CALL, self._seqid)
        args = word_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_word(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = word_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "word failed: unknown result")
class Processor(Iface, TProcessor):
    """Server-side dispatcher: reads one message and routes it to the handler."""

    def __init__(self, handler):
        self._handler = handler
        self._processMap = {}
        self._processMap["qiandao"] = Processor.process_qiandao
        self._processMap["word"] = Processor.process_word

    def process(self, iprot, oprot):
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            # Unknown method: drain the payload and answer with an exception.
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_qiandao(self, seqid, iprot, oprot):
        args = qiandao_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = qiandao_result()
        try:
            result.success = self._handler.qiandao()
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport problems propagate so the server can drop the connection.
            raise
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            # Any other handler failure is reported to the peer as INTERNAL_ERROR.
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("qiandao", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_word(self, seqid, iprot, oprot):
        args = word_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = word_result()
        try:
            result.success = self._handler.word()
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("word", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class qiandao_args(object):
    """Empty argument struct for qiandao() — the call takes no parameters."""

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                # No declared fields: skip anything a newer peer might send.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('qiandao_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct; the spec tuple is empty because there are no fields.
all_structs.append(qiandao_args)
qiandao_args.thrift_spec = (
)
class qiandao_result(object):
    """Result struct for qiandao().

    Attributes:
     - success: boolean outcome returned by the server
    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('qiandao_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its single BOOL field spec.
all_structs.append(qiandao_result)
qiandao_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
)
class word_args(object):
    """Empty argument struct for word() — the call takes no parameters."""

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                # No declared fields: skip anything a newer peer might send.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('word_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct; the spec tuple is empty because there are no fields.
all_structs.append(word_args)
word_args.thrift_spec = (
)
class word_result(object):
    """Result struct for word().

    Attributes:
     - success: boolean outcome returned by the server
    """

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('word_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


# Register the struct and its single BOOL field spec.
all_structs.append(word_result)
word_result.thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ),  # 0
)
# Resolve recursive struct references now that every spec is attached,
# then drop the temporary registry.
fix_spec(all_structs)
del all_structs

@ -1 +0,0 @@
__all__ = ['ttypes', 'constants', 'TSDM']

@ -1,14 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from .ttypes import *

@ -1,18 +0,0 @@
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
# This service declares no structs; run fix_spec on the empty registry to keep
# the generated-module contract, then drop the temporary list.
all_structs = []
fix_spec(all_structs)
del all_structs

@ -16,18 +16,13 @@ Including another URLconf
from django.conf.urls import url from django.conf.urls import url
from django.views import static from django.views import static
from PixivSearch.view import search, index, download, get, start, stop, migu, pac, page_permission_denied, \ from PixivSearch.view import search, index, download,page_permission_denied, \
page_inter_error, page_not_found page_inter_error, page_not_found
urlpatterns = [ urlpatterns = [
url(r'^$', index), url(r'^$', index),
url(r'^pixiv/search', search), url(r'^pixiv/search', search),
url(r'^pixiv/download', download), url(r'^pixiv/download', download),
url(r'^bangumi/get$', get),
url(r'^bangumi/start$', start),
url(r'^bangumi/stop$', stop),
url(r'^migu$',migu),
url(r'^pac$',pac),
# 增加以下一行,以识别静态资源 # 增加以下一行,以识别静态资源
url(r'^static/(?P<path>.*)$', static.serve, url(r'^static/(?P<path>.*)$', static.serve,
{'document_root': 'PixivSearch/static'}, name='static') {'document_root': 'PixivSearch/static'}, name='static')

@ -1,18 +1,13 @@
# coding=utf-8 # coding=utf-8
import _thread import _thread
import json
import os import os
from django.http import Http404, StreamingHttpResponse, HttpResponse from django.http import Http404, StreamingHttpResponse, HttpResponse
from django.shortcuts import render from django.shortcuts import render
import root import root
from PixivSearch.dao.bangumi import get_, stop_, getIds
from PixivSearch.migu import GetVideoUrl
from PixivSearch.pac import CreatePacFile
from PixivSearch.pixiv.pixiv import get_nodes from PixivSearch.pixiv.pixiv import get_nodes
from PixivSearch.settings import logger from PixivSearch.settings import logger
from PixivSearch.pac import RuleManage
def search(request): def search(request):
@ -48,30 +43,6 @@ def search(request):
except ValueError: except ValueError:
raise Http404() raise Http404()
def migu(request):
if request.GET.get('playurl') != '' and request.GET.get('playurl') is not None:
result = GetVideoUrl.getUrl(request.GET.get('playurl'))
else:
result = {'error': '参数不能为空'}
return HttpResponse(json.dumps(result, ensure_ascii=False), content_type='application/json', charset='utf-8')
def pac(request):
action = request.GET.get('action')
value = request.GET.get('value')
if action == 'create':
return HttpResponse(CreatePacFile.create(), content_type='application/x-ns-proxy-autoconfig', charset='utf-8')
else:
if action == 'insert' and value != None and len(value) > 0:
RuleManage.insert(value)
elif action == 'select' and value != None and len(value) > 0:
return render(request, 'pac/list.html', {'rules': RuleManage.select(value)})
elif action == 'delete' and value != None and len(value) > 0:
RuleManage.delete(value)
return render(request, 'pac/list.html', {'rules': RuleManage.select()})
def index(request): def index(request):
return render(request, 'pixiv/index.html', {'tip': '输入参数进行搜索', 'word': 'R-18', 'pageSize': '10', 'order': '10'}) return render(request, 'pixiv/index.html', {'tip': '输入参数进行搜索', 'word': 'R-18', 'pageSize': '10', 'order': '10'})
@ -94,21 +65,6 @@ def download(request):
response['Content-Disposition'] = 'attachment;filename="{0}"'.format(request.GET.get('fileName').encode('utf-8')) response['Content-Disposition'] = 'attachment;filename="{0}"'.format(request.GET.get('fileName').encode('utf-8'))
return response return response
def get(request):
return HttpResponse(str(get_().__str__()))
# 测试方法
def start(request):
_thread.start_new_thread(getIds, ())
return HttpResponse("start success")
def stop(request):
stop_()
return HttpResponse("stop success")
def page_not_found(request,exception): def page_not_found(request,exception):
return render(request, '404.html') return render(request, '404.html')

File diff suppressed because it is too large Load Diff

@ -1,18 +0,0 @@
aliyun-python-sdk-cloudphoto==1.1.18
aliyun-python-sdk-core-v3==2.8.6
beautifulsoup4==4.6.0
bs4==0.0.1
certifi==2018.4.16
chardet==3.0.4
crcmod==1.7
Django==2.0.5
idna==2.6
lxml==4.2.1
mysqlclient==1.3.12
oss2==2.4.0
param==1.6.1
pytz==2018.4
requests==2.18.4
six==1.11.0
thrift==0.11.0
urllib3==1.22

Binary file not shown.

@ -780,7 +780,7 @@ logfile logfile
# #
# listen-address [::1]:8118 # listen-address [::1]:8118
# #
listen-address 192.168.0.21:8118 listen-address 127.0.0.1:8118
# #
# 4.2. toggle # 4.2. toggle
# ============ # ============
@ -1333,7 +1333,7 @@ enable-proxy-authentication-forwarding 0
# To chain Privoxy and Tor, both running on the same system, you # To chain Privoxy and Tor, both running on the same system, you
# would use something like: # would use something like:
# #
forward-socks5t / 192.168.0.21:1080 . forward-socks5t / 127.0.0.1:1080 .
# #
# Note that if you got Tor through one of the bundles, you may # Note that if you got Tor through one of the bundles, you may
# have to change the port from 9050 to 9150 (or even another # have to change the port from 9050 to 9150 (or even another

@ -1,2 +1,4 @@
#!/usr/bin/env bash #!/bin/bash
python3 manage.py runserver 0.0.0.0:8000 privoxy --pidfile /run/privoxy.pid --user privoxy "/etc/privoxy/config" &
nohup `$SS_CONFIG` &
/usr/local/bin/python3 manage.py runserver 0.0.0.0:8000

@ -1,8 +1,3 @@
#更换apt-get官方源为163源
deb http://mirrors.163.com/debian/ jessie main non-free contrib deb http://mirrors.163.com/debian/ jessie main non-free contrib
deb http://mirrors.163.com/debian/ jessie-updates main non-free contrib deb http://mirrors.163.com/debian/ buster main non-free contrib
deb http://mirrors.163.com/debian/ jessie-backports main non-free contrib
deb-src http://mirrors.163.com/debian/ jessie main non-free contrib
deb-src http://mirrors.163.com/debian/ jessie-updates main non-free contrib
deb-src http://mirrors.163.com/debian/ jessie-backports main non-free contrib
deb http://mirrors.163.com/debian-security/ jessie/updates main non-free contrib
deb-src http://mirrors.163.com/debian-security/ jessie/updates main non-free contrib
Loading…
Cancel
Save