#!/bin/bash
config_file=$1

if [ -z "$config_file" ]
then
  echo 'The config file must be specified'
  exit 1
fi
if [ ! -f "$config_file" ]
then
  echo "config file: $config_file not found"
  exit 1
fi
if ! command -v jq >/dev/null 2>&1
then
  echo 'To run this script, you need to install jq: https://stedolan.github.io/jq/'
  exit 1
else
  echo "jq version: `jq --version`"
fi

# ==================== Config ====================
Host=`cat $config_file | jq -r .aliyun_oss.Host`
accelerateHost=`cat $config_file | jq -r .aliyun_oss.accelerateHost`
bucketname=`cat $config_file | jq -r .aliyun_oss.bucketname`
AccessKeyId=`cat $config_file | jq -r .aliyun_oss.AccessKeyId`
AccessKeySecret=`cat $config_file | jq -r .aliyun_oss.AccessKeySecret`
db_file=`cat $config_file | jq -r .aliyun_oss.db`
db_file_err=$db_file.err
imageParam=`cat $config_file | jq -r .aliyun_oss.imageParam`
CompressHost=`cat $config_file | jq -r .compress.Host`
CompressPort=`cat $config_file | jq -r .compress.Port`
CompressUser=`cat $config_file | jq -r .compress.User`
CompressPath=`cat $config_file | jq -r .compress.Path`
CronitorKey=`cat $config_file | jq -r .Cronitor.API_KEY`
CronitorJobName=`cat $config_file | jq -r .Cronitor.JOB_NAME`
# ================================================
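
# A sketch of the config.json layout this script expects, inferred from the jq
# lookups in this script. All values below are placeholders, not real hosts or
# credentials; adjust them to your own setup.
: <<'EXAMPLE_CONFIG'
{
  "basePath": "/data/pixiv-rank",
  "telegramToken": "123456789:telegram-bot-token",
  "chatId": -1001234567890,
  "mode": "daily",
  "content": "illust",
  "anonfilesToken": "anonfiles-api-token",
  "rule": { "some_word": "replacement" },
  "sleep": { "ready": 30, "request": 30 },
  "aliyun_oss": {
    "Host": "oss-cn-hangzhou.aliyuncs.com",
    "accelerateHost": "oss-accelerate.aliyuncs.com",
    "bucketname": "my-bucket",
    "AccessKeyId": "my-access-key-id",
    "AccessKeySecret": "my-access-key-secret",
    "db": "uploaded.db",
    "imageParam": "?x-oss-process=image/format,webp"
  },
  "compress": {
    "Host": "compress.example.com",
    "Port": 22,
    "User": "worker",
    "Path": "/tmp/compress"
  },
  "Cronitor": {
    "API_KEY": "cronitor-api-key",
    "JOB_NAME": "pixiv-rank"
  }
}
EXAMPLE_CONFIG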

if [ ! -f $db_file ]
then
  touch $db_file
fi

# Upload a file to Aliyun OSS with a header-signed PUT request; successful
# uploads are recorded in $db_file, failures in $db_file_err.
function upload() {
  VERB="PUT"
  file=$1
  Content_MD5=""
  Content_Type=`file -b --mime-type "$file"`
  Date=`TZ=GMT env LANG=en_US.UTF-8 date +'%a, %d %b %Y %H:%M:%S GMT'`
  CanonicalizedOSSHeaders="x-oss-object-acl:public-read\n"
  CanonicalizedResource="/$bucketname/$file"
  stringToSign="$VERB\n$Content_MD5\n$Content_Type\n$Date\n$CanonicalizedOSSHeaders$CanonicalizedResource"
  Signature=`echo -en "$stringToSign" | openssl sha1 -hmac "$AccessKeySecret" -binary | base64`
  Authorization="OSS $AccessKeyId:$Signature"
  # -o /dev/null keeps any error body out of $http_code; -w prints only the status code.
  http_code=`curl -v -o /dev/null -w "%{http_code}" -X PUT -H "HOST:$bucketname.$Host" -H "x-oss-object-acl:public-read" -H "Date:$Date" -H "Content-Type:$Content_Type" -H "Authorization:$Authorization" --data-binary "@$file" "https://$bucketname.$Host/$file"`
  if [ "$http_code" -eq 200 ]
  then
    echo "$file" >>$db_file
    rm -f "$file"
  else
    echo "$file" >>$db_file_err
  fi
}

basePath=`cat $config_file | jq -r .basePath`
if [ ! -d "$basePath" ]
then
  mkdir -p "$basePath"
fi
cd "$basePath"

telegramToken=`cat $config_file | jq -r .telegramToken`
baseApi="https://api.telegram.org/bot$telegramToken"
chat_id=`cat $config_file | jq .chatId`
mode=`cat $config_file | jq -r .mode`
content=`cat $config_file | jq -r .content`
rank_url="https://www.pixiv.net/ranking.php?mode=$mode&content=$content&p=1&format=json"
today=`date "+%Y-%m-%d"`
_today=`date "+%Y%m%d"`
rank_json=$today.json
commands_file=$today.sh
anonfiles_token=`cat $config_file | jq .anonfilesToken`
# Turn the .rule map in the config into a chain of sed substitutions used to
# sanitize titles, user names and tags.
rule=`cat $config_file | jq .rule|jq to_entries|jq 'map("sed -e \"s/\\\\"+.key+"/\\\\"+.value+"/g\"")'|jq -r '.[]'|sed ':a;N;s/\n/|/;t a;'`

# Telegram send commands are collected in $commands_file and executed in one
# batch at the end of the run.
touch $commands_file && chmod +x $commands_file

sleep `cat $config_file | jq .sleep.ready`
curl -v -d chat_id=$chat_id -d parse_mode=HTML -d text="The pixiv ranking has been updated; processing of the $today daily ranking starts in 30 seconds. #date$_today%0A%0AWhat is the ranking?%0AThe ranking is a statistic computed over all public works on pixiv, based on each day's (00:00-23:59:59) view and Like counts, with the final order decided by pixiv's own algorithm, pixiv rank β. Results are published daily at 12:00 noon.%0AAbout the ranking" $baseApi/sendMessage

if [ ! -f $rank_json ]
then
  echo "get data from $rank_url"
  curl -v $rank_url >$rank_json
fi

length=`jq '.contents|length' $rank_json`
fileCountSize=0
fileList=''
media=''
fileCount=0
maxFileCount=10
tarFile=$today.tar.gz
maxFileSize=20971520
maxFileSize_M="$((maxFileSize/1024/1024))M"
start_rank=''
end_rank=''

for index in `seq 1 $length`
do
  index=$((index-1))
  pid=`jq --argjson index $index '.contents[$index].illust_id' $rank_json`
  artworkLink="https://www.pixiv.net/artworks/$pid"
  rank=`jq --argjson index $index '.contents[$index].rank' $rank_json`
  yes_rank=`jq --argjson index $index '.contents[$index].yes_rank' $rank_json`
  # Remember the first and last rank of each batch of 10 for the summary message.
  if [ $(((index+1) % 10)) == 1 ]
  then
    start_rank=$rank
  fi
  if [ $(((index+1) % 10)) == 0 ]
  then
    end_rank=$rank
  fi
  if [ $yes_rank -eq 0 ]
  then
    rank_info="\#rank$rank \#debut"
  else
    rank_info="\#rank$rank yesterday \#rank$yes_rank"
  fi
  echo "pid=$pid,artworkLink=$artworkLink,rank_info=$rank_info"

  png_html_file=$pid.html
  if [ ! -f $png_html_file ]
  then
    sleep 1
    echo "get data from $artworkLink"
    curl -v $artworkLink >$png_html_file
  fi
  # Extract the preloaded JSON embedded in the artwork page.
  json_file=$pid.json
  if [ ! -f $json_file ]
  then
    grep -Eo "content='{\"timestamp.*].{3}" $png_html_file | sed -e "s/content='//" >$json_file
  fi
  pageCount=`jq --arg pid $pid '.illust[$pid].pageCount' $json_file`
  original_url=`jq -r --arg pid $pid '.illust[$pid].urls.original' $json_file`
  small_url=`jq -r --arg pid $pid '.illust[$pid].urls.small' $json_file`
  title=`jq -r --arg pid $pid '.illust[$pid].title' $json_file|sed -e 's/\"/\\\"/g'`
  title=`bash -c "echo '$title'|$rule"`
  description=`jq -r --arg pid $pid '.illust[$pid].description' $json_file`
  userName=`jq -r --arg pid $pid '.illust[$pid].userName' $json_file|sed -e 's/\"/\\\"/g'`
  userName=`bash -c "echo '$userName'|$rule"`
  userId=`jq -r --arg pid $pid '.illust[$pid].userId' $json_file`
  likeCount=`jq --arg pid $pid '.illust[$pid].likeCount' $json_file`
  bookmarkCount=`jq --arg pid $pid '.illust[$pid].bookmarkCount' $json_file`
  viewCount=`jq --arg pid $pid '.illust[$pid].viewCount' $json_file`
  tag=`jq -r --arg pid $pid '.illust[$pid].tags.tags[].tag' $json_file|sed -e 's/^/\\#/g'|sed ':a;N;s/\n/ /;t a;'`
  tag=`bash -c "echo '$tag'|$rule"`
  echo -e "pageCount=$pageCount,original_url=$original_url,small_url=$small_url\n\
title=$title,description=$description,userName=$userName\n\
likeCount=$likeCount,bookmarkCount=$bookmarkCount,viewCount=$viewCount\n\
tag=$tag"
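
  # For each page of the work: download the original (with the pixiv referer),
  # shrink it with jpegoptim on the remote compress host if it exceeds
  # $maxFileSize_M, upload it to OSS, then fetch the webp rendition through the
  # accelerate host ($imageParam) and the small thumbnail. The first page of
  # each work is batched in groups of $maxFileCount into sendMediaGroup
  # commands appended to $commands_file.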
  for page in `seq 1 $pageCount`
  do
    page=$((page - 1))
    page_original_url=`echo $original_url | sed -e "s/p0/p$page/"`
    page_small_url=`echo $small_url | sed -e "s/p0/p$page/"`
    original_file_name=`echo $page_original_url | grep -Eo "$pid.*"`
    small_file_name=`echo $page_small_url | grep -Eo "$pid.*"`
    webp_file_name=`echo $original_file_name|sed 's/jpg/webp/'|sed 's/png/webp/'`
    if [ "`cat $db_file|grep $original_file_name`" != "$original_file_name" ]
    then
      echo "download image file name=$original_file_name,url=$page_original_url"
      if [ ! -f $original_file_name ]
      then
        curl -v -H 'referer: https://www.pixiv.net/' $page_original_url -o $original_file_name
      fi
      if [ `du -b $original_file_name|awk '{print $1}'` -gt $maxFileSize ]
      then
        echo "image $original_file_name is `du -h $original_file_name|awk '{print $1}'`, larger than $maxFileSize_M, compressing"
        scp -i ~/.ssh/$CompressHost -P $CompressPort $original_file_name $CompressUser@$CompressHost:$CompressPath/$original_file_name
        ssh -i ~/.ssh/$CompressHost -p $CompressPort $CompressUser@$CompressHost "cd $CompressPath;jpegoptim --size=$maxFileSize_M $original_file_name"
        scp -i ~/.ssh/$CompressHost -P $CompressPort $CompressUser@$CompressHost:$CompressPath/$original_file_name $original_file_name
        echo "image $original_file_name compressed size: `du -h $original_file_name`"
      fi
      upload $original_file_name
    fi
    if [ ! -f $webp_file_name ]
    then
      webp_url="https://$bucketname.$accelerateHost/$original_file_name$imageParam"
      echo "download image file name=$webp_file_name,url=$webp_url"
      curl -v $webp_url -o $webp_file_name
    fi
    if [ ! -f $small_file_name ]
    then
      echo "download image file name=$small_file_name,url=$page_small_url"
      curl -v -H 'referer: https://www.pixiv.net/' $page_small_url -o $small_file_name
    fi
    # Only the first page of each work goes into the Telegram media group.
    if [ $page -eq 0 ]
    then
      media="$media,{\"type\":\"photo\",\"media\":\"attach://$webp_file_name\",\"parse_mode\":\"HTML\",\"caption\":\"$rank_info\n$title\n$userName\n$tag\"}"
      fileList="$fileList -F $webp_file_name=@$webp_file_name"
      fileCount=$((fileCount + 1))
      fileSize=`du $small_file_name | awk '{print $1}'`
      fileCountSize=$((fileCountSize + fileSize))
      echo "fileCountSize=$fileCountSize,fileCount=$fileCount"
    fi
    if [[ $fileCount -eq $maxFileCount ]]
    then
      echo "sleep `cat $config_file | jq .sleep.request`" >>$commands_file
      echo "curl -v -F chat_id=$chat_id $fileList -F media='[`echo $media | cut -c 2-`]' $baseApi/sendMediaGroup" >>$commands_file
      echo "curl -v -d chat_id=$chat_id -d text='The daily ranking positions of the works above are #rank${start_rank}_${end_rank}; tap a work to see its pid/title/artist/tag info' $baseApi/sendMessage" >>$commands_file
      echo >>$commands_file
      echo >>$commands_file
      echo >>$commands_file
      fileCountSize=0
      fileList=''
      media=''
      fileCount=0
    fi
  done
done

if [ $fileCount -gt 0 ]
then
  echo "curl -v -F chat_id=$chat_id $fileList -F media='[`echo $media | cut -c 2-`]' $baseApi/sendMediaGroup" >>$commands_file
fi
# Drop the first line of the batch file (the leading sleep) so the first media
# group is sent immediately.
sed -i '1d' $commands_file

hasSend=$today.hasSend
if [ ! -f $hasSend ]
then
  bash ./$commands_file
  touch $hasSend
  next_expected_at=`curl -v https://cronitor.io/api/monitors/$CronitorJobName -u $CronitorKey:''|jq .next_expected_at`
  curl -v -d chat_id=$chat_id -d text="That is all of the top ${length} works in the $today daily ranking. This push is complete; the next push is expected around `date -d @$next_expected_at '+%Y-%m-%d %H:%M:%S'`. If anything looks wrong, please contact the admin. #date$_today" $baseApi/sendMessage
fi
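
# ==================== Usage (sketch) ====================
# Assumed invocation; the script name and cron schedule below are placeholders,
# not defined anywhere in this script:
#   ./pixiv_rank.sh /path/to/config.json
# e.g. from cron, shortly after the ranking is published at 12:00 noon:
#   10 12 * * * /path/to/pixiv_rank.sh /path/to/config.json >>/var/log/pixiv_rank.log 2>&1
# =========================================================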