模板:成就
刷
历
编
跳到导航
跳到搜索
成就自动化更新-请使用以下python脚本:
接口暂时未做,待发售后补充
使用前请安装requests库,lxml库,mwclient库
pip install requests lxml mwclient
import os
from Spider_Toolkit import spidertools
import requests
from lxml import etree
import re
from mwclient import Site
# HTTP request headers mimicking Chrome 126 on Windows.
# Fill in "Cookie" with your Steam session cookie (taken from the Steam
# achievements page) and "Referer" with that page's URL before running.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Cookie": "",
    "Pragma": "no-cache",
    "Referer": "",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "same-origin",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
    # BUG FIX: the client-hint headers below were copied from a Fiddler export
    # with caret-escaping intact (e.g. '"^sec-ch-ua": "^\\^Not/A)Brand^^..."'),
    # so invalid header names/values were sent. Restored to the plain values
    # Chrome 126 actually sends.
    "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
}
def download_picture(picture_list, h3_list):
    """Download each achievement icon to './achieve_picture/成就-<title>.jpg'.

    picture_list -- icon image URLs scraped from the Steam achievements page
    h3_list      -- achievement titles, parallel to picture_list (used for
                    the output filenames)
    """
    # BUG FIX: the output directory was assumed to exist; create it up front
    # so a fresh checkout does not crash with FileNotFoundError on open().
    os.makedirs('./achieve_picture', exist_ok=True)
    for i, url in enumerate(picture_list):
        print(i, url)
        response = requests.get(url)
        # BUG FIX: fail loudly on HTTP errors instead of silently saving an
        # error page as a .jpg.
        response.raise_for_status()
        filepath = './achieve_picture/成就-' + str(h3_list[i]) + '.jpg'
        with open(filepath, 'wb') as f:
            f.write(response.content)
def api_wiki_upload_():
    """Upload every file under ./achieve_picture to the BWiki via mwclient."""
    # When uploading, the SESSDATA cookie must come from BWiki (not Steam).
    wiki = Site("wiki.biligame.com/wukong", path='/', scheme='https')
    wiki.login(cookies={'SESSDATA': ''})
    picture_dir = './achieve_picture'
    for entry in os.listdir(picture_dir):
        entry_path = os.path.join(picture_dir, entry)
        if not os.path.isfile(entry_path):
            continue  # skip subdirectories and other non-file entries
        with open(entry_path, 'rb') as handle:
            wiki.upload(handle, filename=entry)
def request_achieve():
    """Scrape the Steam achievements page, print the parsed fields, and
    download the achievement icons via download_picture().
    """
    # Fill in the URL of the Black Myth: Wukong achievements page on Steam.
    url = ""
    page = requests.get(url, headers=headers)
    doc = etree.HTML(page.text)

    # Page header: game title, profile name, and the active tab label.
    game_title = doc.xpath("//div[@class='profile_small_header_bg']/div[@class='profile_small_header_texture']/div/h2/text()")
    print(game_title[0])
    profile_name = doc.xpath("//div[@class='profile_small_header_bg']/div[@class='profile_small_header_texture']/div/h1/text()")
    print(profile_name[0])
    active_tab = doc.xpath("//div[@id='BG_bottom']/div[@id='mainContents']/div[@id='tabs']/div[@class='tab']/div/text()")
    print(active_tab[0])

    # Summary counters on either side of the header.
    for line in doc.xpath("//div[@id='headerContentRight']/text()"):
        print(line)
    totals = doc.xpath("//div[@id='headerContentLeft']/span/text()")
    print('总成就:' + totals[0])

    # Per-achievement data: unlock percentage, title, description, icon URL.
    percents = doc.xpath("//div[@class='achieveTxtHolder']/div[@class='achievePercent']/text()")
    print(percents)
    titles = doc.xpath("//div[@class='achieveTxtHolder']/div[@class='achieveTxt']/h3/text()")
    print(titles)
    descriptions = re.findall('<h5>(.*?)</h5>', page.text)
    print(descriptions)
    icon_urls = doc.xpath("//div[contains(@class, 'achieveRow')]/div[@class='achieveImgHolder']/img/@src")
    print(icon_urls)

    download_picture(icon_urls, titles)
if __name__ == '__main__':
    # BUG FIX: the guard body contained only comments, which is a syntax
    # error in Python. `pass` keeps the script importable; uncomment the
    # two steps below to actually run them, in order.
    # request_achieve()   # step 1: scrape Steam and download the icons
    # api_wiki_upload_()  # step 2: upload the downloaded icons to BWiki
    pass

沪公网安备 31011002002714 号