Update documentation

luzhisheng 2024-01-13 00:06:44 +08:00
parent e957334b65
commit 3d6f24b18f
4 changed files with 26 additions and 37 deletions


@@ -0,0 +1,3 @@
Solution write-up:
https://blog.csdn.net/u010226586/article/details/135563851


@@ -1,11 +0,0 @@
HTTP/2.0 - a recent, very effective anti-crawling approach: refuse every HTTP/1.x request.
Many crawler libraries still support HTTP/2.0 poorly. The well-known Python library requests, for example, still only supports HTTP/1.1 to this day, and it is unclear when HTTP/2.0 support will arrive (a hedged workaround sketch follows below).
Reference: https://blog.csdn.net/weixin_42277380/article/details/117440390
Inspecting the h2 requests in DevTools:
![debugger](../img/76.png)
[案例.py](./案例.py)
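As a side note (not part of the original write-up): since requests only speaks HTTP/1.1, one hedged way to send HTTP/2 requests from Python is the httpx library. The minimal sketch below assumes httpx is installed with its HTTP/2 extra (pip install "httpx[http2]"); the URL and User-Agent simply mirror the challenge scripts elsewhere in this commit, and the sessionid is a placeholder you must replace with your own.

import httpx

url = 'https://match.yuanrenxue.cn/api/match/17?page=1'
headers = {
    "User-Agent": "yuanrenxue.project",
    "cookie": "sessionid=<your sessionid>",  # placeholder: substitute your own cookie
}

# http2=True enables HTTP/2 via ALPN negotiation (requires the h2 package)
with httpx.Client(http2=True, headers=headers) as client:
    resp = client.get(url)
    print(resp.http_version)  # "HTTP/2" when the server negotiated h2
    print(resp.json())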


@@ -0,0 +1,23 @@
import requests
from hyper.contrib import HTTP20Adapter

url_ = 'https://match.yuanrenxue.cn/api/match/17?page={}'
headers_ = {
    "User-Agent": "yuanrenxue.project",
    "cookie": "sessionid=ieirkmfqa2qsj29uapsh4bdaj59vggj2"
}
# Create a session object and set the request headers
s = requests.session()
s.headers = headers_
# Use HTTP/2.0: mount hyper's HTTP20Adapter for this host
s.mount('https://match.yuanrenxue.cn', HTTP20Adapter())
num = 0
for i in range(5):
    # Request each page and parse the JSON response
    json_data = s.get(url_.format(i + 1)).json()
    print(json_data)
    data_list = json_data.get('data')
    for j in data_list:
        j_num = j.get('value')
        num += j_num
# Print the accumulated sum of all "value" fields
print('num:', num)


@@ -1,26 +0,0 @@
import requests
from hyper.contrib import HTTP20Adapter

if __name__ == '__main__':
    url_ = 'https://match.yuanrenxue.com/api/match/17?page={}'
    headers_ = {
        "User-Agent": "yuanrenxue.project",
        "cookie": "sessionid=nbfta7wd1srjcwh558k0vh1mt0tq7i1v"
    }
    # Create a session object and set the request headers
    s = requests.session()
    s.headers = headers_
    # Use HTTP/2.0: mount hyper's HTTP20Adapter for this host
    s.mount('https://match.yuanrenxue.com', HTTP20Adapter())
    num = 0
    for i in range(5):
        # Request each page and parse the JSON response
        json_data = s.get(url_.format(i + 1)).json()
        print(json_data)
        data_list = json_data.get('data')
        for j in data_list:
            print(j.get('value'))
            j_num = j.get('value')
            num += j_num
        print()
    # Print the accumulated sum of all "value" fields
    print('num:', num)