fix: repair crawler network layer, stuck validation queue, and API 500 errors

- Fix BaseHTTPPlugin connection pooling, concurrency control, exception logging, and timeout strategy (see the fetch sketch after this list)
- Fix and harden the stability and fallback behavior of 8 crawler plugins
- Clear 40,000+ pending rows from the validation_tasks table to keep the queue from stalling (cleanup sketched below)
- Fix the 500 errors caused by the missing module-level app instance in app/api/main.py (see the lifespan sketch below)
- Raise the frontend Axios timeout to 120 seconds to avoid dropped requests
- Fix plugin-statistics persistence and scheduler lifecycle issues (covered by the same lifespan sketch)
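The BaseHTTPPlugin changes themselves are spread across other files in this commit. As a rough illustration of the pattern the first bullet describes, here is a minimal sketch assuming aiohttp; the pool limits, semaphore size, and everything except the fetch/get_headers names are assumptions, not the repository's actual code:

```python
import asyncio
import logging
from typing import Optional

import aiohttp

logger = logging.getLogger(__name__)


class BaseHTTPPlugin:
    """Sketch of a hardened HTTP base plugin (names and limits are assumptions)."""

    def __init__(self):
        self._session: Optional[aiohttp.ClientSession] = None
        # Bound concurrent requests per plugin so one slow site
        # cannot monopolize the crawl (the limit of 5 is a guess).
        self._semaphore = asyncio.Semaphore(5)

    async def fetch(self, url: str, timeout: int = 10) -> Optional[str]:
        if self._session is None or self._session.closed:
            # Pooled connector with per-host caps (assumed values).
            self._session = aiohttp.ClientSession(
                connector=aiohttp.TCPConnector(limit=20, limit_per_host=5)
            )
        try:
            async with self._semaphore:
                async with self._session.get(
                    url,
                    headers=self.get_headers(),
                    timeout=aiohttp.ClientTimeout(total=timeout),
                ) as resp:
                    resp.raise_for_status()
                    return await resp.text()
        except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
            # Log the failure and fall back to None; callers treat None
            # as "skip this page" instead of letting the whole crawl die.
            logger.warning("fetch failed for %s: %r", url, exc)
            return None

    def get_headers(self) -> dict:
        return {"User-Agent": "Mozilla/5.0"}
```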
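The validation_tasks cleanup is likewise not visible in the diff below. A minimal sketch of the idea, assuming a SQLite-backed table with a status column; the table and column names are guesses, not the project's confirmed schema:

```python
import sqlite3


def clear_stale_pending_tasks(db_path: str) -> int:
    """Drop pending validation tasks so the queue can drain again.

    The names (validation_tasks, status) are assumptions for illustration.
    """
    with sqlite3.connect(db_path) as conn:
        cur = conn.execute("DELETE FROM validation_tasks WHERE status = 'pending'")
        return cur.rowcount  # number of stale rows removed
```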
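For the missing global app instance and the scheduler lifecycle, the usual shape of such a fix looks like the following FastAPI sketch; the scheduler import path is hypothetical:

```python
from contextlib import asynccontextmanager

from fastapi import FastAPI

from app.core.scheduler import scheduler  # hypothetical import path


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Start background jobs when the server boots...
    scheduler.start()
    yield
    # ...and stop them cleanly on shutdown instead of leaking tasks.
    scheduler.shutdown()


# The module-level instance the ASGI server imports (e.g. app.api.main:app);
# requests cannot be served without it.
app = FastAPI(lifespan=lifespan)
```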
Author: 祀梦
Date:   2026-04-04 19:27:36 +08:00
Parent: 635c524a7e
Commit: f09a8e16c4

19 changed files with 505 additions and 161 deletions


@@ -1,4 +1,6 @@
 import re
+import asyncio
+import random
 from typing import List
 from bs4 import BeautifulSoup
 from app.core.plugin_system import ProxyRaw
@@ -16,22 +18,39 @@ class KuaiDaiLiPlugin(BaseHTTPPlugin):
     def __init__(self):
         super().__init__()
+        # Crawl fewer pages to lower the chance of triggering anti-bot measures while still getting some data
         self.urls = [
-            f"https://www.kuaidaili.com/free/inha/{i}/" for i in range(1, 11)
-        ] + [
-            f"https://www.kuaidaili.com/free/intr/{i}/" for i in range(1, 11)
+            "https://www.kuaidaili.com/free/inha/1/",
+            "https://www.kuaidaili.com/free/intr/1/",
         ]
+
+    def get_headers(self) -> dict:
+        headers = super().get_headers()
+        headers["Referer"] = "https://www.kuaidaili.com/free/inha/"
+        headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
+        headers["Accept-Encoding"] = "gzip, deflate, br"
+        headers["Accept-Language"] = "zh-CN,zh;q=0.9,en;q=0.8"
+        headers["Sec-Fetch-Dest"] = "document"
+        headers["Sec-Fetch-Mode"] = "navigate"
+        headers["Sec-Fetch-Site"] = "same-origin"
+        headers["Upgrade-Insecure-Requests"] = "1"
+        return headers

     async def crawl(self) -> List[ProxyRaw]:
         results = []
+        # Warm up the session on the homepage first to collect cookies and reduce the chance of being blocked
+        await self.fetch("https://www.kuaidaili.com/", timeout=10)
+        await asyncio.sleep(random.uniform(2, 4))
+        # Fetch the free-proxy pages sequentially
         for url in self.urls:
-            html = await self.fetch(url, timeout=15)
+            html = await self.fetch(url, timeout=10)
             if not html:
                 continue
             soup = BeautifulSoup(html, "lxml")
             table = soup.find("table")
             if not table:
-                logger.warning(f"{self.display_name} 未能找到表格,可能是触发了反爬")
+                logger.warning(f"{self.display_name} 未能找到表格,可能是触发了反爬: {url}")
                 continue
             for row in table.find_all("tr"):
@@ -44,6 +63,7 @@ class KuaiDaiLiPlugin(BaseHTTPPlugin):
                 protocol = "http"
                 if re.match(r"^\d+\.\d+\.\d+\.\d+$", ip) and port.isdigit():
                     results.append(ProxyRaw(ip, int(port), protocol))
+            await asyncio.sleep(random.uniform(5, 8))
         if results:
             logger.info(f"{self.display_name} 解析完成,获取 {len(results)} 个潜在代理")
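To make the diff above easier to read against real usage, here is a hypothetical driver for a plugin like this one; the module path and the ProxyRaw attribute names are assumptions, not the project's actual entry point:

```python
import asyncio

from app.plugins.kuaidaili import KuaiDaiLiPlugin  # hypothetical module path


async def main() -> None:
    plugin = KuaiDaiLiPlugin()
    proxies = await plugin.crawl()
    # Each ProxyRaw was built as ProxyRaw(ip, int(port), protocol) in the diff;
    # the attribute names below assume the obvious field layout.
    for p in proxies[:5]:
        print(p.ip, p.port, p.protocol)


if __name__ == "__main__":
    asyncio.run(main())
```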