fix: repair crawler network layer, stuck validation queue, and API 500 errors

- Fix BaseHTTPPlugin connection pooling, concurrency control, exception logging, and timeout policy (a hedged sketch of these fixes follows below)
- Fix/harden the stability and fallback mechanisms of 8 crawler plugins
- Clear 40,000+ pending tasks from the validation_tasks table to keep the validation queue from stalling
- Fix the 500 errors caused by the missing global app instance in app/api/main.py
- Raise the frontend Axios timeout to 120 seconds to prevent dropped requests
- Fix plugin statistics persistence and scheduler lifecycle issues
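The BaseHTTPPlugin changes themselves are not part of the hunk below. As a rough sketch of the network-layer fixes listed above, assuming the plugin is built on aiohttp (MAX_POOL_SIZE, DEFAULT_TIMEOUT, and _get_session are illustrative names, not the project's actual API): one shared ClientSession backed by a size-limited TCPConnector serves as the connection pool, a ClientTimeout sets the default timeout policy, and fetch logs failures and returns None instead of raising, so one bad page cannot abort a crawl:

# Hedged sketch only -- the real BaseHTTPPlugin is not shown in this commit.
# Assumes aiohttp; MAX_POOL_SIZE, DEFAULT_TIMEOUT, and _get_session are
# illustrative names, not the project's actual API.
import logging
from typing import Optional

import aiohttp

logger = logging.getLogger(__name__)


class BaseHTTPPlugin:
    MAX_POOL_SIZE = 10     # assumed connection-pool limit
    DEFAULT_TIMEOUT = 15   # seconds, matching the timeout the old crawl() used

    default_config: dict = {}

    def __init__(self):
        self.config: dict = dict(self.default_config)
        self._session: Optional[aiohttp.ClientSession] = None

    def get_headers(self) -> dict:
        return {"User-Agent": "Mozilla/5.0"}

    async def _get_session(self) -> aiohttp.ClientSession:
        # Reuse one session so the TCPConnector can actually pool connections.
        if self._session is None or self._session.closed:
            self._session = aiohttp.ClientSession(
                connector=aiohttp.TCPConnector(limit=self.MAX_POOL_SIZE),
                timeout=aiohttp.ClientTimeout(total=self.DEFAULT_TIMEOUT),
            )
        return self._session

    async def fetch(self, url: str, timeout: Optional[int] = None) -> Optional[str]:
        # Log failures and degrade to None so one bad page cannot abort the
        # whole crawl; callers guard with `if not html: continue`.
        try:
            session = await self._get_session()
            kwargs = {"headers": self.get_headers()}
            if timeout is not None:
                kwargs["timeout"] = aiohttp.ClientTimeout(total=timeout)
            async with session.get(url, **kwargs) as resp:
                resp.raise_for_status()
                return await resp.text()
        except Exception:
            logger.exception("fetch failed for %s", url)
            return None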
@@ -12,24 +12,29 @@ class Ip3366Plugin(BaseHTTPPlugin):
     name = "ip3366"
     display_name = "IP3366"
     description = "从 IP3366 网站爬取免费代理"
-    default_config = {"max_pages": 5}
+    default_config = {"max_pages": 3}
 
+    def __init__(self):
+        super().__init__()
+        self._update_urls()
+
+    def _update_urls(self):
-        max_pages = self.config.get("max_pages", 5)
+        max_pages = self.config.get("max_pages", 3)
         self.urls = [
             f"http://www.ip3366.net/free/?stype=1&page={i}" for i in range(1, max_pages + 1)
         ] + [
             f"http://www.ip3366.net/free/?stype=2&page={i}" for i in range(1, max_pages + 1)
         ]
 
     def get_headers(self) -> dict:
         headers = super().get_headers()
         headers["Referer"] = "http://www.ip3366.net/free/"
         return headers
 
     async def crawl(self) -> List[ProxyRaw]:
         results = []
-        for url in self.urls:
-            html = await self.fetch(url, timeout=15)
+        htmls = await self.fetch_all(self.urls)
+        for html in htmls:
             if not html:
                 continue
             soup = BeautifulSoup(html, "lxml")
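The crawl() change above replaces a sequential per-URL fetch with a single fetch_all call over all page URLs. fetch_all itself is not shown in this hunk; a minimal sketch of what it might look like, assuming a semaphore-bounded asyncio.gather over the plugin's own fetch. It is written here as a standalone helper taking fetch as a callable; in the plugin it is presumably a method on BaseHTTPPlugin, and the concurrency default is an assumption:

import asyncio
from typing import Awaitable, Callable, List, Optional


async def fetch_all(
    fetch: Callable[[str], Awaitable[Optional[str]]],
    urls: List[str],
    concurrency: int = 5,  # assumed bound; the real limit is not in this diff
) -> List[Optional[str]]:
    # Cap the number of in-flight requests while still fetching in parallel.
    semaphore = asyncio.Semaphore(concurrency)

    async def bounded(url: str) -> Optional[str]:
        async with semaphore:
            return await fetch(url)

    # gather preserves input order, and fetch already maps failures to None,
    # which matches the `if not html: continue` guard in crawl().
    return await asyncio.gather(*(bounded(u) for u in urls))

Compared with the old loop, pages are fetched in parallel up to the semaphore bound, so one slow page no longer serializes the entire crawl; that lines up with the concurrency-control and timeout-policy bullets in the commit message.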