fix: repair the crawler network layer, a stuck validation queue, and API 500 errors

- Fix BaseHTTPPlugin connection pooling, concurrency control, exception logging, and timeout strategy (a rough sketch of the fetch pattern follows the file diff below)
- Fix/harden the stability and fallback mechanisms of 8 crawler plugins
- Clear 40,000+ pending tasks from the validation_tasks table so the queue can no longer stall
- Fix the 500 errors caused by the missing global app instance in app/api/main.py (see the sketch below)
- Raise the frontend Axios timeout to 120 seconds to avoid dropped requests
- Fix plugin statistics persistence and scheduler lifecycle issues
祀梦
2026-04-04 19:27:36 +08:00
parent 635c524a7e
commit f09a8e16c4
19 changed files with 505 additions and 161 deletions
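
The app/api/main.py diff is not among the hunks shown here. As a minimal sketch of the kind of module-level ASGI instance the fix restores, assuming the API is FastAPI-based (the framework choice, lifespan hook, and health route are illustrative assumptions, not the project's actual code):

```python
# Hypothetical sketch of app/api/main.py -- assumes FastAPI; all names below
# are illustrative, not the project's actual routes or startup logic.
from contextlib import asynccontextmanager

from fastapi import FastAPI


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Start background components (scheduler, validation queue) on startup...
    yield
    # ...and shut them down cleanly on exit.


# The module-level instance the ASGI server imports (e.g. `uvicorn app.api.main:app`);
# without it, requests never reach a route handler and surface as 5xx errors.
app = FastAPI(lifespan=lifespan)


@app.get("/api/health")
async def health() -> dict:
    # Minimal route to confirm the app instance is actually being served.
    return {"status": "ok"}
```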


@@ -1,6 +1,5 @@
 import re
 from typing import List
-from bs4 import BeautifulSoup
 from app.core.plugin_system import ProxyRaw
 from app.plugins.base import BaseHTTPPlugin
 from app.core.log import logger
@@ -12,41 +11,71 @@ class YunDaiLiPlugin(BaseHTTPPlugin):
     default_config = {"max_pages": 5}
     name = "yundaili"
     display_name = "云代理"
-    description = "云代理网站爬取免费代理"
+    description = "从 GitHub 公开代理列表获取免费代理"
     def __init__(self):
         super().__init__()
+        # Primary source: GitHub raw
         self.urls = [
-            f"http://www.ip3366.net/free/?stype=1&page={i}" for i in range(1, 6)
-        ] + [
-            f"http://www.ip3366.net/free/?stype=2&page={i}" for i in range(1, 6)
+            ("http", "https://raw.githubusercontent.com/mmpx12/proxy-list/master/http.txt"),
+            ("socks4", "https://raw.githubusercontent.com/mmpx12/proxy-list/master/socks4.txt"),
+            ("socks5", "https://raw.githubusercontent.com/mmpx12/proxy-list/master/socks5.txt"),
         ]
+        # Fallback: jsdelivr CDN acceleration
+        self.fallback_urls = [
+            ("http", "https://cdn.jsdelivr.net/gh/mmpx12/proxy-list@master/http.txt"),
+            ("socks4", "https://cdn.jsdelivr.net/gh/mmpx12/proxy-list@master/socks4.txt"),
+            ("socks5", "https://cdn.jsdelivr.net/gh/mmpx12/proxy-list@master/socks5.txt"),
+        ]
-    async def crawl(self) -> List[ProxyRaw]:
-        results = []
-        for url in self.urls:
-            html = await self.fetch(url, timeout=15)
-            if not html:
-                continue
-            soup = BeautifulSoup(html, "lxml")
-            list_table = soup.find("div", id="list")
-            if not list_table:
-                continue
-            table = list_table.find("table")
-            if not table:
-                continue
-            for row in table.find_all("tr"):
-                tds = row.find_all("td")
-                if len(tds) >= 5:
-                    ip = tds[0].get_text(strip=True)
-                    port = tds[1].get_text(strip=True)
-                    protocol = tds[4].get_text(strip=True).lower() if len(tds) > 4 else "http"
-                    if protocol not in VALID_PROTOCOLS:
-                        protocol = "http"
-                    if re.match(r"^\d+\.\d+\.\d+\.\d+$", ip) and port.isdigit():
-                        results.append(ProxyRaw(ip, int(port), protocol))
+    def _parse_htmls(self, htmls: List[str], url_mapping: List[tuple]) -> List[ProxyRaw]:
+        results: List[ProxyRaw] = []
+        for (protocol, _), html in zip(url_mapping, htmls):
+            if not html:
+                logger.warning(f"{self.display_name} {protocol.upper()} 返回空内容,可能网络受限或源已失效")
+                continue
+            count = 0
+            for line in html.splitlines():
+                line = line.strip()
+                if not line or ":" not in line:
+                    continue
+                parts = line.split(":")
+                if len(parts) < 2:
+                    continue
+                ip = parts[0].strip()
+                port_str = parts[1].strip()
+                if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
+                    continue
+                if not port_str.isdigit() or not (1 <= int(port_str) <= 65535):
+                    continue
+                final_protocol = protocol if protocol in VALID_PROTOCOLS else "http"
+                results.append(ProxyRaw(ip, int(port_str), final_protocol))
+                count += 1
+            if count:
+                logger.info(f"{self.display_name} {protocol.upper()} 解析完成,获取 {count} 个潜在代理")
+        return results
+    async def crawl(self) -> List[ProxyRaw]:
+        results: List[ProxyRaw] = []
+        # Request the primary sources sequentially so one stuck URL cannot stall the whole run
+        for protocol, url in self.urls:
+            html = await self.fetch(url, timeout=12)
+            if html:
+                results.extend(self._parse_htmls([html], [(protocol, url)]))
+        # If the primary sources all came back empty, try the fallback (also sequentially)
+        if not results:
+            logger.warning(f"{self.display_name} GitHub 主源全部返回空,尝试 jsdelivr fallback")
+            for protocol, url in self.fallback_urls:
+                html = await self.fetch(url, timeout=12)
+                if html:
+                    results.extend(self._parse_htmls([html], [(protocol, url)]))
         if results:
-            logger.info(f"{self.display_name} 解析完成,获取 {len(results)} 个潜在代理")
+            logger.info(f"{self.display_name} 总计解析完成,获取 {len(results)} 个潜在代理")
         else:
             logger.warning(f"{self.display_name} 未获取到任何代理")
         return results
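
The BaseHTTPPlugin changes (connection pool, concurrency control, exception logging, timeout strategy) named in the commit message are not part of the hunk above; the plugin only calls `self.fetch(url, timeout=12)`. As a rough illustration of that pattern, a minimal fetch helper assuming aiohttp is sketched below; the pool size, semaphore limit, and error handling are illustrative assumptions, not the project's actual implementation.

```python
# Hypothetical sketch of the fetch() helper the plugins call -- assumes aiohttp;
# limits and logging details are illustrative, not the project's real values.
import asyncio
from typing import Optional

import aiohttp

from app.core.log import logger  # same logger import the plugin file uses


class BaseHTTPPlugin:
    _semaphore = asyncio.Semaphore(10)                 # assumed concurrency cap
    _session: Optional[aiohttp.ClientSession] = None   # shared, pooled session

    async def _get_session(self) -> aiohttp.ClientSession:
        # Lazily create one session with a bounded connection pool.
        if self._session is None or self._session.closed:
            connector = aiohttp.TCPConnector(limit=20, ttl_dns_cache=300)
            type(self)._session = aiohttp.ClientSession(connector=connector)
        return self._session

    async def fetch(self, url: str, timeout: int = 15) -> Optional[str]:
        session = await self._get_session()
        try:
            async with self._semaphore:  # keep one slow source from hogging the pool
                async with session.get(
                    url, timeout=aiohttp.ClientTimeout(total=timeout)
                ) as resp:
                    if resp.status != 200:
                        logger.warning(f"{url} returned HTTP {resp.status}")
                        return None
                    return await resp.text()
        except asyncio.TimeoutError:
            logger.warning(f"{url} timed out after {timeout}s")
        except aiohttp.ClientError as exc:
            logger.warning(f"{url} request failed: {exc!r}")
        return None
```

Under this sketch, callers such as the crawl() method above treat a None return as an empty source and simply move on to the fallback URLs.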