Files
ProxyPool/app/plugins/kuaidaili.py
祀梦 4ef7931941 fix: 全面修复代码问题并优化架构
修复问题:
- 添加缺失的 httpx 依赖到 requirements.txt
- 修复前端批量删除参数格式与后端不匹配(数组->对象数组)
- 移除 app/api/main.py 中重复创建 app 的冗余代码
- 修复 Plugins.vue v-model 直接修改 store 状态的 Vue 警告
- 修复 README 端口/启动命令文档与实际配置不一致
- 修正 pytest.ini 过时配置 (asyncio_default_fixture_loop_scope)
- 修复 WebUI index.html 语言设置为 zh-CN
- 修复 .gitignore 错误忽略 tests/ 目录

后端优化:
- 修复调度器默认间隔从 5 秒改为 30 分钟,避免无节制验证
- 修复 validate_all_now 在调度器停止时无法执行的 bug
- 设置保存后热更新运行中调度器的验证间隔
- 将 update_score 优化为原子单事务 SQL,消除并发竞态
- 导出功能改为真正的流式分批读取(iter_batches),降低大导出内存占用
- ProxyResponse Schema 补齐 response_time_ms 字段
- 日志级别改为从配置动态读取,不再硬编码 INFO
- 清理 validator_service 中的冗余 try/finally 代码

插件健壮性:
- 修复 ip3366/ip89/kuaidaili/proxylist_download/speedx/yundaili/proxyscrape
  的端口范围检查和 IPv6 地址解析问题(改用 rsplit + 1-65535 校验)
- 修复 PluginService.list_plugins 并发竞争条件
- 修复 run_all_plugins 去重逻辑与数据库 UNIQUE 约束保持一致
- 修复 proxyscrape 异常时错误跳过 fallback 的 bug

测试:
- 新增 7 个插件解析单元测试
- 新增 update_score 自动删除和 iter_batches 流式读取测试
- 全部 74 个测试通过
2026-04-04 21:03:43 +08:00

74 lines
2.8 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

import re
import asyncio
import random
from typing import List
from bs4 import BeautifulSoup
from app.core.plugin_system import ProxyRaw
from app.plugins.base import BaseHTTPPlugin
from app.core.log import logger
# Protocol strings accepted verbatim from the listing table's protocol
# column; any other value falls back to "http" during parsing in
# KuaiDaiLiPlugin.crawl.
VALID_PROTOCOLS = ("http", "https", "socks4", "socks5")
class KuaiDaiLiPlugin(BaseHTTPPlugin):
    """Crawl free proxies from the kuaidaili.com free-proxy listing pages.

    Only the first page of each listing (inha/intr) is fetched, and the
    session is warmed up on the home page first, to reduce the chance of
    triggering the site's anti-crawling measures.
    """

    default_config = {"max_pages": 5}
    name = "kuaidaili"
    display_name = "快代理"
    description = "从快代理网站爬取免费代理"

    def __init__(self):
        super().__init__()
        # Fewer pages lowers the odds of being blocked while still
        # reliably yielding some data.
        self.urls = [
            "https://www.kuaidaili.com/free/inha/1/",
            "https://www.kuaidaili.com/free/intr/1/",
        ]

    def get_headers(self) -> dict:
        """Return browser-like request headers layered on the base headers.

        Returns:
            dict: header name -> value, mimicking a same-origin browser
            navigation to make the crawler less conspicuous.
        """
        headers = super().get_headers()
        headers["Referer"] = "https://www.kuaidaili.com/free/inha/"
        headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
        headers["Accept-Encoding"] = "gzip, deflate, br"
        headers["Accept-Language"] = "zh-CN,zh;q=0.9,en;q=0.8"
        headers["Sec-Fetch-Dest"] = "document"
        headers["Sec-Fetch-Mode"] = "navigate"
        headers["Sec-Fetch-Site"] = "same-origin"
        headers["Upgrade-Insecure-Requests"] = "1"
        return headers

    @staticmethod
    def _is_valid_ipv4(ip: str) -> bool:
        """Return True when *ip* is a dotted-quad IPv4 address (octets 0-255).

        Replaces the previous bare ``\\d+.\\d+.\\d+.\\d+`` regex check,
        which accepted impossible addresses such as ``999.1.1.1``.
        """
        parts = ip.split(".")
        if len(parts) != 4:
            return False
        return all(p.isdigit() and 0 <= int(p) <= 255 for p in parts)

    async def crawl(self) -> List[ProxyRaw]:
        """Fetch and parse the free-proxy tables.

        Returns:
            List[ProxyRaw]: parsed proxy candidates; may be empty when every
            page fails to load or serves the anti-crawling page instead of
            the table.
        """
        results: List[ProxyRaw] = []
        # Warm up the session on the home page to collect cookies first,
        # lowering the chance of being served the anti-crawling page.
        await self.fetch("https://www.kuaidaili.com/", timeout=10)
        await asyncio.sleep(random.uniform(2, 4))
        # Fetch the listing pages sequentially, pausing a randomized 5-8 s
        # BETWEEN requests only (no wasted sleep after the final page).
        for index, url in enumerate(self.urls):
            if index > 0:
                await asyncio.sleep(random.uniform(5, 8))
            html = await self.fetch(url, timeout=10)
            if not html:
                continue
            soup = BeautifulSoup(html, "lxml")
            table = soup.find("table")
            if not table:
                logger.warning(f"{self.display_name} 未能找到表格,可能是触发了反爬: {url}")
                continue
            for row in table.find_all("tr"):
                tds = row.find_all("td")
                # Header rows (th-only) and malformed rows have no <td>s
                # or too few columns; skip them.
                if len(tds) < 5:
                    continue
                ip = tds[0].get_text(strip=True)
                port = tds[1].get_text(strip=True)
                protocol = tds[4].get_text(strip=True).lower()
                if protocol not in VALID_PROTOCOLS:
                    protocol = "http"
                if self._is_valid_ipv4(ip) and port.isdigit() and 1 <= int(port) <= 65535:
                    try:
                        results.append(ProxyRaw(ip, int(port), protocol))
                    except ValueError:
                        # ProxyRaw may apply its own validation; a rejected
                        # entry should not abort the whole page.
                        continue
        if results:
            logger.info(f"{self.display_name} 解析完成,获取 {len(results)} 个潜在代理")
        return results