Files
ProxyPool/app/plugins/proxylist_download.py
祀梦 4ef7931941 fix: 全面修复代码问题并优化架构
修复问题:
- 添加缺失的 httpx 依赖到 requirements.txt
- 修复前端批量删除参数格式与后端不匹配(数组->对象数组)
- 移除 app/api/main.py 中重复创建 app 的冗余代码
- 修复 Plugins.vue v-model 直接修改 store 状态的 Vue 警告
- 修复 README 端口/启动命令文档与实际配置不一致
- 修正 pytest.ini 过时配置 (asyncio_default_fixture_loop_scope)
- 修复 WebUI index.html 语言设置为 zh-CN
- 修复 .gitignore 错误忽略 tests/ 目录

后端优化:
- 修复调度器默认间隔从 5 秒改为 30 分钟,避免无节制验证
- 修复 validate_all_now 在调度器停止时无法执行的 bug
- 设置保存后热更新运行中调度器的验证间隔
- 将 update_score 优化为原子单事务 SQL,消除并发竞态
- 导出功能改为真正的流式分批读取(iter_batches),降低大导出内存占用
- ProxyResponse Schema 补齐 response_time_ms 字段
- 日志级别改为从配置动态读取,不再硬编码 INFO
- 清理 validator_service 中的冗余 try/finally 代码

插件健壮性:
- 修复 ip3366/ip89/kuaidaili/proxylist_download/speedx/yundaili/proxyscrape
  的端口范围检查和 IPv6 地址解析问题(改用 rsplit + 1-65535 校验)
- 修复 PluginService.list_plugins 并发竞争条件
- 修复 run_all_plugins 去重逻辑与数据库 UNIQUE 约束保持一致
- 修复 proxyscrape 异常时错误跳过 fallback 的 bug

测试:
- 新增 7 个插件解析单元测试
- 新增 update_score 自动删除和 iter_batches 流式读取测试
- 全部 74 个测试通过
2026-04-04 21:03:43 +08:00

98 lines
4.2 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

from typing import List
from app.core.plugin_system import ProxyRaw
from app.plugins.base import BaseHTTPPlugin
from app.core.log import logger
class ProxyListDownloadPlugin(BaseHTTPPlugin):
    """Plugin that harvests proxies from public GitHub-hosted proxy lists.

    Each source has one primary raw.githubusercontent.com URL plus mirror
    fallbacks (jsDelivr CDN and the ProxyScrape API) that are tried in order
    whenever the primary returns an empty body.
    """

    default_config = {"max_pages": 5}
    name = "proxylist_download"
    display_name = "ProxyListDownload"
    description = "从 GitHub 公开代理列表获取代理"

    def __init__(self):
        super().__init__()
        # Primary GitHub raw URL first; fallbacks are the jsDelivr CDN
        # mirror and the ProxyScrape API, tried only if the primary fails.
        self.sources = [
            {
                "primary": "https://raw.githubusercontent.com/komutan234/Proxy-List-Free/main/proxies/http.txt",
                "fallbacks": [
                    "https://cdn.jsdelivr.net/gh/komutan234/Proxy-List-Free@main/proxies/http.txt",
                    "https://api.proxyscrape.com/v2/?request=get&protocol=http&timeout=10000&country=all&ssl=all&anonymity=all",
                ],
                "protocol": "http",
            },
            {
                "primary": "https://raw.githubusercontent.com/komutan234/Proxy-List-Free/main/proxies/socks4.txt",
                "fallbacks": [
                    "https://cdn.jsdelivr.net/gh/komutan234/Proxy-List-Free@main/proxies/socks4.txt",
                    "https://api.proxyscrape.com/v2/?request=get&protocol=socks4&timeout=10000&country=all",
                ],
                "protocol": "socks4",
            },
            {
                "primary": "https://raw.githubusercontent.com/komutan234/Proxy-List-Free/main/proxies/socks5.txt",
                "fallbacks": [
                    "https://cdn.jsdelivr.net/gh/komutan234/Proxy-List-Free@main/proxies/socks5.txt",
                    "https://api.proxyscrape.com/v2/?request=get&protocol=socks5&timeout=10000&country=all",
                ],
                "protocol": "socks5",
            },
        ]

    def _detect_protocol(self, url: str) -> str:
        """Infer the proxy protocol from a source URL.

        NOTE: the "https://" scheme prefix must not be used as a signal --
        every source URL starts with it regardless of the proxy protocol
        the list actually serves.
        """
        for marker, proto in (("socks4", "socks4"), ("socks5", "socks5")):
            if marker in url:
                return proto
        # Both "/http.txt" and "protocol=http" map to "http", which is
        # also the default, so no separate check is required.
        return "http"

    def _parse_lines(self, html: str, protocol: str) -> List[ProxyRaw]:
        """Parse a plain-text ip:port list into ProxyRaw entries.

        Normalizes both CRLF and bare CR line endings, skips blank or
        malformed lines, and accepts only ports in the 1-65535 range.
        rpartition keeps colon-containing hosts (e.g. IPv6) intact.
        """
        normalized = html.replace("\r\n", "\n").replace("\r", "\n")
        parsed: List[ProxyRaw] = []
        for raw in normalized.split("\n"):
            candidate = raw.strip()
            if ":" not in candidate:  # also rejects empty lines
                continue
            host, _, port_text = candidate.rpartition(":")
            host = host.strip()
            port_text = port_text.strip()
            if not (host and port_text.isdigit()):
                continue
            port = int(port_text)
            if not 1 <= port <= 65535:
                continue
            try:
                parsed.append(ProxyRaw(host, port, protocol))
            except ValueError:
                # ProxyRaw may reject invalid addresses; drop the entry.
                continue
        return parsed

    async def crawl(self) -> List[ProxyRaw]:
        """Fetch all primary sources concurrently; for each empty primary,
        try its fallback mirrors in order until one yields content."""
        collected: List[ProxyRaw] = []
        bodies = await self.fetch_all(
            [src["primary"] for src in self.sources], timeout=15
        )
        for src, body in zip(self.sources, bodies):
            proto = src.get("protocol") or self._detect_protocol(src["primary"])
            if body and body.strip():
                collected.extend(self._parse_lines(body, proto))
                continue
            # Primary came back empty or whitespace-only -- walk fallbacks.
            logger.warning(f"{self.display_name} 主源返回空,尝试 fallback: {src['primary']}")
            for mirror in src["fallbacks"]:
                mirror_body = await self.fetch(mirror, timeout=15)
                if mirror_body and mirror_body.strip():
                    mirror_proto = src.get("protocol") or self._detect_protocol(mirror)
                    collected.extend(self._parse_lines(mirror_body, mirror_proto))
                    break
        if collected:
            logger.info(f"{self.display_name} 解析完成,获得 {len(collected)} 个潜在代理")
        return collected