feat: fpw plugins, validation/crawl perf, WS stats, test DB isolation

- Add Free_Proxy_Website-style fpw_* plugins and register them
- Per-plugin crawl timeout (crawl_timeout_seconds=120); remove global crawl_timeout setting
- Validator: distinguish connect vs. total timeout when saving; LRU-cache SOCKS sessions; drop a redundant semaphore (timeout sketch below)
- Validation handler reuses a single DB connection; batch upsert after each crawl; WorkerPool enqueues with put_nowait (upsert sketch below)
- Remove unused max_retries from settings API/UI; settings maintenance SQL + init_db cleanup of deprecated keys
- WebSocket-pushed dashboard stats; ProxyList pool_filter and API alignment (WS sketch below)
- POST /api/proxies/delete-one for IPv6-safe deletes; task polling stops on 404 (endpoint sketch below)
- pytest sets PROXYPOOL_DB_PATH=db/proxies.test.sqlite so tests do not wipe the production DB (conftest sketch below)
- .gitignore: explicit proxies.test.sqlite patterns; fix plugin_service ValidationException import

Made-with: Cursor
祀梦
2026-04-05 13:39:19 +08:00
parent 92c7fa19e2
commit 0131c8b408
63 changed files with 2331 additions and 531 deletions
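
Rough sketch of the per-plugin crawl timeout and the connect/total split in the validator; the wrapper name and settings handling here are illustrative, not the project's actual code.

# sketch: per-plugin crawl timeout and connect vs. total timeout (illustrative)
import asyncio
import aiohttp

CRAWL_TIMEOUT_SECONDS = 120  # per-plugin budget that replaces the old global crawl_timeout

async def run_plugin_crawl(plugin):
    """Give each plugin its own deadline so one slow source cannot stall the whole round."""
    try:
        return await asyncio.wait_for(plugin.crawl(), timeout=CRAWL_TIMEOUT_SECONDS)
    except asyncio.TimeoutError:
        return []

def validator_timeout(connect_s: float, total_s: float) -> aiohttp.ClientTimeout:
    # Saved settings map to separate budgets instead of one value used for both.
    return aiohttp.ClientTimeout(connect=connect_s, total=total_s)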
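
The single-connection batch upsert could look like the following; the proxies table schema and conflict target are assumptions, only the executemany-after-crawl pattern reflects the bullet above. put_nowait simply enqueues results without awaiting queue capacity.

# sketch: batch upsert over one connection after a crawl (schema is hypothetical)
import sqlite3
from typing import Iterable, Tuple

def upsert_proxies(conn: sqlite3.Connection, rows: Iterable[Tuple[str, int, str]]) -> None:
    # Assumes a UNIQUE index on (ip, port, protocol); column names are illustrative.
    conn.executemany(
        "INSERT INTO proxies (ip, port, protocol) VALUES (?, ?, ?) "
        "ON CONFLICT(ip, port, protocol) DO NOTHING",
        rows,
    )
    conn.commit()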
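
A minimal sketch of WebSocket-pushed dashboard stats, assuming a FastAPI-style endpoint; the route path, push interval, and stats payload are placeholders.

# sketch: push dashboard stats over a WebSocket (route and payload are assumptions)
import asyncio
from fastapi import FastAPI, WebSocket, WebSocketDisconnect

app = FastAPI()

async def collect_stats() -> dict:
    """Placeholder for whatever aggregates pool counts from the DB."""
    return {"total": 0, "valid": 0, "validating": 0}

@app.websocket("/ws/stats")
async def stats_ws(ws: WebSocket):
    await ws.accept()
    try:
        while True:
            await ws.send_json(await collect_stats())
            await asyncio.sleep(2)  # push every couple of seconds
    except WebSocketDisconnect:
        pass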
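
Why POST /api/proxies/delete-one instead of a path parameter: IPv6 literals contain colons (and sometimes a %zone suffix) that do not round-trip cleanly through a URL path segment, so the key travels in the JSON body. FastAPI-style sketch; the model fields and helper are assumptions.

# sketch: body-carrying delete endpoint that is safe for IPv6 literals
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class ProxyKey(BaseModel):
    ip: str        # may be "203.0.113.7" or "2001:db8::1"
    port: int
    protocol: str

async def delete_proxy(ip: str, port: int, protocol: str) -> bool:
    """Placeholder for the real DB delete."""
    return True

@app.post("/api/proxies/delete-one")
async def delete_one(key: ProxyKey):
    deleted = await delete_proxy(key.ip, key.port, key.protocol)
    return {"deleted": deleted}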
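
One plausible wiring for the test-database isolation: a conftest.py that sets PROXYPOOL_DB_PATH before the app settings are imported. Everything except the variable name and the db/proxies.test.sqlite path is an assumption.

# sketch: conftest.py pointing tests at a throwaway SQLite file
import os
import pathlib

# Set before any app module reads its settings, hence at import time.
os.environ.setdefault("PROXYPOOL_DB_PATH", "db/proxies.test.sqlite")

def pytest_sessionfinish(session, exitstatus):
    # Remove the scratch DB so each test session starts from an empty pool.
    pathlib.Path(os.environ["PROXYPOOL_DB_PATH"]).unlink(missing_ok=True)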


@@ -0,0 +1,56 @@
"""socks-proxy.net / sslproxies.org 表格README 参考 GetProxyFromSocks-proxy.py"""
import re
from typing import List
from app.core.plugin_system import ProxyRaw
from app.plugins.base import BaseHTTPPlugin
from app.core.log import logger
class FpwSocksSslProxyPlugin(BaseHTTPPlugin):
name = "fpw_socks_ssl_proxy"
display_name = "Socks-Proxy / SSLProxies"
description = "socks-proxy.net 与 sslproxies.org 首页表格HTTP/HTTPS 列表)"
def __init__(self):
super().__init__()
self.max_concurrency = 6
# 与 sslproxies 同模板的镜像站较多socks-proxy 在部分网络下不稳定,多源提高成功率
self.urls = [
"https://www.sslproxies.org/",
"https://free-proxy-list.net/",
"https://www.us-proxy.org/",
"https://www.socks-proxy.net/",
]
def _parse_page(self, html: str, default_protocol: str) -> List[ProxyRaw]:
results = []
pattern = re.compile(
r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})</td>\s*<td[^>]*>\s*(\d+)",
re.I,
)
for ip, port in pattern.findall(html):
if port.isdigit() and 1 <= int(port) <= 65535:
try:
results.append(ProxyRaw(ip, int(port), default_protocol))
except ValueError:
continue
return results
async def crawl(self) -> List[ProxyRaw]:
results: List[ProxyRaw] = []
htmls = await self.fetch_all(self.urls, timeout=12, retries=1)
for url, html in zip(self.urls, htmls):
if not html:
continue
if "socks-proxy" in url:
proto = "socks4"
else:
proto = "http"
batch = self._parse_page(html, proto)
results.extend(batch)
if batch:
logger.info(f"{self.display_name} {url}: {len(batch)}")
if results:
logger.info(f"{self.display_name} 合计 {len(results)}")
return results
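
A quick stand-alone smoke test of the plugin (the import path is a guess based on the plugin package shown in the diff; registration via plugin_service is not reproduced here):

# sketch: run the plugin outside the pool for a quick smoke test
import asyncio
from app.plugins.fpw_socks_ssl_proxy import FpwSocksSslProxyPlugin  # path is an assumption

async def main() -> None:
    plugin = FpwSocksSslProxyPlugin()
    proxies = await plugin.crawl()
    print(f"{plugin.display_name}: {len(proxies)} proxies")

if __name__ == "__main__":
    asyncio.run(main())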