Full architecture refactor: introduce a layered architecture and a highly extensible plugin system

Backend refactor:
- Introduce a layered architecture: API Routes -> Services -> Repositories -> Infrastructure
- Remove all global singletons in favor of FastAPI dependency injection (see the sketch after this list)
- Add an api/ directory with routes split by concern (proxies, plugins, scheduler, settings, stats)
- Add a services/ business-logic layer: ProxyService, PluginService, SchedulerService, ValidatorService, SettingsService
- Add a repositories/ data-access layer: ProxyRepository, SettingsRepository, PluginSettingsRepository
- Add a models/ layer: Pydantic schemas + domain models
- Rewrite core/config.py to manage configuration with Pydantic Settings
- Add core/db.py: asynccontextmanager-based connection management with database-migration support
- Add core/exceptions.py: a unified business-exception hierarchy
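
As a rough editorial sketch (not code from this commit): the Route -> Service -> Repository chain wired through FastAPI dependency injection might look like the following. ProxyService and ProxyRepository are names from the lists above; get_connection, get_proxy_service, and all method bodies are hypothetical stand-ins.

# Minimal sketch of the layering, assuming constructor injection end to end.
from fastapi import APIRouter, Depends

router = APIRouter(prefix="/proxies")

class ProxyRepository:
    def __init__(self, conn) -> None:
        self.conn = conn  # injected connection, never a global

    async def list_all(self) -> list:
        return []  # would query the proxies table via self.conn

class ProxyService:
    def __init__(self, repo: ProxyRepository) -> None:
        self.repo = repo

    async def list_proxies(self) -> list:
        return await self.repo.list_all()

async def get_connection():
    # placeholder: core/db.py would yield a pooled connection here
    yield None

def get_proxy_service(conn=Depends(get_connection)) -> ProxyService:
    return ProxyService(ProxyRepository(conn))

@router.get("")
async def list_proxies(service: ProxyService = Depends(get_proxy_service)):
    return await service.list_proxies()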

Plugin system refactor (the core of this change):
- Add core/plugin_system/: BaseCrawlerPlugin + PluginRegistry
- Use explicit registration (decorator + plugins/__init__.py): type-safe and test-friendly
- Add plugins/base.py: BaseHTTPPlugin, a shared base class for HTTP crawlers
- Migrate all 7 plugins to the new architecture (fate0, proxylist_download, ip3366, ip89, kuaidaili, speedx, yundaili)
- Persist plugin state in the plugin_settings table (a hedged sketch of the plugin-system interfaces follows this list)
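
core/plugin_system itself is not part of this excerpt; the sketch below is inferred from how the plugins further down use it (ProxyRaw is constructed positionally as ProxyRaw(ip, port, protocol), and registry.register(...) accepts a plugin class). Everything beyond those two observed facts is an assumption.

# Hedged sketch of core/plugin_system, not the actual module.
from dataclasses import dataclass
from typing import Dict, List, Type

@dataclass
class ProxyRaw:
    ip: str
    port: int
    protocol: str

class BaseCrawlerPlugin:
    # class-level metadata, as set by the migrated plugins below
    name: str = ""
    display_name: str = ""
    description: str = ""

    async def crawl(self) -> List[ProxyRaw]:
        raise NotImplementedError

class PluginRegistry:
    def __init__(self) -> None:
        self._plugins: Dict[str, Type[BaseCrawlerPlugin]] = {}

    def register(self, plugin_cls: Type[BaseCrawlerPlugin]) -> Type[BaseCrawlerPlugin]:
        # returning the class lets register() double as a decorator
        self._plugins[plugin_cls.name] = plugin_cls
        return plugin_cls

    def get(self, name: str) -> Type[BaseCrawlerPlugin]:  # assumed accessor
        return self._plugins[name]

registry = PluginRegistry()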

Task scheduling refactor:
- Add core/tasks/queue.py: ValidationQueue + WorkerPool
- Decouple crawling from validation: crawlers only crawl; proxies go onto a queue and workers validate them asynchronously (see the sketch after this list)
- The scheduler periodically pulls stored proxies from the database and feeds them into the validation queue in batches
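
A minimal sketch of that decoupling, assuming core/tasks/queue.py builds on asyncio.Queue; the class internals and the validate callback are illustrative, not the actual implementation.

import asyncio

class ValidationQueue:
    def __init__(self, maxsize: int = 10_000) -> None:
        self._queue: asyncio.Queue = asyncio.Queue(maxsize=maxsize)

    async def submit(self, proxy) -> None:
        await self._queue.put(proxy)  # crawlers only enqueue, never validate

    async def get(self):
        return await self._queue.get()

class WorkerPool:
    def __init__(self, queue: ValidationQueue, validate, size: int = 20) -> None:
        self.queue, self.validate, self.size = queue, validate, size

    async def run(self) -> None:
        # each worker pulls proxies and validates them independently;
        # runs until the surrounding task is cancelled
        await asyncio.gather(*(self._worker() for _ in range(self.size)))

    async def _worker(self) -> None:
        while True:
            proxy = await self.queue.get()
            await self.validate(proxy)  # e.g. a ValidatorService check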

Frontend changes:
- Add a frontend/src/services/ layer that splits out API-call logic
- Update stores/ and views/ to go through the service layer
- API compatibility is preserved, so pages need no major changes

Other:
- Add main.py as the new entry point
- Add DESIGN.md, an architecture design document
- Update requirements.txt to add pydantic-settings
Author: 祀梦
Date: 2026-04-02 11:55:05 +08:00
Parent: a79f78b338
Commit: 209a744d94
56 changed files with 2891 additions and 2095 deletions

plugins/__init__.py (new file)

@@ -0,0 +1,19 @@
"""插件包 - 在这里显式注册所有爬虫插件"""
from core.plugin_system import registry
from .fate0 import Fate0Plugin
from .proxylist_download import ProxyListDownloadPlugin
from .ip3366 import Ip3366Plugin
from .ip89 import Ip89Plugin
from .kuaidaili import KuaiDaiLiPlugin
from .speedx import SpeedXPlugin
from .yundaili import YunDaiLiPlugin
# 显式注册所有插件
registry.register(Fate0Plugin)
registry.register(ProxyListDownloadPlugin)
registry.register(Ip3366Plugin)
registry.register(Ip89Plugin)
registry.register(KuaiDaiLiPlugin)
registry.register(SpeedXPlugin)
registry.register(YunDaiLiPlugin)
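
A hypothetical consumer of this package, relying on the registry sketch given earlier; registry.get() is an assumed accessor, invented for illustration.

# Importing `plugins` runs the registry.register(...) calls above exactly once.
import plugins  # noqa: F401 -- imported for its registration side effect
from core.plugin_system import registry

plugin_cls = registry.get("fate0")  # assumed accessor, see the earlier sketch
plugin = plugin_cls()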

plugins/base.py (new file)

@@ -0,0 +1,52 @@
"""通用 HTTP 爬虫基类 - 为基于 HTTP 请求的插件提供封装"""
import random
import asyncio
import aiohttp
from typing import List
from core.plugin_system import BaseCrawlerPlugin
class BaseHTTPPlugin(BaseCrawlerPlugin):
"""基于 HTTP 的爬虫插件基类"""
def __init__(self):
super().__init__()
self.user_agents = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0",
]
self.urls: List[str] = []
self.current_url: str = ""
def get_headers(self) -> dict:
return {
"User-Agent": random.choice(self.user_agents),
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Connection": "keep-alive",
}
async def fetch(self, url: str, timeout: float = 10.0, retries: int = 3) -> str:
"""异步抓取指定 URL 的 HTML 内容"""
headers = self.get_headers()
async with aiohttp.ClientSession(headers=headers) as session:
for attempt in range(retries):
try:
async with session.get(
url, timeout=aiohttp.ClientTimeout(total=timeout)
) as response:
if response.status == 200:
content = await response.read()
encoding = response.get_encoding()
if encoding == "utf-8" or not encoding:
try:
return content.decode("utf-8")
except UnicodeDecodeError:
return content.decode("gbk", errors="ignore")
return content.decode(encoding, errors="ignore")
except Exception:
pass
await asyncio.sleep(random.uniform(1, 3))
return ""

plugins/fate0.py

@@ -1,66 +1,38 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from core.crawler import BasePlugin
-from core.log import logger
 import json
-import asyncio
+from typing import List
+from core.plugin_system import ProxyRaw
+from plugins.base import BaseHTTPPlugin
+from core.log import logger
+class Fate0Plugin(BaseHTTPPlugin):
+    name = "fate0"
+    display_name = "Fate0聚合源"
+    description = "Continuously updated, high-quality aggregated proxy list on GitHub"
-class Fate0Plugin(BasePlugin):
     def __init__(self):
         super().__init__()
-        self.name = "Fate0聚合源"
-        # A continuously updated, high-quality aggregated proxy list
         self.urls = ["https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list"]
-    async def parse(self, html):
-        if not html:
-            return
-        count = 0
-        # fate0's data format is one JSON object per line
-        for line in html.split('\n'):
-            if not line.strip():
+    async def crawl(self) -> List[ProxyRaw]:
+        results = []
+        for url in self.urls:
+            html = await self.fetch(url, timeout=30)
+            if not html:
                 continue
-            try:
-                data = json.loads(line)
-                ip = data.get('host')
-                port = data.get('port')
-                protocol = data.get('type', 'http')
-                # Normalize the protocol
-                protocol = protocol.lower().strip()
-                if protocol not in ('http', 'https', 'socks4', 'socks5'):
-                    protocol = 'http'
-                if ip and port:
-                    yield ip, int(port), protocol
-                    count += 1
-            except Exception:
-                continue
-        if count > 0:
-            logger.info(f"{self.name} parsing done, found {count} candidate proxies")
-if __name__ == "__main__":
-    async def test_plugin():
-        plugin = Fate0Plugin()
-        print(f"========== Testing {plugin.name} ==========")
-        print(f"Target URL count: {len(plugin.urls)}")
-        print(f"Starting crawl...\n")
-        proxies = await plugin.run()
-        print(f"\n========== Crawl results ==========")
-        print(f"Fetched {len(proxies)} proxies in total:")
-        print("-" * 60)
-        for idx, (ip, port, protocol) in enumerate(proxies, 1):
-            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
-        print("-" * 60)
-        print(f"Done! {len(proxies)} proxies~")
-    asyncio.run(test_plugin())
+            for line in html.split("\n"):
+                line = line.strip()
+                if not line:
+                    continue
+                try:
+                    data = json.loads(line)
+                    ip = data.get("host")
+                    port = data.get("port")
+                    protocol = data.get("type", "http")
+                    if ip and port:
+                        results.append(ProxyRaw(ip, int(port), protocol))
+                except Exception:
+                    continue
+        if results:
+            logger.info(f"{self.display_name} parsing done, found {len(results)} candidate proxies")
+        return results

plugins/ip3366.py

@@ -1,74 +1,51 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from core.crawler import BasePlugin
-from core.log import logger
-from bs4 import BeautifulSoup
 import re
-import asyncio
+from typing import List
+from bs4 import BeautifulSoup
+from core.plugin_system import ProxyRaw
+from plugins.base import BaseHTTPPlugin
+from core.log import logger
-VALID_PROTOCOLS = ['http', 'https', 'socks4', 'socks5']
+VALID_PROTOCOLS = ("http", "https", "socks4", "socks5")
+class Ip3366Plugin(BaseHTTPPlugin):
+    name = "ip3366"
+    display_name = "IP3366"
+    description = "Crawls free proxies from the IP3366 site"
-class Ip3366Plugin(BasePlugin):
     def __init__(self):
         super().__init__()
-        self.name = "IP3366"
-        # Crawl the first 5 pages of elite and regular proxies
         self.urls = [
             f"http://www.ip3366.net/free/?stype=1&page={i}" for i in range(1, 6)
         ] + [
             f"http://www.ip3366.net/free/?stype=2&page={i}" for i in range(1, 6)
         ]
-    async def parse(self, html):
-        if not html:
-            return
-        soup = BeautifulSoup(html, 'lxml')
-        list_div = soup.find('div', id='list')
-        if not list_div: return
-        table = list_div.find('table')
-        if not table: return
+    async def crawl(self) -> List[ProxyRaw]:
+        results = []
+        for url in self.urls:
+            html = await self.fetch(url, timeout=15)
+            if not html:
+                continue
+            soup = BeautifulSoup(html, "lxml")
+            list_div = soup.find("div", id="list")
+            if not list_div:
+                continue
+            table = list_div.find("table")
+            if not table:
+                continue
-        rows = table.find_all('tr')
-        count = 0
-        for row in rows:
-            tds = row.find_all('td')
-            if len(tds) >= 5:
-                ip = tds[0].get_text(strip=True)
-                port = tds[1].get_text(strip=True)
-                protocol = tds[4].get_text(strip=True).lower() if len(tds) > 4 else 'http'
-                if protocol not in VALID_PROTOCOLS:
-                    protocol = 'http'
-                if re.match(r'^\d+\.\d+\.\d+\.\d+$', ip) and port.isdigit():
-                    yield ip, int(port), protocol
-                    count += 1
-        if count > 0:
-            logger.info(f"{self.name} parsing done, found {count} candidate proxies")
+            for row in table.find_all("tr"):
+                tds = row.find_all("td")
+                if len(tds) >= 5:
+                    ip = tds[0].get_text(strip=True)
+                    port = tds[1].get_text(strip=True)
+                    protocol = tds[4].get_text(strip=True).lower() if len(tds) > 4 else "http"
+                    if protocol not in VALID_PROTOCOLS:
+                        protocol = "http"
+                    if re.match(r"^\d+\.\d+\.\d+\.\d+$", ip) and port.isdigit():
+                        results.append(ProxyRaw(ip, int(port), protocol))
-if __name__ == "__main__":
-    async def test_plugin():
-        plugin = Ip3366Plugin()
-        print(f"========== Testing {plugin.name} ==========")
-        print(f"Target URL count: {len(plugin.urls)}")
-        print(f"Starting crawl...\n")
-        proxies = await plugin.run()
-        print(f"\n========== Crawl results ==========")
-        print(f"Fetched {len(proxies)} proxies in total:")
-        print("-" * 60)
-        for idx, (ip, port, protocol) in enumerate(proxies, 1):
-            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
-        print("-" * 60)
-        print(f"Done! {len(proxies)} proxies~")
-    asyncio.run(test_plugin())
+        if results:
+            logger.info(f"{self.display_name} parsing done, found {len(results)} candidate proxies")
+        return results

plugins/ip89.py

@@ -1,69 +1,39 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from core.crawler import BasePlugin
-from core.log import logger
-from bs4 import BeautifulSoup
 import re
-import asyncio
+from typing import List
+from bs4 import BeautifulSoup
+from core.plugin_system import ProxyRaw
+from plugins.base import BaseHTTPPlugin
+from core.log import logger
+class Ip89Plugin(BaseHTTPPlugin):
+    name = "ip89"
+    display_name = "89免费代理"
+    description = "Crawls free proxies from 89ip.cn"
-class Ip89Plugin(BasePlugin):
     def __init__(self):
         super().__init__()
-        self.name = "89免费代理"
-        # Crawl the first 5 pages
-        self.urls = [
-            f"https://www.89ip.cn/index_{i}.html" for i in range(1, 6)
-        ]
+        self.urls = [f"https://www.89ip.cn/index_{i}.html" for i in range(1, 6)]
-    async def parse(self, html):
-        """
-        Parse an 89ip page
-        """
-        if not html:
-            return
-        soup = BeautifulSoup(html, 'lxml')
-        table = soup.find('table', class_='layui-table')
-        if not table:
-            return
+    async def crawl(self) -> List[ProxyRaw]:
+        results = []
+        for url in self.urls:
+            html = await self.fetch(url, timeout=15)
+            if not html:
+                continue
+            soup = BeautifulSoup(html, "lxml")
+            table = soup.find("table", class_="layui-table")
+            if not table:
+                continue
-        rows = table.find_all('tr')
-        count = 0
-        for row in rows:
-            tds = row.find_all('td')
-            if len(tds) >= 2:
-                ip = tds[0].get_text(strip=True)
-                port = tds[1].get_text(strip=True)
-                # 89ip usually omits the protocol; default to http
-                protocol = 'http'
-                if re.match(r'^\d+\.\d+\.\d+\.\d+$', ip) and port.isdigit():
-                    yield ip, int(port), protocol
-                    count += 1
-        if count > 0:
-            logger.info(f"{self.name} parsing done, found {count} candidate proxies")
+            for row in table.find_all("tr"):
+                tds = row.find_all("td")
+                if len(tds) >= 2:
+                    ip = tds[0].get_text(strip=True)
+                    port = tds[1].get_text(strip=True)
+                    if re.match(r"^\d+\.\d+\.\d+\.\d+$", ip) and port.isdigit():
+                        results.append(ProxyRaw(ip, int(port), "http"))
-if __name__ == "__main__":
-    async def test_plugin():
-        plugin = Ip89Plugin()
-        print(f"========== Testing {plugin.name} ==========")
-        print(f"Target URL count: {len(plugin.urls)}")
-        print(f"Starting crawl...\n")
-        proxies = await plugin.run()
-        print(f"\n========== Crawl results ==========")
-        print(f"Fetched {len(proxies)} proxies in total:")
-        print("-" * 60)
-        for idx, (ip, port, protocol) in enumerate(proxies, 1):
-            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
-        print("-" * 60)
-        print(f"Done! {len(proxies)} proxies~")
-    asyncio.run(test_plugin())
+        if results:
+            logger.info(f"{self.display_name} parsing done, found {len(results)} candidate proxies")
+        return results

plugins/kuaidaili.py

@@ -1,79 +1,49 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from core.crawler import BasePlugin
-from core.log import logger
-from bs4 import BeautifulSoup
 import re
-import asyncio
+from typing import List
+from bs4 import BeautifulSoup
+from core.plugin_system import ProxyRaw
+from plugins.base import BaseHTTPPlugin
+from core.log import logger
-VALID_PROTOCOLS = ['http', 'https', 'socks4', 'socks5']
+VALID_PROTOCOLS = ("http", "https", "socks4", "socks5")
+class KuaiDaiLiPlugin(BaseHTTPPlugin):
+    name = "kuaidaili"
+    display_name = "快代理"
+    description = "Crawls free proxies from the KuaiDaiLi site"
-class KuaiDaiLiPlugin(BasePlugin):
     def __init__(self):
         super().__init__()
-        self.name = "快代理"
-        # Crawl the first 10 pages of domestic elite and regular proxies
         self.urls = [
             f"https://www.kuaidaili.com/free/inha/{i}/" for i in range(1, 11)
         ] + [
             f"https://www.kuaidaili.com/free/intr/{i}/" for i in range(1, 11)
         ]
-    async def parse(self, html):
-        """
-        Parse a KuaiDaiLi page
-        """
-        if not html:
-            return
-        soup = BeautifulSoup(html, 'lxml')
-        # KuaiDaiLi's table lives inside a tbody
-        table = soup.find('table')
-        if not table:
-            # Try regex matching in case the data is obfuscated or specially rendered
-            logger.warning(f"{self.name} could not find the table; anti-crawling or a layout change may be the cause")
-            return
+    async def crawl(self) -> List[ProxyRaw]:
+        results = []
+        for url in self.urls:
+            html = await self.fetch(url, timeout=15)
+            if not html:
+                continue
+            soup = BeautifulSoup(html, "lxml")
+            table = soup.find("table")
+            if not table:
+                logger.warning(f"{self.display_name} could not find the table; anti-crawling may have been triggered")
+                continue
-        rows = table.find_all('tr')
-        count = 0
-        for row in rows:
-            tds = row.find_all('td')
-            if len(tds) >= 5:
-                ip = tds[0].get_text(strip=True)
-                port = tds[1].get_text(strip=True)
-                protocol = tds[4].get_text(strip=True).lower() if len(tds) > 4 else 'http'
-                if protocol not in VALID_PROTOCOLS:
-                    protocol = 'http'
-                # Basic format check
-                if re.match(r'^\d+\.\d+\.\d+\.\d+$', ip) and port.isdigit():
-                    yield ip, int(port), protocol
-                    count += 1
-        if count > 0:
-            logger.info(f"{self.name} parsing done, found {count} candidate proxies")
+            for row in table.find_all("tr"):
+                tds = row.find_all("td")
+                if len(tds) >= 5:
+                    ip = tds[0].get_text(strip=True)
+                    port = tds[1].get_text(strip=True)
+                    protocol = tds[4].get_text(strip=True).lower() if len(tds) > 4 else "http"
+                    if protocol not in VALID_PROTOCOLS:
+                        protocol = "http"
+                    if re.match(r"^\d+\.\d+\.\d+\.\d+$", ip) and port.isdigit():
+                        results.append(ProxyRaw(ip, int(port), protocol))
-if __name__ == "__main__":
-    async def test_plugin():
-        plugin = KuaiDaiLiPlugin()
-        print(f"========== Testing {plugin.name} ==========")
-        print(f"Target URL count: {len(plugin.urls)}")
-        print(f"Starting crawl...\n")
-        proxies = await plugin.run()
-        print(f"\n========== Crawl results ==========")
-        print(f"Fetched {len(proxies)} proxies in total:")
-        print("-" * 60)
-        for idx, (ip, port, protocol) in enumerate(proxies, 1):
-            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
-        print("-" * 60)
-        print(f"Done! {len(proxies)} proxies~")
-    asyncio.run(test_plugin())
+        if results:
+            logger.info(f"{self.display_name} parsing done, found {len(results)} candidate proxies")
+        return results

plugins/proxylist_download.py

@@ -1,75 +1,55 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from core.crawler import BasePlugin
+from typing import List
+from core.plugin_system import ProxyRaw
+from plugins.base import BaseHTTPPlugin
 from core.log import logger
-import asyncio
-class ProxyListDownloadPlugin(BasePlugin):
+class ProxyListDownloadPlugin(BaseHTTPPlugin):
+    name = "proxylist_download"
+    display_name = "ProxyListDownload"
+    description = "Fetches proxies from the ProxyListDownload API"
     def __init__(self):
         super().__init__()
-        self.name = "ProxyListDownload"
         self.urls = [
             "https://www.proxy-list.download/api/v1/get?type=http",
             "https://www.proxy-list.download/api/v1/get?type=https",
             "https://www.proxy-list.download/api/v1/get?type=socks4",
-            "https://www.proxy-list.download/api/v1/get?type=socks5"
+            "https://www.proxy-list.download/api/v1/get?type=socks5",
         ]
-    async def parse(self, html):
-        if not html:
-            return
-        lines = html.split('\r\n')
-        if len(lines) <= 1:
-            lines = html.split('\n')
-        count = 0
-        # Decide the protocol type from the URL
-        if 'type=socks4' in self.current_url:
-            protocol = 'socks4'
-        elif 'type=socks5' in self.current_url:
-            protocol = 'socks5'
-        elif 'type=https' in self.current_url:
-            protocol = 'https'
-        else:
-            protocol = 'http'
-        for line in lines:
-            line = line.strip()
-            if not line:
+    async def crawl(self) -> List[ProxyRaw]:
+        results = []
+        for url in self.urls:
+            html = await self.fetch(url, timeout=30)
+            if not html:
                 continue
-            if ':' in line:
-                parts = line.split(':')
+            # Decide the protocol from the URL
+            if "type=socks4" in url:
+                protocol = "socks4"
+            elif "type=socks5" in url:
+                protocol = "socks5"
+            elif "type=https" in url:
+                protocol = "https"
+            else:
+                protocol = "http"
+            lines = html.split("\r\n")
+            if len(lines) <= 1:
+                lines = html.split("\n")
+            for line in lines:
+                line = line.strip()
+                if not line or ":" not in line:
+                    continue
+                parts = line.split(":")
                 if len(parts) >= 2:
-                    ip = parts[0]
-                    port = parts[1]
-                    yield ip, int(port), protocol
-                    count += 1
-        if count > 0:
-            logger.info(f"{self.name} parsing done, got {count} candidate proxies from {self.current_url}")
+                    ip = parts[0].strip()
+                    port = parts[1].strip()
+                    if ip and port.isdigit():
+                        results.append(ProxyRaw(ip, int(port), protocol))
-if __name__ == "__main__":
-    async def test_plugin():
-        plugin = ProxyListDownloadPlugin()
-        print(f"========== Testing {plugin.name} ==========")
-        print(f"Target URL count: {len(plugin.urls)}")
-        print(f"Starting crawl...\n")
-        proxies = await plugin.run()
-        print(f"\n========== Crawl results ==========")
-        print(f"Fetched {len(proxies)} proxies in total:")
-        print("-" * 60)
-        for idx, (ip, port, protocol) in enumerate(proxies, 1):
-            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
-        print("-" * 60)
-        print(f"Done! {len(proxies)} proxies~")
-    asyncio.run(test_plugin())
+        if results:
+            logger.info(f"{self.display_name} parsing done, found {len(results)} candidate proxies")
+        return results

plugins/speedx.py

@@ -1,78 +1,51 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from core.crawler import BasePlugin
-from core.log import logger
 import re
-import asyncio
+from typing import List
+from core.plugin_system import ProxyRaw
+from plugins.base import BaseHTTPPlugin
+from core.log import logger
+class SpeedXPlugin(BaseHTTPPlugin):
+    name = "speedx"
+    display_name = "SpeedX代理源"
+    description = "Fetches SOCKS proxy lists from the SpeedX GitHub repo"
-class SpeedXPlugin(BasePlugin):
     def __init__(self):
         super().__init__()
-        self.name = "SpeedX代理源"
         self.urls = [
             "https://raw.githubusercontent.com/TheSpeedX/SOCKS-List/master/http.txt",
             "https://raw.githubusercontent.com/TheSpeedX/SOCKS-List/master/socks4.txt",
-            "https://raw.githubusercontent.com/TheSpeedX/SOCKS-List/master/socks5.txt"
+            "https://raw.githubusercontent.com/TheSpeedX/SOCKS-List/master/socks5.txt",
         ]
-    async def parse(self, html):
-        if not html:
-            return
-        lines = html.split('\n')
-        count = 0
-        for line in lines:
-            line = line.strip()
-            if not line:
+    async def crawl(self) -> List[ProxyRaw]:
+        results = []
+        for url in self.urls:
+            html = await self.fetch(url, timeout=30)
+            if not html:
                 continue
-            if ':' in line:
-                parts = line.split(':')
+            # Decide the protocol from the URL
+            protocol = "http"
+            if "socks5" in url:
+                protocol = "socks5"
+            elif "socks4" in url:
+                protocol = "socks4"
+            for line in html.split("\n"):
+                line = line.strip()
+                if not line or ":" not in line:
+                    continue
+                parts = line.split(":")
                 if len(parts) >= 2:
                     ip = parts[0].strip()
                     port = parts[1].strip()
-                    # Validate the IP address format
-                    if not re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip):
+                    if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
                         continue
-                    # Validate that the port is numeric
                     if not port.isdigit() or not (1 <= int(port) <= 65535):
                         continue
+                    results.append(ProxyRaw(ip, int(port), protocol))
-                    # Decide the protocol from the URL
-                    protocol = 'http'
-                    if 'socks5' in self.current_url:
-                        protocol = 'socks5'
-                    elif 'socks4' in self.current_url:
-                        protocol = 'socks4'
-                    yield ip, int(port), protocol
-                    count += 1
-        if count > 0:
-            logger.info(f"{self.name} parsing done, got {count} candidate proxies from {self.current_url}")
-if __name__ == "__main__":
-    async def test_plugin():
-        plugin = SpeedXPlugin()
-        print(f"========== Testing {plugin.name} ==========")
-        print(f"Target URL count: {len(plugin.urls)}")
-        print(f"Starting crawl...\n")
-        proxies = await plugin.run()
-        print(f"\n========== Crawl results ==========")
-        print(f"Fetched {len(proxies)} proxies in total:")
-        print("-" * 60)
-        for idx, (ip, port, protocol) in enumerate(proxies, 1):
-            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
-        print("-" * 60)
-        print(f"Done! {len(proxies)} proxies~")
-    asyncio.run(test_plugin())
+        if results:
+            logger.info(f"{self.display_name} parsing done, found {len(results)} candidate proxies")
+        return results

plugins/yundaili.py

@@ -1,79 +1,51 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-from core.crawler import BasePlugin
-from core.log import logger
-from bs4 import BeautifulSoup
 import re
-import asyncio
+from typing import List
+from bs4 import BeautifulSoup
+from core.plugin_system import ProxyRaw
+from plugins.base import BaseHTTPPlugin
+from core.log import logger
-VALID_PROTOCOLS = ['http', 'https', 'socks4', 'socks5']
+VALID_PROTOCOLS = ("http", "https", "socks4", "socks5")
+class YunDaiLiPlugin(BaseHTTPPlugin):
+    name = "yundaili"
+    display_name = "云代理"
+    description = "Crawls free proxies from the YunDaiLi site"
-class YunDaiLiPlugin(BasePlugin):
     def __init__(self):
         super().__init__()
-        self.name = "云代理"
-        # Crawl the first 5 pages of elite and regular proxies
         self.urls = [
             f"http://www.ip3366.net/free/?stype=1&page={i}" for i in range(1, 6)
         ] + [
             f"http://www.ip3366.net/free/?stype=2&page={i}" for i in range(1, 6)
         ]
-    async def parse(self, html):
-        """
-        Parse a YunDaiLi/IP3366 page (the two share a similar structure)
-        """
-        if not html:
-            return
-        soup = BeautifulSoup(html, 'lxml')
-        list_table = soup.find('div', id='list')
-        if not list_table:
-            return
-        table = list_table.find('table')
-        if not table:
-            return
+    async def crawl(self) -> List[ProxyRaw]:
+        results = []
+        for url in self.urls:
+            html = await self.fetch(url, timeout=15)
+            if not html:
+                continue
+            soup = BeautifulSoup(html, "lxml")
+            list_table = soup.find("div", id="list")
+            if not list_table:
+                continue
+            table = list_table.find("table")
+            if not table:
+                continue
-        rows = table.find_all('tr')
-        count = 0
-        for row in rows:
-            tds = row.find_all('td')
-            if len(tds) >= 5:
-                ip = tds[0].get_text(strip=True)
-                port = tds[1].get_text(strip=True)
-                protocol = tds[4].get_text(strip=True).lower() if len(tds) > 4 else 'http'
-                if protocol not in VALID_PROTOCOLS:
-                    protocol = 'http'
-                if re.match(r'^\d+\.\d+\.\d+\.\d+$', ip) and port.isdigit():
-                    yield ip, int(port), protocol
-                    count += 1
-        if count > 0:
-            logger.info(f"{self.name} parsing done, found {count} candidate proxies")
+            for row in table.find_all("tr"):
+                tds = row.find_all("td")
+                if len(tds) >= 5:
+                    ip = tds[0].get_text(strip=True)
+                    port = tds[1].get_text(strip=True)
+                    protocol = tds[4].get_text(strip=True).lower() if len(tds) > 4 else "http"
+                    if protocol not in VALID_PROTOCOLS:
+                        protocol = "http"
+                    if re.match(r"^\d+\.\d+\.\d+\.\d+$", ip) and port.isdigit():
+                        results.append(ProxyRaw(ip, int(port), protocol))
-if __name__ == "__main__":
-    async def test_plugin():
-        plugin = YunDaiLiPlugin()
-        print(f"========== Testing {plugin.name} ==========")
-        print(f"Target URL count: {len(plugin.urls)}")
-        print(f"Starting crawl...\n")
-        proxies = await plugin.run()
-        print(f"\n========== Crawl results ==========")
-        print(f"Fetched {len(proxies)} proxies in total:")
-        print("-" * 60)
-        for idx, (ip, port, protocol) in enumerate(proxies, 1):
-            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
-        print("-" * 60)
-        print(f"Done! {len(proxies)} proxies~")
-    asyncio.run(test_plugin())
+        if results:
+            logger.info(f"{self.display_name} parsing done, found {len(results)} candidate proxies")
+        return results