ProxyPool/app/plugins/ip3366.py
祀梦 f09a8e16c4 fix: repair the crawler network layer, a stuck validation queue, and API 500 errors
- Fix BaseHTTPPlugin connection pooling, concurrency control, exception logging, and timeout strategy (a sketch follows below)
- Fix and harden the stability and fallback mechanisms of 8 crawler plugins
- Clear 40,000+ pending tasks from the validation_tasks table to keep the queue from stalling
- Fix the 500 error caused by the missing global app instance in app/api/main.py
- Raise the frontend Axios timeout to 120 seconds to avoid dropped requests
- Fix plugin statistics persistence and scheduler lifecycle issues
2026-04-04 19:27:36 +08:00
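
The first bullet concerns the HTTP layer shared by every plugin. As a minimal sketch of the kind of change it describes (assuming the layer is built on aiohttp, which the commit does not confirm, and with every limit and timeout value invented for illustration), a pooled, timeout-bounded fetch_all that returns None for failed pages, matching the `if not html: continue` check in the plugin below, might look like:

import asyncio
import aiohttp

async def fetch_all(urls: list[str]) -> list[str | None]:
    # One pooled session for the whole batch; the connector caps total and
    # per-host connections, and the timeout keeps slow sites from hanging a crawl.
    # All numeric values here are assumptions, not the project's actual settings.
    connector = aiohttp.TCPConnector(limit=50, limit_per_host=10)
    timeout = aiohttp.ClientTimeout(total=30, connect=10)
    async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:

        async def fetch(url: str) -> str | None:
            try:
                async with session.get(url) as resp:
                    resp.raise_for_status()
                    return await resp.text()
            except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
                # Log and return None so one failed page does not abort the batch.
                print(f"fetch failed for {url}: {exc!r}")
                return None

        return await asyncio.gather(*(fetch(u) for u in urls))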


import re
from typing import List

from bs4 import BeautifulSoup

from app.core.plugin_system import ProxyRaw
from app.plugins.base import BaseHTTPPlugin
from app.core.log import logger

VALID_PROTOCOLS = ("http", "https", "socks4", "socks5")


class Ip3366Plugin(BaseHTTPPlugin):
    name = "ip3366"
    display_name = "IP3366"
    description = "Crawl free proxies from the IP3366 website"
    default_config = {"max_pages": 3}

    def __init__(self):
        super().__init__()
        self._update_urls()

    def _update_urls(self):
        # Build page URLs for both list types (stype=1 and stype=2) up to max_pages.
        max_pages = self.config.get("max_pages", 3)
        self.urls = [
            f"http://www.ip3366.net/free/?stype=1&page={i}" for i in range(1, max_pages + 1)
        ] + [
            f"http://www.ip3366.net/free/?stype=2&page={i}" for i in range(1, max_pages + 1)
        ]

    def get_headers(self) -> dict:
        # A same-site Referer makes the request look like in-site navigation.
        headers = super().get_headers()
        headers["Referer"] = "http://www.ip3366.net/free/"
        return headers

    async def crawl(self) -> List[ProxyRaw]:
        results: List[ProxyRaw] = []
        htmls = await self.fetch_all(self.urls)
        for html in htmls:
            if not html:  # fetch failed for this page; skip it
                continue
            soup = BeautifulSoup(html, "lxml")
            list_div = soup.find("div", id="list")
            if not list_div:
                continue
            table = list_div.find("table")
            if not table:
                continue
            for row in table.find_all("tr"):
                tds = row.find_all("td")
                if len(tds) < 5:
                    continue  # header or malformed row
                ip = tds[0].get_text(strip=True)
                port = tds[1].get_text(strip=True)
                # len(tds) >= 5 already guarantees tds[4] exists, so no extra check is needed.
                protocol = tds[4].get_text(strip=True).lower()
                if protocol not in VALID_PROTOCOLS:
                    protocol = "http"
                # Basic sanity check before handing the entry to validation.
                if re.match(r"^\d+\.\d+\.\d+\.\d+$", ip) and port.isdigit():
                    results.append(ProxyRaw(ip, int(port), protocol))
        if results:
            logger.info(f"{self.display_name} parsing finished, {len(results)} candidate proxies collected")
        return results
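
Because the parsing in crawl() is pure BeautifulSoup work, it can be checked offline against a canned page. The sketch below stands alone: the HTML is fabricated in the shape the parser expects (a div#list wrapping a table with ip, port and protocol in columns 0, 1 and 4), and ProxyRaw is replaced with a plain tuple so no project imports are needed:

import re
from bs4 import BeautifulSoup

SAMPLE_HTML = """
<div id="list"><table>
  <tr><th>IP</th><th>PORT</th><th>-</th><th>-</th><th>TYPE</th></tr>
  <tr><td>1.2.3.4</td><td>8080</td><td>-</td><td>-</td><td>HTTPS</td></tr>
  <tr><td>not-an-ip</td><td>80</td><td>-</td><td>-</td><td>HTTP</td></tr>
</table></div>
"""

VALID_PROTOCOLS = ("http", "https", "socks4", "socks5")

def parse(html: str) -> list[tuple[str, int, str]]:
    # Same extraction path as Ip3366Plugin.crawl(), minus the networking.
    table = BeautifulSoup(html, "lxml").find("div", id="list").find("table")
    results = []
    for row in table.find_all("tr"):
        tds = row.find_all("td")
        if len(tds) < 5:
            continue  # the <th> header row has no <td> cells
        ip = tds[0].get_text(strip=True)
        port = tds[1].get_text(strip=True)
        protocol = tds[4].get_text(strip=True).lower()
        if protocol not in VALID_PROTOCOLS:
            protocol = "http"
        if re.match(r"^\d+\.\d+\.\d+\.\d+$", ip) and port.isdigit():
            results.append((ip, int(port), protocol))
    return results

# The malformed row is filtered out; only the valid entry survives.
assert parse(SAMPLE_HTML) == [("1.2.3.4", 8080, "https")]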