import sys
import os

# Make the project root importable when this plugin is run as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from core.crawler import BasePlugin
from core.log import logger
from bs4 import BeautifulSoup
import re
import asyncio

# Protocols the proxy pool understands; anything else falls back to 'http'.
VALID_PROTOCOLS = ['http', 'https', 'socks4', 'socks5']

# Compiled once at module load (the original recompiled per row inside the
# parse loop). Strict dotted-quad IPv4: each octet limited to 0-255, so
# junk like "999.1.1.1" no longer passes.
_IPV4_RE = re.compile(
    r'^(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)\.){3}'
    r'(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)$'
)


class KuaiDaiLiPlugin(BasePlugin):
    """Crawler plugin for the kuaidaili.com free proxy lists."""

    def __init__(self):
        super().__init__()
        self.name = "快代理"
        # Crawl the first 10 pages of both the domestic high-anonymity
        # ("inha") and domestic transparent ("intr") lists.
        self.urls = [
            f"https://www.kuaidaili.com/free/inha/{i}/" for i in range(1, 11)
        ] + [
            f"https://www.kuaidaili.com/free/intr/{i}/" for i in range(1, 11)
        ]

    async def parse(self, html):
        """Parse one kuaidaili list page.

        Args:
            html: Raw page HTML, or a falsy value when the fetch failed.

        Yields:
            ``(ip, port, protocol)`` tuples — ``ip`` a validated IPv4
            string, ``port`` an int in 1-65535, ``protocol`` one of
            VALID_PROTOCOLS (defaulting to 'http').
        """
        if not html:
            return

        soup = BeautifulSoup(html, 'lxml')
        # The proxy rows live in the first <table> on the page.
        table = soup.find('table')
        if not table:
            # No table usually means anti-crawling kicked in or the page
            # structure changed.
            logger.warning(f"{self.name} 未能找到表格,可能是触发了反爬或结构变化")
            return

        count = 0
        for row in table.find_all('tr'):
            tds = row.find_all('td')
            if len(tds) < 5:
                continue  # header / malformed rows carry no proxy data

            ip = tds[0].get_text(strip=True)
            port = tds[1].get_text(strip=True)
            # len(tds) >= 5 is guaranteed here, so tds[4] is always safe;
            # the original `if len(tds) > 4 else 'http'` branch was dead code.
            protocol = tds[4].get_text(strip=True).lower()
            if protocol not in VALID_PROTOCOLS:
                protocol = 'http'

            # Yield only well-formed IPv4 addresses with a real TCP port
            # (the original accepted any digit string, including 0 and
            # out-of-range values).
            if _IPV4_RE.match(ip) and port.isdigit() and 0 < int(port) <= 65535:
                yield ip, int(port), protocol
                count += 1

        if count > 0:
            logger.info(f"{self.name} 解析完成,获得 {count} 个潜在代理")


if __name__ == "__main__":
    async def test_plugin():
        # Manual smoke test: run the full fetch+parse pipeline and dump
        # the proxies it found.
        plugin = KuaiDaiLiPlugin()
        print(f"========== 测试 {plugin.name} ==========")
        print(f"目标URL数量: {len(plugin.urls)}")
        print(f"开始抓取...\n")
        proxies = await plugin.run()
        print(f"\n========== 抓取结果 ==========")
        print(f"总计获取 {len(proxies)} 个代理:")
        print("-" * 60)
        for idx, (ip, port, protocol) in enumerate(proxies, 1):
            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
        print("-" * 60)
        print(f"完成!共 {len(proxies)} 个代理~")

    asyncio.run(test_plugin())