import sys
import os

# Make the project root importable when this file is run directly as a script.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import re
import asyncio

from bs4 import BeautifulSoup

from core.crawler import BasePlugin
from core.log import logger

# Compiled once at module level instead of per table row (the original
# recompiled r'^\d+\.\d+\.\d+\.\d+$' on every iteration via re.match).
_IPV4_RE = re.compile(r'^\d+\.\d+\.\d+\.\d+$')


class Ip89Plugin(BasePlugin):
    """Crawler plugin for the free proxy list at www.89ip.cn.

    Scrapes the first five index pages and yields (ip, port, protocol)
    tuples from the site's results table.
    """

    def __init__(self):
        super().__init__()
        # NOTE: runtime-visible plugin name — used in log output; keep as-is.
        self.name = "89免费代理"
        # Fetch the first 5 paginated index pages.
        # NOTE(review): BasePlugin presumably consumes self.urls in run() —
        # contract not visible from this file; confirm against core.crawler.
        self.urls = [
            f"https://www.89ip.cn/index_{i}.html" for i in range(1, 6)
        ]

    async def parse(self, html):
        """Parse one 89ip.cn index page and yield proxy candidates.

        Args:
            html: Raw HTML of a fetched page, or a falsy value when the
                fetch failed (in which case nothing is yielded).

        Yields:
            Tuples of (ip: str, port: int, protocol: str). The site does
            not advertise a protocol per entry, so 'http' is assumed.
        """
        if not html:
            return
        soup = BeautifulSoup(html, 'lxml')
        table = soup.find('table', class_='layui-table')
        if not table:
            return
        rows = table.find_all('tr')
        count = 0
        for row in rows:
            tds = row.find_all('td')
            # Header rows contain <th> only, so len(tds) filters them out.
            if len(tds) >= 2:
                ip = tds[0].get_text(strip=True)
                port = tds[1].get_text(strip=True)
                # 89ip does not state a protocol; default to trying http.
                protocol = 'http'
                if _IPV4_RE.match(ip) and port.isdigit():
                    yield ip, int(port), protocol
                    count += 1
        if count > 0:
            logger.info(f"{self.name} 解析完成,获得 {count} 个潜在代理")


if __name__ == "__main__":
    async def test_plugin():
        """Ad-hoc manual smoke test: fetch all pages and print the results."""
        plugin = Ip89Plugin()
        print(f"========== 测试 {plugin.name} ==========")
        print(f"目标URL数量: {len(plugin.urls)}")
        print(f"开始抓取...\n")
        # NOTE(review): run() is inherited from BasePlugin and assumed to
        # return a list of (ip, port, protocol) tuples — confirm upstream.
        proxies = await plugin.run()
        print(f"\n========== 抓取结果 ==========")
        print(f"总计获取 {len(proxies)} 个代理:")
        print("-" * 60)
        for idx, (ip, port, protocol) in enumerate(proxies, 1):
            print(f"{idx:3d}. {ip:15s} : {str(port):5s} | {protocol}")
        print("-" * 60)
        print(f"完成!共 {len(proxies)} 个代理~")

    asyncio.run(test_plugin())