fix(crawl): throttle concurrent CrawlJobs and relax fpw/proxyscrape HTTP
- CrawlJob waits on crawl_slot before the JobExecutor semaphore so crawl-all does not fill slots while queued
- BaseHTTPPlugin: longer connect budget for slow international links
- proxyscrape: jsDelivr mirror + longer GitHub/API phases
- fpw_*: higher timeouts/retries; lower internal concurrency on heavy multi-URL plugins

Made-with: Cursor
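For context, a minimal sketch of the ordering described in the first bullet, assuming plain asyncio primitives; the names crawl_slot, executor_slots, and run_crawl_job are illustrative stand-ins, not the repository's actual API:

import asyncio

crawl_slot = asyncio.Semaphore(2)      # assumed cap on concurrently running CrawlJobs
executor_slots = asyncio.Semaphore(8)  # assumed JobExecutor-wide slot pool

async def run_crawl_job(name: str) -> None:
    # Acquire the crawl-specific slot FIRST, so a queued crawl-all burst
    # waits here instead of tying up general executor slots.
    async with crawl_slot:
        async with executor_slots:
            await asyncio.sleep(0.1)  # placeholder for the actual crawl work
            print(f"crawled {name}")

async def main() -> None:
    await asyncio.gather(*(run_crawl_job(f"plugin-{i}") for i in range(10)))

if __name__ == "__main__":
    asyncio.run(main())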
@@ -20,13 +20,16 @@ class ProxyScrapePlugin(BaseHTTPPlugin):
 
     def __init__(self):
         super().__init__()
-        # GitHub raw sources as the primary choice
+        # GitHub raw preferred; the jsDelivr mirror is usually steadier from mainland China or under heavy load
         self.urls = [
             ("http", "https://raw.githubusercontent.com/monosans/proxy-list/main/proxies/http.txt"),
             ("https", "https://raw.githubusercontent.com/monosans/proxy-list/main/proxies/https.txt"),
             ("socks4", "https://raw.githubusercontent.com/monosans/proxy-list/main/proxies/socks4.txt"),
             ("socks5", "https://raw.githubusercontent.com/monosans/proxy-list/main/proxies/socks5.txt"),
         ]
+        self._mirror_prefix = (
+            "https://cdn.jsdelivr.net/gh/monosans/proxy-list@main/proxies/"
+        )
         # Official ProxyScrape API as a fallback
         self.api_urls = {
             "http": "https://api.proxyscrape.com/v2/?request=get&protocol=http&timeout=10000&country=all&ssl=all&anonymity=all",
@@ -56,14 +59,18 @@ class ProxyScrapePlugin(BaseHTTPPlugin):
         results: List[ProxyRaw] = []
         protocols = [protocol for protocol, _ in self.urls]
         urls = [url for _, url in self.urls]
+        fetch_timeout = 28.0
 
-        # 1. Request all GitHub raw sources concurrently with an overall 10s limit; keep results from whatever finishes first
-        tasks = [asyncio.create_task(self.fetch(url, timeout=12)) for url in urls]
-        done, pending = await asyncio.wait(tasks, timeout=10)
+        # 1. GitHub raw: relax the overall wait so crawl-all runs do not mass-timeout while competing with other plugins for bandwidth
+        tasks = [
+            asyncio.create_task(self.fetch(url, timeout=fetch_timeout))
+            for url in urls
+        ]
+        done, pending = await asyncio.wait(tasks, timeout=45)
         for task in pending:
             task.cancel()
-        htmls = []
-        done_protocols = set()
+        htmls: list[str] = []
+        done_protocols: set[str] = set()
         for i, task in enumerate(tasks):
             try:
                 if task in done:
@@ -73,35 +80,60 @@ class ProxyScrapePlugin(BaseHTTPPlugin):
                     htmls.append("")
             except Exception:
                 htmls.append("")
                 # On exception, do not add to done_protocols, so that the API fallback is triggered
 
-        fallback_protocols = []
+        need_mirror: list[str] = []
         for protocol, html in zip(protocols, htmls):
             proxies = self._parse_proxies(html or "", protocol) if html else []
             if proxies:
-                logger.info(f"ProxyScrape {protocol.upper()} GitHub raw fetched {len(proxies)} proxies")
+                logger.info(
+                    f"ProxyScrape {protocol.upper()} GitHub raw fetched {len(proxies)} proxies"
+                )
                 results.extend(proxies)
             else:
                 if protocol in done_protocols:
-                    logger.warning(f"ProxyScrape {protocol.upper()} GitHub raw returned empty or invalid, will try the API fallback")
+                    logger.warning(
+                        f"ProxyScrape {protocol.upper()} GitHub raw returned empty or invalid, trying mirror and API"
+                    )
                 else:
-                    logger.warning(f"ProxyScrape {protocol.upper()} GitHub raw request timed out, will try the API fallback")
-                fallback_protocols.append(protocol)
+                    logger.warning(
+                        f"ProxyScrape {protocol.upper()} GitHub raw request timed out, trying mirror and API"
+                    )
+                need_mirror.append(protocol)
 
-        # 2. For protocols where GitHub raw failed, request the ProxyScrape API fallback concurrently
-        if fallback_protocols:
-            fallback_urls = [self.api_urls[p] for p in fallback_protocols]
+        # 2. jsDelivr mirror (sequential requests, to reduce momentary concurrency stacking with other plugins)
+        still_need_api: list[str] = []
+        for protocol in need_mirror:
+            mirror_url = f"{self._mirror_prefix}{protocol}.txt"
+            text = await self.fetch(mirror_url, timeout=fetch_timeout, retries=2)
+            proxies = self._parse_proxies(text or "", protocol) if text else []
+            if proxies:
+                logger.info(
+                    f"ProxyScrape {protocol.upper()} jsDelivr mirror fetched {len(proxies)} proxies"
+                )
+                results.extend(proxies)
+            else:
+                still_need_api.append(protocol)
+
+        # 3. Official ProxyScrape API
+        if still_need_api:
+            fallback_urls = [self.api_urls[p] for p in still_need_api]
             try:
                 api_htmls = await asyncio.wait_for(
-                    self.fetch_all(fallback_urls, timeout=10), timeout=10
+                    self.fetch_all(fallback_urls, timeout=25), timeout=35
                 )
             except asyncio.TimeoutError:
-                logger.warning(f"ProxyScrape API fallback batch request timed out, skipping {len(fallback_protocols)} protocols")
-                api_htmls = [""] * len(fallback_protocols)
-            for protocol, api_html in zip(fallback_protocols, api_htmls):
-                proxies = self._parse_proxies(api_html or "", protocol) if api_html else []
+                logger.warning(
+                    f"ProxyScrape API fallback batch request timed out, skipping {len(still_need_api)} protocols"
+                )
+                api_htmls = [""] * len(still_need_api)
+            for protocol, api_html in zip(still_need_api, api_htmls):
+                proxies = (
+                    self._parse_proxies(api_html or "", protocol) if api_html else []
+                )
                 if proxies:
-                    logger.info(f"ProxyScrape {protocol.upper()} API fetched {len(proxies)} proxies")
+                    logger.info(
+                        f"ProxyScrape {protocol.upper()} API fetched {len(proxies)} proxies"
+                    )
                     results.extend(proxies)
                 else:
                     logger.warning(f"ProxyScrape {protocol.upper()} API returned empty or invalid")