# Mirror of https://github.com/ArchiveBox/ArchiveBox.git
# Synced 2026-04-05 15:27:53 +10:00
# 233 lines · 6.8 KiB · Python · Executable File
#!/usr/bin/env python3

"""
Download scientific papers from a URL using papers-dl.

Usage: on_Snapshot__papersdl.py --url=<url> --snapshot-id=<uuid>

Output: Downloads paper PDFs to $PWD/

Environment variables:
    PAPERSDL_BINARY: Path to papers-dl binary
    PAPERSDL_TIMEOUT: Timeout in seconds (default: 300 for paper downloads)
    PAPERSDL_EXTRA_ARGS: Extra arguments for papers-dl (space-separated)

    # papers-dl feature toggles
    SAVE_PAPERSDL: Enable papers-dl paper extraction (default: True)

    # Fallback to ARCHIVING_CONFIG values if PAPERSDL_* not set:
    TIMEOUT: Fallback timeout
"""
|
|
|
|
import json
|
|
import os
|
|
import re
|
|
import shutil
|
|
import subprocess
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
import rich_click as click
|
|
|
|
|
|
# Extractor metadata
EXTRACTOR_NAME = 'papersdl'  # identifier reported in STATUS/RESULT_JSON output below
BIN_NAME = 'papers-dl'  # executable name searched for on PATH
BIN_PROVIDERS = 'pip,env'  # hint emitted with DEPENDENCY_NEEDED when the binary is missing
OUTPUT_DIR = '.'  # hook already runs inside the snapshot's output directory
|
|
|
|
|
|
def get_env(name: str, default: str = '') -> str:
    """Return environment variable *name* (or *default* when unset), whitespace-stripped."""
    raw = os.environ.get(name, default)
    return raw.strip()
|
|
|
|
|
|
def get_env_bool(name: str, default: bool = False) -> bool:
    """Parse env var *name* as a boolean; unrecognized or missing values yield *default*."""
    # Accepted spellings (case-insensitive, whitespace already stripped by get_env).
    truth_table = {
        'true': True, '1': True, 'yes': True, 'on': True,
        'false': False, '0': False, 'no': False, 'off': False,
    }
    token = get_env(name, '').lower()
    return truth_table.get(token, default)
|
|
|
|
|
|
def get_env_int(name: str, default: int = 0) -> int:
    """Parse env var *name* as an int; non-numeric or missing values yield *default*."""
    raw = get_env(name, str(default))
    try:
        return int(raw)
    except ValueError:
        # Malformed value — fall back rather than crash the hook.
        return default
|
|
|
|
|
|
def find_papersdl() -> str | None:
    """Locate the papers-dl executable.

    Preference order: the PAPERSDL_BINARY env var (only if it points at an
    existing file), then the first 'papers-dl' on PATH. Returns None when
    neither is available.
    """
    configured = get_env('PAPERSDL_BINARY')
    if configured and os.path.isfile(configured):
        return configured
    # shutil.which returns None when nothing is found, matching our contract.
    return shutil.which('papers-dl')
|
|
|
|
|
|
def get_version(binary: str) -> str:
    """Return *binary*'s version string (truncated to 64 chars), or '' on any failure."""
    try:
        proc = subprocess.run(
            [binary, '--version'],
            capture_output=True,
            text=True,
            timeout=10,
        )
        # Truncate: version strings are for display/logging only.
        return proc.stdout.strip()[:64]
    except Exception:
        # Missing binary, timeout, permission error — version is best-effort.
        return ''
|
|
|
|
|
|
def extract_doi_from_url(url: str) -> str | None:
|
|
"""Extract DOI from common paper URLs."""
|
|
# Match DOI pattern in URL
|
|
doi_pattern = r'10\.\d{4,}/[^\s]+'
|
|
match = re.search(doi_pattern, url)
|
|
if match:
|
|
return match.group(0)
|
|
return None
|
|
|
|
|
|
def save_paper(url: str, binary: str) -> tuple[bool, str | None, str]:
    """
    Download a paper for *url* using papers-dl.

    Returns (success, output_path, error_message). "No paper available"
    counts as success with output_path=None — not every snapshot URL
    points at a downloadable paper.
    """
    # Configuration: PAPERSDL_* overrides first, then generic fallbacks.
    timeout = get_env_int('PAPERSDL_TIMEOUT') or get_env_int('TIMEOUT', 300)
    extra_args = get_env('PAPERSDL_EXTRA_ARGS', '')

    # The hook already runs inside the snapshot's output directory.
    output_dir = Path(OUTPUT_DIR)

    # Prefer a DOI pulled from the URL; otherwise hand papers-dl the raw URL.
    identifier = extract_doi_from_url(url) or url

    # papers-dl fetch <identifier> -o <output_dir> [extra args...]
    cmd = [binary, 'fetch', identifier, '-o', str(output_dir)]
    if extra_args:
        cmd += extra_args.split()

    try:
        proc = subprocess.run(cmd, capture_output=True, timeout=timeout, text=True)

        # Success is judged by artifacts on disk, not by exit code.
        pdf_files = list(output_dir.glob('*.pdf'))
        if pdf_files:
            # Report the first PDF found.
            return True, str(pdf_files[0]), ''

        stderr = proc.stderr
        stdout = proc.stdout
        stderr_lower = stderr.lower()
        stdout_lower = stdout.lower()

        # These are NOT errors — the page simply has no downloadable paper.
        for marker in ('not found', 'no results'):
            if marker in stderr_lower or marker in stdout_lower:
                return True, None, ''
        if proc.returncode == 0:
            # papers-dl exited cleanly, just no paper — success.
            return True, None, ''

        # These ARE errors — something went wrong.
        if '404' in stderr or '404' in stdout:
            return False, None, '404 Not Found'
        if '403' in stderr or '403' in stdout:
            return False, None, '403 Forbidden'

        return False, None, f'papers-dl error: {stderr[:200] or stdout[:200]}'

    except subprocess.TimeoutExpired:
        return False, None, f'Timed out after {timeout} seconds'
    except Exception as e:
        return False, None, f'{type(e).__name__}: {e}'
|
|
|
|
|
|
@click.command()
@click.option('--url', required=True, help='URL to download paper from')
@click.option('--snapshot-id', required=True, help='Snapshot UUID')
def main(url: str, snapshot_id: str):
    """Download scientific paper from a URL using papers-dl.

    Emits key=value status lines (CMD=, VERSION=, OUTPUT=, STATUS=, ERROR=)
    plus a RESULT_JSON= record for the hook runner, and exits 0 on success
    (including "no paper found") or 1 on failure.
    """
    version = ''
    output = None
    status = 'failed'
    error = ''
    binary = None
    cmd_str = ''

    try:
        # Feature toggle: SAVE_PAPERSDL defaults to enabled.
        if not get_env_bool('SAVE_PAPERSDL', True):
            print('Skipping papers-dl (SAVE_PAPERSDL=False)')
            status = 'skipped'
            print(f'STATUS={status}')
            print(f'RESULT_JSON={json.dumps({"extractor": EXTRACTOR_NAME, "status": status, "url": url, "snapshot_id": snapshot_id})}')
            sys.exit(0)

        # Find binary; emit machine-readable hints for the dependency installer.
        binary = find_papersdl()
        if not binary:
            print(f'ERROR: {BIN_NAME} binary not found', file=sys.stderr)
            print(f'DEPENDENCY_NEEDED={BIN_NAME}', file=sys.stderr)
            print(f'BIN_PROVIDERS={BIN_PROVIDERS}', file=sys.stderr)
            # Fixed: plain strings (no placeholders) should not be f-strings.
            print('INSTALL_HINT=pip install papers-dl', file=sys.stderr)
            sys.exit(1)

        version = get_version(binary)
        cmd_str = f'{binary} fetch {url}'

        # Run extraction
        success, output, error = save_paper(url, binary)
        status = 'succeeded' if success else 'failed'

        if success:
            if output:
                output_path = Path(output)
                file_size = output_path.stat().st_size
                print(f'papers-dl completed: {output_path.name} ({file_size} bytes)')
            else:
                print('papers-dl completed: no paper found for this URL (this is normal)')

    except Exception as e:
        # NOTE: sys.exit raises SystemExit, which is not an Exception, so the
        # early exits above are not swallowed here.
        error = f'{type(e).__name__}: {e}'
        status = 'failed'

    # Print key=value summary lines for the hook runner.
    if cmd_str:
        print(f'CMD={cmd_str}')
    if version:
        print(f'VERSION={version}')
    if output:
        print(f'OUTPUT={output}')
    print(f'STATUS={status}')

    if error:
        print(f'ERROR={error}', file=sys.stderr)

    # Machine-readable result record.
    result_json = {
        'extractor': EXTRACTOR_NAME,
        'url': url,
        'snapshot_id': snapshot_id,
        'status': status,
        'cmd_version': version,
        'output': output,
        'error': error or None,
    }
    print(f'RESULT_JSON={json.dumps(result_json)}')

    sys.exit(0 if status == 'succeeded' else 1)
|
|
|
|
|
|
# Script entry point — click parses --url/--snapshot-id and invokes main().
if __name__ == '__main__':
    main()
|