Files
ArchiveBox/archivebox/plugins/readability/on_Snapshot__55_readability.py
Commit 1b5a816022 (Claude): Implement hook step-based concurrency system
This implements the hook concurrency plan from TODO_hook_concurrency.md:

## Schema Changes
- Add Snapshot.current_step (IntegerField 0-9, default=0; see the field sketch after this list)
- Create migration 0034_snapshot_current_step.py
- Fix uuid_compat imports in migrations 0032 and 0003
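
A minimal sketch of what the new field might look like, assuming standard Django field/validator APIs (only the field name, range, and default are named by this commit; everything else is illustrative):

```python
# Hypothetical sketch -- not the actual model or migration source.
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models

class Snapshot(models.Model):
    # ... existing Snapshot fields ...
    # Which hook step (0-9) this snapshot is currently executing; pending
    # ArchiveResults only become claimable once their step has been reached.
    current_step = models.IntegerField(
        default=0,
        validators=[MinValueValidator(0), MaxValueValidator(9)],
    )
```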

## Core Logic
- Add extract_step(hook_name) utility - extracts the step digit from the __XX_ filename pattern (sketched after this list)
- Add is_background_hook(hook_name) utility - checks for the .bg. suffix (also sketched below)
- Update Snapshot.create_pending_archiveresults() to create one ArchiveResult (AR) per hook
- Update ArchiveResult.run() to handle hook_name field
- Add Snapshot.advance_step_if_ready() method for step advancement
- Integrate with SnapshotMachine.is_finished() to call advance_step_if_ready()
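
Hypothetical sketches of the two utilities, assuming hook filenames follow the `on_Snapshot__XX_name.py` pattern used by the readability hook below (the tens digit is the step; the ones digit orders hooks within a step):

```python
import re

def extract_step(hook_name: str) -> int:
    """Extract the step digit from the __XX_ pattern, e.g. __55_ -> 5."""
    match = re.search(r'__(\d)\d_', hook_name)
    return int(match.group(1)) if match else 0  # default-to-0 is an assumption

def is_background_hook(hook_name: str) -> bool:
    """Background hooks carry a .bg. suffix and never block step advancement."""
    return '.bg.' in hook_name
```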

## Worker Coordination
- Update ArchiveResultWorker.get_queue() for step-based filtering
- ARs are only claimable when their step <= snapshot.current_step
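
Roughly, the filtering could look like the sketch below (the `status='pending'` value, the `snapshot` foreign key, and the in-Python filtering are assumptions; the step rule itself is from this commit):

```python
def get_queue(self):
    """Hypothetical sketch of step-gated claiming in ArchiveResultWorker."""
    pending = ArchiveResult.objects.filter(status='pending').select_related('snapshot')
    # step isn't a DB column, so derive it from each AR's hook_name
    return [
        ar for ar in pending
        if extract_step(ar.hook_name) <= ar.snapshot.current_step
    ]
```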

## Hook Renumbering
- Step 5 (DOM extraction): singlefile→50, screenshot→51, pdf→52, dom→53,
  title→54, readability→55, headers→55, mercury→56, htmltotext→57
- Step 6 (post-DOM): wget→61, git→62, media→63.bg, gallerydl→64.bg,
  forumdl→65.bg, papersdl→66.bg
- Step 7 (URL extraction): parse_* hooks moved to 70-75
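
As a quick sanity check of the numbering scheme against the utilities sketched above (only `on_Snapshot__55_readability.py` is confirmed by this commit; the other filenames are inferred from the mapping):

```python
assert extract_step('on_Snapshot__55_readability.py') == 5
assert extract_step('on_Snapshot__61_wget.py') == 6           # inferred filename
assert is_background_hook('on_Snapshot__63_media.bg.py')      # inferred filename
assert not is_background_hook('on_Snapshot__55_readability.py')
```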

Background hooks (.bg suffix) don't block step advancement, enabling
long-running downloads to continue while other hooks proceed.
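
Step advancement might then look like this sketch, assuming the default `archiveresult_set` reverse accessor and terminal statuses of `succeeded`/`failed` (matching what the hook script below emits):

```python
def advance_step_if_ready(self):
    """Hypothetical sketch: advance once all blocking ARs for this step finish."""
    blocking = [
        ar for ar in self.archiveresult_set.all()
        if extract_step(ar.hook_name) == self.current_step
        and not is_background_hook(ar.hook_name)  # .bg hooks never block
    ]
    if all(ar.status in ('succeeded', 'failed') for ar in blocking) and self.current_step < 9:
        self.current_step += 1
        self.save(update_fields=['current_step'])
```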
2025-12-28 13:47:25 +00:00


#!/usr/bin/env python3
"""
Extract article content using Mozilla's Readability.

Usage: on_Snapshot__55_readability.py --url=<url> --snapshot-id=<uuid>
Output: Creates readability/ directory with content.html, content.txt, article.json

Environment variables:
    READABILITY_BINARY: Path to readability-extractor binary
    READABILITY_TIMEOUT: Timeout in seconds (default: 60)
    TIMEOUT: Fallback timeout from ARCHIVING_CONFIG if READABILITY_TIMEOUT is not set

Note: Requires readability-extractor from https://github.com/ArchiveBox/readability-extractor
This extractor looks for HTML source from other extractors (wget, singlefile, dom).
"""
import json
import os
import subprocess
import sys
from pathlib import Path

import rich_click as click
# Extractor metadata
PLUGIN_NAME = 'readability'
BIN_NAME = 'readability-extractor'
BIN_PROVIDERS = 'npm,env'
OUTPUT_DIR = '.'


def get_env(name: str, default: str = '') -> str:
    return os.environ.get(name, default).strip()


def get_env_int(name: str, default: int = 0) -> int:
    try:
        return int(get_env(name, str(default)))
    except ValueError:
        return default
def find_html_source() -> str | None:
    """Find HTML content from other extractors in the snapshot directory."""
    # Hooks run in snapshot_dir, sibling extractor outputs are in subdirectories
    search_patterns = [
        'singlefile/singlefile.html',
        'singlefile/*.html',
        'dom/output.html',
        'dom/*.html',
        'wget/**/*.html',
        'wget/**/*.htm',
    ]
    cwd = Path.cwd()
    for pattern in search_patterns:
        matches = list(cwd.glob(pattern))
        for match in matches:
            if match.is_file() and match.stat().st_size > 0:
                return str(match)
    return None
def extract_readability(url: str, binary: str) -> tuple[bool, str | None, str]:
    """
    Extract article using Readability.
    Returns: (success, output_path, error_message)
    Note: url is accepted for hook-interface consistency but unused here;
    extraction reads HTML already saved to disk by earlier extractors.
    """
    timeout = get_env_int('READABILITY_TIMEOUT') or get_env_int('TIMEOUT', 60)

    # Find HTML source
    html_source = find_html_source()
    if not html_source:
        return False, None, 'No HTML source found (run singlefile, dom, or wget first)'

    # Output directory is current directory (hook already runs in output dir)
    output_dir = Path(OUTPUT_DIR)

    try:
        # Run readability-extractor (outputs JSON by default)
        cmd = [binary, html_source]
        result = subprocess.run(cmd, capture_output=True, timeout=timeout)
        if result.returncode != 0:
            stderr = result.stderr.decode('utf-8', errors='replace')
            return False, None, f'readability-extractor failed: {stderr[:200]}'

        # Parse JSON output
        try:
            result_json = json.loads(result.stdout)
        except json.JSONDecodeError:
            return False, None, 'readability-extractor returned invalid JSON'

        # Extract and save content.
        # readability-extractor uses camelCase field names (textContent, content);
        # the nested pop also strips the hyphenated variant so no copy of the
        # article body remains duplicated in article.json.
        text_content = result_json.pop('textContent', result_json.pop('text-content', ''))
        html_content = result_json.pop('content', result_json.pop('html-content', ''))
        if not text_content and not html_content:
            return False, None, 'No content extracted'

        (output_dir / 'content.html').write_text(html_content, encoding='utf-8')
        (output_dir / 'content.txt').write_text(text_content, encoding='utf-8')
        (output_dir / 'article.json').write_text(json.dumps(result_json, indent=2), encoding='utf-8')
        return True, OUTPUT_DIR, ''
    except subprocess.TimeoutExpired:
        return False, None, f'Timed out after {timeout} seconds'
    except Exception as e:
        return False, None, f'{type(e).__name__}: {e}'
@click.command()
@click.option('--url', required=True, help='URL to extract article from')
@click.option('--snapshot-id', required=True, help='Snapshot UUID')
def main(url: str, snapshot_id: str):
    """Extract article content using Mozilla's Readability."""
    output = None
    status = 'failed'
    error = ''

    try:
        # Get binary from environment
        binary = get_env('READABILITY_BINARY', 'readability-extractor')
        # Run extraction
        success, output, error = extract_readability(url, binary)
        status = 'succeeded' if success else 'failed'
    except Exception as e:
        error = f'{type(e).__name__}: {e}'
        status = 'failed'

    if error:
        print(f'ERROR: {error}', file=sys.stderr)

    # Output clean JSONL (no RESULT_JSON= prefix)
    result = {
        'type': 'ArchiveResult',
        'status': status,
        'output_str': output or error or '',
    }
    print(json.dumps(result))
    sys.exit(0 if status == 'succeeded' else 1)


if __name__ == '__main__':
    main()