mirror of https://github.com/ArchiveBox/ArchiveBox.git (synced 2026-01-06 19:06:08 +10:00)
wip major changes
203
archivebox/plugins/headers/on_Snapshot__33_headers.js
Normal file
@@ -0,0 +1,203 @@
#!/usr/bin/env node
/**
 * Extract HTTP response headers for a URL.
 *
 * If a Chrome session exists (from the chrome_session extractor), reads the captured
 * response headers from chrome_session/response_headers.json.
 * Otherwise falls back to making an HTTP HEAD request.
 *
 * Usage: on_Snapshot__33_headers.js --url=<url> --snapshot-id=<uuid>
 * Output: writes headers/headers.json
 *
 * Environment variables:
 *   TIMEOUT: Timeout in seconds (default: 30)
 *   USER_AGENT: User agent string (optional)
 *   CHECK_SSL_VALIDITY: Whether to check SSL certificates (default: true)
 */
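// Example invocation and output, assuming the HTTP HEAD fallback path
// (values are illustrative; real timestamps and headers will differ per run):
//
//   $ node on_Snapshot__33_headers.js --url=https://example.com --snapshot-id=test789
//   Headers extracted (http): HTTP 200
//   START_TS=2026-01-06T09:00:00.000Z
//   END_TS=2026-01-06T09:00:01.250Z
//   DURATION=1.25
//   OUTPUT=headers/headers.json
//   STATUS=succeeded
//   RESULT_JSON={"extractor":"headers","url":"https://example.com",...}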
const fs = require('fs');
const path = require('path');
const https = require('https');
const http = require('http');

// Extractor metadata
const EXTRACTOR_NAME = 'headers';
const OUTPUT_DIR = 'headers';
const OUTPUT_FILE = 'headers.json';
const CHROME_SESSION_DIR = 'chrome_session';
const CHROME_HEADERS_FILE = 'response_headers.json';

// Parse command line arguments
function parseArgs() {
  const args = {};
  process.argv.slice(2).forEach(arg => {
    if (arg.startsWith('--')) {
      const [key, ...valueParts] = arg.slice(2).split('=');
      args[key.replace(/-/g, '_')] = valueParts.join('=') || true;
    }
  });
  return args;
}
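// For example, argv of ['--url=https://example.com', '--snapshot-id=abc'] yields
// { url: 'https://example.com', snapshot_id: 'abc' }; a bare flag such as
// '--verbose' (hypothetical) is stored as true.
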
// Get environment variable with default
function getEnv(name, defaultValue = '') {
  return (process.env[name] || defaultValue).trim();
}

function getEnvBool(name, defaultValue = false) {
  const val = getEnv(name, '').toLowerCase();
  if (['true', '1', 'yes', 'on'].includes(val)) return true;
  if (['false', '0', 'no', 'off'].includes(val)) return false;
  return defaultValue;
}

function getEnvInt(name, defaultValue = 0) {
  const val = parseInt(getEnv(name, String(defaultValue)), 10);
  return isNaN(val) ? defaultValue : val;
}
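// For example, with TIMEOUT='45' in the environment, getEnvInt('TIMEOUT', 30)
// returns 45; unset or non-numeric values fall back to the default (30).
// getEnvBool() accepts 'true'/'1'/'yes'/'on' and 'false'/'0'/'no'/'off',
// case-insensitively; anything else returns the default.
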
// Get headers from chrome_session if available
function getHeadersFromChromeSession() {
  const headersFile = path.join(CHROME_SESSION_DIR, CHROME_HEADERS_FILE);
  if (fs.existsSync(headersFile)) {
    try {
      return JSON.parse(fs.readFileSync(headersFile, 'utf8'));
    } catch (e) {
      return null;
    }
  }
  return null;
}
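// Expected shape of chrome_session/response_headers.json (matching the mock
// the tests write on behalf of the chrome_session extractor):
//   { "url": "...", "status": 200, "statusText": "OK", "headers": { "content-type": "...", ... } }
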
// Fetch headers via HTTP HEAD request (fallback)
function fetchHeaders(url) {
  return new Promise((resolve, reject) => {
    const timeout = getEnvInt('TIMEOUT', 30) * 1000;
    const userAgent = getEnv('USER_AGENT', 'Mozilla/5.0 (compatible; ArchiveBox/1.0)');
    const checkSsl = getEnvBool('CHECK_SSL_VALIDITY', true);

    const parsedUrl = new URL(url);
    const client = parsedUrl.protocol === 'https:' ? https : http;

    const options = {
      method: 'HEAD',
      hostname: parsedUrl.hostname,
      port: parsedUrl.port || (parsedUrl.protocol === 'https:' ? 443 : 80),
      path: parsedUrl.pathname + parsedUrl.search,
      headers: { 'User-Agent': userAgent },
      timeout,
      rejectUnauthorized: checkSsl,
    };

    const req = client.request(options, (res) => {
      resolve({
        url: url,
        status: res.statusCode,
        statusText: res.statusMessage,
        headers: res.headers,
      });
    });

    req.on('error', reject);
    req.on('timeout', () => {
      req.destroy();
      reject(new Error('Request timeout'));
    });

    req.end();
  });
}
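// fetchHeaders() resolves with an object like (illustrative values):
//   { url: 'https://example.com', status: 200, statusText: 'OK', headers: {...} }
// where `headers` is Node's res.headers object with lowercased header names.
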
async function extractHeaders(url) {
  // Create output directory
  if (!fs.existsSync(OUTPUT_DIR)) {
    fs.mkdirSync(OUTPUT_DIR, { recursive: true });
  }
  const outputPath = path.join(OUTPUT_DIR, OUTPUT_FILE);

  // Try Chrome session first
  const chromeHeaders = getHeadersFromChromeSession();
  if (chromeHeaders && chromeHeaders.headers) {
    fs.writeFileSync(outputPath, JSON.stringify(chromeHeaders, null, 2), 'utf8');
    return { success: true, output: outputPath, method: 'chrome_session', status: chromeHeaders.status };
  }

  // Fallback to HTTP HEAD request
  try {
    const headers = await fetchHeaders(url);
    fs.writeFileSync(outputPath, JSON.stringify(headers, null, 2), 'utf8');
    return { success: true, output: outputPath, method: 'http', status: headers.status };
  } catch (e) {
    return { success: false, error: e.message };
  }
}
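// extractHeaders() returns { success: true, output: 'headers/headers.json',
// method: 'chrome_session' | 'http', status: <int> } on success, or
// { success: false, error: '<message>' } on failure.
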
async function main() {
  const args = parseArgs();
  const url = args.url;
  const snapshotId = args.snapshot_id;

  if (!url || !snapshotId) {
    console.error('Usage: on_Snapshot__33_headers.js --url=<url> --snapshot-id=<uuid>');
    process.exit(1);
  }

  const startTs = new Date();
  let status = 'failed';
  let output = null;
  let error = '';

  try {
    const result = await extractHeaders(url);

    if (result.success) {
      status = 'succeeded';
      output = result.output;
      console.log(`Headers extracted (${result.method}): HTTP ${result.status}`);
    } else {
      status = 'failed';
      error = result.error;
    }
  } catch (e) {
    error = `${e.name}: ${e.message}`;
    status = 'failed';
  }

  const endTs = new Date();
  const duration = (endTs - startTs) / 1000;

  // Print results
  console.log(`START_TS=${startTs.toISOString()}`);
  console.log(`END_TS=${endTs.toISOString()}`);
  console.log(`DURATION=${duration.toFixed(2)}`);
  if (output) {
    console.log(`OUTPUT=${output}`);
  }
  console.log(`STATUS=${status}`);

  if (error) {
    console.error(`ERROR=${error}`);
  }

  // Print JSON result
  const resultJson = {
    extractor: EXTRACTOR_NAME,
    url,
    snapshot_id: snapshotId,
    status,
    start_ts: startTs.toISOString(),
    end_ts: endTs.toISOString(),
    duration: Math.round(duration * 100) / 100,
    output,
    error: error || null,
  };
  console.log(`RESULT_JSON=${JSON.stringify(resultJson)}`);

  process.exit(status === 'succeeded' ? 0 : 1);
}

main().catch(e => {
  console.error(`Fatal error: ${e.message}`);
  process.exit(1);
});
319
archivebox/plugins/headers/tests/test_headers.py
Normal file
@@ -0,0 +1,319 @@
"""
Integration tests for headers plugin

Tests verify:
1. Plugin script exists and is executable
2. Node.js is available
3. Headers extraction works for real example.com
4. Output JSON contains actual HTTP headers
5. Fallback to HTTP HEAD when chrome_session not available
6. Uses chrome_session headers when available
7. Config options work (TIMEOUT, USER_AGENT, CHECK_SSL_VALIDITY)
"""
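# To run just this plugin's tests (e.g. from the repo root, with pytest installed):
#   pytest archivebox/plugins/headers/tests/test_headers.py -v
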
import json
import os
import shutil
import subprocess
import tempfile
from pathlib import Path

import pytest


PLUGIN_DIR = Path(__file__).parent.parent
HEADERS_HOOK = PLUGIN_DIR / 'on_Snapshot__33_headers.js'
TEST_URL = 'https://example.com'

def test_hook_script_exists():
    """Verify hook script exists."""
    assert HEADERS_HOOK.exists(), f"Hook script not found: {HEADERS_HOOK}"

def test_node_is_available():
    """Test that Node.js is available on the system."""
    result = subprocess.run(
        ['which', 'node'],
        capture_output=True,
        text=True
    )

    if result.returncode != 0:
        pytest.skip("node not installed on system")

    binary_path = result.stdout.strip()
    assert Path(binary_path).exists(), f"Binary should exist at {binary_path}"

    # Test that node is executable and get version
    result = subprocess.run(
        ['node', '--version'],
        capture_output=True,
        text=True,
        timeout=10
    )
    assert result.returncode == 0, f"node not executable: {result.stderr}"
    assert result.stdout.startswith('v'), f"Unexpected node version format: {result.stdout}"

def test_extracts_headers_from_example_com():
    """Test full workflow: extract headers from real example.com."""

    # Check node is available
    if not shutil.which('node'):
        pytest.skip("node not installed")

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        # Run headers extraction
        result = subprocess.run(
            ['node', str(HEADERS_HOOK), f'--url={TEST_URL}', '--snapshot-id=test789'],
            cwd=tmpdir,
            capture_output=True,
            text=True,
            timeout=60
        )

        assert result.returncode == 0, f"Extraction failed: {result.stderr}"

        # Verify output in stdout
        assert 'STATUS=succeeded' in result.stdout, "Should report success"
        assert 'Headers extracted' in result.stdout, "Should report completion"

        # Verify output directory created
        headers_dir = tmpdir / 'headers'
        assert headers_dir.exists(), "Output directory not created"

        # Verify output file exists
        headers_file = headers_dir / 'headers.json'
        assert headers_file.exists(), "headers.json not created"

        # Verify headers JSON contains real example.com response
        headers_data = json.loads(headers_file.read_text())

        assert 'url' in headers_data, "Should have url field"
        assert headers_data['url'] == TEST_URL, f"URL should be {TEST_URL}"

        assert 'status' in headers_data, "Should have status field"
        assert headers_data['status'] in [200, 301, 302], \
            f"Should have valid HTTP status, got {headers_data['status']}"

        assert 'headers' in headers_data, "Should have headers field"
        assert isinstance(headers_data['headers'], dict), "Headers should be a dict"
        assert len(headers_data['headers']) > 0, "Headers dict should not be empty"

        # Verify common HTTP headers are present
        headers_lower = {k.lower(): v for k, v in headers_data['headers'].items()}
        assert 'content-type' in headers_lower or 'content-length' in headers_lower, \
            "Should have at least one common HTTP header"

        # Verify RESULT_JSON is present and valid
        assert 'RESULT_JSON=' in result.stdout, "Should output RESULT_JSON"

        for line in result.stdout.split('\n'):
            if line.startswith('RESULT_JSON='):
                # Slice off the prefix rather than str.replace(), which would also
                # mangle any 'RESULT_JSON=' occurrence inside the JSON payload
                result_json = json.loads(line[len('RESULT_JSON='):])
                assert result_json['extractor'] == 'headers'
                assert result_json['status'] == 'succeeded'
                assert result_json['url'] == TEST_URL
                assert result_json['snapshot_id'] == 'test789'
                assert 'duration' in result_json
                assert result_json['duration'] >= 0
                break

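# For reference, the headers.json written by the HTTP fallback looks like this
# (illustrative values):
#   {"url": "https://example.com", "status": 200, "statusText": "OK",
#    "headers": {"content-type": "text/html; charset=UTF-8", ...}}
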
def test_uses_chrome_session_headers_when_available():
    """Test that headers plugin prefers chrome_session headers over HTTP HEAD."""

    if not shutil.which('node'):
        pytest.skip("node not installed")

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        # Create mock chrome_session directory with response_headers.json
        chrome_session_dir = tmpdir / 'chrome_session'
        chrome_session_dir.mkdir()

        mock_headers = {
            'url': TEST_URL,
            'status': 200,
            'statusText': 'OK',
            'headers': {
                'content-type': 'text/html; charset=UTF-8',
                'server': 'MockChromeServer',
                'x-test-header': 'from-chrome-session'
            }
        }

        headers_file = chrome_session_dir / 'response_headers.json'
        headers_file.write_text(json.dumps(mock_headers))

        # Run headers extraction
        result = subprocess.run(
            ['node', str(HEADERS_HOOK), f'--url={TEST_URL}', '--snapshot-id=testchrome'],
            cwd=tmpdir,
            capture_output=True,
            text=True,
            timeout=30
        )

        assert result.returncode == 0, f"Extraction failed: {result.stderr}"
        assert 'STATUS=succeeded' in result.stdout, "Should report success"
        assert 'chrome_session' in result.stdout, "Should report using chrome_session method"

        # Verify it used chrome_session headers
        output_headers_file = tmpdir / 'headers' / 'headers.json'
        assert output_headers_file.exists(), "Output headers.json not created"

        output_data = json.loads(output_headers_file.read_text())
        assert output_data['headers']['x-test-header'] == 'from-chrome-session', \
            "Should use headers from chrome_session"
        assert output_data['headers']['server'] == 'MockChromeServer', \
            "Should use headers from chrome_session"

def test_falls_back_to_http_when_chrome_session_unavailable():
    """Test that headers plugin falls back to HTTP HEAD when chrome_session unavailable."""

    if not shutil.which('node'):
        pytest.skip("node not installed")

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        # Don't create chrome_session directory - force HTTP fallback

        # Run headers extraction
        result = subprocess.run(
            ['node', str(HEADERS_HOOK), f'--url={TEST_URL}', '--snapshot-id=testhttp'],
            cwd=tmpdir,
            capture_output=True,
            text=True,
            timeout=60
        )

        assert result.returncode == 0, f"Extraction failed: {result.stderr}"
        assert 'STATUS=succeeded' in result.stdout, "Should report success"
        # The hook logs 'Headers extracted (http): ...' when the HEAD fallback is used
        assert '(http)' in result.stdout, "Should report using the http method"

        # Verify output exists and has real HTTP headers
        output_headers_file = tmpdir / 'headers' / 'headers.json'
        assert output_headers_file.exists(), "Output headers.json not created"

        output_data = json.loads(output_headers_file.read_text())
        assert output_data['url'] == TEST_URL
        assert output_data['status'] in [200, 301, 302]
        assert isinstance(output_data['headers'], dict)
        assert len(output_data['headers']) > 0

def test_config_timeout_honored():
    """Test that TIMEOUT config is respected."""

    if not shutil.which('node'):
        pytest.skip("node not installed")

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        # Set very short timeout (but example.com should still succeed)
        env = os.environ.copy()
        env['TIMEOUT'] = '5'

        result = subprocess.run(
            ['node', str(HEADERS_HOOK), f'--url={TEST_URL}', '--snapshot-id=testtimeout'],
            cwd=tmpdir,
            capture_output=True,
            text=True,
            env=env,
            timeout=30
        )

        # Should complete (success or fail, but not hang)
        assert result.returncode in (0, 1), "Should complete without hanging"

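# Equivalent manual invocation, passing config via the environment
# (illustrative values):
#   TIMEOUT=5 node on_Snapshot__33_headers.js --url=https://example.com --snapshot-id=testtimeout
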
def test_config_user_agent():
    """Test that USER_AGENT config is used."""

    if not shutil.which('node'):
        pytest.skip("node not installed")

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        # Set custom user agent
        env = os.environ.copy()
        env['USER_AGENT'] = 'TestBot/1.0'

        result = subprocess.run(
            ['node', str(HEADERS_HOOK), f'--url={TEST_URL}', '--snapshot-id=testua'],
            cwd=tmpdir,
            capture_output=True,
            text=True,
            env=env,
            timeout=60
        )

        # Should succeed (example.com doesn't block)
        if result.returncode == 0:
            assert 'STATUS=succeeded' in result.stdout

def test_handles_https_urls():
    """Test that HTTPS URLs work correctly."""

    if not shutil.which('node'):
        pytest.skip("node not installed")

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        result = subprocess.run(
            ['node', str(HEADERS_HOOK), '--url=https://example.org', '--snapshot-id=testhttps'],
            cwd=tmpdir,
            capture_output=True,
            text=True,
            timeout=60
        )

        if result.returncode == 0:
            output_headers_file = tmpdir / 'headers' / 'headers.json'
            if output_headers_file.exists():
                output_data = json.loads(output_headers_file.read_text())
                assert output_data['url'] == 'https://example.org'
                assert output_data['status'] in [200, 301, 302]

def test_handles_404_gracefully():
    """Test that headers plugin handles 404s gracefully."""

    if not shutil.which('node'):
        pytest.skip("node not installed")

    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        result = subprocess.run(
            ['node', str(HEADERS_HOOK), '--url=https://example.com/nonexistent-page-404', '--snapshot-id=test404'],
            cwd=tmpdir,
            capture_output=True,
            text=True,
            timeout=60
        )

        # May succeed or fail depending on server behavior
        # If it succeeds, verify 404 status is captured
        if result.returncode == 0:
            output_headers_file = tmpdir / 'headers' / 'headers.json'
            if output_headers_file.exists():
                output_data = json.loads(output_headers_file.read_text())
                assert output_data['status'] == 404, "Should capture 404 status"


if __name__ == '__main__':
    pytest.main([__file__, '-v'])