Mirror of https://github.com/ArchiveBox/ArchiveBox.git (synced 2026-01-04 09:55:33 +10:00)
tons of ui fixes and plugin fixes
@@ -8,8 +8,8 @@
* Extension: https://chromewebstore.google.com/detail/ifibfemgeogfhoebkmokieepdoobkbpo
* Documentation: https://2captcha.com/blog/how-to-use-2captcha-solver-extension-in-puppeteer
*
* Priority: 01 (early) - Must install before Chrome session starts
* Hook: on_Snapshot
* Priority: 01 (early) - Must install before Chrome session starts at Crawl level
* Hook: on_Crawl (runs once per crawl, not per snapshot)
*
* Requirements:
* - API_KEY_2CAPTCHA environment variable must be set
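For context, a minimal sketch (assuming the file's usual getEnv() helper and an exit-0 "soft skip" convention, both assumptions rather than this repo's exact code) of how the install hook can bail out when no API key is configured:

// sketch only - getEnv() and the exit convention are assumptions
const apiKey = getEnv('API_KEY_2CAPTCHA');
if (!apiKey) {
    console.log('API_KEY_2CAPTCHA not set, skipping 2captcha extension install');
    process.exit(0);   // assumed: exit 0 = skip rather than hard failure
}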
@@ -2,11 +2,11 @@
/**
* 2Captcha Extension Configuration
*
* Configures the 2captcha extension with API key after Chrome session starts.
* Runs once per browser session to inject API key into extension storage.
* Configures the 2captcha extension with API key after Crawl-level Chrome session starts.
* Runs once per crawl to inject API key into extension storage.
*
* Priority: 21 (after chrome_session at 20, before navigation at 30)
* Hook: on_Snapshot
* Priority: 11 (after chrome_session at 10)
* Hook: on_Crawl (runs once per crawl, not per snapshot)
*
* Requirements:
* - API_KEY_2CAPTCHA environment variable must be set
@@ -17,8 +17,19 @@ const path = require('path');
const fs = require('fs');
const puppeteer = require('puppeteer-core');

const OUTPUT_DIR = 'chrome_session';
const CONFIG_MARKER = path.join(OUTPUT_DIR, '.captcha2_configured');
// Get crawl ID from args to find the crawl-level chrome session
function getCrawlChromeSessionDir() {
    const args = parseArgs();
    const crawlId = args.crawl_id;
    if (!crawlId) {
        return null;
    }
    const dataDir = process.env.DATA_DIR || '.';
    return path.join(dataDir, 'tmp', `crawl_${crawlId}`, 'chrome_session');
}

const CHROME_SESSION_DIR = getCrawlChromeSessionDir() || '../chrome_session';
const CONFIG_MARKER = path.join(CHROME_SESSION_DIR, '.captcha2_configured');

// Get environment variable with default
function getEnv(name, defaultValue = '') {
@@ -53,7 +64,7 @@ async function configure2Captcha() {
    }

    // Load extensions metadata
    const extensionsFile = path.join(OUTPUT_DIR, 'extensions.json');
    const extensionsFile = path.join(CHROME_SESSION_DIR, 'extensions.json');
    if (!fs.existsSync(extensionsFile)) {
        return { success: false, error: 'extensions.json not found - chrome_session must run first' };
    }
@@ -70,7 +81,7 @@ async function configure2Captcha() {

    try {
        // Connect to the existing Chrome session via CDP
        const cdpFile = path.join(OUTPUT_DIR, 'cdp_url.txt');
        const cdpFile = path.join(CHROME_SESSION_DIR, 'cdp_url.txt');
        if (!fs.existsSync(cdpFile)) {
            return { success: false, error: 'CDP URL not found - chrome_session must run first' };
        }
@@ -3,10 +3,11 @@
Clean up Chrome browser session started by chrome_session extractor.

This extractor runs after all Chrome-based extractors (screenshot, pdf, dom)
to terminate the Chrome process and clean up any leftover files.
to clean up the Chrome session. For shared sessions (crawl-level Chrome), it
closes only this snapshot's tab. For standalone sessions, it kills Chrome.

Usage: on_Snapshot__24_chrome_cleanup.py --url=<url> --snapshot-id=<uuid>
Output: Terminates Chrome process and removes lock files
Usage: on_Snapshot__45_chrome_cleanup.py --url=<url> --snapshot-id=<uuid>
Output: Closes tab or terminates Chrome process

Environment variables:
    CHROME_USER_DATA_DIR: Chrome profile directory (for lock file cleanup)
@@ -18,6 +19,7 @@ import os
import signal
import sys
import time
import urllib.request
from datetime import datetime, timezone
from pathlib import Path

@@ -33,18 +35,126 @@ def get_env(name: str, default: str = '') -> str:
    return os.environ.get(name, default).strip()


def close_tab_via_cdp(cdp_url: str, page_id: str) -> bool:
    """
    Close a specific tab via Chrome DevTools Protocol.

    Returns True if tab was closed successfully.
    """
    try:
        # Extract port from WebSocket URL (ws://127.0.0.1:PORT/...)
        import re
        match = re.search(r':(\d+)/', cdp_url)
        if not match:
            return False
        port = match.group(1)

        # Use CDP HTTP endpoint to close the target
        close_url = f'http://127.0.0.1:{port}/json/close/{page_id}'
        req = urllib.request.Request(close_url, method='GET')

        with urllib.request.urlopen(req, timeout=5) as resp:
            return resp.status == 200

    except Exception as e:
        print(f'Failed to close tab via CDP: {e}', file=sys.stderr)
        return False

def kill_listener_processes() -> list[str]:
|
||||
"""
|
||||
Kill any daemonized listener processes (consolelog, ssl, responses, etc.).
|
||||
|
||||
These hooks write listener.pid files that we need to kill.
|
||||
Returns list of killed process descriptions.
|
||||
"""
|
||||
killed = []
|
||||
snapshot_dir = Path('.').resolve().parent # Go up from chrome_cleanup dir
|
||||
|
||||
# Look for listener.pid files in sibling directories
|
||||
for extractor_dir in snapshot_dir.iterdir():
|
||||
if not extractor_dir.is_dir():
|
||||
continue
|
||||
|
||||
pid_file = extractor_dir / 'listener.pid'
|
||||
if not pid_file.exists():
|
||||
continue
|
||||
|
||||
try:
|
||||
pid = int(pid_file.read_text().strip())
|
||||
try:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
# Brief wait for graceful shutdown
|
||||
for _ in range(5):
|
||||
try:
|
||||
os.kill(pid, 0)
|
||||
time.sleep(0.05)
|
||||
except OSError:
|
||||
break
|
||||
else:
|
||||
# Force kill if still running
|
||||
try:
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
killed.append(f'{extractor_dir.name} listener (PID {pid})')
|
||||
except OSError as e:
|
||||
if e.errno != 3: # Not "No such process"
|
||||
killed.append(f'{extractor_dir.name} listener (already dead)')
|
||||
except (ValueError, FileNotFoundError):
|
||||
pass
|
||||
|
||||
return killed
|
||||
|
||||
|
||||
def cleanup_chrome_session() -> tuple[bool, str | None, str]:
|
||||
"""
|
||||
Clean up Chrome session started by chrome_session extractor.
|
||||
|
||||
For shared sessions (crawl-level Chrome), closes only this snapshot's tab.
|
||||
For standalone sessions, kills the Chrome process.
|
||||
|
||||
Returns: (success, output_info, error_message)
|
||||
"""
|
||||
# First, kill any daemonized listener processes
|
||||
killed = kill_listener_processes()
|
||||
if killed:
|
||||
print(f'Killed listener processes: {", ".join(killed)}')
|
||||
|
||||
session_dir = Path(CHROME_SESSION_DIR)
|
||||
|
||||
if not session_dir.exists():
|
||||
return True, 'No chrome_session directory found', ''
|
||||
|
||||
# Check if this is a shared session
|
||||
shared_file = session_dir / 'shared_session.txt'
|
||||
is_shared = False
|
||||
if shared_file.exists():
|
||||
is_shared = shared_file.read_text().strip().lower() == 'true'
|
||||
|
||||
pid_file = session_dir / 'pid.txt'
|
||||
cdp_file = session_dir / 'cdp_url.txt'
|
||||
page_id_file = session_dir / 'page_id.txt'
|
||||
|
||||
if is_shared:
|
||||
# Shared session - only close this snapshot's tab
|
||||
if cdp_file.exists() and page_id_file.exists():
|
||||
try:
|
||||
cdp_url = cdp_file.read_text().strip()
|
||||
page_id = page_id_file.read_text().strip()
|
||||
|
||||
if close_tab_via_cdp(cdp_url, page_id):
|
||||
return True, f'Closed tab {page_id[:8]}... (shared Chrome session)', ''
|
||||
else:
|
||||
return True, f'Tab may already be closed (shared Chrome session)', ''
|
||||
|
||||
except Exception as e:
|
||||
return True, f'Tab cleanup attempted: {e}', ''
|
||||
|
||||
return True, 'Shared session - Chrome stays running', ''
|
||||
|
||||
# Standalone session - kill the Chrome process
|
||||
killed = False
|
||||
|
||||
if pid_file.exists():
|
||||
|
||||
@@ -2,38 +2,27 @@
/**
* Navigate the Chrome browser to the target URL.
*
* This extractor runs AFTER pre-load extractors (21-29) have registered their
* CDP listeners. It connects to the existing Chrome session, navigates to the URL,
* waits for page load, and captures response headers.
* This is a simple hook that ONLY navigates - nothing else.
* Pre-load hooks (21-29) should set up their own CDP listeners.
* Post-load hooks (31+) can then read from the loaded page.
*
* Usage: on_Snapshot__30_chrome_navigate.js --url=<url> --snapshot-id=<uuid>
* Output: Writes to chrome_session/:
*   - response_headers.json: HTTP response headers from main document
*   - final_url.txt: Final URL after any redirects
*   - page_loaded.txt: Marker file indicating navigation is complete
* Output: Writes page_loaded.txt marker when navigation completes
*
* Environment variables:
*   CHROME_PAGELOAD_TIMEOUT: Timeout for page load in seconds (default: 60)
*   CHROME_PAGELOAD_TIMEOUT: Timeout in seconds (default: 60)
*   CHROME_DELAY_AFTER_LOAD: Extra delay after load in seconds (default: 0)
*   CHROME_WAIT_FOR: Wait condition (default: networkidle2)
*     - domcontentloaded: DOM is ready, resources may still load
*     - load: Page fully loaded including resources
*     - networkidle0: No network activity for 500ms (strictest)
*     - networkidle2: At most 2 network connections for 500ms
*
* # Fallbacks
*   TIMEOUT: Fallback timeout
*/

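The fallback note above corresponds to the env lookup chain used in this file's navigate() function further down in the diff; condensed:

// condensed from navigate() below: first env var set wins, otherwise 60s
const timeout = (getEnvInt('CHROME_PAGELOAD_TIMEOUT') || getEnvInt('CHROME_TIMEOUT') || getEnvInt('TIMEOUT', 60)) * 1000;
const waitUntil = getWaitCondition();   // falls back to 'networkidle2' if invalid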
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const puppeteer = require('puppeteer-core');
|
||||
|
||||
// Extractor metadata
|
||||
const EXTRACTOR_NAME = 'chrome_navigate';
|
||||
const CHROME_SESSION_DIR = '../chrome_session';
|
||||
const OUTPUT_DIR = '.';
|
||||
|
||||
// Parse command line arguments
|
||||
function parseArgs() {
|
||||
const args = {};
|
||||
process.argv.slice(2).forEach(arg => {
|
||||
@@ -45,18 +34,10 @@ function parseArgs() {
|
||||
return args;
|
||||
}
|
||||
|
||||
// Get environment variable with default
|
||||
function getEnv(name, defaultValue = '') {
|
||||
return (process.env[name] || defaultValue).trim();
|
||||
}
|
||||
|
||||
function getEnvBool(name, defaultValue = false) {
|
||||
const val = getEnv(name, '').toLowerCase();
|
||||
if (['true', '1', 'yes', 'on'].includes(val)) return true;
|
||||
if (['false', '0', 'no', 'off'].includes(val)) return false;
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
function getEnvInt(name, defaultValue = 0) {
|
||||
const val = parseInt(getEnv(name, String(defaultValue)), 10);
|
||||
return isNaN(val) ? defaultValue : val;
|
||||
@@ -67,159 +48,79 @@ function getEnvFloat(name, defaultValue = 0) {
|
||||
return isNaN(val) ? defaultValue : val;
|
||||
}
|
||||
|
||||
// Read CDP URL from chrome_session
|
||||
function getCdpUrl() {
|
||||
const cdpFile = path.join(CHROME_SESSION_DIR, 'cdp_url.txt');
|
||||
if (!fs.existsSync(cdpFile)) {
|
||||
return null;
|
||||
}
|
||||
if (!fs.existsSync(cdpFile)) return null;
|
||||
return fs.readFileSync(cdpFile, 'utf8').trim();
|
||||
}
|
||||
|
||||
// Read URL from chrome_session (set by chrome_session extractor)
|
||||
function getTargetUrl() {
|
||||
const urlFile = path.join(CHROME_SESSION_DIR, 'url.txt');
|
||||
if (!fs.existsSync(urlFile)) {
|
||||
return null;
|
||||
}
|
||||
return fs.readFileSync(urlFile, 'utf8').trim();
|
||||
function getPageId() {
|
||||
const pageIdFile = path.join(CHROME_SESSION_DIR, 'page_id.txt');
|
||||
if (!fs.existsSync(pageIdFile)) return null;
|
||||
return fs.readFileSync(pageIdFile, 'utf8').trim();
|
||||
}
|
||||
|
||||
// Validate wait condition
|
||||
function getWaitCondition() {
|
||||
const waitFor = getEnv('CHROME_WAIT_FOR', 'networkidle2').toLowerCase();
|
||||
const validConditions = ['domcontentloaded', 'load', 'networkidle0', 'networkidle2'];
|
||||
if (validConditions.includes(waitFor)) {
|
||||
return waitFor;
|
||||
}
|
||||
console.error(`Warning: Invalid CHROME_WAIT_FOR="${waitFor}", using networkidle2`);
|
||||
return 'networkidle2';
|
||||
const valid = ['domcontentloaded', 'load', 'networkidle0', 'networkidle2'];
|
||||
return valid.includes(waitFor) ? waitFor : 'networkidle2';
|
||||
}
|
||||
|
||||
// Sleep helper
|
||||
function sleep(ms) {
|
||||
return new Promise(resolve => setTimeout(resolve, ms));
|
||||
}
|
||||
|
||||
async function navigateToUrl(url, cdpUrl) {
|
||||
async function navigate(url, cdpUrl) {
|
||||
const timeout = (getEnvInt('CHROME_PAGELOAD_TIMEOUT') || getEnvInt('CHROME_TIMEOUT') || getEnvInt('TIMEOUT', 60)) * 1000;
|
||||
const delayAfterLoad = getEnvFloat('CHROME_DELAY_AFTER_LOAD', 0) * 1000;
|
||||
const waitUntil = getWaitCondition();
|
||||
const pageId = getPageId();
|
||||
|
||||
let browser = null;
|
||||
let responseHeaders = {};
|
||||
let redirectChain = [];
|
||||
let finalUrl = url;
|
||||
|
||||
try {
|
||||
// Connect to existing browser
|
||||
browser = await puppeteer.connect({
|
||||
browserWSEndpoint: cdpUrl,
|
||||
});
|
||||
browser = await puppeteer.connect({ browserWSEndpoint: cdpUrl });
|
||||
|
||||
// Get all pages and find our target page
|
||||
const pages = await browser.pages();
|
||||
if (pages.length === 0) {
|
||||
return { success: false, error: 'No pages found in browser' };
|
||||
}
|
||||
|
||||
// Use the last created page (most likely the one chrome_session created)
|
||||
const page = pages[pages.length - 1];
|
||||
|
||||
// Set up response interception to capture headers and redirects
|
||||
page.on('response', async (response) => {
|
||||
const request = response.request();
|
||||
|
||||
// Track redirects
|
||||
if (response.status() >= 300 && response.status() < 400) {
|
||||
redirectChain.push({
|
||||
url: response.url(),
|
||||
status: response.status(),
|
||||
location: response.headers()['location'] || null,
|
||||
});
|
||||
}
|
||||
|
||||
// Capture headers from the main document request
|
||||
if (request.isNavigationRequest() && request.frame() === page.mainFrame()) {
|
||||
try {
|
||||
responseHeaders = {
|
||||
url: response.url(),
|
||||
status: response.status(),
|
||||
statusText: response.statusText(),
|
||||
headers: response.headers(),
|
||||
};
|
||||
finalUrl = response.url();
|
||||
} catch (e) {
|
||||
// Ignore errors capturing headers
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Navigate to URL and wait for load
|
||||
console.log(`Navigating to ${url} (wait: ${waitUntil}, timeout: ${timeout}ms)`);
|
||||
|
||||
const response = await page.goto(url, {
|
||||
waitUntil,
|
||||
timeout,
|
||||
});
|
||||
|
||||
// Capture final response if not already captured
|
||||
if (response && Object.keys(responseHeaders).length === 0) {
|
||||
responseHeaders = {
|
||||
url: response.url(),
|
||||
status: response.status(),
|
||||
statusText: response.statusText(),
|
||||
headers: response.headers(),
|
||||
};
|
||||
finalUrl = response.url();
|
||||
// Find page by target ID if available
|
||||
let page = null;
|
||||
if (pageId) {
|
||||
page = pages.find(p => {
|
||||
const target = p.target();
|
||||
return target && target._targetId === pageId;
|
||||
});
|
||||
}
|
||||
if (!page) {
|
||||
page = pages[pages.length - 1];
|
||||
}
|
||||
|
||||
// Apply optional delay after load
|
||||
// Navigate
|
||||
console.log(`Navigating to ${url} (wait: ${waitUntil}, timeout: ${timeout}ms)`);
|
||||
const response = await page.goto(url, { waitUntil, timeout });
|
||||
|
||||
// Optional delay
|
||||
if (delayAfterLoad > 0) {
|
||||
console.log(`Waiting ${delayAfterLoad}ms after load...`);
|
||||
await sleep(delayAfterLoad);
|
||||
}
|
||||
|
||||
// Write response headers
|
||||
if (Object.keys(responseHeaders).length > 0) {
|
||||
// Add redirect chain to headers
|
||||
responseHeaders.redirect_chain = redirectChain;
|
||||
const finalUrl = page.url();
|
||||
const status = response ? response.status() : null;
|
||||
|
||||
fs.writeFileSync(
|
||||
path.join(CHROME_SESSION_DIR, 'response_headers.json'),
|
||||
JSON.stringify(responseHeaders, null, 2)
|
||||
);
|
||||
}
|
||||
// Write marker file
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'page_loaded.txt'), new Date().toISOString());
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'final_url.txt'), finalUrl);
|
||||
|
||||
// Write final URL (after redirects)
|
||||
fs.writeFileSync(path.join(CHROME_SESSION_DIR, 'final_url.txt'), finalUrl);
|
||||
|
||||
// Write marker file indicating page is loaded
|
||||
fs.writeFileSync(
|
||||
path.join(CHROME_SESSION_DIR, 'page_loaded.txt'),
|
||||
new Date().toISOString()
|
||||
);
|
||||
|
||||
// Disconnect but leave browser running for post-load extractors
|
||||
browser.disconnect();
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: CHROME_SESSION_DIR,
|
||||
finalUrl,
|
||||
status: responseHeaders.status,
|
||||
redirectCount: redirectChain.length,
|
||||
};
|
||||
return { success: true, finalUrl, status };
|
||||
|
||||
} catch (e) {
|
||||
// Don't close browser on error - let cleanup handle it
|
||||
if (browser) {
|
||||
try {
|
||||
browser.disconnect();
|
||||
} catch (disconnectErr) {
|
||||
// Ignore
|
||||
}
|
||||
}
|
||||
if (browser) browser.disconnect();
|
||||
return { success: false, error: `${e.name}: ${e.message}` };
|
||||
}
|
||||
}
|
||||
@@ -239,55 +140,33 @@ async function main() {
|
||||
let output = null;
|
||||
let error = '';
|
||||
|
||||
try {
|
||||
// Check for chrome_session
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
console.error('ERROR: chrome_session not found (cdp_url.txt missing)');
|
||||
console.error('chrome_navigate requires chrome_session to run first');
|
||||
process.exit(1);
|
||||
}
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
console.error('ERROR: chrome_session not found');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Get URL from chrome_session or use provided URL
|
||||
const targetUrl = getTargetUrl() || url;
|
||||
const result = await navigate(url, cdpUrl);
|
||||
|
||||
const result = await navigateToUrl(targetUrl, cdpUrl);
|
||||
|
||||
if (result.success) {
|
||||
status = 'succeeded';
|
||||
output = result.output;
|
||||
console.log(`Page loaded: ${result.finalUrl}`);
|
||||
console.log(`HTTP status: ${result.status}`);
|
||||
if (result.redirectCount > 0) {
|
||||
console.log(`Redirects: ${result.redirectCount}`);
|
||||
}
|
||||
} else {
|
||||
status = 'failed';
|
||||
error = result.error;
|
||||
}
|
||||
} catch (e) {
|
||||
error = `${e.name}: ${e.message}`;
|
||||
status = 'failed';
|
||||
if (result.success) {
|
||||
status = 'succeeded';
|
||||
output = OUTPUT_DIR;
|
||||
console.log(`Page loaded: ${result.finalUrl} (HTTP ${result.status})`);
|
||||
} else {
|
||||
error = result.error;
|
||||
}
|
||||
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
// Print results
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
if (output) {
|
||||
console.log(`OUTPUT=${output}`);
|
||||
}
|
||||
if (output) console.log(`OUTPUT=${output}`);
|
||||
console.log(`STATUS=${status}`);
|
||||
if (error) console.error(`ERROR=${error}`);
|
||||
|
||||
if (error) {
|
||||
console.error(`ERROR=${error}`);
|
||||
}
|
||||
|
||||
// Print JSON result
|
||||
const resultJson = {
|
||||
console.log(`RESULT_JSON=${JSON.stringify({
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
@@ -297,8 +176,7 @@ async function main() {
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output,
|
||||
error: error || null,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(resultJson)}`);
|
||||
})}`);
|
||||
|
||||
process.exit(status === 'succeeded' ? 0 : 1);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,141 @@
#!/usr/bin/env python3
"""
Clean up Chrome browser session at the end of a crawl.

This runs after all snapshots in a crawl have been processed to terminate
the shared Chrome session that was started by on_Crawl__10_chrome_session.js.

Usage: on_Crawl__99_chrome_cleanup.py --crawl-id=<uuid>
Output: Terminates the crawl's Chrome process
"""

import json
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
import rich_click as click
|
||||
|
||||
|
||||
# Extractor metadata
|
||||
EXTRACTOR_NAME = 'chrome_cleanup'
|
||||
CHROME_SESSION_DIR = 'chrome_session'
|
||||
|
||||
|
||||
def get_env(name: str, default: str = '') -> str:
|
||||
return os.environ.get(name, default).strip()
|
||||
|
||||
|
||||
def cleanup_crawl_chrome() -> tuple[bool, str | None, str]:
|
||||
"""
|
||||
Clean up Chrome session for the crawl.
|
||||
|
||||
Returns: (success, output_info, error_message)
|
||||
"""
|
||||
session_dir = Path(CHROME_SESSION_DIR)
|
||||
|
||||
if not session_dir.exists():
|
||||
return True, 'No chrome_session directory found', ''
|
||||
|
||||
pid_file = session_dir / 'pid.txt'
|
||||
killed = False
|
||||
|
||||
if pid_file.exists():
|
||||
try:
|
||||
pid = int(pid_file.read_text().strip())
|
||||
|
||||
# Try graceful termination first
|
||||
try:
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
killed = True
|
||||
print(f'[*] Sent SIGTERM to Chrome PID {pid}')
|
||||
|
||||
# Wait briefly for graceful shutdown
|
||||
for _ in range(20):
|
||||
try:
|
||||
os.kill(pid, 0) # Check if still running
|
||||
time.sleep(0.1)
|
||||
except OSError:
|
||||
print(f'[+] Chrome process {pid} terminated')
|
||||
break # Process is gone
|
||||
else:
|
||||
# Force kill if still running
|
||||
print(f'[!] Chrome still running, sending SIGKILL')
|
||||
try:
|
||||
os.kill(pid, signal.SIGKILL)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
except OSError as e:
|
||||
# Process might already be dead, that's fine
|
||||
if e.errno == 3: # No such process
|
||||
print(f'[*] Chrome process {pid} already terminated')
|
||||
else:
|
||||
return False, None, f'Failed to kill Chrome PID {pid}: {e}'
|
||||
|
||||
except ValueError:
|
||||
return False, None, f'Invalid PID in {pid_file}'
|
||||
except Exception as e:
|
||||
return False, None, f'{type(e).__name__}: {e}'
|
||||
|
||||
result_info = f'Crawl Chrome cleanup: PID {"killed" if killed else "not found or already terminated"}'
|
||||
return True, result_info, ''
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.option('--crawl-id', required=True, help='Crawl UUID')
|
||||
@click.option('--source-url', default='', help='Source URL (unused)')
|
||||
def main(crawl_id: str, source_url: str):
|
||||
"""Clean up shared Chrome browser session for crawl."""
|
||||
|
||||
start_ts = datetime.now(timezone.utc)
|
||||
output = None
|
||||
status = 'failed'
|
||||
error = ''
|
||||
|
||||
try:
|
||||
success, output, error = cleanup_crawl_chrome()
|
||||
status = 'succeeded' if success else 'failed'
|
||||
|
||||
if success:
|
||||
print(f'Crawl Chrome cleanup completed: {output}')
|
||||
|
||||
except Exception as e:
|
||||
error = f'{type(e).__name__}: {e}'
|
||||
status = 'failed'
|
||||
|
||||
# Print results
|
||||
end_ts = datetime.now(timezone.utc)
|
||||
duration = (end_ts - start_ts).total_seconds()
|
||||
|
||||
print(f'START_TS={start_ts.isoformat()}')
|
||||
print(f'END_TS={end_ts.isoformat()}')
|
||||
print(f'DURATION={duration:.2f}')
|
||||
if output:
|
||||
print(f'OUTPUT={output}')
|
||||
print(f'STATUS={status}')
|
||||
|
||||
if error:
|
||||
print(f'ERROR={error}', file=sys.stderr)
|
||||
|
||||
# Print JSON result
|
||||
result_json = {
|
||||
'extractor': EXTRACTOR_NAME,
|
||||
'crawl_id': crawl_id,
|
||||
'status': status,
|
||||
'start_ts': start_ts.isoformat(),
|
||||
'end_ts': end_ts.isoformat(),
|
||||
'duration': round(duration, 2),
|
||||
'output': output,
|
||||
'error': error or None,
|
||||
}
|
||||
print(f'RESULT_JSON={json.dumps(result_json)}')
|
||||
|
||||
sys.exit(0 if status == 'succeeded' else 1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
343 archivebox/plugins/chrome_session/on_Crawl__10_chrome_session.js (Normal file)
@@ -0,0 +1,343 @@
#!/usr/bin/env node
/**
* Launch a shared Chrome browser session for the entire crawl.
*
* This runs once per crawl and keeps Chrome alive for all snapshots to share.
* Each snapshot creates its own tab via on_Snapshot__20_chrome_session.js.
*
* Usage: on_Crawl__10_chrome_session.js --crawl-id=<uuid> --source-url=<url>
* Output: Creates chrome_session/ with:
*   - cdp_url.txt: WebSocket URL for CDP connection
*   - pid.txt: Chrome process ID (for cleanup)
*
* Environment variables:
*   CHROME_BINARY: Path to Chrome/Chromium binary
*   CHROME_RESOLUTION: Page resolution (default: 1440,2000)
*   CHROME_HEADLESS: Run in headless mode (default: true)
*   CHROME_CHECK_SSL_VALIDITY: Whether to check SSL certificates (default: true)
*/

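For downstream hooks, the contract is just the files listed above; a minimal sketch (paths relative to the crawl's working directory are an assumption) of attaching to this shared session from a later hook:

// sketch: attach to the shared crawl-level Chrome from another hook
const fs = require('fs');
const puppeteer = require('puppeteer-core');

(async () => {
    const cdpUrl = fs.readFileSync('chrome_session/cdp_url.txt', 'utf8').trim();
    const browser = await puppeteer.connect({ browserWSEndpoint: cdpUrl });
    const page = await browser.newPage();   // one tab per snapshot
    // ... use the page, then detach without killing the shared browser
    browser.disconnect();
})();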
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { spawn } = require('child_process');
|
||||
const http = require('http');
|
||||
|
||||
// Extractor metadata
|
||||
const EXTRACTOR_NAME = 'chrome_session';
|
||||
const OUTPUT_DIR = 'chrome_session';
|
||||
|
||||
// Parse command line arguments
|
||||
function parseArgs() {
|
||||
const args = {};
|
||||
process.argv.slice(2).forEach(arg => {
|
||||
if (arg.startsWith('--')) {
|
||||
const [key, ...valueParts] = arg.slice(2).split('=');
|
||||
args[key.replace(/-/g, '_')] = valueParts.join('=') || true;
|
||||
}
|
||||
});
|
||||
return args;
|
||||
}
|
||||
|
||||
// Get environment variable with default
|
||||
function getEnv(name, defaultValue = '') {
|
||||
return (process.env[name] || defaultValue).trim();
|
||||
}
|
||||
|
||||
function getEnvBool(name, defaultValue = false) {
|
||||
const val = getEnv(name, '').toLowerCase();
|
||||
if (['true', '1', 'yes', 'on'].includes(val)) return true;
|
||||
if (['false', '0', 'no', 'off'].includes(val)) return false;
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
// Find Chrome binary
|
||||
function findChrome() {
|
||||
const chromeBinary = getEnv('CHROME_BINARY');
|
||||
if (chromeBinary && fs.existsSync(chromeBinary)) {
|
||||
return chromeBinary;
|
||||
}
|
||||
|
||||
const candidates = [
|
||||
// Linux
|
||||
'/usr/bin/google-chrome',
|
||||
'/usr/bin/google-chrome-stable',
|
||||
'/usr/bin/chromium',
|
||||
'/usr/bin/chromium-browser',
|
||||
// macOS
|
||||
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
|
||||
'/Applications/Chromium.app/Contents/MacOS/Chromium',
|
||||
];
|
||||
|
||||
for (const candidate of candidates) {
|
||||
if (fs.existsSync(candidate)) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
// Parse resolution string
|
||||
function parseResolution(resolution) {
|
||||
const [width, height] = resolution.split(',').map(x => parseInt(x.trim(), 10));
|
||||
return { width: width || 1440, height: height || 2000 };
|
||||
}
|
||||
|
||||
// Find a free port
|
||||
function findFreePort() {
|
||||
return new Promise((resolve, reject) => {
|
||||
const server = require('net').createServer();
|
||||
server.unref();
|
||||
server.on('error', reject);
|
||||
server.listen(0, () => {
|
||||
const port = server.address().port;
|
||||
server.close(() => resolve(port));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
// Wait for Chrome's DevTools port to be ready
|
||||
function waitForDebugPort(port, timeout = 30000) {
|
||||
const startTime = Date.now();
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const tryConnect = () => {
|
||||
if (Date.now() - startTime > timeout) {
|
||||
reject(new Error(`Timeout waiting for Chrome debug port ${port}`));
|
||||
return;
|
||||
}
|
||||
|
||||
const req = http.get(`http://127.0.0.1:${port}/json/version`, (res) => {
|
||||
let data = '';
|
||||
res.on('data', chunk => data += chunk);
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const info = JSON.parse(data);
|
||||
resolve(info);
|
||||
} catch (e) {
|
||||
setTimeout(tryConnect, 100);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
req.on('error', () => {
|
||||
setTimeout(tryConnect, 100);
|
||||
});
|
||||
|
||||
req.setTimeout(1000, () => {
|
||||
req.destroy();
|
||||
setTimeout(tryConnect, 100);
|
||||
});
|
||||
};
|
||||
|
||||
tryConnect();
|
||||
});
|
||||
}
|
||||
|
||||
async function launchChrome(binary) {
|
||||
const resolution = getEnv('CHROME_RESOLUTION') || getEnv('RESOLUTION', '1440,2000');
|
||||
const checkSsl = getEnvBool('CHROME_CHECK_SSL_VALIDITY', getEnvBool('CHECK_SSL_VALIDITY', true));
|
||||
const headless = getEnvBool('CHROME_HEADLESS', true);
|
||||
|
||||
const { width, height } = parseResolution(resolution);
|
||||
|
||||
// Create output directory
|
||||
if (!fs.existsSync(OUTPUT_DIR)) {
|
||||
fs.mkdirSync(OUTPUT_DIR, { recursive: true });
|
||||
}
|
||||
|
||||
// Find a free port for Chrome DevTools
|
||||
const debugPort = await findFreePort();
|
||||
console.log(`[*] Using debug port: ${debugPort}`);
|
||||
|
||||
// Load any installed extensions
|
||||
const extensionUtils = require('../chrome_extensions/chrome_extension_utils.js');
|
||||
const extensionsDir = getEnv('CHROME_EXTENSIONS_DIR') ||
|
||||
path.join(getEnv('DATA_DIR', '.'), 'personas', getEnv('ACTIVE_PERSONA', 'Default'), 'chrome_extensions');
|
||||
|
||||
const installedExtensions = [];
|
||||
if (fs.existsSync(extensionsDir)) {
|
||||
const files = fs.readdirSync(extensionsDir);
|
||||
for (const file of files) {
|
||||
if (file.endsWith('.extension.json')) {
|
||||
try {
|
||||
const extPath = path.join(extensionsDir, file);
|
||||
const extData = JSON.parse(fs.readFileSync(extPath, 'utf-8'));
|
||||
if (extData.unpacked_path && fs.existsSync(extData.unpacked_path)) {
|
||||
installedExtensions.push(extData);
|
||||
console.log(`[*] Loading extension: ${extData.name || file}`);
|
||||
}
|
||||
} catch (e) {
|
||||
// Skip invalid cache files
|
||||
console.warn(`[!] Skipping invalid extension cache: ${file}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get extension launch arguments
|
||||
const extensionArgs = extensionUtils.getExtensionLaunchArgs(installedExtensions);
|
||||
if (extensionArgs.length > 0) {
|
||||
console.log(`[+] Loaded ${installedExtensions.length} extension(s)`);
|
||||
// Write extensions metadata for config hooks to use
|
||||
fs.writeFileSync(
|
||||
path.join(OUTPUT_DIR, 'extensions.json'),
|
||||
JSON.stringify(installedExtensions, null, 2)
|
||||
);
|
||||
}
|
||||
|
||||
// Build Chrome arguments
|
||||
const chromeArgs = [
|
||||
`--remote-debugging-port=${debugPort}`,
|
||||
'--remote-debugging-address=127.0.0.1',
|
||||
'--no-sandbox',
|
||||
'--disable-setuid-sandbox',
|
||||
'--disable-dev-shm-usage',
|
||||
'--disable-gpu',
|
||||
'--disable-sync',
|
||||
'--no-first-run',
|
||||
'--no-default-browser-check',
|
||||
'--disable-default-apps',
|
||||
'--disable-infobars',
|
||||
'--disable-blink-features=AutomationControlled',
|
||||
'--disable-component-update',
|
||||
'--disable-domain-reliability',
|
||||
'--disable-breakpad',
|
||||
'--disable-background-networking',
|
||||
'--disable-background-timer-throttling',
|
||||
'--disable-backgrounding-occluded-windows',
|
||||
'--disable-renderer-backgrounding',
|
||||
'--disable-ipc-flooding-protection',
|
||||
'--password-store=basic',
|
||||
'--use-mock-keychain',
|
||||
'--font-render-hinting=none',
|
||||
'--force-color-profile=srgb',
|
||||
`--window-size=${width},${height}`,
|
||||
...extensionArgs, // Load extensions
|
||||
...(headless ? ['--headless=new'] : []),
|
||||
...(checkSsl ? [] : ['--ignore-certificate-errors']),
|
||||
'about:blank', // Start with blank page
|
||||
];
|
||||
|
||||
// Launch Chrome as a child process (NOT detached - stays with crawl process)
|
||||
// Using stdio: 'ignore' so we don't block on output but Chrome stays as our child
|
||||
const chromeProcess = spawn(binary, chromeArgs, {
|
||||
stdio: ['ignore', 'ignore', 'ignore'],
|
||||
});
|
||||
|
||||
const chromePid = chromeProcess.pid;
|
||||
console.log(`[*] Launched Chrome (PID: ${chromePid}), waiting for debug port...`);
|
||||
|
||||
// Write PID immediately for cleanup
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'pid.txt'), String(chromePid));
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'port.txt'), String(debugPort));
|
||||
|
||||
try {
|
||||
// Wait for Chrome to be ready
|
||||
const versionInfo = await waitForDebugPort(debugPort, 30000);
|
||||
console.log(`[+] Chrome ready: ${versionInfo.Browser}`);
|
||||
|
||||
// Build WebSocket URL
|
||||
const wsUrl = versionInfo.webSocketDebuggerUrl;
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'cdp_url.txt'), wsUrl);
|
||||
|
||||
return { success: true, cdpUrl: wsUrl, pid: chromePid, port: debugPort };
|
||||
|
||||
} catch (e) {
|
||||
// Kill Chrome if setup failed
|
||||
try {
|
||||
process.kill(chromePid, 'SIGTERM');
|
||||
} catch (killErr) {
|
||||
// Ignore
|
||||
}
|
||||
return { success: false, error: `${e.name}: ${e.message}` };
|
||||
}
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const args = parseArgs();
|
||||
const crawlId = args.crawl_id;
|
||||
|
||||
const startTs = new Date();
|
||||
let status = 'failed';
|
||||
let output = null;
|
||||
let error = '';
|
||||
let version = '';
|
||||
|
||||
try {
|
||||
const binary = findChrome();
|
||||
if (!binary) {
|
||||
console.error('ERROR: Chrome/Chromium binary not found');
|
||||
console.error('DEPENDENCY_NEEDED=chrome');
|
||||
console.error('BIN_PROVIDERS=puppeteer,env,playwright,apt,brew');
|
||||
console.error('INSTALL_HINT=npx @puppeteer/browsers install chrome@stable');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Get Chrome version
|
||||
try {
|
||||
const { execSync } = require('child_process');
|
||||
version = execSync(`"${binary}" --version`, { encoding: 'utf8', timeout: 5000 }).trim().slice(0, 64);
|
||||
} catch (e) {
|
||||
version = '';
|
||||
}
|
||||
|
||||
const result = await launchChrome(binary);
|
||||
|
||||
if (result.success) {
|
||||
status = 'succeeded';
|
||||
output = OUTPUT_DIR;
|
||||
console.log(`[+] Chrome session started for crawl ${crawlId}`);
|
||||
console.log(`[+] CDP URL: ${result.cdpUrl}`);
|
||||
console.log(`[+] PID: ${result.pid}`);
|
||||
} else {
|
||||
status = 'failed';
|
||||
error = result.error;
|
||||
}
|
||||
} catch (e) {
|
||||
error = `${e.name}: ${e.message}`;
|
||||
status = 'failed';
|
||||
}
|
||||
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
// Print results
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
if (version) {
|
||||
console.log(`VERSION=${version}`);
|
||||
}
|
||||
if (output) {
|
||||
console.log(`OUTPUT=${output}`);
|
||||
}
|
||||
console.log(`STATUS=${status}`);
|
||||
|
||||
if (error) {
|
||||
console.error(`ERROR=${error}`);
|
||||
}
|
||||
|
||||
// Print JSON result
|
||||
const resultJson = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
crawl_id: crawlId,
|
||||
status,
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
cmd_version: version,
|
||||
output,
|
||||
error: error || null,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(resultJson)}`);
|
||||
|
||||
// Exit with success - Chrome stays running as our child process
|
||||
// It will be cleaned up when the crawl process terminates
|
||||
process.exit(status === 'succeeded' ? 0 : 1);
|
||||
}
|
||||
|
||||
main().catch(e => {
|
||||
console.error(`Fatal error: ${e.message}`);
|
||||
process.exit(1);
|
||||
});
|
||||
@@ -1,20 +1,21 @@
#!/usr/bin/env node
/**
* Start a Chrome browser session for use by other extractors.
* Create a Chrome tab for this snapshot in the shared crawl Chrome session.
*
* This extractor ONLY launches Chrome and creates a blank page - it does NOT navigate.
* Pre-load extractors (21-29) can connect via CDP to register listeners before navigation.
* The chrome_navigate extractor (30) performs the actual page load.
* If a crawl-level Chrome session exists (from on_Crawl__10_chrome_session.js),
* this connects to it and creates a new tab. Otherwise, falls back to launching
* its own Chrome instance.
*
* Usage: on_Snapshot__20_chrome_session.js --url=<url> --snapshot-id=<uuid>
* Usage: on_Snapshot__20_chrome_session.js --url=<url> --snapshot-id=<uuid> --crawl-id=<uuid>
* Output: Creates chrome_session/ with:
*   - cdp_url.txt: WebSocket URL for CDP connection
*   - pid.txt: Chrome process ID (for cleanup)
*   - page_id.txt: Target ID of the page for other extractors to use
*   - url.txt: The URL to be navigated to (for chrome_navigate)
*   - cdp_url.txt: WebSocket URL for CDP connection (copied or new)
*   - pid.txt: Chrome process ID (from crawl or new)
*   - page_id.txt: Target ID of this snapshot's tab
*   - url.txt: The URL to be navigated to
*
* Environment variables:
*   CHROME_BINARY: Path to Chrome/Chromium binary
*   DATA_DIR: Data directory (to find crawl's Chrome session)
*   CHROME_BINARY: Path to Chrome/Chromium binary (for fallback)
*   CHROME_RESOLUTION: Page resolution (default: 1440,2000)
*   CHROME_USER_AGENT: User agent string (optional)
*   CHROME_CHECK_SSL_VALIDITY: Whether to check SSL certificates (default: true)
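The decision between the two modes is made in this file's main(), shown later in the diff; condensed, it is just:

// condensed from main() below: prefer the crawl-level Chrome, else launch our own
const crawlSession = findCrawlChromeSession(crawlId);   // null if missing or dead
const result = crawlSession
    ? await createTabInExistingChrome(crawlSession.cdpUrl, url, crawlSession.pid)
    : await launchNewChrome(url, binary);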
@@ -23,18 +24,13 @@
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { spawn } = require('child_process');
|
||||
const http = require('http');
|
||||
const puppeteer = require('puppeteer-core');
|
||||
|
||||
// Import extension utilities
|
||||
const extensionUtils = require('../chrome_extensions/chrome_extension_utils.js');
|
||||
|
||||
// Extractor metadata
|
||||
const EXTRACTOR_NAME = 'chrome_session';
|
||||
const OUTPUT_DIR = 'chrome_session';
|
||||
|
||||
// Get extensions directory from environment or use default
|
||||
const EXTENSIONS_DIR = process.env.CHROME_EXTENSIONS_DIR ||
|
||||
path.join(process.env.DATA_DIR || './data', 'personas', process.env.ACTIVE_PERSONA || 'Default', 'chrome_extensions');
|
||||
const OUTPUT_DIR = '.'; // Hook already runs in the output directory
|
||||
|
||||
// Parse command line arguments
|
||||
function parseArgs() {
|
||||
@@ -60,13 +56,7 @@ function getEnvBool(name, defaultValue = false) {
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
function getEnvInt(name, defaultValue = 0) {
|
||||
const val = parseInt(getEnv(name, String(defaultValue)), 10);
|
||||
return isNaN(val) ? defaultValue : val;
|
||||
}
|
||||
|
||||
|
||||
// Find Chrome binary
|
||||
// Find Chrome binary (for fallback)
|
||||
function findChrome() {
|
||||
const chromeBinary = getEnv('CHROME_BINARY');
|
||||
if (chromeBinary && fs.existsSync(chromeBinary)) {
|
||||
@@ -74,12 +64,10 @@ function findChrome() {
|
||||
}
|
||||
|
||||
const candidates = [
|
||||
// Linux
|
||||
'/usr/bin/google-chrome',
|
||||
'/usr/bin/google-chrome-stable',
|
||||
'/usr/bin/chromium',
|
||||
'/usr/bin/chromium-browser',
|
||||
// macOS
|
||||
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
|
||||
'/Applications/Chromium.app/Contents/MacOS/Chromium',
|
||||
];
|
||||
@@ -99,40 +87,132 @@ function parseResolution(resolution) {
|
||||
return { width: width || 1440, height: height || 2000 };
|
||||
}
|
||||
|
||||
// Load installed extensions from cache files
|
||||
function loadInstalledExtensions() {
|
||||
const extensions = [];
|
||||
// Find a free port
|
||||
function findFreePort() {
|
||||
return new Promise((resolve, reject) => {
|
||||
const server = require('net').createServer();
|
||||
server.unref();
|
||||
server.on('error', reject);
|
||||
server.listen(0, () => {
|
||||
const port = server.address().port;
|
||||
server.close(() => resolve(port));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
if (!fs.existsSync(EXTENSIONS_DIR)) {
|
||||
return extensions;
|
||||
}
|
||||
// Wait for Chrome's DevTools port to be ready
|
||||
function waitForDebugPort(port, timeout = 30000) {
|
||||
const startTime = Date.now();
|
||||
|
||||
// Look for *.extension.json cache files created by extension plugins
|
||||
const files = fs.readdirSync(EXTENSIONS_DIR);
|
||||
const extensionFiles = files.filter(f => f.endsWith('.extension.json'));
|
||||
return new Promise((resolve, reject) => {
|
||||
const tryConnect = () => {
|
||||
if (Date.now() - startTime > timeout) {
|
||||
reject(new Error(`Timeout waiting for Chrome debug port ${port}`));
|
||||
return;
|
||||
}
|
||||
|
||||
for (const file of extensionFiles) {
|
||||
const req = http.get(`http://127.0.0.1:${port}/json/version`, (res) => {
|
||||
let data = '';
|
||||
res.on('data', chunk => data += chunk);
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const info = JSON.parse(data);
|
||||
resolve(info);
|
||||
} catch (e) {
|
||||
setTimeout(tryConnect, 100);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
req.on('error', () => {
|
||||
setTimeout(tryConnect, 100);
|
||||
});
|
||||
|
||||
req.setTimeout(1000, () => {
|
||||
req.destroy();
|
||||
setTimeout(tryConnect, 100);
|
||||
});
|
||||
};
|
||||
|
||||
tryConnect();
|
||||
});
|
||||
}
|
||||
|
||||
// Try to find the crawl's Chrome session
|
||||
function findCrawlChromeSession(crawlId) {
|
||||
if (!crawlId) return null;
|
||||
|
||||
const dataDir = getEnv('DATA_DIR', '.');
|
||||
const crawlChromeDir = path.join(dataDir, 'tmp', `crawl_${crawlId}`, 'chrome_session');
|
||||
|
||||
const cdpFile = path.join(crawlChromeDir, 'cdp_url.txt');
|
||||
const pidFile = path.join(crawlChromeDir, 'pid.txt');
|
||||
|
||||
if (fs.existsSync(cdpFile) && fs.existsSync(pidFile)) {
|
||||
try {
|
||||
const filePath = path.join(EXTENSIONS_DIR, file);
|
||||
const data = fs.readFileSync(filePath, 'utf-8');
|
||||
const extension = JSON.parse(data);
|
||||
const cdpUrl = fs.readFileSync(cdpFile, 'utf-8').trim();
|
||||
const pid = parseInt(fs.readFileSync(pidFile, 'utf-8').trim(), 10);
|
||||
|
||||
// Verify extension is actually installed
|
||||
const manifestPath = path.join(extension.unpacked_path, 'manifest.json');
|
||||
if (fs.existsSync(manifestPath)) {
|
||||
extensions.push(extension);
|
||||
console.log(`[+] Loaded extension: ${extension.name} (${extension.webstore_id})`);
|
||||
// Verify the process is still running
|
||||
try {
|
||||
process.kill(pid, 0); // Signal 0 = check if process exists
|
||||
return { cdpUrl, pid };
|
||||
} catch (e) {
|
||||
// Process not running
|
||||
return null;
|
||||
}
|
||||
} catch (e) {
|
||||
console.warn(`[⚠️] Failed to load extension from ${file}: ${e.message}`);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
return extensions;
|
||||
return null;
|
||||
}
|
||||
|
||||
// Create a new tab in an existing Chrome session
|
||||
async function createTabInExistingChrome(cdpUrl, url, pid) {
|
||||
const resolution = getEnv('CHROME_RESOLUTION') || getEnv('RESOLUTION', '1440,2000');
|
||||
const userAgent = getEnv('CHROME_USER_AGENT') || getEnv('USER_AGENT', '');
|
||||
const { width, height } = parseResolution(resolution);
|
||||
|
||||
async function startChromeSession(url, binary) {
|
||||
console.log(`[*] Connecting to existing Chrome session: ${cdpUrl}`);
|
||||
|
||||
// Connect Puppeteer to the running Chrome
|
||||
const browser = await puppeteer.connect({
|
||||
browserWSEndpoint: cdpUrl,
|
||||
defaultViewport: { width, height },
|
||||
});
|
||||
|
||||
// Create a new tab for this snapshot
|
||||
const page = await browser.newPage();
|
||||
|
||||
// Set viewport
|
||||
await page.setViewport({ width, height });
|
||||
|
||||
// Set user agent if specified
|
||||
if (userAgent) {
|
||||
await page.setUserAgent(userAgent);
|
||||
}
|
||||
|
||||
// Get the page target ID
|
||||
const target = page.target();
|
||||
const targetId = target._targetId;
|
||||
|
||||
// Write session info
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'cdp_url.txt'), cdpUrl);
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'pid.txt'), String(pid));
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'page_id.txt'), targetId);
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'url.txt'), url);
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'shared_session.txt'), 'true');
|
||||
|
||||
// Disconnect Puppeteer (Chrome and tab stay alive)
|
||||
browser.disconnect();
|
||||
|
||||
return { success: true, output: OUTPUT_DIR, cdpUrl, targetId, pid, shared: true };
|
||||
}
|
||||
|
||||
// Fallback: Launch a new Chrome instance for this snapshot
|
||||
async function launchNewChrome(url, binary) {
|
||||
const resolution = getEnv('CHROME_RESOLUTION') || getEnv('RESOLUTION', '1440,2000');
|
||||
const userAgent = getEnv('CHROME_USER_AGENT') || getEnv('USER_AGENT', '');
|
||||
const checkSsl = getEnvBool('CHROME_CHECK_SSL_VALIDITY', getEnvBool('CHECK_SSL_VALIDITY', true));
|
||||
@@ -140,115 +220,98 @@ async function startChromeSession(url, binary) {
|
||||
|
||||
const { width, height } = parseResolution(resolution);
|
||||
|
||||
// Load installed extensions
|
||||
const extensions = loadInstalledExtensions();
|
||||
const extensionArgs = extensionUtils.getExtensionLaunchArgs(extensions);
|
||||
// Find a free port for Chrome DevTools
|
||||
const debugPort = await findFreePort();
|
||||
console.log(`[*] Launching new Chrome on port: ${debugPort}`);
|
||||
|
||||
if (extensions.length > 0) {
|
||||
console.log(`[*] Loading ${extensions.length} Chrome extensions...`);
|
||||
}
|
||||
// Build Chrome arguments
|
||||
const chromeArgs = [
|
||||
`--remote-debugging-port=${debugPort}`,
|
||||
'--remote-debugging-address=127.0.0.1',
|
||||
'--no-sandbox',
|
||||
'--disable-setuid-sandbox',
|
||||
'--disable-dev-shm-usage',
|
||||
'--disable-gpu',
|
||||
'--disable-sync',
|
||||
'--no-first-run',
|
||||
'--no-default-browser-check',
|
||||
'--disable-default-apps',
|
||||
'--disable-infobars',
|
||||
'--disable-blink-features=AutomationControlled',
|
||||
'--disable-component-update',
|
||||
'--disable-domain-reliability',
|
||||
'--disable-breakpad',
|
||||
'--disable-background-networking',
|
||||
'--disable-background-timer-throttling',
|
||||
'--disable-backgrounding-occluded-windows',
|
||||
'--disable-renderer-backgrounding',
|
||||
'--disable-ipc-flooding-protection',
|
||||
'--password-store=basic',
|
||||
'--use-mock-keychain',
|
||||
'--font-render-hinting=none',
|
||||
'--force-color-profile=srgb',
|
||||
`--window-size=${width},${height}`,
|
||||
...(headless ? ['--headless=new'] : []),
|
||||
...(checkSsl ? [] : ['--ignore-certificate-errors']),
|
||||
'about:blank',
|
||||
];
|
||||
|
||||
// Create output directory
|
||||
if (!fs.existsSync(OUTPUT_DIR)) {
|
||||
fs.mkdirSync(OUTPUT_DIR, { recursive: true });
|
||||
}
|
||||
// Launch Chrome as a detached process (since no crawl-level Chrome exists)
|
||||
const chromeProcess = spawn(binary, chromeArgs, {
|
||||
detached: true,
|
||||
stdio: ['ignore', 'ignore', 'ignore'],
|
||||
});
|
||||
chromeProcess.unref();
|
||||
|
||||
let browser = null;
|
||||
const chromePid = chromeProcess.pid;
|
||||
console.log(`[*] Launched Chrome (PID: ${chromePid}), waiting for debug port...`);
|
||||
|
||||
// Write PID immediately for cleanup
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'pid.txt'), String(chromePid));
|
||||
|
||||
try {
|
||||
// Launch browser with Puppeteer
|
||||
browser = await puppeteer.launch({
|
||||
executablePath: binary,
|
||||
headless: headless ? 'new' : false,
|
||||
args: [
|
||||
'--no-sandbox',
|
||||
'--disable-setuid-sandbox',
|
||||
'--disable-dev-shm-usage',
|
||||
'--disable-gpu',
|
||||
'--disable-sync',
|
||||
'--no-first-run',
|
||||
'--no-default-browser-check',
|
||||
'--disable-default-apps',
|
||||
'--disable-infobars',
|
||||
'--disable-blink-features=AutomationControlled',
|
||||
'--disable-component-update',
|
||||
'--disable-domain-reliability',
|
||||
'--disable-breakpad',
|
||||
'--disable-background-networking',
|
||||
'--disable-background-timer-throttling',
|
||||
'--disable-backgrounding-occluded-windows',
|
||||
'--disable-renderer-backgrounding',
|
||||
'--disable-ipc-flooding-protection',
|
||||
'--password-store=basic',
|
||||
'--use-mock-keychain',
|
||||
'--font-render-hinting=none',
|
||||
'--force-color-profile=srgb',
|
||||
`--window-size=${width},${height}`,
|
||||
...(checkSsl ? [] : ['--ignore-certificate-errors']),
|
||||
...extensionArgs,
|
||||
],
|
||||
// Wait for Chrome to be ready
|
||||
const versionInfo = await waitForDebugPort(debugPort, 30000);
|
||||
console.log(`[+] Chrome ready: ${versionInfo.Browser}`);
|
||||
|
||||
const wsUrl = versionInfo.webSocketDebuggerUrl;
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'cdp_url.txt'), wsUrl);
|
||||
|
||||
// Connect Puppeteer to get page info
|
||||
const browser = await puppeteer.connect({
|
||||
browserWSEndpoint: wsUrl,
|
||||
defaultViewport: { width, height },
|
||||
});
|
||||
|
||||
// Get the WebSocket endpoint URL
|
||||
const cdpUrl = browser.wsEndpoint();
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'cdp_url.txt'), cdpUrl);
|
||||
let pages = await browser.pages();
|
||||
let page = pages[0];
|
||||
|
||||
// Write PID for cleanup
|
||||
const browserProcess = browser.process();
|
||||
if (browserProcess) {
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'pid.txt'), String(browserProcess.pid));
|
||||
if (!page) {
|
||||
page = await browser.newPage();
|
||||
}
|
||||
|
||||
// Create a new page (but DON'T navigate yet)
|
||||
const page = await browser.newPage();
|
||||
await page.setViewport({ width, height });
|
||||
|
||||
// Set user agent if specified
|
||||
if (userAgent) {
|
||||
await page.setUserAgent(userAgent);
|
||||
}
|
||||
|
||||
// Write the page target ID so other extractors can find this specific page
|
||||
const target = page.target();
|
||||
const targetId = target._targetId;
|
||||
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'page_id.txt'), targetId);
|
||||
|
||||
// Write the URL for chrome_navigate to use
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'url.txt'), url);
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, 'shared_session.txt'), 'false');
|
||||
|
||||
// Connect to loaded extensions at runtime (only if not already done)
|
||||
const extensionsFile = path.join(OUTPUT_DIR, 'extensions.json');
|
||||
if (extensions.length > 0 && !fs.existsSync(extensionsFile)) {
|
||||
console.log('[*] Connecting to loaded extensions (first time setup)...');
|
||||
try {
|
||||
const loadedExtensions = await extensionUtils.loadAllExtensionsFromBrowser(browser, extensions);
|
||||
|
||||
// Write loaded extensions metadata for other extractors to use
|
||||
fs.writeFileSync(extensionsFile, JSON.stringify(loadedExtensions, null, 2));
|
||||
|
||||
console.log(`[+] Extensions loaded and available at ${extensionsFile}`);
|
||||
console.log(`[+] ${loadedExtensions.length} extensions ready for configuration by subsequent plugins`);
|
||||
} catch (e) {
|
||||
console.warn(`[⚠️] Failed to load extensions from browser: ${e.message}`);
|
||||
}
|
||||
} else if (extensions.length > 0) {
|
||||
console.log('[*] Extensions already loaded from previous snapshot');
|
||||
}
|
||||
|
||||
// Don't close browser - leave it running for other extractors
|
||||
// Detach puppeteer from browser so it stays running
|
||||
browser.disconnect();
|
||||
|
||||
return { success: true, output: OUTPUT_DIR, cdpUrl, targetId };
|
||||
return { success: true, output: OUTPUT_DIR, cdpUrl: wsUrl, targetId, pid: chromePid, shared: false };
|
||||
|
||||
} catch (e) {
|
||||
// Kill browser if startup failed
|
||||
if (browser) {
|
||||
try {
|
||||
await browser.close();
|
||||
} catch (closeErr) {
|
||||
// Ignore
|
||||
}
|
||||
try {
|
||||
process.kill(chromePid, 'SIGTERM');
|
||||
} catch (killErr) {
|
||||
// Ignore
|
||||
}
|
||||
return { success: false, error: `${e.name}: ${e.message}` };
|
||||
}
|
||||
@@ -258,9 +321,10 @@ async function main() {
|
||||
const args = parseArgs();
|
||||
const url = args.url;
|
||||
const snapshotId = args.snapshot_id;
|
||||
const crawlId = args.crawl_id;
|
||||
|
||||
if (!url || !snapshotId) {
|
||||
console.error('Usage: on_Snapshot__20_chrome_session.js --url=<url> --snapshot-id=<uuid>');
|
||||
console.error('Usage: on_Snapshot__20_chrome_session.js --url=<url> --snapshot-id=<uuid> [--crawl-id=<uuid>]');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
@@ -271,9 +335,6 @@ async function main() {
|
||||
let version = '';
|
||||
|
||||
try {
|
||||
// chrome_session launches Chrome and creates a blank page
|
||||
// Pre-load extractors (21-29) register CDP listeners
|
||||
// chrome_navigate (30) performs actual navigation
|
||||
const binary = findChrome();
|
||||
if (!binary) {
|
||||
console.error('ERROR: Chrome/Chromium binary not found');
|
||||
@@ -291,13 +352,24 @@ async function main() {
|
||||
version = '';
|
||||
}
|
||||
|
||||
const result = await startChromeSession(url, binary);
|
||||
// Try to use existing crawl Chrome session
|
||||
const crawlSession = findCrawlChromeSession(crawlId);
|
||||
let result;
|
||||
|
||||
if (crawlSession) {
|
||||
console.log(`[*] Found existing Chrome session from crawl ${crawlId}`);
|
||||
result = await createTabInExistingChrome(crawlSession.cdpUrl, url, crawlSession.pid);
|
||||
} else {
|
||||
console.log(`[*] No crawl Chrome session found, launching new Chrome`);
|
||||
result = await launchNewChrome(url, binary);
|
||||
}
|
||||
|
||||
if (result.success) {
|
||||
status = 'succeeded';
|
||||
output = result.output;
|
||||
console.log(`Chrome session started (no navigation yet): ${result.cdpUrl}`);
|
||||
console.log(`Page target ID: ${result.targetId}`);
|
||||
console.log(`[+] Chrome session ready (shared: ${result.shared})`);
|
||||
console.log(`[+] CDP URL: ${result.cdpUrl}`);
|
||||
console.log(`[+] Page target ID: ${result.targetId}`);
|
||||
} else {
|
||||
status = 'failed';
|
||||
error = result.error;
|
||||
@@ -331,6 +403,7 @@ async function main() {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
crawl_id: crawlId || null,
|
||||
status,
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
|
||||
@@ -1,31 +1,24 @@
#!/usr/bin/env node
/**
* Capture console output from a page.
* Capture console output from a page (DAEMON MODE).
*
* Captures all console messages during page load:
* - log, warn, error, info, debug
* - Includes stack traces for errors
* - Timestamps for each message
* This hook daemonizes and stays alive to capture console logs throughout
* the snapshot lifecycle. It's killed by chrome_cleanup at the end.
*
* Usage: on_Snapshot__14_consolelog.js --url=<url> --snapshot-id=<uuid>
* Output: Writes consolelog/console.jsonl (one message per line)
*
* Environment variables:
*   SAVE_CONSOLELOG: Enable console log capture (default: true)
*   CONSOLELOG_TIMEOUT: Capture duration in seconds (default: 5)
* Usage: on_Snapshot__21_consolelog.js --url=<url> --snapshot-id=<uuid>
* Output: Writes console.jsonl + listener.pid
*/

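The daemon contract implied above is small; a sketch (the keep-alive mechanism is an assumption, PID_FILE is the constant defined just below) of what chrome_cleanup expects from listener hooks like this one:

// sketch: daemonized listener contract - write our PID, then stay alive until
// chrome_cleanup reads listener.pid and sends SIGTERM at the end of the snapshot
fs.writeFileSync(PID_FILE, String(process.pid));
process.on('SIGTERM', () => process.exit(0));
setInterval(() => {}, 60000);   // assumed keep-alive; the real hook keeps CDP listeners open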
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const puppeteer = require('puppeteer-core');
|
||||
|
||||
// Extractor metadata
|
||||
const EXTRACTOR_NAME = 'consolelog';
|
||||
const OUTPUT_DIR = '.';
|
||||
const OUTPUT_FILE = 'console.jsonl';
|
||||
const PID_FILE = 'listener.pid';
|
||||
const CHROME_SESSION_DIR = '../chrome_session';
|
||||
|
||||
// Parse command line arguments
|
||||
function parseArgs() {
|
||||
const args = {};
|
||||
process.argv.slice(2).forEach(arg => {
|
||||
@@ -37,7 +30,6 @@ function parseArgs() {
|
||||
return args;
|
||||
}
|
||||
|
||||
// Get environment variable with default
|
||||
function getEnv(name, defaultValue = '') {
|
||||
return (process.env[name] || defaultValue).trim();
|
||||
}
|
||||
@@ -49,12 +41,6 @@ function getEnvBool(name, defaultValue = false) {
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
function getEnvInt(name, defaultValue = 0) {
|
||||
const val = parseInt(getEnv(name, String(defaultValue)), 10);
|
||||
return isNaN(val) ? defaultValue : val;
|
||||
}
|
||||
|
||||
// Get CDP URL from chrome_session
|
||||
function getCdpUrl() {
|
||||
const cdpFile = path.join(CHROME_SESSION_DIR, 'cdp_url.txt');
|
||||
if (fs.existsSync(cdpFile)) {
|
||||
@@ -63,7 +49,14 @@ function getCdpUrl() {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Serialize console message arguments
|
||||
function getPageId() {
|
||||
const pageIdFile = path.join(CHROME_SESSION_DIR, 'page_id.txt');
|
||||
if (fs.existsSync(pageIdFile)) {
|
||||
return fs.readFileSync(pageIdFile, 'utf8').trim();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
async function serializeArgs(args) {
|
||||
const serialized = [];
|
||||
for (const arg of args) {
|
||||
@@ -71,7 +64,6 @@ async function serializeArgs(args) {
|
||||
const json = await arg.jsonValue();
|
||||
serialized.push(json);
|
||||
} catch (e) {
|
||||
// If jsonValue() fails, try to get text representation
|
||||
try {
|
||||
serialized.push(String(arg));
|
||||
} catch (e2) {
|
||||
@@ -82,128 +74,84 @@ async function serializeArgs(args) {
|
||||
return serialized;
|
||||
}
|
||||
|
||||
// Capture console logs
|
||||
async function captureConsoleLogs(url) {
|
||||
const captureTimeout = (getEnvInt('CONSOLELOG_TIMEOUT') || 5) * 1000;
|
||||
|
||||
// Output directory is current directory (hook already runs in output dir)
|
||||
async function setupListeners() {
|
||||
const outputPath = path.join(OUTPUT_DIR, OUTPUT_FILE);
|
||||
fs.writeFileSync(outputPath, ''); // Clear existing
|
||||
|
||||
// Clear existing file
|
||||
fs.writeFileSync(outputPath, '');
|
||||
|
||||
let browser = null;
|
||||
const consoleLogs = [];
|
||||
|
||||
try {
|
||||
// Connect to existing Chrome session
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
return { success: false, error: 'No Chrome session found (chrome_session extractor must run first)' };
|
||||
}
|
||||
|
||||
browser = await puppeteer.connect({
|
||||
browserWSEndpoint: cdpUrl,
|
||||
});
|
||||
|
||||
// Get the page
|
||||
const pages = await browser.pages();
|
||||
const page = pages.find(p => p.url().startsWith('http')) || pages[0];
|
||||
|
||||
if (!page) {
|
||||
return { success: false, error: 'No page found in Chrome session' };
|
||||
}
|
||||
|
||||
// Listen for console messages
|
||||
page.on('console', async (msg) => {
|
||||
try {
|
||||
const type = msg.type();
|
||||
const text = msg.text();
|
||||
const location = msg.location();
|
||||
const args = await serializeArgs(msg.args());
|
||||
|
||||
const logEntry = {
|
||||
timestamp: new Date().toISOString(),
|
||||
type,
|
||||
text,
|
||||
args,
|
||||
location: {
|
||||
url: location.url || '',
|
||||
lineNumber: location.lineNumber,
|
||||
columnNumber: location.columnNumber,
|
||||
},
|
||||
};
|
||||
|
||||
// Write immediately to file
|
||||
fs.appendFileSync(outputPath, JSON.stringify(logEntry) + '\n');
|
||||
consoleLogs.push(logEntry);
|
||||
} catch (e) {
|
||||
// Error processing console message, skip it
|
||||
console.error(`Error processing console message: ${e.message}`);
|
||||
}
|
||||
});
|
||||
|
||||
// Listen for page errors
|
||||
page.on('pageerror', (error) => {
|
||||
try {
|
||||
const logEntry = {
|
||||
timestamp: new Date().toISOString(),
|
||||
type: 'error',
|
||||
text: error.message,
|
||||
stack: error.stack || '',
|
||||
location: {},
|
||||
};
|
||||
|
||||
fs.appendFileSync(outputPath, JSON.stringify(logEntry) + '\n');
|
||||
consoleLogs.push(logEntry);
|
||||
} catch (e) {
|
||||
console.error(`Error processing page error: ${e.message}`);
|
||||
}
|
||||
});
|
||||
|
||||
// Listen for request failures
|
||||
page.on('requestfailed', (request) => {
|
||||
try {
|
||||
const failure = request.failure();
|
||||
const logEntry = {
|
||||
timestamp: new Date().toISOString(),
|
||||
type: 'request_failed',
|
||||
text: `Request failed: ${request.url()}`,
|
||||
error: failure ? failure.errorText : 'Unknown error',
|
||||
url: request.url(),
|
||||
location: {},
|
||||
};
|
||||
|
||||
fs.appendFileSync(outputPath, JSON.stringify(logEntry) + '\n');
|
||||
consoleLogs.push(logEntry);
|
||||
} catch (e) {
|
||||
console.error(`Error processing request failure: ${e.message}`);
|
||||
}
|
||||
});
|
||||
|
||||
// Wait to capture logs
|
||||
await new Promise(resolve => setTimeout(resolve, captureTimeout));
|
||||
|
||||
// Group logs by type
|
||||
const logStats = consoleLogs.reduce((acc, log) => {
|
||||
acc[log.type] = (acc[log.type] || 0) + 1;
|
||||
return acc;
|
||||
}, {});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: outputPath,
|
||||
logCount: consoleLogs.length,
|
||||
logStats,
|
||||
};
|
||||
|
||||
} catch (e) {
|
||||
return { success: false, error: `${e.name}: ${e.message}` };
|
||||
} finally {
|
||||
if (browser) {
|
||||
browser.disconnect();
|
||||
}
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
throw new Error('No Chrome session found');
|
||||
}
|
||||
|
||||
const browser = await puppeteer.connect({ browserWSEndpoint: cdpUrl });
|
||||
|
||||
// Find our page
|
||||
const pages = await browser.pages();
|
||||
const pageId = getPageId();
|
||||
let page = null;
|
||||
|
||||
if (pageId) {
|
||||
page = pages.find(p => {
|
||||
const target = p.target();
|
||||
return target && target._targetId === pageId;
|
||||
});
|
||||
}
|
||||
if (!page) {
|
||||
page = pages[pages.length - 1];
|
||||
}
|
||||
|
||||
if (!page) {
|
||||
throw new Error('No page found');
|
||||
}
|
||||
|
||||
// Set up listeners that write directly to file
|
||||
page.on('console', async (msg) => {
|
||||
try {
|
||||
const logEntry = {
|
||||
timestamp: new Date().toISOString(),
|
||||
type: msg.type(),
|
||||
text: msg.text(),
|
||||
args: await serializeArgs(msg.args()),
|
||||
location: msg.location(),
|
||||
};
|
||||
fs.appendFileSync(outputPath, JSON.stringify(logEntry) + '\n');
|
||||
} catch (e) {
|
||||
// Ignore errors
|
||||
}
|
||||
});
|
||||
|
||||
page.on('pageerror', (error) => {
|
||||
try {
|
||||
const logEntry = {
|
||||
timestamp: new Date().toISOString(),
|
||||
type: 'error',
|
||||
text: error.message,
|
||||
stack: error.stack || '',
|
||||
};
|
||||
fs.appendFileSync(outputPath, JSON.stringify(logEntry) + '\n');
|
||||
} catch (e) {
|
||||
// Ignore
|
||||
}
|
||||
});
|
||||
|
||||
page.on('requestfailed', (request) => {
|
||||
try {
|
||||
const failure = request.failure();
|
||||
const logEntry = {
|
||||
timestamp: new Date().toISOString(),
|
||||
type: 'request_failed',
|
||||
text: `Request failed: ${request.url()}`,
|
||||
error: failure ? failure.errorText : 'Unknown error',
|
||||
url: request.url(),
|
||||
};
|
||||
fs.appendFileSync(outputPath, JSON.stringify(logEntry) + '\n');
|
||||
} catch (e) {
|
||||
// Ignore
|
||||
}
|
||||
});
|
||||
|
||||
// Don't disconnect - keep browser connection alive
|
||||
return { browser, page };
|
||||
}
|
||||
|
||||
async function main() {
|
||||
@@ -212,80 +160,83 @@ async function main() {
|
||||
const snapshotId = args.snapshot_id;
|
||||
|
||||
if (!url || !snapshotId) {
|
||||
console.error('Usage: on_Snapshot__14_consolelog.js --url=<url> --snapshot-id=<uuid>');
|
||||
console.error('Usage: on_Snapshot__21_consolelog.js --url=<url> --snapshot-id=<uuid>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!getEnvBool('SAVE_CONSOLELOG', true)) {
|
||||
console.log('Skipping (SAVE_CONSOLELOG=False)');
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
status: 'skipped',
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const startTs = new Date();
|
||||
let status = 'failed';
|
||||
let output = null;
|
||||
let error = '';
|
||||
let logCount = 0;
|
||||
|
||||
try {
|
||||
// Check if enabled
|
||||
if (!getEnvBool('SAVE_CONSOLELOG', true)) {
|
||||
console.log('Skipping console log (SAVE_CONSOLELOG=False)');
|
||||
status = 'skipped';
|
||||
const endTs = new Date();
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`STATUS=${status}`);
|
||||
console.log(`RESULT_JSON=${JSON.stringify({extractor: EXTRACTOR_NAME, status, url, snapshot_id: snapshotId})}`);
|
||||
process.exit(0);
|
||||
}
|
||||
// Set up listeners
|
||||
await setupListeners();
|
||||
|
||||
const result = await captureConsoleLogs(url);
|
||||
// Write PID file so chrome_cleanup can kill us
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, PID_FILE), String(process.pid));
|
||||
|
||||
if (result.success) {
|
||||
status = 'succeeded';
|
||||
output = result.output;
|
||||
logCount = result.logCount || 0;
|
||||
const statsStr = Object.entries(result.logStats || {})
|
||||
.map(([type, count]) => `${count} ${type}`)
|
||||
.join(', ');
|
||||
console.log(`Captured ${logCount} console messages: ${statsStr}`);
|
||||
} else {
|
||||
status = 'failed';
|
||||
error = result.error;
|
||||
// Report success immediately (we're staying alive in background)
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
console.log(`OUTPUT=${OUTPUT_FILE}`);
|
||||
console.log(`STATUS=succeeded`);
|
||||
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status: 'succeeded',
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output: OUTPUT_FILE,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
|
||||
// Daemonize: detach from parent and keep running
|
||||
// This process will be killed by chrome_cleanup
|
||||
if (process.stdin.isTTY) {
|
||||
process.stdin.pause();
|
||||
}
|
||||
process.stdin.unref();
|
||||
process.stdout.end();
|
||||
process.stderr.end();
|
||||
|
||||
// Keep the process alive indefinitely
|
||||
// Will be killed by chrome_cleanup via the PID file
|
||||
setInterval(() => {}, 1000);
|
||||
|
||||
} catch (e) {
|
||||
error = `${e.name}: ${e.message}`;
|
||||
status = 'failed';
|
||||
}
|
||||
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
// Print results
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
if (output) {
|
||||
console.log(`OUTPUT=${output}`);
|
||||
}
|
||||
console.log(`STATUS=${status}`);
|
||||
|
||||
if (error) {
|
||||
const error = `${e.name}: ${e.message}`;
|
||||
console.error(`ERROR=${error}`);
|
||||
|
||||
const endTs = new Date();
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status: 'failed',
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
error,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Print JSON result
|
||||
const resultJson = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status,
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output,
|
||||
log_count: logCount,
|
||||
error: error || null,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(resultJson)}`);
|
||||
|
||||
process.exit(status === 'succeeded' ? 0 : 1);
|
||||
}
|
||||
|
||||
main().catch(e => {
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
*
|
||||
* Extension: https://chromewebstore.google.com/detail/edibdbjcniadpccecjdfdjjppcpchdlm
|
||||
*
|
||||
* Priority: 02 (early) - Must install before Chrome session starts
|
||||
* Hook: on_Snapshot
|
||||
* Priority: 02 (early) - Must install before Chrome session starts at Crawl level
|
||||
* Hook: on_Crawl (runs once per crawl, not per snapshot)
|
||||
*
|
||||
* This extension automatically:
|
||||
* - Dismisses cookie consent popups
|
||||
@@ -1,278 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Track complete redirect chains for a URL.
|
||||
*
|
||||
* Captures:
|
||||
* - HTTP redirects (301, 302, 303, 307, 308)
|
||||
* - Meta refresh redirects
|
||||
* - JavaScript redirects (basic detection)
|
||||
* - Full redirect chain with timestamps
|
||||
*
|
||||
* Usage: on_Snapshot__15_redirects.js --url=<url> --snapshot-id=<uuid>
|
||||
* Output: Writes redirects/redirects.json
|
||||
*
|
||||
* Environment variables:
|
||||
* SAVE_REDIRECTS: Enable redirect tracking (default: true)
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const puppeteer = require('puppeteer-core');
|
||||
|
||||
// Extractor metadata
|
||||
const EXTRACTOR_NAME = 'redirects';
|
||||
const OUTPUT_DIR = '.';
|
||||
const OUTPUT_FILE = 'redirects.json';
|
||||
const CHROME_SESSION_DIR = '../chrome_session';
|
||||
|
||||
// Parse command line arguments
|
||||
function parseArgs() {
|
||||
const args = {};
|
||||
process.argv.slice(2).forEach(arg => {
|
||||
if (arg.startsWith('--')) {
|
||||
const [key, ...valueParts] = arg.slice(2).split('=');
|
||||
args[key.replace(/-/g, '_')] = valueParts.join('=') || true;
|
||||
}
|
||||
});
|
||||
return args;
|
||||
}
|
||||
|
||||
// Get environment variable with default
|
||||
function getEnv(name, defaultValue = '') {
|
||||
return (process.env[name] || defaultValue).trim();
|
||||
}
|
||||
|
||||
function getEnvBool(name, defaultValue = false) {
|
||||
const val = getEnv(name, '').toLowerCase();
|
||||
if (['true', '1', 'yes', 'on'].includes(val)) return true;
|
||||
if (['false', '0', 'no', 'off'].includes(val)) return false;
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
// Get CDP URL from chrome_session
|
||||
function getCdpUrl() {
|
||||
const cdpFile = path.join(CHROME_SESSION_DIR, 'cdp_url.txt');
|
||||
if (fs.existsSync(cdpFile)) {
|
||||
return fs.readFileSync(cdpFile, 'utf8').trim();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// Track redirect chain
|
||||
async function trackRedirects(url) {
|
||||
// Output directory is current directory (hook already runs in output dir)
|
||||
const outputPath = path.join(OUTPUT_DIR, OUTPUT_FILE);
|
||||
|
||||
let browser = null;
|
||||
const redirectChain = [];
|
||||
|
||||
try {
|
||||
// Connect to existing Chrome session
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
return { success: false, error: 'No Chrome session found (chrome_session extractor must run first)' };
|
||||
}
|
||||
|
||||
browser = await puppeteer.connect({
|
||||
browserWSEndpoint: cdpUrl,
|
||||
});
|
||||
|
||||
// Get the page
|
||||
const pages = await browser.pages();
|
||||
const page = pages.find(p => p.url().startsWith('http')) || pages[0];
|
||||
|
||||
if (!page) {
|
||||
return { success: false, error: 'No page found in Chrome session' };
|
||||
}
|
||||
|
||||
// Track all responses to capture redirects
|
||||
page.on('response', async (response) => {
|
||||
const status = response.status();
|
||||
const responseUrl = response.url();
|
||||
const headers = response.headers();
|
||||
|
||||
// Check if it's a redirect
|
||||
if (status >= 300 && status < 400) {
|
||||
redirectChain.push({
|
||||
timestamp: new Date().toISOString(),
|
||||
url: responseUrl,
|
||||
status,
|
||||
statusText: response.statusText(),
|
||||
location: headers['location'] || headers['Location'] || '',
|
||||
type: 'http',
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Get the current URL (which is the final destination after redirects)
|
||||
const finalUrl = page.url();
|
||||
|
||||
// Check for meta refresh redirects
|
||||
const metaRefresh = await page.evaluate(() => {
|
||||
const meta = document.querySelector('meta[http-equiv="refresh"]');
|
||||
if (meta) {
|
||||
const content = meta.getAttribute('content') || '';
|
||||
const match = content.match(/url=['"]?([^'"]+)['"]?/i);
|
||||
return {
|
||||
content,
|
||||
url: match ? match[1] : null,
|
||||
};
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
if (metaRefresh && metaRefresh.url) {
|
||||
redirectChain.push({
|
||||
timestamp: new Date().toISOString(),
|
||||
url: finalUrl,
|
||||
status: null,
|
||||
statusText: 'Meta Refresh',
|
||||
location: metaRefresh.url,
|
||||
type: 'meta_refresh',
|
||||
content: metaRefresh.content,
|
||||
});
|
||||
}
|
||||
|
||||
// Check for JavaScript redirects (basic detection)
|
||||
const jsRedirect = await page.evaluate(() => {
|
||||
// Check for common JavaScript redirect patterns
|
||||
const html = document.documentElement.outerHTML;
|
||||
const patterns = [
|
||||
/window\.location\s*=\s*['"]([^'"]+)['"]/i,
|
||||
/window\.location\.href\s*=\s*['"]([^'"]+)['"]/i,
|
||||
/window\.location\.replace\s*\(\s*['"]([^'"]+)['"]\s*\)/i,
|
||||
/document\.location\s*=\s*['"]([^'"]+)['"]/i,
|
||||
];
|
||||
|
||||
for (const pattern of patterns) {
|
||||
const match = html.match(pattern);
|
||||
if (match) {
|
||||
return {
|
||||
pattern: pattern.toString(),
|
||||
url: match[1],
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
if (jsRedirect && jsRedirect.url) {
|
||||
redirectChain.push({
|
||||
timestamp: new Date().toISOString(),
|
||||
url: finalUrl,
|
||||
status: null,
|
||||
statusText: 'JavaScript Redirect',
|
||||
location: jsRedirect.url,
|
||||
type: 'javascript',
|
||||
pattern: jsRedirect.pattern,
|
||||
});
|
||||
}
|
||||
|
||||
const redirectData = {
|
||||
original_url: url,
|
||||
final_url: finalUrl,
|
||||
redirect_count: redirectChain.length,
|
||||
redirects: redirectChain,
|
||||
is_redirect: redirectChain.length > 0,
|
||||
};
|
||||
|
||||
// Write output
|
||||
fs.writeFileSync(outputPath, JSON.stringify(redirectData, null, 2));
|
||||
|
||||
return { success: true, output: outputPath, redirectData };
|
||||
|
||||
} catch (e) {
|
||||
return { success: false, error: `${e.name}: ${e.message}` };
|
||||
} finally {
|
||||
if (browser) {
|
||||
browser.disconnect();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const args = parseArgs();
|
||||
const url = args.url;
|
||||
const snapshotId = args.snapshot_id;
|
||||
|
||||
if (!url || !snapshotId) {
|
||||
console.error('Usage: on_Snapshot__15_redirects.js --url=<url> --snapshot-id=<uuid>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const startTs = new Date();
|
||||
let status = 'failed';
|
||||
let output = null;
|
||||
let error = '';
|
||||
|
||||
try {
|
||||
// Check if enabled
|
||||
if (!getEnvBool('SAVE_REDIRECTS', true)) {
|
||||
console.log('Skipping redirects (SAVE_REDIRECTS=False)');
|
||||
status = 'skipped';
|
||||
const endTs = new Date();
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`STATUS=${status}`);
|
||||
console.log(`RESULT_JSON=${JSON.stringify({extractor: EXTRACTOR_NAME, status, url, snapshot_id: snapshotId})}`);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const result = await trackRedirects(url);
|
||||
|
||||
if (result.success) {
|
||||
status = 'succeeded';
|
||||
output = result.output;
|
||||
const redirectCount = result.redirectData.redirect_count;
|
||||
const finalUrl = result.redirectData.final_url;
|
||||
if (redirectCount > 0) {
|
||||
console.log(`Tracked ${redirectCount} redirect(s) to: ${finalUrl}`);
|
||||
} else {
|
||||
console.log('No redirects detected');
|
||||
}
|
||||
} else {
|
||||
status = 'failed';
|
||||
error = result.error;
|
||||
}
|
||||
} catch (e) {
|
||||
error = `${e.name}: ${e.message}`;
|
||||
status = 'failed';
|
||||
}
|
||||
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
// Print results
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
if (output) {
|
||||
console.log(`OUTPUT=${output}`);
|
||||
}
|
||||
console.log(`STATUS=${status}`);
|
||||
|
||||
if (error) {
|
||||
console.error(`ERROR=${error}`);
|
||||
}
|
||||
|
||||
// Print JSON result
|
||||
const resultJson = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status,
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output,
|
||||
error: error || null,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(resultJson)}`);
|
||||
|
||||
process.exit(status === 'succeeded' ? 0 : 1);
|
||||
}
|
||||
|
||||
main().catch(e => {
|
||||
console.error(`Fatal error: ${e.message}`);
|
||||
process.exit(1);
|
||||
});
|
||||
archivebox/plugins/redirects/on_Snapshot__31_redirects.js (new executable file, 248 lines)
@@ -0,0 +1,248 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Detect redirects by comparing original URL to final URL.
|
||||
*
|
||||
* This runs AFTER chrome_navigate and checks:
|
||||
* - URL changed (HTTP redirect occurred)
|
||||
* - Meta refresh tags (pending redirects)
|
||||
* - JavaScript redirects (basic detection)
|
||||
*
|
||||
* Usage: on_Snapshot__31_redirects.js --url=<url> --snapshot-id=<uuid>
|
||||
* Output: Writes redirects.json
|
||||
*/
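For orientation, the redirects.json written by this hook has roughly the following shape (values are illustrative only, not from a real run):

{
  "original_url": "https://example.com/old",
  "final_url": "https://example.com/new",
  "redirect_count": 1,
  "redirects": [
    {
      "timestamp": "2024-01-01T00:00:00.000Z",
      "from_url": "https://example.com/old",
      "to_url": "https://example.com/new",
      "type": "http",
      "detected_by": "url_comparison"
    }
  ],
  "is_redirect": true
}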
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const puppeteer = require('puppeteer-core');
|
||||
|
||||
const EXTRACTOR_NAME = 'redirects';
|
||||
const OUTPUT_DIR = '.';
|
||||
const OUTPUT_FILE = 'redirects.json';
|
||||
const CHROME_SESSION_DIR = '../chrome_session';
|
||||
const CHROME_NAVIGATE_DIR = '../chrome_navigate';
|
||||
|
||||
function parseArgs() {
|
||||
const args = {};
|
||||
process.argv.slice(2).forEach(arg => {
|
||||
if (arg.startsWith('--')) {
|
||||
const [key, ...valueParts] = arg.slice(2).split('=');
|
||||
args[key.replace(/-/g, '_')] = valueParts.join('=') || true;
|
||||
}
|
||||
});
|
||||
return args;
|
||||
}
|
||||
|
||||
function getEnv(name, defaultValue = '') {
|
||||
return (process.env[name] || defaultValue).trim();
|
||||
}
|
||||
|
||||
function getEnvBool(name, defaultValue = false) {
|
||||
const val = getEnv(name, '').toLowerCase();
|
||||
if (['true', '1', 'yes', 'on'].includes(val)) return true;
|
||||
if (['false', '0', 'no', 'off'].includes(val)) return false;
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
function getCdpUrl() {
|
||||
const cdpFile = path.join(CHROME_SESSION_DIR, 'cdp_url.txt');
|
||||
if (fs.existsSync(cdpFile)) {
|
||||
return fs.readFileSync(cdpFile, 'utf8').trim();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function getPageId() {
|
||||
const pageIdFile = path.join(CHROME_SESSION_DIR, 'page_id.txt');
|
||||
if (fs.existsSync(pageIdFile)) {
|
||||
return fs.readFileSync(pageIdFile, 'utf8').trim();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function getFinalUrl() {
|
||||
// Try chrome_navigate output first
|
||||
const navFile = path.join(CHROME_NAVIGATE_DIR, 'final_url.txt');
|
||||
if (fs.existsSync(navFile)) {
|
||||
return fs.readFileSync(navFile, 'utf8').trim();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
async function detectRedirects(originalUrl) {
|
||||
const outputPath = path.join(OUTPUT_DIR, OUTPUT_FILE);
|
||||
const redirects = [];
|
||||
|
||||
// Get final URL from chrome_navigate
|
||||
let finalUrl = getFinalUrl() || originalUrl;
|
||||
|
||||
// Check if URL changed (indicates redirect)
|
||||
const urlChanged = originalUrl !== finalUrl;
|
||||
if (urlChanged) {
|
||||
redirects.push({
|
||||
timestamp: new Date().toISOString(),
|
||||
from_url: originalUrl,
|
||||
to_url: finalUrl,
|
||||
type: 'http',
|
||||
detected_by: 'url_comparison',
|
||||
});
|
||||
}
|
||||
|
||||
// Connect to Chrome to check for meta refresh and JS redirects
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (cdpUrl) {
|
||||
let browser = null;
|
||||
try {
|
||||
browser = await puppeteer.connect({ browserWSEndpoint: cdpUrl });
|
||||
|
||||
const pages = await browser.pages();
|
||||
const pageId = getPageId();
|
||||
let page = null;
|
||||
|
||||
if (pageId) {
|
||||
page = pages.find(p => {
|
||||
const target = p.target();
|
||||
return target && target._targetId === pageId;
|
||||
});
|
||||
}
|
||||
if (!page) {
|
||||
page = pages.find(p => p.url().startsWith('http')) || pages[pages.length - 1];
|
||||
}
|
||||
|
||||
if (page) {
|
||||
// Update finalUrl from actual page
|
||||
const pageUrl = page.url();
|
||||
if (pageUrl && pageUrl !== 'about:blank') {
|
||||
finalUrl = pageUrl;
|
||||
}
|
||||
|
||||
// Check for meta refresh
|
||||
try {
|
||||
const metaRefresh = await page.evaluate(() => {
|
||||
const meta = document.querySelector('meta[http-equiv="refresh"]');
|
||||
if (meta) {
|
||||
const content = meta.getAttribute('content') || '';
|
||||
const match = content.match(/url=['"]?([^'";\s]+)['"]?/i);
|
||||
return { content, url: match ? match[1] : null };
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
if (metaRefresh && metaRefresh.url) {
|
||||
redirects.push({
|
||||
timestamp: new Date().toISOString(),
|
||||
from_url: finalUrl,
|
||||
to_url: metaRefresh.url,
|
||||
type: 'meta_refresh',
|
||||
content: metaRefresh.content,
|
||||
});
|
||||
}
|
||||
} catch (e) { /* ignore */ }
|
||||
|
||||
// Check for JS redirects
|
||||
try {
|
||||
const jsRedirect = await page.evaluate(() => {
|
||||
const html = document.documentElement.outerHTML;
|
||||
const patterns = [
|
||||
/window\.location\s*=\s*['"]([^'"]+)['"]/i,
|
||||
/window\.location\.href\s*=\s*['"]([^'"]+)['"]/i,
|
||||
/window\.location\.replace\s*\(\s*['"]([^'"]+)['"]\s*\)/i,
|
||||
];
|
||||
for (const pattern of patterns) {
|
||||
const match = html.match(pattern);
|
||||
if (match) return { url: match[1], pattern: pattern.toString() };
|
||||
}
|
||||
return null;
|
||||
});
|
||||
|
||||
if (jsRedirect && jsRedirect.url) {
|
||||
redirects.push({
|
||||
timestamp: new Date().toISOString(),
|
||||
from_url: finalUrl,
|
||||
to_url: jsRedirect.url,
|
||||
type: 'javascript',
|
||||
});
|
||||
}
|
||||
} catch (e) { /* ignore */ }
|
||||
}
|
||||
|
||||
browser.disconnect();
|
||||
} catch (e) {
|
||||
console.error(`Warning: Could not connect to Chrome: ${e.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
const result = {
|
||||
original_url: originalUrl,
|
||||
final_url: finalUrl,
|
||||
redirect_count: redirects.length,
|
||||
redirects,
|
||||
is_redirect: originalUrl !== finalUrl || redirects.length > 0,
|
||||
};
|
||||
|
||||
fs.writeFileSync(outputPath, JSON.stringify(result, null, 2));
|
||||
return { success: true, output: outputPath, data: result };
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const args = parseArgs();
|
||||
const url = args.url;
|
||||
const snapshotId = args.snapshot_id;
|
||||
|
||||
if (!url || !snapshotId) {
|
||||
console.error('Usage: on_Snapshot__31_redirects.js --url=<url> --snapshot-id=<uuid>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const startTs = new Date();
|
||||
let status = 'failed';
|
||||
let output = null;
|
||||
let error = '';
|
||||
|
||||
if (!getEnvBool('SAVE_REDIRECTS', true)) {
|
||||
console.log('Skipping redirects (SAVE_REDIRECTS=False)');
|
||||
status = 'skipped';
|
||||
} else {
|
||||
try {
|
||||
const result = await detectRedirects(url);
|
||||
status = 'succeeded';
|
||||
output = result.output;
|
||||
|
||||
if (result.data.is_redirect) {
|
||||
console.log(`Redirect detected: ${url} -> ${result.data.final_url}`);
|
||||
} else {
|
||||
console.log('No redirects detected');
|
||||
}
|
||||
} catch (e) {
|
||||
error = `${e.name}: ${e.message}`;
|
||||
}
|
||||
}
|
||||
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
if (output) console.log(`OUTPUT=${output}`);
|
||||
console.log(`STATUS=${status}`);
|
||||
if (error) console.error(`ERROR=${error}`);
|
||||
|
||||
console.log(`RESULT_JSON=${JSON.stringify({
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status,
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output,
|
||||
error: error || null,
|
||||
})}`);
|
||||
|
||||
process.exit(status === 'succeeded' ? 0 : 1);
|
||||
}
|
||||
|
||||
main().catch(e => {
|
||||
console.error(`Fatal error: ${e.message}`);
|
||||
process.exit(1);
|
||||
});
|
||||
@@ -1,22 +1,12 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Archive all network responses during page load.
|
||||
* Archive all network responses during page load (DAEMON MODE).
|
||||
*
|
||||
* Connects to Chrome session and captures ALL network responses (XHR, images, scripts, etc.)
|
||||
* Saves them in an organized directory structure with both timestamped unique files
|
||||
* and URL-organized symlinks.
|
||||
* This hook daemonizes and stays alive to capture network responses throughout
|
||||
* the snapshot lifecycle. It's killed by chrome_cleanup at the end.
|
||||
*
|
||||
* Usage: on_Snapshot__23_responses.js --url=<url> --snapshot-id=<uuid>
|
||||
* Output: Creates responses/ directory with:
|
||||
* - all/<timestamp>__<METHOD>__<URL>.<ext>: Timestamped unique files
|
||||
* - <type>/<domain>/<path>/: URL-organized symlinks by resource type
|
||||
* - index.jsonl: Searchable index of all responses
|
||||
*
|
||||
* Environment variables:
|
||||
* SAVE_RESPONSES: Enable response archiving (default: true)
|
||||
* RESPONSES_TIMEOUT: Timeout in seconds (default: 120)
|
||||
* RESPONSES_TYPES: Comma-separated resource types to save (default: all)
|
||||
* Options: script,stylesheet,font,image,media,xhr,websocket,document
|
||||
* Usage: on_Snapshot__24_responses.js --url=<url> --snapshot-id=<uuid>
|
||||
* Output: Creates responses/ directory with index.jsonl + listener.pid
|
||||
*/
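Each line of index.jsonl is one JSON object per saved response, roughly of this shape (values are illustrative; the exact path depends on the sanitized URL hash):

{"ts": "20240101T000000", "method": "GET", "url": "https://example.com/app.js", "urlSha256": "3b4f…", "status": 200, "resourceType": "script", "mimeType": "application/javascript", "responseSha256": "9c2e…", "path": "./all/20240101T000000__GET__https%3A%2F%2Fexample.com%2Fapp.js.js", "extension": "js"}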
|
||||
|
||||
const fs = require('fs');
|
||||
@@ -27,6 +17,7 @@ const puppeteer = require('puppeteer-core');
|
||||
// Extractor metadata
|
||||
const EXTRACTOR_NAME = 'responses';
|
||||
const OUTPUT_DIR = '.';
|
||||
const PID_FILE = 'listener.pid';
|
||||
const CHROME_SESSION_DIR = '../chrome_session';
|
||||
|
||||
// Resource types to capture (by default, capture everything)
|
||||
@@ -70,6 +61,14 @@ function getCdpUrl() {
|
||||
return null;
|
||||
}
|
||||
|
||||
function getPageId() {
|
||||
const pageIdFile = path.join(CHROME_SESSION_DIR, 'page_id.txt');
|
||||
if (fs.existsSync(pageIdFile)) {
|
||||
return fs.readFileSync(pageIdFile, 'utf8').trim();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// Get file extension from MIME type
|
||||
function getExtensionFromMimeType(mimeType) {
|
||||
const mimeMap = {
|
||||
@@ -139,17 +138,14 @@ async function createSymlink(target, linkPath) {
|
||||
fs.symlinkSync(relativePath, linkPath);
|
||||
} catch (e) {
|
||||
// Ignore symlink errors (file conflicts, permissions, etc.)
|
||||
console.error(`Failed to create symlink: ${e.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Archive responses by intercepting network traffic
|
||||
async function archiveResponses(originalUrl) {
|
||||
const timeout = (getEnvInt('RESPONSES_TIMEOUT') || getEnvInt('TIMEOUT', 120)) * 1000;
|
||||
// Set up response listener
|
||||
async function setupListener() {
|
||||
const typesStr = getEnv('RESPONSES_TYPES', DEFAULT_TYPES.join(','));
|
||||
const typesToSave = typesStr.split(',').map(t => t.trim().toLowerCase());
|
||||
|
||||
// Output directory is current directory (hook already runs in output dir)
|
||||
// Create subdirectories for organizing responses
|
||||
const allDir = path.join(OUTPUT_DIR, 'all');
|
||||
if (!fs.existsSync(allDir)) {
|
||||
@@ -160,138 +156,119 @@ async function archiveResponses(originalUrl) {
|
||||
const indexPath = path.join(OUTPUT_DIR, 'index.jsonl');
|
||||
fs.writeFileSync(indexPath, ''); // Clear existing
|
||||
|
||||
let browser = null;
|
||||
let savedCount = 0;
|
||||
const savedResponses = [];
|
||||
|
||||
try {
|
||||
// Connect to existing Chrome session
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
return { success: false, error: 'No Chrome session found (chrome_session extractor must run first)' };
|
||||
}
|
||||
|
||||
browser = await puppeteer.connect({
|
||||
browserWSEndpoint: cdpUrl,
|
||||
});
|
||||
|
||||
// Get the page
|
||||
const pages = await browser.pages();
|
||||
const page = pages.find(p => p.url().startsWith('http')) || pages[0];
|
||||
|
||||
if (!page) {
|
||||
return { success: false, error: 'No page found in Chrome session' };
|
||||
}
|
||||
|
||||
// Enable request interception
|
||||
await page.setRequestInterception(false); // Don't block requests
|
||||
|
||||
// Listen for responses
|
||||
page.on('response', async (response) => {
|
||||
try {
|
||||
const request = response.request();
|
||||
const url = response.url();
|
||||
const resourceType = request.resourceType().toLowerCase();
|
||||
const method = request.method();
|
||||
const status = response.status();
|
||||
|
||||
// Skip redirects and errors
|
||||
if (status >= 300 && status < 400) return;
|
||||
if (status >= 400 && status < 600) return;
|
||||
|
||||
// Check if we should save this resource type
|
||||
if (typesToSave.length && !typesToSave.includes(resourceType)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get response body
|
||||
let bodyBuffer = null;
|
||||
try {
|
||||
bodyBuffer = await response.buffer();
|
||||
} catch (e) {
|
||||
// Some responses can't be captured (already consumed, etc.)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!bodyBuffer || bodyBuffer.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Determine file extension
|
||||
const mimeType = response.headers()['content-type'] || '';
|
||||
let extension = getExtensionFromMimeType(mimeType) || getExtensionFromUrl(url);
|
||||
|
||||
// Create timestamp-based unique filename
|
||||
const timestamp = new Date().toISOString().replace(/[-:]/g, '').replace(/\..+/, '');
|
||||
const urlHash = sanitizeFilename(encodeURIComponent(url).slice(0, 64));
|
||||
const uniqueFilename = `${timestamp}__${method}__${urlHash}${extension ? '.' + extension : ''}`;
|
||||
const uniquePath = path.join(allDir, uniqueFilename);
|
||||
|
||||
// Save to unique file
|
||||
fs.writeFileSync(uniquePath, bodyBuffer);
|
||||
|
||||
// Create URL-organized symlink
|
||||
try {
|
||||
const urlObj = new URL(url);
|
||||
const hostname = urlObj.hostname;
|
||||
const pathname = urlObj.pathname || '/';
|
||||
const filename = path.basename(pathname) || 'index' + (extension ? '.' + extension : '');
|
||||
const dirPath = path.dirname(pathname);
|
||||
|
||||
// Create symlink: responses/<type>/<hostname>/<path>/<filename>
|
||||
const symlinkDir = path.join(OUTPUT_DIR, resourceType, hostname, dirPath);
|
||||
const symlinkPath = path.join(symlinkDir, filename);
|
||||
await createSymlink(uniquePath, symlinkPath);
|
||||
} catch (e) {
|
||||
// URL parsing or symlink creation failed, skip
|
||||
}
|
||||
|
||||
// Calculate SHA256
|
||||
const sha256 = crypto.createHash('sha256').update(bodyBuffer).digest('hex');
|
||||
const urlSha256 = crypto.createHash('sha256').update(url).digest('hex');
|
||||
|
||||
// Write to index
|
||||
const indexEntry = {
|
||||
ts: timestamp,
|
||||
method,
|
||||
url: method === 'DATA' ? url.slice(0, 128) : url, // Truncate data: URLs
|
||||
urlSha256,
|
||||
status,
|
||||
resourceType,
|
||||
mimeType: mimeType.split(';')[0],
|
||||
responseSha256: sha256,
|
||||
path: './' + path.relative(OUTPUT_DIR, uniquePath),
|
||||
extension,
|
||||
};
|
||||
|
||||
fs.appendFileSync(indexPath, JSON.stringify(indexEntry) + '\n');
|
||||
savedResponses.push(indexEntry);
|
||||
savedCount++;
|
||||
|
||||
} catch (e) {
|
||||
// Log but don't fail the whole extraction
|
||||
console.error(`Error capturing response: ${e.message}`);
|
||||
}
|
||||
});
|
||||
|
||||
// Wait a bit to ensure we capture responses
|
||||
// (chrome_session already loaded the page, just capture any remaining traffic)
|
||||
await new Promise(resolve => setTimeout(resolve, 2000));
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: OUTPUT_DIR,
|
||||
savedCount,
|
||||
indexPath,
|
||||
};
|
||||
|
||||
} catch (e) {
|
||||
return { success: false, error: `${e.name}: ${e.message}` };
|
||||
} finally {
|
||||
if (browser) {
|
||||
browser.disconnect();
|
||||
}
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
throw new Error('No Chrome session found');
|
||||
}
|
||||
|
||||
const browser = await puppeteer.connect({ browserWSEndpoint: cdpUrl });
|
||||
|
||||
// Find our page
|
||||
const pages = await browser.pages();
|
||||
const pageId = getPageId();
|
||||
let page = null;
|
||||
|
||||
if (pageId) {
|
||||
page = pages.find(p => {
|
||||
const target = p.target();
|
||||
return target && target._targetId === pageId;
|
||||
});
|
||||
}
|
||||
if (!page) {
|
||||
page = pages[pages.length - 1];
|
||||
}
|
||||
|
||||
if (!page) {
|
||||
throw new Error('No page found');
|
||||
}
|
||||
|
||||
// Set up response listener to capture network traffic
|
||||
page.on('response', async (response) => {
|
||||
try {
|
||||
const request = response.request();
|
||||
const url = response.url();
|
||||
const resourceType = request.resourceType().toLowerCase();
|
||||
const method = request.method();
|
||||
const status = response.status();
|
||||
|
||||
// Skip redirects and errors
|
||||
if (status >= 300 && status < 400) return;
|
||||
if (status >= 400 && status < 600) return;
|
||||
|
||||
// Check if we should save this resource type
|
||||
if (typesToSave.length && !typesToSave.includes(resourceType)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Get response body
|
||||
let bodyBuffer = null;
|
||||
try {
|
||||
bodyBuffer = await response.buffer();
|
||||
} catch (e) {
|
||||
// Some responses can't be captured (already consumed, etc.)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!bodyBuffer || bodyBuffer.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Determine file extension
|
||||
const mimeType = response.headers()['content-type'] || '';
|
||||
let extension = getExtensionFromMimeType(mimeType) || getExtensionFromUrl(url);
|
||||
|
||||
// Create timestamp-based unique filename
|
||||
const timestamp = new Date().toISOString().replace(/[-:]/g, '').replace(/\..+/, '');
|
||||
const urlHash = sanitizeFilename(encodeURIComponent(url).slice(0, 64));
|
||||
const uniqueFilename = `${timestamp}__${method}__${urlHash}${extension ? '.' + extension : ''}`;
|
||||
const uniquePath = path.join(allDir, uniqueFilename);
|
||||
|
||||
// Save to unique file
|
||||
fs.writeFileSync(uniquePath, bodyBuffer);
|
||||
|
||||
// Create URL-organized symlink
|
||||
try {
|
||||
const urlObj = new URL(url);
|
||||
const hostname = urlObj.hostname;
|
||||
const pathname = urlObj.pathname || '/';
|
||||
const filename = path.basename(pathname) || 'index' + (extension ? '.' + extension : '');
|
||||
const dirPath = path.dirname(pathname);
|
||||
|
||||
// Create symlink: responses/<type>/<hostname>/<path>/<filename>
|
||||
const symlinkDir = path.join(OUTPUT_DIR, resourceType, hostname, dirPath);
|
||||
const symlinkPath = path.join(symlinkDir, filename);
|
||||
await createSymlink(uniquePath, symlinkPath);
|
||||
} catch (e) {
|
||||
// URL parsing or symlink creation failed, skip
|
||||
}
|
||||
|
||||
// Calculate SHA256
|
||||
const sha256 = crypto.createHash('sha256').update(bodyBuffer).digest('hex');
|
||||
const urlSha256 = crypto.createHash('sha256').update(url).digest('hex');
|
||||
|
||||
// Write to index
|
||||
const indexEntry = {
|
||||
ts: timestamp,
|
||||
method,
|
||||
url: method === 'DATA' ? url.slice(0, 128) : url, // Truncate data: URLs
|
||||
urlSha256,
|
||||
status,
|
||||
resourceType,
|
||||
mimeType: mimeType.split(';')[0],
|
||||
responseSha256: sha256,
|
||||
path: './' + path.relative(OUTPUT_DIR, uniquePath),
|
||||
extension,
|
||||
};
|
||||
|
||||
fs.appendFileSync(indexPath, JSON.stringify(indexEntry) + '\n');
|
||||
|
||||
} catch (e) {
|
||||
// Ignore errors
|
||||
}
|
||||
});
|
||||
|
||||
// Don't disconnect - keep browser connection alive
|
||||
return { browser, page };
|
||||
}
|
||||
|
||||
async function main() {
|
||||
@@ -300,77 +277,83 @@ async function main() {
|
||||
const snapshotId = args.snapshot_id;
|
||||
|
||||
if (!url || !snapshotId) {
|
||||
console.error('Usage: on_Snapshot__23_responses.js --url=<url> --snapshot-id=<uuid>');
|
||||
console.error('Usage: on_Snapshot__24_responses.js --url=<url> --snapshot-id=<uuid>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!getEnvBool('SAVE_RESPONSES', true)) {
|
||||
console.log('Skipping (SAVE_RESPONSES=False)');
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
status: 'skipped',
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const startTs = new Date();
|
||||
let status = 'failed';
|
||||
let output = null;
|
||||
let error = '';
|
||||
let savedCount = 0;
|
||||
|
||||
try {
|
||||
// Check if enabled
|
||||
if (!getEnvBool('SAVE_RESPONSES', true)) {
|
||||
console.log('Skipping responses (SAVE_RESPONSES=False)');
|
||||
status = 'skipped';
|
||||
const endTs = new Date();
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`STATUS=${status}`);
|
||||
console.log(`RESULT_JSON=${JSON.stringify({extractor: EXTRACTOR_NAME, status, url, snapshot_id: snapshotId})}`);
|
||||
process.exit(0);
|
||||
}
|
||||
// Set up listener
|
||||
await setupListener();
|
||||
|
||||
const result = await archiveResponses(url);
|
||||
// Write PID file so chrome_cleanup can kill us
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, PID_FILE), String(process.pid));
|
||||
|
||||
if (result.success) {
|
||||
status = 'succeeded';
|
||||
output = result.output;
|
||||
savedCount = result.savedCount || 0;
|
||||
console.log(`Saved ${savedCount} network responses to ${output}/`);
|
||||
} else {
|
||||
status = 'failed';
|
||||
error = result.error;
|
||||
// Report success immediately (we're staying alive in background)
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
console.log(`OUTPUT=responses/`);
|
||||
console.log(`STATUS=succeeded`);
|
||||
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status: 'succeeded',
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output: 'responses/',
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
|
||||
// Daemonize: detach from parent and keep running
|
||||
// This process will be killed by chrome_cleanup
|
||||
if (process.stdin.isTTY) {
|
||||
process.stdin.pause();
|
||||
}
|
||||
process.stdin.unref();
|
||||
process.stdout.end();
|
||||
process.stderr.end();
|
||||
|
||||
// Keep the process alive indefinitely
|
||||
// Will be killed by chrome_cleanup via the PID file
|
||||
setInterval(() => {}, 1000);
|
||||
|
||||
} catch (e) {
|
||||
error = `${e.name}: ${e.message}`;
|
||||
status = 'failed';
|
||||
}
|
||||
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
// Print results
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
if (output) {
|
||||
console.log(`OUTPUT=${output}`);
|
||||
}
|
||||
console.log(`STATUS=${status}`);
|
||||
|
||||
if (error) {
|
||||
const error = `${e.name}: ${e.message}`;
|
||||
console.error(`ERROR=${error}`);
|
||||
|
||||
const endTs = new Date();
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status: 'failed',
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
error,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Print JSON result
|
||||
const resultJson = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status,
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output,
|
||||
saved_count: savedCount,
|
||||
error: error || null,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(resultJson)}`);
|
||||
|
||||
process.exit(status === 'succeeded' ? 0 : 1);
|
||||
}
|
||||
|
||||
main().catch(e => {
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
*
|
||||
* Extension: https://chromewebstore.google.com/detail/mpiodijhokgodhhofbcjdecpffjipkle
|
||||
*
|
||||
* Priority: 04 (early) - Must install before Chrome session starts
|
||||
* Hook: on_Snapshot
|
||||
* Priority: 04 (early) - Must install before Chrome session starts at Crawl level
|
||||
* Hook: on_Crawl (runs once per crawl, not per snapshot)
|
||||
*
|
||||
* This extension automatically:
|
||||
* - Saves complete web pages as single HTML files
|
||||
@@ -1,18 +1,12 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Extract SSL/TLS certificate details from a URL.
|
||||
* Extract SSL/TLS certificate details from a URL (DAEMON MODE).
|
||||
*
|
||||
* Connects to Chrome session and retrieves security details including:
|
||||
* - Protocol (TLS 1.2, TLS 1.3, etc.)
|
||||
* - Cipher suite
|
||||
* - Certificate issuer, validity period
|
||||
* - Security state
|
||||
* This hook daemonizes and stays alive to capture SSL details throughout
|
||||
* the snapshot lifecycle. It's killed by chrome_cleanup at the end.
|
||||
*
|
||||
* Usage: on_Snapshot__16_ssl.js --url=<url> --snapshot-id=<uuid>
|
||||
* Output: Writes ssl/ssl.json
|
||||
*
|
||||
* Environment variables:
|
||||
* SAVE_SSL: Enable SSL extraction (default: true)
|
||||
* Usage: on_Snapshot__23_ssl.js --url=<url> --snapshot-id=<uuid>
|
||||
* Output: Writes ssl.json + listener.pid
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
@@ -23,6 +17,7 @@ const puppeteer = require('puppeteer-core');
|
||||
const EXTRACTOR_NAME = 'ssl';
|
||||
const OUTPUT_DIR = '.';
|
||||
const OUTPUT_FILE = 'ssl.json';
|
||||
const PID_FILE = 'listener.pid';
|
||||
const CHROME_SESSION_DIR = '../chrome_session';
|
||||
|
||||
// Parse command line arguments
|
||||
@@ -58,103 +53,103 @@ function getCdpUrl() {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Extract SSL details
|
||||
async function extractSsl(url) {
|
||||
// Output directory is current directory (hook already runs in output dir)
|
||||
function getPageId() {
|
||||
const pageIdFile = path.join(CHROME_SESSION_DIR, 'page_id.txt');
|
||||
if (fs.existsSync(pageIdFile)) {
|
||||
return fs.readFileSync(pageIdFile, 'utf8').trim();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// Set up SSL listener
|
||||
async function setupListener(url) {
|
||||
const outputPath = path.join(OUTPUT_DIR, OUTPUT_FILE);
|
||||
|
||||
// Only extract SSL for HTTPS URLs
|
||||
if (!url.startsWith('https://')) {
|
||||
return { success: false, error: 'URL is not HTTPS' };
|
||||
throw new Error('URL is not HTTPS');
|
||||
}
|
||||
|
||||
let browser = null;
|
||||
let sslInfo = {};
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
throw new Error('No Chrome session found');
|
||||
}
|
||||
|
||||
try {
|
||||
// Connect to existing Chrome session
|
||||
const cdpUrl = getCdpUrl();
|
||||
if (!cdpUrl) {
|
||||
return { success: false, error: 'No Chrome session found (chrome_session extractor must run first)' };
|
||||
}
|
||||
const browser = await puppeteer.connect({ browserWSEndpoint: cdpUrl });
|
||||
|
||||
browser = await puppeteer.connect({
|
||||
browserWSEndpoint: cdpUrl,
|
||||
// Find our page
|
||||
const pages = await browser.pages();
|
||||
const pageId = getPageId();
|
||||
let page = null;
|
||||
|
||||
if (pageId) {
|
||||
page = pages.find(p => {
|
||||
const target = p.target();
|
||||
return target && target._targetId === pageId;
|
||||
});
|
||||
}
|
||||
if (!page) {
|
||||
page = pages[pages.length - 1];
|
||||
}
|
||||
|
||||
// Get the page
|
||||
const pages = await browser.pages();
|
||||
const page = pages.find(p => p.url().startsWith('http')) || pages[0];
|
||||
if (!page) {
|
||||
throw new Error('No page found');
|
||||
}
|
||||
|
||||
if (!page) {
|
||||
return { success: false, error: 'No page found in Chrome session' };
|
||||
}
|
||||
// Set up listener to capture SSL details when chrome_navigate loads the page
|
||||
page.on('response', async (response) => {
|
||||
try {
|
||||
const request = response.request();
|
||||
|
||||
// Get CDP client for low-level access
|
||||
const client = await page.target().createCDPSession();
|
||||
|
||||
// Enable Security domain
|
||||
await client.send('Security.enable');
|
||||
|
||||
// Get security details from the loaded page
|
||||
const securityState = await client.send('Security.getSecurityState');
|
||||
|
||||
sslInfo = {
|
||||
url,
|
||||
securityState: securityState.securityState,
|
||||
schemeIsCryptographic: securityState.schemeIsCryptographic,
|
||||
summary: securityState.summary || '',
|
||||
};
|
||||
|
||||
// Try to get detailed certificate info if available
|
||||
if (securityState.securityStateIssueIds && securityState.securityStateIssueIds.length > 0) {
|
||||
sslInfo.issues = securityState.securityStateIssueIds;
|
||||
}
|
||||
|
||||
// Get response security details from navigation
|
||||
let mainResponse = null;
|
||||
page.on('response', async (response) => {
|
||||
if (response.url() === url || response.request().isNavigationRequest()) {
|
||||
mainResponse = response;
|
||||
// Only capture the main navigation request
|
||||
if (!request.isNavigationRequest() || request.frame() !== page.mainFrame()) {
|
||||
return;
|
||||
}
|
||||
});
|
||||
|
||||
// If we have security details from response
|
||||
if (mainResponse) {
|
||||
try {
|
||||
const securityDetails = await mainResponse.securityDetails();
|
||||
if (securityDetails) {
|
||||
sslInfo.protocol = securityDetails.protocol();
|
||||
sslInfo.subjectName = securityDetails.subjectName();
|
||||
sslInfo.issuer = securityDetails.issuer();
|
||||
sslInfo.validFrom = securityDetails.validFrom();
|
||||
sslInfo.validTo = securityDetails.validTo();
|
||||
sslInfo.certificateId = securityDetails.subjectName();
|
||||
// Only capture if it's for our target URL
|
||||
if (!response.url().startsWith(url.split('?')[0])) {
|
||||
return;
|
||||
}
|
||||
|
||||
const sanList = securityDetails.sanList();
|
||||
if (sanList && sanList.length > 0) {
|
||||
sslInfo.subjectAlternativeNames = sanList;
|
||||
}
|
||||
// Get security details from the response
|
||||
const securityDetails = response.securityDetails();
|
||||
let sslInfo = {};
|
||||
|
||||
if (securityDetails) {
|
||||
sslInfo.protocol = securityDetails.protocol();
|
||||
sslInfo.subjectName = securityDetails.subjectName();
|
||||
sslInfo.issuer = securityDetails.issuer();
|
||||
sslInfo.validFrom = securityDetails.validFrom();
|
||||
sslInfo.validTo = securityDetails.validTo();
|
||||
sslInfo.certificateId = securityDetails.subjectName();
|
||||
sslInfo.securityState = 'secure';
|
||||
sslInfo.schemeIsCryptographic = true;
|
||||
|
||||
const sanList = securityDetails.sanList();
|
||||
if (sanList && sanList.length > 0) {
|
||||
sslInfo.subjectAlternativeNames = sanList;
|
||||
}
|
||||
} catch (e) {
|
||||
// Security details not available
|
||||
} else if (response.url().startsWith('https://')) {
|
||||
// HTTPS URL but no security details means something went wrong
|
||||
sslInfo.securityState = 'unknown';
|
||||
sslInfo.schemeIsCryptographic = true;
|
||||
sslInfo.error = 'No security details available';
|
||||
} else {
|
||||
// Non-HTTPS URL
|
||||
sslInfo.securityState = 'insecure';
|
||||
sslInfo.schemeIsCryptographic = false;
|
||||
}
|
||||
|
||||
// Write output directly to file
|
||||
fs.writeFileSync(outputPath, JSON.stringify(sslInfo, null, 2));
|
||||
|
||||
} catch (e) {
|
||||
// Ignore errors
|
||||
}
|
||||
});
|
||||
|
||||
await client.detach();
|
||||
|
||||
// Write output
|
||||
fs.writeFileSync(outputPath, JSON.stringify(sslInfo, null, 2));
|
||||
|
||||
return { success: true, output: outputPath, sslInfo };
|
||||
|
||||
} catch (e) {
|
||||
return { success: false, error: `${e.name}: ${e.message}` };
|
||||
} finally {
|
||||
if (browser) {
|
||||
browser.disconnect();
|
||||
}
|
||||
}
|
||||
// Don't disconnect - keep browser connection alive
|
||||
return { browser, page };
|
||||
}
|
||||
|
||||
async function main() {
|
||||
@@ -163,75 +158,83 @@ async function main() {
|
||||
const snapshotId = args.snapshot_id;
|
||||
|
||||
if (!url || !snapshotId) {
|
||||
console.error('Usage: on_Snapshot__16_ssl.js --url=<url> --snapshot-id=<uuid>');
|
||||
console.error('Usage: on_Snapshot__23_ssl.js --url=<url> --snapshot-id=<uuid>');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (!getEnvBool('SAVE_SSL', true)) {
|
||||
console.log('Skipping (SAVE_SSL=False)');
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
status: 'skipped',
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const startTs = new Date();
|
||||
let status = 'failed';
|
||||
let output = null;
|
||||
let error = '';
|
||||
|
||||
try {
|
||||
// Check if enabled
|
||||
if (!getEnvBool('SAVE_SSL', true)) {
|
||||
console.log('Skipping SSL (SAVE_SSL=False)');
|
||||
status = 'skipped';
|
||||
const endTs = new Date();
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`STATUS=${status}`);
|
||||
console.log(`RESULT_JSON=${JSON.stringify({extractor: EXTRACTOR_NAME, status, url, snapshot_id: snapshotId})}`);
|
||||
process.exit(0);
|
||||
}
|
||||
// Set up listener
|
||||
await setupListener(url);
|
||||
|
||||
const result = await extractSsl(url);
|
||||
// Write PID file so chrome_cleanup can kill us
|
||||
fs.writeFileSync(path.join(OUTPUT_DIR, PID_FILE), String(process.pid));
|
||||
|
||||
if (result.success) {
|
||||
status = 'succeeded';
|
||||
output = result.output;
|
||||
const protocol = result.sslInfo?.protocol || 'unknown';
|
||||
console.log(`SSL details extracted: ${protocol}`);
|
||||
} else {
|
||||
status = 'failed';
|
||||
error = result.error;
|
||||
// Report success immediately (we're staying alive in background)
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
console.log(`OUTPUT=${OUTPUT_FILE}`);
|
||||
console.log(`STATUS=succeeded`);
|
||||
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status: 'succeeded',
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output: OUTPUT_FILE,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
|
||||
// Daemonize: detach from parent and keep running
|
||||
// This process will be killed by chrome_cleanup
|
||||
if (process.stdin.isTTY) {
|
||||
process.stdin.pause();
|
||||
}
|
||||
process.stdin.unref();
|
||||
process.stdout.end();
|
||||
process.stderr.end();
|
||||
|
||||
// Keep the process alive indefinitely
|
||||
// Will be killed by chrome_cleanup via the PID file
|
||||
setInterval(() => {}, 1000);
|
||||
|
||||
} catch (e) {
|
||||
error = `${e.name}: ${e.message}`;
|
||||
status = 'failed';
|
||||
}
|
||||
|
||||
const endTs = new Date();
|
||||
const duration = (endTs - startTs) / 1000;
|
||||
|
||||
// Print results
|
||||
console.log(`START_TS=${startTs.toISOString()}`);
|
||||
console.log(`END_TS=${endTs.toISOString()}`);
|
||||
console.log(`DURATION=${duration.toFixed(2)}`);
|
||||
if (output) {
|
||||
console.log(`OUTPUT=${output}`);
|
||||
}
|
||||
console.log(`STATUS=${status}`);
|
||||
|
||||
if (error) {
|
||||
const error = `${e.name}: ${e.message}`;
|
||||
console.error(`ERROR=${error}`);
|
||||
|
||||
const endTs = new Date();
|
||||
const result = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status: 'failed',
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
error,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(result)}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Print JSON result
|
||||
const resultJson = {
|
||||
extractor: EXTRACTOR_NAME,
|
||||
url,
|
||||
snapshot_id: snapshotId,
|
||||
status,
|
||||
start_ts: startTs.toISOString(),
|
||||
end_ts: endTs.toISOString(),
|
||||
duration: Math.round(duration * 100) / 100,
|
||||
output,
|
||||
error: error || null,
|
||||
};
|
||||
console.log(`RESULT_JSON=${JSON.stringify(resultJson)}`);
|
||||
|
||||
process.exit(status === 'succeeded' ? 0 : 1);
|
||||
}
|
||||
|
||||
main().catch(e => {
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
*
|
||||
* Extension: https://chromewebstore.google.com/detail/cjpalhdlnbpafiamejdnhcphjbkeiagm
|
||||
*
|
||||
* Priority: 03 (early) - Must install before Chrome session starts
|
||||
* Hook: on_Snapshot
|
||||
* Priority: 03 (early) - Must install before Chrome session starts at Crawl level
|
||||
* Hook: on_Crawl (runs once per crawl, not per snapshot)
|
||||
*
|
||||
* This extension automatically:
|
||||
* - Blocks ads, trackers, and malware domains
|
||||