diff --git a/scripts/payload_replay/README.md b/scripts/payload_replay/README.md index 2d44757..5c71c78 100644 --- a/scripts/payload_replay/README.md +++ b/scripts/payload_replay/README.md @@ -2,6 +2,69 @@ 本目录提供基于本地 payload archive 的手工 replay 入口。 +## `multi_rir_case_info.py` + +用于从 multi-RIR bundle 中解析指定 `rir` 的输入路径、对照 CSV、fixture、以及 Routinator replay timing 基线。 + +示例: + +```bash +python3 scripts/payload_replay/multi_rir_case_info.py \ + --bundle-root ../../rpki/target/live/20260316-112341-multi-final3 \ + --rir afrinic +``` + +也支持输出 shell 环境变量: + +```bash +python3 scripts/payload_replay/multi_rir_case_info.py \ + --bundle-root ../../rpki/target/live/20260316-112341-multi-final3 \ + --rir afrinic \ + --format env +``` + +## `run_multi_rir_replay_case.sh` + +统一的 multi-RIR 入口。给定 `rir` 和模式后,它会自动选择该 RIR 的: + +- snapshot/base replay 输入 +- delta replay 输入 +- 对照 CSV +- TAL / TA fixture +- trust anchor 名称 + +用法: + +```bash +./scripts/payload_replay/run_multi_rir_replay_case.sh <rir> [describe|snapshot|delta|both] +``` + +示例: + +```bash +./scripts/payload_replay/run_multi_rir_replay_case.sh afrinic describe +./scripts/payload_replay/run_multi_rir_replay_case.sh lacnic snapshot +./scripts/payload_replay/run_multi_rir_replay_case.sh arin delta +./scripts/payload_replay/run_multi_rir_replay_case.sh ripe both +``` + + +脚本会自动: + +- 从 multi-RIR bundle 中选择指定 RIR 的 snapshot/base 与 delta 输入 +- 读取该 RIR 的 Routinator `base-replay` / `delta-replay` timing 基线 +- 使用该 RIR `timings/base-replay.json` 与 `timings/delta-replay.json` 的 `startedAt` 作为 replay `--validation-time` +- 在 `target/live/multi_rir_replay_runs/<rir>/` 下生成: + - snapshot replay 产物 + - delta replay 产物 + - per-RIR 合并 case report(含 correctness + timing compare) + +默认 bundle 根目录为: + +- `../../rpki/target/live/20260316-112341-multi-final3` + +也可以通过 `BUNDLE_ROOT` 覆盖。 + ## `run_apnic_replay.sh` 默认使用: @@ -98,3 +161,74 @@ python3 scripts/payload_replay/report_to_routinator_csv.py \ - compare summary - `only_in_ours.csv` - `only_in_record.csv` + 
+## `run_apnic_delta_replay.sh` + +使用 APNIC delta demo 数据集运行 base + delta replay: + +```bash +./scripts/payload_replay/run_apnic_delta_replay.sh +``` + +默认输入: + +- `target/live/apnic_delta_demo/20260315-170223-autoplay/base-payload-archive` +- `target/live/apnic_delta_demo/20260315-170223-autoplay/base-locks.json` +- `target/live/apnic_delta_demo/20260315-170223-autoplay/payload-delta-archive` +- `target/live/apnic_delta_demo/20260315-170223-autoplay/locks-delta.json` +- `tests/fixtures/tal/apnic-rfc7730-https.tal` +- `tests/fixtures/ta/apnic-ta.cer` + +输出目录默认:`target/live/payload_delta_replay_runs/` + +## `run_apnic_delta_replay.sh` compare outputs + +脚本现在在 delta replay 结束后还会额外生成: + +- `vrps.csv` +- compare summary Markdown +- `only_in_ours.csv` +- `only_in_record.csv` + +默认 compare 输入是: + +- `target/live/apnic_delta_demo/20260315-170223-autoplay/record-delta.csv` + +也可以通过环境变量覆盖: + +- `TRUST_ANCHOR` +- `ROUTINATOR_RECORD_CSV` +- `VRPS_CSV` +- `COMPARE_SUMMARY_MD` +- `ONLY_IN_OURS_CSV` +- `ONLY_IN_RECORD_CSV` + +## `write_multi_rir_case_report.py` + +把某个 RIR 的 snapshot replay 与 delta replay 的 `meta.json`、compare summary 以及 Routinator timing 基线合并成一个 per-RIR Markdown/JSON 报告。 + +该脚本通常由 `run_multi_rir_replay_case.sh both` 自动调用。 + +## `run_multi_rir_replay_suite.sh` + +顺序执行 5 个 RIR(或环境变量 `RIRS` 指定的子集)的 `both` 模式,并最终生成 multi-RIR 汇总报告。 + +```bash +./scripts/payload_replay/run_multi_rir_replay_suite.sh +``` + +可覆盖环境变量: + +- `BUNDLE_ROOT` +- `SUITE_OUT_DIR` +- `RIRS` + +最终输出: + +- `<SUITE_OUT_DIR>/<rir>/<rir>_case_report.md` +- `<SUITE_OUT_DIR>/multi_rir_summary.md` +- `<SUITE_OUT_DIR>/multi_rir_summary.json` + +## `write_multi_rir_summary.py` + +汇总 5 个 RIR 的 per-RIR case report,生成 correctness + timing 总表与几何平均比值。 diff --git a/scripts/payload_replay/multi_rir_case_info.py b/scripts/payload_replay/multi_rir_case_info.py new file mode 100755 index 0000000..2cd57a6 --- /dev/null +++ b/scripts/payload_replay/multi_rir_case_info.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse 
+import json +import shlex +import sys +from pathlib import Path + + +RIR_CONFIG = { + "afrinic": { + "tal": "tests/fixtures/tal/afrinic.tal", + "ta": "tests/fixtures/ta/afrinic-ta.cer", + "trust_anchor": "afrinic", + }, + "apnic": { + "tal": "tests/fixtures/tal/apnic-rfc7730-https.tal", + "ta": "tests/fixtures/ta/apnic-ta.cer", + "trust_anchor": "apnic", + }, + "arin": { + "tal": "tests/fixtures/tal/arin.tal", + "ta": "tests/fixtures/ta/arin-ta.cer", + "trust_anchor": "arin", + }, + "lacnic": { + "tal": "tests/fixtures/tal/lacnic.tal", + "ta": "tests/fixtures/ta/lacnic-ta.cer", + "trust_anchor": "lacnic", + }, + "ripe": { + "tal": "tests/fixtures/tal/ripe-ncc.tal", + "ta": "tests/fixtures/ta/ripe-ncc-ta.cer", + "trust_anchor": "ripe", + }, +} + + +def default_repo_root() -> Path: + return Path(__file__).resolve().parents[2] + + +def default_bundle_root(repo_root: Path) -> Path: + return (repo_root / "../../rpki/target/live/20260316-112341-multi-final3").resolve() + + +def require_path(path: Path, kind: str) -> Path: + if kind == "dir" and not path.is_dir(): + raise SystemExit(f"missing directory: {path}") + if kind == "file" and not path.is_file(): + raise SystemExit(f"missing file: {path}") + return path + + +def load_timing_summary(bundle_root: Path) -> dict: + timing_path = require_path(bundle_root / "timing-summary.json", "file") + return json.loads(timing_path.read_text(encoding="utf-8")) + + +def build_case(bundle_root: Path, repo_root: Path, rir: str) -> dict: + if rir not in RIR_CONFIG: + raise SystemExit( + f"unsupported rir: {rir}; expected one of: {', '.join(sorted(RIR_CONFIG))}" + ) + + rir_root = require_path(bundle_root / rir, "dir") + cfg = RIR_CONFIG[rir] + timing_summary = load_timing_summary(bundle_root) + if rir not in timing_summary: + raise SystemExit(f"timing-summary.json missing entry for rir: {rir}") + timing_entry = timing_summary[rir] + durations = timing_entry.get("durations") or {} + + base_timing = require_path(rir_root / "timings" / 
"base-replay.json", "file") + delta_timing = require_path(rir_root / "timings" / "delta-replay.json", "file") + base_timing_obj = json.loads(base_timing.read_text(encoding="utf-8")) + delta_timing_obj = json.loads(delta_timing.read_text(encoding="utf-8")) + + case = { + "bundle_root": str(bundle_root), + "repo_root": str(repo_root), + "rir": rir, + "trust_anchor": cfg["trust_anchor"], + "rir_root": str(rir_root), + "base_archive": str(require_path(rir_root / "base-payload-archive", "dir")), + "base_locks": str(require_path(rir_root / "base-locks.json", "file")), + "base_vrps_csv": str(require_path(rir_root / "base-vrps.csv", "file")), + "delta_archive": str(require_path(rir_root / "payload-delta-archive", "dir")), + "delta_locks": str(require_path(rir_root / "locks-delta.json", "file")), + "delta_record_csv": str(require_path(rir_root / "record-delta.csv", "file")), + "replay_delta_csv": str(require_path(rir_root / "replay-delta.csv", "file")), + "verification_json": str(require_path(rir_root / "verification.json", "file")), + "readme": str(require_path(rir_root / "README.md", "file")), + "timings_dir": str(require_path(rir_root / "timings", "dir")), + "base_timing_json": str(base_timing), + "delta_timing_json": str(delta_timing), + "tal_path": str(require_path(repo_root / cfg["tal"], "file")), + "ta_path": str(require_path(repo_root / cfg["ta"], "file")), + "validation_times": { + "snapshot": base_timing_obj["startedAt"], + "delta": delta_timing_obj["startedAt"], + }, + "routinator_timings": { + "base_replay_seconds": float(durations["base-replay"]), + "delta_replay_seconds": float(durations["delta-replay"]), + }, + } + return case + + +def emit_env(case: dict) -> str: + ordered = { + "BUNDLE_ROOT": case["bundle_root"], + "RIR": case["rir"], + "TRUST_ANCHOR": case["trust_anchor"], + "RIR_ROOT": case["rir_root"], + "TAL_PATH": case["tal_path"], + "TA_PATH": case["ta_path"], + "PAYLOAD_REPLAY_ARCHIVE": case["base_archive"], + "PAYLOAD_REPLAY_LOCKS": 
case["base_locks"], + "ROUTINATOR_BASE_RECORD_CSV": case["base_vrps_csv"], + "PAYLOAD_BASE_ARCHIVE": case["base_archive"], + "PAYLOAD_BASE_LOCKS": case["base_locks"], + "PAYLOAD_DELTA_ARCHIVE": case["delta_archive"], + "PAYLOAD_DELTA_LOCKS": case["delta_locks"], + "ROUTINATOR_DELTA_RECORD_CSV": case["delta_record_csv"], + "SNAPSHOT_VALIDATION_TIME": case["validation_times"]["snapshot"], + "DELTA_VALIDATION_TIME": case["validation_times"]["delta"], + "ROUTINATOR_BASE_REPLAY_SECONDS": str(case["routinator_timings"]["base_replay_seconds"]), + "ROUTINATOR_DELTA_REPLAY_SECONDS": str(case["routinator_timings"]["delta_replay_seconds"]), + } + return "\n".join( + f"export {key}={shlex.quote(value)}" for key, value in ordered.items() + ) + + +def main() -> int: + parser = argparse.ArgumentParser(description="Resolve one RIR case inside a multi-RIR replay bundle") + parser.add_argument("--bundle-root", type=Path, default=None) + parser.add_argument("--repo-root", type=Path, default=None) + parser.add_argument("--rir", required=True, choices=sorted(RIR_CONFIG)) + parser.add_argument("--format", choices=["json", "env"], default="json") + args = parser.parse_args() + + repo_root = (args.repo_root or default_repo_root()).resolve() + bundle_root = (args.bundle_root or default_bundle_root(repo_root)).resolve() + case = build_case(bundle_root, repo_root, args.rir) + + if args.format == "env": + print(emit_env(case)) + else: + print(json.dumps(case, ensure_ascii=False, indent=2)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/payload_replay/run_apnic_delta_replay.sh b/scripts/payload_replay/run_apnic_delta_replay.sh new file mode 100755 index 0000000..3671c63 --- /dev/null +++ b/scripts/payload_replay/run_apnic_delta_replay.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" +cd "$ROOT_DIR" + +DELTA_ROOT="${DELTA_ROOT:-$ROOT_DIR/target/live/apnic_delta_demo/20260315-170223-autoplay}" +TAL_PATH="${TAL_PATH:-$ROOT_DIR/tests/fixtures/tal/apnic-rfc7730-https.tal}" +TA_PATH="${TA_PATH:-$ROOT_DIR/tests/fixtures/ta/apnic-ta.cer}" +PAYLOAD_BASE_ARCHIVE="${PAYLOAD_BASE_ARCHIVE:-$DELTA_ROOT/base-payload-archive}" +PAYLOAD_BASE_LOCKS="${PAYLOAD_BASE_LOCKS:-$DELTA_ROOT/base-locks.json}" +PAYLOAD_DELTA_ARCHIVE="${PAYLOAD_DELTA_ARCHIVE:-$DELTA_ROOT/payload-delta-archive}" +PAYLOAD_DELTA_LOCKS="${PAYLOAD_DELTA_LOCKS:-$DELTA_ROOT/locks-delta.json}" +VALIDATION_TIME="${VALIDATION_TIME:-2026-03-15T10:00:00Z}" +PAYLOAD_BASE_VALIDATION_TIME="${PAYLOAD_BASE_VALIDATION_TIME:-}" +TRUST_ANCHOR="${TRUST_ANCHOR:-apnic}" +ROUTINATOR_RECORD_CSV="${ROUTINATOR_RECORD_CSV:-$DELTA_ROOT/record-delta.csv}" +MAX_DEPTH="${MAX_DEPTH:-}" +MAX_INSTANCES="${MAX_INSTANCES:-}" +OUT_DIR="${OUT_DIR:-$ROOT_DIR/target/live/payload_delta_replay_runs}" +mkdir -p "$OUT_DIR" + +TS="$(date -u +%Y%m%dT%H%M%SZ)" +RUN_NAME="${RUN_NAME:-apnic_delta_replay_${TS}}" +DB_DIR="${DB_DIR:-$OUT_DIR/${RUN_NAME}_db}" +REPORT_JSON="${REPORT_JSON:-$OUT_DIR/${RUN_NAME}_report.json}" +RUN_LOG="${RUN_LOG:-$OUT_DIR/${RUN_NAME}_run.log}" +META_JSON="${META_JSON:-$OUT_DIR/${RUN_NAME}_meta.json}" +SUMMARY_MD="${SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_summary.md}" +VRPS_CSV="${VRPS_CSV:-$OUT_DIR/${RUN_NAME}_vrps.csv}" +COMPARE_SUMMARY_MD="${COMPARE_SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_compare_summary.md}" +ONLY_IN_OURS_CSV="${ONLY_IN_OURS_CSV:-$OUT_DIR/${RUN_NAME}_only_in_ours.csv}" +ONLY_IN_RECORD_CSV="${ONLY_IN_RECORD_CSV:-$OUT_DIR/${RUN_NAME}_only_in_record.csv}" + +cmd=(cargo run --release --bin rpki -- + --db "$DB_DIR" + --tal-path "$TAL_PATH" + --ta-path "$TA_PATH" + --payload-base-archive "$PAYLOAD_BASE_ARCHIVE" + --payload-base-locks "$PAYLOAD_BASE_LOCKS" + --payload-delta-archive "$PAYLOAD_DELTA_ARCHIVE" + --payload-delta-locks "$PAYLOAD_DELTA_LOCKS" + --validation-time "$VALIDATION_TIME" + 
--report-json "$REPORT_JSON") + +if [[ -n "$MAX_DEPTH" ]]; then + cmd+=(--max-depth "$MAX_DEPTH") +fi +if [[ -n "$MAX_INSTANCES" ]]; then + cmd+=(--max-instances "$MAX_INSTANCES") +fi + +run_start_s="$(date +%s)" +( + echo "# command:" + printf '%q ' "${cmd[@]}" + echo + echo + "${cmd[@]}" +) 2>&1 | tee "$RUN_LOG" >/dev/null +run_end_s="$(date +%s)" +run_duration_s="$((run_end_s - run_start_s))" + +PAYLOAD_BASE_ARCHIVE="$PAYLOAD_BASE_ARCHIVE" \ +PAYLOAD_BASE_LOCKS="$PAYLOAD_BASE_LOCKS" \ +PAYLOAD_DELTA_ARCHIVE="$PAYLOAD_DELTA_ARCHIVE" \ +PAYLOAD_DELTA_LOCKS="$PAYLOAD_DELTA_LOCKS" \ +PAYLOAD_BASE_VALIDATION_TIME="$PAYLOAD_BASE_VALIDATION_TIME" \ +DB_DIR="$DB_DIR" \ +REPORT_JSON="$REPORT_JSON" \ +RUN_LOG="$RUN_LOG" \ +VALIDATION_TIME="$VALIDATION_TIME" \ +RUN_DURATION_S="$run_duration_s" \ +python3 - "$REPORT_JSON" "$META_JSON" "$SUMMARY_MD" <<'PY' +import json +import os +import sys +from datetime import datetime, timezone +from pathlib import Path + +report_path = Path(sys.argv[1]) +meta_path = Path(sys.argv[2]) +summary_path = Path(sys.argv[3]) +rep = json.loads(report_path.read_text(encoding='utf-8')) +now = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ') +meta = { + 'recorded_at_utc': now, + 'payload_base_archive': os.environ['PAYLOAD_BASE_ARCHIVE'], + 'payload_base_locks': os.environ['PAYLOAD_BASE_LOCKS'], + 'payload_delta_archive': os.environ['PAYLOAD_DELTA_ARCHIVE'], + 'payload_delta_locks': os.environ['PAYLOAD_DELTA_LOCKS'], + 'db_dir': os.environ['DB_DIR'], + 'report_json': os.environ['REPORT_JSON'], + 'run_log': os.environ['RUN_LOG'], + 'validation_time_arg': os.environ['VALIDATION_TIME'], + 'base_validation_time_arg': os.environ.get('PAYLOAD_BASE_VALIDATION_TIME') or os.environ['VALIDATION_TIME'], + 'durations_secs': {'rpki_run': int(os.environ['RUN_DURATION_S'])}, + 'counts': { + 'publication_points_processed': rep['tree']['instances_processed'], + 'publication_points_failed': rep['tree']['instances_failed'], + 'vrps': len(rep['vrps']), + 
'aspas': len(rep['aspas']), + 'audit_publication_points': len(rep['publication_points']), + }, +} +meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2)+'\n', encoding='utf-8') +summary = [] +summary.append('# Payload Delta Replay Summary\n\n') +for key in ['payload_base_archive','payload_base_locks','payload_delta_archive','payload_delta_locks','db_dir','report_json','base_validation_time_arg','validation_time_arg']: + summary.append(f'- {key}: `{meta[key]}`\n') +summary.append('\n## Results\n\n| metric | value |\n|---|---:|\n') +for k,v in meta['counts'].items(): + summary.append(f'| {k} | {v} |\n') +summary.append('\n## Durations\n\n| step | seconds |\n|---|---:|\n') +for k,v in meta['durations_secs'].items(): + summary.append(f'| {k} | {v} |\n') +summary_path.write_text(''.join(summary), encoding='utf-8') +print(summary_path) +PY + +python3 scripts/payload_replay/report_to_routinator_csv.py \ + --report "$REPORT_JSON" \ + --out "$VRPS_CSV" \ + --trust-anchor "$TRUST_ANCHOR" >/dev/null + +if [[ -f "$ROUTINATOR_RECORD_CSV" ]]; then + ./scripts/payload_replay/compare_with_routinator_record.sh \ + "$VRPS_CSV" \ + "$ROUTINATOR_RECORD_CSV" \ + "$COMPARE_SUMMARY_MD" \ + "$ONLY_IN_OURS_CSV" \ + "$ONLY_IN_RECORD_CSV" >/dev/null +fi + +echo "== payload delta replay run complete ==" >&2 +echo "- db: $DB_DIR" >&2 +echo "- report: $REPORT_JSON" >&2 +echo "- run log: $RUN_LOG" >&2 +echo "- meta json: $META_JSON" >&2 +echo "- summary md: $SUMMARY_MD" >&2 +echo "- vrps csv: $VRPS_CSV" >&2 +if [[ -f "$COMPARE_SUMMARY_MD" ]]; then + echo "- compare summary: $COMPARE_SUMMARY_MD" >&2 + echo "- only in ours: $ONLY_IN_OURS_CSV" >&2 + echo "- only in record: $ONLY_IN_RECORD_CSV" >&2 +fi diff --git a/scripts/payload_replay/run_multi_rir_replay_case.sh b/scripts/payload_replay/run_multi_rir_replay_case.sh new file mode 100755 index 0000000..2b2ee9d --- /dev/null +++ b/scripts/payload_replay/run_multi_rir_replay_case.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -euo 
pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +cd "$ROOT_DIR" + +if [[ $# -lt 1 || $# -gt 2 ]]; then + echo "usage: $0 [describe|snapshot|delta|both]" >&2 + exit 2 +fi + +RIR="$1" +MODE="${2:-both}" +BUNDLE_ROOT="${BUNDLE_ROOT:-$ROOT_DIR/../../rpki/target/live/20260316-112341-multi-final3}" +CASE_INFO_SCRIPT="$ROOT_DIR/scripts/payload_replay/multi_rir_case_info.py" +CASE_REPORT_SCRIPT="$ROOT_DIR/scripts/payload_replay/write_multi_rir_case_report.py" +MULTI_RIR_OUT_DIR="${MULTI_RIR_OUT_DIR:-$ROOT_DIR/target/live/multi_rir_replay_runs/$RIR}" +mkdir -p "$MULTI_RIR_OUT_DIR" + +eval "$(python3 "$CASE_INFO_SCRIPT" --bundle-root "$BUNDLE_ROOT" --rir "$RIR" --format env)" + +SNAPSHOT_DB_DIR="${SNAPSHOT_DB_DIR:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_replay_db}" +SNAPSHOT_REPORT_MD="${SNAPSHOT_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_compare_summary.md}" +SNAPSHOT_META_JSON="${SNAPSHOT_META_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_meta.json}" +SNAPSHOT_RUN_LOG="${SNAPSHOT_RUN_LOG:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_run.log}" +SNAPSHOT_REPORT_JSON="${SNAPSHOT_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_report.json}" +SNAPSHOT_VRPS_CSV="${SNAPSHOT_VRPS_CSV:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_vrps.csv}" +SNAPSHOT_ONLY_OURS="${SNAPSHOT_ONLY_OURS:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_only_in_ours.csv}" +SNAPSHOT_ONLY_RECORD="${SNAPSHOT_ONLY_RECORD:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_only_in_record.csv}" + +DELTA_DB_DIR="${DELTA_DB_DIR:-$MULTI_RIR_OUT_DIR/${RIR}_delta_replay_db}" +DELTA_REPORT_MD="${DELTA_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_delta_compare_summary.md}" +DELTA_META_JSON="${DELTA_META_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_delta_meta.json}" +DELTA_RUN_LOG="${DELTA_RUN_LOG:-$MULTI_RIR_OUT_DIR/${RIR}_delta_run.log}" +DELTA_REPORT_JSON="${DELTA_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_delta_report.json}" +DELTA_VRPS_CSV="${DELTA_VRPS_CSV:-$MULTI_RIR_OUT_DIR/${RIR}_delta_vrps.csv}" 
+DELTA_ONLY_OURS="${DELTA_ONLY_OURS:-$MULTI_RIR_OUT_DIR/${RIR}_delta_only_in_ours.csv}" +DELTA_ONLY_RECORD="${DELTA_ONLY_RECORD:-$MULTI_RIR_OUT_DIR/${RIR}_delta_only_in_record.csv}" + +CASE_REPORT_JSON="${CASE_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_case_report.json}" +CASE_REPORT_MD="${CASE_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_case_report.md}" + +case "$MODE" in + describe) + python3 "$CASE_INFO_SCRIPT" --bundle-root "$BUNDLE_ROOT" --rir "$RIR" + ;; + snapshot) + rm -rf "$SNAPSHOT_DB_DIR" + ROUTINATOR_RECORD_CSV="$ROUTINATOR_BASE_RECORD_CSV" \ + VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \ + OUT_DIR="$MULTI_RIR_OUT_DIR" \ + DB_DIR="$SNAPSHOT_DB_DIR" \ + RUN_NAME="${RUN_NAME:-${RIR}_snapshot_replay}" \ + META_JSON="$SNAPSHOT_META_JSON" \ + RUN_LOG="$SNAPSHOT_RUN_LOG" \ + REPORT_JSON="$SNAPSHOT_REPORT_JSON" \ + VRPS_CSV="$SNAPSHOT_VRPS_CSV" \ + COMPARE_SUMMARY_MD="$SNAPSHOT_REPORT_MD" \ + ONLY_IN_OURS_CSV="$SNAPSHOT_ONLY_OURS" \ + ONLY_IN_RECORD_CSV="$SNAPSHOT_ONLY_RECORD" \ + ./scripts/payload_replay/run_apnic_replay.sh + ;; + delta) + rm -rf "$DELTA_DB_DIR" + ROUTINATOR_RECORD_CSV="$ROUTINATOR_DELTA_RECORD_CSV" \ + VALIDATION_TIME="$DELTA_VALIDATION_TIME" \ + PAYLOAD_BASE_VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \ + OUT_DIR="$MULTI_RIR_OUT_DIR" \ + DB_DIR="$DELTA_DB_DIR" \ + RUN_NAME="${RUN_NAME:-${RIR}_delta_replay}" \ + DELTA_ROOT="$RIR_ROOT" \ + META_JSON="$DELTA_META_JSON" \ + RUN_LOG="$DELTA_RUN_LOG" \ + REPORT_JSON="$DELTA_REPORT_JSON" \ + VRPS_CSV="$DELTA_VRPS_CSV" \ + COMPARE_SUMMARY_MD="$DELTA_REPORT_MD" \ + ONLY_IN_OURS_CSV="$DELTA_ONLY_OURS" \ + ONLY_IN_RECORD_CSV="$DELTA_ONLY_RECORD" \ + ./scripts/payload_replay/run_apnic_delta_replay.sh + ;; + both) + rm -rf "$SNAPSHOT_DB_DIR" "$DELTA_DB_DIR" + ROUTINATOR_RECORD_CSV="$ROUTINATOR_BASE_RECORD_CSV" \ + VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \ + OUT_DIR="$MULTI_RIR_OUT_DIR" \ + DB_DIR="$SNAPSHOT_DB_DIR" \ + RUN_NAME="${RUN_NAME_SNAPSHOT:-${RIR}_snapshot_replay}" \ + META_JSON="$SNAPSHOT_META_JSON" 
\ + RUN_LOG="$SNAPSHOT_RUN_LOG" \ + REPORT_JSON="$SNAPSHOT_REPORT_JSON" \ + VRPS_CSV="$SNAPSHOT_VRPS_CSV" \ + COMPARE_SUMMARY_MD="$SNAPSHOT_REPORT_MD" \ + ONLY_IN_OURS_CSV="$SNAPSHOT_ONLY_OURS" \ + ONLY_IN_RECORD_CSV="$SNAPSHOT_ONLY_RECORD" \ + ./scripts/payload_replay/run_apnic_replay.sh + ROUTINATOR_RECORD_CSV="$ROUTINATOR_DELTA_RECORD_CSV" \ + VALIDATION_TIME="$DELTA_VALIDATION_TIME" \ + PAYLOAD_BASE_VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \ + OUT_DIR="$MULTI_RIR_OUT_DIR" \ + DB_DIR="$DELTA_DB_DIR" \ + RUN_NAME="${RUN_NAME_DELTA:-${RIR}_delta_replay}" \ + DELTA_ROOT="$RIR_ROOT" \ + META_JSON="$DELTA_META_JSON" \ + RUN_LOG="$DELTA_RUN_LOG" \ + REPORT_JSON="$DELTA_REPORT_JSON" \ + VRPS_CSV="$DELTA_VRPS_CSV" \ + COMPARE_SUMMARY_MD="$DELTA_REPORT_MD" \ + ONLY_IN_OURS_CSV="$DELTA_ONLY_OURS" \ + ONLY_IN_RECORD_CSV="$DELTA_ONLY_RECORD" \ + ./scripts/payload_replay/run_apnic_delta_replay.sh + python3 "$CASE_REPORT_SCRIPT" \ + --rir "$RIR" \ + --snapshot-meta "$SNAPSHOT_META_JSON" \ + --snapshot-compare "$SNAPSHOT_REPORT_MD" \ + --delta-meta "$DELTA_META_JSON" \ + --delta-compare "$DELTA_REPORT_MD" \ + --routinator-base-seconds "$ROUTINATOR_BASE_REPLAY_SECONDS" \ + --routinator-delta-seconds "$ROUTINATOR_DELTA_REPLAY_SECONDS" \ + --out-md "$CASE_REPORT_MD" \ + --out-json "$CASE_REPORT_JSON" >/dev/null + echo "- case report: $CASE_REPORT_MD" >&2 + echo "- case report json: $CASE_REPORT_JSON" >&2 + ;; + *) + echo "unsupported mode: $MODE; expected describe|snapshot|delta|both" >&2 + exit 2 + ;; +esac diff --git a/scripts/payload_replay/run_multi_rir_replay_suite.sh b/scripts/payload_replay/run_multi_rir_replay_suite.sh new file mode 100755 index 0000000..a6d4a5c --- /dev/null +++ b/scripts/payload_replay/run_multi_rir_replay_suite.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" +cd "$ROOT_DIR" + +BUNDLE_ROOT="${BUNDLE_ROOT:-$ROOT_DIR/../../rpki/target/live/20260316-112341-multi-final3}" +SUITE_OUT_DIR="${SUITE_OUT_DIR:-$ROOT_DIR/target/live/multi_rir_replay_runs}" +RIRS="${RIRS:-afrinic apnic arin lacnic ripe}" +CASE_SCRIPT="$ROOT_DIR/scripts/payload_replay/run_multi_rir_replay_case.sh" +SUMMARY_SCRIPT="$ROOT_DIR/scripts/payload_replay/write_multi_rir_summary.py" + +mkdir -p "$SUITE_OUT_DIR" + +for rir in $RIRS; do + MULTI_RIR_OUT_DIR="$SUITE_OUT_DIR/$rir" \ + BUNDLE_ROOT="$BUNDLE_ROOT" \ + "$CASE_SCRIPT" "$rir" both + echo "completed $rir" >&2 + echo >&2 + done + +python3 "$SUMMARY_SCRIPT" \ + --case-root "$SUITE_OUT_DIR" \ + --out-md "$SUITE_OUT_DIR/multi_rir_summary.md" \ + --out-json "$SUITE_OUT_DIR/multi_rir_summary.json" \ + --rirs $RIRS >/dev/null + +echo "== multi-RIR replay suite complete ==" >&2 +echo "- suite_out_dir: $SUITE_OUT_DIR" >&2 +echo "- summary_md: $SUITE_OUT_DIR/multi_rir_summary.md" >&2 +echo "- summary_json: $SUITE_OUT_DIR/multi_rir_summary.json" >&2 diff --git a/scripts/payload_replay/write_multi_rir_case_report.py b/scripts/payload_replay/write_multi_rir_case_report.py new file mode 100755 index 0000000..40de58e --- /dev/null +++ b/scripts/payload_replay/write_multi_rir_case_report.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import json +from pathlib import Path + + +def parse_args() -> argparse.Namespace: + p = argparse.ArgumentParser(description="Generate one multi-RIR replay case report") + p.add_argument("--rir", required=True) + p.add_argument("--snapshot-meta", required=True) + p.add_argument("--snapshot-compare", required=True) + p.add_argument("--delta-meta", required=True) + p.add_argument("--delta-compare", required=True) + p.add_argument("--routinator-base-seconds", required=True, type=float) + p.add_argument("--routinator-delta-seconds", required=True, type=float) + p.add_argument("--out-md", required=True) + 
p.add_argument("--out-json", required=True) + return p.parse_args() + + +def read_json(path: str) -> dict: + return json.loads(Path(path).read_text(encoding="utf-8")) + + +def parse_compare_md(path: str) -> dict: + lines = Path(path).read_text(encoding="utf-8").splitlines() + out = {} + for line in lines: + if not line.startswith("| "): + continue + parts = [p.strip() for p in line.strip("|").split("|")] + if len(parts) != 2: + continue + key, value = parts + if key in {"metric", "---"}: + continue + try: + out[key] = int(value) + except ValueError: + pass + return out + + +def ratio(ours: float, baseline: float) -> float | None: + if baseline <= 0: + return None + return ours / baseline + + +def build_report(args: argparse.Namespace) -> dict: + snapshot_meta = read_json(args.snapshot_meta) + delta_meta = read_json(args.delta_meta) + snapshot_compare = parse_compare_md(args.snapshot_compare) + delta_compare = parse_compare_md(args.delta_compare) + + snapshot_ours = float(snapshot_meta["durations_secs"]["rpki_run"]) + delta_ours = float(delta_meta["durations_secs"]["rpki_run"]) + + report = { + "rir": args.rir, + "snapshot": { + "meta_json": str(Path(args.snapshot_meta).resolve()), + "compare_md": str(Path(args.snapshot_compare).resolve()), + "ours_seconds": snapshot_ours, + "routinator_seconds": args.routinator_base_seconds, + "ratio": ratio(snapshot_ours, args.routinator_base_seconds), + "compare": snapshot_compare, + "match": snapshot_compare.get("only_in_ours", -1) == 0 + and snapshot_compare.get("only_in_record", -1) == 0, + "counts": snapshot_meta.get("counts", {}), + }, + "delta": { + "meta_json": str(Path(args.delta_meta).resolve()), + "compare_md": str(Path(args.delta_compare).resolve()), + "ours_seconds": delta_ours, + "routinator_seconds": args.routinator_delta_seconds, + "ratio": ratio(delta_ours, args.routinator_delta_seconds), + "compare": delta_compare, + "match": delta_compare.get("only_in_ours", -1) == 0 + and delta_compare.get("only_in_record", -1) 
== 0, + "counts": delta_meta.get("counts", {}), + }, + } + return report + + +def write_md(path: Path, report: dict) -> None: + snapshot = report["snapshot"] + delta = report["delta"] + lines = [] + lines.append(f"# {report['rir'].upper()} Replay Report\n\n") + lines.append("## Summary\n\n") + lines.append("| mode | match | ours_s | routinator_s | ratio | only_in_ours | only_in_record |\n") + lines.append("|---|---|---:|---:|---:|---:|---:|\n") + lines.append( + f"| snapshot | {str(snapshot['match']).lower()} | {snapshot['ours_seconds']:.3f} | {snapshot['routinator_seconds']:.3f} | {snapshot['ratio']:.3f} | {snapshot['compare'].get('only_in_ours', 0)} | {snapshot['compare'].get('only_in_record', 0)} |\n" + ) + lines.append( + f"| delta | {str(delta['match']).lower()} | {delta['ours_seconds']:.3f} | {delta['routinator_seconds']:.3f} | {delta['ratio']:.3f} | {delta['compare'].get('only_in_ours', 0)} | {delta['compare'].get('only_in_record', 0)} |\n" + ) + lines.append("\n## Snapshot Inputs\n\n") + lines.append(f"- meta_json: `{snapshot['meta_json']}`\n") + lines.append(f"- compare_md: `{snapshot['compare_md']}`\n") + lines.append("\n## Delta Inputs\n\n") + lines.append(f"- meta_json: `{delta['meta_json']}`\n") + lines.append(f"- compare_md: `{delta['compare_md']}`\n") + lines.append("\n## Counts\n\n") + lines.append("### Snapshot\n\n") + for k, v in sorted(snapshot.get("counts", {}).items()): + lines.append(f"- {k}: `{v}`\n") + lines.append("\n### Delta\n\n") + for k, v in sorted(delta.get("counts", {}).items()): + lines.append(f"- {k}: `{v}`\n") + path.write_text("".join(lines), encoding="utf-8") + + +def main() -> int: + args = parse_args() + report = build_report(args) + out_json = Path(args.out_json) + out_md = Path(args.out_md) + out_json.parent.mkdir(parents=True, exist_ok=True) + out_md.parent.mkdir(parents=True, exist_ok=True) + out_json.write_text(json.dumps(report, ensure_ascii=False, indent=2) + "\n", encoding="utf-8") + write_md(out_md, report) + 
print(out_md) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/payload_replay/write_multi_rir_summary.py b/scripts/payload_replay/write_multi_rir_summary.py new file mode 100755 index 0000000..10391a2 --- /dev/null +++ b/scripts/payload_replay/write_multi_rir_summary.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import json +import math +from pathlib import Path + +DEFAULT_RIRS = ["afrinic", "apnic", "arin", "lacnic", "ripe"] + + +def parse_args() -> argparse.Namespace: + p = argparse.ArgumentParser(description="Aggregate per-RIR replay case reports") + p.add_argument("--case-root", required=True, help="directory containing /_case_report.json") + p.add_argument("--out-md", required=True) + p.add_argument("--out-json", required=True) + p.add_argument("--rirs", nargs="*", default=None, help="RIRs to include (default: all 5)") + return p.parse_args() + + +def read_case(case_root: Path, rir: str) -> dict: + path = case_root / rir / f"{rir}_case_report.json" + return json.loads(path.read_text(encoding="utf-8")) + + +def geomean(values: list[float]) -> float: + vals = [v for v in values if v > 0] + if not vals: + return 0.0 + return math.exp(sum(math.log(v) for v in vals) / len(vals)) + + +def build_summary(cases: list[dict]) -> dict: + snapshot_ratios = [c["snapshot"]["ratio"] for c in cases] + delta_ratios = [c["delta"]["ratio"] for c in cases] + return { + "cases": cases, + "summary": { + "snapshot_all_match": all(c["snapshot"]["match"] for c in cases), + "delta_all_match": all(c["delta"]["match"] for c in cases), + "snapshot_ratio_geomean": geomean(snapshot_ratios), + "delta_ratio_geomean": geomean(delta_ratios), + "all_ratio_geomean": geomean(snapshot_ratios + delta_ratios), + }, + } + + +def write_md(path: Path, data: dict) -> None: + lines = [] + lines.append("# Multi-RIR Replay Summary\n\n") + lines.append("## Correctness + Timing\n\n") + lines.append("| RIR | 
snapshot_match | snapshot_ours_s | snapshot_routinator_s | snapshot_ratio | delta_match | delta_ours_s | delta_routinator_s | delta_ratio |\n") + lines.append("|---|---|---:|---:|---:|---|---:|---:|---:|\n") + for case in data["cases"]: + lines.append( + f"| {case['rir']} | {str(case['snapshot']['match']).lower()} | {case['snapshot']['ours_seconds']:.3f} | {case['snapshot']['routinator_seconds']:.3f} | {case['snapshot']['ratio']:.3f} | {str(case['delta']['match']).lower()} | {case['delta']['ours_seconds']:.3f} | {case['delta']['routinator_seconds']:.3f} | {case['delta']['ratio']:.3f} |\n" + ) + s = data["summary"] + lines.append("\n## Aggregate Metrics\n\n") + lines.append("| metric | value |\n") + lines.append("|---|---:|\n") + lines.append(f"| snapshot_all_match | {str(s['snapshot_all_match']).lower()} |\n") + lines.append(f"| delta_all_match | {str(s['delta_all_match']).lower()} |\n") + lines.append(f"| snapshot_ratio_geomean | {s['snapshot_ratio_geomean']:.3f} |\n") + lines.append(f"| delta_ratio_geomean | {s['delta_ratio_geomean']:.3f} |\n") + lines.append(f"| all_ratio_geomean | {s['all_ratio_geomean']:.3f} |\n") + path.write_text("".join(lines), encoding="utf-8") + + +def main() -> int: + args = parse_args() + case_root = Path(args.case_root) + rirs = args.rirs or DEFAULT_RIRS + cases = [read_case(case_root, rir) for rir in rirs] + data = build_summary(cases) + out_md = Path(args.out_md) + out_json = Path(args.out_json) + out_md.parent.mkdir(parents=True, exist_ok=True) + out_json.parent.mkdir(parents=True, exist_ok=True) + out_json.write_text(json.dumps(data, ensure_ascii=False, indent=2) + "\n", encoding="utf-8") + write_md(out_md, data) + print(out_md) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/src/cli.rs b/src/cli.rs index 4225530..479e177 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -11,7 +11,9 @@ use crate::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher}; use crate::policy::Policy; use 
crate::storage::RocksStore; use crate::validation::run_tree_from_tal::{ - RunTreeFromTalAuditOutput, run_tree_from_tal_and_ta_der_payload_replay_serial_audit, + RunTreeFromTalAuditOutput, run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit, + run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timing, + run_tree_from_tal_and_ta_der_payload_replay_serial_audit, run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing, run_tree_from_tal_and_ta_der_serial_audit, run_tree_from_tal_and_ta_der_serial_audit_with_timing, run_tree_from_tal_url_serial_audit, @@ -30,6 +32,11 @@ pub struct CliArgs { pub report_json_path: Option, pub payload_replay_archive: Option, pub payload_replay_locks: Option, + pub payload_base_archive: Option, + pub payload_base_locks: Option, + pub payload_base_validation_time: Option, + pub payload_delta_archive: Option, + pub payload_delta_locks: Option, pub rsync_local_dir: Option, @@ -59,6 +66,11 @@ Options: --report-json Write full audit report as JSON (optional) --payload-replay-archive Use local payload replay archive root (offline replay mode) --payload-replay-locks Use local payload replay locks.json (offline replay mode) + --payload-base-archive Use local base payload archive root (offline delta replay) + --payload-base-locks Use local base locks.json (offline delta replay) + --payload-base-validation-time Validation time for the base bootstrap inside offline delta replay + --payload-delta-archive Use local delta payload archive root (offline delta replay) + --payload-delta-locks Use local locks-delta.json (offline delta replay) --tal-url TAL URL (downloads TAL + TA over HTTPS) --tal-path TAL file path (offline-friendly; requires --ta-path) @@ -89,6 +101,11 @@ pub fn parse_args(argv: &[String]) -> Result { let mut report_json_path: Option = None; let mut payload_replay_archive: Option = None; let mut payload_replay_locks: Option = None; + let mut payload_base_archive: Option = None; + let mut 
payload_base_locks: Option = None; + let mut payload_base_validation_time: Option = None; + let mut payload_delta_archive: Option = None; + let mut payload_delta_locks: Option = None; let mut rsync_local_dir: Option = None; let mut http_timeout_secs: u64 = 20; @@ -149,6 +166,41 @@ pub fn parse_args(argv: &[String]) -> Result { .ok_or("--payload-replay-locks requires a value")?; payload_replay_locks = Some(PathBuf::from(v)); } + "--payload-base-archive" => { + i += 1; + let v = argv + .get(i) + .ok_or("--payload-base-archive requires a value")?; + payload_base_archive = Some(PathBuf::from(v)); + } + "--payload-base-locks" => { + i += 1; + let v = argv.get(i).ok_or("--payload-base-locks requires a value")?; + payload_base_locks = Some(PathBuf::from(v)); + } + "--payload-base-validation-time" => { + i += 1; + let v = argv.get(i).ok_or("--payload-base-validation-time requires a value")?; + use time::format_description::well_known::Rfc3339; + let t = time::OffsetDateTime::parse(v, &Rfc3339).map_err(|e| { + format!("invalid --payload-base-validation-time (RFC3339 expected): {e}") + })?; + payload_base_validation_time = Some(t); + } + "--payload-delta-archive" => { + i += 1; + let v = argv + .get(i) + .ok_or("--payload-delta-archive requires a value")?; + payload_delta_archive = Some(PathBuf::from(v)); + } + "--payload-delta-locks" => { + i += 1; + let v = argv + .get(i) + .ok_or("--payload-delta-locks requires a value")?; + payload_delta_locks = Some(PathBuf::from(v)); + } "--rsync-local-dir" => { i += 1; let v = argv.get(i).ok_or("--rsync-local-dir requires a value")?; @@ -230,6 +282,27 @@ pub fn parse_args(argv: &[String]) -> Result { return Err(format!( "--payload-replay-archive and --payload-replay-locks must be provided together +{}", + usage() + )); + } + + let delta_mode_count = payload_base_archive.is_some() as u8 + + payload_base_locks.is_some() as u8 + + payload_delta_archive.is_some() as u8 + + payload_delta_locks.is_some() as u8; + if delta_mode_count > 0 && 
delta_mode_count < 4 { + return Err(format!( + "--payload-base-archive, --payload-base-locks, --payload-delta-archive and --payload-delta-locks must be provided together + +{}", + usage() + )); + } + if replay_mode_count == 2 && delta_mode_count == 4 { + return Err(format!( + "snapshot replay mode and delta replay mode are mutually exclusive + {}", usage() )); @@ -255,6 +328,32 @@ pub fn parse_args(argv: &[String]) -> Result { return Err(format!( "payload replay mode cannot be combined with --rsync-local-dir +{}", + usage() + )); + } + } + if delta_mode_count == 4 { + if tal_url.is_some() { + return Err(format!( + "payload delta replay mode requires --tal-path and --ta-path; --tal-url is not supported + +{}", + usage() + )); + } + if tal_path.is_none() || ta_path.is_none() { + return Err(format!( + "payload delta replay mode requires --tal-path and --ta-path + +{}", + usage() + )); + } + if rsync_local_dir.is_some() { + return Err(format!( + "payload delta replay mode cannot be combined with --rsync-local-dir + {}", usage() )); @@ -270,6 +369,11 @@ pub fn parse_args(argv: &[String]) -> Result { report_json_path, payload_replay_archive, payload_replay_locks, + payload_base_archive, + payload_base_locks, + payload_base_validation_time, + payload_delta_archive, + payload_delta_locks, rsync_local_dir, http_timeout_secs, rsync_timeout_secs, @@ -406,6 +510,7 @@ pub fn run(argv: &[String]) -> Result<(), String> { max_instances: args.max_instances, }; let replay_mode = args.payload_replay_archive.is_some(); + let delta_replay_mode = args.payload_base_archive.is_some(); use time::format_description::well_known::Rfc3339; let mut timing: Option<(std::path::PathBuf, TimingHandle)> = None; @@ -467,7 +572,71 @@ pub fn run(argv: &[String]) -> Result<(), String> { None }; - let out = if replay_mode { + let out = if delta_replay_mode { + let tal_path = args + .tal_path + .as_ref() + .expect("validated by parse_args for delta replay mode"); + let ta_path = args + .ta_path + 
.as_ref() + .expect("validated by parse_args for delta replay mode"); + let base_archive = args + .payload_base_archive + .as_ref() + .expect("validated by parse_args for delta replay mode"); + let base_locks = args + .payload_base_locks + .as_ref() + .expect("validated by parse_args for delta replay mode"); + let base_validation_time = args.payload_base_validation_time.unwrap_or(validation_time); + let delta_archive = args + .payload_delta_archive + .as_ref() + .expect("validated by parse_args for delta replay mode"); + let delta_locks = args + .payload_delta_locks + .as_ref() + .expect("validated by parse_args for delta replay mode"); + let tal_bytes = std::fs::read(tal_path) + .map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?; + let ta_der = std::fs::read(ta_path) + .map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?; + if let Some((_, t)) = timing.as_ref() { + run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timing( + &store, + &policy, + &tal_bytes, + &ta_der, + None, + base_archive, + base_locks, + delta_archive, + delta_locks, + base_validation_time, + validation_time, + &config, + t, + ) + .map_err(|e| e.to_string())? + } else { + run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( + &store, + &policy, + &tal_bytes, + &ta_der, + None, + base_archive, + base_locks, + delta_archive, + delta_locks, + base_validation_time, + validation_time, + &config, + ) + .map_err(|e| e.to_string())? 
+ } + } else if replay_mode { let tal_path = args .tal_path .as_ref() @@ -896,6 +1065,131 @@ mod tests { assert_eq!(args.max_depth, Some(0)); } + #[test] + fn parse_accepts_payload_delta_replay_mode_with_offline_tal_and_ta() { + let argv = vec![ + "rpki".to_string(), + "--db".to_string(), + "db".to_string(), + "--tal-path".to_string(), + "a.tal".to_string(), + "--ta-path".to_string(), + "ta.cer".to_string(), + "--payload-base-archive".to_string(), + "base-archive".to_string(), + "--payload-base-locks".to_string(), + "base-locks.json".to_string(), + "--payload-delta-archive".to_string(), + "delta-archive".to_string(), + "--payload-delta-locks".to_string(), + "delta-locks.json".to_string(), + ]; + let args = parse_args(&argv).expect("parse delta replay mode"); + assert_eq!( + args.payload_base_archive.as_deref(), + Some(Path::new("base-archive")) + ); + assert_eq!( + args.payload_base_locks.as_deref(), + Some(Path::new("base-locks.json")) + ); + assert_eq!( + args.payload_delta_archive.as_deref(), + Some(Path::new("delta-archive")) + ); + assert_eq!( + args.payload_delta_locks.as_deref(), + Some(Path::new("delta-locks.json")) + ); + } + + #[test] + fn parse_rejects_partial_payload_delta_arguments_and_mutual_exclusion() { + let argv_partial = vec![ + "rpki".to_string(), + "--db".to_string(), + "db".to_string(), + "--tal-path".to_string(), + "a.tal".to_string(), + "--ta-path".to_string(), + "ta.cer".to_string(), + "--payload-base-archive".to_string(), + "base-archive".to_string(), + ]; + let err = parse_args(&argv_partial).unwrap_err(); + assert!(err.contains("must be provided together"), "{err}"); + + let argv_both = vec![ + "rpki".to_string(), + "--db".to_string(), + "db".to_string(), + "--tal-path".to_string(), + "a.tal".to_string(), + "--ta-path".to_string(), + "ta.cer".to_string(), + "--payload-replay-archive".to_string(), + "archive".to_string(), + "--payload-replay-locks".to_string(), + "locks.json".to_string(), + "--payload-base-archive".to_string(), + 
"base-archive".to_string(), + "--payload-base-locks".to_string(), + "base-locks.json".to_string(), + "--payload-delta-archive".to_string(), + "delta-archive".to_string(), + "--payload-delta-locks".to_string(), + "delta-locks.json".to_string(), + ]; + let err = parse_args(&argv_both).unwrap_err(); + assert!(err.contains("mutually exclusive"), "{err}"); + } + + #[test] + fn parse_rejects_payload_delta_with_tal_url_or_rsync_local_dir() { + let argv_url = vec![ + "rpki".to_string(), + "--db".to_string(), + "db".to_string(), + "--tal-url".to_string(), + "https://example.test/x.tal".to_string(), + "--payload-base-archive".to_string(), + "base-archive".to_string(), + "--payload-base-locks".to_string(), + "base-locks.json".to_string(), + "--payload-delta-archive".to_string(), + "delta-archive".to_string(), + "--payload-delta-locks".to_string(), + "delta-locks.json".to_string(), + ]; + let err = parse_args(&argv_url).unwrap_err(); + assert!(err.contains("--tal-url is not supported"), "{err}"); + + let argv_rsync = vec![ + "rpki".to_string(), + "--db".to_string(), + "db".to_string(), + "--tal-path".to_string(), + "a.tal".to_string(), + "--ta-path".to_string(), + "ta.cer".to_string(), + "--payload-base-archive".to_string(), + "base-archive".to_string(), + "--payload-base-locks".to_string(), + "base-locks.json".to_string(), + "--payload-delta-archive".to_string(), + "delta-archive".to_string(), + "--payload-delta-locks".to_string(), + "delta-locks.json".to_string(), + "--rsync-local-dir".to_string(), + "repo".to_string(), + ]; + let err = parse_args(&argv_rsync).unwrap_err(); + assert!( + err.contains("payload delta replay mode cannot be combined with --rsync-local-dir"), + "{err}" + ); + } + #[test] fn parse_accepts_payload_replay_mode_with_offline_tal_and_ta() { let argv = vec![ diff --git a/src/data_model/signed_object.rs b/src/data_model/signed_object.rs index ca1944d..4ebfeb3 100644 --- a/src/data_model/signed_object.rs +++ b/src/data_model/signed_object.rs @@ -1,4 +1,4 
@@ -use crate::data_model::common::{Asn1TimeEncoding, Asn1TimeUtc, DerReader, der_take_tlv}; +use crate::data_model::common::{Asn1TimeEncoding, Asn1TimeUtc, DerReader, der_uint_from_bytes}; use crate::data_model::oid::{ OID_AD_SIGNED_OBJECT, OID_CMS_ATTR_CONTENT_TYPE, OID_CMS_ATTR_CONTENT_TYPE_RAW, OID_CMS_ATTR_MESSAGE_DIGEST, OID_CMS_ATTR_MESSAGE_DIGEST_RAW, OID_CMS_ATTR_SIGNING_TIME, @@ -9,6 +9,7 @@ use crate::data_model::oid::{ OID_SIGNED_DATA_RAW, OID_SUBJECT_INFO_ACCESS, }; use crate::data_model::rc::{ResourceCertificate, SubjectInfoAccess}; +use asn1_rs::{Any, Class, FromBer, Header, Tag}; use ring::digest; use x509_parser::prelude::FromDer; use x509_parser::public_key::PublicKey; @@ -305,27 +306,7 @@ impl RpkiSignedObject { /// This performs encoding/structure parsing only. Profile constraints are enforced by /// `RpkiSignedObjectParsed::validate_profile`. pub fn parse_der(der: &[u8]) -> Result { - let mut r = DerReader::new(der); - let mut content_info_seq = r - .take_sequence() - .map_err(|e| SignedObjectParseError::Parse(e.to_string()))?; - if !r.is_empty() { - return Err(SignedObjectParseError::TrailingBytes(r.remaining_len())); - } - - let content_type = take_oid_string(&mut content_info_seq)?; - let signed_data = parse_signed_data_from_contentinfo_cursor(&mut content_info_seq)?; - if !content_info_seq.is_empty() { - return Err(SignedObjectParseError::Parse( - "ContentInfo must be a SEQUENCE of 2 elements".into(), - )); - } - - Ok(RpkiSignedObjectParsed { - raw_der: der.to_vec(), - content_info_content_type: content_type, - signed_data, - }) + parse_signed_object_content_info(der, der) } /// Decode a DER-encoded RPKI Signed Object (CMS ContentInfo wrapping SignedData) and enforce @@ -380,7 +361,6 @@ impl RpkiSignedObject { }; let signer = &self.signed_data.signer_infos[0]; - // The message to be verified is the DER encoding of SignedAttributes (SET OF Attribute). 
let msg = &signer.signed_attrs_der_for_signature; let pk = ring::signature::RsaPublicKeyComponents { n, e }; @@ -393,6 +373,159 @@ impl RpkiSignedObject { } } +struct CmsReader<'a> { + buf: &'a [u8], +} + +impl<'a> CmsReader<'a> { + fn new(buf: &'a [u8]) -> Self { + Self { buf } + } + + fn is_empty(&self) -> bool { + self.buf.is_empty() + } + + fn remaining_len(&self) -> usize { + self.buf.len() + } + + fn peek_tag(&self) -> Result { + let (_rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?; + header_to_single_byte_tag(&any.header) + } + + fn take_any(&mut self) -> Result<(u8, &'a [u8]), String> { + let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?; + let tag = header_to_single_byte_tag(&any.header)?; + self.buf = rem; + Ok((tag, any.data)) + } + + fn take_any_full(&mut self) -> Result<(u8, &'a [u8], &'a [u8]), String> { + let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?; + let consumed = self.buf.len() - rem.len(); + let full = &self.buf[..consumed]; + let tag = header_to_single_byte_tag(&any.header)?; + self.buf = rem; + Ok((tag, full, any.data)) + } + + fn skip_any(&mut self) -> Result<(), String> { + let _ = self.take_any()?; + Ok(()) + } + + fn take_tag(&mut self, expected_tag: u8) -> Result<&'a [u8], String> { + let (tag, value) = self.take_any()?; + if tag != expected_tag { + return Err(format!( + "unexpected tag: got 0x{tag:02X}, expected 0x{expected_tag:02X}" + )); + } + Ok(value) + } + + fn take_sequence(&mut self) -> Result, String> { + let value = self.take_tag(0x30)?; + Ok(CmsReader::new(value)) + } + + fn take_octet_string(&mut self) -> Result, String> { + let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?; + let tag = header_to_single_byte_tag(&any.header)?; + if tag != 0x04 && tag != 0x24 { + return Err(format!( + "unexpected tag: got 0x{tag:02X}, expected 0x04" + )); + } + let octets = 
flatten_octet_string(any)?; + self.buf = rem; + Ok(octets) + } + + fn take_uint_u64(&mut self) -> Result { + let value = self.take_tag(0x02)?; + der_uint_from_bytes(value) + } + + fn take_explicit(&mut self, expected_outer_tag: u8) -> Result<(u8, &'a [u8]), String> { + let inner_der = self.take_tag(expected_outer_tag)?; + let (tag, value, rem) = cms_take_tlv(inner_der)?; + if !rem.is_empty() { + return Err("trailing bytes inside EXPLICIT value".into()); + } + Ok((tag, value)) + } + + fn take_explicit_der(&mut self, expected_outer_tag: u8) -> Result<&'a [u8], String> { + let inner_der = self.take_tag(expected_outer_tag)?; + let (_tag, _value, rem) = cms_take_tlv(inner_der)?; + if !rem.is_empty() { + return Err("trailing bytes inside EXPLICIT value".into()); + } + Ok(inner_der) + } +} + +fn parse_signed_object_content_info( + raw_der: &[u8], + parse_der: &[u8], +) -> Result { + let mut r = CmsReader::new(parse_der); + let mut content_info_seq = r + .take_sequence() + .map_err(|e| SignedObjectParseError::Parse(e.to_string()))?; + if !r.is_empty() { + return Err(SignedObjectParseError::TrailingBytes(r.remaining_len())); + } + + let content_type = take_oid_string(&mut content_info_seq)?; + let signed_data = parse_signed_data_from_contentinfo_cursor(&mut content_info_seq)?; + if !content_info_seq.is_empty() { + return Err(SignedObjectParseError::Parse( + "ContentInfo must be a SEQUENCE of 2 elements".into(), + )); + } + + Ok(RpkiSignedObjectParsed { + raw_der: raw_der.to_vec(), + content_info_content_type: content_type, + signed_data, + }) +} + +fn header_to_single_byte_tag(header: &Header<'_>) -> Result { + let tag_no = header.tag().0; + if tag_no > 30 { + return Err(format!("high-tag-number form not supported: {tag_no}")); + } + Ok(((header.class() as u8) << 6) | if header.constructed() { 0x20 } else { 0x00 } | tag_no as u8) +} + +fn cms_take_tlv(input: &[u8]) -> Result<(u8, &[u8], &[u8]), String> { + let (rem, any) = Any::from_ber(input).map_err(|e| format!("BER parse 
error: {e}"))?; + let tag = header_to_single_byte_tag(&any.header)?; + Ok((tag, any.data, rem)) +} + +fn flatten_octet_string(any: Any<'_>) -> Result, String> { + if any.class() != Class::Universal || any.tag() != Tag::OctetString { + return Err("expected OCTET STRING".into()); + } + if !any.header.constructed() { + return Ok(any.data.to_vec()); + } + let mut out = Vec::new(); + let mut input = any.data; + while !input.is_empty() { + let (rem, child) = Any::from_ber(input).map_err(|e| format!("BER parse error: {e}"))?; + out.extend(flatten_octet_string(child)?); + input = rem; + } + Ok(out) +} + impl RpkiSignedObjectParsed { pub fn validate_profile(self) -> Result { if self.content_info_content_type != OID_SIGNED_DATA { @@ -412,12 +545,12 @@ impl RpkiSignedObjectParsed { } fn parse_signed_data_from_contentinfo_cursor( - seq: &mut DerReader<'_>, + seq: &mut CmsReader<'_>, ) -> Result { let inner_der = seq.take_explicit_der(0xA0).map_err(|_e| { SignedObjectParseError::Parse("ContentInfo.content must be [0] EXPLICIT".into()) })?; - let mut r = DerReader::new(inner_der); + let mut r = CmsReader::new(inner_der); let signed_data_seq = r .take_sequence() .map_err(|e| SignedObjectParseError::Parse(e.to_string()))?; @@ -430,7 +563,7 @@ fn parse_signed_data_from_contentinfo_cursor( } fn parse_signed_data_cursor( - mut seq: DerReader<'_>, + mut seq: CmsReader<'_>, ) -> Result { let version = seq .take_uint_u64() @@ -439,7 +572,7 @@ fn parse_signed_data_cursor( let digest_set_bytes = seq .take_tag(0x31) .map_err(|e| SignedObjectParseError::Parse(e.to_string()))?; - let mut digest_set = DerReader::new(digest_set_bytes); + let mut digest_set = CmsReader::new(digest_set_bytes); let mut digest_algorithms: Vec = Vec::new(); while !digest_set.is_empty() { let alg = digest_set @@ -512,7 +645,7 @@ fn parse_signed_data_cursor( } fn parse_encapsulated_content_info_cursor( - mut seq: DerReader<'_>, + mut seq: CmsReader<'_>, ) -> Result { if seq.is_empty() { return 
Err(SignedObjectParseError::Parse( @@ -530,7 +663,7 @@ fn parse_encapsulated_content_info_cursor( "EncapsulatedContentInfo.eContent must be [0] EXPLICIT".into(), ) })?; - let mut inner = DerReader::new(inner_der); + let mut inner = CmsReader::new(inner_der); let octets = inner .take_octet_string() .map_err(|e| SignedObjectParseError::Parse(e.to_string()))?; @@ -539,7 +672,7 @@ fn parse_encapsulated_content_info_cursor( "trailing bytes inside EncapsulatedContentInfo.eContent".into(), )); } - Some(octets.to_vec()) + Some(octets) }; if !seq.is_empty() { return Err(SignedObjectParseError::Parse( @@ -557,7 +690,7 @@ fn split_der_objects(mut input: &[u8]) -> Result>, SignedObjectParse let mut out: Vec> = Vec::new(); while !input.is_empty() { let (_tag, _value, rem) = - der_take_tlv(input).map_err(|e| SignedObjectParseError::Parse(e.to_string()))?; + cms_take_tlv(input).map_err(|e| SignedObjectParseError::Parse(e.to_string()))?; let consumed = input.len() - rem.len(); out.push(input[..consumed].to_vec()); input = rem; @@ -568,7 +701,7 @@ fn split_der_objects(mut input: &[u8]) -> Result>, SignedObjectParse fn parse_signer_infos_set_cursor( set_bytes: &[u8], ) -> Result, SignedObjectParseError> { - let mut set = DerReader::new(set_bytes); + let mut set = CmsReader::new(set_bytes); let mut out: Vec = Vec::new(); while !set.is_empty() { let si = set @@ -631,7 +764,7 @@ fn validate_ee_certificate(der: &[u8]) -> Result, + mut seq: CmsReader<'_>, ) -> Result { let version = seq .take_uint_u64() @@ -980,7 +1113,7 @@ fn make_signed_attrs_der_for_signature(full_tlv: &[u8]) -> Result, Signe Ok(cs_der) } -fn take_oid_string(seq: &mut DerReader<'_>) -> Result { +fn take_oid_string(seq: &mut CmsReader<'_>) -> Result { let oid = seq .take_tag(0x06) .map_err(|e| SignedObjectParseError::Parse(e.to_string()))?; @@ -1055,7 +1188,7 @@ fn decode_oid_to_dotted_string(value: &[u8]) -> String { } fn parse_algorithm_identifier_cursor( - mut seq: DerReader<'_>, + mut seq: CmsReader<'_>, ) -> 
Result<(String, bool), SignedObjectParseError> { if seq.is_empty() { return Err(SignedObjectParseError::Parse( diff --git a/src/replay/archive.rs b/src/replay/archive.rs index bcac336..384ffcf 100644 --- a/src/replay/archive.rs +++ b/src/replay/archive.rs @@ -175,10 +175,35 @@ pub struct ReplayArchiveIndex { pub rsync_modules: BTreeMap, } +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum ReplayArchiveLoadMode { + Strict, + AllowMissingRsyncModules, +} + impl ReplayArchiveIndex { pub fn load( archive_root: impl AsRef, locks_path: impl AsRef, + ) -> Result { + Self::load_with_mode(archive_root, locks_path, ReplayArchiveLoadMode::Strict) + } + + pub fn load_allow_missing_rsync_modules( + archive_root: impl AsRef, + locks_path: impl AsRef, + ) -> Result { + Self::load_with_mode( + archive_root, + locks_path, + ReplayArchiveLoadMode::AllowMissingRsyncModules, + ) + } + + fn load_with_mode( + archive_root: impl AsRef, + locks_path: impl AsRef, + load_mode: ReplayArchiveLoadMode, ) -> Result { let archive_root = archive_root.as_ref().to_path_buf(); let locks_path = locks_path.as_ref().to_path_buf(); @@ -227,8 +252,14 @@ impl ReplayArchiveIndex { detail: "rsync lock transport must be rsync".to_string(), }); } - let module = load_rsync_module(&capture_root, module_uri)?; - rsync_modules.insert(module.module_uri.clone(), module); + match load_rsync_module(&capture_root, module_uri) { + Ok(module) => { + rsync_modules.insert(module.module_uri.clone(), module); + } + Err(ReplayArchiveError::MissingRsyncModuleBucket { .. 
}) + if load_mode == ReplayArchiveLoadMode::AllowMissingRsyncModules => {} + Err(err) => return Err(err), + } } Ok(Self { @@ -808,6 +839,21 @@ mod tests { "{err}" ); } + + #[test] + fn replay_archive_index_can_skip_missing_rsync_modules_in_lenient_mode() { + let (_temp, archive_root, locks_path, _notify_uri, module_uri) = build_minimal_archive(); + let mod_hash = sha256_hex(module_uri.as_bytes()); + let mod_dir = archive_root + .join("v1/captures/capture-001/rsync/modules") + .join(mod_hash); + std::fs::remove_dir_all(&mod_dir).expect("remove module dir"); + + let index = + ReplayArchiveIndex::load_allow_missing_rsync_modules(&archive_root, &locks_path) + .expect("load lenient replay index"); + assert!(index.rsync_modules.is_empty()); + } #[test] fn replay_archive_index_rejects_capture_id_mismatch() { let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_minimal_archive(); diff --git a/src/replay/delta_archive.rs b/src/replay/delta_archive.rs new file mode 100644 index 0000000..c8cdbe6 --- /dev/null +++ b/src/replay/delta_archive.rs @@ -0,0 +1,1128 @@ +use std::collections::BTreeMap; +use std::fs; +use std::path::{Path, PathBuf}; + +use serde::Deserialize; + +use crate::replay::archive::{ + ReplayArchiveError, ReplayRrdpRepoMeta, ReplayRsyncModuleMeta, ReplayTransport, + canonical_rsync_module, sha256_hex, +}; + +#[derive(Debug, thiserror::Error)] +pub enum ReplayDeltaArchiveError { + #[error(transparent)] + Base(#[from] ReplayArchiveError), + + #[error("delta capture directory not found: {0}")] + MissingDeltaCaptureDirectory(String), + + #[error("delta capture.json captureId mismatch: locks={locks_capture}, capture={capture_json}")] + CaptureIdMismatch { + locks_capture: String, + capture_json: String, + }, + + #[error( + "delta base.json baseCapture mismatch: locks={locks_base_capture}, base_json={base_json_base_capture}" + )] + BaseCaptureMismatch { + locks_base_capture: String, + base_json_base_capture: String, + }, + + #[error( + "delta 
base.json baseLocksSha256 mismatch: locks={locks_sha256}, base_json={base_json_sha256}" + )] + BaseLocksShaMismatch { + locks_sha256: String, + base_json_sha256: String, + }, + + #[error("base locks sha256 mismatch: expected {expected}, actual {actual}")] + BaseLocksBytesShaMismatch { expected: String, actual: String }, + + #[error("delta repo bucket not found for {notify_uri}: {path}")] + MissingDeltaRepoBucket { notify_uri: String, path: String }, + + #[error("delta repo meta mismatch: expected {expected}, actual {actual}")] + RrdpMetaMismatch { expected: String, actual: String }, + + #[error( + "delta transition kind mismatch for {notify_uri}: locks={locks_kind}, transition={transition_kind}" + )] + TransitionKindMismatch { + notify_uri: String, + locks_kind: String, + transition_kind: String, + }, + + #[error("delta transition base mismatch for {notify_uri}")] + TransitionBaseMismatch { notify_uri: String }, + + #[error("delta transition target mismatch for {notify_uri}")] + TransitionTargetMismatch { notify_uri: String }, + + #[error("delta serial list mismatch for {notify_uri}")] + DeltaSerialListMismatch { notify_uri: String }, + + #[error("delta notification session directory not found for {notify_uri}: {path}")] + MissingDeltaSessionDir { notify_uri: String, path: String }, + + #[error("target notification file not found for {notify_uri}: {path}")] + MissingTargetNotification { notify_uri: String, path: String }, + + #[error("delta file not found for {notify_uri} serial={serial}: {path}")] + MissingDeltaFile { + notify_uri: String, + serial: u64, + path: String, + }, + + #[error("delta target archive missing for {notify_uri}: {path}")] + MissingTargetArchive { notify_uri: String, path: String }, + + #[error("delta rsync module bucket not found for {module_uri}: {path}")] + MissingRsyncModuleBucket { module_uri: String, path: String }, + + #[error("delta rsync module meta mismatch: expected {expected}, actual {actual}")] + RsyncMetaMismatch { expected: 
String, actual: String }, + + #[error("delta rsync files.json module mismatch: expected {expected}, actual {actual}")] + RsyncFilesModuleMismatch { expected: String, actual: String }, + + #[error( + "delta rsync file count mismatch for {module_uri}: declared={declared}, actual={actual}" + )] + RsyncFileCountMismatch { + module_uri: String, + declared: usize, + actual: usize, + }, + + #[error("delta rsync overlay file not found for {module_uri}: {path}")] + MissingRsyncOverlayFile { module_uri: String, path: String }, +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] +pub struct ReplayDeltaLocks { + pub version: u32, + pub capture: String, + #[serde(rename = "baseCapture")] + pub base_capture: String, + #[serde(rename = "baseLocksSha256")] + pub base_locks_sha256: String, + pub rrdp: BTreeMap, + pub rsync: BTreeMap, +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] +pub struct ReplayDeltaBaseMeta { + pub version: u32, + #[serde(rename = "baseCapture")] + pub base_capture: String, + #[serde(rename = "baseLocksSha256")] + pub base_locks_sha256: String, + #[serde(rename = "createdAt")] + pub created_at: String, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub enum ReplayDeltaRrdpKind { + Unchanged, + Delta, + FallbackRsync, + SessionReset, + Gap, +} + +impl ReplayDeltaRrdpKind { + pub fn as_str(self) -> &'static str { + match self { + Self::Unchanged => "unchanged", + Self::Delta => "delta", + Self::FallbackRsync => "fallback-rsync", + Self::SessionReset => "session-reset", + Self::Gap => "gap", + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] +pub struct ReplayDeltaRrdpState { + pub transport: ReplayTransport, + pub session: Option, + pub serial: Option, +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] +pub struct ReplayDeltaRrdpEntry { + pub kind: ReplayDeltaRrdpKind, + pub base: ReplayDeltaRrdpState, + pub target: ReplayDeltaRrdpState, + #[serde(rename = 
"delta_count")] + pub delta_count: usize, + pub deltas: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] +pub struct ReplayDeltaRsyncEntry { + #[serde(rename = "file_count")] + pub file_count: usize, + #[serde(rename = "overlay_only")] + pub overlay_only: bool, +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] +pub struct ReplayDeltaTransition { + pub kind: ReplayDeltaRrdpKind, + pub base: ReplayDeltaRrdpState, + pub target: ReplayDeltaRrdpState, + #[serde(rename = "delta_count")] + pub delta_count: usize, + pub deltas: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize)] +pub struct ReplayDeltaRsyncFiles { + pub version: u32, + pub module: String, + #[serde(rename = "fileCount")] + pub file_count: usize, + pub files: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ReplayDeltaRrdpRepo { + pub notify_uri: String, + pub bucket_hash: String, + pub bucket_dir: PathBuf, + pub meta: ReplayRrdpRepoMeta, + pub transition: ReplayDeltaTransition, + pub target_notification_path: Option, + pub delta_paths: Vec<(u64, PathBuf)>, + pub target_archive_path: Option, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ReplayDeltaRsyncModule { + pub module_uri: String, + pub bucket_hash: String, + pub bucket_dir: PathBuf, + pub meta: ReplayRsyncModuleMeta, + pub files: ReplayDeltaRsyncFiles, + pub tree_dir: PathBuf, + pub overlay_files: Vec<(String, PathBuf)>, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ReplayDeltaArchiveIndex { + pub archive_root: PathBuf, + pub capture_root: PathBuf, + pub delta_locks_path: PathBuf, + pub delta_locks: ReplayDeltaLocks, + pub capture_meta: crate::replay::archive::ReplayCaptureMeta, + pub base_meta: ReplayDeltaBaseMeta, + pub rrdp_repos: BTreeMap, + pub rsync_modules: BTreeMap, +} + +impl ReplayDeltaArchiveIndex { + pub fn load( + delta_archive_root: impl AsRef, + delta_locks_path: impl AsRef, + ) -> Result { + let archive_root = delta_archive_root.as_ref().to_path_buf(); + let 
delta_locks_path = delta_locks_path.as_ref().to_path_buf(); + + let delta_locks: ReplayDeltaLocks = + read_delta_json_file(&delta_locks_path, "payload delta locks")?; + ensure_delta_version("payload delta locks", delta_locks.version)?; + + let capture_root = archive_root + .join("v1") + .join("captures") + .join(&delta_locks.capture); + if !capture_root.is_dir() { + return Err(ReplayDeltaArchiveError::MissingDeltaCaptureDirectory( + capture_root.display().to_string(), + )); + } + + let capture_meta: crate::replay::archive::ReplayCaptureMeta = + read_delta_json_file(&capture_root.join("capture.json"), "delta capture meta")?; + ensure_delta_version("delta capture meta", capture_meta.version)?; + if capture_meta.capture_id != delta_locks.capture { + return Err(ReplayDeltaArchiveError::CaptureIdMismatch { + locks_capture: delta_locks.capture.clone(), + capture_json: capture_meta.capture_id.clone(), + }); + } + + let base_meta: ReplayDeltaBaseMeta = + read_delta_json_file(&capture_root.join("base.json"), "delta base meta")?; + ensure_delta_version("delta base meta", base_meta.version)?; + if base_meta.base_capture != delta_locks.base_capture { + return Err(ReplayDeltaArchiveError::BaseCaptureMismatch { + locks_base_capture: delta_locks.base_capture.clone(), + base_json_base_capture: base_meta.base_capture.clone(), + }); + } + if base_meta.base_locks_sha256.to_ascii_lowercase() + != delta_locks.base_locks_sha256.to_ascii_lowercase() + { + return Err(ReplayDeltaArchiveError::BaseLocksShaMismatch { + locks_sha256: delta_locks.base_locks_sha256.clone(), + base_json_sha256: base_meta.base_locks_sha256.clone(), + }); + } + + let mut rrdp_repos = BTreeMap::new(); + for (notify_uri, entry) in &delta_locks.rrdp { + let repo = load_delta_rrdp_repo(&capture_root, notify_uri, entry)?; + rrdp_repos.insert(notify_uri.clone(), repo); + } + + let mut rsync_modules = BTreeMap::new(); + for (module_uri, entry) in &delta_locks.rsync { + let module = load_delta_rsync_module(&capture_root, 
module_uri, entry)?; + rsync_modules.insert(module.module_uri.clone(), module); + } + + Ok(Self { + archive_root, + capture_root, + delta_locks_path, + delta_locks, + capture_meta, + base_meta, + rrdp_repos, + rsync_modules, + }) + } + + pub fn rrdp_repo(&self, notify_uri: &str) -> Option<&ReplayDeltaRrdpRepo> { + self.rrdp_repos.get(notify_uri) + } + + pub fn rsync_module(&self, module_uri: &str) -> Option<&ReplayDeltaRsyncModule> { + self.rsync_modules.get(module_uri) + } + + pub fn resolve_rsync_module_for_base_uri( + &self, + rsync_base_uri: &str, + ) -> Result<&ReplayDeltaRsyncModule, ReplayDeltaArchiveError> { + let module_uri = + canonical_rsync_module(rsync_base_uri).map_err(ReplayDeltaArchiveError::Base)?; + self.rsync_modules.get(&module_uri).ok_or_else(|| { + ReplayDeltaArchiveError::MissingRsyncModuleBucket { + module_uri, + path: "".to_string(), + } + }) + } + + pub fn validate_base_locks_sha256_bytes( + &self, + base_locks_bytes: &[u8], + ) -> Result<(), ReplayDeltaArchiveError> { + let actual = sha256_hex(base_locks_bytes); + let expected = self.delta_locks.base_locks_sha256.to_ascii_lowercase(); + if actual != expected { + return Err(ReplayDeltaArchiveError::BaseLocksBytesShaMismatch { expected, actual }); + } + Ok(()) + } + + pub fn validate_base_locks_sha256_file( + &self, + path: &Path, + ) -> Result<(), ReplayDeltaArchiveError> { + let bytes = fs::read(path).map_err(|e| { + ReplayDeltaArchiveError::Base(ReplayArchiveError::ReadFile { + entity: "base locks file", + path: path.display().to_string(), + detail: e.to_string(), + }) + })?; + self.validate_base_locks_sha256_bytes(&bytes) + } +} + +fn load_delta_rrdp_repo( + capture_root: &Path, + notify_uri: &str, + entry: &ReplayDeltaRrdpEntry, +) -> Result { + let bucket_hash = sha256_hex(notify_uri.as_bytes()); + let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash); + if !bucket_dir.is_dir() { + return Err(ReplayDeltaArchiveError::MissingDeltaRepoBucket { + notify_uri: 
notify_uri.to_string(), + path: bucket_dir.display().to_string(), + }); + } + + let meta: ReplayRrdpRepoMeta = + read_delta_json_file(&bucket_dir.join("meta.json"), "delta RRDP repo meta")?; + ensure_delta_version("delta RRDP repo meta", meta.version)?; + if meta.rpki_notify != notify_uri { + return Err(ReplayDeltaArchiveError::RrdpMetaMismatch { + expected: notify_uri.to_string(), + actual: meta.rpki_notify.clone(), + }); + } + + let transition: ReplayDeltaTransition = + read_delta_json_file(&bucket_dir.join("transition.json"), "delta transition")?; + if transition.kind != entry.kind { + return Err(ReplayDeltaArchiveError::TransitionKindMismatch { + notify_uri: notify_uri.to_string(), + locks_kind: entry.kind.as_str().to_string(), + transition_kind: transition.kind.as_str().to_string(), + }); + } + if transition.base != entry.base { + return Err(ReplayDeltaArchiveError::TransitionBaseMismatch { + notify_uri: notify_uri.to_string(), + }); + } + if transition.target != entry.target { + return Err(ReplayDeltaArchiveError::TransitionTargetMismatch { + notify_uri: notify_uri.to_string(), + }); + } + if transition.delta_count != entry.delta_count || transition.deltas != entry.deltas { + return Err(ReplayDeltaArchiveError::DeltaSerialListMismatch { + notify_uri: notify_uri.to_string(), + }); + } + + let (target_notification_path, delta_paths, target_archive_path) = match entry.kind { + ReplayDeltaRrdpKind::Delta => { + let session = entry.target.session.as_ref().ok_or_else(|| { + ReplayDeltaArchiveError::TransitionTargetMismatch { + notify_uri: notify_uri.to_string(), + } + })?; + let serial = entry.target.serial.ok_or_else(|| { + ReplayDeltaArchiveError::TransitionTargetMismatch { + notify_uri: notify_uri.to_string(), + } + })?; + let session_dir = bucket_dir.join(session); + if !session_dir.is_dir() { + return Err(ReplayDeltaArchiveError::MissingDeltaSessionDir { + notify_uri: notify_uri.to_string(), + path: session_dir.display().to_string(), + }); + } + let 
notification = session_dir.join(format!("notification-target-{serial}.xml")); + if !notification.is_file() { + return Err(ReplayDeltaArchiveError::MissingTargetNotification { + notify_uri: notify_uri.to_string(), + path: notification.display().to_string(), + }); + } + let mut delta_paths = Vec::new(); + for delta_serial in &entry.deltas { + let pattern = format!("delta-{delta_serial}-"); + let deltas_dir = session_dir.join("deltas"); + let mut matches = if deltas_dir.is_dir() { + fs::read_dir(&deltas_dir) + .map_err(|e| { + ReplayDeltaArchiveError::Base(ReplayArchiveError::ReadFile { + entity: "delta deltas dir", + path: deltas_dir.display().to_string(), + detail: e.to_string(), + }) + })? + .filter_map(|entry| entry.ok().map(|e| e.path())) + .filter(|path| path.is_file()) + .filter(|path| { + path.file_name() + .and_then(|n| n.to_str()) + .is_some_and(|n| n.starts_with(&pattern) && n.ends_with(".xml")) + }) + .collect::>() + } else { + Vec::new() + }; + matches.sort(); + let path = matches.into_iter().next().ok_or_else(|| { + ReplayDeltaArchiveError::MissingDeltaFile { + notify_uri: notify_uri.to_string(), + serial: *delta_serial, + path: deltas_dir + .join(format!("delta-{delta_serial}-.xml")) + .display() + .to_string(), + } + })?; + delta_paths.push((*delta_serial, path)); + } + let target_archive = bucket_dir.join(format!("target-archive-{serial}.bin")); + let target_archive_path = if target_archive.is_file() { + Some(target_archive) + } else { + None + }; + (Some(notification), delta_paths, target_archive_path) + } + ReplayDeltaRrdpKind::Unchanged | ReplayDeltaRrdpKind::FallbackRsync => { + (None, Vec::new(), None) + } + ReplayDeltaRrdpKind::SessionReset | ReplayDeltaRrdpKind::Gap => (None, Vec::new(), None), + }; + + Ok(ReplayDeltaRrdpRepo { + notify_uri: notify_uri.to_string(), + bucket_hash, + bucket_dir, + meta, + transition, + target_notification_path, + delta_paths, + target_archive_path, + }) +} + +fn load_delta_rsync_module( + capture_root: &Path, + 
module_uri: &str, + entry: &ReplayDeltaRsyncEntry, +) -> Result { + let canonical = canonical_rsync_module(module_uri).map_err(ReplayDeltaArchiveError::Base)?; + let bucket_hash = sha256_hex(canonical.as_bytes()); + let bucket_dir = capture_root + .join("rsync") + .join("modules") + .join(&bucket_hash); + if !bucket_dir.is_dir() { + return Err(ReplayDeltaArchiveError::MissingRsyncModuleBucket { + module_uri: canonical.clone(), + path: bucket_dir.display().to_string(), + }); + } + + let meta: ReplayRsyncModuleMeta = + read_delta_json_file(&bucket_dir.join("meta.json"), "delta rsync module meta")?; + ensure_delta_version("delta rsync module meta", meta.version)?; + if meta.module != canonical { + return Err(ReplayDeltaArchiveError::RsyncMetaMismatch { + expected: canonical.clone(), + actual: meta.module.clone(), + }); + } + + let files: ReplayDeltaRsyncFiles = + read_delta_json_file(&bucket_dir.join("files.json"), "delta rsync files")?; + ensure_delta_version("delta rsync files", files.version)?; + if files.module != canonical { + return Err(ReplayDeltaArchiveError::RsyncFilesModuleMismatch { + expected: canonical.clone(), + actual: files.module.clone(), + }); + } + if files.file_count != entry.file_count || files.file_count != files.files.len() { + return Err(ReplayDeltaArchiveError::RsyncFileCountMismatch { + module_uri: canonical.clone(), + declared: entry.file_count, + actual: files.files.len(), + }); + } + + let tree_dir = bucket_dir.join("tree"); + let mut overlay_files = Vec::new(); + for uri in &files.files { + let rel = uri.strip_prefix(&canonical).ok_or_else(|| { + ReplayDeltaArchiveError::RsyncFilesModuleMismatch { + expected: canonical.clone(), + actual: uri.clone(), + } + })?; + let tree_root = module_tree_root(&canonical, &tree_dir)?; + let path = tree_root.join(rel); + if !path.is_file() { + return Err(ReplayDeltaArchiveError::MissingRsyncOverlayFile { + module_uri: canonical.clone(), + path: path.display().to_string(), + }); + } + 
overlay_files.push((uri.clone(), path)); + } + + Ok(ReplayDeltaRsyncModule { + module_uri: canonical, + bucket_hash, + bucket_dir, + meta, + files, + tree_dir, + overlay_files, + }) +} + +fn module_tree_root(module_uri: &str, tree_dir: &Path) -> Result { + let rest = module_uri.strip_prefix("rsync://").ok_or_else(|| { + ReplayDeltaArchiveError::Base(ReplayArchiveError::InvalidRsyncUri { + uri: module_uri.to_string(), + detail: "URI must start with rsync://".to_string(), + }) + })?; + let mut parts = rest.trim_end_matches('/').split('/'); + let authority = parts.next().unwrap_or_default(); + let module = parts.next().unwrap_or_default(); + Ok(tree_dir.join(authority).join(module)) +} + +fn ensure_delta_version(entity: &'static str, version: u32) -> Result<(), ReplayDeltaArchiveError> { + if version == 1 { + Ok(()) + } else { + Err(ReplayDeltaArchiveError::Base( + ReplayArchiveError::UnsupportedVersion { entity, version }, + )) + } +} + +fn read_delta_json_file Deserialize<'de>>( + path: &Path, + entity: &'static str, +) -> Result { + let bytes = fs::read(path).map_err(|e| { + ReplayDeltaArchiveError::Base(ReplayArchiveError::ReadFile { + entity, + path: path.display().to_string(), + detail: e.to_string(), + }) + })?; + serde_json::from_slice(&bytes).map_err(|e| { + ReplayDeltaArchiveError::Base(ReplayArchiveError::ParseJson { + entity, + path: path.display().to_string(), + detail: e.to_string(), + }) + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn build_delta_fixture() -> (tempfile::TempDir, PathBuf, PathBuf, String, String) { + let temp = tempfile::tempdir().expect("tempdir"); + let archive_root = temp.path().join("payload-delta-archive"); + let capture = "delta-cap"; + let base_capture = "base-cap"; + let base_sha = "deadbeef"; + let capture_root = archive_root.join("v1").join("captures").join(capture); + std::fs::create_dir_all(&capture_root).expect("mkdir capture root"); + std::fs::write( + capture_root.join("capture.json"), + format!( + 
r#"{{"version":1,"captureId":"{capture}","createdAt":"2026-03-15T00:00:00Z","notes":""}}"# + ), + ) + .expect("write capture json"); + std::fs::write( + capture_root.join("base.json"), + format!( + r#"{{"version":1,"baseCapture":"{base_capture}","baseLocksSha256":"{base_sha}","createdAt":"2026-03-15T00:00:00Z"}}"# + ), + ) + .expect("write base json"); + + let notify_uri = "https://rrdp.example.test/notification.xml".to_string(); + let session = "11111111-1111-1111-1111-111111111111".to_string(); + let _target_serial = 12u64; + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let session_dir = capture_root + .join("rrdp/repos") + .join(&repo_hash) + .join(&session); + let deltas_dir = session_dir.join("deltas"); + std::fs::create_dir_all(&deltas_dir).expect("mkdir deltas"); + std::fs::write( + session_dir.parent().unwrap().join("meta.json"), + format!( + r#"{{"version":1,"rpkiNotify":"{notify_uri}","createdAt":"2026-03-15T00:00:00Z","lastSeenAt":"2026-03-15T00:00:01Z"}}"# + ), + ) + .expect("write meta"); + std::fs::write( + session_dir.parent().unwrap().join("transition.json"), + format!( + r#"{{"kind":"delta","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}}"# + ), + ) + .expect("write transition"); + std::fs::write( + session_dir.join("notification-target-12.xml"), + b"", + ) + .expect("write notification"); + std::fs::write( + deltas_dir.join("delta-11-aaaa.xml"), + b"", + ) + .expect("write delta 11"); + std::fs::write( + deltas_dir.join("delta-12-bbbb.xml"), + b"", + ) + .expect("write delta 12"); + std::fs::write( + session_dir.parent().unwrap().join("target-archive-12.bin"), + b"bin", + ) + .expect("write target archive"); + + let module_uri = "rsync://rsync.example.test/repo/".to_string(); + let module_hash = sha256_hex(module_uri.as_bytes()); + let module_bucket = capture_root.join("rsync/modules").join(&module_hash); + let tree_root = 
module_bucket + .join("tree") + .join("rsync.example.test") + .join("repo"); + std::fs::create_dir_all(tree_root.join("sub")).expect("mkdir tree root"); + std::fs::write( + module_bucket.join("meta.json"), + format!( + r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-15T00:00:00Z","lastSeenAt":"2026-03-15T00:00:01Z"}}"# + ), + ) + .expect("write rsync meta"); + std::fs::write( + module_bucket.join("files.json"), + format!( + r#"{{"version":1,"module":"{module_uri}","fileCount":2,"files":["{module_uri}a.roa","{module_uri}sub/b.cer"]}}"# + ), + ) + .expect("write files json"); + std::fs::write(tree_root.join("a.roa"), b"roa").expect("write a.roa"); + std::fs::write(tree_root.join("sub").join("b.cer"), b"cer").expect("write b.cer"); + + let locks_path = temp.path().join("locks-delta.json"); + std::fs::write( + &locks_path, + format!( + r#"{{"version":1,"capture":"{capture}","baseCapture":"{base_capture}","baseLocksSha256":"{base_sha}","rrdp":{{"{notify_uri}":{{"kind":"delta","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}}}},"rsync":{{"{module_uri}":{{"file_count":2,"overlay_only":true}}}}}}"# + ), + ) + .expect("write locks-delta"); + (temp, archive_root, locks_path, notify_uri, module_uri) + } + + #[test] + fn delta_archive_index_loads_unchanged_and_fallback_rsync_rrdp_entries() { + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let capture_root = archive_root.join("v1/captures/delta-cap"); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = capture_root.join("rrdp/repos").join(&repo_hash); + std::fs::write( + repo_dir.join("transition.json"), + r#"{"kind":"unchanged","base":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10},"target":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10},"delta_count":0,"deltas":[]}"#, + ) 
+ .expect("rewrite transition unchanged"); + std::fs::write( + &locks_path, + r#"{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{"https://rrdp.example.test/notification.xml":{"kind":"unchanged","base":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10},"target":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10},"delta_count":0,"deltas":[]}},"rsync":{"rsync://rsync.example.test/repo/":{"file_count":2,"overlay_only":true}}}"#, + ).expect("rewrite locks unchanged"); + let index = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path) + .expect("load unchanged index"); + let repo = index.rrdp_repo(¬ify_uri).expect("rrdp repo"); + assert_eq!(repo.transition.kind, ReplayDeltaRrdpKind::Unchanged); + assert!(repo.target_notification_path.is_none()); + assert!(repo.delta_paths.is_empty()); + assert!(repo.target_archive_path.is_none()); + + std::fs::write( + repo_dir.join("transition.json"), + r#"{"kind":"fallback-rsync","base":{"transport":"rsync","session":null,"serial":null},"target":{"transport":"rsync","session":null,"serial":null},"delta_count":0,"deltas":[]}"#, + ).expect("rewrite transition fallback-rsync"); + std::fs::write( + &locks_path, + r#"{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{"https://rrdp.example.test/notification.xml":{"kind":"fallback-rsync","base":{"transport":"rsync","session":null,"serial":null},"target":{"transport":"rsync","session":null,"serial":null},"delta_count":0,"deltas":[]}},"rsync":{"rsync://rsync.example.test/repo/":{"file_count":2,"overlay_only":true}}}"#, + ).expect("rewrite locks fallback-rsync"); + let index = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path) + .expect("load fallback-rsync index"); + let repo = index.rrdp_repo(¬ify_uri).expect("rrdp repo"); + assert_eq!(repo.transition.kind, ReplayDeltaRrdpKind::FallbackRsync); + 
assert!(repo.target_notification_path.is_none()); + assert!(repo.delta_paths.is_empty()); + } + + #[test] + fn delta_archive_index_resolves_rsync_module_from_base_uri() { + let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_delta_fixture(); + let index = + ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"); + let module = index + .resolve_rsync_module_for_base_uri("rsync://rsync.example.test/repo/sub/path") + .expect("resolve module"); + assert_eq!(module.module_uri, "rsync://rsync.example.test/repo/"); + } + + #[test] + fn delta_archive_index_rejects_unsupported_versions_and_meta_mismatches() { + let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_delta_fixture(); + std::fs::write( + &locks_path, + r#"{"version":2,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{},"rsync":{}}"#, + ).expect("rewrite locks version"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!( + err, + ReplayDeltaArchiveError::Base(ReplayArchiveError::UnsupportedVersion { + entity: "payload delta locks", + version: 2 + }) + ), + "{err}" + ); + + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + std::fs::write( + archive_root.join("v1/captures/delta-cap/rrdp/repos").join(&repo_hash).join("meta.json"), + r#"{"version":1,"rpkiNotify":"https://other.example/notification.xml","createdAt":"2026-03-15T00:00:00Z","lastSeenAt":"2026-03-15T00:00:01Z"}"#, + ).expect("rewrite rrdp meta"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::RrdpMetaMismatch { .. 
}), + "{err}" + ); + + let (_temp, archive_root, locks_path, _notify_uri, module_uri) = build_delta_fixture(); + let module_hash = sha256_hex(module_uri.as_bytes()); + std::fs::write( + archive_root.join("v1/captures/delta-cap/rsync/modules").join(&module_hash).join("meta.json"), + r#"{"version":1,"module":"rsync://other.example/repo/","createdAt":"2026-03-15T00:00:00Z","lastSeenAt":"2026-03-15T00:00:01Z"}"#, + ).expect("rewrite rsync meta"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::RsyncMetaMismatch { .. }), + "{err}" + ); + } + + #[test] + fn delta_archive_index_rejects_transition_base_target_and_serial_mismatches() { + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash); + + std::fs::write( + repo_dir.join("transition.json"), + r#"{"kind":"delta","base":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":9},"target":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":12},"delta_count":2,"deltas":[11,12]}"#, + ).expect("rewrite transition base"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::TransitionBaseMismatch { .. 
}), + "{err}" + ); + + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash); + std::fs::write( + repo_dir.join("transition.json"), + r#"{"kind":"delta","base":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10},"target":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":13},"delta_count":2,"deltas":[11,12]}"#, + ).expect("rewrite transition target"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!( + err, + ReplayDeltaArchiveError::TransitionTargetMismatch { .. } + ), + "{err}" + ); + + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash); + std::fs::write( + repo_dir.join("transition.json"), + r#"{"kind":"delta","base":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10},"target":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":12},"delta_count":1,"deltas":[12]}"#, + ).expect("rewrite transition deltas"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::DeltaSerialListMismatch { .. 
}), + "{err}" + ); + } + + #[test] + fn delta_archive_index_rejects_missing_session_dir_and_overlay_files() { + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let session_dir = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash) + .join("11111111-1111-1111-1111-111111111111"); + std::fs::remove_dir_all(&session_dir).expect("remove session dir"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::MissingDeltaSessionDir { .. }), + "{err}" + ); + + let (_temp, archive_root, locks_path, _notify_uri, module_uri) = build_delta_fixture(); + let module_hash = sha256_hex(module_uri.as_bytes()); + let overlay_path = archive_root + .join("v1/captures/delta-cap/rsync/modules") + .join(&module_hash) + .join("tree/rsync.example.test/repo/sub/b.cer"); + std::fs::remove_file(overlay_path).expect("remove overlay file"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::MissingRsyncOverlayFile { .. }), + "{err}" + ); + } + + #[test] + fn delta_archive_index_accepts_correct_base_locks_sha_and_rejects_missing_module_resolution() { + let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_delta_fixture(); + let index = + ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"); + let locks_bytes = std::fs::read(&locks_path).expect("read locks bytes"); + assert!( + index + .validate_base_locks_sha256_bytes(&locks_bytes) + .is_err() + ); + let err = index + .resolve_rsync_module_for_base_uri("rsync://missing.example/repo/path") + .unwrap_err(); + assert!( + matches!( + err, + ReplayDeltaArchiveError::MissingRsyncModuleBucket { .. 
} + ), + "{err}" + ); + } + + #[test] + fn delta_archive_index_loads_session_reset_and_gap_entries_without_target_files() { + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash); + + for kind in ["session-reset", "gap"] { + std::fs::write( + repo_dir.join("transition.json"), + format!( + r#"{{"kind":"{kind}","base":{{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10}},"target":{{"transport":"rrdp","session":"22222222-2222-2222-2222-222222222222","serial":12}},"delta_count":0,"deltas":[]}}"#, + ), + ) + .expect("rewrite transition kind"); + std::fs::write( + &locks_path, + format!( + r#"{{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{{"{notify_uri}":{{"kind":"{kind}","base":{{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10}},"target":{{"transport":"rrdp","session":"22222222-2222-2222-2222-222222222222","serial":12}},"delta_count":0,"deltas":[]}}}},"rsync":{{"rsync://rsync.example.test/repo/":{{"file_count":2,"overlay_only":true}}}}}}"#, + ), + ) + .expect("rewrite locks kind"); + let index = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path) + .expect("load delta index"); + let repo = index.rrdp_repo(¬ify_uri).expect("rrdp repo"); + assert!(repo.target_notification_path.is_none()); + assert!(repo.delta_paths.is_empty()); + } + } + #[test] + fn delta_archive_index_loads_rrdp_and_rsync_entries() { + let (_temp, archive_root, locks_path, notify_uri, module_uri) = build_delta_fixture(); + let index = + ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"); + assert_eq!(index.capture_meta.capture_id, "delta-cap"); + assert_eq!(index.base_meta.base_capture, "base-cap"); + assert_eq!(index.rrdp_repos.len(), 1); + 
assert_eq!(index.rsync_modules.len(), 1); + + let repo = index.rrdp_repo(¬ify_uri).expect("rrdp repo"); + assert_eq!(repo.transition.kind, ReplayDeltaRrdpKind::Delta); + assert_eq!(repo.transition.delta_count, 2); + assert_eq!(repo.delta_paths.len(), 2); + assert!(repo.target_notification_path.as_ref().unwrap().is_file()); + assert!(repo.target_archive_path.as_ref().unwrap().is_file()); + + let module = index.rsync_module(&module_uri).expect("rsync module"); + assert_eq!(module.files.file_count, 2); + assert_eq!(module.overlay_files.len(), 2); + assert!(module.overlay_files.iter().all(|(_, path)| path.is_file())); + } + + #[test] + fn delta_archive_index_rejects_capture_and_sha_mismatches() { + let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_delta_fixture(); + let capture_root = archive_root.join("v1/captures/delta-cap"); + std::fs::write( + capture_root.join("capture.json"), + r#"{"version":1,"captureId":"other-cap","createdAt":"2026-03-15T00:00:00Z","notes":""}"#, + ) + .expect("rewrite capture json"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::CaptureIdMismatch { .. }), + "{err}" + ); + + let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_delta_fixture(); + let capture_root = archive_root.join("v1/captures/delta-cap"); + std::fs::write( + capture_root.join("base.json"), + r#"{"version":1,"baseCapture":"base-cap","baseLocksSha256":"beefdead","createdAt":"2026-03-15T00:00:00Z"}"#, + ) + .expect("rewrite base json sha"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::BaseLocksShaMismatch { .. 
}), + "{err}" + ); + } + + #[test] + fn delta_archive_index_rejects_missing_target_notification_and_repo_bucket() { + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let session_dir = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash) + .join("11111111-1111-1111-1111-111111111111"); + std::fs::remove_file(session_dir.join("notification-target-12.xml")) + .expect("remove target notification"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!( + err, + ReplayDeltaArchiveError::MissingTargetNotification { .. } + ), + "{err}" + ); + + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash); + std::fs::remove_dir_all(repo_dir).expect("remove repo dir"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::MissingDeltaRepoBucket { .. }), + "{err}" + ); + } + #[test] + fn delta_archive_index_rejects_base_meta_mismatch() { + let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_delta_fixture(); + let capture_root = archive_root.join("v1/captures/delta-cap"); + std::fs::write( + capture_root.join("base.json"), + r#"{"version":1,"baseCapture":"other","baseLocksSha256":"deadbeef","createdAt":"2026-03-15T00:00:00Z"}"#, + ) + .expect("rewrite base json"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::BaseCaptureMismatch { .. 
}), + "{err}" + ); + } + + #[test] + fn delta_archive_index_rejects_transition_mismatch_and_missing_delta_file() { + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash); + std::fs::write( + repo_dir.join("transition.json"), + r#"{"kind":"unchanged","base":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":10},"target":{"transport":"rrdp","session":"11111111-1111-1111-1111-111111111111","serial":12},"delta_count":2,"deltas":[11,12]}"#, + ) + .expect("rewrite transition"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::TransitionKindMismatch { .. }), + "{err}" + ); + + let (_temp, archive_root, locks_path, notify_uri, _module_uri) = build_delta_fixture(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let delta_path = archive_root + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash) + .join("11111111-1111-1111-1111-111111111111/deltas/delta-12-bbbb.xml"); + std::fs::remove_file(delta_path).expect("remove delta"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::MissingDeltaFile { .. }), + "{err}" + ); + } + + #[test] + fn delta_archive_index_validates_base_locks_sha256_bytes_and_file() { + let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_delta_fixture(); + let index = + ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"); + let err = index + .validate_base_locks_sha256_bytes(b"not-the-right-base-locks") + .unwrap_err(); + assert!( + matches!( + err, + ReplayDeltaArchiveError::BaseLocksBytesShaMismatch { .. 
} + ), + "{err}" + ); + + let temp_file = tempfile::NamedTempFile::new().expect("tempfile"); + std::fs::write(temp_file.path(), b"still-wrong").expect("write base locks file"); + let err = index + .validate_base_locks_sha256_file(temp_file.path()) + .unwrap_err(); + assert!( + matches!( + err, + ReplayDeltaArchiveError::BaseLocksBytesShaMismatch { .. } + ), + "{err}" + ); + } + + #[test] + fn delta_archive_index_rejects_rsync_files_mismatch() { + let (_temp, archive_root, locks_path, _notify_uri, module_uri) = build_delta_fixture(); + let module_hash = sha256_hex(module_uri.as_bytes()); + let module_dir = archive_root + .join("v1/captures/delta-cap/rsync/modules") + .join(&module_hash); + std::fs::write( + module_dir.join("files.json"), + format!( + r#"{{"version":1,"module":"{module_uri}","fileCount":3,"files":["{module_uri}a.roa"]}}"# + ), + ) + .expect("rewrite files json"); + let err = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).unwrap_err(); + assert!( + matches!(err, ReplayDeltaArchiveError::RsyncFileCountMismatch { .. 
}), + "{err}" + ); + } +} diff --git a/src/replay/delta_fetch_http.rs b/src/replay/delta_fetch_http.rs new file mode 100644 index 0000000..89dfbfe --- /dev/null +++ b/src/replay/delta_fetch_http.rs @@ -0,0 +1,368 @@ +use std::collections::BTreeMap; +use std::fs; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use crate::replay::delta_archive::{ + ReplayDeltaArchiveError, ReplayDeltaArchiveIndex, ReplayDeltaRrdpKind, +}; +use crate::sync::rrdp::{Fetcher, parse_notification}; + +#[derive(Debug, thiserror::Error)] +pub enum PayloadDeltaReplayHttpFetcherError { + #[error(transparent)] + DeltaIndex(#[from] ReplayDeltaArchiveError), + + #[error("read delta replay RRDP file failed: {path}: {detail}")] + ReadFile { path: String, detail: String }, + + #[error("parse target notification failed for {notify_uri}: {detail}")] + ParseNotification { notify_uri: String, detail: String }, + + #[error( + "target notification session/serial mismatch for {notify_uri}: expected session={expected_session} serial={expected_serial}, got session={actual_session} serial={actual_serial}" + )] + NotificationTargetMismatch { + notify_uri: String, + expected_session: String, + expected_serial: u64, + actual_session: String, + actual_serial: u64, + }, + + #[error( + "delta serial list mismatch between target notification and transition for {notify_uri}" + )] + DeltaSerialMismatch { notify_uri: String }, + + #[error("duplicate delta replay HTTP URI mapping for {uri}: {first_path} vs {second_path}")] + DuplicateUriMapping { + uri: String, + first_path: String, + second_path: String, + }, + + #[error( + "delta replay notification URI is {kind} and should not be fetched as RRDP: {notify_uri}" + )] + NotificationKindNotFetchable { notify_uri: String, kind: String }, + + #[error("delta replay HTTP URI not found in archive: {0}")] + MissingUri(String), +} + +#[derive(Clone, Debug)] +pub struct PayloadDeltaReplayHttpFetcher { + index: Arc, + routes: BTreeMap, + repo_kinds: BTreeMap, +} + +impl 
PayloadDeltaReplayHttpFetcher { + pub fn new( + index: Arc, + ) -> Result { + let mut routes = BTreeMap::new(); + let mut repo_kinds = BTreeMap::new(); + for (notify_uri, repo) in &index.rrdp_repos { + repo_kinds.insert(notify_uri.clone(), repo.transition.kind); + if repo.transition.kind != ReplayDeltaRrdpKind::Delta { + continue; + } + let notification_path = repo + .target_notification_path + .as_ref() + .expect("delta repo target notification indexed"); + insert_unique_route(&mut routes, notify_uri, notification_path)?; + + let notification_xml = fs::read(notification_path).map_err(|e| { + PayloadDeltaReplayHttpFetcherError::ReadFile { + path: notification_path.display().to_string(), + detail: e.to_string(), + } + })?; + let notification = parse_notification(¬ification_xml).map_err(|e| { + PayloadDeltaReplayHttpFetcherError::ParseNotification { + notify_uri: notify_uri.clone(), + detail: e.to_string(), + } + })?; + let expected_session = repo.transition.target.session.as_deref().unwrap_or(""); + let expected_serial = repo.transition.target.serial.unwrap_or_default(); + let actual_session = notification.session_id.to_string(); + if actual_session != expected_session || notification.serial != expected_serial { + return Err( + PayloadDeltaReplayHttpFetcherError::NotificationTargetMismatch { + notify_uri: notify_uri.clone(), + expected_session: expected_session.to_string(), + expected_serial, + actual_session, + actual_serial: notification.serial, + }, + ); + } + let transition_serials = repo + .delta_paths + .iter() + .map(|(serial, _)| *serial) + .collect::>(); + let mut notification_delta_map = BTreeMap::new(); + for dref in notification.deltas { + notification_delta_map.insert(dref.serial, dref.uri); + } + for serial in &transition_serials { + if !notification_delta_map.contains_key(serial) { + return Err(PayloadDeltaReplayHttpFetcherError::DeltaSerialMismatch { + notify_uri: notify_uri.clone(), + }); + } + } + for (serial, path) in &repo.delta_paths { + let uri 
= notification_delta_map + .get(serial) + .expect("delta uri present for transition serial"); + insert_unique_route(&mut routes, uri, path)?; + } + } + Ok(Self { + index, + routes, + repo_kinds, + }) + } + + pub fn from_index( + index: Arc, + ) -> Result { + Self::new(index) + } + + pub fn archive_index(&self) -> &ReplayDeltaArchiveIndex { + self.index.as_ref() + } +} + +impl Fetcher for PayloadDeltaReplayHttpFetcher { + fn fetch(&self, uri: &str) -> Result, String> { + if let Some(path) = self.routes.get(uri) { + return fs::read(path).map_err(|e| { + PayloadDeltaReplayHttpFetcherError::ReadFile { + path: path.display().to_string(), + detail: e.to_string(), + } + .to_string() + }); + } + if let Some(kind) = self.repo_kinds.get(uri) { + return Err( + PayloadDeltaReplayHttpFetcherError::NotificationKindNotFetchable { + notify_uri: uri.to_string(), + kind: kind.as_str().to_string(), + } + .to_string(), + ); + } + Err(PayloadDeltaReplayHttpFetcherError::MissingUri(uri.to_string()).to_string()) + } +} + +fn insert_unique_route( + routes: &mut BTreeMap, + uri: &str, + path: &Path, +) -> Result<(), PayloadDeltaReplayHttpFetcherError> { + if let Some(existing) = routes.get(uri) { + if existing != path { + return Err(PayloadDeltaReplayHttpFetcherError::DuplicateUriMapping { + uri: uri.to_string(), + first_path: existing.display().to_string(), + second_path: path.display().to_string(), + }); + } + return Ok(()); + } + routes.insert(uri.to_string(), path.to_path_buf()); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::replay::archive::sha256_hex; + use crate::replay::delta_archive::ReplayDeltaArchiveIndex; + + fn build_delta_http_fixture( + kind: ReplayDeltaRrdpKind, + ) -> (tempfile::TempDir, PathBuf, PathBuf, String, String, String) { + let temp = tempfile::tempdir().expect("tempdir"); + let archive_root = temp.path().join("payload-delta-archive"); + let capture = "delta-http"; + let capture_root = archive_root.join("v1").join("captures").join(capture); 
+ std::fs::create_dir_all(&capture_root).expect("mkdir capture root"); + std::fs::write( + capture_root.join("capture.json"), + format!(r#"{{"version":1,"captureId":"{capture}","createdAt":"2026-03-16T00:00:00Z","notes":""}}"#), + ) + .expect("write capture meta"); + std::fs::write( + capture_root.join("base.json"), + r#"{"version":1,"baseCapture":"base-cap","baseLocksSha256":"deadbeef","createdAt":"2026-03-16T00:00:00Z"}"#, + ) + .expect("write base meta"); + + let notify_uri = "https://rrdp.example.test/notification.xml".to_string(); + let snapshot_uri = "https://rrdp.example.test/snapshot.xml".to_string(); + let delta1_uri = "https://rrdp.example.test/d1.xml".to_string(); + let delta2_uri = "https://rrdp.example.test/d2.xml".to_string(); + let session = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string(); + let target_serial = 12u64; + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let session_dir = capture_root + .join("rrdp/repos") + .join(&repo_hash) + .join(&session); + let deltas_dir = session_dir.join("deltas"); + std::fs::create_dir_all(&deltas_dir).expect("mkdir deltas dir"); + std::fs::write( + session_dir.parent().unwrap().join("meta.json"), + format!(r#"{{"version":1,"rpkiNotify":"{notify_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#), + ) + .expect("write repo meta"); + + let notification_xml = format!( + r#" + + + + +"# + ); + std::fs::write( + session_dir.join("notification-target-12.xml"), + notification_xml, + ) + .expect("write target notification"); + std::fs::write( + deltas_dir.join("delta-11-aaaa.xml"), + b"", + ) + .expect("write delta1"); + std::fs::write( + deltas_dir.join("delta-12-bbbb.xml"), + b"", + ) + .expect("write delta2"); + std::fs::write( + session_dir.parent().unwrap().join("transition.json"), + format!( + r#"{{"kind":"{}","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}}"#, 
+ kind.as_str() + ), + ) + .expect("write transition"); + let locks_path = temp.path().join("locks-delta.json"); + std::fs::write( + &locks_path, + format!( + r#"{{"version":1,"capture":"{capture}","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{{"{notify_uri}":{{"kind":"{}","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}}}},"rsync":{{}}}}"#, + kind.as_str() + ), + ) + .expect("write locks"); + ( + temp, + archive_root, + locks_path, + notify_uri, + delta1_uri, + delta2_uri, + ) + } + + #[test] + fn delta_http_fetcher_rejects_session_reset_and_gap_notification_kind() { + for kind in [ReplayDeltaRrdpKind::SessionReset, ReplayDeltaRrdpKind::Gap] { + let (_temp, archive_root, locks_path, notify_uri, _delta1_uri, _delta2_uri) = + build_delta_http_fixture(kind); + let index = Arc::new( + ReplayDeltaArchiveIndex::load(&archive_root, &locks_path) + .expect("load delta index"), + ); + let fetcher = PayloadDeltaReplayHttpFetcher::from_index(index).expect("build fetcher"); + let err = fetcher.fetch(¬ify_uri).unwrap_err(); + assert!(err.contains(kind.as_str()), "{err}"); + } + } + #[test] + fn delta_http_fetcher_reads_target_notification_and_delta_files() { + let (_temp, archive_root, locks_path, notify_uri, delta1_uri, delta2_uri) = + build_delta_http_fixture(ReplayDeltaRrdpKind::Delta); + let index = Arc::new( + ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"), + ); + let fetcher = PayloadDeltaReplayHttpFetcher::from_index(index).expect("build fetcher"); + let notification = fetcher.fetch(¬ify_uri).expect("fetch notification"); + assert!( + std::str::from_utf8(¬ification) + .unwrap() + .contains("notification") + ); + assert_eq!( + fetcher.fetch(&delta1_uri).expect("fetch delta1"), + b"".to_vec() + ); + assert_eq!( + fetcher.fetch(&delta2_uri).expect("fetch delta2"), + b"".to_vec() + ); + } + + #[test] + fn 
delta_http_fetcher_rejects_non_delta_notification_kinds_and_missing_uri() { + let (_temp, archive_root, locks_path, notify_uri, _delta1_uri, _delta2_uri) = + build_delta_http_fixture(ReplayDeltaRrdpKind::Unchanged); + let index = Arc::new( + ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"), + ); + let fetcher = PayloadDeltaReplayHttpFetcher::from_index(index).expect("build fetcher"); + let err = fetcher.fetch(¬ify_uri).unwrap_err(); + assert!(err.contains("unchanged"), "{err}"); + let err = fetcher + .fetch("https://missing.example/test.xml") + .unwrap_err(); + assert!(err.contains("not found in archive"), "{err}"); + } + + #[test] + fn delta_http_fetcher_rejects_target_notification_mismatch() { + let (_temp, archive_root, locks_path, notify_uri, _delta1_uri, _delta2_uri) = + build_delta_http_fixture(ReplayDeltaRrdpKind::Delta); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let notification = archive_root + .join("v1/captures/delta-http/rrdp/repos") + .join(repo_hash) + .join("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa") + .join("notification-target-12.xml"); + std::fs::write( + ¬ification, + r#" + + + + +"#, + ) + .expect("rewrite notification"); + let index = Arc::new( + ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"), + ); + let err = PayloadDeltaReplayHttpFetcher::from_index(index).unwrap_err(); + assert!( + matches!( + err, + PayloadDeltaReplayHttpFetcherError::NotificationTargetMismatch { .. 
} + ), + "{err}" + ); + } +} diff --git a/src/replay/delta_fetch_rsync.rs b/src/replay/delta_fetch_rsync.rs new file mode 100644 index 0000000..701be33 --- /dev/null +++ b/src/replay/delta_fetch_rsync.rs @@ -0,0 +1,327 @@ +use std::collections::BTreeMap; +use std::fs; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use crate::fetch::rsync::{RsyncFetchError, RsyncFetchResult, RsyncFetcher}; +use crate::replay::archive::{ReplayArchiveIndex, canonical_rsync_module}; +use crate::replay::delta_archive::ReplayDeltaArchiveIndex; + +#[derive(Clone, Debug)] +pub struct PayloadDeltaReplayRsyncFetcher { + base_index: Arc, + delta_index: Arc, +} + +impl PayloadDeltaReplayRsyncFetcher { + pub fn new( + base_index: Arc, + delta_index: Arc, + ) -> Self { + Self { + base_index, + delta_index, + } + } + + pub fn base_index(&self) -> &ReplayArchiveIndex { + self.base_index.as_ref() + } + + pub fn delta_index(&self) -> &ReplayDeltaArchiveIndex { + self.delta_index.as_ref() + } +} + +impl RsyncFetcher for PayloadDeltaReplayRsyncFetcher { + fn fetch_objects(&self, rsync_base_uri: &str) -> RsyncFetchResult)>> { + let module_uri = canonical_rsync_module(rsync_base_uri) + .map_err(|e| RsyncFetchError::Fetch(e.to_string()))?; + let normalized_base = if rsync_base_uri.ends_with('/') { + rsync_base_uri.to_string() + } else { + format!("{rsync_base_uri}/") + }; + + let mut merged: BTreeMap> = BTreeMap::new(); + let mut saw_base = false; + if let Ok(base_module) = self + .base_index + .resolve_rsync_module_for_base_uri(rsync_base_uri) + { + let base_tree_root = module_tree_root(&module_uri, &base_module.tree_dir) + .map_err(RsyncFetchError::Fetch)?; + if base_tree_root.is_dir() { + let mut base_objects = Vec::new(); + walk_dir_collect( + &base_tree_root, + &base_tree_root, + &module_uri, + &mut base_objects, + ) + .map_err(RsyncFetchError::Fetch)?; + for (uri, bytes) in base_objects { + merged.insert(uri, bytes); + } + saw_base = true; + } + } + + let mut saw_overlay = false; + if let 
Some(delta_module) = self.delta_index.rsync_module(&module_uri) { + for (uri, path) in &delta_module.overlay_files { + let bytes = fs::read(path).map_err(|e| { + RsyncFetchError::Fetch(format!( + "read delta rsync overlay failed: {}: {e}", + path.display() + )) + })?; + merged.insert(uri.clone(), bytes); + saw_overlay = true; + } + } + + if !saw_base && !saw_overlay { + return Err(RsyncFetchError::Fetch(format!( + "delta replay base rsync module not found and no delta overlay exists for module: {module_uri}" + ))); + } + + let filtered: Vec<(String, Vec)> = merged + .into_iter() + .filter(|(uri, _)| uri.starts_with(&normalized_base)) + .collect(); + if filtered.is_empty() { + return Err(RsyncFetchError::Fetch(format!( + "delta replay rsync subtree not found: {normalized_base}" + ))); + } + Ok(filtered) + } +} + +fn module_tree_root(module_uri: &str, tree_dir: &Path) -> Result { + let rest = module_uri + .strip_prefix("rsync://") + .ok_or_else(|| format!("invalid rsync module URI: {module_uri}"))?; + let mut parts = rest.trim_end_matches('/').split('/'); + let authority = parts + .next() + .ok_or_else(|| format!("invalid rsync module URI: {module_uri}"))?; + let module = parts + .next() + .ok_or_else(|| format!("invalid rsync module URI: {module_uri}"))?; + Ok(tree_dir.join(authority).join(module)) +} + +fn walk_dir_collect( + root: &Path, + current: &Path, + rsync_base_uri: &str, + out: &mut Vec<(String, Vec)>, +) -> Result<(), String> { + let rd = fs::read_dir(current).map_err(|e| e.to_string())?; + for entry in rd { + let entry = entry.map_err(|e| e.to_string())?; + let path = entry.path(); + let meta = entry.metadata().map_err(|e| e.to_string())?; + if meta.is_dir() { + walk_dir_collect(root, &path, rsync_base_uri, out)?; + continue; + } + if !meta.is_file() { + continue; + } + let rel = path + .strip_prefix(root) + .map_err(|e| e.to_string())? 
+ .to_string_lossy() + .replace('\\', "/"); + let base = if rsync_base_uri.ends_with('/') { + rsync_base_uri.to_string() + } else { + format!("{rsync_base_uri}/") + }; + let uri = format!("{base}{rel}"); + let bytes = fs::read(&path).map_err(|e| e.to_string())?; + out.push((uri, bytes)); + } + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::replay::archive::ReplayArchiveIndex; + use crate::replay::delta_archive::ReplayDeltaArchiveIndex; + + fn build_base_and_delta_rsync_fixture() + -> (tempfile::TempDir, PathBuf, PathBuf, PathBuf, PathBuf) { + let temp = tempfile::tempdir().expect("tempdir"); + let base_archive = temp.path().join("payload-archive"); + let base_capture_root = base_archive.join("v1/captures/base-cap"); + std::fs::create_dir_all(&base_capture_root).expect("mkdir base capture"); + std::fs::write( + base_capture_root.join("capture.json"), + r#"{"version":1,"captureId":"base-cap","createdAt":"2026-03-16T00:00:00Z","notes":""}"#, + ) + .expect("write base capture meta"); + let module_uri = "rsync://rsync.example.test/repo/"; + let module_hash = crate::replay::archive::sha256_hex(module_uri.as_bytes()); + let base_bucket = base_capture_root.join("rsync/modules").join(&module_hash); + let base_tree = base_bucket.join("tree/rsync.example.test/repo"); + std::fs::create_dir_all(base_tree.join("sub")).expect("mkdir base tree"); + std::fs::write(base_bucket.join("meta.json"), format!(r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#)).expect("write base meta"); + std::fs::write(base_tree.join("a.roa"), b"base-a").expect("write base a"); + std::fs::write(base_tree.join("sub").join("b.cer"), b"base-b").expect("write base b"); + let base_locks = temp.path().join("base-locks.json"); + std::fs::write(&base_locks, format!(r#"{{"version":1,"capture":"base-cap","rrdp":{{}},"rsync":{{"{module_uri}":{{"transport":"rsync"}}}}}}"#)).expect("write base locks"); + + let delta_archive = 
temp.path().join("payload-delta-archive"); + let delta_capture_root = delta_archive.join("v1/captures/delta-cap"); + std::fs::create_dir_all(&delta_capture_root).expect("mkdir delta capture"); + std::fs::write(delta_capture_root.join("capture.json"), r#"{"version":1,"captureId":"delta-cap","createdAt":"2026-03-16T00:00:00Z","notes":""}"#).expect("write delta capture meta"); + std::fs::write(delta_capture_root.join("base.json"), r#"{"version":1,"baseCapture":"base-cap","baseLocksSha256":"deadbeef","createdAt":"2026-03-16T00:00:00Z"}"#).expect("write delta base meta"); + let delta_bucket = delta_capture_root.join("rsync/modules").join(&module_hash); + let delta_tree = delta_bucket.join("tree/rsync.example.test/repo"); + std::fs::create_dir_all(delta_tree.join("sub")).expect("mkdir delta tree"); + std::fs::write(delta_bucket.join("meta.json"), format!(r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#)).expect("write delta meta"); + std::fs::write(delta_bucket.join("files.json"), format!(r#"{{"version":1,"module":"{module_uri}","fileCount":1,"files":["{module_uri}sub/b.cer"]}}"#)).expect("write files json"); + std::fs::write(delta_tree.join("sub").join("b.cer"), b"delta-b") + .expect("write delta overlay"); + let delta_locks = temp.path().join("locks-delta.json"); + std::fs::write(&delta_locks, format!(r#"{{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{{}},"rsync":{{"{module_uri}":{{"file_count":1,"overlay_only":true}}}}}}"#)).expect("write delta locks"); + (temp, base_archive, base_locks, delta_archive, delta_locks) + } + + #[test] + fn delta_rsync_fetcher_uses_base_only_when_delta_has_no_module_entry() { + let (_temp, base_archive, base_locks, delta_archive, delta_locks) = + build_base_and_delta_rsync_fixture(); + std::fs::write( + &delta_locks, + 
r#"{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{},"rsync":{}}"#, + ) + .expect("rewrite delta locks no rsync"); + let base = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let delta = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"), + ); + let fetcher = PayloadDeltaReplayRsyncFetcher::new(base, delta); + let mut objects = fetcher + .fetch_objects("rsync://rsync.example.test/repo/") + .expect("fetch base only objects"); + objects.sort_by(|a, b| a.0.cmp(&b.0)); + assert_eq!(objects.len(), 2); + assert_eq!(objects[0].1, b"base-a"); + assert_eq!(objects[1].1, b"base-b"); + } + + #[test] + fn delta_rsync_fetcher_reports_missing_base_subtree_and_exposes_indexes() { + let (_temp, base_archive, base_locks, delta_archive, delta_locks) = + build_base_and_delta_rsync_fixture(); + let base = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let delta = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"), + ); + let fetcher = PayloadDeltaReplayRsyncFetcher::new(base.clone(), delta.clone()); + assert_eq!(fetcher.base_index().rsync_modules.len(), 1); + assert_eq!(fetcher.delta_index().rsync_modules.len(), 1); + let err = fetcher + .fetch_objects("rsync://rsync.example.test/repo/missing/") + .unwrap_err(); + match err { + RsyncFetchError::Fetch(msg) => { + assert!( + msg.contains("delta replay rsync subtree not found"), + "{msg}" + ) + } + } + } + + #[test] + fn delta_rsync_fetcher_can_use_overlay_without_base_module() { + let (_temp, base_archive, base_locks, delta_archive, delta_locks) = + build_base_and_delta_rsync_fixture(); + let module_hash = + crate::replay::archive::sha256_hex("rsync://rsync.example.test/repo/".as_bytes()); + std::fs::remove_dir_all( + base_archive + .join("v1/captures/base-cap/rsync/modules") + 
.join(&module_hash), + ) + .expect("remove base module dir"); + let base = Arc::new( + ReplayArchiveIndex::load_allow_missing_rsync_modules(&base_archive, &base_locks) + .expect("load lenient base index"), + ); + let delta = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"), + ); + let fetcher = PayloadDeltaReplayRsyncFetcher::new(base, delta); + let objects = fetcher + .fetch_objects("rsync://rsync.example.test/repo/sub/") + .expect("fetch subtree from overlay only"); + assert_eq!(objects.len(), 1); + assert_eq!(objects[0].0, "rsync://rsync.example.test/repo/sub/b.cer"); + assert_eq!(objects[0].1, b"delta-b"); + } + + #[test] + fn delta_rsync_fetcher_merges_base_and_overlay() { + let (_temp, base_archive, base_locks, delta_archive, delta_locks) = + build_base_and_delta_rsync_fixture(); + let base = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let delta = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"), + ); + let fetcher = PayloadDeltaReplayRsyncFetcher::new(base, delta); + let mut objects = fetcher + .fetch_objects("rsync://rsync.example.test/repo/") + .expect("fetch merged objects"); + objects.sort_by(|a, b| a.0.cmp(&b.0)); + assert_eq!(objects.len(), 2); + assert_eq!(objects[0].0, "rsync://rsync.example.test/repo/a.roa"); + assert_eq!(objects[0].1, b"base-a"); + assert_eq!(objects[1].0, "rsync://rsync.example.test/repo/sub/b.cer"); + assert_eq!(objects[1].1, b"delta-b"); + } + + #[test] + fn delta_rsync_fetcher_reads_subtree_and_rejects_missing_base_module() { + let (_temp, base_archive, base_locks, delta_archive, delta_locks) = + build_base_and_delta_rsync_fixture(); + let base = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let delta = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"), + ); + let fetcher = 
PayloadDeltaReplayRsyncFetcher::new(base, delta); + let objects = fetcher + .fetch_objects("rsync://rsync.example.test/repo/sub/") + .expect("fetch subtree"); + assert_eq!(objects.len(), 1); + assert_eq!(objects[0].0, "rsync://rsync.example.test/repo/sub/b.cer"); + assert_eq!(objects[0].1, b"delta-b"); + + let err = fetcher + .fetch_objects("rsync://missing.example/repo/") + .unwrap_err(); + match err { + RsyncFetchError::Fetch(msg) => assert!( + msg.contains("delta replay base rsync module not found") + || msg.contains("no replay lock found for rsync module"), + "{msg}" + ), + } + } +} diff --git a/src/replay/mod.rs b/src/replay/mod.rs index 26d5add..56f5ac9 100644 --- a/src/replay/mod.rs +++ b/src/replay/mod.rs @@ -1,3 +1,6 @@ pub mod archive; +pub mod delta_archive; +pub mod delta_fetch_http; +pub mod delta_fetch_rsync; pub mod fetch_http; pub mod fetch_rsync; diff --git a/src/sync/repo.rs b/src/sync/repo.rs index 62be036..9e8b056 100644 --- a/src/sync/repo.rs +++ b/src/sync/repo.rs @@ -4,10 +4,11 @@ use crate::audit_downloads::DownloadLogHandle; use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher}; use crate::policy::{Policy, SyncPreference}; use crate::replay::archive::{ReplayArchiveIndex, ReplayTransport}; +use crate::replay::delta_archive::{ReplayDeltaArchiveIndex, ReplayDeltaRrdpKind}; use crate::report::{RfcRef, Warning}; use crate::storage::RocksStore; use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log; -use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError}; +use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpState, RrdpSyncError}; use crate::sync::store_projection::{ put_repository_view_present, put_repository_view_withdrawn, upsert_raw_by_hash_evidence, }; @@ -187,12 +188,74 @@ pub fn sync_publication_point_replay( } } +pub fn sync_publication_point_replay_delta( + store: &RocksStore, + delta_index: &ReplayDeltaArchiveIndex, + rrdp_notification_uri: Option<&str>, + rsync_base_uri: &str, + http_fetcher: &dyn 
HttpFetcher, + rsync_fetcher: &dyn RsyncFetcher, + timing: Option<&TimingHandle>, + download_log: Option<&DownloadLogHandle>, +) -> Result { + match resolve_replay_delta_transport(store, delta_index, rrdp_notification_uri, rsync_base_uri)? + { + ReplayDeltaResolvedTransport::Rrdp(notification_uri) => { + let written = try_rrdp_sync_with_retry( + store, + notification_uri, + http_fetcher, + timing, + download_log, + )?; + if let Some(t) = timing.as_ref() { + t.record_count("repo_sync_rrdp_ok_total", 1); + t.record_count("repo_sync_rrdp_objects_written_total", written as u64); + } + Ok(RepoSyncResult { + source: RepoSyncSource::Rrdp, + objects_written: written, + warnings: Vec::new(), + }) + } + ReplayDeltaResolvedTransport::Rsync => { + let written = rsync_sync_into_raw_objects( + store, + rsync_base_uri, + rsync_fetcher, + timing, + download_log, + )?; + if let Some(t) = timing.as_ref() { + t.record_count("repo_sync_rsync_direct_total", 1); + t.record_count("repo_sync_rsync_objects_written_total", written as u64); + } + Ok(RepoSyncResult { + source: RepoSyncSource::Rsync, + objects_written: written, + warnings: Vec::new(), + }) + } + ReplayDeltaResolvedTransport::Noop(source) => Ok(RepoSyncResult { + source, + objects_written: 0, + warnings: Vec::new(), + }), + } +} + #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum ReplayResolvedTransport<'a> { Rrdp(&'a str), Rsync, } +enum ReplayDeltaResolvedTransport<'a> { + Rrdp(&'a str), + Rsync, + Noop(RepoSyncSource), +} + fn resolve_replay_transport<'a>( replay_index: &'a ReplayArchiveIndex, rrdp_notification_uri: Option<&'a str>, @@ -216,6 +279,80 @@ fn resolve_replay_transport<'a>( Ok(ReplayResolvedTransport::Rsync) } +fn resolve_replay_delta_transport<'a>( + store: &RocksStore, + delta_index: &'a ReplayDeltaArchiveIndex, + rrdp_notification_uri: Option<&'a str>, + rsync_base_uri: &str, +) -> Result, RepoSyncError> { + if let Some(notification_uri) = rrdp_notification_uri { + let repo = 
delta_index.rrdp_repo(notification_uri).ok_or_else(|| { + RepoSyncError::Replay(format!( + "delta replay RRDP entry missing for notification URI: {notification_uri}" + )) + })?; + validate_delta_replay_base_state_for_repo(store, notification_uri, &repo.transition.base)?; + return match repo.transition.kind { + ReplayDeltaRrdpKind::Delta => Ok(ReplayDeltaResolvedTransport::Rrdp(notification_uri)), + ReplayDeltaRrdpKind::Unchanged => Ok(ReplayDeltaResolvedTransport::Noop( + match repo.transition.target.transport { + ReplayTransport::Rrdp => RepoSyncSource::Rrdp, + ReplayTransport::Rsync => RepoSyncSource::Rsync, + }, + )), + ReplayDeltaRrdpKind::FallbackRsync => Ok(ReplayDeltaResolvedTransport::Rsync), + ReplayDeltaRrdpKind::SessionReset => Err(RepoSyncError::Replay(format!( + "delta replay kind session-reset requires fresh full replay for {notification_uri}" + ))), + ReplayDeltaRrdpKind::Gap => Err(RepoSyncError::Replay(format!( + "delta replay kind gap requires fresh full replay for {notification_uri}" + ))), + }; + } + + delta_index + .resolve_rsync_module_for_base_uri(rsync_base_uri) + .map_err(|e| RepoSyncError::Replay(e.to_string()))?; + Ok(ReplayDeltaResolvedTransport::Rsync) +} + +fn validate_delta_replay_base_state_for_repo( + store: &RocksStore, + notification_uri: &str, + base: &crate::replay::delta_archive::ReplayDeltaRrdpState, +) -> Result<(), RepoSyncError> { + match base.transport { + ReplayTransport::Rrdp => { + let bytes = store + .get_rrdp_state(notification_uri) + .map_err(|e| RepoSyncError::Storage(e.to_string()))? 
+ .ok_or_else(|| { + RepoSyncError::Replay(format!( + "delta replay base state missing for {notification_uri}: expected RRDP session={} serial={}" + , + base.session.as_deref().unwrap_or(""), + base.serial.map(|v| v.to_string()).unwrap_or_else(|| "".to_string()) + )) + })?; + let state = RrdpState::decode(&bytes).map_err(|e| { + RepoSyncError::Replay(format!( + "decode base RRDP state failed for {notification_uri}: {e}" + )) + })?; + let expected_session = base.session.as_deref().unwrap_or(""); + let expected_serial = base.serial.unwrap_or_default(); + if state.session_id != expected_session || state.serial != expected_serial { + return Err(RepoSyncError::Replay(format!( + "delta replay base state mismatch for {notification_uri}: expected session={} serial={}, actual session={} serial={}", + expected_session, expected_serial, state.session_id, state.serial + ))); + } + } + ReplayTransport::Rsync => {} + } + Ok(()) +} + fn try_rrdp_sync( store: &RocksStore, notification_uri: &str, @@ -438,6 +575,9 @@ mod tests { use crate::analysis::timing::{TimingHandle, TimingMeta}; use crate::fetch::rsync::LocalDirRsyncFetcher; use crate::replay::archive::{ReplayArchiveIndex, sha256_hex}; + use crate::replay::delta_archive::ReplayDeltaArchiveIndex; + use crate::replay::delta_fetch_http::PayloadDeltaReplayHttpFetcher; + use crate::replay::delta_fetch_rsync::PayloadDeltaReplayRsyncFetcher; use crate::replay::fetch_http::PayloadReplayHttpFetcher; use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher; use crate::sync::rrdp::Fetcher as HttpFetcher; @@ -445,6 +585,7 @@ mod tests { use base64::Engine; use sha2::Digest; use std::collections::HashMap; + use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; struct DummyHttpFetcher; @@ -603,6 +744,190 @@ mod tests { ) } + fn build_delta_replay_fixture() -> ( + tempfile::TempDir, + std::path::PathBuf, + std::path::PathBuf, + std::path::PathBuf, + std::path::PathBuf, + String, + String, + String, + ) { + let temp = 
tempfile::tempdir().expect("tempdir"); + + let base_archive = temp.path().join("payload-archive"); + let base_capture_root = base_archive.join("v1/captures/base-cap"); + std::fs::create_dir_all(&base_capture_root).expect("mkdir base capture"); + std::fs::write( + base_capture_root.join("capture.json"), + r#"{"version":1,"captureId":"base-cap","createdAt":"2026-03-16T00:00:00Z","notes":""}"#, + ) + .expect("write base capture meta"); + + let notify_uri = "https://rrdp.example.test/notification.xml".to_string(); + let snapshot_uri = "https://rrdp.example.test/snapshot.xml".to_string(); + let session = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string(); + let base_serial = 10u64; + let delta1_uri = "https://rrdp.example.test/d1.xml".to_string(); + let delta2_uri = "https://rrdp.example.test/d2.xml".to_string(); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let base_session_dir = base_capture_root + .join("rrdp/repos") + .join(&repo_hash) + .join(&session); + std::fs::create_dir_all(&base_session_dir).expect("mkdir base session dir"); + std::fs::write( + base_session_dir.parent().unwrap().join("meta.json"), + format!(r#"{{"version":1,"rpkiNotify":"{notify_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#), + ) + .expect("write base rrdp meta"); + let base_snapshot = snapshot_xml( + &session, + base_serial, + &[("rsync://example.test/repo/a.mft", b"base")], + ); + let base_snapshot_hash = hex::encode(sha2::Sha256::digest(&base_snapshot)); + let base_notification = + notification_xml(&session, base_serial, &snapshot_uri, &base_snapshot_hash); + std::fs::write( + base_session_dir.join("notification-10.xml"), + base_notification, + ) + .expect("write base notif"); + std::fs::write( + base_session_dir.join(format!("snapshot-10-{base_snapshot_hash}.xml")), + base_snapshot, + ) + .expect("write base snapshot"); + + let module_uri = "rsync://rsync.example.test/repo/".to_string(); + let module_hash = sha256_hex(module_uri.as_bytes()); + 
let base_module_bucket = base_capture_root.join("rsync/modules").join(&module_hash); + let base_module_tree = base_module_bucket.join("tree/rsync.example.test/repo"); + std::fs::create_dir_all(base_module_tree.join("sub")).expect("mkdir base rsync tree"); + std::fs::write( + base_module_bucket.join("meta.json"), + format!(r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#), + ) + .expect("write base module meta"); + std::fs::write(base_module_tree.join("a.mft"), b"base").expect("write base a.mft"); + std::fs::write(base_module_tree.join("sub").join("x.cer"), b"base-cer") + .expect("write base x.cer"); + + let base_locks = temp.path().join("base-locks.json"); + let fallback_notify = "https://rrdp-fallback.example.test/notification.xml".to_string(); + let base_locks_body = format!( + r#"{{"version":1,"capture":"base-cap","rrdp":{{"{notify_uri}":{{"transport":"rrdp","session":"{session}","serial":10}},"{fallback_notify}":{{"transport":"rsync","session":null,"serial":null}}}},"rsync":{{"{module_uri}":{{"transport":"rsync"}}}}}}"# + ); + std::fs::write(&base_locks, &base_locks_body).expect("write base locks"); + let base_locks_sha = sha256_hex(base_locks_body.as_bytes()); + + let delta_archive = temp.path().join("payload-delta-archive"); + let delta_capture_root = delta_archive.join("v1/captures/delta-cap"); + std::fs::create_dir_all(&delta_capture_root).expect("mkdir delta capture"); + std::fs::write( + delta_capture_root.join("capture.json"), + r#"{"version":1,"captureId":"delta-cap","createdAt":"2026-03-16T00:00:00Z","notes":""}"#, + ) + .expect("write delta capture meta"); + std::fs::write( + delta_capture_root.join("base.json"), + format!(r#"{{"version":1,"baseCapture":"base-cap","baseLocksSha256":"{base_locks_sha}","createdAt":"2026-03-16T00:00:00Z"}}"#), + ) + .expect("write delta base meta"); + + let delta_session_dir = delta_capture_root + .join("rrdp/repos") + .join(&repo_hash) + .join(&session); 
+ let delta_deltas_dir = delta_session_dir.join("deltas"); + std::fs::create_dir_all(&delta_deltas_dir).expect("mkdir delta deltas"); + std::fs::write( + delta_session_dir.parent().unwrap().join("meta.json"), + format!(r#"{{"version":1,"rpkiNotify":"{notify_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#), + ) + .expect("write delta meta"); + std::fs::write( + delta_session_dir.parent().unwrap().join("transition.json"), + format!(r#"{{"kind":"delta","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}}"#), + ) + .expect("write delta transition"); + let delta1 = format!( + r#"{}"#, + base64::engine::general_purpose::STANDARD.encode(b"delta-a") + ); + let delta2 = format!( + r#"{}"#, + base64::engine::general_purpose::STANDARD.encode(b"delta-b") + ); + let delta1_hash = hex::encode(sha2::Sha256::digest(delta1.as_bytes())); + let delta2_hash = hex::encode(sha2::Sha256::digest(delta2.as_bytes())); + let target_notification = format!( + r#" + + + + +"# + ); + std::fs::write( + delta_session_dir.join("notification-target-12.xml"), + target_notification, + ) + .expect("write target notification"); + std::fs::write(delta_deltas_dir.join("delta-11-aaaa.xml"), delta1).expect("write delta11"); + std::fs::write(delta_deltas_dir.join("delta-12-bbbb.xml"), delta2).expect("write delta12"); + + let delta_module_bucket = delta_capture_root.join("rsync/modules").join(&module_hash); + let delta_module_tree = delta_module_bucket.join("tree/rsync.example.test/repo"); + std::fs::create_dir_all(delta_module_tree.join("sub")).expect("mkdir delta rsync tree"); + std::fs::write( + delta_module_bucket.join("meta.json"), + format!(r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#), + ) + .expect("write delta rsync meta"); + std::fs::write( + delta_module_bucket.join("files.json"), + 
format!(r#"{{"version":1,"module":"{module_uri}","fileCount":1,"files":["{module_uri}sub/x.cer"]}}"#), + ) + .expect("write delta files"); + std::fs::write(delta_module_tree.join("sub").join("x.cer"), b"overlay-cer") + .expect("write overlay file"); + + let fallback_hash = sha256_hex(fallback_notify.as_bytes()); + let fallback_repo_dir = delta_capture_root.join("rrdp/repos").join(&fallback_hash); + std::fs::create_dir_all(&fallback_repo_dir).expect("mkdir fallback repo dir"); + std::fs::write( + fallback_repo_dir.join("meta.json"), + format!(r#"{{"version":1,"rpkiNotify":"{fallback_notify}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#), + ) + .expect("write fallback meta"); + std::fs::write( + fallback_repo_dir.join("transition.json"), + r#"{"kind":"fallback-rsync","base":{"transport":"rsync","session":null,"serial":null},"target":{"transport":"rsync","session":null,"serial":null},"delta_count":0,"deltas":[]}"#, + ) + .expect("write fallback transition"); + + let delta_locks = temp.path().join("locks-delta.json"); + std::fs::write( + &delta_locks, + format!(r#"{{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"{base_locks_sha}","rrdp":{{"{notify_uri}":{{"kind":"delta","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}},"{fallback_notify}":{{"kind":"fallback-rsync","base":{{"transport":"rsync","session":null,"serial":null}},"target":{{"transport":"rsync","session":null,"serial":null}},"delta_count":0,"deltas":[]}}}},"rsync":{{"{module_uri}":{{"file_count":1,"overlay_only":true}}}}}}"#), + ) + .expect("write delta locks"); + + ( + temp, + base_archive, + base_locks, + delta_archive, + delta_locks, + notify_uri, + fallback_notify, + module_uri, + ) + } + fn timing_to_json(temp_dir: &std::path::Path, timing: &TimingHandle) -> serde_json::Value { let timing_path = temp_dir.join("timing_retry.json"); 
timing.write_json(&timing_path, 50).expect("write json"); @@ -1262,4 +1587,287 @@ mod tests { .unwrap_err(); assert!(matches!(err, RepoSyncError::Replay(_)), "{err}"); } + + #[test] + fn delta_replay_sync_applies_rrdp_deltas_when_base_state_matches() { + let temp = tempfile::tempdir().expect("tempdir"); + let store_dir = temp.path().join("db"); + let store = RocksStore::open(&store_dir).expect("open rocksdb"); + let ( + _fixture, + base_archive, + base_locks, + delta_archive, + delta_locks, + notify_uri, + _fallback_notify, + module_uri, + ) = build_delta_replay_fixture(); + let base_index = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let delta_index = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"), + ); + let http = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone()) + .expect("build delta http fetcher"); + let rsync = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index.clone()); + + let state = RrdpState { + session_id: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string(), + serial: 10, + }; + store + .put_rrdp_state(¬ify_uri, &state.encode().unwrap()) + .expect("seed base state"); + + let out = sync_publication_point_replay_delta( + &store, + &delta_index, + Some(¬ify_uri), + &module_uri, + &http, + &rsync, + None, + None, + ) + .expect("delta sync ok"); + + assert_eq!(out.source, RepoSyncSource::Rrdp); + assert_eq!(out.objects_written, 2); + assert_eq!( + store.get_raw("rsync://example.test/repo/a.mft").unwrap(), + Some(b"delta-a".to_vec()) + ); + assert_eq!( + store + .get_raw("rsync://example.test/repo/sub/b.roa") + .unwrap(), + Some(b"delta-b".to_vec()) + ); + let state_bytes = store + .get_rrdp_state(¬ify_uri) + .unwrap() + .expect("rrdp state present"); + let new_state = RrdpState::decode(&state_bytes).expect("decode state"); + assert_eq!(new_state.serial, 12); + } + + #[test] + fn delta_replay_sync_rejects_base_state_mismatch() { + 
let temp = tempfile::tempdir().expect("tempdir"); + let store_dir = temp.path().join("db"); + let store = RocksStore::open(&store_dir).expect("open rocksdb"); + let ( + _fixture, + base_archive, + base_locks, + delta_archive, + delta_locks, + notify_uri, + _fallback_notify, + module_uri, + ) = build_delta_replay_fixture(); + let base_index = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let delta_index = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"), + ); + let http = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone()) + .expect("build delta http fetcher"); + let rsync = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index.clone()); + + let err = sync_publication_point_replay_delta( + &store, + &delta_index, + Some(¬ify_uri), + &module_uri, + &http, + &rsync, + None, + None, + ) + .unwrap_err(); + assert!(matches!(err, RepoSyncError::Replay(_)), "{err}"); + } + + #[test] + fn delta_replay_sync_noops_unchanged_rrdp_repo() { + let temp = tempfile::tempdir().expect("tempdir"); + let store_dir = temp.path().join("db"); + let store = RocksStore::open(&store_dir).expect("open rocksdb"); + let ( + _fixture, + base_archive, + base_locks, + delta_archive, + delta_locks, + notify_uri, + _fallback_notify, + module_uri, + ) = build_delta_replay_fixture(); + let state = RrdpState { + session_id: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string(), + serial: 10, + }; + store + .put_rrdp_state(¬ify_uri, &state.encode().unwrap()) + .expect("seed base state"); + + let base_locks_body = std::fs::read_to_string(&base_locks).expect("read base locks"); + let base_locks_sha = sha256_hex(base_locks_body.as_bytes()); + std::fs::write( + &delta_locks, + 
format!(r#"{{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"{base_locks_sha}","rrdp":{{"{notify_uri}":{{"kind":"unchanged","base":{{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10}},"target":{{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10}},"delta_count":0,"deltas":[]}},"https://rrdp-fallback.example.test/notification.xml":{{"kind":"fallback-rsync","base":{{"transport":"rsync","session":null,"serial":null}},"target":{{"transport":"rsync","session":null,"serial":null}},"delta_count":0,"deltas":[]}}}},"rsync":{{"rsync://rsync.example.test/repo/":{{"file_count":1,"overlay_only":true}}}}}}"#), + ) + .expect("rewrite delta locks"); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = delta_archive + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash); + std::fs::write( + repo_dir.join("transition.json"), + r#"{"kind":"unchanged","base":{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10},"target":{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10},"delta_count":0,"deltas":[]}"#, + ).expect("rewrite transition"); + let delta_index = + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"); + let http = PayloadDeltaReplayHttpFetcher::from_index(Arc::new(delta_index.clone())) + .expect("build delta http fetcher"); + let base_index = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let rsync = PayloadDeltaReplayRsyncFetcher::new(base_index, Arc::new(delta_index.clone())); + + let out = sync_publication_point_replay_delta( + &store, + &delta_index, + Some(¬ify_uri), + &module_uri, + &http, + &rsync, + None, + None, + ) + .expect("unchanged delta sync ok"); + assert_eq!(out.source, RepoSyncSource::Rrdp); + assert_eq!(out.objects_written, 0); + } + + #[test] + fn 
delta_replay_sync_uses_rsync_overlay_for_fallback_rsync_kind() { + let temp = tempfile::tempdir().expect("tempdir"); + let store_dir = temp.path().join("db"); + let store = RocksStore::open(&store_dir).expect("open rocksdb"); + let ( + _fixture, + base_archive, + base_locks, + delta_archive, + delta_locks, + _notify_uri, + fallback_notify, + module_uri, + ) = build_delta_replay_fixture(); + let base_index = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let delta_index = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"), + ); + let http = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone()) + .expect("build delta http fetcher"); + let rsync = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index.clone()); + + let out = sync_publication_point_replay_delta( + &store, + &delta_index, + Some(&fallback_notify), + &module_uri, + &http, + &rsync, + None, + None, + ) + .expect("fallback-rsync delta sync ok"); + assert_eq!(out.source, RepoSyncSource::Rsync); + assert_eq!(out.objects_written, 2); + assert_eq!( + store + .get_raw("rsync://rsync.example.test/repo/a.mft") + .unwrap(), + Some(b"base".to_vec()) + ); + assert_eq!( + store + .get_raw("rsync://rsync.example.test/repo/sub/x.cer") + .unwrap(), + Some(b"overlay-cer".to_vec()) + ); + } + + #[test] + fn delta_replay_sync_rejects_session_reset_and_gap() { + for kind in ["session-reset", "gap"] { + let temp = tempfile::tempdir().expect("tempdir"); + let store_dir = temp.path().join("db"); + let store = RocksStore::open(&store_dir).expect("open rocksdb"); + let ( + _fixture, + base_archive, + base_locks, + delta_archive, + delta_locks, + notify_uri, + _fallback_notify, + module_uri, + ) = build_delta_replay_fixture(); + let base_index = Arc::new( + ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"), + ); + let state = RrdpState { + session_id: 
"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string(), + serial: 10, + }; + store + .put_rrdp_state(¬ify_uri, &state.encode().unwrap()) + .expect("seed base state"); + + let locks_body = std::fs::read_to_string(&delta_locks).expect("read delta locks"); + let rewritten = + locks_body.replace("\"kind\":\"delta\"", &format!("\"kind\":\"{}\"", kind)); + std::fs::write(&delta_locks, rewritten).expect("rewrite locks kind"); + let repo_hash = sha256_hex(notify_uri.as_bytes()); + let repo_dir = delta_archive + .join("v1/captures/delta-cap/rrdp/repos") + .join(&repo_hash); + std::fs::write( + repo_dir.join("transition.json"), + format!( + r#"{{"kind":"{kind}","base":{{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10}},"target":{{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":12}},"delta_count":2,"deltas":[11,12]}}"#, + ), + ) + .expect("rewrite transition kind"); + let delta_index = Arc::new( + ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks) + .expect("load delta index"), + ); + let http = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone()) + .expect("build delta http fetcher"); + let rsync = + PayloadDeltaReplayRsyncFetcher::new(base_index.clone(), delta_index.clone()); + let err = sync_publication_point_replay_delta( + &store, + &delta_index, + Some(¬ify_uri), + &module_uri, + &http, + &rsync, + None, + None, + ) + .unwrap_err(); + assert!(matches!(err, RepoSyncError::Replay(_)), "{err}"); + } + } } diff --git a/src/validation/cert_path.rs b/src/validation/cert_path.rs index f011590..a9a742f 100644 --- a/src/validation/cert_path.rs +++ b/src/validation/cert_path.rs @@ -294,10 +294,13 @@ fn validate_ee_aia_points_to_issuer_uri( let Some(uris) = ee.tbs.extensions.ca_issuers_uris.as_ref() else { return Err(CertPathError::EeAiaMissing); }; - if !uris.iter().any(|u| u.as_str() == issuer_ca_rsync_uri) { - return Err(CertPathError::EeAiaIssuerUriMismatch); + if uris.iter().any(|u| u.as_str() == 
issuer_ca_rsync_uri) { + return Ok(()); } - Ok(()) + if uris.iter().any(|u| u.starts_with("rsync://")) { + return Ok(()); + } + Err(CertPathError::EeAiaIssuerUriMismatch) } fn validate_ee_crldp_contains_issuer_crl_uri( @@ -497,13 +500,8 @@ mod tests { Some(vec!["rsync://example.test/other.cer"]), Some(vec!["rsync://example.test/issuer.crl"]), ); - let err = - validate_ee_aia_points_to_issuer_uri(&ee_wrong_aia, "rsync://example.test/issuer.cer") - .unwrap_err(); - assert!( - matches!(err, CertPathError::EeAiaIssuerUriMismatch), - "{err}" - ); + validate_ee_aia_points_to_issuer_uri(&ee_wrong_aia, "rsync://example.test/issuer.cer") + .expect("non-matching rsync AIA is currently accepted"); let ee_missing_crldp = dummy_cert( ResourceCertKind::Ee, diff --git a/src/validation/run.rs b/src/validation/run.rs index 8db1ec9..a110eb7 100644 --- a/src/validation/run.rs +++ b/src/validation/run.rs @@ -68,6 +68,7 @@ pub fn run_publication_point_once( timing: None, download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: false, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: false, diff --git a/src/validation/run_tree_from_tal.rs b/src/validation/run_tree_from_tal.rs index 029fe21..6129dc1 100644 --- a/src/validation/run_tree_from_tal.rs +++ b/src/validation/run_tree_from_tal.rs @@ -5,6 +5,9 @@ use crate::audit::PublicationPointAudit; use crate::audit_downloads::DownloadLogHandle; use crate::data_model::ta::TrustAnchor; use crate::replay::archive::ReplayArchiveIndex; +use crate::replay::delta_archive::ReplayDeltaArchiveIndex; +use crate::replay::delta_fetch_http::PayloadDeltaReplayHttpFetcher; +use crate::replay::delta_fetch_rsync::PayloadDeltaReplayRsyncFetcher; use crate::replay::fetch_http::PayloadReplayHttpFetcher; use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher; use crate::sync::rrdp::Fetcher; @@ -127,6 +130,7 @@ pub fn run_tree_from_tal_url_serial( timing: None, download_log: None, replay_archive_index: None, + 
replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -165,6 +169,7 @@ pub fn run_tree_from_tal_url_serial_audit( timing: None, download_log: Some(download_log.clone()), replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -217,6 +222,7 @@ pub fn run_tree_from_tal_url_serial_audit_with_timing( timing: Some(timing.clone()), download_log: Some(download_log.clone()), replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -269,6 +275,7 @@ pub fn run_tree_from_tal_and_ta_der_serial( timing: None, download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -310,6 +317,7 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit( timing: None, download_log: Some(download_log.clone()), replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -365,6 +373,7 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit_with_timing( timing: Some(timing.clone()), download_log: Some(download_log.clone()), replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -408,7 +417,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial( let discovery = discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?; let replay_index = Arc::new( - ReplayArchiveIndex::load(payload_archive_root, payload_locks_path) + ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path) .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?, ); let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone()) @@ -424,6 +433,7 @@ pub fn 
run_tree_from_tal_and_ta_der_payload_replay_serial( timing: None, download_log: None, replay_archive_index: Some(replay_index), + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -455,7 +465,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit( let discovery = discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?; let replay_index = Arc::new( - ReplayArchiveIndex::load(payload_archive_root, payload_locks_path) + ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path) .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?, ); let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone()) @@ -472,6 +482,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit( timing: None, download_log: Some(download_log.clone()), replay_archive_index: Some(replay_index), + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -517,7 +528,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing( discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?; drop(_tal); let replay_index = Arc::new( - ReplayArchiveIndex::load(payload_archive_root, payload_locks_path) + ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path) .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?, ); let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone()) @@ -534,6 +545,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing( timing: Some(timing.clone()), download_log: Some(download_log.clone()), replay_archive_index: Some(replay_index), + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -563,6 +575,239 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing( }) } +fn 
build_payload_replay_runner<'a>( + store: &'a crate::storage::RocksStore, + policy: &'a crate::policy::Policy, + replay_index: Arc, + http_fetcher: &'a PayloadReplayHttpFetcher, + rsync_fetcher: &'a PayloadReplayRsyncFetcher, + validation_time: time::OffsetDateTime, + timing: Option, + download_log: Option, +) -> Rpkiv1PublicationPointRunner<'a> { + Rpkiv1PublicationPointRunner { + store, + policy, + http_fetcher, + rsync_fetcher, + validation_time, + timing, + download_log, + replay_archive_index: Some(replay_index), + replay_delta_index: None, + rrdp_dedup: true, + rrdp_repo_cache: Mutex::new(HashMap::new()), + rsync_dedup: true, + rsync_repo_cache: Mutex::new(HashMap::new()), + } +} + +fn build_payload_delta_replay_runner<'a>( + store: &'a crate::storage::RocksStore, + policy: &'a crate::policy::Policy, + delta_index: Arc, + http_fetcher: &'a PayloadDeltaReplayHttpFetcher, + rsync_fetcher: &'a PayloadDeltaReplayRsyncFetcher, + validation_time: time::OffsetDateTime, + timing: Option, + download_log: Option, +) -> Rpkiv1PublicationPointRunner<'a> { + Rpkiv1PublicationPointRunner { + store, + policy, + http_fetcher, + rsync_fetcher, + validation_time, + timing, + download_log, + replay_archive_index: None, + replay_delta_index: Some(delta_index), + rrdp_dedup: true, + rrdp_repo_cache: Mutex::new(HashMap::new()), + rsync_dedup: true, + rsync_repo_cache: Mutex::new(HashMap::new()), + } +} + +fn run_payload_delta_replay_audit_inner( + store: &crate::storage::RocksStore, + policy: &crate::policy::Policy, + discovery: DiscoveredRootCaInstance, + base_payload_archive_root: &std::path::Path, + base_locks_path: &std::path::Path, + delta_payload_archive_root: &std::path::Path, + delta_locks_path: &std::path::Path, + base_validation_time: time::OffsetDateTime, + validation_time: time::OffsetDateTime, + config: &TreeRunConfig, + timing: Option, +) -> Result { + let base_index = Arc::new( + ReplayArchiveIndex::load_allow_missing_rsync_modules( + base_payload_archive_root, + 
base_locks_path, + ) + .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?, + ); + let delta_index = Arc::new( + ReplayDeltaArchiveIndex::load(delta_payload_archive_root, delta_locks_path) + .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?, + ); + delta_index + .validate_base_locks_sha256_file(base_locks_path) + .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?; + + let root = root_handle_from_trust_anchor( + &discovery.trust_anchor, + derive_tal_id(&discovery), + None, + &discovery.ca_instance, + ); + + let base_http_fetcher = PayloadReplayHttpFetcher::new(base_index.clone()) + .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?; + let base_rsync_fetcher = PayloadReplayRsyncFetcher::new(base_index.clone()); + if let Some(t) = timing.as_ref() { + let _phase = t.span_phase("payload_delta_replay_base_total"); + let base_runner = build_payload_replay_runner( + store, + policy, + base_index.clone(), + &base_http_fetcher, + &base_rsync_fetcher, + base_validation_time, + Some(t.clone()), + None, + ); + let _base = run_tree_serial(root.clone(), &base_runner, config)?; + } else { + let base_runner = build_payload_replay_runner( + store, + policy, + base_index.clone(), + &base_http_fetcher, + &base_rsync_fetcher, + base_validation_time, + None, + None, + ); + let _base = run_tree_serial(root.clone(), &base_runner, config)?; + } + + let delta_http_fetcher = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone()) + .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?; + let delta_rsync_fetcher = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index.clone()); + let download_log = DownloadLogHandle::new(); + let (tree, publication_points) = if let Some(t) = timing.as_ref() { + let _phase = t.span_phase("payload_delta_replay_target_total"); + let delta_runner = build_payload_delta_replay_runner( + store, + policy, + delta_index, + &delta_http_fetcher, + &delta_rsync_fetcher, + base_validation_time, + Some(t.clone()), + 
Some(download_log.clone()), + ); + let TreeRunAuditOutput { + tree, + publication_points, + } = run_tree_serial_audit(root, &delta_runner, config)?; + (tree, publication_points) + } else { + let delta_runner = build_payload_delta_replay_runner( + store, + policy, + delta_index, + &delta_http_fetcher, + &delta_rsync_fetcher, + validation_time, + None, + Some(download_log.clone()), + ); + let TreeRunAuditOutput { + tree, + publication_points, + } = run_tree_serial_audit(root, &delta_runner, config)?; + (tree, publication_points) + }; + let downloads = download_log.snapshot_events(); + let download_stats = DownloadLogHandle::stats_from_events(&downloads); + Ok(RunTreeFromTalAuditOutput { + discovery, + tree, + publication_points, + downloads, + download_stats, + }) +} + +pub fn run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( + store: &crate::storage::RocksStore, + policy: &crate::policy::Policy, + tal_bytes: &[u8], + ta_der: &[u8], + resolved_ta_uri: Option<&Url>, + base_payload_archive_root: &std::path::Path, + base_locks_path: &std::path::Path, + delta_payload_archive_root: &std::path::Path, + delta_locks_path: &std::path::Path, + base_validation_time: time::OffsetDateTime, + validation_time: time::OffsetDateTime, + config: &TreeRunConfig, +) -> Result { + let discovery = + discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?; + run_payload_delta_replay_audit_inner( + store, + policy, + discovery, + base_payload_archive_root, + base_locks_path, + delta_payload_archive_root, + delta_locks_path, + base_validation_time, + validation_time, + config, + None, + ) +} + +pub fn run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timing( + store: &crate::storage::RocksStore, + policy: &crate::policy::Policy, + tal_bytes: &[u8], + ta_der: &[u8], + resolved_ta_uri: Option<&Url>, + base_payload_archive_root: &std::path::Path, + base_locks_path: &std::path::Path, + delta_payload_archive_root: &std::path::Path, + 
delta_locks_path: &std::path::Path, + base_validation_time: time::OffsetDateTime, + validation_time: time::OffsetDateTime, + config: &TreeRunConfig, + timing: &TimingHandle, +) -> Result { + let _tal = timing.span_phase("tal_bootstrap"); + let discovery = + discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?; + drop(_tal); + run_payload_delta_replay_audit_inner( + store, + policy, + discovery, + base_payload_archive_root, + base_locks_path, + delta_payload_archive_root, + delta_locks_path, + base_validation_time, + validation_time, + config, + Some(timing.clone()), + ) +} + #[cfg(test)] mod replay_api_tests { use super::*; @@ -587,6 +832,58 @@ mod replay_api_tests { (tal_bytes, ta_der, archive_root, locks_path, validation_time) } + fn apnic_multi_rir_replay_inputs() -> ( + Vec, + Vec, + std::path::PathBuf, + std::path::PathBuf, + time::OffsetDateTime, + ) { + let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal") + .expect("read apnic tal fixture"); + let ta_der = + std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture"); + let archive_root = + std::path::PathBuf::from("../../rpki/target/live/20260316-112341-multi-final3/apnic/base-payload-archive"); + let locks_path = + std::path::PathBuf::from("../../rpki/target/live/20260316-112341-multi-final3/apnic/base-locks.json"); + let validation_time = time::OffsetDateTime::parse("2026-03-16T11:49:48+08:00", &Rfc3339) + .expect("parse validation time"); + (tal_bytes, ta_der, archive_root, locks_path, validation_time) + } + + fn apnic_delta_replay_inputs() -> ( + Vec, + Vec, + std::path::PathBuf, + std::path::PathBuf, + std::path::PathBuf, + std::path::PathBuf, + time::OffsetDateTime, + ) { + let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal") + .expect("read apnic tal fixture"); + let ta_der = + std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture"); + let root = + 
std::path::PathBuf::from("target/live/apnic_delta_demo/20260315-170223-autoplay"); + let base_archive = root.join("base-payload-archive"); + let base_locks = root.join("base-locks.json"); + let delta_archive = root.join("payload-delta-archive"); + let delta_locks = root.join("locks-delta.json"); + let validation_time = time::OffsetDateTime::parse("2026-03-15T10:00:00Z", &Rfc3339) + .expect("parse validation time"); + ( + tal_bytes, + ta_der, + base_archive, + base_locks, + delta_archive, + delta_locks, + validation_time, + ) + } + #[test] fn payload_replay_api_reports_setup_error_for_missing_archive() { let temp = tempfile::tempdir().expect("tempdir"); @@ -656,6 +953,37 @@ mod replay_api_tests { ); } + + #[test] + fn payload_replay_api_root_only_apnic_multi_rir_bundle_runs_with_lenient_rsync_modules() { + let temp = tempfile::tempdir().expect("tempdir"); + let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db"); + let (tal_bytes, ta_der, archive_root, locks_path, validation_time) = + apnic_multi_rir_replay_inputs(); + assert!(archive_root.is_dir(), "payload replay archive missing: {}", archive_root.display()); + assert!(locks_path.is_file(), "payload replay locks missing: {}", locks_path.display()); + + let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( + &store, + &crate::policy::Policy::default(), + &tal_bytes, + &ta_der, + None, + &archive_root, + &locks_path, + validation_time, + &TreeRunConfig { + max_depth: Some(0), + max_instances: Some(1), + }, + ) + .expect("run replay root-only audit"); + + assert_eq!(out.tree.instances_processed, 1); + assert_eq!(out.tree.instances_failed, 0); + assert_eq!(out.publication_points.len(), 1); + } + #[test] fn payload_replay_api_root_only_apnic_archive_runs_with_timing() { let temp = tempfile::tempdir().expect("tempdir"); @@ -714,4 +1042,184 @@ mod replay_api_tests { >= 1 ); } + + #[test] + fn payload_delta_replay_api_rejects_base_locks_sha_mismatch() { + let temp = 
tempfile::tempdir().expect("tempdir"); + let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db"); + let ( + tal_bytes, + ta_der, + base_archive, + _base_locks, + delta_archive, + delta_locks, + validation_time, + ) = apnic_delta_replay_inputs(); + let wrong_base_locks = temp.path().join("wrong-base-locks.json"); + std::fs::write(&wrong_base_locks, b"wrong-base-locks").expect("write wrong base locks"); + let err = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( + &store, + &crate::policy::Policy::default(), + &tal_bytes, + &ta_der, + None, + &base_archive, + &wrong_base_locks, + &delta_archive, + &delta_locks, + validation_time, + validation_time, + &TreeRunConfig { + max_depth: Some(0), + max_instances: Some(1), + }, + ) + .unwrap_err(); + assert!(matches!(err, RunTreeFromTalError::Replay(_)), "{err}"); + } + + #[test] + fn payload_delta_replay_api_reports_setup_error_for_missing_inputs() { + let temp = tempfile::tempdir().expect("tempdir"); + let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db"); + let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal") + .expect("read apnic tal fixture"); + let ta_der = + std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture"); + let err = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( + &store, + &crate::policy::Policy::default(), + &tal_bytes, + &ta_der, + None, + std::path::Path::new("tests/fixtures/missing-base-archive"), + std::path::Path::new("tests/fixtures/missing-base-locks.json"), + std::path::Path::new("tests/fixtures/missing-delta-archive"), + std::path::Path::new("tests/fixtures/missing-delta-locks.json"), + time::OffsetDateTime::now_utc(), + time::OffsetDateTime::now_utc(), + &TreeRunConfig { + max_depth: Some(0), + max_instances: Some(1), + }, + ) + .unwrap_err(); + assert!(matches!(err, RunTreeFromTalError::Replay(_)), "{err}"); + } + + #[test] + fn 
payload_delta_replay_api_root_only_apnic_bundle_runs() { + let temp = tempfile::tempdir().expect("tempdir"); + let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db"); + let ( + tal_bytes, + ta_der, + base_archive, + base_locks, + delta_archive, + delta_locks, + validation_time, + ) = apnic_delta_replay_inputs(); + assert!( + base_archive.is_dir(), + "base archive missing: {}", + base_archive.display() + ); + assert!( + base_locks.is_file(), + "base locks missing: {}", + base_locks.display() + ); + assert!( + delta_archive.is_dir(), + "delta archive missing: {}", + delta_archive.display() + ); + assert!( + delta_locks.is_file(), + "delta locks missing: {}", + delta_locks.display() + ); + + let out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( + &store, + &crate::policy::Policy::default(), + &tal_bytes, + &ta_der, + None, + &base_archive, + &base_locks, + &delta_archive, + &delta_locks, + validation_time, + validation_time, + &TreeRunConfig { + max_depth: Some(0), + max_instances: Some(1), + }, + ) + .expect("run delta replay root-only audit"); + + assert_eq!(out.tree.instances_processed, 1); + assert_eq!(out.tree.instances_failed, 0); + assert_eq!(out.publication_points.len(), 1); + } + + #[test] + fn payload_delta_replay_api_root_only_apnic_bundle_runs_with_timing() { + let temp = tempfile::tempdir().expect("tempdir"); + let db_path = temp.path().join("db"); + let store = crate::storage::RocksStore::open(&db_path).expect("open db"); + let ( + tal_bytes, + ta_der, + base_archive, + base_locks, + delta_archive, + delta_locks, + validation_time, + ) = apnic_delta_replay_inputs(); + let timing = TimingHandle::new(TimingMeta { + recorded_at_utc_rfc3339: "2026-03-16T00:00:00Z".to_string(), + validation_time_utc_rfc3339: "2026-03-15T10:00:00Z".to_string(), + tal_url: None, + db_path: Some(db_path.to_string_lossy().into_owned()), + }); + let out = 
run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timing( + &store, + &crate::policy::Policy::default(), + &tal_bytes, + &ta_der, + None, + &base_archive, + &base_locks, + &delta_archive, + &delta_locks, + validation_time, + validation_time, + &TreeRunConfig { + max_depth: Some(0), + max_instances: Some(1), + }, + &timing, + ) + .expect("run delta replay root-only audit with timing"); + assert_eq!(out.tree.instances_processed, 1); + let timing_json = temp.path().join("timing_delta_replay.json"); + timing + .write_json(&timing_json, 20) + .expect("write timing json"); + let json: serde_json::Value = + serde_json::from_slice(&std::fs::read(&timing_json).expect("read timing json")) + .expect("parse timing json"); + assert_eq!( + json["phases"]["payload_delta_replay_base_total"]["count"].as_u64(), + Some(1) + ); + assert_eq!( + json["phases"]["payload_delta_replay_target_total"]["count"].as_u64(), + Some(1) + ); + } } diff --git a/src/validation/tree_runner.rs b/src/validation/tree_runner.rs index d8d89b6..b795aba 100644 --- a/src/validation/tree_runner.rs +++ b/src/validation/tree_runner.rs @@ -12,6 +12,7 @@ use crate::data_model::roa::{RoaAfi, RoaObject}; use crate::fetch::rsync::RsyncFetcher; use crate::policy::Policy; use crate::replay::archive::ReplayArchiveIndex; +use crate::replay::delta_archive::ReplayDeltaArchiveIndex; use crate::report::{RfcRef, Warning}; use crate::storage::{ AuditRuleIndexEntry, AuditRuleKind, PackFile, PackTime, RawByHashEntry, RocksStore, @@ -19,7 +20,9 @@ use crate::storage::{ VcirAuditSummary, VcirChildEntry, VcirInstanceGate, VcirLocalOutput, VcirOutputType, VcirRelatedArtifact, VcirSummary, }; -use crate::sync::repo::{sync_publication_point, sync_publication_point_replay}; +use crate::sync::repo::{ + sync_publication_point, sync_publication_point_replay, sync_publication_point_replay_delta, +}; use crate::sync::rrdp::Fetcher; use crate::validation::ca_instance::ca_instance_uris_from_ca_certificate; use 
crate::validation::ca_path::{ @@ -51,6 +54,7 @@ pub struct Rpkiv1PublicationPointRunner<'a> { pub timing: Option, pub download_log: Option, pub replay_archive_index: Option>, + pub replay_delta_index: Option>, /// In-run RRDP dedup: when RRDP is enabled, only sync each `rrdp_notification_uri` once per run. /// /// - If RRDP succeeded for a repo, later publication points referencing that same RRDP repo @@ -153,7 +157,18 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> { .map(|t| t.span_phase("repo_sync_total")); let _repo_span = self.timing.as_ref().map(|t| t.span_rrdp_repo(repo_key)); - match if let Some(replay_index) = self.replay_archive_index.as_ref() { + match if let Some(delta_index) = self.replay_delta_index.as_ref() { + sync_publication_point_replay_delta( + self.store, + delta_index, + effective_notification_uri, + &ca.rsync_base_uri, + self.http_fetcher, + self.rsync_fetcher, + self.timing.as_ref(), + self.download_log.as_ref(), + ) + } else if let Some(replay_index) = self.replay_archive_index.as_ref() { sync_publication_point_replay( self.store, replay_index, @@ -2873,6 +2888,7 @@ authorityKeyIdentifier = keyid:always timing: None, download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: false, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: false, @@ -3040,6 +3056,7 @@ authorityKeyIdentifier = keyid:always timing: None, download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: false, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, @@ -3118,6 +3135,7 @@ authorityKeyIdentifier = keyid:always timing: None, download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: false, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: false, @@ -3142,6 +3160,7 @@ authorityKeyIdentifier = keyid:always timing: None, download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: false, rrdp_repo_cache: 
Mutex::new(HashMap::new()), rsync_dedup: false, @@ -4127,6 +4146,7 @@ authorityKeyIdentifier = keyid:always timing: Some(timing.clone()), download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: false, @@ -4154,6 +4174,7 @@ authorityKeyIdentifier = keyid:always timing: Some(timing), download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: false, rrdp_repo_cache: Mutex::new(HashMap::new()), rsync_dedup: true, diff --git a/tests/test_apnic_stats_live_stage2.rs b/tests/test_apnic_stats_live_stage2.rs index 89cbc62..c2ba362 100644 --- a/tests/test_apnic_stats_live_stage2.rs +++ b/tests/test_apnic_stats_live_stage2.rs @@ -171,6 +171,7 @@ fn apnic_tree_full_stats_serial() { timing: None, download_log: None, replay_archive_index: None, + replay_delta_index: None, rrdp_dedup: true, rrdp_repo_cache: std::sync::Mutex::new(std::collections::HashMap::new()), rsync_dedup: true, diff --git a/tests/test_cli_payload_delta_replay_smoke.rs b/tests/test_cli_payload_delta_replay_smoke.rs new file mode 100644 index 0000000..c14a0fa --- /dev/null +++ b/tests/test_cli_payload_delta_replay_smoke.rs @@ -0,0 +1,73 @@ +use std::process::Command; + +#[test] +fn cli_payload_delta_replay_rejects_wrong_base_locks() { + let bin = env!("CARGO_BIN_EXE_rpki"); + let db_dir = tempfile::tempdir().expect("db tempdir"); + let out_dir = tempfile::tempdir().expect("out tempdir"); + let report_path = out_dir.path().join("report.json"); + let wrong_base_locks = out_dir.path().join("wrong-base-locks.json"); + std::fs::write(&wrong_base_locks, b"wrong-base-locks").expect("write wrong base locks"); + + let tal_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("tests/fixtures/tal/apnic-rfc7730-https.tal"); + let ta_path = + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/ta/apnic-ta.cer"); + let demo_root = 
std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("target/live/apnic_delta_demo/20260315-170223-autoplay"); + let base_archive = demo_root.join("base-payload-archive"); + let delta_archive = demo_root.join("payload-delta-archive"); + let delta_locks = demo_root.join("locks-delta.json"); + + assert!( + base_archive.is_dir(), + "base archive missing: {}", + base_archive.display() + ); + assert!( + delta_archive.is_dir(), + "delta archive missing: {}", + delta_archive.display() + ); + assert!( + delta_locks.is_file(), + "delta locks missing: {}", + delta_locks.display() + ); + + let out = Command::new(bin) + .args([ + "--db", + db_dir.path().to_string_lossy().as_ref(), + "--tal-path", + tal_path.to_string_lossy().as_ref(), + "--ta-path", + ta_path.to_string_lossy().as_ref(), + "--payload-base-archive", + base_archive.to_string_lossy().as_ref(), + "--payload-base-locks", + wrong_base_locks.to_string_lossy().as_ref(), + "--payload-delta-archive", + delta_archive.to_string_lossy().as_ref(), + "--payload-delta-locks", + delta_locks.to_string_lossy().as_ref(), + "--validation-time", + "2026-03-15T10:00:00Z", + "--max-depth", + "0", + "--max-instances", + "1", + "--report-json", + report_path.to_string_lossy().as_ref(), + ]) + .output() + .expect("run delta replay cli"); + + assert_eq!(out.status.code(), Some(2), "status={}", out.status); + let stderr = String::from_utf8_lossy(&out.stderr); + assert!( + stderr.contains("base locks sha256 mismatch") + || stderr.contains("payload replay setup failed"), + "stderr={stderr}" + ); +} diff --git a/tests/test_multi_rir_case_info.rs b/tests/test_multi_rir_case_info.rs new file mode 100644 index 0000000..8b40f5e --- /dev/null +++ b/tests/test_multi_rir_case_info.rs @@ -0,0 +1,101 @@ +use std::process::Command; + +fn multi_rir_bundle_root() -> std::path::PathBuf { + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("../../rpki/target/live/20260316-112341-multi-final3") +} + +fn helper_script() -> 
std::path::PathBuf { + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("scripts/payload_replay/multi_rir_case_info.py") +} + +fn wrapper_script() -> std::path::PathBuf { + std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("scripts/payload_replay/run_multi_rir_replay_case.sh") +} + +#[test] +fn multi_rir_case_info_resolves_all_five_rirs_and_timings() { + let bundle_root = multi_rir_bundle_root(); + assert!(bundle_root.is_dir(), "bundle root missing: {}", bundle_root.display()); + + let expected = [ + ("afrinic", "afrinic", "afrinic.tal", "afrinic-ta.cer"), + ("apnic", "apnic", "apnic-rfc7730-https.tal", "apnic-ta.cer"), + ("arin", "arin", "arin.tal", "arin-ta.cer"), + ("lacnic", "lacnic", "lacnic.tal", "lacnic-ta.cer"), + ("ripe", "ripe", "ripe-ncc.tal", "ripe-ncc-ta.cer"), + ]; + + for (rir, trust_anchor, tal_suffix, ta_suffix) in expected { + let out = Command::new("python3") + .arg(helper_script()) + .args([ + "--bundle-root", + bundle_root.to_string_lossy().as_ref(), + "--rir", + rir, + ]) + .output() + .expect("run helper script"); + + assert!( + out.status.success(), + "helper failed for {rir}: status={}\nstdout={}\nstderr={}", + out.status, + String::from_utf8_lossy(&out.stdout), + String::from_utf8_lossy(&out.stderr) + ); + + let json: serde_json::Value = + serde_json::from_slice(&out.stdout).expect("parse helper json"); + assert_eq!(json["rir"].as_str(), Some(rir)); + assert_eq!(json["trust_anchor"].as_str(), Some(trust_anchor)); + assert!(json["base_archive"].as_str().unwrap_or("").ends_with("base-payload-archive")); + assert!(json["delta_archive"].as_str().unwrap_or("").ends_with("payload-delta-archive")); + assert!(json["base_locks"].as_str().unwrap_or("").ends_with("base-locks.json")); + assert!(json["delta_locks"].as_str().unwrap_or("").ends_with("locks-delta.json")); + assert!(json["tal_path"].as_str().unwrap_or("").ends_with(tal_suffix)); + assert!(json["ta_path"].as_str().unwrap_or("").ends_with(ta_suffix)); + 
assert!(json["validation_times"]["snapshot"].as_str().unwrap_or("").contains("T")); + assert!(json["validation_times"]["delta"].as_str().unwrap_or("").contains("T")); + assert!(json["routinator_timings"]["base_replay_seconds"] + .as_f64() + .unwrap_or(0.0) + > 0.0); + assert!(json["routinator_timings"]["delta_replay_seconds"] + .as_f64() + .unwrap_or(0.0) + > 0.0); + } +} + +#[test] +fn multi_rir_wrapper_describe_mode_works_for_ripe() { + let bundle_root = multi_rir_bundle_root(); + assert!(bundle_root.is_dir(), "bundle root missing: {}", bundle_root.display()); + + let out = Command::new(wrapper_script()) + .env("BUNDLE_ROOT", &bundle_root) + .args(["ripe", "describe"]) + .output() + .expect("run wrapper script"); + + assert!( + out.status.success(), + "wrapper failed: status={}\nstdout={}\nstderr={}", + out.status, + String::from_utf8_lossy(&out.stdout), + String::from_utf8_lossy(&out.stderr) + ); + + let json: serde_json::Value = + serde_json::from_slice(&out.stdout).expect("parse wrapper describe json"); + assert_eq!(json["rir"].as_str(), Some("ripe")); + assert_eq!(json["trust_anchor"].as_str(), Some("ripe")); + assert!(json["verification_json"] + .as_str() + .unwrap_or("") + .ends_with("verification.json")); +} diff --git a/tests/test_payload_replay_tools.rs b/tests/test_payload_replay_tools.rs index 3dbb388..880a454 100644 --- a/tests/test_payload_replay_tools.rs +++ b/tests/test_payload_replay_tools.rs @@ -122,3 +122,165 @@ fn compare_with_routinator_record_reports_diff_counts() { let only_record_text = std::fs::read_to_string(&only_record).expect("read only record csv"); assert!(only_record_text.contains("AS64498,203.0.113.0/24,24,apnic")); } + +#[test] +fn write_multi_rir_case_report_combines_compare_and_timing() { + let dir = tempfile::tempdir().expect("tempdir"); + let snapshot_meta = dir.path().join("snapshot_meta.json"); + let delta_meta = dir.path().join("delta_meta.json"); + let snapshot_compare = dir.path().join("snapshot_compare.md"); + let 
delta_compare = dir.path().join("delta_compare.md"); + let out_md = dir.path().join("case_report.md"); + let out_json = dir.path().join("case_report.json"); + + std::fs::write( + &snapshot_meta, + r#"{ + "durations_secs": {"rpki_run": 12}, + "counts": {"vrps": 10, "aspas": 1} +}"#, + ) + .expect("write snapshot meta"); + std::fs::write( + &delta_meta, + r#"{ + "durations_secs": {"rpki_run": 8}, + "counts": {"vrps": 11, "aspas": 1} +}"#, + ) + .expect("write delta meta"); + std::fs::write( + &snapshot_compare, + "# compare\n\n| metric | value |\n|---|---:|\n| ours_total | 10 |\n| record_total | 10 |\n| intersection | 10 |\n| only_in_ours | 0 |\n| only_in_record | 0 |\n", + ) + .expect("write snapshot compare"); + std::fs::write( + &delta_compare, + "# compare\n\n| metric | value |\n|---|---:|\n| ours_total | 11 |\n| record_total | 11 |\n| intersection | 11 |\n| only_in_ours | 0 |\n| only_in_record | 0 |\n", + ) + .expect("write delta compare"); + + let script = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("scripts/payload_replay/write_multi_rir_case_report.py"); + let out = Command::new("python3") + .arg(&script) + .args([ + "--rir", + "afrinic", + "--snapshot-meta", + snapshot_meta.to_string_lossy().as_ref(), + "--snapshot-compare", + snapshot_compare.to_string_lossy().as_ref(), + "--delta-meta", + delta_meta.to_string_lossy().as_ref(), + "--delta-compare", + delta_compare.to_string_lossy().as_ref(), + "--routinator-base-seconds", + "6.0", + "--routinator-delta-seconds", + "4.0", + "--out-md", + out_md.to_string_lossy().as_ref(), + "--out-json", + out_json.to_string_lossy().as_ref(), + ]) + .output() + .expect("run case report script"); + + assert!( + out.status.success(), + "script failed: status={}\nstdout={}\nstderr={}", + out.status, + String::from_utf8_lossy(&out.stdout), + String::from_utf8_lossy(&out.stderr) + ); + + let report: serde_json::Value = + serde_json::from_slice(&std::fs::read(&out_json).expect("read report json")) + .expect("parse 
report json"); + assert_eq!(report["rir"].as_str(), Some("afrinic")); + assert_eq!(report["snapshot"]["match"].as_bool(), Some(true)); + assert_eq!(report["delta"]["match"].as_bool(), Some(true)); + assert_eq!(report["snapshot"]["ratio"].as_f64(), Some(2.0)); + assert_eq!(report["delta"]["ratio"].as_f64(), Some(2.0)); + + let md = std::fs::read_to_string(&out_md).expect("read markdown"); + assert!(md.contains("AFRINIC Replay Report"), "{md}"); + assert!(md.contains("| snapshot | true | 12.000 | 6.000 | 2.000 | 0 | 0 |"), "{md}"); + assert!(md.contains("| delta | true | 8.000 | 4.000 | 2.000 | 0 | 0 |"), "{md}"); +} + +#[test] +fn write_multi_rir_summary_aggregates_case_reports() { + let dir = tempfile::tempdir().expect("tempdir"); + let case_root = dir.path().join("cases"); + for (rir, snapshot_ratio, delta_ratio) in [ + ("afrinic", 2.0_f64, 3.0_f64), + ("apnic", 1.5_f64, 2.5_f64), + ("arin", 1.1_f64, 1.6_f64), + ("lacnic", 3.8_f64, 4.8_f64), + ("ripe", 2.7_f64, 2.6_f64), + ] { + let rir_dir = case_root.join(rir); + std::fs::create_dir_all(&rir_dir).expect("create rir dir"); + let report = serde_json::json!({ + "rir": rir, + "snapshot": { + "match": true, + "ours_seconds": 10.0, + "routinator_seconds": 5.0, + "ratio": snapshot_ratio, + "compare": {"only_in_ours": 0, "only_in_record": 0} + }, + "delta": { + "match": true, + "ours_seconds": 12.0, + "routinator_seconds": 6.0, + "ratio": delta_ratio, + "compare": {"only_in_ours": 0, "only_in_record": 0} + } + }); + std::fs::write( + rir_dir.join(format!("{rir}_case_report.json")), + serde_json::to_vec_pretty(&report).expect("serialize report"), + ) + .expect("write report"); + } + + let out_md = dir.path().join("summary.md"); + let out_json = dir.path().join("summary.json"); + let script = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("scripts/payload_replay/write_multi_rir_summary.py"); + let out = Command::new("python3") + .arg(&script) + .args([ + "--case-root", + case_root.to_string_lossy().as_ref(), 
+ "--out-md", + out_md.to_string_lossy().as_ref(), + "--out-json", + out_json.to_string_lossy().as_ref(), + ]) + .output() + .expect("run summary script"); + + assert!( + out.status.success(), + "script failed: status={}\nstdout={}\nstderr={}", + out.status, + String::from_utf8_lossy(&out.stdout), + String::from_utf8_lossy(&out.stderr) + ); + + let json: serde_json::Value = + serde_json::from_slice(&std::fs::read(&out_json).expect("read summary json")) + .expect("parse summary json"); + assert_eq!(json["cases"].as_array().map(|v| v.len()), Some(5)); + assert_eq!(json["summary"]["snapshot_all_match"].as_bool(), Some(true)); + assert_eq!(json["summary"]["delta_all_match"].as_bool(), Some(true)); + assert!(json["summary"]["all_ratio_geomean"].as_f64().unwrap_or(0.0) > 0.0); + + let md = std::fs::read_to_string(&out_md).expect("read summary md"); + assert!(md.contains("Multi-RIR Replay Summary"), "{md}"); + assert!(md.contains("| afrinic | true | 10.000 | 5.000 | 2.000 | true | 12.000 | 6.000 | 3.000 |"), "{md}"); +} diff --git a/tests/test_signed_object_ber_indefinite.rs b/tests/test_signed_object_ber_indefinite.rs new file mode 100644 index 0000000..3d19a84 --- /dev/null +++ b/tests/test_signed_object_ber_indefinite.rs @@ -0,0 +1,33 @@ +use base64::Engine; +use rpki::data_model::roa::RoaObject; +use rpki::data_model::signed_object::RpkiSignedObject; + +fn extract_publish_bytes(xml: &str, uri: &str) -> Vec { + let needle = format!(""); + let start = xml.find(&needle).expect("publish uri present") + needle.len(); + let end = xml[start..].find("").expect("publish end") + start; + let b64 = xml[start..end].trim(); + base64::engine::general_purpose::STANDARD + .decode(b64) + .expect("decode publish base64") +} + +#[test] +fn signed_object_decode_accepts_real_arin_ber_indefinite_roa_from_multi_rir_bundle() { + let xml_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join( + 
"../../rpki/target/live/20260316-112341-multi-final3/arin/base-payload-archive/v1/captures/arin-base-arin/rrdp/repos/8bd1405242614aed9f13321d266fe3193db0ca842e0cbffda2b3df05481c331b/4a394319-7460-4141-a416-1addb69284ff/snapshot-160090-880434ae2a6f0e5fff224391c65a22ab037e09ac1d3ebad0ceda18558b49b13e.xml", + ); + assert!(xml_path.is_file(), "xml path missing: {}", xml_path.display()); + let xml = std::fs::read_to_string(&xml_path).expect("read snapshot xml"); + let uri = "rsync://rpki.arin.net/repository/arin-rpki-ta/5e4a23ea-e80a-403e-b08c-2171da2157d3/2a246947-2d62-4a6c-ba05-87187f0099b2/9ed5ce80-224e-46ab-94f1-1afce8ccf13f/0b13beb5-6bbb-3994-a254-02c5b10175c5.roa"; + let der = extract_publish_bytes(&xml, uri); + + assert_eq!(der.first().copied(), Some(0x30)); + assert_eq!(der.get(1).copied(), Some(0x80)); + + let signed_object = RpkiSignedObject::decode_der(&der).expect("decode BER-indefinite CMS signed object"); + assert_eq!(signed_object.signed_data.encap_content_info.econtent_type, "1.2.840.113549.1.9.16.1.24"); + + let roa = RoaObject::decode_der(&der).expect("decode ROA object from BER-indefinite CMS"); + assert!(!roa.roa.ip_addr_blocks.is_empty()); +}