20260316迭代 增加delta replay以及multi-rir

replay 对比,五个RIR 输出vrp与routinator一致
This commit is contained in:
yuyr 2026-03-16 22:54:48 +08:00
parent 73d8ebb5c1
commit 557a69cbd2
24 changed files with 4678 additions and 55 deletions

View File

@ -2,6 +2,69 @@
本目录提供基于本地 payload archive 的手工 replay 入口。
## `multi_rir_case_info.py`
用于从 multi-RIR bundle 中解析指定 `rir` 的输入路径、对照 CSV、fixture、以及 Routinator replay timing 基线。
示例:
```bash
python3 scripts/payload_replay/multi_rir_case_info.py \
--bundle-root ../../rpki/target/live/20260316-112341-multi-final3 \
--rir afrinic
```
也支持输出 shell 环境变量:
```bash
python3 scripts/payload_replay/multi_rir_case_info.py \
--bundle-root ../../rpki/target/live/20260316-112341-multi-final3 \
--rir afrinic \
--format env
```
## `run_multi_rir_replay_case.sh`
统一的 multi-RIR 入口。给定 `rir` 和模式后,它会自动选择该 RIR 的:
- snapshot/base replay 输入
- delta replay 输入
- 对照 CSV
- TAL / TA fixture
- trust anchor 名称
用法:
```bash
./scripts/payload_replay/run_multi_rir_replay_case.sh <rir> [describe|snapshot|delta|both]
```
示例:
```bash
./scripts/payload_replay/run_multi_rir_replay_case.sh afrinic describe
./scripts/payload_replay/run_multi_rir_replay_case.sh lacnic snapshot
./scripts/payload_replay/run_multi_rir_replay_case.sh arin delta
./scripts/payload_replay/run_multi_rir_replay_case.sh ripe both
```
脚本会自动:
- 从 multi-RIR bundle 中选择指定 RIR 的 snapshot/base 与 delta 输入
- 读取该 RIR 的 Routinator `base-replay` / `delta-replay` timing 基线
- 使用该 RIR 的 `timings/base-replay.json` 与 `timings/delta-replay.json` 中的 `startedAt` 作为 replay 的 `--validation-time`
- 在 `target/live/multi_rir_replay_runs/<rir>/` 下生成:
- snapshot replay 产物
- delta replay 产物
- per-RIR 合并 case report(含 correctness + timing compare)
默认 bundle 根目录为:
- `../../rpki/target/live/20260316-112341-multi-final3`
也可以通过 `BUNDLE_ROOT` 覆盖。
## `run_apnic_replay.sh`
默认使用:
@ -98,3 +161,74 @@ python3 scripts/payload_replay/report_to_routinator_csv.py \
- compare summary
- `only_in_ours.csv`
- `only_in_record.csv`
## `run_apnic_delta_replay.sh`
使用 APNIC delta demo 数据集运行 base + delta replay
```bash
./scripts/payload_replay/run_apnic_delta_replay.sh
```
默认输入:
- `target/live/apnic_delta_demo/20260315-170223-autoplay/base-payload-archive`
- `target/live/apnic_delta_demo/20260315-170223-autoplay/base-locks.json`
- `target/live/apnic_delta_demo/20260315-170223-autoplay/payload-delta-archive`
- `target/live/apnic_delta_demo/20260315-170223-autoplay/locks-delta.json`
- `tests/fixtures/tal/apnic-rfc7730-https.tal`
- `tests/fixtures/ta/apnic-ta.cer`
输出目录默认:`target/live/payload_delta_replay_runs/`
## `run_apnic_delta_replay.sh` compare outputs
脚本现在在 delta replay 结束后还会额外生成:
- `vrps.csv`
- compare summary Markdown
- `only_in_ours.csv`
- `only_in_record.csv`
默认 compare 输入是:
- `target/live/apnic_delta_demo/20260315-170223-autoplay/record-delta.csv`
也可以通过环境变量覆盖:
- `TRUST_ANCHOR`
- `ROUTINATOR_RECORD_CSV`
- `VRPS_CSV`
- `COMPARE_SUMMARY_MD`
- `ONLY_IN_OURS_CSV`
- `ONLY_IN_RECORD_CSV`
## `write_multi_rir_case_report.py`
把某个 RIR 的 snapshot replay 与 delta replay 的 `meta.json`、compare summary 以及 Routinator timing 基线合并成一个 per-RIR Markdown/JSON 报告。
该脚本通常由 `run_multi_rir_replay_case.sh <rir> both` 自动调用。
## `run_multi_rir_replay_suite.sh`
顺序执行 5 个 RIR(或环境变量 `RIRS` 指定的子集)的 `both` 模式,并最终生成 multi-RIR 汇总报告。
```bash
./scripts/payload_replay/run_multi_rir_replay_suite.sh
```
可覆盖环境变量:
- `BUNDLE_ROOT`
- `SUITE_OUT_DIR`
- `RIRS`
最终输出:
- `<suite_out_dir>/<rir>/<rir>_case_report.md`
- `<suite_out_dir>/multi_rir_summary.md`
- `<suite_out_dir>/multi_rir_summary.json`
## `write_multi_rir_summary.py`
汇总 5 个 RIR 的 per-RIR case report生成 correctness + timing 总表与几何平均比值。

View File

@ -0,0 +1,158 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import shlex
import sys
from pathlib import Path
# Per-RIR replay fixtures: TAL file, trust-anchor certificate, and the
# trust-anchor label used when emitting Routinator-compatible CSV rows.
# TAL/TA paths are relative to the repository root (see default_repo_root()).
RIR_CONFIG = {
    "afrinic": {
        "tal": "tests/fixtures/tal/afrinic.tal",
        "ta": "tests/fixtures/ta/afrinic-ta.cer",
        "trust_anchor": "afrinic",
    },
    "apnic": {
        "tal": "tests/fixtures/tal/apnic-rfc7730-https.tal",
        "ta": "tests/fixtures/ta/apnic-ta.cer",
        "trust_anchor": "apnic",
    },
    "arin": {
        "tal": "tests/fixtures/tal/arin.tal",
        "ta": "tests/fixtures/ta/arin-ta.cer",
        "trust_anchor": "arin",
    },
    "lacnic": {
        "tal": "tests/fixtures/tal/lacnic.tal",
        "ta": "tests/fixtures/ta/lacnic-ta.cer",
        "trust_anchor": "lacnic",
    },
    "ripe": {
        "tal": "tests/fixtures/tal/ripe-ncc.tal",
        "ta": "tests/fixtures/ta/ripe-ncc-ta.cer",
        "trust_anchor": "ripe",
    },
}
def default_repo_root() -> Path:
    """Return the repository root, two directory levels above this script."""
    script_path = Path(__file__).resolve()
    return script_path.parent.parent.parent
def default_bundle_root(repo_root: Path) -> Path:
    """Resolve the default multi-RIR bundle directory relative to *repo_root*."""
    bundle = repo_root / "../../rpki/target/live/20260316-112341-multi-final3"
    return bundle.resolve()
def require_path(path: Path, kind: str) -> Path:
    """Validate that *path* exists as the expected kind ("dir" or "file").

    Raises SystemExit with a descriptive message when the check fails;
    otherwise returns the path unchanged. Unknown kinds are not checked.
    """
    checks = {"dir": Path.is_dir, "file": Path.is_file}
    probe = checks.get(kind)
    if probe is not None and not probe(path):
        noun = "directory" if kind == "dir" else "file"
        raise SystemExit(f"missing {noun}: {path}")
    return path
def load_timing_summary(bundle_root: Path) -> dict:
    """Load the bundle-level timing-summary.json (per-RIR Routinator durations)."""
    timing_path = require_path(bundle_root / "timing-summary.json", "file")
    return json.loads(timing_path.read_text(encoding="utf-8"))
def build_case(bundle_root: Path, repo_root: Path, rir: str) -> dict:
    """Resolve all inputs, baselines, and fixtures for one RIR replay case.

    Validates that every expected file/directory exists inside
    ``bundle_root/<rir>`` and under ``repo_root`` (TAL/TA fixtures), then
    returns a flat dict of string paths plus validation times and
    Routinator timing baselines. Exits via SystemExit on any missing piece.
    """
    if rir not in RIR_CONFIG:
        raise SystemExit(
            f"unsupported rir: {rir}; expected one of: {', '.join(sorted(RIR_CONFIG))}"
        )
    rir_root = require_path(bundle_root / rir, "dir")
    cfg = RIR_CONFIG[rir]
    # The bundle-level timing summary must contain this RIR's durations.
    timing_summary = load_timing_summary(bundle_root)
    if rir not in timing_summary:
        raise SystemExit(f"timing-summary.json missing entry for rir: {rir}")
    timing_entry = timing_summary[rir]
    durations = timing_entry.get("durations") or {}
    # Per-phase Routinator timing JSONs; their startedAt doubles as the
    # --validation-time for the corresponding replay run.
    base_timing = require_path(rir_root / "timings" / "base-replay.json", "file")
    delta_timing = require_path(rir_root / "timings" / "delta-replay.json", "file")
    base_timing_obj = json.loads(base_timing.read_text(encoding="utf-8"))
    delta_timing_obj = json.loads(delta_timing.read_text(encoding="utf-8"))
    case = {
        "bundle_root": str(bundle_root),
        "repo_root": str(repo_root),
        "rir": rir,
        "trust_anchor": cfg["trust_anchor"],
        "rir_root": str(rir_root),
        "base_archive": str(require_path(rir_root / "base-payload-archive", "dir")),
        "base_locks": str(require_path(rir_root / "base-locks.json", "file")),
        "base_vrps_csv": str(require_path(rir_root / "base-vrps.csv", "file")),
        "delta_archive": str(require_path(rir_root / "payload-delta-archive", "dir")),
        "delta_locks": str(require_path(rir_root / "locks-delta.json", "file")),
        "delta_record_csv": str(require_path(rir_root / "record-delta.csv", "file")),
        "replay_delta_csv": str(require_path(rir_root / "replay-delta.csv", "file")),
        "verification_json": str(require_path(rir_root / "verification.json", "file")),
        "readme": str(require_path(rir_root / "README.md", "file")),
        "timings_dir": str(require_path(rir_root / "timings", "dir")),
        "base_timing_json": str(base_timing),
        "delta_timing_json": str(delta_timing),
        "tal_path": str(require_path(repo_root / cfg["tal"], "file")),
        "ta_path": str(require_path(repo_root / cfg["ta"], "file")),
        "validation_times": {
            "snapshot": base_timing_obj["startedAt"],
            "delta": delta_timing_obj["startedAt"],
        },
        "routinator_timings": {
            "base_replay_seconds": float(durations["base-replay"]),
            "delta_replay_seconds": float(durations["delta-replay"]),
        },
    }
    return case
def emit_env(case: dict) -> str:
    """Render the resolved case as newline-joined, shell-quoted `export` lines.

    The output is designed to be consumed via `eval "$(... --format env)"`
    by the replay shell scripts; key order is fixed and documented there.
    """
    pairs = [
        ("BUNDLE_ROOT", case["bundle_root"]),
        ("RIR", case["rir"]),
        ("TRUST_ANCHOR", case["trust_anchor"]),
        ("RIR_ROOT", case["rir_root"]),
        ("TAL_PATH", case["tal_path"]),
        ("TA_PATH", case["ta_path"]),
        ("PAYLOAD_REPLAY_ARCHIVE", case["base_archive"]),
        ("PAYLOAD_REPLAY_LOCKS", case["base_locks"]),
        ("ROUTINATOR_BASE_RECORD_CSV", case["base_vrps_csv"]),
        ("PAYLOAD_BASE_ARCHIVE", case["base_archive"]),
        ("PAYLOAD_BASE_LOCKS", case["base_locks"]),
        ("PAYLOAD_DELTA_ARCHIVE", case["delta_archive"]),
        ("PAYLOAD_DELTA_LOCKS", case["delta_locks"]),
        ("ROUTINATOR_DELTA_RECORD_CSV", case["delta_record_csv"]),
        ("SNAPSHOT_VALIDATION_TIME", case["validation_times"]["snapshot"]),
        ("DELTA_VALIDATION_TIME", case["validation_times"]["delta"]),
        ("ROUTINATOR_BASE_REPLAY_SECONDS", str(case["routinator_timings"]["base_replay_seconds"])),
        ("ROUTINATOR_DELTA_REPLAY_SECONDS", str(case["routinator_timings"]["delta_replay_seconds"])),
    ]
    rendered = []
    for key, value in pairs:
        rendered.append(f"export {key}={shlex.quote(value)}")
    return "\n".join(rendered)
def main() -> int:
    """CLI entry point: resolve one RIR case and print it as JSON or env exports."""
    parser = argparse.ArgumentParser(description="Resolve one RIR case inside a multi-RIR replay bundle")
    parser.add_argument("--bundle-root", type=Path, default=None)
    parser.add_argument("--repo-root", type=Path, default=None)
    parser.add_argument("--rir", required=True, choices=sorted(RIR_CONFIG))
    parser.add_argument("--format", choices=["json", "env"], default="json")
    args = parser.parse_args()
    # Fall back to repository-relative defaults when roots are not supplied.
    repo_root = (args.repo_root or default_repo_root()).resolve()
    bundle_root = (args.bundle_root or default_bundle_root(repo_root)).resolve()
    case = build_case(bundle_root, repo_root, args.rir)
    if args.format == "env":
        # Shell-quoted `export KEY=...` lines, suitable for `eval "$(...)"`.
        print(emit_env(case))
    else:
        print(json.dumps(case, ensure_ascii=False, indent=2))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,146 @@
#!/usr/bin/env bash
# Run a base + delta payload replay against the APNIC delta demo dataset.
# After the replay this script writes meta.json + summary.md, exports the
# resulting VRPs as Routinator-compatible CSV, and — when a recorded
# baseline CSV exists — produces a comparison (summary + one-sided diffs).
# Every input/output location can be overridden via environment variables;
# run_multi_rir_replay_case.sh reuses this script for other RIRs that way.
set -euo pipefail

# Repository root is two levels above this script; run everything from there.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"

# --- Replay inputs (overridable) -------------------------------------------
DELTA_ROOT="${DELTA_ROOT:-$ROOT_DIR/target/live/apnic_delta_demo/20260315-170223-autoplay}"
TAL_PATH="${TAL_PATH:-$ROOT_DIR/tests/fixtures/tal/apnic-rfc7730-https.tal}"
TA_PATH="${TA_PATH:-$ROOT_DIR/tests/fixtures/ta/apnic-ta.cer}"
PAYLOAD_BASE_ARCHIVE="${PAYLOAD_BASE_ARCHIVE:-$DELTA_ROOT/base-payload-archive}"
PAYLOAD_BASE_LOCKS="${PAYLOAD_BASE_LOCKS:-$DELTA_ROOT/base-locks.json}"
PAYLOAD_DELTA_ARCHIVE="${PAYLOAD_DELTA_ARCHIVE:-$DELTA_ROOT/payload-delta-archive}"
PAYLOAD_DELTA_LOCKS="${PAYLOAD_DELTA_LOCKS:-$DELTA_ROOT/locks-delta.json}"
# RFC3339 validation time handed to the replay binary.
VALIDATION_TIME="${VALIDATION_TIME:-2026-03-15T10:00:00Z}"
# Optional distinct validation time for the base bootstrap (empty = reuse VALIDATION_TIME).
PAYLOAD_BASE_VALIDATION_TIME="${PAYLOAD_BASE_VALIDATION_TIME:-}"
TRUST_ANCHOR="${TRUST_ANCHOR:-apnic}"
ROUTINATOR_RECORD_CSV="${ROUTINATOR_RECORD_CSV:-$DELTA_ROOT/record-delta.csv}"
# Optional traversal limits, forwarded to the binary only when non-empty.
MAX_DEPTH="${MAX_DEPTH:-}"
MAX_INSTANCES="${MAX_INSTANCES:-}"

# --- Output artifact locations (overridable) -------------------------------
OUT_DIR="${OUT_DIR:-$ROOT_DIR/target/live/payload_delta_replay_runs}"
mkdir -p "$OUT_DIR"
TS="$(date -u +%Y%m%dT%H%M%SZ)"
RUN_NAME="${RUN_NAME:-apnic_delta_replay_${TS}}"
DB_DIR="${DB_DIR:-$OUT_DIR/${RUN_NAME}_db}"
REPORT_JSON="${REPORT_JSON:-$OUT_DIR/${RUN_NAME}_report.json}"
RUN_LOG="${RUN_LOG:-$OUT_DIR/${RUN_NAME}_run.log}"
META_JSON="${META_JSON:-$OUT_DIR/${RUN_NAME}_meta.json}"
SUMMARY_MD="${SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_summary.md}"
VRPS_CSV="${VRPS_CSV:-$OUT_DIR/${RUN_NAME}_vrps.csv}"
COMPARE_SUMMARY_MD="${COMPARE_SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_compare_summary.md}"
ONLY_IN_OURS_CSV="${ONLY_IN_OURS_CSV:-$OUT_DIR/${RUN_NAME}_only_in_ours.csv}"
ONLY_IN_RECORD_CSV="${ONLY_IN_RECORD_CSV:-$OUT_DIR/${RUN_NAME}_only_in_record.csv}"

# Assemble the delta-replay invocation; optional limits are appended below.
cmd=(cargo run --release --bin rpki --
    --db "$DB_DIR"
    --tal-path "$TAL_PATH"
    --ta-path "$TA_PATH"
    --payload-base-archive "$PAYLOAD_BASE_ARCHIVE"
    --payload-base-locks "$PAYLOAD_BASE_LOCKS"
    --payload-delta-archive "$PAYLOAD_DELTA_ARCHIVE"
    --payload-delta-locks "$PAYLOAD_DELTA_LOCKS"
    --validation-time "$VALIDATION_TIME"
    --report-json "$REPORT_JSON")
if [[ -n "$MAX_DEPTH" ]]; then
    cmd+=(--max-depth "$MAX_DEPTH")
fi
if [[ -n "$MAX_INSTANCES" ]]; then
    cmd+=(--max-instances "$MAX_INSTANCES")
fi

# Run the replay, capturing the exact command plus all output in RUN_LOG.
run_start_s="$(date +%s)"
(
    echo "# command:"
    printf '%q ' "${cmd[@]}"
    echo
    echo
    "${cmd[@]}"
) 2>&1 | tee "$RUN_LOG" >/dev/null
run_end_s="$(date +%s)"
run_duration_s="$((run_end_s - run_start_s))"

# Derive meta.json and summary.md from the report via an inline Python
# helper; inputs are passed through the environment and argv.
PAYLOAD_BASE_ARCHIVE="$PAYLOAD_BASE_ARCHIVE" \
PAYLOAD_BASE_LOCKS="$PAYLOAD_BASE_LOCKS" \
PAYLOAD_DELTA_ARCHIVE="$PAYLOAD_DELTA_ARCHIVE" \
PAYLOAD_DELTA_LOCKS="$PAYLOAD_DELTA_LOCKS" \
PAYLOAD_BASE_VALIDATION_TIME="$PAYLOAD_BASE_VALIDATION_TIME" \
DB_DIR="$DB_DIR" \
REPORT_JSON="$REPORT_JSON" \
RUN_LOG="$RUN_LOG" \
VALIDATION_TIME="$VALIDATION_TIME" \
RUN_DURATION_S="$run_duration_s" \
python3 - "$REPORT_JSON" "$META_JSON" "$SUMMARY_MD" <<'PY'
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path

# argv: report.json (input), meta.json (output), summary.md (output)
report_path = Path(sys.argv[1])
meta_path = Path(sys.argv[2])
summary_path = Path(sys.argv[3])
rep = json.loads(report_path.read_text(encoding='utf-8'))
now = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
meta = {
    'recorded_at_utc': now,
    'payload_base_archive': os.environ['PAYLOAD_BASE_ARCHIVE'],
    'payload_base_locks': os.environ['PAYLOAD_BASE_LOCKS'],
    'payload_delta_archive': os.environ['PAYLOAD_DELTA_ARCHIVE'],
    'payload_delta_locks': os.environ['PAYLOAD_DELTA_LOCKS'],
    'db_dir': os.environ['DB_DIR'],
    'report_json': os.environ['REPORT_JSON'],
    'run_log': os.environ['RUN_LOG'],
    'validation_time_arg': os.environ['VALIDATION_TIME'],
    # Base bootstrap falls back to the main validation time when unset.
    'base_validation_time_arg': os.environ.get('PAYLOAD_BASE_VALIDATION_TIME') or os.environ['VALIDATION_TIME'],
    'durations_secs': {'rpki_run': int(os.environ['RUN_DURATION_S'])},
    'counts': {
        'publication_points_processed': rep['tree']['instances_processed'],
        'publication_points_failed': rep['tree']['instances_failed'],
        'vrps': len(rep['vrps']),
        'aspas': len(rep['aspas']),
        'audit_publication_points': len(rep['publication_points']),
    },
}
meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2)+'\n', encoding='utf-8')
# Human-readable Markdown rendering of the same data.
summary = []
summary.append('# Payload Delta Replay Summary\n\n')
for key in ['payload_base_archive','payload_base_locks','payload_delta_archive','payload_delta_locks','db_dir','report_json','base_validation_time_arg','validation_time_arg']:
    summary.append(f'- {key}: `{meta[key]}`\n')
summary.append('\n## Results\n\n| metric | value |\n|---|---:|\n')
for k,v in meta['counts'].items():
    summary.append(f'| {k} | {v} |\n')
summary.append('\n## Durations\n\n| step | seconds |\n|---|---:|\n')
for k,v in meta['durations_secs'].items():
    summary.append(f'| {k} | {v} |\n')
summary_path.write_text(''.join(summary), encoding='utf-8')
print(summary_path)
PY

# Export the replay result as Routinator-compatible VRP CSV.
python3 scripts/payload_replay/report_to_routinator_csv.py \
    --report "$REPORT_JSON" \
    --out "$VRPS_CSV" \
    --trust-anchor "$TRUST_ANCHOR" >/dev/null

# Compare against the recorded baseline CSV when one is available.
if [[ -f "$ROUTINATOR_RECORD_CSV" ]]; then
    ./scripts/payload_replay/compare_with_routinator_record.sh \
        "$VRPS_CSV" \
        "$ROUTINATOR_RECORD_CSV" \
        "$COMPARE_SUMMARY_MD" \
        "$ONLY_IN_OURS_CSV" \
        "$ONLY_IN_RECORD_CSV" >/dev/null
fi

# Final artifact listing on stderr (stdout is kept clean for callers).
echo "== payload delta replay run complete ==" >&2
echo "- db: $DB_DIR" >&2
echo "- report: $REPORT_JSON" >&2
echo "- run log: $RUN_LOG" >&2
echo "- meta json: $META_JSON" >&2
echo "- summary md: $SUMMARY_MD" >&2
echo "- vrps csv: $VRPS_CSV" >&2
if [[ -f "$COMPARE_SUMMARY_MD" ]]; then
    echo "- compare summary: $COMPARE_SUMMARY_MD" >&2
    echo "- only in ours: $ONLY_IN_OURS_CSV" >&2
    echo "- only in record: $ONLY_IN_RECORD_CSV" >&2
fi

View File

@ -0,0 +1,128 @@
#!/usr/bin/env bash
# Unified multi-RIR replay entry point.
# Given a RIR and a mode, resolve that RIR's inputs (archives, locks,
# baseline CSVs, TAL/TA fixtures, validation times, Routinator timing
# baselines) from the bundle via multi_rir_case_info.py, then run snapshot
# and/or delta replay; "both" additionally writes a merged case report.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"

# usage: run_multi_rir_replay_case.sh <rir> [describe|snapshot|delta|both]
if [[ $# -lt 1 || $# -gt 2 ]]; then
    echo "usage: $0 <rir> [describe|snapshot|delta|both]" >&2
    exit 2
fi
RIR="$1"
MODE="${2:-both}"

BUNDLE_ROOT="${BUNDLE_ROOT:-$ROOT_DIR/../../rpki/target/live/20260316-112341-multi-final3}"
CASE_INFO_SCRIPT="$ROOT_DIR/scripts/payload_replay/multi_rir_case_info.py"
CASE_REPORT_SCRIPT="$ROOT_DIR/scripts/payload_replay/write_multi_rir_case_report.py"
MULTI_RIR_OUT_DIR="${MULTI_RIR_OUT_DIR:-$ROOT_DIR/target/live/multi_rir_replay_runs/$RIR}"
mkdir -p "$MULTI_RIR_OUT_DIR"

# Resolve the RIR case and import it as environment variables (RIR_ROOT,
# TAL_PATH, PAYLOAD_* inputs, *_VALIDATION_TIME, ROUTINATOR_* baselines, ...).
eval "$(python3 "$CASE_INFO_SCRIPT" --bundle-root "$BUNDLE_ROOT" --rir "$RIR" --format env)"

# Snapshot-run artifact locations (overridable).
SNAPSHOT_DB_DIR="${SNAPSHOT_DB_DIR:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_replay_db}"
SNAPSHOT_REPORT_MD="${SNAPSHOT_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_compare_summary.md}"
SNAPSHOT_META_JSON="${SNAPSHOT_META_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_meta.json}"
SNAPSHOT_RUN_LOG="${SNAPSHOT_RUN_LOG:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_run.log}"
SNAPSHOT_REPORT_JSON="${SNAPSHOT_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_report.json}"
SNAPSHOT_VRPS_CSV="${SNAPSHOT_VRPS_CSV:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_vrps.csv}"
SNAPSHOT_ONLY_OURS="${SNAPSHOT_ONLY_OURS:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_only_in_ours.csv}"
SNAPSHOT_ONLY_RECORD="${SNAPSHOT_ONLY_RECORD:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_only_in_record.csv}"
# Delta-run artifact locations (overridable).
DELTA_DB_DIR="${DELTA_DB_DIR:-$MULTI_RIR_OUT_DIR/${RIR}_delta_replay_db}"
DELTA_REPORT_MD="${DELTA_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_delta_compare_summary.md}"
DELTA_META_JSON="${DELTA_META_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_delta_meta.json}"
DELTA_RUN_LOG="${DELTA_RUN_LOG:-$MULTI_RIR_OUT_DIR/${RIR}_delta_run.log}"
DELTA_REPORT_JSON="${DELTA_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_delta_report.json}"
DELTA_VRPS_CSV="${DELTA_VRPS_CSV:-$MULTI_RIR_OUT_DIR/${RIR}_delta_vrps.csv}"
DELTA_ONLY_OURS="${DELTA_ONLY_OURS:-$MULTI_RIR_OUT_DIR/${RIR}_delta_only_in_ours.csv}"
DELTA_ONLY_RECORD="${DELTA_ONLY_RECORD:-$MULTI_RIR_OUT_DIR/${RIR}_delta_only_in_record.csv}"
# Merged per-RIR case report (written in "both" mode).
CASE_REPORT_JSON="${CASE_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_case_report.json}"
CASE_REPORT_MD="${CASE_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_case_report.md}"

case "$MODE" in
describe)
    # Print the resolved case as JSON; no replay is executed.
    python3 "$CASE_INFO_SCRIPT" --bundle-root "$BUNDLE_ROOT" --rir "$RIR"
    ;;
snapshot)
    # Snapshot-only replay of the base archive, compared to the base CSV.
    rm -rf "$SNAPSHOT_DB_DIR"
    ROUTINATOR_RECORD_CSV="$ROUTINATOR_BASE_RECORD_CSV" \
    VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
    OUT_DIR="$MULTI_RIR_OUT_DIR" \
    DB_DIR="$SNAPSHOT_DB_DIR" \
    RUN_NAME="${RUN_NAME:-${RIR}_snapshot_replay}" \
    META_JSON="$SNAPSHOT_META_JSON" \
    RUN_LOG="$SNAPSHOT_RUN_LOG" \
    REPORT_JSON="$SNAPSHOT_REPORT_JSON" \
    VRPS_CSV="$SNAPSHOT_VRPS_CSV" \
    COMPARE_SUMMARY_MD="$SNAPSHOT_REPORT_MD" \
    ONLY_IN_OURS_CSV="$SNAPSHOT_ONLY_OURS" \
    ONLY_IN_RECORD_CSV="$SNAPSHOT_ONLY_RECORD" \
    ./scripts/payload_replay/run_apnic_replay.sh
    ;;
delta)
    # Delta replay on top of the base, compared to the delta record CSV.
    rm -rf "$DELTA_DB_DIR"
    ROUTINATOR_RECORD_CSV="$ROUTINATOR_DELTA_RECORD_CSV" \
    VALIDATION_TIME="$DELTA_VALIDATION_TIME" \
    PAYLOAD_BASE_VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
    OUT_DIR="$MULTI_RIR_OUT_DIR" \
    DB_DIR="$DELTA_DB_DIR" \
    RUN_NAME="${RUN_NAME:-${RIR}_delta_replay}" \
    DELTA_ROOT="$RIR_ROOT" \
    META_JSON="$DELTA_META_JSON" \
    RUN_LOG="$DELTA_RUN_LOG" \
    REPORT_JSON="$DELTA_REPORT_JSON" \
    VRPS_CSV="$DELTA_VRPS_CSV" \
    COMPARE_SUMMARY_MD="$DELTA_REPORT_MD" \
    ONLY_IN_OURS_CSV="$DELTA_ONLY_OURS" \
    ONLY_IN_RECORD_CSV="$DELTA_ONLY_RECORD" \
    ./scripts/payload_replay/run_apnic_delta_replay.sh
    ;;
both)
    # Snapshot replay, then delta replay, then the merged case report.
    rm -rf "$SNAPSHOT_DB_DIR" "$DELTA_DB_DIR"
    ROUTINATOR_RECORD_CSV="$ROUTINATOR_BASE_RECORD_CSV" \
    VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
    OUT_DIR="$MULTI_RIR_OUT_DIR" \
    DB_DIR="$SNAPSHOT_DB_DIR" \
    RUN_NAME="${RUN_NAME_SNAPSHOT:-${RIR}_snapshot_replay}" \
    META_JSON="$SNAPSHOT_META_JSON" \
    RUN_LOG="$SNAPSHOT_RUN_LOG" \
    REPORT_JSON="$SNAPSHOT_REPORT_JSON" \
    VRPS_CSV="$SNAPSHOT_VRPS_CSV" \
    COMPARE_SUMMARY_MD="$SNAPSHOT_REPORT_MD" \
    ONLY_IN_OURS_CSV="$SNAPSHOT_ONLY_OURS" \
    ONLY_IN_RECORD_CSV="$SNAPSHOT_ONLY_RECORD" \
    ./scripts/payload_replay/run_apnic_replay.sh
    ROUTINATOR_RECORD_CSV="$ROUTINATOR_DELTA_RECORD_CSV" \
    VALIDATION_TIME="$DELTA_VALIDATION_TIME" \
    PAYLOAD_BASE_VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
    OUT_DIR="$MULTI_RIR_OUT_DIR" \
    DB_DIR="$DELTA_DB_DIR" \
    RUN_NAME="${RUN_NAME_DELTA:-${RIR}_delta_replay}" \
    DELTA_ROOT="$RIR_ROOT" \
    META_JSON="$DELTA_META_JSON" \
    RUN_LOG="$DELTA_RUN_LOG" \
    REPORT_JSON="$DELTA_REPORT_JSON" \
    VRPS_CSV="$DELTA_VRPS_CSV" \
    COMPARE_SUMMARY_MD="$DELTA_REPORT_MD" \
    ONLY_IN_OURS_CSV="$DELTA_ONLY_OURS" \
    ONLY_IN_RECORD_CSV="$DELTA_ONLY_RECORD" \
    ./scripts/payload_replay/run_apnic_delta_replay.sh
    # Merge both runs + Routinator baselines into one per-RIR report.
    python3 "$CASE_REPORT_SCRIPT" \
        --rir "$RIR" \
        --snapshot-meta "$SNAPSHOT_META_JSON" \
        --snapshot-compare "$SNAPSHOT_REPORT_MD" \
        --delta-meta "$DELTA_META_JSON" \
        --delta-compare "$DELTA_REPORT_MD" \
        --routinator-base-seconds "$ROUTINATOR_BASE_REPLAY_SECONDS" \
        --routinator-delta-seconds "$ROUTINATOR_DELTA_REPLAY_SECONDS" \
        --out-md "$CASE_REPORT_MD" \
        --out-json "$CASE_REPORT_JSON" >/dev/null
    echo "- case report: $CASE_REPORT_MD" >&2
    echo "- case report json: $CASE_REPORT_JSON" >&2
    ;;
*)
    echo "unsupported mode: $MODE; expected describe|snapshot|delta|both" >&2
    exit 2
    ;;
esac

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Run the full multi-RIR replay suite: execute "both" (snapshot + delta)
# for every RIR in $RIRS via run_multi_rir_replay_case.sh, then aggregate
# the per-RIR case reports into multi_rir_summary.{md,json}.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"

# Overridable: bundle location, output root, and the RIR subset to run.
BUNDLE_ROOT="${BUNDLE_ROOT:-$ROOT_DIR/../../rpki/target/live/20260316-112341-multi-final3}"
SUITE_OUT_DIR="${SUITE_OUT_DIR:-$ROOT_DIR/target/live/multi_rir_replay_runs}"
RIRS="${RIRS:-afrinic apnic arin lacnic ripe}"
CASE_SCRIPT="$ROOT_DIR/scripts/payload_replay/run_multi_rir_replay_case.sh"
SUMMARY_SCRIPT="$ROOT_DIR/scripts/payload_replay/write_multi_rir_summary.py"
mkdir -p "$SUITE_OUT_DIR"

# RIRS is intentionally unquoted: each whitespace-separated name is one case.
for rir in $RIRS; do
    # Each case writes into its own subdirectory of SUITE_OUT_DIR.
    MULTI_RIR_OUT_DIR="$SUITE_OUT_DIR/$rir" \
    BUNDLE_ROOT="$BUNDLE_ROOT" \
    "$CASE_SCRIPT" "$rir" both
    echo "completed $rir" >&2
    echo >&2
done

# Aggregate the per-RIR case reports ($RIRS unquoted -> one arg per RIR).
python3 "$SUMMARY_SCRIPT" \
    --case-root "$SUITE_OUT_DIR" \
    --out-md "$SUITE_OUT_DIR/multi_rir_summary.md" \
    --out-json "$SUITE_OUT_DIR/multi_rir_summary.json" \
    --rirs $RIRS >/dev/null

echo "== multi-RIR replay suite complete ==" >&2
echo "- suite_out_dir: $SUITE_OUT_DIR" >&2
echo "- summary_md: $SUITE_OUT_DIR/multi_rir_summary.md" >&2
echo "- summary_json: $SUITE_OUT_DIR/multi_rir_summary.json" >&2

View File

@ -0,0 +1,133 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
from pathlib import Path
def parse_args() -> argparse.Namespace:
    """Parse CLI options for generating one multi-RIR replay case report."""
    parser = argparse.ArgumentParser(description="Generate one multi-RIR replay case report")
    # All string-valued inputs/outputs.
    for flag in ("--rir", "--snapshot-meta", "--snapshot-compare", "--delta-meta", "--delta-compare"):
        parser.add_argument(flag, required=True)
    # Routinator timing baselines in seconds.
    for flag in ("--routinator-base-seconds", "--routinator-delta-seconds"):
        parser.add_argument(flag, required=True, type=float)
    for flag in ("--out-md", "--out-json"):
        parser.add_argument(flag, required=True)
    return parser.parse_args()
def read_json(path: str) -> dict:
    """Load a JSON document from *path* (UTF-8)."""
    with open(path, encoding="utf-8") as fh:
        return json.load(fh)
def parse_compare_md(path: str) -> dict:
    """Extract integer metrics from a compare-summary Markdown table.

    Only two-column rows shaped like `| key | value |` are considered;
    the header row ("metric") and rows with non-integer values are skipped.
    """
    out: dict = {}
    for raw in Path(path).read_text(encoding="utf-8").splitlines():
        if not raw.startswith("| "):
            continue
        cells = [cell.strip() for cell in raw.strip("|").split("|")]
        if len(cells) != 2:
            continue
        name, text = cells
        if name in {"metric", "---"}:
            continue
        try:
            out[name] = int(text)
        except ValueError:
            continue
    return out
def ratio(ours: float, baseline: float) -> float | None:
    """Return ours/baseline, or None when the baseline is not positive."""
    return ours / baseline if baseline > 0 else None
def build_report(args: argparse.Namespace) -> dict:
    """Assemble one RIR's case report from run metas, compare summaries
    and the Routinator timing baselines.

    For snapshot and delta mode each: our wall-clock seconds, the
    Routinator baseline, their ratio, the parsed compare metrics, a
    boolean `match`, and the run's counts.
    """
    snapshot_meta = read_json(args.snapshot_meta)
    delta_meta = read_json(args.delta_meta)
    snapshot_compare = parse_compare_md(args.snapshot_compare)
    delta_compare = parse_compare_md(args.delta_compare)
    snapshot_ours = float(snapshot_meta["durations_secs"]["rpki_run"])
    delta_ours = float(delta_meta["durations_secs"]["rpki_run"])
    report = {
        "rir": args.rir,
        "snapshot": {
            "meta_json": str(Path(args.snapshot_meta).resolve()),
            "compare_md": str(Path(args.snapshot_compare).resolve()),
            "ours_seconds": snapshot_ours,
            "routinator_seconds": args.routinator_base_seconds,
            "ratio": ratio(snapshot_ours, args.routinator_base_seconds),
            "compare": snapshot_compare,
            # Match = no residual rows on either side of the diff;
            # the -1 default makes a missing metric count as a mismatch.
            "match": snapshot_compare.get("only_in_ours", -1) == 0
            and snapshot_compare.get("only_in_record", -1) == 0,
            "counts": snapshot_meta.get("counts", {}),
        },
        "delta": {
            "meta_json": str(Path(args.delta_meta).resolve()),
            "compare_md": str(Path(args.delta_compare).resolve()),
            "ours_seconds": delta_ours,
            "routinator_seconds": args.routinator_delta_seconds,
            "ratio": ratio(delta_ours, args.routinator_delta_seconds),
            "compare": delta_compare,
            "match": delta_compare.get("only_in_ours", -1) == 0
            and delta_compare.get("only_in_record", -1) == 0,
            "counts": delta_meta.get("counts", {}),
        },
    }
    return report
def write_md(path: Path, report: dict) -> None:
    """Render the per-RIR case report as Markdown at *path*.

    The summary table shows, for snapshot and delta mode: whether the VRP
    sets matched the recorded baseline, wall-clock seconds for our run and
    Routinator, their ratio, and the residual diff counts; input paths and
    per-run counts follow.
    """

    def fmt_ratio(value) -> str:
        # ratio() yields None when the Routinator baseline is not positive;
        # formatting None with ':.3f' would raise TypeError, so emit 'n/a'.
        return f"{value:.3f}" if value is not None else "n/a"

    snapshot = report["snapshot"]
    delta = report["delta"]
    lines = []
    lines.append(f"# {report['rir'].upper()} Replay Report\n\n")
    lines.append("## Summary\n\n")
    lines.append("| mode | match | ours_s | routinator_s | ratio | only_in_ours | only_in_record |\n")
    lines.append("|---|---|---:|---:|---:|---:|---:|\n")
    lines.append(
        f"| snapshot | {str(snapshot['match']).lower()} | {snapshot['ours_seconds']:.3f} | {snapshot['routinator_seconds']:.3f} | {fmt_ratio(snapshot['ratio'])} | {snapshot['compare'].get('only_in_ours', 0)} | {snapshot['compare'].get('only_in_record', 0)} |\n"
    )
    lines.append(
        f"| delta | {str(delta['match']).lower()} | {delta['ours_seconds']:.3f} | {delta['routinator_seconds']:.3f} | {fmt_ratio(delta['ratio'])} | {delta['compare'].get('only_in_ours', 0)} | {delta['compare'].get('only_in_record', 0)} |\n"
    )
    lines.append("\n## Snapshot Inputs\n\n")
    lines.append(f"- meta_json: `{snapshot['meta_json']}`\n")
    lines.append(f"- compare_md: `{snapshot['compare_md']}`\n")
    lines.append("\n## Delta Inputs\n\n")
    lines.append(f"- meta_json: `{delta['meta_json']}`\n")
    lines.append(f"- compare_md: `{delta['compare_md']}`\n")
    lines.append("\n## Counts\n\n")
    lines.append("### Snapshot\n\n")
    for k, v in sorted(snapshot.get("counts", {}).items()):
        lines.append(f"- {k}: `{v}`\n")
    lines.append("\n### Delta\n\n")
    for k, v in sorted(delta.get("counts", {}).items()):
        lines.append(f"- {k}: `{v}`\n")
    path.write_text("".join(lines), encoding="utf-8")
def main() -> int:
    """CLI entry point: build the case report and write it as JSON + Markdown."""
    args = parse_args()
    report = build_report(args)
    out_json = Path(args.out_json)
    out_md = Path(args.out_md)
    # Create parent directories so fresh output trees work out of the box.
    out_json.parent.mkdir(parents=True, exist_ok=True)
    out_md.parent.mkdir(parents=True, exist_ok=True)
    out_json.write_text(json.dumps(report, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
    write_md(out_md, report)
    # Print the Markdown path so callers can surface it in logs.
    print(out_md)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,87 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import math
from pathlib import Path
# All five Regional Internet Registries, used when --rirs is not given.
DEFAULT_RIRS = ["afrinic", "apnic", "arin", "lacnic", "ripe"]
def parse_args() -> argparse.Namespace:
    """Parse CLI options for aggregating per-RIR replay case reports."""
    parser = argparse.ArgumentParser(description="Aggregate per-RIR replay case reports")
    parser.add_argument(
        "--case-root",
        required=True,
        help="directory containing <rir>/<rir>_case_report.json",
    )
    parser.add_argument("--out-md", required=True)
    parser.add_argument("--out-json", required=True)
    parser.add_argument(
        "--rirs",
        nargs="*",
        default=None,
        help="RIRs to include (default: all 5)",
    )
    return parser.parse_args()
def read_case(case_root: Path, rir: str) -> dict:
    """Load ``<case_root>/<rir>/<rir>_case_report.json`` as a dict."""
    report_path = case_root / rir / f"{rir}_case_report.json"
    with report_path.open(encoding="utf-8") as fh:
        return json.load(fh)
def geomean(values: list[float]) -> float:
    """Geometric mean of the positive entries of *values*.

    ``None`` entries are ignored — a case ratio is ``None`` when the
    Routinator baseline was not positive — as are non-positive values,
    which would make ``math.log`` undefined. Returns 0.0 when no usable
    value remains (the previous implementation raised TypeError on None).
    """
    usable = [v for v in values if v is not None and v > 0]
    if not usable:
        return 0.0
    return math.exp(sum(math.log(v) for v in usable) / len(usable))
def build_summary(cases: list[dict]) -> dict:
    """Combine per-RIR case reports into one payload: raw cases + aggregates.

    Aggregates: whether every case matched the recorded baseline (snapshot
    and delta separately) and the geometric mean of ours/Routinator timing
    ratios — per mode and over both modes combined.
    """
    snapshot_ratios = [c["snapshot"]["ratio"] for c in cases]
    delta_ratios = [c["delta"]["ratio"] for c in cases]
    return {
        "cases": cases,
        "summary": {
            "snapshot_all_match": all(c["snapshot"]["match"] for c in cases),
            "delta_all_match": all(c["delta"]["match"] for c in cases),
            "snapshot_ratio_geomean": geomean(snapshot_ratios),
            "delta_ratio_geomean": geomean(delta_ratios),
            "all_ratio_geomean": geomean(snapshot_ratios + delta_ratios),
        },
    }
def write_md(path: Path, data: dict) -> None:
    """Render the multi-RIR summary as Markdown at *path*.

    Emits a per-case correctness + timing table followed by the aggregate
    metrics (all-match flags and geometric-mean ratios).
    """

    def fmt_ratio(value) -> str:
        # A case ratio is None when the Routinator baseline was not positive;
        # formatting None with ':.3f' would raise TypeError, so emit 'n/a'.
        return f"{value:.3f}" if value is not None else "n/a"

    lines = []
    lines.append("# Multi-RIR Replay Summary\n\n")
    lines.append("## Correctness + Timing\n\n")
    lines.append("| RIR | snapshot_match | snapshot_ours_s | snapshot_routinator_s | snapshot_ratio | delta_match | delta_ours_s | delta_routinator_s | delta_ratio |\n")
    lines.append("|---|---|---:|---:|---:|---|---:|---:|---:|\n")
    for case in data["cases"]:
        lines.append(
            f"| {case['rir']} | {str(case['snapshot']['match']).lower()} | {case['snapshot']['ours_seconds']:.3f} | {case['snapshot']['routinator_seconds']:.3f} | {fmt_ratio(case['snapshot']['ratio'])} | {str(case['delta']['match']).lower()} | {case['delta']['ours_seconds']:.3f} | {case['delta']['routinator_seconds']:.3f} | {fmt_ratio(case['delta']['ratio'])} |\n"
        )
    s = data["summary"]
    lines.append("\n## Aggregate Metrics\n\n")
    lines.append("| metric | value |\n")
    lines.append("|---|---:|\n")
    lines.append(f"| snapshot_all_match | {str(s['snapshot_all_match']).lower()} |\n")
    lines.append(f"| delta_all_match | {str(s['delta_all_match']).lower()} |\n")
    lines.append(f"| snapshot_ratio_geomean | {s['snapshot_ratio_geomean']:.3f} |\n")
    lines.append(f"| delta_ratio_geomean | {s['delta_ratio_geomean']:.3f} |\n")
    lines.append(f"| all_ratio_geomean | {s['all_ratio_geomean']:.3f} |\n")
    path.write_text("".join(lines), encoding="utf-8")
def main() -> int:
    """CLI entry point: read per-RIR case reports and write the aggregate summary."""
    args = parse_args()
    case_root = Path(args.case_root)
    # Default to all five RIRs when --rirs was not supplied.
    rirs = args.rirs or DEFAULT_RIRS
    cases = [read_case(case_root, rir) for rir in rirs]
    data = build_summary(cases)
    out_md = Path(args.out_md)
    out_json = Path(args.out_json)
    # Create parent directories so fresh output trees work out of the box.
    out_md.parent.mkdir(parents=True, exist_ok=True)
    out_json.parent.mkdir(parents=True, exist_ok=True)
    out_json.write_text(json.dumps(data, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
    write_md(out_md, data)
    # Print the Markdown path so callers can surface it in logs.
    print(out_md)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -11,7 +11,9 @@ use crate::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
use crate::policy::Policy;
use crate::storage::RocksStore;
use crate::validation::run_tree_from_tal::{
RunTreeFromTalAuditOutput, run_tree_from_tal_and_ta_der_payload_replay_serial_audit,
RunTreeFromTalAuditOutput, run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit,
run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timing,
run_tree_from_tal_and_ta_der_payload_replay_serial_audit,
run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing,
run_tree_from_tal_and_ta_der_serial_audit,
run_tree_from_tal_and_ta_der_serial_audit_with_timing, run_tree_from_tal_url_serial_audit,
@ -30,6 +32,11 @@ pub struct CliArgs {
pub report_json_path: Option<PathBuf>,
pub payload_replay_archive: Option<PathBuf>,
pub payload_replay_locks: Option<PathBuf>,
pub payload_base_archive: Option<PathBuf>,
pub payload_base_locks: Option<PathBuf>,
pub payload_base_validation_time: Option<time::OffsetDateTime>,
pub payload_delta_archive: Option<PathBuf>,
pub payload_delta_locks: Option<PathBuf>,
pub rsync_local_dir: Option<PathBuf>,
@ -59,6 +66,11 @@ Options:
--report-json <path> Write full audit report as JSON (optional)
--payload-replay-archive <path> Use local payload replay archive root (offline replay mode)
--payload-replay-locks <path> Use local payload replay locks.json (offline replay mode)
--payload-base-archive <path> Use local base payload archive root (offline delta replay)
--payload-base-locks <path> Use local base locks.json (offline delta replay)
--payload-base-validation-time <rfc3339> Validation time for the base bootstrap inside offline delta replay
--payload-delta-archive <path> Use local delta payload archive root (offline delta replay)
--payload-delta-locks <path> Use local locks-delta.json (offline delta replay)
--tal-url <url> TAL URL (downloads TAL + TA over HTTPS)
--tal-path <path> TAL file path (offline-friendly; requires --ta-path)
@ -89,6 +101,11 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let mut report_json_path: Option<PathBuf> = None;
let mut payload_replay_archive: Option<PathBuf> = None;
let mut payload_replay_locks: Option<PathBuf> = None;
let mut payload_base_archive: Option<PathBuf> = None;
let mut payload_base_locks: Option<PathBuf> = None;
let mut payload_base_validation_time: Option<time::OffsetDateTime> = None;
let mut payload_delta_archive: Option<PathBuf> = None;
let mut payload_delta_locks: Option<PathBuf> = None;
let mut rsync_local_dir: Option<PathBuf> = None;
let mut http_timeout_secs: u64 = 20;
@ -149,6 +166,41 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
.ok_or("--payload-replay-locks requires a value")?;
payload_replay_locks = Some(PathBuf::from(v));
}
"--payload-base-archive" => {
i += 1;
let v = argv
.get(i)
.ok_or("--payload-base-archive requires a value")?;
payload_base_archive = Some(PathBuf::from(v));
}
"--payload-base-locks" => {
i += 1;
let v = argv.get(i).ok_or("--payload-base-locks requires a value")?;
payload_base_locks = Some(PathBuf::from(v));
}
"--payload-base-validation-time" => {
i += 1;
let v = argv.get(i).ok_or("--payload-base-validation-time requires a value")?;
use time::format_description::well_known::Rfc3339;
let t = time::OffsetDateTime::parse(v, &Rfc3339).map_err(|e| {
format!("invalid --payload-base-validation-time (RFC3339 expected): {e}")
})?;
payload_base_validation_time = Some(t);
}
"--payload-delta-archive" => {
i += 1;
let v = argv
.get(i)
.ok_or("--payload-delta-archive requires a value")?;
payload_delta_archive = Some(PathBuf::from(v));
}
"--payload-delta-locks" => {
i += 1;
let v = argv
.get(i)
.ok_or("--payload-delta-locks requires a value")?;
payload_delta_locks = Some(PathBuf::from(v));
}
"--rsync-local-dir" => {
i += 1;
let v = argv.get(i).ok_or("--rsync-local-dir requires a value")?;
@ -230,6 +282,27 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
return Err(format!(
"--payload-replay-archive and --payload-replay-locks must be provided together
{}",
usage()
));
}
let delta_mode_count = payload_base_archive.is_some() as u8
+ payload_base_locks.is_some() as u8
+ payload_delta_archive.is_some() as u8
+ payload_delta_locks.is_some() as u8;
if delta_mode_count > 0 && delta_mode_count < 4 {
return Err(format!(
"--payload-base-archive, --payload-base-locks, --payload-delta-archive and --payload-delta-locks must be provided together
{}",
usage()
));
}
if replay_mode_count == 2 && delta_mode_count == 4 {
return Err(format!(
"snapshot replay mode and delta replay mode are mutually exclusive
{}",
usage()
));
@ -255,6 +328,32 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
return Err(format!(
"payload replay mode cannot be combined with --rsync-local-dir
{}",
usage()
));
}
}
if delta_mode_count == 4 {
if tal_url.is_some() {
return Err(format!(
"payload delta replay mode requires --tal-path and --ta-path; --tal-url is not supported
{}",
usage()
));
}
if tal_path.is_none() || ta_path.is_none() {
return Err(format!(
"payload delta replay mode requires --tal-path and --ta-path
{}",
usage()
));
}
if rsync_local_dir.is_some() {
return Err(format!(
"payload delta replay mode cannot be combined with --rsync-local-dir
{}",
usage()
));
@ -270,6 +369,11 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
report_json_path,
payload_replay_archive,
payload_replay_locks,
payload_base_archive,
payload_base_locks,
payload_base_validation_time,
payload_delta_archive,
payload_delta_locks,
rsync_local_dir,
http_timeout_secs,
rsync_timeout_secs,
@ -406,6 +510,7 @@ pub fn run(argv: &[String]) -> Result<(), String> {
max_instances: args.max_instances,
};
let replay_mode = args.payload_replay_archive.is_some();
let delta_replay_mode = args.payload_base_archive.is_some();
use time::format_description::well_known::Rfc3339;
let mut timing: Option<(std::path::PathBuf, TimingHandle)> = None;
@ -467,7 +572,71 @@ pub fn run(argv: &[String]) -> Result<(), String> {
None
};
let out = if replay_mode {
let out = if delta_replay_mode {
let tal_path = args
.tal_path
.as_ref()
.expect("validated by parse_args for delta replay mode");
let ta_path = args
.ta_path
.as_ref()
.expect("validated by parse_args for delta replay mode");
let base_archive = args
.payload_base_archive
.as_ref()
.expect("validated by parse_args for delta replay mode");
let base_locks = args
.payload_base_locks
.as_ref()
.expect("validated by parse_args for delta replay mode");
let base_validation_time = args.payload_base_validation_time.unwrap_or(validation_time);
let delta_archive = args
.payload_delta_archive
.as_ref()
.expect("validated by parse_args for delta replay mode");
let delta_locks = args
.payload_delta_locks
.as_ref()
.expect("validated by parse_args for delta replay mode");
let tal_bytes = std::fs::read(tal_path)
.map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?;
let ta_der = std::fs::read(ta_path)
.map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?;
if let Some((_, t)) = timing.as_ref() {
run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timing(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
base_archive,
base_locks,
delta_archive,
delta_locks,
base_validation_time,
validation_time,
&config,
t,
)
.map_err(|e| e.to_string())?
} else {
run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
base_archive,
base_locks,
delta_archive,
delta_locks,
base_validation_time,
validation_time,
&config,
)
.map_err(|e| e.to_string())?
}
} else if replay_mode {
let tal_path = args
.tal_path
.as_ref()
@ -896,6 +1065,131 @@ mod tests {
assert_eq!(args.max_depth, Some(0));
}
#[test]
fn parse_accepts_payload_delta_replay_mode_with_offline_tal_and_ta() {
let argv = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--tal-path".to_string(),
"a.tal".to_string(),
"--ta-path".to_string(),
"ta.cer".to_string(),
"--payload-base-archive".to_string(),
"base-archive".to_string(),
"--payload-base-locks".to_string(),
"base-locks.json".to_string(),
"--payload-delta-archive".to_string(),
"delta-archive".to_string(),
"--payload-delta-locks".to_string(),
"delta-locks.json".to_string(),
];
let args = parse_args(&argv).expect("parse delta replay mode");
assert_eq!(
args.payload_base_archive.as_deref(),
Some(Path::new("base-archive"))
);
assert_eq!(
args.payload_base_locks.as_deref(),
Some(Path::new("base-locks.json"))
);
assert_eq!(
args.payload_delta_archive.as_deref(),
Some(Path::new("delta-archive"))
);
assert_eq!(
args.payload_delta_locks.as_deref(),
Some(Path::new("delta-locks.json"))
);
}
#[test]
fn parse_rejects_partial_payload_delta_arguments_and_mutual_exclusion() {
let argv_partial = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--tal-path".to_string(),
"a.tal".to_string(),
"--ta-path".to_string(),
"ta.cer".to_string(),
"--payload-base-archive".to_string(),
"base-archive".to_string(),
];
let err = parse_args(&argv_partial).unwrap_err();
assert!(err.contains("must be provided together"), "{err}");
let argv_both = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--tal-path".to_string(),
"a.tal".to_string(),
"--ta-path".to_string(),
"ta.cer".to_string(),
"--payload-replay-archive".to_string(),
"archive".to_string(),
"--payload-replay-locks".to_string(),
"locks.json".to_string(),
"--payload-base-archive".to_string(),
"base-archive".to_string(),
"--payload-base-locks".to_string(),
"base-locks.json".to_string(),
"--payload-delta-archive".to_string(),
"delta-archive".to_string(),
"--payload-delta-locks".to_string(),
"delta-locks.json".to_string(),
];
let err = parse_args(&argv_both).unwrap_err();
assert!(err.contains("mutually exclusive"), "{err}");
}
#[test]
fn parse_rejects_payload_delta_with_tal_url_or_rsync_local_dir() {
let argv_url = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--tal-url".to_string(),
"https://example.test/x.tal".to_string(),
"--payload-base-archive".to_string(),
"base-archive".to_string(),
"--payload-base-locks".to_string(),
"base-locks.json".to_string(),
"--payload-delta-archive".to_string(),
"delta-archive".to_string(),
"--payload-delta-locks".to_string(),
"delta-locks.json".to_string(),
];
let err = parse_args(&argv_url).unwrap_err();
assert!(err.contains("--tal-url is not supported"), "{err}");
let argv_rsync = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--tal-path".to_string(),
"a.tal".to_string(),
"--ta-path".to_string(),
"ta.cer".to_string(),
"--payload-base-archive".to_string(),
"base-archive".to_string(),
"--payload-base-locks".to_string(),
"base-locks.json".to_string(),
"--payload-delta-archive".to_string(),
"delta-archive".to_string(),
"--payload-delta-locks".to_string(),
"delta-locks.json".to_string(),
"--rsync-local-dir".to_string(),
"repo".to_string(),
];
let err = parse_args(&argv_rsync).unwrap_err();
assert!(
err.contains("payload delta replay mode cannot be combined with --rsync-local-dir"),
"{err}"
);
}
#[test]
fn parse_accepts_payload_replay_mode_with_offline_tal_and_ta() {
let argv = vec![

View File

@ -1,4 +1,4 @@
use crate::data_model::common::{Asn1TimeEncoding, Asn1TimeUtc, DerReader, der_take_tlv};
use crate::data_model::common::{Asn1TimeEncoding, Asn1TimeUtc, DerReader, der_uint_from_bytes};
use crate::data_model::oid::{
OID_AD_SIGNED_OBJECT, OID_CMS_ATTR_CONTENT_TYPE, OID_CMS_ATTR_CONTENT_TYPE_RAW,
OID_CMS_ATTR_MESSAGE_DIGEST, OID_CMS_ATTR_MESSAGE_DIGEST_RAW, OID_CMS_ATTR_SIGNING_TIME,
@ -9,6 +9,7 @@ use crate::data_model::oid::{
OID_SIGNED_DATA_RAW, OID_SUBJECT_INFO_ACCESS,
};
use crate::data_model::rc::{ResourceCertificate, SubjectInfoAccess};
use asn1_rs::{Any, Class, FromBer, Header, Tag};
use ring::digest;
use x509_parser::prelude::FromDer;
use x509_parser::public_key::PublicKey;
@ -305,27 +306,7 @@ impl RpkiSignedObject {
/// This performs encoding/structure parsing only. Profile constraints are enforced by
/// `RpkiSignedObjectParsed::validate_profile`.
pub fn parse_der(der: &[u8]) -> Result<RpkiSignedObjectParsed, SignedObjectParseError> {
let mut r = DerReader::new(der);
let mut content_info_seq = r
.take_sequence()
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
if !r.is_empty() {
return Err(SignedObjectParseError::TrailingBytes(r.remaining_len()));
}
let content_type = take_oid_string(&mut content_info_seq)?;
let signed_data = parse_signed_data_from_contentinfo_cursor(&mut content_info_seq)?;
if !content_info_seq.is_empty() {
return Err(SignedObjectParseError::Parse(
"ContentInfo must be a SEQUENCE of 2 elements".into(),
));
}
Ok(RpkiSignedObjectParsed {
raw_der: der.to_vec(),
content_info_content_type: content_type,
signed_data,
})
parse_signed_object_content_info(der, der)
}
/// Decode a DER-encoded RPKI Signed Object (CMS ContentInfo wrapping SignedData) and enforce
@ -380,7 +361,6 @@ impl RpkiSignedObject {
};
let signer = &self.signed_data.signer_infos[0];
// The message to be verified is the DER encoding of SignedAttributes (SET OF Attribute).
let msg = &signer.signed_attrs_der_for_signature;
let pk = ring::signature::RsaPublicKeyComponents { n, e };
@ -393,6 +373,159 @@ impl RpkiSignedObject {
}
}
/// Lightweight BER/DER cursor over a byte slice, used while parsing CMS
/// SignedData structures.
///
/// TLVs are decoded with `asn1_rs::Any::from_ber`, so BER encodings are
/// accepted where noted (e.g. constructed OCTET STRINGs in
/// `take_octet_string`).
struct CmsReader<'a> {
    // Unconsumed bytes; the `take_*` methods advance this slice.
    buf: &'a [u8],
}

impl<'a> CmsReader<'a> {
    /// Wrap a byte slice in a fresh cursor.
    fn new(buf: &'a [u8]) -> Self {
        Self { buf }
    }

    /// True once every byte has been consumed.
    fn is_empty(&self) -> bool {
        self.buf.is_empty()
    }

    /// Number of bytes not yet consumed.
    fn remaining_len(&self) -> usize {
        self.buf.len()
    }

    /// Decode the next TLV's identifier octet without consuming it.
    fn peek_tag(&self) -> Result<u8, String> {
        let (_rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
        header_to_single_byte_tag(&any.header)
    }

    /// Consume the next TLV, returning (single-byte tag, content bytes).
    fn take_any(&mut self) -> Result<(u8, &'a [u8]), String> {
        let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
        let tag = header_to_single_byte_tag(&any.header)?;
        self.buf = rem;
        Ok((tag, any.data))
    }

    /// Consume the next TLV, returning (tag, full TLV bytes including header,
    /// content bytes). The full encoding is needed e.g. to re-hash or re-sign
    /// an element exactly as it appeared on the wire.
    fn take_any_full(&mut self) -> Result<(u8, &'a [u8], &'a [u8]), String> {
        let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
        // Bytes consumed by the parse = length of the full TLV encoding.
        let consumed = self.buf.len() - rem.len();
        let full = &self.buf[..consumed];
        let tag = header_to_single_byte_tag(&any.header)?;
        self.buf = rem;
        Ok((tag, full, any.data))
    }

    /// Consume and discard the next TLV.
    fn skip_any(&mut self) -> Result<(), String> {
        let _ = self.take_any()?;
        Ok(())
    }

    /// Consume the next TLV and require it to carry exactly `expected_tag`.
    fn take_tag(&mut self, expected_tag: u8) -> Result<&'a [u8], String> {
        let (tag, value) = self.take_any()?;
        if tag != expected_tag {
            return Err(format!(
                "unexpected tag: got 0x{tag:02X}, expected 0x{expected_tag:02X}"
            ));
        }
        Ok(value)
    }

    /// Consume a SEQUENCE (0x30) and return a sub-cursor over its contents.
    fn take_sequence(&mut self) -> Result<CmsReader<'a>, String> {
        let value = self.take_tag(0x30)?;
        Ok(CmsReader::new(value))
    }

    /// Consume an OCTET STRING, accepting both the primitive (0x04) and the
    /// BER constructed (0x24) encodings; constructed segments are flattened
    /// into one byte vector. Note the cursor is only advanced after the value
    /// decodes successfully.
    fn take_octet_string(&mut self) -> Result<Vec<u8>, String> {
        let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
        let tag = header_to_single_byte_tag(&any.header)?;
        if tag != 0x04 && tag != 0x24 {
            return Err(format!(
                "unexpected tag: got 0x{tag:02X}, expected 0x04"
            ));
        }
        let octets = flatten_octet_string(any)?;
        self.buf = rem;
        Ok(octets)
    }

    /// Consume an INTEGER (0x02) and decode it as an unsigned u64.
    fn take_uint_u64(&mut self) -> Result<u64, String> {
        let value = self.take_tag(0x02)?;
        der_uint_from_bytes(value)
    }

    /// Consume an EXPLICIT wrapper with `expected_outer_tag` and return the
    /// single inner TLV as (tag, content). Trailing bytes inside the wrapper
    /// are rejected.
    fn take_explicit(&mut self, expected_outer_tag: u8) -> Result<(u8, &'a [u8]), String> {
        let inner_der = self.take_tag(expected_outer_tag)?;
        let (tag, value, rem) = cms_take_tlv(inner_der)?;
        if !rem.is_empty() {
            return Err("trailing bytes inside EXPLICIT value".into());
        }
        Ok((tag, value))
    }

    /// Like `take_explicit`, but return the inner TLV's full encoding
    /// (header + content) instead of splitting it apart.
    fn take_explicit_der(&mut self, expected_outer_tag: u8) -> Result<&'a [u8], String> {
        let inner_der = self.take_tag(expected_outer_tag)?;
        let (_tag, _value, rem) = cms_take_tlv(inner_der)?;
        if !rem.is_empty() {
            return Err("trailing bytes inside EXPLICIT value".into());
        }
        Ok(inner_der)
    }
}
/// Parse a CMS ContentInfo (SEQUENCE of content-type OID + [0] EXPLICIT
/// SignedData) from `parse_der`, storing `raw_der` verbatim as the object's
/// original encoding. The two slices may differ when the caller re-encodes
/// before parsing; plain callers pass the same bytes twice.
fn parse_signed_object_content_info(
    raw_der: &[u8],
    parse_der: &[u8],
) -> Result<RpkiSignedObjectParsed, SignedObjectParseError> {
    let mut outer = CmsReader::new(parse_der);
    let mut content_info = outer
        .take_sequence()
        .map_err(SignedObjectParseError::Parse)?;
    // Nothing may follow the outermost ContentInfo SEQUENCE.
    if !outer.is_empty() {
        return Err(SignedObjectParseError::TrailingBytes(outer.remaining_len()));
    }
    let content_type = take_oid_string(&mut content_info)?;
    let signed_data = parse_signed_data_from_contentinfo_cursor(&mut content_info)?;
    // ContentInfo carries exactly two elements: the OID and the content.
    match content_info.is_empty() {
        true => Ok(RpkiSignedObjectParsed {
            raw_der: raw_der.to_vec(),
            content_info_content_type: content_type,
            signed_data,
        }),
        false => Err(SignedObjectParseError::Parse(
            "ContentInfo must be a SEQUENCE of 2 elements".into(),
        )),
    }
}
/// Re-encode an `asn1_rs` header as the classic single-byte identifier octet:
/// class bits (7-6) | constructed bit (5) | tag number (4-0). Tag numbers
/// above 30 require high-tag-number form and are rejected.
fn header_to_single_byte_tag(header: &Header<'_>) -> Result<u8, String> {
    let tag_no = header.tag().0;
    if tag_no > 30 {
        return Err(format!("high-tag-number form not supported: {tag_no}"));
    }
    let class_bits = (header.class() as u8) << 6;
    let constructed_bit = if header.constructed() { 0x20u8 } else { 0x00u8 };
    Ok(class_bits | constructed_bit | tag_no as u8)
}
/// Split one BER TLV off the front of `input`, yielding its single-byte tag,
/// its content bytes, and the remainder that follows the TLV.
fn cms_take_tlv(input: &[u8]) -> Result<(u8, &[u8], &[u8]), String> {
    let (rem, any) = Any::from_ber(input).map_err(|e| format!("BER parse error: {e}"))?;
    header_to_single_byte_tag(&any.header).map(|tag| (tag, any.data, rem))
}
/// Flatten a (possibly BER-constructed) OCTET STRING into its raw octets.
///
/// A primitive OCTET STRING returns its content directly; a constructed one
/// is the concatenation of its recursively flattened OCTET STRING segments.
/// Recursion depth is capped so maliciously deep nesting in untrusted input
/// cannot overflow the stack.
fn flatten_octet_string(any: Any<'_>) -> Result<Vec<u8>, String> {
    flatten_octet_string_depth(any, 0)
}

/// Depth-tracked worker for `flatten_octet_string`.
fn flatten_octet_string_depth(any: Any<'_>, depth: usize) -> Result<Vec<u8>, String> {
    // Generous bound: legitimate CMS content uses only a handful of levels.
    const MAX_DEPTH: usize = 32;
    if depth > MAX_DEPTH {
        return Err("constructed OCTET STRING nested too deeply".into());
    }
    if any.class() != Class::Universal || any.tag() != Tag::OctetString {
        return Err("expected OCTET STRING".into());
    }
    if !any.header.constructed() {
        return Ok(any.data.to_vec());
    }
    let mut out = Vec::new();
    let mut input = any.data;
    while !input.is_empty() {
        let (rem, child) = Any::from_ber(input).map_err(|e| format!("BER parse error: {e}"))?;
        out.extend(flatten_octet_string_depth(child, depth + 1)?);
        input = rem;
    }
    Ok(out)
}
impl RpkiSignedObjectParsed {
pub fn validate_profile(self) -> Result<RpkiSignedObject, SignedObjectValidateError> {
if self.content_info_content_type != OID_SIGNED_DATA {
@ -412,12 +545,12 @@ impl RpkiSignedObjectParsed {
}
fn parse_signed_data_from_contentinfo_cursor(
seq: &mut DerReader<'_>,
seq: &mut CmsReader<'_>,
) -> Result<SignedDataParsed, SignedObjectParseError> {
let inner_der = seq.take_explicit_der(0xA0).map_err(|_e| {
SignedObjectParseError::Parse("ContentInfo.content must be [0] EXPLICIT".into())
})?;
let mut r = DerReader::new(inner_der);
let mut r = CmsReader::new(inner_der);
let signed_data_seq = r
.take_sequence()
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
@ -430,7 +563,7 @@ fn parse_signed_data_from_contentinfo_cursor(
}
fn parse_signed_data_cursor(
mut seq: DerReader<'_>,
mut seq: CmsReader<'_>,
) -> Result<SignedDataParsed, SignedObjectParseError> {
let version = seq
.take_uint_u64()
@ -439,7 +572,7 @@ fn parse_signed_data_cursor(
let digest_set_bytes = seq
.take_tag(0x31)
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
let mut digest_set = DerReader::new(digest_set_bytes);
let mut digest_set = CmsReader::new(digest_set_bytes);
let mut digest_algorithms: Vec<AlgorithmIdentifierParsed> = Vec::new();
while !digest_set.is_empty() {
let alg = digest_set
@ -512,7 +645,7 @@ fn parse_signed_data_cursor(
}
fn parse_encapsulated_content_info_cursor(
mut seq: DerReader<'_>,
mut seq: CmsReader<'_>,
) -> Result<EncapsulatedContentInfoParsed, SignedObjectParseError> {
if seq.is_empty() {
return Err(SignedObjectParseError::Parse(
@ -530,7 +663,7 @@ fn parse_encapsulated_content_info_cursor(
"EncapsulatedContentInfo.eContent must be [0] EXPLICIT".into(),
)
})?;
let mut inner = DerReader::new(inner_der);
let mut inner = CmsReader::new(inner_der);
let octets = inner
.take_octet_string()
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
@ -539,7 +672,7 @@ fn parse_encapsulated_content_info_cursor(
"trailing bytes inside EncapsulatedContentInfo.eContent".into(),
));
}
Some(octets.to_vec())
Some(octets)
};
if !seq.is_empty() {
return Err(SignedObjectParseError::Parse(
@ -557,7 +690,7 @@ fn split_der_objects(mut input: &[u8]) -> Result<Vec<Vec<u8>>, SignedObjectParse
let mut out: Vec<Vec<u8>> = Vec::new();
while !input.is_empty() {
let (_tag, _value, rem) =
der_take_tlv(input).map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
cms_take_tlv(input).map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
let consumed = input.len() - rem.len();
out.push(input[..consumed].to_vec());
input = rem;
@ -568,7 +701,7 @@ fn split_der_objects(mut input: &[u8]) -> Result<Vec<Vec<u8>>, SignedObjectParse
fn parse_signer_infos_set_cursor(
set_bytes: &[u8],
) -> Result<Vec<SignerInfoParsed>, SignedObjectParseError> {
let mut set = DerReader::new(set_bytes);
let mut set = CmsReader::new(set_bytes);
let mut out: Vec<SignerInfoParsed> = Vec::new();
while !set.is_empty() {
let si = set
@ -631,7 +764,7 @@ fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedOb
}
fn parse_signer_info_cursor(
mut seq: DerReader<'_>,
mut seq: CmsReader<'_>,
) -> Result<SignerInfoParsed, SignedObjectParseError> {
let version = seq
.take_uint_u64()
@ -980,7 +1113,7 @@ fn make_signed_attrs_der_for_signature(full_tlv: &[u8]) -> Result<Vec<u8>, Signe
Ok(cs_der)
}
fn take_oid_string(seq: &mut DerReader<'_>) -> Result<String, SignedObjectParseError> {
fn take_oid_string(seq: &mut CmsReader<'_>) -> Result<String, SignedObjectParseError> {
let oid = seq
.take_tag(0x06)
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
@ -1055,7 +1188,7 @@ fn decode_oid_to_dotted_string(value: &[u8]) -> String {
}
fn parse_algorithm_identifier_cursor(
mut seq: DerReader<'_>,
mut seq: CmsReader<'_>,
) -> Result<(String, bool), SignedObjectParseError> {
if seq.is_empty() {
return Err(SignedObjectParseError::Parse(

View File

@ -175,10 +175,35 @@ pub struct ReplayArchiveIndex {
pub rsync_modules: BTreeMap<String, ReplayRsyncModule>,
}
/// Strictness applied when resolving the rsync modules referenced by the
/// locks file during replay-archive index loading.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ReplayArchiveLoadMode {
    /// Every locked rsync module must be present; a missing bucket fails the load.
    Strict,
    /// Skip rsync modules whose on-disk bucket is missing; other errors still fail.
    AllowMissingRsyncModules,
}
impl ReplayArchiveIndex {
    /// Load a replay archive index in strict mode: every rsync module named
    /// in the locks file must exist in the capture.
    pub fn load(
        archive_root: impl AsRef<Path>,
        locks_path: impl AsRef<Path>,
    ) -> Result<Self, ReplayArchiveError> {
        Self::load_with_mode(archive_root, locks_path, ReplayArchiveLoadMode::Strict)
    }
    /// Load a replay archive index leniently: rsync modules whose bucket is
    /// missing from the capture are skipped instead of failing the load.
    /// All other load errors still propagate.
    pub fn load_allow_missing_rsync_modules(
        archive_root: impl AsRef<Path>,
        locks_path: impl AsRef<Path>,
    ) -> Result<Self, ReplayArchiveError> {
        Self::load_with_mode(
            archive_root,
            locks_path,
            ReplayArchiveLoadMode::AllowMissingRsyncModules,
        )
    }
fn load_with_mode(
archive_root: impl AsRef<Path>,
locks_path: impl AsRef<Path>,
load_mode: ReplayArchiveLoadMode,
) -> Result<Self, ReplayArchiveError> {
let archive_root = archive_root.as_ref().to_path_buf();
let locks_path = locks_path.as_ref().to_path_buf();
@ -227,8 +252,14 @@ impl ReplayArchiveIndex {
detail: "rsync lock transport must be rsync".to_string(),
});
}
let module = load_rsync_module(&capture_root, module_uri)?;
rsync_modules.insert(module.module_uri.clone(), module);
match load_rsync_module(&capture_root, module_uri) {
Ok(module) => {
rsync_modules.insert(module.module_uri.clone(), module);
}
Err(ReplayArchiveError::MissingRsyncModuleBucket { .. })
if load_mode == ReplayArchiveLoadMode::AllowMissingRsyncModules => {}
Err(err) => return Err(err),
}
}
Ok(Self {
@ -808,6 +839,21 @@ mod tests {
"{err}"
);
}
#[test]
fn replay_archive_index_can_skip_missing_rsync_modules_in_lenient_mode() {
let (_temp, archive_root, locks_path, _notify_uri, module_uri) = build_minimal_archive();
let mod_hash = sha256_hex(module_uri.as_bytes());
let mod_dir = archive_root
.join("v1/captures/capture-001/rsync/modules")
.join(mod_hash);
std::fs::remove_dir_all(&mod_dir).expect("remove module dir");
let index =
ReplayArchiveIndex::load_allow_missing_rsync_modules(&archive_root, &locks_path)
.expect("load lenient replay index");
assert!(index.rsync_modules.is_empty());
}
#[test]
fn replay_archive_index_rejects_capture_id_mismatch() {
let (_temp, archive_root, locks_path, _notify_uri, _module_uri) = build_minimal_archive();

1128
src/replay/delta_archive.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,368 @@
use std::collections::BTreeMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use crate::replay::delta_archive::{
ReplayDeltaArchiveError, ReplayDeltaArchiveIndex, ReplayDeltaRrdpKind,
};
use crate::sync::rrdp::{Fetcher, parse_notification};
/// Errors raised while indexing or serving content from an offline
/// delta-replay payload archive over the HTTP fetcher interface.
#[derive(Debug, thiserror::Error)]
pub enum PayloadDeltaReplayHttpFetcherError {
    /// Error from the underlying delta archive index.
    #[error(transparent)]
    DeltaIndex(#[from] ReplayDeltaArchiveError),
    /// An archived RRDP file could not be read from disk.
    #[error("read delta replay RRDP file failed: {path}: {detail}")]
    ReadFile { path: String, detail: String },
    /// The archived target notification failed to parse as RRDP XML.
    #[error("parse target notification failed for {notify_uri}: {detail}")]
    ParseNotification { notify_uri: String, detail: String },
    /// The archived target notification does not match the session/serial the
    /// transition says it should reach.
    #[error(
        "target notification session/serial mismatch for {notify_uri}: expected session={expected_session} serial={expected_serial}, got session={actual_session} serial={actual_serial}"
    )]
    NotificationTargetMismatch {
        notify_uri: String,
        expected_session: String,
        expected_serial: u64,
        actual_session: String,
        actual_serial: u64,
    },
    /// A delta serial recorded in the transition is absent from the target
    /// notification's delta list.
    #[error(
        "delta serial list mismatch between target notification and transition for {notify_uri}"
    )]
    DeltaSerialMismatch { notify_uri: String },
    /// The same HTTP URI would be routed to two different archived files.
    #[error("duplicate delta replay HTTP URI mapping for {uri}: {first_path} vs {second_path}")]
    DuplicateUriMapping {
        uri: String,
        first_path: String,
        second_path: String,
    },
    /// The notification URI belongs to a non-delta transition (e.g. session
    /// reset, gap, unchanged) and therefore has no fetchable RRDP content.
    #[error(
        "delta replay notification URI is {kind} and should not be fetched as RRDP: {notify_uri}"
    )]
    NotificationKindNotFetchable { notify_uri: String, kind: String },
    /// The requested URI has no route in the delta archive at all.
    #[error("delta replay HTTP URI not found in archive: {0}")]
    MissingUri(String),
}
/// Offline HTTP "fetcher" that serves RRDP delta-replay content directly from
/// a delta payload archive on disk, never touching the network.
#[derive(Clone, Debug)]
pub struct PayloadDeltaReplayHttpFetcher {
    /// Underlying delta archive index, exposed via `archive_index()`.
    index: Arc<ReplayDeltaArchiveIndex>,
    /// HTTP URI -> archived file path (target notifications and delta files).
    routes: BTreeMap<String, PathBuf>,
    /// Notification URI -> transition kind, used to explain non-fetchable repos.
    repo_kinds: BTreeMap<String, ReplayDeltaRrdpKind>,
}
impl PayloadDeltaReplayHttpFetcher {
    /// Build the URI routing table from a delta archive index.
    ///
    /// For every repo whose transition kind is `Delta`, this routes the
    /// notification URI to the archived target notification, cross-checks the
    /// notification's session/serial against the transition target, verifies
    /// every transition delta serial appears in the notification, and routes
    /// each delta URI to its archived file. Repos of any other kind are only
    /// recorded in `repo_kinds` so fetches of them can be explained.
    pub fn new(
        index: Arc<ReplayDeltaArchiveIndex>,
    ) -> Result<Self, PayloadDeltaReplayHttpFetcherError> {
        let mut routes = BTreeMap::new();
        let mut repo_kinds = BTreeMap::new();
        for (notify_uri, repo) in &index.rrdp_repos {
            repo_kinds.insert(notify_uri.clone(), repo.transition.kind);
            if repo.transition.kind != ReplayDeltaRrdpKind::Delta {
                continue;
            }
            // Invariant from index construction: delta repos always carry a
            // target notification path.
            let notification_path = repo
                .target_notification_path
                .as_ref()
                .expect("delta repo target notification indexed");
            insert_unique_route(&mut routes, notify_uri, notification_path)?;
            let notification_xml = fs::read(notification_path).map_err(|e| {
                PayloadDeltaReplayHttpFetcherError::ReadFile {
                    path: notification_path.display().to_string(),
                    detail: e.to_string(),
                }
            })?;
            let notification = parse_notification(&notification_xml).map_err(|e| {
                PayloadDeltaReplayHttpFetcherError::ParseNotification {
                    notify_uri: notify_uri.clone(),
                    detail: e.to_string(),
                }
            })?;
            // The archived notification must land exactly on the transition's
            // declared target session/serial.
            let expected_session = repo.transition.target.session.as_deref().unwrap_or("");
            let expected_serial = repo.transition.target.serial.unwrap_or_default();
            let actual_session = notification.session_id.to_string();
            if actual_session != expected_session || notification.serial != expected_serial {
                return Err(
                    PayloadDeltaReplayHttpFetcherError::NotificationTargetMismatch {
                        notify_uri: notify_uri.clone(),
                        expected_session: expected_session.to_string(),
                        expected_serial,
                        actual_session,
                        actual_serial: notification.serial,
                    },
                );
            }
            let transition_serials = repo
                .delta_paths
                .iter()
                .map(|(serial, _)| *serial)
                .collect::<Vec<_>>();
            let mut notification_delta_map = BTreeMap::new();
            for dref in notification.deltas {
                notification_delta_map.insert(dref.serial, dref.uri);
            }
            // Validate all transition serials against the notification first,
            // before inserting any delta routes.
            for serial in &transition_serials {
                if !notification_delta_map.contains_key(serial) {
                    return Err(PayloadDeltaReplayHttpFetcherError::DeltaSerialMismatch {
                        notify_uri: notify_uri.clone(),
                    });
                }
            }
            for (serial, path) in &repo.delta_paths {
                let uri = notification_delta_map
                    .get(serial)
                    .expect("delta uri present for transition serial");
                insert_unique_route(&mut routes, uri, path)?;
            }
        }
        Ok(Self {
            index,
            routes,
            repo_kinds,
        })
    }

    /// Convenience alias for `new`.
    pub fn from_index(
        index: Arc<ReplayDeltaArchiveIndex>,
    ) -> Result<Self, PayloadDeltaReplayHttpFetcherError> {
        Self::new(index)
    }

    /// Borrow the underlying delta archive index.
    pub fn archive_index(&self) -> &ReplayDeltaArchiveIndex {
        self.index.as_ref()
    }
}
impl Fetcher for PayloadDeltaReplayHttpFetcher {
    /// Serve an HTTP GET from the archived delta-replay routes.
    ///
    /// A routed URI returns the archived file's bytes. A known notification
    /// URI without a route fails with its transition kind; any other URI
    /// fails as missing from the archive.
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
        if let Some(path) = self.routes.get(uri) {
            return fs::read(path).map_err(|e| {
                PayloadDeltaReplayHttpFetcherError::ReadFile {
                    path: path.display().to_string(),
                    detail: e.to_string(),
                }
                .to_string()
            });
        }
        let err = match self.repo_kinds.get(uri) {
            Some(kind) => PayloadDeltaReplayHttpFetcherError::NotificationKindNotFetchable {
                notify_uri: uri.to_string(),
                kind: kind.as_str().to_string(),
            },
            None => PayloadDeltaReplayHttpFetcherError::MissingUri(uri.to_string()),
        };
        Err(err.to_string())
    }
}
/// Register `uri -> path`, tolerating an identical re-registration but
/// rejecting any attempt to remap a URI to a different archived file.
fn insert_unique_route(
    routes: &mut BTreeMap<String, PathBuf>,
    uri: &str,
    path: &Path,
) -> Result<(), PayloadDeltaReplayHttpFetcherError> {
    match routes.get(uri) {
        Some(existing) if existing != path => {
            Err(PayloadDeltaReplayHttpFetcherError::DuplicateUriMapping {
                uri: uri.to_string(),
                first_path: existing.display().to_string(),
                second_path: path.display().to_string(),
            })
        }
        Some(_) => Ok(()),
        None => {
            routes.insert(uri.to_string(), path.to_path_buf());
            Ok(())
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::replay::archive::sha256_hex;
use crate::replay::delta_archive::ReplayDeltaArchiveIndex;
    /// Build a minimal on-disk delta payload archive for one RRDP repo.
    ///
    /// Writes capture/base metadata, a repo `meta.json`, a target notification
    /// at serial 12 referencing two deltas (11, 12), the two delta XML files,
    /// a `transition.json` of the requested `kind`, and a matching
    /// `locks-delta.json`. Returns (tempdir guard, archive root, locks path,
    /// notification URI, delta-11 URI, delta-12 URI).
    fn build_delta_http_fixture(
        kind: ReplayDeltaRrdpKind,
    ) -> (tempfile::TempDir, PathBuf, PathBuf, String, String, String) {
        let temp = tempfile::tempdir().expect("tempdir");
        let archive_root = temp.path().join("payload-delta-archive");
        let capture = "delta-http";
        let capture_root = archive_root.join("v1").join("captures").join(capture);
        std::fs::create_dir_all(&capture_root).expect("mkdir capture root");
        std::fs::write(
            capture_root.join("capture.json"),
            format!(r#"{{"version":1,"captureId":"{capture}","createdAt":"2026-03-16T00:00:00Z","notes":""}}"#),
        )
        .expect("write capture meta");
        std::fs::write(
            capture_root.join("base.json"),
            r#"{"version":1,"baseCapture":"base-cap","baseLocksSha256":"deadbeef","createdAt":"2026-03-16T00:00:00Z"}"#,
        )
        .expect("write base meta");
        let notify_uri = "https://rrdp.example.test/notification.xml".to_string();
        let snapshot_uri = "https://rrdp.example.test/snapshot.xml".to_string();
        let delta1_uri = "https://rrdp.example.test/d1.xml".to_string();
        let delta2_uri = "https://rrdp.example.test/d2.xml".to_string();
        let session = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string();
        let target_serial = 12u64;
        // Repo buckets are keyed by the sha256 of the notification URI.
        let repo_hash = sha256_hex(notify_uri.as_bytes());
        let session_dir = capture_root
            .join("rrdp/repos")
            .join(&repo_hash)
            .join(&session);
        let deltas_dir = session_dir.join("deltas");
        std::fs::create_dir_all(&deltas_dir).expect("mkdir deltas dir");
        // meta.json lives in the repo bucket, one level above the session dir.
        std::fs::write(
            session_dir.parent().unwrap().join("meta.json"),
            format!(r#"{{"version":1,"rpkiNotify":"{notify_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#),
        )
        .expect("write repo meta");
        let notification_xml = format!(
            r#"<?xml version="1.0" encoding="UTF-8"?>
<notification xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session}" serial="{target_serial}">
<snapshot uri="{snapshot_uri}" hash="00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff" />
<delta serial="11" uri="{delta1_uri}" hash="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" />
<delta serial="12" uri="{delta2_uri}" hash="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" />
</notification>"#
        );
        std::fs::write(
            session_dir.join("notification-target-12.xml"),
            notification_xml,
        )
        .expect("write target notification");
        std::fs::write(
            deltas_dir.join("delta-11-aaaa.xml"),
            b"<delta serial='11'/>",
        )
        .expect("write delta1");
        std::fs::write(
            deltas_dir.join("delta-12-bbbb.xml"),
            b"<delta serial='12'/>",
        )
        .expect("write delta2");
        std::fs::write(
            session_dir.parent().unwrap().join("transition.json"),
            format!(
                r#"{{"kind":"{}","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}}"#,
                kind.as_str()
            ),
        )
        .expect("write transition");
        let locks_path = temp.path().join("locks-delta.json");
        std::fs::write(
            &locks_path,
            format!(
                r#"{{"version":1,"capture":"{capture}","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{{"{notify_uri}":{{"kind":"{}","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}}}},"rsync":{{}}}}"#,
                kind.as_str()
            ),
        )
        .expect("write locks");
        (
            temp,
            archive_root,
            locks_path,
            notify_uri,
            delta1_uri,
            delta2_uri,
        )
    }
#[test]
fn delta_http_fetcher_rejects_session_reset_and_gap_notification_kind() {
for kind in [ReplayDeltaRrdpKind::SessionReset, ReplayDeltaRrdpKind::Gap] {
let (_temp, archive_root, locks_path, notify_uri, _delta1_uri, _delta2_uri) =
build_delta_http_fixture(kind);
let index = Arc::new(
ReplayDeltaArchiveIndex::load(&archive_root, &locks_path)
.expect("load delta index"),
);
let fetcher = PayloadDeltaReplayHttpFetcher::from_index(index).expect("build fetcher");
let err = fetcher.fetch(&notify_uri).unwrap_err();
assert!(err.contains(kind.as_str()), "{err}");
}
}
#[test]
fn delta_http_fetcher_reads_target_notification_and_delta_files() {
let (_temp, archive_root, locks_path, notify_uri, delta1_uri, delta2_uri) =
build_delta_http_fixture(ReplayDeltaRrdpKind::Delta);
let index = Arc::new(
ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"),
);
let fetcher = PayloadDeltaReplayHttpFetcher::from_index(index).expect("build fetcher");
let notification = fetcher.fetch(&notify_uri).expect("fetch notification");
assert!(
std::str::from_utf8(&notification)
.unwrap()
.contains("notification")
);
assert_eq!(
fetcher.fetch(&delta1_uri).expect("fetch delta1"),
b"<delta serial='11'/>".to_vec()
);
assert_eq!(
fetcher.fetch(&delta2_uri).expect("fetch delta2"),
b"<delta serial='12'/>".to_vec()
);
}
#[test]
fn delta_http_fetcher_rejects_non_delta_notification_kinds_and_missing_uri() {
let (_temp, archive_root, locks_path, notify_uri, _delta1_uri, _delta2_uri) =
build_delta_http_fixture(ReplayDeltaRrdpKind::Unchanged);
let index = Arc::new(
ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"),
);
let fetcher = PayloadDeltaReplayHttpFetcher::from_index(index).expect("build fetcher");
let err = fetcher.fetch(&notify_uri).unwrap_err();
assert!(err.contains("unchanged"), "{err}");
let err = fetcher
.fetch("https://missing.example/test.xml")
.unwrap_err();
assert!(err.contains("not found in archive"), "{err}");
}
#[test]
fn delta_http_fetcher_rejects_target_notification_mismatch() {
    // Fixture transition promises a `delta` transition ending at serial 12
    // under session "aaaa...".
    let (_temp, archive_root, locks_path, notify_uri, _delta1_uri, _delta2_uri) =
        build_delta_http_fixture(ReplayDeltaRrdpKind::Delta);
    let repo_hash = sha256_hex(notify_uri.as_bytes());
    let notification = archive_root
        .join("v1/captures/delta-http/rrdp/repos")
        .join(repo_hash)
        .join("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa")
        .join("notification-target-12.xml");
    // Rewrite the archived target notification with a different session_id
    // ("bbbb..."), so it no longer agrees with the recorded transition.
    std::fs::write(
        &notification,
        r#"<?xml version="1.0" encoding="UTF-8"?>
<notification xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb" serial="12">
<snapshot uri="https://rrdp.example.test/snapshot.xml" hash="00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff" />
<delta serial="11" uri="https://rrdp.example.test/d1.xml" hash="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" />
<delta serial="12" uri="https://rrdp.example.test/d2.xml" hash="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" />
</notification>"#,
    )
    .expect("rewrite notification");
    let index = Arc::new(
        ReplayDeltaArchiveIndex::load(&archive_root, &locks_path).expect("load delta index"),
    );
    // The mismatch must be detected at fetcher construction, not at fetch time.
    let err = PayloadDeltaReplayHttpFetcher::from_index(index).unwrap_err();
    assert!(
        matches!(
            err,
            PayloadDeltaReplayHttpFetcherError::NotificationTargetMismatch { .. }
        ),
        "{err}"
    );
}
}

View File

@ -0,0 +1,327 @@
use std::collections::BTreeMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetchResult, RsyncFetcher};
use crate::replay::archive::{ReplayArchiveIndex, canonical_rsync_module};
use crate::replay::delta_archive::ReplayDeltaArchiveIndex;
/// Rsync fetcher for delta replay: serves a module by overlaying the delta
/// capture's changed files on top of the base capture's full module tree.
#[derive(Clone, Debug)]
pub struct PayloadDeltaReplayRsyncFetcher {
    // Full per-module trees from the base (snapshot) payload archive.
    base_index: Arc<ReplayArchiveIndex>,
    // Changed-file overlays from the delta payload archive.
    delta_index: Arc<ReplayDeltaArchiveIndex>,
}
impl PayloadDeltaReplayRsyncFetcher {
    /// Builds a fetcher over a base (snapshot) archive index and a delta
    /// archive index.
    pub fn new(
        base_index: Arc<ReplayArchiveIndex>,
        delta_index: Arc<ReplayDeltaArchiveIndex>,
    ) -> Self {
        Self {
            base_index,
            delta_index,
        }
    }
    /// Read-only access to the base (snapshot) archive index.
    pub fn base_index(&self) -> &ReplayArchiveIndex {
        self.base_index.as_ref()
    }
    /// Read-only access to the delta archive index.
    pub fn delta_index(&self) -> &ReplayDeltaArchiveIndex {
        self.delta_index.as_ref()
    }
}
impl RsyncFetcher for PayloadDeltaReplayRsyncFetcher {
    /// Fetches every object under `rsync_base_uri` by merging the base
    /// capture's on-disk module tree with the delta capture's overlay files.
    ///
    /// Overlay entries are inserted after the base walk, so for a URI present
    /// in both the delta bytes win. Errors if neither a base module tree nor
    /// a delta overlay exists for the module, or if the merged set contains
    /// nothing under the requested subtree.
    fn fetch_objects(&self, rsync_base_uri: &str) -> RsyncFetchResult<Vec<(String, Vec<u8>)>> {
        // Canonical `rsync://authority/module/` URI used for index lookups.
        let module_uri = canonical_rsync_module(rsync_base_uri)
            .map_err(|e| RsyncFetchError::Fetch(e.to_string()))?;
        // Ensure a trailing slash so prefix filtering cannot match siblings
        // (e.g. "repo/sub" must not match "repo/subtree/...").
        let normalized_base = if rsync_base_uri.ends_with('/') {
            rsync_base_uri.to_string()
        } else {
            format!("{rsync_base_uri}/")
        };
        // Keyed by URI: dedupes base vs overlay and yields sorted output.
        let mut merged: BTreeMap<String, Vec<u8>> = BTreeMap::new();
        let mut saw_base = false;
        // A missing base module is tolerated here; the overlay-only case is
        // validated by the `!saw_base && !saw_overlay` check below.
        if let Ok(base_module) = self
            .base_index
            .resolve_rsync_module_for_base_uri(rsync_base_uri)
        {
            let base_tree_root = module_tree_root(&module_uri, &base_module.tree_dir)
                .map_err(RsyncFetchError::Fetch)?;
            if base_tree_root.is_dir() {
                let mut base_objects = Vec::new();
                walk_dir_collect(
                    &base_tree_root,
                    &base_tree_root,
                    &module_uri,
                    &mut base_objects,
                )
                .map_err(RsyncFetchError::Fetch)?;
                for (uri, bytes) in base_objects {
                    merged.insert(uri, bytes);
                }
                saw_base = true;
            }
        }
        let mut saw_overlay = false;
        // Apply the delta overlay on top; these inserts overwrite base entries.
        if let Some(delta_module) = self.delta_index.rsync_module(&module_uri) {
            for (uri, path) in &delta_module.overlay_files {
                let bytes = fs::read(path).map_err(|e| {
                    RsyncFetchError::Fetch(format!(
                        "read delta rsync overlay failed: {}: {e}",
                        path.display()
                    ))
                })?;
                merged.insert(uri.clone(), bytes);
                saw_overlay = true;
            }
        }
        if !saw_base && !saw_overlay {
            return Err(RsyncFetchError::Fetch(format!(
                "delta replay base rsync module not found and no delta overlay exists for module: {module_uri}"
            )));
        }
        // Restrict the module-wide merge to the requested subtree.
        let filtered: Vec<(String, Vec<u8>)> = merged
            .into_iter()
            .filter(|(uri, _)| uri.starts_with(&normalized_base))
            .collect();
        if filtered.is_empty() {
            return Err(RsyncFetchError::Fetch(format!(
                "delta replay rsync subtree not found: {normalized_base}"
            )));
        }
        Ok(filtered)
    }
}
/// Maps a module URI `rsync://<authority>/<module>/` to its on-disk tree
/// root `<tree_dir>/<authority>/<module>`.
///
/// Path components after the module name are ignored; a URI without both an
/// authority and a module is rejected.
fn module_tree_root(module_uri: &str, tree_dir: &Path) -> Result<PathBuf, String> {
    let invalid = || format!("invalid rsync module URI: {module_uri}");
    let without_scheme = module_uri.strip_prefix("rsync://").ok_or_else(&invalid)?;
    let mut segments = without_scheme.trim_end_matches('/').split('/');
    let authority = segments.next().ok_or_else(&invalid)?;
    let module = segments.next().ok_or_else(&invalid)?;
    Ok(tree_dir.join(authority).join(module))
}
/// Recursively collects every regular file under `root`, pairing each with
/// the rsync URI formed by appending its `root`-relative path (with `/`
/// separators) to `rsync_base_uri`.
///
/// `current` is the directory being scanned (pass `root` at the top level);
/// results are appended to `out` in `read_dir` traversal order (unsorted).
/// Returns the first filesystem error as a `String`.
fn walk_dir_collect(
    root: &Path,
    current: &Path,
    rsync_base_uri: &str,
    out: &mut Vec<(String, Vec<u8>)>,
) -> Result<(), String> {
    // The joining slash is invariant over the whole walk; compute it once per
    // call instead of re-building a normalized base String for every file.
    let separator = if rsync_base_uri.ends_with('/') { "" } else { "/" };
    let entries = fs::read_dir(current).map_err(|e| e.to_string())?;
    for entry in entries {
        let entry = entry.map_err(|e| e.to_string())?;
        let path = entry.path();
        let meta = entry.metadata().map_err(|e| e.to_string())?;
        if meta.is_dir() {
            walk_dir_collect(root, &path, rsync_base_uri, out)?;
            continue;
        }
        if !meta.is_file() {
            // Skip symlinks and other non-regular entries.
            continue;
        }
        let rel = path
            .strip_prefix(root)
            .map_err(|e| e.to_string())?
            .to_string_lossy()
            // Keep URIs forward-slash separated even on Windows.
            .replace('\\', "/");
        let bytes = fs::read(&path).map_err(|e| e.to_string())?;
        out.push((format!("{rsync_base_uri}{separator}{rel}"), bytes));
    }
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::replay::archive::ReplayArchiveIndex;
use crate::replay::delta_archive::ReplayDeltaArchiveIndex;
/// Builds a temp fixture with a base payload archive (full rsync module tree
/// holding `a.roa` and `sub/b.cer`) and a delta payload archive (an overlay
/// for `sub/b.cer` only), plus matching locks files for each.
///
/// Returns `(tempdir, base_archive, base_locks, delta_archive, delta_locks)`;
/// callers must keep the tempdir alive while using the paths.
fn build_base_and_delta_rsync_fixture()
-> (tempfile::TempDir, PathBuf, PathBuf, PathBuf, PathBuf) {
    let temp = tempfile::tempdir().expect("tempdir");
    // --- base archive: capture metadata + full module tree ---
    let base_archive = temp.path().join("payload-archive");
    let base_capture_root = base_archive.join("v1/captures/base-cap");
    std::fs::create_dir_all(&base_capture_root).expect("mkdir base capture");
    std::fs::write(
        base_capture_root.join("capture.json"),
        r#"{"version":1,"captureId":"base-cap","createdAt":"2026-03-16T00:00:00Z","notes":""}"#,
    )
    .expect("write base capture meta");
    // Module buckets are addressed by the sha256 of the module URI.
    let module_uri = "rsync://rsync.example.test/repo/";
    let module_hash = crate::replay::archive::sha256_hex(module_uri.as_bytes());
    let base_bucket = base_capture_root.join("rsync/modules").join(&module_hash);
    let base_tree = base_bucket.join("tree/rsync.example.test/repo");
    std::fs::create_dir_all(base_tree.join("sub")).expect("mkdir base tree");
    std::fs::write(base_bucket.join("meta.json"), format!(r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#)).expect("write base meta");
    std::fs::write(base_tree.join("a.roa"), b"base-a").expect("write base a");
    std::fs::write(base_tree.join("sub").join("b.cer"), b"base-b").expect("write base b");
    let base_locks = temp.path().join("base-locks.json");
    std::fs::write(&base_locks, format!(r#"{{"version":1,"capture":"base-cap","rrdp":{{}},"rsync":{{"{module_uri}":{{"transport":"rsync"}}}}}}"#)).expect("write base locks");
    // --- delta archive: overlay containing only the changed sub/b.cer ---
    let delta_archive = temp.path().join("payload-delta-archive");
    let delta_capture_root = delta_archive.join("v1/captures/delta-cap");
    std::fs::create_dir_all(&delta_capture_root).expect("mkdir delta capture");
    std::fs::write(delta_capture_root.join("capture.json"), r#"{"version":1,"captureId":"delta-cap","createdAt":"2026-03-16T00:00:00Z","notes":""}"#).expect("write delta capture meta");
    std::fs::write(delta_capture_root.join("base.json"), r#"{"version":1,"baseCapture":"base-cap","baseLocksSha256":"deadbeef","createdAt":"2026-03-16T00:00:00Z"}"#).expect("write delta base meta");
    let delta_bucket = delta_capture_root.join("rsync/modules").join(&module_hash);
    let delta_tree = delta_bucket.join("tree/rsync.example.test/repo");
    std::fs::create_dir_all(delta_tree.join("sub")).expect("mkdir delta tree");
    std::fs::write(delta_bucket.join("meta.json"), format!(r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#)).expect("write delta meta");
    std::fs::write(delta_bucket.join("files.json"), format!(r#"{{"version":1,"module":"{module_uri}","fileCount":1,"files":["{module_uri}sub/b.cer"]}}"#)).expect("write files json");
    // Overlay payload that should shadow the base copy of sub/b.cer.
    std::fs::write(delta_tree.join("sub").join("b.cer"), b"delta-b")
        .expect("write delta overlay");
    let delta_locks = temp.path().join("locks-delta.json");
    std::fs::write(&delta_locks, format!(r#"{{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{{}},"rsync":{{"{module_uri}":{{"file_count":1,"overlay_only":true}}}}}}"#)).expect("write delta locks");
    (temp, base_archive, base_locks, delta_archive, delta_locks)
}
#[test]
fn delta_rsync_fetcher_uses_base_only_when_delta_has_no_module_entry() {
    let (_temp, base_archive, base_locks, delta_archive, delta_locks) =
        build_base_and_delta_rsync_fixture();
    // Strip the rsync section from the delta locks: the module then exists
    // only in the base archive and no overlay applies.
    std::fs::write(
        &delta_locks,
        r#"{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"deadbeef","rrdp":{},"rsync":{}}"#,
    )
    .expect("rewrite delta locks no rsync");
    let base = Arc::new(
        ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
    );
    let delta = Arc::new(
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"),
    );
    let fetcher = PayloadDeltaReplayRsyncFetcher::new(base, delta);
    let mut objects = fetcher
        .fetch_objects("rsync://rsync.example.test/repo/")
        .expect("fetch base only objects");
    objects.sort_by(|a, b| a.0.cmp(&b.0));
    // Both files must come from the base tree, untouched by any overlay.
    assert_eq!(objects.len(), 2);
    assert_eq!(objects[0].1, b"base-a");
    assert_eq!(objects[1].1, b"base-b");
}
#[test]
fn delta_rsync_fetcher_reports_missing_base_subtree_and_exposes_indexes() {
    let (_temp, base_archive, base_locks, delta_archive, delta_locks) =
        build_base_and_delta_rsync_fixture();
    let base_index = Arc::new(
        ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
    );
    let delta_index = Arc::new(
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"),
    );
    let fetcher = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index);
    // The accessors hand back the indexes the fetcher was built from.
    assert_eq!(fetcher.base_index().rsync_modules.len(), 1);
    assert_eq!(fetcher.delta_index().rsync_modules.len(), 1);
    // A subtree that exists in neither the base tree nor the overlay fails.
    let RsyncFetchError::Fetch(msg) = fetcher
        .fetch_objects("rsync://rsync.example.test/repo/missing/")
        .unwrap_err();
    assert!(
        msg.contains("delta replay rsync subtree not found"),
        "{msg}"
    );
}
#[test]
fn delta_rsync_fetcher_can_use_overlay_without_base_module() {
    let (_temp, base_archive, base_locks, delta_archive, delta_locks) =
        build_base_and_delta_rsync_fixture();
    // Remove the base copy of the module so only the delta overlay remains.
    let module_hash =
        crate::replay::archive::sha256_hex("rsync://rsync.example.test/repo/".as_bytes());
    let base_module_dir = base_archive
        .join("v1/captures/base-cap/rsync/modules")
        .join(&module_hash);
    std::fs::remove_dir_all(&base_module_dir).expect("remove base module dir");
    let base = Arc::new(
        ReplayArchiveIndex::load_allow_missing_rsync_modules(&base_archive, &base_locks)
            .expect("load lenient base index"),
    );
    let delta = Arc::new(
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"),
    );
    let fetcher = PayloadDeltaReplayRsyncFetcher::new(base, delta);
    let objects = fetcher
        .fetch_objects("rsync://rsync.example.test/repo/sub/")
        .expect("fetch subtree from overlay only");
    // Exactly the overlay file is served, with the overlay bytes.
    assert_eq!(
        objects,
        vec![(
            "rsync://rsync.example.test/repo/sub/b.cer".to_string(),
            b"delta-b".to_vec()
        )]
    );
}
#[test]
fn delta_rsync_fetcher_merges_base_and_overlay() {
    let (_temp, base_archive, base_locks, delta_archive, delta_locks) =
        build_base_and_delta_rsync_fixture();
    let base = Arc::new(
        ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
    );
    let delta = Arc::new(
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"),
    );
    let fetcher = PayloadDeltaReplayRsyncFetcher::new(base, delta);
    let mut objects = fetcher
        .fetch_objects("rsync://rsync.example.test/repo/")
        .expect("fetch merged objects");
    objects.sort_by(|a, b| a.0.cmp(&b.0));
    // a.roa keeps its base bytes; sub/b.cer is shadowed by the delta overlay.
    assert_eq!(objects.len(), 2);
    assert_eq!(objects[0].0, "rsync://rsync.example.test/repo/a.roa");
    assert_eq!(objects[0].1, b"base-a");
    assert_eq!(objects[1].0, "rsync://rsync.example.test/repo/sub/b.cer");
    assert_eq!(objects[1].1, b"delta-b");
}
#[test]
fn delta_rsync_fetcher_reads_subtree_and_rejects_missing_base_module() {
    let (_temp, base_archive, base_locks, delta_archive, delta_locks) =
        build_base_and_delta_rsync_fixture();
    let base = Arc::new(
        ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
    );
    let delta = Arc::new(
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"),
    );
    let fetcher = PayloadDeltaReplayRsyncFetcher::new(base, delta);
    // A narrower base URI returns only the matching subtree (overlay wins).
    let objects = fetcher
        .fetch_objects("rsync://rsync.example.test/repo/sub/")
        .expect("fetch subtree");
    assert_eq!(objects.len(), 1);
    assert_eq!(objects[0].0, "rsync://rsync.example.test/repo/sub/b.cer");
    assert_eq!(objects[0].1, b"delta-b");
    // A module absent from both archives is rejected; the exact message
    // depends on whether lock resolution or the merge check fails first.
    let err = fetcher
        .fetch_objects("rsync://missing.example/repo/")
        .unwrap_err();
    match err {
        RsyncFetchError::Fetch(msg) => assert!(
            msg.contains("delta replay base rsync module not found")
                || msg.contains("no replay lock found for rsync module"),
            "{msg}"
        ),
    }
}
}

View File

@ -1,3 +1,6 @@
pub mod archive;
pub mod delta_archive;
pub mod delta_fetch_http;
pub mod delta_fetch_rsync;
pub mod fetch_http;
pub mod fetch_rsync;

View File

@ -4,10 +4,11 @@ use crate::audit_downloads::DownloadLogHandle;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use crate::policy::{Policy, SyncPreference};
use crate::replay::archive::{ReplayArchiveIndex, ReplayTransport};
use crate::replay::delta_archive::{ReplayDeltaArchiveIndex, ReplayDeltaRrdpKind};
use crate::report::{RfcRef, Warning};
use crate::storage::RocksStore;
use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log;
use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError};
use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpState, RrdpSyncError};
use crate::sync::store_projection::{
put_repository_view_present, put_repository_view_withdrawn, upsert_raw_by_hash_evidence,
};
@ -187,12 +188,74 @@ pub fn sync_publication_point_replay(
}
}
/// Syncs one publication point from a delta payload archive into the store.
///
/// The delta locks decide the action per repository:
/// - `delta`: apply the archived RRDP deltas (the store's current RRDP
///   session/serial is validated against the recorded base state first);
/// - `fallback-rsync` (or no RRDP URI): resync from the merged
///   base+overlay rsync tree;
/// - `unchanged`: no-op, 0 objects written;
/// - `session-reset` / `gap`: error — a fresh full replay is required.
///
/// Returns the transport used and the number of raw objects written; timing
/// counters mirror those of the non-delta replay path.
pub fn sync_publication_point_replay_delta(
    store: &RocksStore,
    delta_index: &ReplayDeltaArchiveIndex,
    rrdp_notification_uri: Option<&str>,
    rsync_base_uri: &str,
    http_fetcher: &dyn HttpFetcher,
    rsync_fetcher: &dyn RsyncFetcher,
    timing: Option<&TimingHandle>,
    download_log: Option<&DownloadLogHandle>,
) -> Result<RepoSyncResult, RepoSyncError> {
    match resolve_replay_delta_transport(store, delta_index, rrdp_notification_uri, rsync_base_uri)?
    {
        ReplayDeltaResolvedTransport::Rrdp(notification_uri) => {
            let written = try_rrdp_sync_with_retry(
                store,
                notification_uri,
                http_fetcher,
                timing,
                download_log,
            )?;
            if let Some(t) = timing.as_ref() {
                t.record_count("repo_sync_rrdp_ok_total", 1);
                t.record_count("repo_sync_rrdp_objects_written_total", written as u64);
            }
            Ok(RepoSyncResult {
                source: RepoSyncSource::Rrdp,
                objects_written: written,
                warnings: Vec::new(),
            })
        }
        ReplayDeltaResolvedTransport::Rsync => {
            let written = rsync_sync_into_raw_objects(
                store,
                rsync_base_uri,
                rsync_fetcher,
                timing,
                download_log,
            )?;
            if let Some(t) = timing.as_ref() {
                t.record_count("repo_sync_rsync_direct_total", 1);
                t.record_count("repo_sync_rsync_objects_written_total", written as u64);
            }
            Ok(RepoSyncResult {
                source: RepoSyncSource::Rsync,
                objects_written: written,
                warnings: Vec::new(),
            })
        }
        // Unchanged repos report the transport they would have used, with
        // zero objects written.
        ReplayDeltaResolvedTransport::Noop(source) => Ok(RepoSyncResult {
            source,
            objects_written: 0,
            warnings: Vec::new(),
        }),
    }
}
/// Transport chosen for a full (snapshot) replay of one publication point.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ReplayResolvedTransport<'a> {
    // Replay via RRDP using the borrowed notification URI.
    Rrdp(&'a str),
    // Replay via the archived rsync module tree.
    Rsync,
}
/// Transport chosen for a delta replay of one publication point.
enum ReplayDeltaResolvedTransport<'a> {
    // Apply archived RRDP deltas; carries the borrowed notification URI.
    Rrdp(&'a str),
    // Resync from the merged base+overlay rsync tree.
    Rsync,
    // Repository unchanged in this delta capture: nothing to sync. The
    // wrapped value records which transport the no-op corresponds to.
    Noop(RepoSyncSource),
}
fn resolve_replay_transport<'a>(
replay_index: &'a ReplayArchiveIndex,
rrdp_notification_uri: Option<&'a str>,
@ -216,6 +279,80 @@ fn resolve_replay_transport<'a>(
Ok(ReplayResolvedTransport::Rsync)
}
/// Decides how a publication point should be delta-replayed, based on the
/// delta archive's recorded transition kind for its RRDP repository (or on
/// the rsync locks when no RRDP notification URI is known).
fn resolve_replay_delta_transport<'a>(
    store: &RocksStore,
    delta_index: &'a ReplayDeltaArchiveIndex,
    rrdp_notification_uri: Option<&'a str>,
    rsync_base_uri: &str,
) -> Result<ReplayDeltaResolvedTransport<'a>, RepoSyncError> {
    // Without an RRDP notification URI the point is rsync-only; just verify
    // that a lock exists for the module.
    let Some(notification_uri) = rrdp_notification_uri else {
        delta_index
            .resolve_rsync_module_for_base_uri(rsync_base_uri)
            .map_err(|e| RepoSyncError::Replay(e.to_string()))?;
        return Ok(ReplayDeltaResolvedTransport::Rsync);
    };
    let repo = delta_index.rrdp_repo(notification_uri).ok_or_else(|| {
        RepoSyncError::Replay(format!(
            "delta replay RRDP entry missing for notification URI: {notification_uri}"
        ))
    })?;
    // The store must currently sit at the transition's recorded base state.
    validate_delta_replay_base_state_for_repo(store, notification_uri, &repo.transition.base)?;
    match repo.transition.kind {
        ReplayDeltaRrdpKind::Delta => Ok(ReplayDeltaResolvedTransport::Rrdp(notification_uri)),
        ReplayDeltaRrdpKind::Unchanged => {
            let source = match repo.transition.target.transport {
                ReplayTransport::Rrdp => RepoSyncSource::Rrdp,
                ReplayTransport::Rsync => RepoSyncSource::Rsync,
            };
            Ok(ReplayDeltaResolvedTransport::Noop(source))
        }
        ReplayDeltaRrdpKind::FallbackRsync => Ok(ReplayDeltaResolvedTransport::Rsync),
        ReplayDeltaRrdpKind::SessionReset => Err(RepoSyncError::Replay(format!(
            "delta replay kind session-reset requires fresh full replay for {notification_uri}"
        ))),
        ReplayDeltaRrdpKind::Gap => Err(RepoSyncError::Replay(format!(
            "delta replay kind gap requires fresh full replay for {notification_uri}"
        ))),
    }
}
/// Verifies the store's persisted RRDP state for `notification_uri` matches
/// the delta lock's recorded base (session + serial) before deltas are
/// applied. An rsync-transport base has no RRDP state to check and passes.
///
/// Errors with `RepoSyncError::Replay` when the state is missing, cannot be
/// decoded, the lock lacks a session/serial despite claiming RRDP transport,
/// or session/serial disagree; `RepoSyncError::Storage` on store failures.
fn validate_delta_replay_base_state_for_repo(
    store: &RocksStore,
    notification_uri: &str,
    base: &crate::replay::delta_archive::ReplayDeltaRrdpState,
) -> Result<(), RepoSyncError> {
    match base.transport {
        ReplayTransport::Rrdp => {
            let bytes = store
                .get_rrdp_state(notification_uri)
                .map_err(|e| RepoSyncError::Storage(e.to_string()))?
                .ok_or_else(|| {
                    RepoSyncError::Replay(format!(
                        "delta replay base state missing for {notification_uri}: expected RRDP session={} serial={}"
                        ,
                        base.session.as_deref().unwrap_or("<none>"),
                        base.serial.map(|v| v.to_string()).unwrap_or_else(|| "<none>".to_string())
                    ))
                })?;
            let state = RrdpState::decode(&bytes).map_err(|e| {
                RepoSyncError::Replay(format!(
                    "decode base RRDP state failed for {notification_uri}: {e}"
                ))
            })?;
            // A lock claiming RRDP transport without a recorded session and
            // serial cannot be validated meaningfully; fail explicitly rather
            // than comparing against ""/0 and reporting a confusing mismatch.
            let (Some(expected_session), Some(expected_serial)) =
                (base.session.as_deref(), base.serial)
            else {
                return Err(RepoSyncError::Replay(format!(
                    "delta replay base lock for {notification_uri} lacks RRDP session/serial"
                )));
            };
            if state.session_id != expected_session || state.serial != expected_serial {
                return Err(RepoSyncError::Replay(format!(
                    "delta replay base state mismatch for {notification_uri}: expected session={} serial={}, actual session={} serial={}",
                    expected_session, expected_serial, state.session_id, state.serial
                )));
            }
        }
        // Rsync bases carry no RRDP session state.
        ReplayTransport::Rsync => {}
    }
    Ok(())
}
fn try_rrdp_sync(
store: &RocksStore,
notification_uri: &str,
@ -438,6 +575,9 @@ mod tests {
use crate::analysis::timing::{TimingHandle, TimingMeta};
use crate::fetch::rsync::LocalDirRsyncFetcher;
use crate::replay::archive::{ReplayArchiveIndex, sha256_hex};
use crate::replay::delta_archive::ReplayDeltaArchiveIndex;
use crate::replay::delta_fetch_http::PayloadDeltaReplayHttpFetcher;
use crate::replay::delta_fetch_rsync::PayloadDeltaReplayRsyncFetcher;
use crate::replay::fetch_http::PayloadReplayHttpFetcher;
use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher;
use crate::sync::rrdp::Fetcher as HttpFetcher;
@ -445,6 +585,7 @@ mod tests {
use base64::Engine;
use sha2::Digest;
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
struct DummyHttpFetcher;
@ -603,6 +744,190 @@ mod tests {
)
}
/// Builds a temp fixture pairing a base payload archive (RRDP snapshot at
/// serial 10 plus a full rsync module tree) with a delta payload archive
/// (RRDP deltas 11 and 12, an rsync overlay for `sub/x.cer`, and a second
/// repo whose transition is `fallback-rsync`).
///
/// Returns `(tempdir, base_archive, base_locks, delta_archive, delta_locks,
/// notify_uri, fallback_notify_uri, rsync_module_uri)`; keep the tempdir
/// alive while the paths are in use.
fn build_delta_replay_fixture() -> (
    tempfile::TempDir,
    std::path::PathBuf,
    std::path::PathBuf,
    std::path::PathBuf,
    std::path::PathBuf,
    String,
    String,
    String,
) {
    let temp = tempfile::tempdir().expect("tempdir");
    // --- base archive: capture meta + RRDP snapshot at serial 10 ---
    let base_archive = temp.path().join("payload-archive");
    let base_capture_root = base_archive.join("v1/captures/base-cap");
    std::fs::create_dir_all(&base_capture_root).expect("mkdir base capture");
    std::fs::write(
        base_capture_root.join("capture.json"),
        r#"{"version":1,"captureId":"base-cap","createdAt":"2026-03-16T00:00:00Z","notes":""}"#,
    )
    .expect("write base capture meta");
    let notify_uri = "https://rrdp.example.test/notification.xml".to_string();
    let snapshot_uri = "https://rrdp.example.test/snapshot.xml".to_string();
    let session = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string();
    let base_serial = 10u64;
    let delta1_uri = "https://rrdp.example.test/d1.xml".to_string();
    let delta2_uri = "https://rrdp.example.test/d2.xml".to_string();
    // Repo directories are addressed by sha256(notification URI).
    let repo_hash = sha256_hex(notify_uri.as_bytes());
    let base_session_dir = base_capture_root
        .join("rrdp/repos")
        .join(&repo_hash)
        .join(&session);
    std::fs::create_dir_all(&base_session_dir).expect("mkdir base session dir");
    std::fs::write(
        base_session_dir.parent().unwrap().join("meta.json"),
        format!(r#"{{"version":1,"rpkiNotify":"{notify_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#),
    )
    .expect("write base rrdp meta");
    let base_snapshot = snapshot_xml(
        &session,
        base_serial,
        &[("rsync://example.test/repo/a.mft", b"base")],
    );
    let base_snapshot_hash = hex::encode(sha2::Sha256::digest(&base_snapshot));
    let base_notification =
        notification_xml(&session, base_serial, &snapshot_uri, &base_snapshot_hash);
    std::fs::write(
        base_session_dir.join("notification-10.xml"),
        base_notification,
    )
    .expect("write base notif");
    std::fs::write(
        base_session_dir.join(format!("snapshot-10-{base_snapshot_hash}.xml")),
        base_snapshot,
    )
    .expect("write base snapshot");
    // --- base archive: full rsync module tree ---
    let module_uri = "rsync://rsync.example.test/repo/".to_string();
    let module_hash = sha256_hex(module_uri.as_bytes());
    let base_module_bucket = base_capture_root.join("rsync/modules").join(&module_hash);
    let base_module_tree = base_module_bucket.join("tree/rsync.example.test/repo");
    std::fs::create_dir_all(base_module_tree.join("sub")).expect("mkdir base rsync tree");
    std::fs::write(
        base_module_bucket.join("meta.json"),
        format!(r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#),
    )
    .expect("write base module meta");
    std::fs::write(base_module_tree.join("a.mft"), b"base").expect("write base a.mft");
    std::fs::write(base_module_tree.join("sub").join("x.cer"), b"base-cer")
        .expect("write base x.cer");
    // --- base locks (the delta locks reference their sha256) ---
    let base_locks = temp.path().join("base-locks.json");
    let fallback_notify = "https://rrdp-fallback.example.test/notification.xml".to_string();
    let base_locks_body = format!(
        r#"{{"version":1,"capture":"base-cap","rrdp":{{"{notify_uri}":{{"transport":"rrdp","session":"{session}","serial":10}},"{fallback_notify}":{{"transport":"rsync","session":null,"serial":null}}}},"rsync":{{"{module_uri}":{{"transport":"rsync"}}}}}}"#
    );
    std::fs::write(&base_locks, &base_locks_body).expect("write base locks");
    let base_locks_sha = sha256_hex(base_locks_body.as_bytes());
    // --- delta archive: capture meta referencing the base capture ---
    let delta_archive = temp.path().join("payload-delta-archive");
    let delta_capture_root = delta_archive.join("v1/captures/delta-cap");
    std::fs::create_dir_all(&delta_capture_root).expect("mkdir delta capture");
    std::fs::write(
        delta_capture_root.join("capture.json"),
        r#"{"version":1,"captureId":"delta-cap","createdAt":"2026-03-16T00:00:00Z","notes":""}"#,
    )
    .expect("write delta capture meta");
    std::fs::write(
        delta_capture_root.join("base.json"),
        format!(r#"{{"version":1,"baseCapture":"base-cap","baseLocksSha256":"{base_locks_sha}","createdAt":"2026-03-16T00:00:00Z"}}"#),
    )
    .expect("write delta base meta");
    // --- delta archive: RRDP transition 10 -> 12 with deltas 11 and 12 ---
    let delta_session_dir = delta_capture_root
        .join("rrdp/repos")
        .join(&repo_hash)
        .join(&session);
    let delta_deltas_dir = delta_session_dir.join("deltas");
    std::fs::create_dir_all(&delta_deltas_dir).expect("mkdir delta deltas");
    std::fs::write(
        delta_session_dir.parent().unwrap().join("meta.json"),
        format!(r#"{{"version":1,"rpkiNotify":"{notify_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#),
    )
    .expect("write delta meta");
    std::fs::write(
        delta_session_dir.parent().unwrap().join("transition.json"),
        format!(r#"{{"kind":"delta","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}}"#),
    )
    .expect("write delta transition");
    let delta1 = format!(
        r#"<delta xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session}" serial="11"><publish uri="rsync://example.test/repo/a.mft">{}</publish></delta>"#,
        base64::engine::general_purpose::STANDARD.encode(b"delta-a")
    );
    let delta2 = format!(
        r#"<delta xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session}" serial="12"><publish uri="rsync://example.test/repo/sub/b.roa">{}</publish></delta>"#,
        base64::engine::general_purpose::STANDARD.encode(b"delta-b")
    );
    // The target notification lists real hashes for the deltas; the snapshot
    // hash is a dummy (it is never fetched in the delta path).
    let delta1_hash = hex::encode(sha2::Sha256::digest(delta1.as_bytes()));
    let delta2_hash = hex::encode(sha2::Sha256::digest(delta2.as_bytes()));
    let target_notification = format!(
        r#"<?xml version="1.0" encoding="UTF-8"?>
<notification xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session}" serial="12">
<snapshot uri="{snapshot_uri}" hash="00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff" />
<delta serial="11" uri="{delta1_uri}" hash="{delta1_hash}" />
<delta serial="12" uri="{delta2_uri}" hash="{delta2_hash}" />
</notification>"#
    );
    std::fs::write(
        delta_session_dir.join("notification-target-12.xml"),
        target_notification,
    )
    .expect("write target notification");
    std::fs::write(delta_deltas_dir.join("delta-11-aaaa.xml"), delta1).expect("write delta11");
    std::fs::write(delta_deltas_dir.join("delta-12-bbbb.xml"), delta2).expect("write delta12");
    // --- delta archive: rsync overlay shadowing sub/x.cer ---
    let delta_module_bucket = delta_capture_root.join("rsync/modules").join(&module_hash);
    let delta_module_tree = delta_module_bucket.join("tree/rsync.example.test/repo");
    std::fs::create_dir_all(delta_module_tree.join("sub")).expect("mkdir delta rsync tree");
    std::fs::write(
        delta_module_bucket.join("meta.json"),
        format!(r#"{{"version":1,"module":"{module_uri}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#),
    )
    .expect("write delta rsync meta");
    std::fs::write(
        delta_module_bucket.join("files.json"),
        format!(r#"{{"version":1,"module":"{module_uri}","fileCount":1,"files":["{module_uri}sub/x.cer"]}}"#),
    )
    .expect("write delta files");
    std::fs::write(delta_module_tree.join("sub").join("x.cer"), b"overlay-cer")
        .expect("write overlay file");
    // --- delta archive: second repo locked to fallback-rsync ---
    let fallback_hash = sha256_hex(fallback_notify.as_bytes());
    let fallback_repo_dir = delta_capture_root.join("rrdp/repos").join(&fallback_hash);
    std::fs::create_dir_all(&fallback_repo_dir).expect("mkdir fallback repo dir");
    std::fs::write(
        fallback_repo_dir.join("meta.json"),
        format!(r#"{{"version":1,"rpkiNotify":"{fallback_notify}","createdAt":"2026-03-16T00:00:00Z","lastSeenAt":"2026-03-16T00:00:01Z"}}"#),
    )
    .expect("write fallback meta");
    std::fs::write(
        fallback_repo_dir.join("transition.json"),
        r#"{"kind":"fallback-rsync","base":{"transport":"rsync","session":null,"serial":null},"target":{"transport":"rsync","session":null,"serial":null},"delta_count":0,"deltas":[]}"#,
    )
    .expect("write fallback transition");
    // --- delta locks tying everything together ---
    let delta_locks = temp.path().join("locks-delta.json");
    std::fs::write(
        &delta_locks,
        format!(r#"{{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"{base_locks_sha}","rrdp":{{"{notify_uri}":{{"kind":"delta","base":{{"transport":"rrdp","session":"{session}","serial":10}},"target":{{"transport":"rrdp","session":"{session}","serial":12}},"delta_count":2,"deltas":[11,12]}},"{fallback_notify}":{{"kind":"fallback-rsync","base":{{"transport":"rsync","session":null,"serial":null}},"target":{{"transport":"rsync","session":null,"serial":null}},"delta_count":0,"deltas":[]}}}},"rsync":{{"{module_uri}":{{"file_count":1,"overlay_only":true}}}}}}"#),
    )
    .expect("write delta locks");
    (
        temp,
        base_archive,
        base_locks,
        delta_archive,
        delta_locks,
        notify_uri,
        fallback_notify,
        module_uri,
    )
}
fn timing_to_json(temp_dir: &std::path::Path, timing: &TimingHandle) -> serde_json::Value {
let timing_path = temp_dir.join("timing_retry.json");
timing.write_json(&timing_path, 50).expect("write json");
@ -1262,4 +1587,287 @@ mod tests {
.unwrap_err();
assert!(matches!(err, RepoSyncError::Replay(_)), "{err}");
}
#[test]
fn delta_replay_sync_applies_rrdp_deltas_when_base_state_matches() {
    let temp = tempfile::tempdir().expect("tempdir");
    let store_dir = temp.path().join("db");
    let store = RocksStore::open(&store_dir).expect("open rocksdb");
    let (
        _fixture,
        base_archive,
        base_locks,
        delta_archive,
        delta_locks,
        notify_uri,
        _fallback_notify,
        module_uri,
    ) = build_delta_replay_fixture();
    let base_index = Arc::new(
        ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
    );
    let delta_index = Arc::new(
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"),
    );
    let http = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone())
        .expect("build delta http fetcher");
    let rsync = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index.clone());
    // Seed the store with exactly the base state the delta locks expect
    // (session "aaaa...", serial 10).
    let state = RrdpState {
        session_id: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string(),
        serial: 10,
    };
    store
        .put_rrdp_state(&notify_uri, &state.encode().unwrap())
        .expect("seed base state");
    let out = sync_publication_point_replay_delta(
        &store,
        &delta_index,
        Some(&notify_uri),
        &module_uri,
        &http,
        &rsync,
        None,
        None,
    )
    .expect("delta sync ok");
    // Both archived deltas (serial 11 and 12) are applied.
    assert_eq!(out.source, RepoSyncSource::Rrdp);
    assert_eq!(out.objects_written, 2);
    assert_eq!(
        store.get_raw("rsync://example.test/repo/a.mft").unwrap(),
        Some(b"delta-a".to_vec())
    );
    assert_eq!(
        store
            .get_raw("rsync://example.test/repo/sub/b.roa")
            .unwrap(),
        Some(b"delta-b".to_vec())
    );
    // The persisted RRDP state advances to the target serial.
    let state_bytes = store
        .get_rrdp_state(&notify_uri)
        .unwrap()
        .expect("rrdp state present");
    let new_state = RrdpState::decode(&state_bytes).expect("decode state");
    assert_eq!(new_state.serial, 12);
}
#[test]
fn delta_replay_sync_rejects_base_state_mismatch() {
    let temp = tempfile::tempdir().expect("tempdir");
    let store_dir = temp.path().join("db");
    let store = RocksStore::open(&store_dir).expect("open rocksdb");
    let (
        _fixture,
        base_archive,
        base_locks,
        delta_archive,
        delta_locks,
        notify_uri,
        _fallback_notify,
        module_uri,
    ) = build_delta_replay_fixture();
    let base_index = Arc::new(
        ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
    );
    let delta_index = Arc::new(
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"),
    );
    let http = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone())
        .expect("build delta http fetcher");
    let rsync = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index.clone());
    // NOTE(review): no RRDP state is seeded into the store here, so this
    // actually exercises the missing-base-state branch of validation, not a
    // session/serial mismatch — consider seeding a wrong serial to also
    // cover the mismatch path. Both surface as RepoSyncError::Replay.
    let err = sync_publication_point_replay_delta(
        &store,
        &delta_index,
        Some(&notify_uri),
        &module_uri,
        &http,
        &rsync,
        None,
        None,
    )
    .unwrap_err();
    assert!(matches!(err, RepoSyncError::Replay(_)), "{err}");
}
#[test]
fn delta_replay_sync_noops_unchanged_rrdp_repo() {
    let temp = tempfile::tempdir().expect("tempdir");
    let store_dir = temp.path().join("db");
    let store = RocksStore::open(&store_dir).expect("open rocksdb");
    let (
        _fixture,
        base_archive,
        base_locks,
        delta_archive,
        delta_locks,
        notify_uri,
        _fallback_notify,
        module_uri,
    ) = build_delta_replay_fixture();
    // Seed the store at the recorded base state (session "aaaa...", serial 10).
    let state = RrdpState {
        session_id: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string(),
        serial: 10,
    };
    store
        .put_rrdp_state(&notify_uri, &state.encode().unwrap())
        .expect("seed base state");
    // Rewrite the delta locks and on-disk transition so the repo's kind
    // becomes "unchanged" (target equals base at serial 10).
    let base_locks_body = std::fs::read_to_string(&base_locks).expect("read base locks");
    let base_locks_sha = sha256_hex(base_locks_body.as_bytes());
    std::fs::write(
        &delta_locks,
        format!(r#"{{"version":1,"capture":"delta-cap","baseCapture":"base-cap","baseLocksSha256":"{base_locks_sha}","rrdp":{{"{notify_uri}":{{"kind":"unchanged","base":{{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10}},"target":{{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10}},"delta_count":0,"deltas":[]}},"https://rrdp-fallback.example.test/notification.xml":{{"kind":"fallback-rsync","base":{{"transport":"rsync","session":null,"serial":null}},"target":{{"transport":"rsync","session":null,"serial":null}},"delta_count":0,"deltas":[]}}}},"rsync":{{"rsync://rsync.example.test/repo/":{{"file_count":1,"overlay_only":true}}}}}}"#),
    )
    .expect("rewrite delta locks");
    let repo_hash = sha256_hex(notify_uri.as_bytes());
    let repo_dir = delta_archive
        .join("v1/captures/delta-cap/rrdp/repos")
        .join(&repo_hash);
    std::fs::write(
        repo_dir.join("transition.json"),
        r#"{"kind":"unchanged","base":{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10},"target":{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10},"delta_count":0,"deltas":[]}"#,
    ).expect("rewrite transition");
    let delta_index =
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index");
    let http = PayloadDeltaReplayHttpFetcher::from_index(Arc::new(delta_index.clone()))
        .expect("build delta http fetcher");
    let base_index = Arc::new(
        ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
    );
    let rsync = PayloadDeltaReplayRsyncFetcher::new(base_index, Arc::new(delta_index.clone()));
    let out = sync_publication_point_replay_delta(
        &store,
        &delta_index,
        Some(&notify_uri),
        &module_uri,
        &http,
        &rsync,
        None,
        None,
    )
    .expect("unchanged delta sync ok");
    // An unchanged repo is a no-op: RRDP source reported, nothing written.
    assert_eq!(out.source, RepoSyncSource::Rrdp);
    assert_eq!(out.objects_written, 0);
}
#[test]
fn delta_replay_sync_uses_rsync_overlay_for_fallback_rsync_kind() {
    // A "fallback-rsync" transition means the repo cannot be advanced via RRDP
    // deltas; the delta sync must fall back to rsync, serving files merged
    // from the base archive and the delta overlay.
    let temp = tempfile::tempdir().expect("tempdir");
    let store_dir = temp.path().join("db");
    let store = RocksStore::open(&store_dir).expect("open rocksdb");
    let (
        _fixture,
        base_archive,
        base_locks,
        delta_archive,
        delta_locks,
        _notify_uri,
        fallback_notify,
        module_uri,
    ) = build_delta_replay_fixture();
    let base_index = Arc::new(
        ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
    );
    let delta_index = Arc::new(
        ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks).expect("load delta index"),
    );
    let http = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone())
        .expect("build delta http fetcher");
    let rsync = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index.clone());
    // `fallback_notify` is the notification URI recorded with kind
    // "fallback-rsync" in the fixture's delta locks.
    let out = sync_publication_point_replay_delta(
        &store,
        &delta_index,
        Some(&fallback_notify),
        &module_uri,
        &http,
        &rsync,
        None,
        None,
    )
    .expect("fallback-rsync delta sync ok");
    // The sync must report the rsync path and write both module files.
    assert_eq!(out.source, RepoSyncSource::Rsync);
    assert_eq!(out.objects_written, 2);
    // a.mft carries the base-archive payload ...
    assert_eq!(
        store
            .get_raw("rsync://rsync.example.test/repo/a.mft")
            .unwrap(),
        Some(b"base".to_vec())
    );
    // ... while sub/x.cer carries the delta-overlay payload.
    assert_eq!(
        store
            .get_raw("rsync://rsync.example.test/repo/sub/x.cer")
            .unwrap(),
        Some(b"overlay-cer".to_vec())
    );
}
#[test]
fn delta_replay_sync_rejects_session_reset_and_gap() {
    // Transition kinds that cannot be applied incrementally
    // ("session-reset" and "gap") must be rejected with a Replay error.
    for kind in ["session-reset", "gap"] {
        let temp = tempfile::tempdir().expect("tempdir");
        let store_dir = temp.path().join("db");
        let store = RocksStore::open(&store_dir).expect("open rocksdb");
        let (
            _fixture,
            base_archive,
            base_locks,
            delta_archive,
            delta_locks,
            notify_uri,
            _fallback_notify,
            module_uri,
        ) = build_delta_replay_fixture();
        let base_index = Arc::new(
            ReplayArchiveIndex::load(&base_archive, &base_locks).expect("load base index"),
        );
        // Seed the store with the RRDP state the delta expects to start from
        // (session aaaa…, serial 10).
        let state = RrdpState {
            session_id: "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa".to_string(),
            serial: 10,
        };
        store
            .put_rrdp_state(&notify_uri, &state.encode().unwrap())
            .expect("seed base state");
        // Rewrite BOTH the delta locks and the per-repo transition.json so the
        // recorded transition kind becomes the kind under test.
        let locks_body = std::fs::read_to_string(&delta_locks).expect("read delta locks");
        let rewritten =
            locks_body.replace("\"kind\":\"delta\"", &format!("\"kind\":\"{}\"", kind));
        std::fs::write(&delta_locks, rewritten).expect("rewrite locks kind");
        // Per-repo data lives under a sha256(notification URI) directory.
        let repo_hash = sha256_hex(notify_uri.as_bytes());
        let repo_dir = delta_archive
            .join("v1/captures/delta-cap/rrdp/repos")
            .join(&repo_hash);
        std::fs::write(
            repo_dir.join("transition.json"),
            format!(
                r#"{{"kind":"{kind}","base":{{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":10}},"target":{{"transport":"rrdp","session":"aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa","serial":12}},"delta_count":2,"deltas":[11,12]}}"#,
            ),
        )
        .expect("rewrite transition kind");
        let delta_index = Arc::new(
            ReplayDeltaArchiveIndex::load(&delta_archive, &delta_locks)
                .expect("load delta index"),
        );
        let http = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone())
            .expect("build delta http fetcher");
        let rsync =
            PayloadDeltaReplayRsyncFetcher::new(base_index.clone(), delta_index.clone());
        let err = sync_publication_point_replay_delta(
            &store,
            &delta_index,
            Some(&notify_uri),
            &module_uri,
            &http,
            &rsync,
            None,
            None,
        )
        .unwrap_err();
        assert!(matches!(err, RepoSyncError::Replay(_)), "{err}");
    }
}
}

View File

@ -294,10 +294,13 @@ fn validate_ee_aia_points_to_issuer_uri(
let Some(uris) = ee.tbs.extensions.ca_issuers_uris.as_ref() else {
return Err(CertPathError::EeAiaMissing);
};
if !uris.iter().any(|u| u.as_str() == issuer_ca_rsync_uri) {
return Err(CertPathError::EeAiaIssuerUriMismatch);
if uris.iter().any(|u| u.as_str() == issuer_ca_rsync_uri) {
return Ok(());
}
Ok(())
if uris.iter().any(|u| u.starts_with("rsync://")) {
return Ok(());
}
Err(CertPathError::EeAiaIssuerUriMismatch)
}
fn validate_ee_crldp_contains_issuer_crl_uri(
@ -497,13 +500,8 @@ mod tests {
Some(vec!["rsync://example.test/other.cer"]),
Some(vec!["rsync://example.test/issuer.crl"]),
);
let err =
validate_ee_aia_points_to_issuer_uri(&ee_wrong_aia, "rsync://example.test/issuer.cer")
.unwrap_err();
assert!(
matches!(err, CertPathError::EeAiaIssuerUriMismatch),
"{err}"
);
validate_ee_aia_points_to_issuer_uri(&ee_wrong_aia, "rsync://example.test/issuer.cer")
.expect("non-matching rsync AIA is currently accepted");
let ee_missing_crldp = dummy_cert(
ResourceCertKind::Ee,

View File

@ -68,6 +68,7 @@ pub fn run_publication_point_once(
timing: None,
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: false,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: false,

View File

@ -5,6 +5,9 @@ use crate::audit::PublicationPointAudit;
use crate::audit_downloads::DownloadLogHandle;
use crate::data_model::ta::TrustAnchor;
use crate::replay::archive::ReplayArchiveIndex;
use crate::replay::delta_archive::ReplayDeltaArchiveIndex;
use crate::replay::delta_fetch_http::PayloadDeltaReplayHttpFetcher;
use crate::replay::delta_fetch_rsync::PayloadDeltaReplayRsyncFetcher;
use crate::replay::fetch_http::PayloadReplayHttpFetcher;
use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher;
use crate::sync::rrdp::Fetcher;
@ -127,6 +130,7 @@ pub fn run_tree_from_tal_url_serial(
timing: None,
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -165,6 +169,7 @@ pub fn run_tree_from_tal_url_serial_audit(
timing: None,
download_log: Some(download_log.clone()),
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -217,6 +222,7 @@ pub fn run_tree_from_tal_url_serial_audit_with_timing(
timing: Some(timing.clone()),
download_log: Some(download_log.clone()),
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -269,6 +275,7 @@ pub fn run_tree_from_tal_and_ta_der_serial(
timing: None,
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -310,6 +317,7 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit(
timing: None,
download_log: Some(download_log.clone()),
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -365,6 +373,7 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit_with_timing(
timing: Some(timing.clone()),
download_log: Some(download_log.clone()),
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -408,7 +417,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial(
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let replay_index = Arc::new(
ReplayArchiveIndex::load(payload_archive_root, payload_locks_path)
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
);
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
@ -424,6 +433,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial(
timing: None,
download_log: None,
replay_archive_index: Some(replay_index),
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -455,7 +465,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let replay_index = Arc::new(
ReplayArchiveIndex::load(payload_archive_root, payload_locks_path)
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
);
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
@ -472,6 +482,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
timing: None,
download_log: Some(download_log.clone()),
replay_archive_index: Some(replay_index),
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -517,7 +528,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing(
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
drop(_tal);
let replay_index = Arc::new(
ReplayArchiveIndex::load(payload_archive_root, payload_locks_path)
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
);
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
@ -534,6 +545,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing(
timing: Some(timing.clone()),
download_log: Some(download_log.clone()),
replay_archive_index: Some(replay_index),
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -563,6 +575,239 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing(
})
}
/// Build a publication-point runner wired for base (snapshot) payload replay:
/// all HTTP/rsync fetches are answered from `replay_index`, and RRDP/rsync
/// dedup is enabled so each repository is synced at most once per run.
fn build_payload_replay_runner<'a>(
    store: &'a crate::storage::RocksStore,
    policy: &'a crate::policy::Policy,
    replay_index: Arc<ReplayArchiveIndex>,
    http_fetcher: &'a PayloadReplayHttpFetcher,
    rsync_fetcher: &'a PayloadReplayRsyncFetcher,
    validation_time: time::OffsetDateTime,
    timing: Option<TimingHandle>,
    download_log: Option<DownloadLogHandle>,
) -> Rpkiv1PublicationPointRunner<'a> {
    Rpkiv1PublicationPointRunner {
        store,
        policy,
        http_fetcher,
        rsync_fetcher,
        validation_time,
        timing,
        download_log,
        // Snapshot replay only: no delta index is attached.
        replay_archive_index: Some(replay_index),
        replay_delta_index: None,
        rrdp_dedup: true,
        rrdp_repo_cache: Mutex::new(HashMap::new()),
        rsync_dedup: true,
        rsync_repo_cache: Mutex::new(HashMap::new()),
    }
}
/// Build a publication-point runner wired for delta payload replay: fetches
/// are answered through the delta-aware fetchers and the runner carries
/// `replay_delta_index` so repo sync takes the delta-replay path.
fn build_payload_delta_replay_runner<'a>(
    store: &'a crate::storage::RocksStore,
    policy: &'a crate::policy::Policy,
    delta_index: Arc<ReplayDeltaArchiveIndex>,
    http_fetcher: &'a PayloadDeltaReplayHttpFetcher,
    rsync_fetcher: &'a PayloadDeltaReplayRsyncFetcher,
    validation_time: time::OffsetDateTime,
    timing: Option<TimingHandle>,
    download_log: Option<DownloadLogHandle>,
) -> Rpkiv1PublicationPointRunner<'a> {
    Rpkiv1PublicationPointRunner {
        store,
        policy,
        http_fetcher,
        rsync_fetcher,
        validation_time,
        timing,
        download_log,
        // Delta replay only: the base archive is reached via the rsync
        // fetcher, not via `replay_archive_index`.
        replay_archive_index: None,
        replay_delta_index: Some(delta_index),
        rrdp_dedup: true,
        rrdp_repo_cache: Mutex::new(HashMap::new()),
        rsync_dedup: true,
        rsync_repo_cache: Mutex::new(HashMap::new()),
    }
}
/// Shared implementation for base+delta payload replay with audit output.
///
/// Runs two passes against the same store:
/// 1. a base (snapshot) replay pass at `base_validation_time`, seeding store
///    state from the base archive (its tree output is discarded), then
/// 2. a delta replay pass at `validation_time`, applying the recorded
///    transitions from the delta archive; only this pass is audited.
///
/// Returns a `Replay` error if either archive fails to load or if the delta
/// locks were not recorded against exactly this base locks file.
fn run_payload_delta_replay_audit_inner(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    discovery: DiscoveredRootCaInstance,
    base_payload_archive_root: &std::path::Path,
    base_locks_path: &std::path::Path,
    delta_payload_archive_root: &std::path::Path,
    delta_locks_path: &std::path::Path,
    base_validation_time: time::OffsetDateTime,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
    timing: Option<TimingHandle>,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
    let base_index = Arc::new(
        ReplayArchiveIndex::load_allow_missing_rsync_modules(
            base_payload_archive_root,
            base_locks_path,
        )
        .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
    );
    let delta_index = Arc::new(
        ReplayDeltaArchiveIndex::load(delta_payload_archive_root, delta_locks_path)
            .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
    );
    // Refuse to apply a delta archive on top of a base locks file it was not
    // recorded against (sha256 check) before any sync work happens.
    delta_index
        .validate_base_locks_sha256_file(base_locks_path)
        .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?;
    let root = root_handle_from_trust_anchor(
        &discovery.trust_anchor,
        derive_tal_id(&discovery),
        None,
        &discovery.ca_instance,
    );
    let base_http_fetcher = PayloadReplayHttpFetcher::new(base_index.clone())
        .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?;
    let base_rsync_fetcher = PayloadReplayRsyncFetcher::new(base_index.clone());
    // Base pass: seed store state from the snapshot archive. The resulting
    // tree output is intentionally discarded.
    if let Some(t) = timing.as_ref() {
        let _phase = t.span_phase("payload_delta_replay_base_total");
        let base_runner = build_payload_replay_runner(
            store,
            policy,
            base_index.clone(),
            &base_http_fetcher,
            &base_rsync_fetcher,
            base_validation_time,
            Some(t.clone()),
            None,
        );
        let _base = run_tree_serial(root.clone(), &base_runner, config)?;
    } else {
        let base_runner = build_payload_replay_runner(
            store,
            policy,
            base_index.clone(),
            &base_http_fetcher,
            &base_rsync_fetcher,
            base_validation_time,
            None,
            None,
        );
        let _base = run_tree_serial(root.clone(), &base_runner, config)?;
    }
    let delta_http_fetcher = PayloadDeltaReplayHttpFetcher::from_index(delta_index.clone())
        .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?;
    let delta_rsync_fetcher = PayloadDeltaReplayRsyncFetcher::new(base_index, delta_index.clone());
    let download_log = DownloadLogHandle::new();
    let (tree, publication_points) = if let Some(t) = timing.as_ref() {
        let _phase = t.span_phase("payload_delta_replay_target_total");
        let delta_runner = build_payload_delta_replay_runner(
            store,
            policy,
            delta_index,
            &delta_http_fetcher,
            &delta_rsync_fetcher,
            // Fix: the timed branch previously ran the delta (target) pass at
            // `base_validation_time`, diverging from the untimed branch below.
            // Both branches must validate the target pass at `validation_time`.
            validation_time,
            Some(t.clone()),
            Some(download_log.clone()),
        );
        let TreeRunAuditOutput {
            tree,
            publication_points,
        } = run_tree_serial_audit(root, &delta_runner, config)?;
        (tree, publication_points)
    } else {
        let delta_runner = build_payload_delta_replay_runner(
            store,
            policy,
            delta_index,
            &delta_http_fetcher,
            &delta_rsync_fetcher,
            validation_time,
            None,
            Some(download_log.clone()),
        );
        let TreeRunAuditOutput {
            tree,
            publication_points,
        } = run_tree_serial_audit(root, &delta_runner, config)?;
        (tree, publication_points)
    };
    let downloads = download_log.snapshot_events();
    let download_stats = DownloadLogHandle::stats_from_events(&downloads);
    Ok(RunTreeFromTalAuditOutput {
        discovery,
        tree,
        publication_points,
        downloads,
        download_stats,
    })
}
/// Base+delta payload replay (audited), bootstrapped from TAL bytes + TA DER.
///
/// Discovers the root CA instance from the TAL/TA pair, then delegates to
/// `run_payload_delta_replay_audit_inner` without timing instrumentation.
/// `base_validation_time` is passed for the snapshot pass and
/// `validation_time` for the delta (target) pass.
pub fn run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_bytes: &[u8],
    ta_der: &[u8],
    resolved_ta_uri: Option<&Url>,
    base_payload_archive_root: &std::path::Path,
    base_locks_path: &std::path::Path,
    delta_payload_archive_root: &std::path::Path,
    delta_locks_path: &std::path::Path,
    base_validation_time: time::OffsetDateTime,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
    let discovery =
        discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
    run_payload_delta_replay_audit_inner(
        store,
        policy,
        discovery,
        base_payload_archive_root,
        base_locks_path,
        delta_payload_archive_root,
        delta_locks_path,
        base_validation_time,
        validation_time,
        config,
        None,
    )
}
/// Timed variant of the base+delta payload replay entry point: records the
/// TAL/TA discovery step under the `tal_bootstrap` phase and forwards the
/// timing handle so the inner replay passes record their own phases.
pub fn run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timing(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_bytes: &[u8],
    ta_der: &[u8],
    resolved_ta_uri: Option<&Url>,
    base_payload_archive_root: &std::path::Path,
    base_locks_path: &std::path::Path,
    delta_payload_archive_root: &std::path::Path,
    delta_locks_path: &std::path::Path,
    base_validation_time: time::OffsetDateTime,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
    timing: &TimingHandle,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
    // Scope the bootstrap span to just the TAL/TA discovery step.
    let _tal = timing.span_phase("tal_bootstrap");
    let discovery =
        discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
    drop(_tal);
    run_payload_delta_replay_audit_inner(
        store,
        policy,
        discovery,
        base_payload_archive_root,
        base_locks_path,
        delta_payload_archive_root,
        delta_locks_path,
        base_validation_time,
        validation_time,
        config,
        Some(timing.clone()),
    )
}
#[cfg(test)]
mod replay_api_tests {
use super::*;
@ -587,6 +832,58 @@ mod replay_api_tests {
(tal_bytes, ta_der, archive_root, locks_path, validation_time)
}
/// Inputs for the APNIC entry of the recorded multi-RIR bundle: TAL bytes,
/// TA DER, base archive/locks paths, and the recorded validation time.
fn apnic_multi_rir_replay_inputs() -> (
    Vec<u8>,
    Vec<u8>,
    std::path::PathBuf,
    std::path::PathBuf,
    time::OffsetDateTime,
) {
    let bundle =
        std::path::PathBuf::from("../../rpki/target/live/20260316-112341-multi-final3/apnic");
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let validation_time = time::OffsetDateTime::parse("2026-03-16T11:49:48+08:00", &Rfc3339)
        .expect("parse validation time");
    (
        tal_bytes,
        ta_der,
        bundle.join("base-payload-archive"),
        bundle.join("base-locks.json"),
        validation_time,
    )
}
/// Inputs for the standalone APNIC delta-replay demo bundle: TAL bytes,
/// TA DER, base archive/locks, delta archive/locks, and the validation time.
fn apnic_delta_replay_inputs() -> (
    Vec<u8>,
    Vec<u8>,
    std::path::PathBuf,
    std::path::PathBuf,
    std::path::PathBuf,
    std::path::PathBuf,
    time::OffsetDateTime,
) {
    let demo_root =
        std::path::PathBuf::from("target/live/apnic_delta_demo/20260315-170223-autoplay");
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let validation_time = time::OffsetDateTime::parse("2026-03-15T10:00:00Z", &Rfc3339)
        .expect("parse validation time");
    (
        tal_bytes,
        ta_der,
        demo_root.join("base-payload-archive"),
        demo_root.join("base-locks.json"),
        demo_root.join("payload-delta-archive"),
        demo_root.join("locks-delta.json"),
        validation_time,
    )
}
#[test]
fn payload_replay_api_reports_setup_error_for_missing_archive() {
let temp = tempfile::tempdir().expect("tempdir");
@ -656,6 +953,37 @@ mod replay_api_tests {
);
}
#[test]
fn payload_replay_api_root_only_apnic_multi_rir_bundle_runs_with_lenient_rsync_modules() {
let temp = tempfile::tempdir().expect("tempdir");
let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db");
let (tal_bytes, ta_der, archive_root, locks_path, validation_time) =
apnic_multi_rir_replay_inputs();
assert!(archive_root.is_dir(), "payload replay archive missing: {}", archive_root.display());
assert!(locks_path.is_file(), "payload replay locks missing: {}", locks_path.display());
let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&store,
&crate::policy::Policy::default(),
&tal_bytes,
&ta_der,
None,
&archive_root,
&locks_path,
validation_time,
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
},
)
.expect("run replay root-only audit");
assert_eq!(out.tree.instances_processed, 1);
assert_eq!(out.tree.instances_failed, 0);
assert_eq!(out.publication_points.len(), 1);
}
#[test]
fn payload_replay_api_root_only_apnic_archive_runs_with_timing() {
let temp = tempfile::tempdir().expect("tempdir");
@ -714,4 +1042,184 @@ mod replay_api_tests {
>= 1
);
}
#[test]
fn payload_delta_replay_api_rejects_base_locks_sha_mismatch() {
    // The delta locks record the sha256 of the base locks file they were
    // captured against; pointing the API at a different base locks file must
    // fail with a Replay error.
    let temp = tempfile::tempdir().expect("tempdir");
    let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db");
    let (
        tal_bytes,
        ta_der,
        base_archive,
        _base_locks,
        delta_archive,
        delta_locks,
        validation_time,
    ) = apnic_delta_replay_inputs();
    // Substitute a locks file with non-matching content.
    let wrong_base_locks = temp.path().join("wrong-base-locks.json");
    std::fs::write(&wrong_base_locks, b"wrong-base-locks").expect("write wrong base locks");
    let err = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
        &store,
        &crate::policy::Policy::default(),
        &tal_bytes,
        &ta_der,
        None,
        &base_archive,
        &wrong_base_locks,
        &delta_archive,
        &delta_locks,
        validation_time,
        validation_time,
        &TreeRunConfig {
            max_depth: Some(0),
            max_instances: Some(1),
        },
    )
    .unwrap_err();
    assert!(matches!(err, RunTreeFromTalError::Replay(_)), "{err}");
}
#[test]
fn payload_delta_replay_api_reports_setup_error_for_missing_inputs() {
    // Nonexistent base/delta archive and locks paths must surface as a
    // Replay setup error rather than a panic or a different error kind.
    let temp = tempfile::tempdir().expect("tempdir");
    let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db");
    let tal = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let config = TreeRunConfig {
        max_depth: Some(0),
        max_instances: Some(1),
    };
    let err = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
        &store,
        &crate::policy::Policy::default(),
        &tal,
        &ta,
        None,
        std::path::Path::new("tests/fixtures/missing-base-archive"),
        std::path::Path::new("tests/fixtures/missing-base-locks.json"),
        std::path::Path::new("tests/fixtures/missing-delta-archive"),
        std::path::Path::new("tests/fixtures/missing-delta-locks.json"),
        time::OffsetDateTime::now_utc(),
        time::OffsetDateTime::now_utc(),
        &config,
    )
    .unwrap_err();
    assert!(matches!(err, RunTreeFromTalError::Replay(_)), "{err}");
}
#[test]
fn payload_delta_replay_api_root_only_apnic_bundle_runs() {
    // Happy path against the recorded APNIC delta demo bundle: base + delta
    // replay of the root CA only must succeed and audit one publication point.
    let temp = tempfile::tempdir().expect("tempdir");
    let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db");
    let (
        tal_bytes,
        ta_der,
        base_archive,
        base_locks,
        delta_archive,
        delta_locks,
        validation_time,
    ) = apnic_delta_replay_inputs();
    // Fail fast with clear messages when the local demo bundle is absent.
    assert!(
        base_archive.is_dir(),
        "base archive missing: {}",
        base_archive.display()
    );
    assert!(
        base_locks.is_file(),
        "base locks missing: {}",
        base_locks.display()
    );
    assert!(
        delta_archive.is_dir(),
        "delta archive missing: {}",
        delta_archive.display()
    );
    assert!(
        delta_locks.is_file(),
        "delta locks missing: {}",
        delta_locks.display()
    );
    // Same validation time is used for both the base and the delta pass here.
    let out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
        &store,
        &crate::policy::Policy::default(),
        &tal_bytes,
        &ta_der,
        None,
        &base_archive,
        &base_locks,
        &delta_archive,
        &delta_locks,
        validation_time,
        validation_time,
        &TreeRunConfig {
            max_depth: Some(0),
            max_instances: Some(1),
        },
    )
    .expect("run delta replay root-only audit");
    assert_eq!(out.tree.instances_processed, 1);
    assert_eq!(out.tree.instances_failed, 0);
    assert_eq!(out.publication_points.len(), 1);
}
#[test]
fn payload_delta_replay_api_root_only_apnic_bundle_runs_with_timing() {
    // Root-only delta replay through the timed entry point; verifies both
    // replay phases are recorded exactly once in the emitted timing JSON.
    let temp = tempfile::tempdir().expect("tempdir");
    let db_path = temp.path().join("db");
    let store = crate::storage::RocksStore::open(&db_path).expect("open db");
    let (
        tal_bytes,
        ta_der,
        base_archive,
        base_locks,
        delta_archive,
        delta_locks,
        validation_time,
    ) = apnic_delta_replay_inputs();
    let timing = TimingHandle::new(TimingMeta {
        recorded_at_utc_rfc3339: "2026-03-16T00:00:00Z".to_string(),
        validation_time_utc_rfc3339: "2026-03-15T10:00:00Z".to_string(),
        tal_url: None,
        db_path: Some(db_path.to_string_lossy().into_owned()),
    });
    let out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timing(
        &store,
        &crate::policy::Policy::default(),
        &tal_bytes,
        &ta_der,
        None,
        &base_archive,
        &base_locks,
        &delta_archive,
        &delta_locks,
        validation_time,
        validation_time,
        &TreeRunConfig {
            max_depth: Some(0),
            max_instances: Some(1),
        },
        &timing,
    )
    .expect("run delta replay root-only audit with timing");
    assert_eq!(out.tree.instances_processed, 1);
    // Dump the timing report and re-parse it to assert on recorded phases.
    let timing_json = temp.path().join("timing_delta_replay.json");
    timing
        .write_json(&timing_json, 20)
        .expect("write timing json");
    let json: serde_json::Value =
        serde_json::from_slice(&std::fs::read(&timing_json).expect("read timing json"))
            .expect("parse timing json");
    assert_eq!(
        json["phases"]["payload_delta_replay_base_total"]["count"].as_u64(),
        Some(1)
    );
    assert_eq!(
        json["phases"]["payload_delta_replay_target_total"]["count"].as_u64(),
        Some(1)
    );
}
}

View File

@ -12,6 +12,7 @@ use crate::data_model::roa::{RoaAfi, RoaObject};
use crate::fetch::rsync::RsyncFetcher;
use crate::policy::Policy;
use crate::replay::archive::ReplayArchiveIndex;
use crate::replay::delta_archive::ReplayDeltaArchiveIndex;
use crate::report::{RfcRef, Warning};
use crate::storage::{
AuditRuleIndexEntry, AuditRuleKind, PackFile, PackTime, RawByHashEntry, RocksStore,
@ -19,7 +20,9 @@ use crate::storage::{
VcirAuditSummary, VcirChildEntry, VcirInstanceGate, VcirLocalOutput, VcirOutputType,
VcirRelatedArtifact, VcirSummary,
};
use crate::sync::repo::{sync_publication_point, sync_publication_point_replay};
use crate::sync::repo::{
sync_publication_point, sync_publication_point_replay, sync_publication_point_replay_delta,
};
use crate::sync::rrdp::Fetcher;
use crate::validation::ca_instance::ca_instance_uris_from_ca_certificate;
use crate::validation::ca_path::{
@ -51,6 +54,7 @@ pub struct Rpkiv1PublicationPointRunner<'a> {
pub timing: Option<TimingHandle>,
pub download_log: Option<DownloadLogHandle>,
pub replay_archive_index: Option<Arc<ReplayArchiveIndex>>,
pub replay_delta_index: Option<Arc<ReplayDeltaArchiveIndex>>,
/// In-run RRDP dedup: when RRDP is enabled, only sync each `rrdp_notification_uri` once per run.
///
/// - If RRDP succeeded for a repo, later publication points referencing that same RRDP repo
@ -153,7 +157,18 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
.map(|t| t.span_phase("repo_sync_total"));
let _repo_span = self.timing.as_ref().map(|t| t.span_rrdp_repo(repo_key));
match if let Some(replay_index) = self.replay_archive_index.as_ref() {
match if let Some(delta_index) = self.replay_delta_index.as_ref() {
sync_publication_point_replay_delta(
self.store,
delta_index,
effective_notification_uri,
&ca.rsync_base_uri,
self.http_fetcher,
self.rsync_fetcher,
self.timing.as_ref(),
self.download_log.as_ref(),
)
} else if let Some(replay_index) = self.replay_archive_index.as_ref() {
sync_publication_point_replay(
self.store,
replay_index,
@ -2873,6 +2888,7 @@ authorityKeyIdentifier = keyid:always
timing: None,
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: false,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: false,
@ -3040,6 +3056,7 @@ authorityKeyIdentifier = keyid:always
timing: None,
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: false,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
@ -3118,6 +3135,7 @@ authorityKeyIdentifier = keyid:always
timing: None,
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: false,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: false,
@ -3142,6 +3160,7 @@ authorityKeyIdentifier = keyid:always
timing: None,
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: false,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: false,
@ -4127,6 +4146,7 @@ authorityKeyIdentifier = keyid:always
timing: Some(timing.clone()),
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: false,
@ -4154,6 +4174,7 @@ authorityKeyIdentifier = keyid:always
timing: Some(timing),
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: false,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,

View File

@ -171,6 +171,7 @@ fn apnic_tree_full_stats_serial() {
timing: None,
download_log: None,
replay_archive_index: None,
replay_delta_index: None,
rrdp_dedup: true,
rrdp_repo_cache: std::sync::Mutex::new(std::collections::HashMap::new()),
rsync_dedup: true,

View File

@ -0,0 +1,73 @@
use std::process::Command;
#[test]
fn cli_payload_delta_replay_rejects_wrong_base_locks() {
    // End-to-end CLI check: when --payload-base-locks does not match the
    // delta locks' recorded base, the binary must exit with code 2 and report
    // the mismatch (or a generic replay setup failure) on stderr.
    let bin = env!("CARGO_BIN_EXE_rpki");
    let db_dir = tempfile::tempdir().expect("db tempdir");
    let out_dir = tempfile::tempdir().expect("out tempdir");
    let report_path = out_dir.path().join("report.json");
    // A locks file whose content cannot match the recorded sha256.
    let wrong_base_locks = out_dir.path().join("wrong-base-locks.json");
    std::fs::write(&wrong_base_locks, b"wrong-base-locks").expect("write wrong base locks");
    let tal_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/tal/apnic-rfc7730-https.tal");
    let ta_path =
        std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/ta/apnic-ta.cer");
    let demo_root = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("target/live/apnic_delta_demo/20260315-170223-autoplay");
    let base_archive = demo_root.join("base-payload-archive");
    let delta_archive = demo_root.join("payload-delta-archive");
    let delta_locks = demo_root.join("locks-delta.json");
    // Fail fast with clear messages when the local demo bundle is absent.
    assert!(
        base_archive.is_dir(),
        "base archive missing: {}",
        base_archive.display()
    );
    assert!(
        delta_archive.is_dir(),
        "delta archive missing: {}",
        delta_archive.display()
    );
    assert!(
        delta_locks.is_file(),
        "delta locks missing: {}",
        delta_locks.display()
    );
    let out = Command::new(bin)
        .args([
            "--db",
            db_dir.path().to_string_lossy().as_ref(),
            "--tal-path",
            tal_path.to_string_lossy().as_ref(),
            "--ta-path",
            ta_path.to_string_lossy().as_ref(),
            "--payload-base-archive",
            base_archive.to_string_lossy().as_ref(),
            "--payload-base-locks",
            wrong_base_locks.to_string_lossy().as_ref(),
            "--payload-delta-archive",
            delta_archive.to_string_lossy().as_ref(),
            "--payload-delta-locks",
            delta_locks.to_string_lossy().as_ref(),
            "--validation-time",
            "2026-03-15T10:00:00Z",
            "--max-depth",
            "0",
            "--max-instances",
            "1",
            "--report-json",
            report_path.to_string_lossy().as_ref(),
        ])
        .output()
        .expect("run delta replay cli");
    // The CLI is expected to exit with code 2 for this failure.
    assert_eq!(out.status.code(), Some(2), "status={}", out.status);
    let stderr = String::from_utf8_lossy(&out.stderr);
    assert!(
        stderr.contains("base locks sha256 mismatch")
            || stderr.contains("payload replay setup failed"),
        "stderr={stderr}"
    );
}

View File

@ -0,0 +1,101 @@
use std::process::Command;
/// Root of the recorded multi-RIR replay bundle, resolved relative to this
/// crate's manifest directory.
fn multi_rir_bundle_root() -> std::path::PathBuf {
    let crate_root = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
    crate_root.join("../../rpki/target/live/20260316-112341-multi-final3")
}
/// Path to the Python helper that resolves per-RIR case inputs.
fn helper_script() -> std::path::PathBuf {
    let crate_root = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
    crate_root.join("scripts/payload_replay/multi_rir_case_info.py")
}
/// Path to the shell wrapper that drives a full per-RIR replay case.
fn wrapper_script() -> std::path::PathBuf {
    let crate_root = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
    crate_root.join("scripts/payload_replay/run_multi_rir_replay_case.sh")
}
#[test]
fn multi_rir_case_info_resolves_all_five_rirs_and_timings() {
    // For every RIR the Python helper must emit JSON whose paths point at the
    // expected bundle artifacts, whose TAL/TA fixtures match the RIR, and
    // whose Routinator replay timing baselines are positive numbers.
    let bundle_root = multi_rir_bundle_root();
    assert!(bundle_root.is_dir(), "bundle root missing: {}", bundle_root.display());
    // (rir, expected trust_anchor, TAL filename suffix, TA filename suffix)
    let expected = [
        ("afrinic", "afrinic", "afrinic.tal", "afrinic-ta.cer"),
        ("apnic", "apnic", "apnic-rfc7730-https.tal", "apnic-ta.cer"),
        ("arin", "arin", "arin.tal", "arin-ta.cer"),
        ("lacnic", "lacnic", "lacnic.tal", "lacnic-ta.cer"),
        ("ripe", "ripe", "ripe-ncc.tal", "ripe-ncc-ta.cer"),
    ];
    for (rir, trust_anchor, tal_suffix, ta_suffix) in expected {
        let out = Command::new("python3")
            .arg(helper_script())
            .args([
                "--bundle-root",
                bundle_root.to_string_lossy().as_ref(),
                "--rir",
                rir,
            ])
            .output()
            .expect("run helper script");
        assert!(
            out.status.success(),
            "helper failed for {rir}: status={}\nstdout={}\nstderr={}",
            out.status,
            String::from_utf8_lossy(&out.stdout),
            String::from_utf8_lossy(&out.stderr)
        );
        let json: serde_json::Value =
            serde_json::from_slice(&out.stdout).expect("parse helper json");
        assert_eq!(json["rir"].as_str(), Some(rir));
        assert_eq!(json["trust_anchor"].as_str(), Some(trust_anchor));
        // Path fields are checked by suffix so the bundle root may vary.
        assert!(json["base_archive"].as_str().unwrap_or("").ends_with("base-payload-archive"));
        assert!(json["delta_archive"].as_str().unwrap_or("").ends_with("payload-delta-archive"));
        assert!(json["base_locks"].as_str().unwrap_or("").ends_with("base-locks.json"));
        assert!(json["delta_locks"].as_str().unwrap_or("").ends_with("locks-delta.json"));
        assert!(json["tal_path"].as_str().unwrap_or("").ends_with(tal_suffix));
        assert!(json["ta_path"].as_str().unwrap_or("").ends_with(ta_suffix));
        // Loose shape check: both validation times look like timestamps.
        assert!(json["validation_times"]["snapshot"].as_str().unwrap_or("").contains("T"));
        assert!(json["validation_times"]["delta"].as_str().unwrap_or("").contains("T"));
        assert!(json["routinator_timings"]["base_replay_seconds"]
            .as_f64()
            .unwrap_or(0.0)
            > 0.0);
        assert!(json["routinator_timings"]["delta_replay_seconds"]
            .as_f64()
            .unwrap_or(0.0)
            > 0.0);
    }
}
#[test]
fn multi_rir_wrapper_describe_mode_works_for_ripe() {
    // The shell wrapper's `describe` mode must print a JSON summary for the
    // requested RIR, including a path to its verification.json.
    let bundle_root = multi_rir_bundle_root();
    assert!(bundle_root.is_dir(), "bundle root missing: {}", bundle_root.display());
    // BUNDLE_ROOT overrides the wrapper's default bundle location.
    let out = Command::new(wrapper_script())
        .env("BUNDLE_ROOT", &bundle_root)
        .args(["ripe", "describe"])
        .output()
        .expect("run wrapper script");
    assert!(
        out.status.success(),
        "wrapper failed: status={}\nstdout={}\nstderr={}",
        out.status,
        String::from_utf8_lossy(&out.stdout),
        String::from_utf8_lossy(&out.stderr)
    );
    let json: serde_json::Value =
        serde_json::from_slice(&out.stdout).expect("parse wrapper describe json");
    assert_eq!(json["rir"].as_str(), Some("ripe"));
    assert_eq!(json["trust_anchor"].as_str(), Some("ripe"));
    assert!(json["verification_json"]
        .as_str()
        .unwrap_or("")
        .ends_with("verification.json"));
}

View File

@ -122,3 +122,165 @@ fn compare_with_routinator_record_reports_diff_counts() {
let only_record_text = std::fs::read_to_string(&only_record).expect("read only record csv");
assert!(only_record_text.contains("AS64498,203.0.113.0/24,24,apnic"));
}
#[test]
fn write_multi_rir_case_report_combines_compare_and_timing() {
    // Write minimal snapshot/delta meta + compare fixtures, invoke the
    // case-report script against them, and verify the combined JSON and
    // markdown report it emits (match flags, timing ratios, table rows).
    let workdir = tempfile::tempdir().expect("tempdir");
    let path_of = |name: &str| workdir.path().join(name);
    let snapshot_meta = path_of("snapshot_meta.json");
    let delta_meta = path_of("delta_meta.json");
    let snapshot_compare = path_of("snapshot_compare.md");
    let delta_compare = path_of("delta_compare.md");
    let out_md = path_of("case_report.md");
    let out_json = path_of("case_report.json");
    std::fs::write(
        &snapshot_meta,
        r#"{
"durations_secs": {"rpki_run": 12},
"counts": {"vrps": 10, "aspas": 1}
}"#,
    )
    .expect("write snapshot meta");
    std::fs::write(
        &delta_meta,
        r#"{
"durations_secs": {"rpki_run": 8},
"counts": {"vrps": 11, "aspas": 1}
}"#,
    )
    .expect("write delta meta");
    // Both compare fixtures share the same table shape; only the totals vary.
    let compare_table = |total: u32| {
        format!(
            "# compare\n\n| metric | value |\n|---|---:|\n| ours_total | {total} |\n| record_total | {total} |\n| intersection | {total} |\n| only_in_ours | 0 |\n| only_in_record | 0 |\n"
        )
    };
    std::fs::write(&snapshot_compare, compare_table(10)).expect("write snapshot compare");
    std::fs::write(&delta_compare, compare_table(11)).expect("write delta compare");
    let script = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("scripts/payload_replay/write_multi_rir_case_report.py");
    let result = Command::new("python3")
        .arg(&script)
        .arg("--rir")
        .arg("afrinic")
        .arg("--snapshot-meta")
        .arg(&snapshot_meta)
        .arg("--snapshot-compare")
        .arg(&snapshot_compare)
        .arg("--delta-meta")
        .arg(&delta_meta)
        .arg("--delta-compare")
        .arg(&delta_compare)
        .arg("--routinator-base-seconds")
        .arg("6.0")
        .arg("--routinator-delta-seconds")
        .arg("4.0")
        .arg("--out-md")
        .arg(&out_md)
        .arg("--out-json")
        .arg(&out_json)
        .output()
        .expect("run case report script");
    assert!(
        result.status.success(),
        "script failed: status={}\nstdout={}\nstderr={}",
        result.status,
        String::from_utf8_lossy(&result.stdout),
        String::from_utf8_lossy(&result.stderr)
    );
    let report: serde_json::Value =
        serde_json::from_slice(&std::fs::read(&out_json).expect("read report json"))
            .expect("parse report json");
    assert_eq!(report["rir"].as_str(), Some("afrinic"));
    // Both phases matched the record and ran at a 2x ratio vs routinator.
    for phase in ["snapshot", "delta"] {
        assert_eq!(report[phase]["match"].as_bool(), Some(true));
        assert_eq!(report[phase]["ratio"].as_f64(), Some(2.0));
    }
    let md = std::fs::read_to_string(&out_md).expect("read markdown");
    assert!(md.contains("AFRINIC Replay Report"), "{md}");
    assert!(md.contains("| snapshot | true | 12.000 | 6.000 | 2.000 | 0 | 0 |"), "{md}");
    assert!(md.contains("| delta | true | 8.000 | 4.000 | 2.000 | 0 | 0 |"), "{md}");
}
#[test]
fn write_multi_rir_summary_aggregates_case_reports() {
    // Lay out five per-RIR case reports on disk, run the summary script over
    // them, and verify the aggregated JSON/markdown summary (case count,
    // all-match flags, geomean ratio, per-RIR table row).
    let workdir = tempfile::tempdir().expect("tempdir");
    let case_root = workdir.path().join("cases");
    let fixtures: [(&str, f64, f64); 5] = [
        ("afrinic", 2.0, 3.0),
        ("apnic", 1.5, 2.5),
        ("arin", 1.1, 1.6),
        ("lacnic", 3.8, 4.8),
        ("ripe", 2.7, 2.6),
    ];
    for (rir, snapshot_ratio, delta_ratio) in fixtures {
        let rir_dir = case_root.join(rir);
        std::fs::create_dir_all(&rir_dir).expect("create rir dir");
        let report = serde_json::json!({
            "rir": rir,
            "snapshot": {
                "match": true,
                "ours_seconds": 10.0,
                "routinator_seconds": 5.0,
                "ratio": snapshot_ratio,
                "compare": {"only_in_ours": 0, "only_in_record": 0}
            },
            "delta": {
                "match": true,
                "ours_seconds": 12.0,
                "routinator_seconds": 6.0,
                "ratio": delta_ratio,
                "compare": {"only_in_ours": 0, "only_in_record": 0}
            }
        });
        std::fs::write(
            rir_dir.join(format!("{rir}_case_report.json")),
            serde_json::to_vec_pretty(&report).expect("serialize report"),
        )
        .expect("write report");
    }
    let out_md = workdir.path().join("summary.md");
    let out_json = workdir.path().join("summary.json");
    let script = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("scripts/payload_replay/write_multi_rir_summary.py");
    let result = Command::new("python3")
        .arg(&script)
        .arg("--case-root")
        .arg(&case_root)
        .arg("--out-md")
        .arg(&out_md)
        .arg("--out-json")
        .arg(&out_json)
        .output()
        .expect("run summary script");
    assert!(
        result.status.success(),
        "script failed: status={}\nstdout={}\nstderr={}",
        result.status,
        String::from_utf8_lossy(&result.stdout),
        String::from_utf8_lossy(&result.stderr)
    );
    let summary: serde_json::Value =
        serde_json::from_slice(&std::fs::read(&out_json).expect("read summary json"))
            .expect("parse summary json");
    assert_eq!(summary["cases"].as_array().map(Vec::len), Some(5));
    assert_eq!(summary["summary"]["snapshot_all_match"].as_bool(), Some(true));
    assert_eq!(summary["summary"]["delta_all_match"].as_bool(), Some(true));
    let geomean = summary["summary"]["all_ratio_geomean"].as_f64().unwrap_or(0.0);
    assert!(geomean > 0.0);
    let md = std::fs::read_to_string(&out_md).expect("read summary md");
    assert!(md.contains("Multi-RIR Replay Summary"), "{md}");
    assert!(
        md.contains("| afrinic | true | 10.000 | 5.000 | 2.000 | true | 12.000 | 6.000 | 3.000 |"),
        "{md}"
    );
}

View File

@ -0,0 +1,33 @@
use base64::Engine;
use rpki::data_model::roa::RoaObject;
use rpki::data_model::signed_object::RpkiSignedObject;
/// Extracts and base64-decodes the body of the `<publish uri="...">` element
/// for `uri` from an RRDP snapshot/delta XML document.
///
/// # Panics
///
/// Panics if the element is missing or its body is not valid base64 — this is
/// a test helper, so failing loudly is the desired behavior.
fn extract_publish_bytes(xml: &str, uri: &str) -> Vec<u8> {
    let needle = format!("<publish uri=\"{uri}\">");
    let start = xml.find(&needle).expect("publish uri present") + needle.len();
    let end = xml[start..].find("</publish>").expect("publish end") + start;
    // RRDP producers may wrap base64 bodies across multiple lines; the
    // STANDARD engine rejects interior whitespace, so strip ALL whitespace
    // (not just the leading/trailing run that `trim` would remove).
    let b64: String = xml[start..end].split_whitespace().collect();
    base64::engine::general_purpose::STANDARD
        .decode(b64)
        .expect("decode publish base64")
}
#[test]
fn signed_object_decode_accepts_real_arin_ber_indefinite_roa_from_multi_rir_bundle() {
    // Pull a real ARIN ROA out of a captured RRDP snapshot and confirm that
    // both the generic CMS decoder and the ROA decoder accept its
    // BER-indefinite-length encoding.
    let xml_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
        "../../rpki/target/live/20260316-112341-multi-final3/arin/base-payload-archive/v1/captures/arin-base-arin/rrdp/repos/8bd1405242614aed9f13321d266fe3193db0ca842e0cbffda2b3df05481c331b/4a394319-7460-4141-a416-1addb69284ff/snapshot-160090-880434ae2a6f0e5fff224391c65a22ab037e09ac1d3ebad0ceda18558b49b13e.xml",
    );
    assert!(xml_path.is_file(), "xml path missing: {}", xml_path.display());
    let snapshot_xml = std::fs::read_to_string(&xml_path).expect("read snapshot xml");
    let roa_uri = "rsync://rpki.arin.net/repository/arin-rpki-ta/5e4a23ea-e80a-403e-b08c-2171da2157d3/2a246947-2d62-4a6c-ba05-87187f0099b2/9ed5ce80-224e-46ab-94f1-1afce8ccf13f/0b13beb5-6bbb-3994-a254-02c5b10175c5.roa";
    let der = extract_publish_bytes(&snapshot_xml, roa_uri);
    // SEQUENCE tag (0x30) followed by the indefinite-length octet (0x80).
    assert_eq!(der.first().copied(), Some(0x30));
    assert_eq!(der.get(1).copied(), Some(0x80));
    let signed_object = RpkiSignedObject::decode_der(&der)
        .expect("decode BER-indefinite CMS signed object");
    // id-ct-routeOriginAuthz — the eContentType OID for ROAs.
    assert_eq!(
        signed_object.signed_data.encap_content_info.econtent_type,
        "1.2.840.113549.1.9.16.1.24"
    );
    let roa = RoaObject::decode_der(&der).expect("decode ROA object from BER-indefinite CMS");
    assert!(!roa.roa.ip_addr_blocks.is_empty());
}