From 51663a941012fd338b9a295b11ad240024b0dad9 Mon Sep 17 00:00:00 2001
From: yuyr
Date: Wed, 6 May 2026 16:49:23 +0800
Subject: [PATCH] 20260506 Clean up the obsolete bundle code and consolidate
 the ccr compare view
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 scripts/coverage.sh                           |    2 +-
 scripts/replay_bundle/README.md               |  129 --
 .../replay_bundle/run_live_bundle_record.sh   |  135 --
 .../run_live_bundle_record_multi_rir.sh       |  166 --
 ...n_live_bundle_record_multi_rir_sequence.sh |  173 --
 .../run_live_bundle_record_sequence.sh        |  119 --
 scripts/replay_verify/README.md               |   24 +-
 .../replay_verify/run_peer_bundle_matrix.sh   |  210 --
 src/bin/ccr_to_compare_views.rs               |    2 +-
 src/bin/cir_drop_report.rs                    |    2 +-
 src/bin/measure_sequence_replay.rs            |  263 ---
 src/bin/replay_bundle_capture.rs              |  419 ----
 src/bin/replay_bundle_capture_delta.rs        |  492 -----
 src/bin/replay_bundle_capture_sequence.rs     | 1005 ----------
 src/bin/replay_bundle_record.rs               |  833 --------
 .../replay_bundle_refresh_sequence_outputs.rs | 1060 ----------
 src/bundle/live_capture.rs                    | 1722 -----------------
 src/bundle/mod.rs                             |   24 -
 src/bundle/record_io.rs                       |  274 ---
 src/bundle/spec.rs                            |  185 --
 src/{bundle => ccr}/compare_view.rs           |    0
 src/ccr/mod.rs                                |    7 +
 src/cli.rs                                    |    2 +-
 src/lib.rs                                    |    3 -
 tests/test_multi_tal_parallel_m2.rs           |    2 +-
 ...st_parallel_phase1_transport_offline_r5.rs |    8 +-
 26 files changed, 16 insertions(+), 7245 deletions(-)
 delete mode 100644 scripts/replay_bundle/README.md
 delete mode 100755 scripts/replay_bundle/run_live_bundle_record.sh
 delete mode 100755 scripts/replay_bundle/run_live_bundle_record_multi_rir.sh
 delete mode 100644 scripts/replay_bundle/run_live_bundle_record_multi_rir_sequence.sh
 delete mode 100755 scripts/replay_bundle/run_live_bundle_record_sequence.sh
 delete mode 100755 scripts/replay_verify/run_peer_bundle_matrix.sh
 delete mode 100644 src/bin/measure_sequence_replay.rs
 delete mode 100644 src/bin/replay_bundle_capture.rs
 delete mode 100644 src/bin/replay_bundle_capture_delta.rs
 delete mode 100644 src/bin/replay_bundle_capture_sequence.rs
 delete mode 100644 src/bin/replay_bundle_record.rs
 delete mode 100644 src/bin/replay_bundle_refresh_sequence_outputs.rs
 delete mode 100644 src/bundle/live_capture.rs
 delete mode 100644 src/bundle/mod.rs
 delete mode 100644 src/bundle/record_io.rs
 delete mode 100644 src/bundle/spec.rs
 rename src/{bundle => ccr}/compare_view.rs (100%)

diff --git a/scripts/coverage.sh b/scripts/coverage.sh
index c481160..ca7252e 100755
--- a/scripts/coverage.sh
+++ b/scripts/coverage.sh
@@ -27,7 +27,7 @@ cleanup() {
 }
 trap cleanup EXIT
 
-IGNORE_REGEX='src/bin/replay_bundle_capture\.rs|src/bin/replay_bundle_capture_delta\.rs|src/bin/replay_bundle_capture_sequence\.rs|src/bin/replay_bundle_record\.rs|src/bin/replay_bundle_refresh_sequence_outputs\.rs|src/bin/measure_sequence_replay\.rs|src/bin/repository_view_stats\.rs|src/bin/trace_arin_missing_vrps\.rs|src/bin/db_stats\.rs|src/bin/rrdp_state_dump\.rs|src/bin/ccr_dump\.rs|src/bin/ccr_verify\.rs|src/bin/ccr_to_routinator_csv\.rs|src/bin/ccr_to_compare_views\.rs|src/bin/cir_materialize\.rs|src/bin/cir_extract_inputs\.rs|src/bin/cir_drop_report\.rs|src/bin/cir_ta_only_fixture\.rs|src/bundle/live_capture\.rs|src/bundle/record_io\.rs|src/bundle/compare_view\.rs|src/progress_log\.rs|src/cli\.rs|src/validation/run_tree_from_tal\.rs|src/validation/tree_parallel\.rs|src/validation/from_tal\.rs|src/sync/store_projection\.rs|src/cir/materialize\.rs'
+IGNORE_REGEX='src/bin/repository_view_stats\.rs|src/bin/trace_arin_missing_vrps\.rs|src/bin/db_stats\.rs|src/bin/rrdp_state_dump\.rs|src/bin/ccr_dump\.rs|src/bin/ccr_verify\.rs|src/bin/ccr_to_routinator_csv\.rs|src/bin/ccr_to_compare_views\.rs|src/bin/cir_materialize\.rs|src/bin/cir_extract_inputs\.rs|src/bin/cir_drop_report\.rs|src/bin/cir_ta_only_fixture\.rs|src/ccr/compare_view\.rs|src/progress_log\.rs|src/cli\.rs|src/validation/run_tree_from_tal\.rs|src/validation/tree_parallel\.rs|src/validation/from_tal\.rs|src/sync/store_projection\.rs|src/cir/materialize\.rs'
 
 # Preserve colored output by running under a pseudo-TTY, even though we post-process the output.
 # We run the tests only once, then generate both the CLI text and HTML reports without rerunning them.
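The `IGNORE_REGEX` change above drops the coverage exclusions for the deleted `replay_bundle_*` binaries and `src/bundle/*` modules, and tracks the rename of `src/bundle/compare_view.rs` to `src/ccr/compare_view.rs`. As a minimal sketch of how such an exclusion list is typically consumed (assuming `coverage.sh` forwards it to cargo-llvm-cov; the actual invocation sits outside this hunk), the whole alternation is passed as one filename regex:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Hypothetical, trimmed-down consumer of IGNORE_REGEX. The alternation is a
# single regex, so it can be handed over verbatim; --ignore-filename-regex is
# cargo-llvm-cov's flag, and using cargo-llvm-cov here is an assumption about
# coverage.sh, not something this hunk shows.
IGNORE_REGEX='src/bin/repository_view_stats\.rs|src/ccr/compare_view\.rs'

cargo llvm-cov --workspace \
  --ignore-filename-regex "$IGNORE_REGEX" \
  --summary-only
```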
diff --git a/scripts/replay_bundle/README.md b/scripts/replay_bundle/README.md
deleted file mode 100644
index fc70ac2..0000000
--- a/scripts/replay_bundle/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-# Live Bundle Record
-
-`run_live_bundle_record.sh` is the current single-command entry point for recording a live bundle with `ours`.
-
-It does three things:
-
-1. Runs the **live base recorder** against the live repositories
-2. Runs the **live delta recorder** on top of the just-recorded base bundle
-3. Produces one unified final directory containing:
-   - `base-payload-archive/`
-   - `payload-delta-archive/`
-   - `base-locks.json`
-   - `locks-delta.json`
-   - `tal.tal`
-   - `ta.cer`
-   - `base.ccr`
-   - `delta.ccr`
-   - `base-vrps.csv`
-   - `base-vaps.csv`
-   - `record-delta.csv`
-   - `record-delta-vaps.csv`
-   - `bundle.json`
-   - `verification.json`
-   - `timings/`
-
-## Usage
-
-```bash
-cd rpki
-./scripts/replay_bundle/run_live_bundle_record.sh \
-  --rir apnic \
-  --tal-path tests/fixtures/tal/apnic-rfc7730-https.tal \
-  --ta-path tests/fixtures/ta/apnic-ta.cer
-```
-
-Default output directory:
-
-```text
-target/replay/<rir>_live_bundle_<ts>
-```
-
-To record multiple RIRs in one run, use:
-
-```bash
-cd rpki
-./scripts/replay_bundle/run_live_bundle_record_multi_rir.sh \
-  --rir afrinic,apnic,arin,lacnic,ripe
-```
-
-Default output directory:
-
-```text
-target/replay/live_bundle_matrix_<ts>
-```
-
-Each RIR lands in:
-
-```text
-target/replay/live_bundle_matrix_<ts>/<rir>_live_bundle_<ts>
-```
-
-To record a `1 base + N delta` sequence for a single RIR, use:
-
-```bash
-cd rpki
-./scripts/replay_bundle/run_live_bundle_record_sequence.sh \
-  --rir apnic \
-  --tal-path tests/fixtures/tal/apnic-rfc7730-https.tal \
-  --ta-path tests/fixtures/ta/apnic-ta.cer \
-  --delta-count 2 \
-  --delta-interval-secs 0
-```
-
-Default output directory:
-
-```text
-target/replay/<rir>_live_bundle_sequence_<ts>
-```
-
-To record `1 base + N delta` sequences for multiple RIRs in one run, use:
-
-```bash
-cd rpki
-./scripts/replay_bundle/run_live_bundle_record_multi_rir_sequence.sh \
-  --rir afrinic,apnic,arin,lacnic,ripe
-```
-
-Default output directory:
-
-```text
-target/replay/live_bundle_sequence_matrix_<ts>
-```
-
-## Optional arguments
-
-- `--out-dir <dir>`
-- `--base-validation-time <rfc3339>`
-- `--delta-validation-time <rfc3339>`
-- `--http-timeout-secs <secs>`
-- `--rsync-timeout-secs <secs>`
-- `--rsync-mirror-root <dir>`
-- `--max-depth <n>`
-- `--max-instances <n>`
-- `--trust-anchor <name>`
-- `--bin-dir <dir>`
-- `--no-build`
-- `--delta-count <n>` (sequence entry points only)
-- `--delta-interval-secs <secs>` (sequence entry points only)
-- `--keep-db` (sequence entry points only)
-
-`run_live_bundle_record_multi_rir.sh` automatically selects, per RIR, the fixtures built into this repository:
-
-- `tests/fixtures/tal/*.tal`
-- `tests/fixtures/ta/*.cer`
-
-and sets `--trust-anchor` to the corresponding RIR name.
-
-## Notes
-
-- The script first builds:
-  - `replay_bundle_capture`
-  - `replay_bundle_capture_delta`
-- With `--no-build`, it instead reuses:
-  - the existing binaries under `--bin-dir <dir>`
-- The intermediate staging directories:
-  - `<out-dir>.stage-base`
-  - `<out-dir>.stage-delta`
-  are removed after a successful run; only the final output directory is kept.
-- The final output directory is the **delta-phase output**, which already includes the base-phase results.
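The deleted README above documents the recorded bundle layout. As a hypothetical sketch (not part of the patch), a bundle produced by these scripts could be spot-checked in the same bash plus `python3` heredoc style the scripts themselves used; the bundle path below is a placeholder, and the JSON field names follow what the now-deleted capture binaries wrote into `bundle.json` and `verification.json`:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Placeholder bundle directory following the README layout:
# target/replay/<rir>_live_bundle_<ts>/<rir>/
BUNDLE_DIR="target/replay/apnic_live_bundle_20260506T000000Z/apnic"

python3 - "$BUNDLE_DIR" <<'PY'
import json, pathlib, sys

bundle_dir = pathlib.Path(sys.argv[1])
meta = json.loads((bundle_dir / "bundle.json").read_text())
verification = json.loads((bundle_dir / "verification.json").read_text())

# Counts and the self-replay flag as written by the deleted capture binaries.
print("base vrps:", meta["baseVrpCount"], "base vaps:", meta["baseVapCount"])
assert verification["base"]["capture"]["selfReplayOk"], "base self replay failed"
PY
```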
diff --git a/scripts/replay_bundle/run_live_bundle_record.sh b/scripts/replay_bundle/run_live_bundle_record.sh
deleted file mode 100755
index 30b56fa..0000000
--- a/scripts/replay_bundle/run_live_bundle_record.sh
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env bash
-set -euo pipefail
-
-ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.."
&& pwd)" -cd "$ROOT_DIR" - -RIR="" -OUT_DIR="" -TAL_PATH="" -TA_PATH="" -BASE_VALIDATION_TIME="" -DELTA_VALIDATION_TIME="" -HTTP_TIMEOUT_SECS="" -RSYNC_TIMEOUT_SECS="" -RSYNC_MIRROR_ROOT="" -MAX_DEPTH="" -MAX_INSTANCES="" -TRUST_ANCHOR="" -NO_BUILD=0 -BIN_DIR="target/release" - -usage() { - cat <<'EOF' -Usage: - ./scripts/replay_bundle/run_live_bundle_record.sh \ - --rir \ - --tal-path \ - --ta-path \ - [--out-dir ] \ - [--base-validation-time ] \ - [--delta-validation-time ] \ - [--http-timeout-secs ] \ - [--rsync-timeout-secs ] \ - [--rsync-mirror-root ] \ - [--max-depth ] \ - [--max-instances ] \ - [--trust-anchor ] \ - [--bin-dir ] \ - [--no-build] -EOF -} - -while [[ $# -gt 0 ]]; do - case "$1" in - --rir) RIR="${2:?}"; shift 2 ;; - --out-dir) OUT_DIR="${2:?}"; shift 2 ;; - --tal-path) TAL_PATH="${2:?}"; shift 2 ;; - --ta-path) TA_PATH="${2:?}"; shift 2 ;; - --base-validation-time) BASE_VALIDATION_TIME="${2:?}"; shift 2 ;; - --delta-validation-time) DELTA_VALIDATION_TIME="${2:?}"; shift 2 ;; - --http-timeout-secs) HTTP_TIMEOUT_SECS="${2:?}"; shift 2 ;; - --rsync-timeout-secs) RSYNC_TIMEOUT_SECS="${2:?}"; shift 2 ;; - --rsync-mirror-root) RSYNC_MIRROR_ROOT="${2:?}"; shift 2 ;; - --max-depth) MAX_DEPTH="${2:?}"; shift 2 ;; - --max-instances) MAX_INSTANCES="${2:?}"; shift 2 ;; - --trust-anchor) TRUST_ANCHOR="${2:?}"; shift 2 ;; - --bin-dir) BIN_DIR="${2:?}"; shift 2 ;; - --no-build) NO_BUILD=1; shift ;; - --help|-h) usage; exit 0 ;; - *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;; - esac -done - -if [[ -z "$RIR" || -z "$TAL_PATH" || -z "$TA_PATH" ]]; then - usage >&2 - exit 2 -fi - -TS="$(date -u +%Y%m%dT%H%M%SZ)" -if [[ -z "$OUT_DIR" ]]; then - OUT_DIR="target/replay/${RIR}_live_bundle_${TS}" -fi - -STAGE_BASE="${OUT_DIR}.stage-base" -STAGE_DELTA="${OUT_DIR}.stage-delta" - -rm -rf "$OUT_DIR" "$STAGE_BASE" "$STAGE_DELTA" -mkdir -p "$(dirname "$OUT_DIR")" - -CAPTURE_BIN="$BIN_DIR/replay_bundle_capture" -DELTA_CAPTURE_BIN="$BIN_DIR/replay_bundle_capture_delta" - -if [[ "$NO_BUILD" -eq 0 ]]; then - echo "[1/3] build release binaries" - cargo build --release --bin replay_bundle_capture --bin replay_bundle_capture_delta -else - echo "[1/3] reuse existing binaries from $BIN_DIR" -fi - -if [[ ! -x "$CAPTURE_BIN" ]]; then - echo "missing executable: $CAPTURE_BIN" >&2 - exit 1 -fi -if [[ ! 
-x "$DELTA_CAPTURE_BIN" ]]; then - echo "missing executable: $DELTA_CAPTURE_BIN" >&2 - exit 1 -fi - -echo "[2/3] record live base bundle into $STAGE_BASE" -BASE_CMD=( - "$CAPTURE_BIN" - --rir "$RIR" - --out-dir "$STAGE_BASE" - --tal-path "$TAL_PATH" - --ta-path "$TA_PATH" -) -[[ -n "$BASE_VALIDATION_TIME" ]] && BASE_CMD+=(--validation-time "$BASE_VALIDATION_TIME") -[[ -n "$HTTP_TIMEOUT_SECS" ]] && BASE_CMD+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS") -[[ -n "$RSYNC_TIMEOUT_SECS" ]] && BASE_CMD+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS") -[[ -n "$RSYNC_MIRROR_ROOT" ]] && BASE_CMD+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT") -[[ -n "$MAX_DEPTH" ]] && BASE_CMD+=(--max-depth "$MAX_DEPTH") -[[ -n "$MAX_INSTANCES" ]] && BASE_CMD+=(--max-instances "$MAX_INSTANCES") -[[ -n "$TRUST_ANCHOR" ]] && BASE_CMD+=(--trust-anchor "$TRUST_ANCHOR") -"${BASE_CMD[@]}" - -echo "[3/3] record live delta bundle into $STAGE_DELTA" -DELTA_CMD=( - "$DELTA_CAPTURE_BIN" - --rir "$RIR" - --base-bundle-dir "$STAGE_BASE" - --out-dir "$STAGE_DELTA" -) -[[ -n "$DELTA_VALIDATION_TIME" ]] && DELTA_CMD+=(--validation-time "$DELTA_VALIDATION_TIME") -[[ -n "$HTTP_TIMEOUT_SECS" ]] && DELTA_CMD+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS") -[[ -n "$RSYNC_TIMEOUT_SECS" ]] && DELTA_CMD+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS") -[[ -n "$RSYNC_MIRROR_ROOT" ]] && DELTA_CMD+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT") -[[ -n "$MAX_DEPTH" ]] && DELTA_CMD+=(--max-depth "$MAX_DEPTH") -[[ -n "$MAX_INSTANCES" ]] && DELTA_CMD+=(--max-instances "$MAX_INSTANCES") -[[ -n "$TRUST_ANCHOR" ]] && DELTA_CMD+=(--trust-anchor "$TRUST_ANCHOR") -"${DELTA_CMD[@]}" - -mv "$STAGE_DELTA" "$OUT_DIR" -rm -rf "$STAGE_BASE" - -echo "$OUT_DIR" diff --git a/scripts/replay_bundle/run_live_bundle_record_multi_rir.sh b/scripts/replay_bundle/run_live_bundle_record_multi_rir.sh deleted file mode 100755 index d6e4be8..0000000 --- a/scripts/replay_bundle/run_live_bundle_record_multi_rir.sh +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" -cd "$ROOT_DIR" - -RIRS="" -OUT_ROOT="" -BASE_VALIDATION_TIME="" -DELTA_VALIDATION_TIME="" -HTTP_TIMEOUT_SECS="" -RSYNC_TIMEOUT_SECS="" -RSYNC_MIRROR_ROOT="" -MAX_DEPTH="" -MAX_INSTANCES="" -NO_BUILD=0 -BIN_DIR="target/release" - -usage() { - cat <<'EOF' -Usage: - ./scripts/replay_bundle/run_live_bundle_record_multi_rir.sh \ - --rir \ - [--out-root ] \ - [--base-validation-time ] \ - [--delta-validation-time ] \ - [--http-timeout-secs ] \ - [--rsync-timeout-secs ] \ - [--rsync-mirror-root ] \ - [--max-depth ] \ - [--max-instances ] \ - [--bin-dir ] \ - [--no-build] -EOF -} - -while [[ $# -gt 0 ]]; do - case "$1" in - --rir) RIRS="${2:?}"; shift 2 ;; - --out-root) OUT_ROOT="${2:?}"; shift 2 ;; - --base-validation-time) BASE_VALIDATION_TIME="${2:?}"; shift 2 ;; - --delta-validation-time) DELTA_VALIDATION_TIME="${2:?}"; shift 2 ;; - --http-timeout-secs) HTTP_TIMEOUT_SECS="${2:?}"; shift 2 ;; - --rsync-timeout-secs) RSYNC_TIMEOUT_SECS="${2:?}"; shift 2 ;; - --rsync-mirror-root) RSYNC_MIRROR_ROOT="${2:?}"; shift 2 ;; - --max-depth) MAX_DEPTH="${2:?}"; shift 2 ;; - --max-instances) MAX_INSTANCES="${2:?}"; shift 2 ;; - --bin-dir) BIN_DIR="${2:?}"; shift 2 ;; - --no-build) NO_BUILD=1; shift ;; - --help|-h) usage; exit 0 ;; - *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;; - esac -done - -if [[ -z "$RIRS" ]]; then - usage >&2 - exit 2 -fi - -RUN_TAG="$(date -u +%Y%m%dT%H%M%SZ)" -if [[ -z "$OUT_ROOT" ]]; then - OUT_ROOT="target/replay/live_bundle_matrix_${RUN_TAG}" -fi -mkdir -p "$OUT_ROOT" - -resolve_tal_path() { - case "$1" in - afrinic) printf 'tests/fixtures/tal/afrinic.tal' ;; - apnic) printf 'tests/fixtures/tal/apnic-rfc7730-https.tal' ;; - arin) printf 'tests/fixtures/tal/arin.tal' ;; - lacnic) printf 'tests/fixtures/tal/lacnic.tal' ;; - ripe) printf 'tests/fixtures/tal/ripe-ncc.tal' ;; - *) echo "unsupported rir: $1" >&2; exit 2 ;; - esac -} - -resolve_ta_path() { - case "$1" in - afrinic) printf 'tests/fixtures/ta/afrinic-ta.cer' ;; - apnic) printf 'tests/fixtures/ta/apnic-ta.cer' ;; - arin) printf 'tests/fixtures/ta/arin-ta.cer' ;; - lacnic) printf 'tests/fixtures/ta/lacnic-ta.cer' ;; - ripe) printf 'tests/fixtures/ta/ripe-ncc-ta.cer' ;; - *) echo "unsupported rir: $1" >&2; exit 2 ;; - esac -} - -SUMMARY_JSON="$OUT_ROOT/summary.json" -SUMMARY_MD="$OUT_ROOT/summary.md" - -python3 - "$SUMMARY_JSON" "$RUN_TAG" <<'PY' -import json, sys -out, run_tag = sys.argv[1:] -with open(out, "w") as fh: - json.dump({"runTag": run_tag, "results": []}, fh, indent=2) -PY - -IFS=',' read -r -a RIR_LIST <<< "$RIRS" -for raw_rir in "${RIR_LIST[@]}"; do - rir="$(printf '%s' "$raw_rir" | tr '[:upper:]' '[:lower:]' | xargs)" - [[ -n "$rir" ]] || continue - tal_path="$(resolve_tal_path "$rir")" - ta_path="$(resolve_ta_path "$rir")" - out_dir="$OUT_ROOT/${rir}_live_bundle_${RUN_TAG}" - cmd=( - ./scripts/replay_bundle/run_live_bundle_record.sh - --rir "$rir" - --out-dir "$out_dir" - --tal-path "$tal_path" - --ta-path "$ta_path" - --trust-anchor "$rir" - --bin-dir "$BIN_DIR" - ) - [[ -n "$BASE_VALIDATION_TIME" ]] && cmd+=(--base-validation-time "$BASE_VALIDATION_TIME") - [[ -n "$DELTA_VALIDATION_TIME" ]] && cmd+=(--delta-validation-time "$DELTA_VALIDATION_TIME") - [[ -n "$HTTP_TIMEOUT_SECS" ]] && cmd+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS") - [[ -n "$RSYNC_TIMEOUT_SECS" ]] && cmd+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS") - [[ -n "$RSYNC_MIRROR_ROOT" ]] && cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT") - [[ -n "$MAX_DEPTH" ]] && cmd+=(--max-depth "$MAX_DEPTH") - [[ -n "$MAX_INSTANCES" ]] && 
cmd+=(--max-instances "$MAX_INSTANCES") - [[ "$NO_BUILD" -eq 1 ]] && cmd+=(--no-build) - "${cmd[@]}" - - python3 - "$SUMMARY_JSON" "$rir" "$out_dir" <<'PY' -import json, pathlib, sys -summary_path, rir, out_dir = sys.argv[1:] -summary = json.loads(pathlib.Path(summary_path).read_text()) -bundle = json.loads(pathlib.Path(out_dir, rir, "bundle.json").read_text()) -verification = json.loads(pathlib.Path(out_dir, rir, "verification.json").read_text()) -summary["results"].append({ - "rir": rir, - "outDir": out_dir, - "baseVrpCount": bundle["baseVrpCount"], - "deltaVrpCount": bundle["deltaVrpCount"], - "baseVapCount": bundle["baseVapCount"], - "deltaVapCount": bundle["deltaVapCount"], - "baseSelfReplayOk": verification["base"]["capture"]["selfReplayOk"], - "deltaSelfReplayOk": verification["delta"]["capture"]["selfReplayOk"], -}) -pathlib.Path(summary_path).write_text(json.dumps(summary, indent=2)) -PY -done - -python3 - "$SUMMARY_JSON" "$SUMMARY_MD" <<'PY' -import json, pathlib, sys -summary = json.loads(pathlib.Path(sys.argv[1]).read_text()) -out = pathlib.Path(sys.argv[2]) -lines = [ - "# Multi-RIR Live Bundle Record Summary", - "", - f"- runTag: `{summary['runTag']}`", - "", - "| rir | base_vrps | delta_vrps | base_vaps | delta_vaps | base_self_replay | delta_self_replay | out_dir |", - "|---|---:|---:|---:|---:|---|---|---|", -] -for item in summary["results"]: - lines.append( - f"| {item['rir']} | {item['baseVrpCount']} | {item['deltaVrpCount']} | " - f"{item['baseVapCount']} | {item['deltaVapCount']} | " - f"{str(item['baseSelfReplayOk']).lower()} | {str(item['deltaSelfReplayOk']).lower()} | " - f"`{item['outDir']}` |" - ) -out.write_text("\n".join(lines) + "\n") -PY - -echo "$OUT_ROOT" diff --git a/scripts/replay_bundle/run_live_bundle_record_multi_rir_sequence.sh b/scripts/replay_bundle/run_live_bundle_record_multi_rir_sequence.sh deleted file mode 100644 index 6dac573..0000000 --- a/scripts/replay_bundle/run_live_bundle_record_multi_rir_sequence.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" -cd "$ROOT_DIR" - -RIRS="" -OUT_ROOT="" -BASE_VALIDATION_TIME="" -DELTA_COUNT="" -DELTA_INTERVAL_SECS="" -HTTP_TIMEOUT_SECS="" -RSYNC_TIMEOUT_SECS="" -RSYNC_MIRROR_ROOT="" -MAX_DEPTH="" -MAX_INSTANCES="" -NO_BUILD=0 -KEEP_DB=0 -CAPTURE_INPUTS_ONLY=0 -BIN_DIR="target/release" - -usage() { - cat <<'EOF' -Usage: - ./scripts/replay_bundle/run_live_bundle_record_multi_rir_sequence.sh \ - --rir \ - [--out-root ] \ - [--base-validation-time ] \ - [--delta-count ] \ - [--delta-interval-secs ] \ - [--http-timeout-secs ] \ - [--rsync-timeout-secs ] \ - [--rsync-mirror-root ] \ - [--max-depth ] \ - [--max-instances ] \ - [--bin-dir ] \ - [--no-build] \ - [--keep-db] \ - [--capture-inputs-only] -EOF -} - -while [[ $# -gt 0 ]]; do - case "$1" in - --rir) RIRS="${2:?}"; shift 2 ;; - --out-root) OUT_ROOT="${2:?}"; shift 2 ;; - --base-validation-time) BASE_VALIDATION_TIME="${2:?}"; shift 2 ;; - --delta-count) DELTA_COUNT="${2:?}"; shift 2 ;; - --delta-interval-secs) DELTA_INTERVAL_SECS="${2:?}"; shift 2 ;; - --http-timeout-secs) HTTP_TIMEOUT_SECS="${2:?}"; shift 2 ;; - --rsync-timeout-secs) RSYNC_TIMEOUT_SECS="${2:?}"; shift 2 ;; - --rsync-mirror-root) RSYNC_MIRROR_ROOT="${2:?}"; shift 2 ;; - --max-depth) MAX_DEPTH="${2:?}"; shift 2 ;; - --max-instances) MAX_INSTANCES="${2:?}"; shift 2 ;; - --bin-dir) BIN_DIR="${2:?}"; shift 2 ;; - --no-build) NO_BUILD=1; shift ;; - --keep-db) KEEP_DB=1; shift ;; - --capture-inputs-only) CAPTURE_INPUTS_ONLY=1; shift ;; - --help|-h) usage; exit 0 ;; - *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;; - esac -done - -if [[ -z "$RIRS" ]]; then - usage >&2 - exit 2 -fi - -RUN_TAG="$(date -u +%Y%m%dT%H%M%SZ)" -if [[ -z "$OUT_ROOT" ]]; then - OUT_ROOT="target/replay/live_bundle_sequence_matrix_${RUN_TAG}" -fi -mkdir -p "$OUT_ROOT" - -resolve_tal_path() { - case "$1" in - afrinic) printf 'tests/fixtures/tal/afrinic.tal' ;; - apnic) printf 'tests/fixtures/tal/apnic-rfc7730-https.tal' ;; - arin) printf 'tests/fixtures/tal/arin.tal' ;; - lacnic) printf 'tests/fixtures/tal/lacnic.tal' ;; - ripe) printf 'tests/fixtures/tal/ripe-ncc.tal' ;; - *) echo "unsupported rir: $1" >&2; exit 2 ;; - esac -} - -resolve_ta_path() { - case "$1" in - afrinic) printf 'tests/fixtures/ta/afrinic-ta.cer' ;; - apnic) printf 'tests/fixtures/ta/apnic-ta.cer' ;; - arin) printf 'tests/fixtures/ta/arin-ta.cer' ;; - lacnic) printf 'tests/fixtures/ta/lacnic-ta.cer' ;; - ripe) printf 'tests/fixtures/ta/ripe-ncc-ta.cer' ;; - *) echo "unsupported rir: $1" >&2; exit 2 ;; - esac -} - -SUMMARY_JSON="$OUT_ROOT/summary.json" -SUMMARY_MD="$OUT_ROOT/summary.md" -python3 - "$SUMMARY_JSON" "$RUN_TAG" <<'PY' -import json, sys -path, run_tag = sys.argv[1:] -with open(path, "w") as fh: - json.dump({"runTag": run_tag, "results": []}, fh, indent=2) -PY - -IFS=',' read -r -a RIR_LIST <<< "$RIRS" -for raw_rir in "${RIR_LIST[@]}"; do - rir="$(printf '%s' "$raw_rir" | tr '[:upper:]' '[:lower:]' | xargs)" - [[ -n "$rir" ]] || continue - tal_path="$(resolve_tal_path "$rir")" - ta_path="$(resolve_ta_path "$rir")" - out_dir="$OUT_ROOT/${rir}_live_bundle_sequence_${RUN_TAG}" - cmd=( - ./scripts/replay_bundle/run_live_bundle_record_sequence.sh - --rir "$rir" - --out-dir "$out_dir" - --tal-path "$tal_path" - --ta-path "$ta_path" - --trust-anchor "$rir" - --bin-dir "$BIN_DIR" - ) - [[ -n "$BASE_VALIDATION_TIME" ]] && cmd+=(--base-validation-time "$BASE_VALIDATION_TIME") - [[ -n "$DELTA_COUNT" ]] && cmd+=(--delta-count "$DELTA_COUNT") - [[ -n "$DELTA_INTERVAL_SECS" ]] && cmd+=(--delta-interval-secs "$DELTA_INTERVAL_SECS") - 
[[ -n "$HTTP_TIMEOUT_SECS" ]] && cmd+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS") - [[ -n "$RSYNC_TIMEOUT_SECS" ]] && cmd+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS") - [[ -n "$RSYNC_MIRROR_ROOT" ]] && cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT") - [[ -n "$MAX_DEPTH" ]] && cmd+=(--max-depth "$MAX_DEPTH") - [[ -n "$MAX_INSTANCES" ]] && cmd+=(--max-instances "$MAX_INSTANCES") - [[ "$NO_BUILD" -eq 1 ]] && cmd+=(--no-build) - [[ "$KEEP_DB" -eq 1 ]] && cmd+=(--keep-db) - [[ "$CAPTURE_INPUTS_ONLY" -eq 1 ]] && cmd+=(--capture-inputs-only) - "${cmd[@]}" - - python3 - "$SUMMARY_JSON" "$rir" "$out_dir" <<'PY' -import json, pathlib, sys -summary_path, rir, out_dir = sys.argv[1:] -summary = json.loads(pathlib.Path(summary_path).read_text()) -bundle = json.loads(pathlib.Path(out_dir, rir, "bundle.json").read_text()) -verification = json.loads(pathlib.Path(out_dir, rir, "verification.json").read_text()) -summary["results"].append({ - "rir": rir, - "outDir": out_dir, - "stepCount": len(bundle["deltaSequence"]["steps"]), - "baseVrpCount": bundle["base"]["vrpCount"], - "baseVapCount": bundle["base"]["vapCount"], - "allStepsSelfReplayOk": verification["summary"]["allStepsSelfReplayOk"], -}) -pathlib.Path(summary_path).write_text(json.dumps(summary, indent=2)) -PY -done - -python3 - "$SUMMARY_JSON" "$SUMMARY_MD" <<'PY' -import json, pathlib, sys -summary = json.loads(pathlib.Path(sys.argv[1]).read_text()) -out = pathlib.Path(sys.argv[2]) -lines = [ - "# Multi-RIR Live Bundle Sequence Summary", - "", - f"- runTag: `{summary['runTag']}`", - "", - "| rir | step_count | base_vrps | base_vaps | all_steps_self_replay | out_dir |", - "|---|---:|---:|---:|---|---|", -] -for item in summary["results"]: - lines.append( - f"| {item['rir']} | {item['stepCount']} | {item['baseVrpCount']} | {item['baseVapCount']} | " - f"{str(item['allStepsSelfReplayOk']).lower()} | `{item['outDir']}` |" - ) -out.write_text("\n".join(lines) + "\n") -PY - -echo "$OUT_ROOT" diff --git a/scripts/replay_bundle/run_live_bundle_record_sequence.sh b/scripts/replay_bundle/run_live_bundle_record_sequence.sh deleted file mode 100755 index 41d9224..0000000 --- a/scripts/replay_bundle/run_live_bundle_record_sequence.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" -cd "$ROOT_DIR" - -RIR="" -OUT_DIR="" -TAL_PATH="" -TA_PATH="" -BASE_VALIDATION_TIME="" -DELTA_COUNT="" -DELTA_INTERVAL_SECS="" -HTTP_TIMEOUT_SECS="" -RSYNC_TIMEOUT_SECS="" -RSYNC_MIRROR_ROOT="" -MAX_DEPTH="" -MAX_INSTANCES="" -TRUST_ANCHOR="" -NO_BUILD=0 -KEEP_DB=0 -CAPTURE_INPUTS_ONLY=0 -BIN_DIR="target/release" -PROGRESS_LOG="${RPKI_PROGRESS_LOG:-1}" -PROGRESS_SLOW_SECS="${RPKI_PROGRESS_SLOW_SECS:-30}" - -usage() { - cat <<'EOF' -Usage: - ./scripts/replay_bundle/run_live_bundle_record_sequence.sh \ - --rir \ - --tal-path \ - --ta-path \ - [--out-dir ] \ - [--base-validation-time ] \ - [--delta-count ] \ - [--delta-interval-secs ] \ - [--http-timeout-secs ] \ - [--rsync-timeout-secs ] \ - [--rsync-mirror-root ] \ - [--max-depth ] \ - [--max-instances ] \ - [--trust-anchor ] \ - [--bin-dir ] \ - [--no-build] \ - [--keep-db] \ - [--capture-inputs-only] -EOF -} - -while [[ $# -gt 0 ]]; do - case "$1" in - --rir) RIR="${2:?}"; shift 2 ;; - --out-dir) OUT_DIR="${2:?}"; shift 2 ;; - --tal-path) TAL_PATH="${2:?}"; shift 2 ;; - --ta-path) TA_PATH="${2:?}"; shift 2 ;; - --base-validation-time) BASE_VALIDATION_TIME="${2:?}"; shift 2 ;; - --delta-count) DELTA_COUNT="${2:?}"; shift 2 ;; - --delta-interval-secs) DELTA_INTERVAL_SECS="${2:?}"; shift 2 ;; - --http-timeout-secs) HTTP_TIMEOUT_SECS="${2:?}"; shift 2 ;; - --rsync-timeout-secs) RSYNC_TIMEOUT_SECS="${2:?}"; shift 2 ;; - --rsync-mirror-root) RSYNC_MIRROR_ROOT="${2:?}"; shift 2 ;; - --max-depth) MAX_DEPTH="${2:?}"; shift 2 ;; - --max-instances) MAX_INSTANCES="${2:?}"; shift 2 ;; - --trust-anchor) TRUST_ANCHOR="${2:?}"; shift 2 ;; - --bin-dir) BIN_DIR="${2:?}"; shift 2 ;; - --no-build) NO_BUILD=1; shift ;; - --keep-db) KEEP_DB=1; shift ;; - --capture-inputs-only) CAPTURE_INPUTS_ONLY=1; shift ;; - --help|-h) usage; exit 0 ;; - *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;; - esac -done - -if [[ -z "$RIR" || -z "$TAL_PATH" || -z "$TA_PATH" ]]; then - usage >&2 - exit 2 -fi - -TS="$(date -u +%Y%m%dT%H%M%SZ)" -if [[ -z "$OUT_DIR" ]]; then - OUT_DIR="target/replay/${RIR}_live_bundle_sequence_${TS}" -fi - -SEQUENCE_BIN="$BIN_DIR/replay_bundle_capture_sequence" -if [[ "$NO_BUILD" -eq 0 ]]; then - echo "[1/1] build release binary" - cargo build --release --bin replay_bundle_capture_sequence -else - echo "[1/1] reuse existing binary from $BIN_DIR" -fi - -if [[ ! 
-x "$SEQUENCE_BIN" ]]; then - echo "missing executable: $SEQUENCE_BIN" >&2 - exit 1 -fi - -cmd=( - "$SEQUENCE_BIN" - --rir "$RIR" - --out-dir "$OUT_DIR" - --tal-path "$TAL_PATH" - --ta-path "$TA_PATH" -) -[[ -n "$BASE_VALIDATION_TIME" ]] && cmd+=(--base-validation-time "$BASE_VALIDATION_TIME") -[[ -n "$DELTA_COUNT" ]] && cmd+=(--delta-count "$DELTA_COUNT") -[[ -n "$DELTA_INTERVAL_SECS" ]] && cmd+=(--delta-interval-secs "$DELTA_INTERVAL_SECS") -[[ -n "$HTTP_TIMEOUT_SECS" ]] && cmd+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS") -[[ -n "$RSYNC_TIMEOUT_SECS" ]] && cmd+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS") -[[ -n "$RSYNC_MIRROR_ROOT" ]] && cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT") -[[ -n "$MAX_DEPTH" ]] && cmd+=(--max-depth "$MAX_DEPTH") -[[ -n "$MAX_INSTANCES" ]] && cmd+=(--max-instances "$MAX_INSTANCES") -[[ -n "$TRUST_ANCHOR" ]] && cmd+=(--trust-anchor "$TRUST_ANCHOR") -[[ "$KEEP_DB" -eq 1 ]] && cmd+=(--keep-db) -[[ "$CAPTURE_INPUTS_ONLY" -eq 1 ]] && cmd+=(--capture-inputs-only) - -RPKI_PROGRESS_LOG="$PROGRESS_LOG" \ -RPKI_PROGRESS_SLOW_SECS="$PROGRESS_SLOW_SECS" \ -"${cmd[@]}" diff --git a/scripts/replay_verify/README.md b/scripts/replay_verify/README.md index 5e05f3f..0fea9ea 100644 --- a/scripts/replay_verify/README.md +++ b/scripts/replay_verify/README.md @@ -12,7 +12,7 @@ - `_ccr_replay_` 默认输入: -- bundle root: `/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3` +- 历史 replay fixture root: `/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3` - 每个 RIR 的 TAL / TA / validation time / record CSV 由 `scripts/payload_replay/multi_rir_case_info.py` 解析 用法: @@ -43,25 +43,3 @@ - 同次执行总汇总: - `multi_rir_ccr_replay_verify__summary.md` - `multi_rir_ccr_replay_verify__summary.json` - -## `run_peer_bundle_matrix.sh` - -用途: -- 对一组 `ours live bundle` 做本地 peer replay 矩阵验证 -- Routinator 与 `rpki-client` 分别消费相同 bundle root -- 汇总 `VRP + VAP` 的 base / delta 结果 - -用法: -- `./scripts/replay_verify/run_peer_bundle_matrix.sh --bundle-root target/replay/live_bundle_matrix_` -- `./scripts/replay_verify/run_peer_bundle_matrix.sh --bundle-root target/replay/live_bundle_matrix_ --rir apnic,ripe` - -主要产物: -- 输出根目录: - - `target/replay/peer_bundle_matrix_/` -- Routinator: - - `target/replay/peer_bundle_matrix_/routinator//` -- `rpki-client`: - - `target/replay/peer_bundle_matrix_/rpki-client/` -- 汇总: - - `summary.json` - - `summary.md` diff --git a/scripts/replay_verify/run_peer_bundle_matrix.sh b/scripts/replay_verify/run_peer_bundle_matrix.sh deleted file mode 100755 index 2026cd0..0000000 --- a/scripts/replay_verify/run_peer_bundle_matrix.sh +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." 
&& pwd)" -cd "$ROOT_DIR" - -BUNDLE_ROOT="" -RIRS="" -OUT_ROOT="" -ROUTINATOR_ROOT="/home/yuyr/dev/rust_playground/routinator" -RPKI_CLIENT_ROOT="/home/yuyr/dev/rpki-client-9.7" -RPKI_CLIENT_BUILD_DIR="/home/yuyr/dev/rpki-client-9.7/build-m5" -KEEP_DB=0 - -usage() { - cat <<'EOF' -Usage: - ./scripts/replay_verify/run_peer_bundle_matrix.sh \ - --bundle-root \ - [--rir ] \ - [--out-root ] \ - [--routinator-root ] \ - [--rpki-client-root ] \ - [--rpki-client-build-dir ] \ - [--keep-db] -EOF -} - -while [[ $# -gt 0 ]]; do - case "$1" in - --bundle-root) BUNDLE_ROOT="${2:?}"; shift 2 ;; - --rir) RIRS="${2:?}"; shift 2 ;; - --out-root) OUT_ROOT="${2:?}"; shift 2 ;; - --routinator-root) ROUTINATOR_ROOT="${2:?}"; shift 2 ;; - --rpki-client-root) RPKI_CLIENT_ROOT="${2:?}"; shift 2 ;; - --rpki-client-build-dir) RPKI_CLIENT_BUILD_DIR="${2:?}"; shift 2 ;; - --keep-db) KEEP_DB=1; shift ;; - --help|-h) usage; exit 0 ;; - *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;; - esac -done - -if [[ -z "$BUNDLE_ROOT" ]]; then - usage >&2 - exit 2 -fi - -BUNDLE_ROOT="$(python3 - "$BUNDLE_ROOT" <<'PY' -from pathlib import Path -import sys -print(Path(sys.argv[1]).resolve()) -PY -)" - -RUN_TAG="$(date -u +%Y%m%dT%H%M%SZ)" -if [[ -z "$OUT_ROOT" ]]; then - OUT_ROOT="target/replay/peer_bundle_matrix_${RUN_TAG}" -fi -mkdir -p "$OUT_ROOT" -OUT_ROOT="$(python3 - "$OUT_ROOT" <<'PY' -from pathlib import Path -import sys -print(Path(sys.argv[1]).resolve()) -PY -)" - -discover_rirs() { - python3 - "$BUNDLE_ROOT" <<'PY' -from pathlib import Path -import sys -root = Path(sys.argv[1]) -if (root / "base-locks.json").exists(): - print(root.name) - raise SystemExit -rirs = [] -for entry in sorted(root.iterdir()): - if not entry.is_dir(): - continue - if (entry / "base-locks.json").exists(): - rirs.append(entry.name) - continue - nested = sorted( - child.name for child in entry.iterdir() - if child.is_dir() and (child / "base-locks.json").exists() - ) - if len(nested) == 1: - rirs.append(nested[0]) -print(",".join(rirs)) -PY -} - -if [[ -z "$RIRS" ]]; then - RIRS="$(discover_rirs)" -fi - -ROUTI_OUT="$OUT_ROOT/routinator" -CLIENT_OUT="$OUT_ROOT/rpki-client" -NORMALIZED_BUNDLE_ROOT="$OUT_ROOT/.normalized-bundle-root" -mkdir -p "$ROUTI_OUT" "$CLIENT_OUT" -rm -rf "$NORMALIZED_BUNDLE_ROOT" -mkdir -p "$NORMALIZED_BUNDLE_ROOT" - -IFS=',' read -r -a RIR_LIST <<< "$RIRS" -for raw_rir in "${RIR_LIST[@]}"; do - rir="$(printf '%s' "$raw_rir" | tr '[:upper:]' '[:lower:]' | xargs)" - [[ -n "$rir" ]] || continue - source_bundle_dir="" - if [[ -d "$BUNDLE_ROOT/$rir" && -f "$BUNDLE_ROOT/$rir/base-locks.json" ]]; then - source_bundle_dir="$BUNDLE_ROOT/$rir" - else - match="$(find "$BUNDLE_ROOT" -maxdepth 2 -type d -path "*/${rir}" -exec test -f '{}/base-locks.json' ';' -print | head -n 1)" - if [[ -z "$match" ]]; then - echo "unable to resolve bundle directory for RIR: $rir" >&2 - exit 1 - fi - source_bundle_dir="$match" - fi - ln -sfn "$source_bundle_dir" "$NORMALIZED_BUNDLE_ROOT/$rir" - ROUTI_CMD=( - "$ROUTINATOR_ROOT/bench/multi_rir_demo_ours/run_single_rir_ours_bundle.sh" - "$source_bundle_dir" - "$ROUTI_OUT/$rir" - ) - [[ "$KEEP_DB" -eq 1 ]] && ROUTI_CMD=( "$ROUTINATOR_ROOT/bench/multi_rir_demo_ours/run_single_rir_ours_bundle.sh" --keep-db "$source_bundle_dir" "$ROUTI_OUT/$rir" ) - "${ROUTI_CMD[@]}" -done - -CLIENT_ARGS=( - python3 "$RPKI_CLIENT_ROOT/tools/run_bundle_matrix.py" - --bundle-dir "$NORMALIZED_BUNDLE_ROOT" - --build-dir "$RPKI_CLIENT_BUILD_DIR" - --work-dir "$CLIENT_OUT" -) -[[ "$KEEP_DB" -eq 1 ]] && CLIENT_ARGS+=(--keep-db) 
-for raw_rir in "${RIR_LIST[@]}"; do - rir="$(printf '%s' "$raw_rir" | tr '[:upper:]' '[:lower:]' | xargs)" - [[ -n "$rir" ]] || continue - CLIENT_ARGS+=(--rir "$rir") -done -"${CLIENT_ARGS[@]}" - -SUMMARY_JSON="$OUT_ROOT/summary.json" -SUMMARY_MD="$OUT_ROOT/summary.md" - -python3 - "$ROUTI_OUT" "$CLIENT_OUT/matrix-summary.json" "$SUMMARY_JSON" <<'PY' -import json -from pathlib import Path -import sys - -routi_root = Path(sys.argv[1]) -client_summary = json.loads(Path(sys.argv[2]).read_text()) -summary_path = Path(sys.argv[3]) -summary = {"routinator": {}, "rpki_client": client_summary} -for verification in sorted(routi_root.glob("*/verification.json")): - rir = verification.parent.name - summary["routinator"][rir] = json.loads(verification.read_text()) -summary_path.write_text(json.dumps(summary, indent=2)) -PY - -python3 - "$SUMMARY_JSON" "$SUMMARY_MD" <<'PY' -import json -from pathlib import Path -import sys -summary = json.loads(Path(sys.argv[1]).read_text()) -out = Path(sys.argv[2]) -lines = [ - "# Peer Bundle Matrix Summary", - "", - "## Routinator", - "", - "| rir | base_vrp | base_vap | sequence_vrp | sequence_vap |", - "|---|---|---|---|---|", -] -for rir, data in sorted(summary["routinator"].items()): - if "steps" in data: - lines.append( - f"| {rir} | {str(data.get('baseMatch')).lower()} | {str(data.get('baseVapsMatch')).lower()} | " - f"{str(data.get('summary', {}).get('allStepsMatch')).lower()} | " - f"{str(data.get('summary', {}).get('allStepsVapsMatch')).lower()} |" - ) - else: - lines.append( - f"| {rir} | {str(data.get('baseMatch')).lower()} | {str(data.get('baseVapsMatch')).lower()} | " - f"{str(data.get('deltaMatch')).lower()} | {str(data.get('deltaVapsMatch')).lower()} |" - ) -lines += [ - "", - "## rpki-client", - "", - "| rir | base_vrp | base_vap | sequence_vrp | sequence_vap |", - "|---|---|---|---|---|", -] -for rir, phases in sorted(summary["rpki_client"].items()): - base = phases.get("base", {}) - step_items = [ - value for key, value in phases.items() - if key not in ("base", "delta") and isinstance(value, dict) - ] - if "delta" in phases: - step_items.append(phases["delta"]) - all_step_match = all(item.get("match") for item in step_items) if step_items else None - all_step_vap_match = all(item.get("vaps_match") for item in step_items) if step_items else None - lines.append( - f"| {rir} | {str(base.get('match')).lower()} | {str(base.get('vaps_match')).lower()} | " - f"{str(all_step_match).lower()} | {str(all_step_vap_match).lower()} |" - ) -out.write_text("\n".join(lines) + "\n") -PY - -echo "$OUT_ROOT" diff --git a/src/bin/ccr_to_compare_views.rs b/src/bin/ccr_to_compare_views.rs index bd04f25..7dbb138 100644 --- a/src/bin/ccr_to_compare_views.rs +++ b/src/bin/ccr_to_compare_views.rs @@ -1,5 +1,5 @@ -use rpki::bundle::{decode_ccr_compare_views, write_vap_csv, write_vrp_csv}; use rpki::ccr::decode_content_info; +use rpki::ccr::{decode_ccr_compare_views, write_vap_csv, write_vrp_csv}; #[derive(Debug, Default, PartialEq, Eq)] struct Args { diff --git a/src/bin/cir_drop_report.rs b/src/bin/cir_drop_report.rs index 22cb41b..b793030 100644 --- a/src/bin/cir_drop_report.rs +++ b/src/bin/cir_drop_report.rs @@ -2,7 +2,7 @@ use std::collections::{BTreeMap, BTreeSet}; use std::path::PathBuf; use rpki::blob_store::ExternalRepoBytesDb; -use rpki::bundle::decode_ccr_compare_views; +use rpki::ccr::decode_ccr_compare_views; use rpki::ccr::decode_content_info; use rpki::cir::decode_cir; use rpki::data_model::roa::RoaObject; diff --git a/src/bin/measure_sequence_replay.rs 
b/src/bin/measure_sequence_replay.rs deleted file mode 100644 index 8bc6efc..0000000 --- a/src/bin/measure_sequence_replay.rs +++ /dev/null @@ -1,263 +0,0 @@ -use rpki::bundle::record_io::load_validation_time; -use rpki::storage::RocksStore; -use rpki::validation::run_tree_from_tal::{ - run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit, - run_tree_from_tal_and_ta_der_payload_replay_serial_audit, -}; -use rpki::validation::tree::TreeRunConfig; -use serde::Serialize; -use std::fs; -use std::path::{Path, PathBuf}; -use std::time::Instant; - -fn usage() -> &'static str { - "Usage: measure_sequence_replay --bundle-root [--rir ] --out [--keep-db]" -} - -#[derive(Default)] -struct Args { - bundle_root: Option, - rirs: Option>, - out: Option, - keep_db: bool, -} - -fn parse_args() -> Result { - let mut out = Args::default(); - let argv: Vec = std::env::args().skip(1).collect(); - let mut i = 0usize; - while i < argv.len() { - match argv[i].as_str() { - "--bundle-root" => { - i += 1; - out.bundle_root = Some(PathBuf::from( - argv.get(i).ok_or("--bundle-root requires a value")?, - )); - } - "--rir" => { - i += 1; - let value = argv.get(i).ok_or("--rir requires a value")?; - out.rirs = Some( - value - .split(',') - .map(|s| s.trim().to_lowercase()) - .filter(|s| !s.is_empty()) - .collect(), - ); - } - "--out" => { - i += 1; - out.out = Some(PathBuf::from(argv.get(i).ok_or("--out requires a value")?)); - } - "--keep-db" => out.keep_db = true, - "--help" | "-h" => return Err(usage().to_string()), - other => return Err(format!("unknown argument: {other}\n{}", usage())), - } - i += 1; - } - if out.bundle_root.is_none() || out.out.is_none() { - return Err(format!("--bundle-root and --out are required\n{}", usage())); - } - Ok(out) -} - -#[derive(Serialize)] -struct PhaseTiming { - duration_seconds: f64, - vrp_count: usize, - vap_count: usize, -} - -#[derive(Serialize)] -struct RirTiming { - rir: String, - base: PhaseTiming, - steps: Vec<(String, PhaseTiming)>, -} - -fn discover_rirs(bundle_root: &Path) -> Result, String> { - let mut out = Vec::new(); - for entry in fs::read_dir(bundle_root) - .map_err(|e| format!("read_dir failed: {}: {e}", bundle_root.display()))? - { - let entry = entry.map_err(|e| format!("read_dir entry failed: {e}"))?; - let path = entry.path(); - if path.is_dir() && path.join("bundle.json").exists() && path.join("tal.tal").exists() { - out.push( - path.file_name() - .and_then(|s| s.to_str()) - .ok_or_else(|| format!("invalid rir dir name: {}", path.display()))? 
- .to_string(), - ); - } - } - out.sort(); - Ok(out) -} - -fn path_join(root: &Path, relative: &str) -> PathBuf { - root.join(relative) -} - -fn main() { - if let Err(err) = real_main() { - eprintln!("{err}"); - std::process::exit(1); - } -} - -fn real_main() -> Result<(), String> { - let args = parse_args()?; - let bundle_root = args.bundle_root.unwrap(); - let out_path = args.out.unwrap(); - let rirs = match args.rirs { - Some(v) => v, - None => discover_rirs(&bundle_root)?, - }; - let mut results = Vec::new(); - let tmp_root = out_path - .parent() - .unwrap_or_else(|| Path::new(".")) - .join(".tmp-sequence-replay"); - fs::create_dir_all(&tmp_root) - .map_err(|e| format!("create tmp root failed: {}: {e}", tmp_root.display()))?; - - for rir in rirs { - let rir_dir = bundle_root.join(&rir); - let bundle: serde_json::Value = serde_json::from_slice( - &fs::read(rir_dir.join("bundle.json")) - .map_err(|e| format!("read bundle failed: {}: {e}", rir_dir.display()))?, - ) - .map_err(|e| format!("parse bundle failed for {}: {e}", rir_dir.display()))?; - let tal_bytes = fs::read(rir_dir.join("tal.tal")) - .map_err(|e| format!("read tal.tal failed for {}: {e}", rir_dir.display()))?; - let ta_bytes = fs::read(rir_dir.join("ta.cer")) - .map_err(|e| format!("read ta.cer failed for {}: {e}", rir_dir.display()))?; - - let db_dir = tmp_root.join(format!("{rir}-db")); - if db_dir.exists() { - fs::remove_dir_all(&db_dir) - .map_err(|e| format!("remove old db failed: {}: {e}", db_dir.display()))?; - } - let store = - RocksStore::open(&db_dir).map_err(|e| format!("open rocksdb failed for {rir}: {e}"))?; - - let base_archive = path_join( - &rir_dir, - bundle["base"]["relativeArchivePath"] - .as_str() - .ok_or("bundle missing base.relativeArchivePath")?, - ); - let base_locks = path_join( - &rir_dir, - bundle["base"]["relativeLocksPath"] - .as_str() - .ok_or("bundle missing base.relativeLocksPath")?, - ); - let base_validation_time = load_validation_time(&base_locks) - .map_err(|e| format!("load base validation time failed for {rir}: {e}"))?; - - let start = Instant::now(); - let base_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( - &store, - &rpki::policy::Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &base_archive, - &base_locks, - base_validation_time, - &TreeRunConfig { - max_depth: None, - max_instances: None, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("base replay failed for {rir}: {e}"))?; - let base_timing = PhaseTiming { - duration_seconds: start.elapsed().as_secs_f64(), - vrp_count: base_out.tree.vrps.len(), - vap_count: base_out.tree.aspas.len(), - }; - - let mut previous_locks = base_locks.clone(); - let mut step_timings = Vec::new(); - for step in bundle["deltaSequence"]["steps"] - .as_array() - .ok_or("bundle missing deltaSequence.steps")? 
- { - let step_id = step["id"].as_str().ok_or("step missing id")?.to_string(); - let step_dir = path_join( - &rir_dir, - step["relativePath"] - .as_str() - .ok_or("step missing relativePath")?, - ); - let delta_archive = path_join( - &rir_dir, - step["relativeArchivePath"] - .as_str() - .ok_or("step missing relativeArchivePath")?, - ); - let delta_locks = path_join( - &rir_dir, - step["relativeTransitionLocksPath"] - .as_str() - .ok_or("step missing relativeTransitionLocksPath")?, - ); - let validation_time = load_validation_time(&delta_locks).map_err(|e| { - format!("load step validation time failed for {rir}/{step_id}: {e}") - })?; - let start = Instant::now(); - let step_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit( - &store, - &rpki::policy::Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &delta_archive, - &previous_locks, - &delta_locks, - validation_time, - &TreeRunConfig { - max_depth: None, - max_instances: None, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("delta step replay failed for {rir}/{step_id}: {e}"))?; - step_timings.push(( - step_id.clone(), - PhaseTiming { - duration_seconds: start.elapsed().as_secs_f64(), - vrp_count: step_out.tree.vrps.len(), - vap_count: step_out.tree.aspas.len(), - }, - )); - previous_locks = step_dir.join("target-locks.json"); - } - - results.push(RirTiming { - rir, - base: base_timing, - steps: step_timings, - }); - - if !args.keep_db && db_dir.exists() { - fs::remove_dir_all(&db_dir) - .map_err(|e| format!("remove db failed: {}: {e}", db_dir.display()))?; - } - } - - fs::write( - &out_path, - serde_json::to_vec_pretty(&results).map_err(|e| format!("encode json failed: {e}"))?, - ) - .map_err(|e| format!("write out failed: {}: {e}", out_path.display()))?; - println!("{}", out_path.display()); - Ok(()) -} diff --git a/src/bin/replay_bundle_capture.rs b/src/bin/replay_bundle_capture.rs deleted file mode 100644 index 7b68f6a..0000000 --- a/src/bin/replay_bundle_capture.rs +++ /dev/null @@ -1,419 +0,0 @@ -use rpki::bundle::{ - RecordingHttpFetcher, RecordingRsyncFetcher, RirBundleMetadata, - build_single_rir_bundle_manifest, build_vap_compare_rows, build_vrp_compare_rows, sha256_hex, - write_json, write_live_base_replay_bundle_inputs, write_live_bundle_rir_readme, - write_live_bundle_top_readme, write_timing_json, write_vap_csv, write_vrp_csv, -}; -use rpki::ccr::{build_ccr_from_run, verify_content_info, write_ccr_file}; -use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig}; -use rpki::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher}; -use rpki::policy::Policy; -use rpki::storage::RocksStore; -use rpki::validation::run_tree_from_tal::{ - run_tree_from_tal_and_ta_der_payload_replay_serial_audit, - run_tree_from_tal_and_ta_der_serial_audit, -}; -use rpki::validation::tree::TreeRunConfig; -use std::fs; -use std::path::PathBuf; -use std::time::Instant; -use time::format_description::well_known::Rfc3339; - -#[derive(Debug, Default, PartialEq, Eq)] -struct Args { - rir: Option, - out_dir: Option, - tal_path: Option, - ta_path: Option, - validation_time: Option, - http_timeout_secs: u64, - rsync_timeout_secs: u64, - rsync_mirror_root: Option, - max_depth: Option, - max_instances: Option, - trust_anchor: Option, -} - -fn usage() -> &'static str { - "Usage: replay_bundle_capture --rir --out-dir --tal-path --ta-path [--validation-time ] [--http-timeout-secs ] [--rsync-timeout-secs ] [--rsync-mirror-root ] [--max-depth ] [--max-instances ] 
[--trust-anchor ]" -} - -fn parse_args(argv: &[String]) -> Result { - let mut args = Args { - http_timeout_secs: 20, - rsync_timeout_secs: 60, - ..Args::default() - }; - let mut i = 1usize; - while i < argv.len() { - match argv[i].as_str() { - "--help" | "-h" => return Err(usage().to_string()), - "--rir" => { - i += 1; - args.rir = Some(argv.get(i).ok_or("--rir requires a value")?.clone()); - } - "--out-dir" => { - i += 1; - args.out_dir = Some(PathBuf::from( - argv.get(i).ok_or("--out-dir requires a value")?, - )); - } - "--tal-path" => { - i += 1; - args.tal_path = Some(PathBuf::from( - argv.get(i).ok_or("--tal-path requires a value")?, - )); - } - "--ta-path" => { - i += 1; - args.ta_path = Some(PathBuf::from( - argv.get(i).ok_or("--ta-path requires a value")?, - )); - } - "--validation-time" => { - i += 1; - let value = argv.get(i).ok_or("--validation-time requires a value")?; - args.validation_time = Some( - time::OffsetDateTime::parse(value, &Rfc3339) - .map_err(|e| format!("invalid --validation-time: {e}"))?, - ); - } - "--http-timeout-secs" => { - i += 1; - args.http_timeout_secs = argv - .get(i) - .ok_or("--http-timeout-secs requires a value")? - .parse() - .map_err(|e| format!("invalid --http-timeout-secs: {e}"))?; - } - "--rsync-timeout-secs" => { - i += 1; - args.rsync_timeout_secs = argv - .get(i) - .ok_or("--rsync-timeout-secs requires a value")? - .parse() - .map_err(|e| format!("invalid --rsync-timeout-secs: {e}"))?; - } - "--rsync-mirror-root" => { - i += 1; - args.rsync_mirror_root = Some(PathBuf::from( - argv.get(i).ok_or("--rsync-mirror-root requires a value")?, - )); - } - "--max-depth" => { - i += 1; - args.max_depth = Some( - argv.get(i) - .ok_or("--max-depth requires a value")? - .parse() - .map_err(|e| format!("invalid --max-depth: {e}"))?, - ); - } - "--max-instances" => { - i += 1; - args.max_instances = Some( - argv.get(i) - .ok_or("--max-instances requires a value")? - .parse() - .map_err(|e| format!("invalid --max-instances: {e}"))?, - ); - } - "--trust-anchor" => { - i += 1; - args.trust_anchor = Some( - argv.get(i) - .ok_or("--trust-anchor requires a value")? 
- .clone(), - ); - } - other => return Err(format!("unknown argument: {other}\n{}", usage())), - } - i += 1; - } - - if args.rir.is_none() { - return Err(format!("--rir is required\n{}", usage())); - } - if args.out_dir.is_none() { - return Err(format!("--out-dir is required\n{}", usage())); - } - if args.tal_path.is_none() { - return Err(format!("--tal-path is required\n{}", usage())); - } - if args.ta_path.is_none() { - return Err(format!("--ta-path is required\n{}", usage())); - } - Ok(args) -} - -fn run(args: Args) -> Result { - let rir = args.rir.as_ref().unwrap(); - let rir_normalized = rir.to_ascii_lowercase(); - let trust_anchor = args - .trust_anchor - .clone() - .unwrap_or_else(|| rir_normalized.clone()); - let out_root = args.out_dir.as_ref().unwrap(); - let rir_dir = out_root.join(&rir_normalized); - fs::create_dir_all(&rir_dir) - .map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?; - - let tal_bytes = - fs::read(args.tal_path.as_ref().unwrap()).map_err(|e| format!("read tal failed: {e}"))?; - let ta_bytes = - fs::read(args.ta_path.as_ref().unwrap()).map_err(|e| format!("read ta failed: {e}"))?; - let validation_time = args - .validation_time - .unwrap_or_else(time::OffsetDateTime::now_utc); - - let db_dir = out_root.join(".tmp").join(format!("{rir}-live-base-db")); - let replay_db_dir = out_root.join(".tmp").join(format!("{rir}-self-replay-db")); - let _ = fs::remove_dir_all(&db_dir); - let _ = fs::remove_dir_all(&replay_db_dir); - if let Some(parent) = db_dir.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create tmp dir failed: {}: {e}", parent.display()))?; - } - - let store = RocksStore::open(&db_dir).map_err(|e| format!("open rocksdb failed: {e}"))?; - let http = RecordingHttpFetcher::new( - BlockingHttpFetcher::new(HttpFetcherConfig { - timeout: std::time::Duration::from_secs(args.http_timeout_secs), - ..HttpFetcherConfig::default() - }) - .map_err(|e| format!("create http fetcher failed: {e}"))?, - ); - let rsync = RecordingRsyncFetcher::new(SystemRsyncFetcher::new(SystemRsyncConfig { - timeout: std::time::Duration::from_secs(args.rsync_timeout_secs), - mirror_root: args.rsync_mirror_root.clone(), - ..SystemRsyncConfig::default() - })); - - let started = Instant::now(); - let out = run_tree_from_tal_and_ta_der_serial_audit( - &store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &http, - &rsync, - validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("live base run failed: {e}"))?; - let duration = started.elapsed(); - - let ccr = build_ccr_from_run( - &store, - &[out.discovery.trust_anchor.clone()], - &out.tree.vrps, - &out.tree.aspas, - &out.tree.router_keys, - validation_time, - ) - .map_err(|e| format!("build ccr failed: {e}"))?; - let base_ccr_path = rir_dir.join("base.ccr"); - write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?; - let ccr_bytes = fs::read(&base_ccr_path) - .map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?; - let decoded = rpki::ccr::decode_content_info(&ccr_bytes) - .map_err(|e| format!("decode written ccr failed: {e}"))?; - let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?; - - let vrp_rows = build_vrp_compare_rows(&out.tree.vrps, &trust_anchor); - let vap_rows = build_vap_compare_rows(&out.tree.aspas, &trust_anchor); - let (ccr_vrps, 
ccr_vaps) = rpki::bundle::decode_ccr_compare_views(&decoded, &trust_anchor)?; - if vrp_rows != ccr_vrps { - return Err("base-vrps compare view does not match base.ccr".to_string()); - } - if vap_rows != ccr_vaps { - return Err("base-vaps compare view does not match base.ccr".to_string()); - } - write_vrp_csv(&rir_dir.join("base-vrps.csv"), &vrp_rows)?; - write_vap_csv(&rir_dir.join("base-vaps.csv"), &vap_rows)?; - - fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?; - fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?; - let capture = write_live_base_replay_bundle_inputs( - &rir_dir, - &rir_normalized, - validation_time, - &out.publication_points, - &store, - &http.snapshot_responses(), - &rsync.snapshot_fetches(), - )?; - - let replay_store = RocksStore::open(&replay_db_dir) - .map_err(|e| format!("open self replay rocksdb failed: {e}"))?; - let replay_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( - &replay_store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &rir_dir.join("base-payload-archive"), - &rir_dir.join("base-locks.json"), - validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("self replay failed: {e}"))?; - let replay_vrps = build_vrp_compare_rows(&replay_out.tree.vrps, &trust_anchor); - let replay_vaps = build_vap_compare_rows(&replay_out.tree.aspas, &trust_anchor); - if replay_vrps != vrp_rows { - return Err("self replay VRP compare view mismatch".to_string()); - } - if replay_vaps != vap_rows { - return Err("self replay VAP compare view mismatch".to_string()); - } - - fs::create_dir_all(rir_dir.join("timings")) - .map_err(|e| format!("create timings dir failed: {e}"))?; - write_timing_json( - &rir_dir.join("timings").join("base-produce.json"), - "base", - &validation_time, - duration, - )?; - - let metadata = RirBundleMetadata { - schema_version: "20260330-v1".to_string(), - bundle_producer: "ours".to_string(), - rir: rir_normalized.clone(), - base_validation_time: validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - delta_validation_time: None, - tal_sha256: sha256_hex(&tal_bytes), - ta_cert_sha256: sha256_hex(&ta_bytes), - base_ccr_sha256: sha256_hex(&ccr_bytes), - delta_ccr_sha256: None, - has_aspa: !vap_rows.is_empty(), - has_router_key: verify.router_key_count > 0, - base_vrp_count: vrp_rows.len(), - base_vap_count: vap_rows.len(), - delta_vrp_count: None, - delta_vap_count: None, - }; - write_json(&rir_dir.join("bundle.json"), &metadata)?; - write_json( - &rir_dir.join("verification.json"), - &serde_json::json!({ - "base": { - "validationTime": metadata.base_validation_time, - "ccr": { - "path": "base.ccr", - "sha256": metadata.base_ccr_sha256, - "stateHashesOk": verify.state_hashes_ok, - "manifestInstances": verify.manifest_instances, - "roaVrpCount": verify.roa_vrp_count, - "aspaPayloadSets": verify.aspa_payload_sets, - "routerKeyCount": verify.router_key_count, - }, - "compareViews": { - "vrpsSelfMatch": true, - "vapsSelfMatch": true, - "baseVrpCount": metadata.base_vrp_count, - "baseVapCount": metadata.base_vap_count, - }, - "capture": { - "captureId": capture.capture_id, - "rrdpRepoCount": capture.rrdp_repo_count, - "rsyncModuleCount": capture.rsync_module_count, - "selfReplayOk": true, - } - } - }), - )?; - 
write_live_bundle_top_readme(&out_root.join("README.md"), &rir_normalized)?; - write_live_bundle_rir_readme( - &rir_dir.join("README.md"), - &rir_normalized, - &metadata.base_validation_time, - )?; - write_json( - &out_root.join("bundle-manifest.json"), - &build_single_rir_bundle_manifest( - "20260330-v1", - "ours", - &rir_normalized, - &validation_time, - None, - metadata.has_aspa, - )?, - )?; - - let _ = fs::remove_dir_all(&db_dir); - let _ = fs::remove_dir_all(&replay_db_dir); - - Ok(out_root.clone()) -} - -fn main() -> Result<(), String> { - let args = parse_args(&std::env::args().collect::>())?; - let out = run(args)?; - println!("{}", out.display()); - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use tempfile::tempdir; - - #[test] - fn parse_args_requires_required_flags() { - let argv = vec![ - "replay_bundle_capture".to_string(), - "--rir".to_string(), - "apnic".to_string(), - "--out-dir".to_string(), - "out".to_string(), - "--tal-path".to_string(), - "tal".to_string(), - "--ta-path".to_string(), - "ta".to_string(), - ]; - let args = parse_args(&argv).expect("parse"); - assert_eq!(args.rir.as_deref(), Some("apnic")); - assert_eq!(args.out_dir.as_deref(), Some(std::path::Path::new("out"))); - assert_eq!(args.http_timeout_secs, 20); - assert_eq!(args.rsync_timeout_secs, 60); - } - - #[test] - fn parse_args_rejects_missing_requireds() { - let err = parse_args(&["replay_bundle_capture".to_string()]).unwrap_err(); - assert!(err.contains("--rir is required"), "{err}"); - } - - #[test] - fn write_timing_json_writes_duration_and_mode() { - let td = tempdir().expect("tempdir"); - let path = td.path().join("timings/base-produce.json"); - write_timing_json( - &path, - "base", - &time::OffsetDateTime::parse("2026-03-30T00:00:00Z", &Rfc3339).expect("time"), - std::time::Duration::from_millis(1500), - ) - .expect("write timing"); - let json: serde_json::Value = - serde_json::from_slice(&std::fs::read(&path).expect("read timing")).expect("parse"); - assert_eq!(json["mode"], "base"); - assert_eq!(json["durationSeconds"], 1.5); - } -} diff --git a/src/bin/replay_bundle_capture_delta.rs b/src/bin/replay_bundle_capture_delta.rs deleted file mode 100644 index 6ee21de..0000000 --- a/src/bin/replay_bundle_capture_delta.rs +++ /dev/null @@ -1,492 +0,0 @@ -use rpki::bundle::{ - RecordingHttpFetcher, RecordingRsyncFetcher, build_single_rir_bundle_manifest, - build_vap_compare_rows, build_vrp_compare_rows, copy_dir_all, load_validation_time, sha256_hex, - write_json, write_live_delta_replay_bundle_inputs, write_vap_csv, write_vrp_csv, -}; -use rpki::ccr::{build_ccr_from_run, decode_content_info, verify_content_info, write_ccr_file}; -use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig}; -use rpki::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher}; -use rpki::policy::Policy; -use rpki::storage::RocksStore; -use rpki::sync::rrdp::Fetcher; -use rpki::validation::run_tree_from_tal::{ - run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit, - run_tree_from_tal_and_ta_der_payload_replay_serial_audit, - run_tree_from_tal_and_ta_der_serial_audit, -}; -use rpki::validation::tree::TreeRunConfig; -use std::fs; -use std::path::{Path, PathBuf}; -use std::time::Instant; -use time::format_description::well_known::Rfc3339; - -#[derive(Debug, Default, PartialEq, Eq)] -struct Args { - rir: Option, - base_bundle_dir: Option, - out_dir: Option, - validation_time: Option, - http_timeout_secs: u64, - rsync_timeout_secs: u64, - rsync_mirror_root: Option, - max_depth: Option, - 
max_instances: Option<usize>,
-    trust_anchor: Option<String>,
-}
-
-fn usage() -> &'static str {
-    "Usage: replay_bundle_capture_delta --rir <rir> --base-bundle-dir <dir> --out-dir <dir> [--validation-time <rfc3339>] [--http-timeout-secs <secs>] [--rsync-timeout-secs <secs>] [--rsync-mirror-root <dir>] [--max-depth <n>] [--max-instances <n>] [--trust-anchor <name>]"
-}
-
-fn parse_args(argv: &[String]) -> Result<Args, String> {
-    let mut args = Args {
-        http_timeout_secs: 20,
-        rsync_timeout_secs: 60,
-        ..Args::default()
-    };
-    let mut i = 1usize;
-    while i < argv.len() {
-        match argv[i].as_str() {
-            "--help" | "-h" => return Err(usage().to_string()),
-            "--rir" => {
-                i += 1;
-                args.rir = Some(argv.get(i).ok_or("--rir requires a value")?.clone());
-            }
-            "--base-bundle-dir" => {
-                i += 1;
-                args.base_bundle_dir = Some(PathBuf::from(
-                    argv.get(i).ok_or("--base-bundle-dir requires a value")?,
-                ));
-            }
-            "--out-dir" => {
-                i += 1;
-                args.out_dir = Some(PathBuf::from(
-                    argv.get(i).ok_or("--out-dir requires a value")?,
-                ));
-            }
-            "--validation-time" => {
-                i += 1;
-                let value = argv.get(i).ok_or("--validation-time requires a value")?;
-                args.validation_time = Some(
-                    time::OffsetDateTime::parse(value, &Rfc3339)
-                        .map_err(|e| format!("invalid --validation-time: {e}"))?,
-                );
-            }
-            "--http-timeout-secs" => {
-                i += 1;
-                args.http_timeout_secs = argv
-                    .get(i)
-                    .ok_or("--http-timeout-secs requires a value")?
-                    .parse()
-                    .map_err(|e| format!("invalid --http-timeout-secs: {e}"))?;
-            }
-            "--rsync-timeout-secs" => {
-                i += 1;
-                args.rsync_timeout_secs = argv
-                    .get(i)
-                    .ok_or("--rsync-timeout-secs requires a value")?
-                    .parse()
-                    .map_err(|e| format!("invalid --rsync-timeout-secs: {e}"))?;
-            }
-            "--rsync-mirror-root" => {
-                i += 1;
-                args.rsync_mirror_root = Some(PathBuf::from(
-                    argv.get(i).ok_or("--rsync-mirror-root requires a value")?,
-                ));
-            }
-            "--max-depth" => {
-                i += 1;
-                args.max_depth = Some(
-                    argv.get(i)
-                        .ok_or("--max-depth requires a value")?
-                        .parse()
-                        .map_err(|e| format!("invalid --max-depth: {e}"))?,
-                );
-            }
-            "--max-instances" => {
-                i += 1;
-                args.max_instances = Some(
-                    argv.get(i)
-                        .ok_or("--max-instances requires a value")?
-                        .parse()
-                        .map_err(|e| format!("invalid --max-instances: {e}"))?,
-                );
-            }
-            "--trust-anchor" => {
-                i += 1;
-                args.trust_anchor = Some(
-                    argv.get(i)
-                        .ok_or("--trust-anchor requires a value")?
- .clone(), - ); - } - other => return Err(format!("unknown argument: {other}\n{}", usage())), - } - i += 1; - } - - if args.rir.is_none() { - return Err(format!("--rir is required\n{}", usage())); - } - if args.base_bundle_dir.is_none() { - return Err(format!("--base-bundle-dir is required\n{}", usage())); - } - if args.out_dir.is_none() { - return Err(format!("--out-dir is required\n{}", usage())); - } - Ok(args) -} - -fn ensure_recorded_target_snapshots( - store: &RocksStore, - base_bundle_dir: &Path, - http: &RecordingHttpFetcher, -) -> Result<(), String> { - let base_locks: serde_json::Value = serde_json::from_slice( - &fs::read(base_bundle_dir.join("base-locks.json")) - .map_err(|e| format!("read base locks failed: {e}"))?, - ) - .map_err(|e| format!("parse base locks failed: {e}"))?; - let base_rrdp = base_locks - .get("rrdp") - .and_then(|v| v.as_object()) - .cloned() - .unwrap_or_default(); - - for (notify_uri, base_lock) in base_rrdp { - let Some(base_transport) = base_lock.get("transport").and_then(|v| v.as_str()) else { - continue; - }; - if base_transport != "rrdp" { - continue; - } - let Some(base_session) = base_lock.get("session").and_then(|v| v.as_str()) else { - continue; - }; - let Some(base_serial) = base_lock.get("serial").and_then(|v| v.as_u64()) else { - continue; - }; - let Some(record) = store - .get_rrdp_source_record(¬ify_uri) - .map_err(|e| format!("read rrdp source record failed for {notify_uri}: {e}"))? - else { - continue; - }; - let Some(target_session) = record.last_session_id.as_deref() else { - continue; - }; - let Some(target_serial) = record.last_serial else { - continue; - }; - if target_session != base_session || target_serial <= base_serial { - continue; - } - let Some(snapshot_uri) = record.last_snapshot_uri.as_deref() else { - continue; - }; - if http.snapshot_responses().contains_key(snapshot_uri) { - continue; - } - let _ = http - .fetch(snapshot_uri) - .map_err(|e| format!("fetch target snapshot for {notify_uri} failed: {e}"))?; - } - Ok(()) -} - -fn run(args: Args) -> Result { - let rir = args.rir.as_ref().unwrap(); - let rir_normalized = rir.to_ascii_lowercase(); - let out_root = args.out_dir.as_ref().unwrap(); - let base_root = args.base_bundle_dir.as_ref().unwrap(); - let base_rir_dir = base_root.join(&rir_normalized); - if !base_rir_dir.is_dir() { - return Err(format!( - "base bundle rir dir not found: {}", - base_rir_dir.display() - )); - } - if out_root.exists() { - fs::remove_dir_all(out_root) - .map_err(|e| format!("remove old out dir failed: {}: {e}", out_root.display()))?; - } - copy_dir_all(base_root, out_root)?; - let rir_dir = out_root.join(&rir_normalized); - - let trust_anchor = args - .trust_anchor - .clone() - .unwrap_or_else(|| rir_normalized.clone()); - let tal_bytes = fs::read(rir_dir.join("tal.tal")) - .map_err(|e| format!("read tal from base bundle failed: {e}"))?; - let ta_bytes = fs::read(rir_dir.join("ta.cer")) - .map_err(|e| format!("read ta from base bundle failed: {e}"))?; - let base_validation_time = load_validation_time(&rir_dir.join("base-locks.json"))?; - let target_validation_time = args - .validation_time - .unwrap_or_else(time::OffsetDateTime::now_utc); - - let target_store_dir = out_root.join(".tmp").join(format!("{rir}-live-target-db")); - let self_replay_dir = out_root.join(".tmp").join(format!("{rir}-self-delta-db")); - let _ = fs::remove_dir_all(&target_store_dir); - let _ = fs::remove_dir_all(&self_replay_dir); - if let Some(parent) = target_store_dir.parent() { - fs::create_dir_all(parent) - 
.map_err(|e| format!("create tmp dir failed: {}: {e}", parent.display()))?; - } - let target_store = RocksStore::open(&target_store_dir) - .map_err(|e| format!("open target rocksdb failed: {e}"))?; - - let _base = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( - &target_store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &rir_dir.join("base-payload-archive"), - &rir_dir.join("base-locks.json"), - base_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("base bootstrap replay failed: {e}"))?; - - let http = RecordingHttpFetcher::new( - BlockingHttpFetcher::new(HttpFetcherConfig { - timeout: std::time::Duration::from_secs(args.http_timeout_secs), - ..HttpFetcherConfig::default() - }) - .map_err(|e| format!("create http fetcher failed: {e}"))?, - ); - let rsync = RecordingRsyncFetcher::new(SystemRsyncFetcher::new(SystemRsyncConfig { - timeout: std::time::Duration::from_secs(args.rsync_timeout_secs), - mirror_root: args.rsync_mirror_root.clone(), - ..SystemRsyncConfig::default() - })); - - let started = Instant::now(); - let target_out = run_tree_from_tal_and_ta_der_serial_audit( - &target_store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &http, - &rsync, - target_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("live target run failed: {e}"))?; - let duration = started.elapsed(); - ensure_recorded_target_snapshots(&target_store, &rir_dir, &http)?; - - let delta_ccr = build_ccr_from_run( - &target_store, - &[target_out.discovery.trust_anchor.clone()], - &target_out.tree.vrps, - &target_out.tree.aspas, - &target_out.tree.router_keys, - target_validation_time, - ) - .map_err(|e| format!("build delta ccr failed: {e}"))?; - let delta_ccr_path = rir_dir.join("delta.ccr"); - write_ccr_file(&delta_ccr_path, &delta_ccr) - .map_err(|e| format!("write delta ccr failed: {e}"))?; - let delta_ccr_bytes = fs::read(&delta_ccr_path) - .map_err(|e| format!("read delta ccr failed: {}: {e}", delta_ccr_path.display()))?; - let delta_decoded = decode_content_info(&delta_ccr_bytes) - .map_err(|e| format!("decode delta ccr failed: {e}"))?; - let delta_verify = - verify_content_info(&delta_decoded).map_err(|e| format!("verify delta ccr failed: {e}"))?; - - let delta_vrp_rows = build_vrp_compare_rows(&target_out.tree.vrps, &trust_anchor); - let delta_vap_rows = build_vap_compare_rows(&target_out.tree.aspas, &trust_anchor); - let (ccr_vrps, ccr_vaps) = - rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?; - if delta_vrp_rows != ccr_vrps { - return Err("record-delta.csv compare view does not match delta.ccr".to_string()); - } - if delta_vap_rows != ccr_vaps { - return Err("record-delta-vaps.csv compare view does not match delta.ccr".to_string()); - } - write_vrp_csv(&rir_dir.join("record-delta.csv"), &delta_vrp_rows)?; - write_vap_csv(&rir_dir.join("record-delta-vaps.csv"), &delta_vap_rows)?; - - let capture = write_live_delta_replay_bundle_inputs( - &rir_dir, - &rir_normalized, - target_validation_time, - &target_out.publication_points, - &target_store, - &http.snapshot_responses(), - &rsync.snapshot_fetches(), - )?; - - let self_store = RocksStore::open(&self_replay_dir) - .map_err(|e| format!("open self replay db failed: {e}"))?; - let 
replay_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( - &self_store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &rir_dir.join("base-payload-archive"), - &rir_dir.join("base-locks.json"), - &rir_dir.join("payload-delta-archive"), - &rir_dir.join("locks-delta.json"), - base_validation_time, - target_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("self delta replay failed: {e}"))?; - let replay_vrps = build_vrp_compare_rows(&replay_out.tree.vrps, &trust_anchor); - let replay_vaps = build_vap_compare_rows(&replay_out.tree.aspas, &trust_anchor); - if replay_vrps != delta_vrp_rows { - return Err("self delta replay VRP compare view mismatch".to_string()); - } - if replay_vaps != delta_vap_rows { - return Err("self delta replay VAP compare view mismatch".to_string()); - } - - fs::create_dir_all(rir_dir.join("timings")) - .map_err(|e| format!("create timings dir failed: {e}"))?; - write_json( - &rir_dir.join("timings").join("delta-produce.json"), - &serde_json::json!({ - "mode": "delta", - "validationTime": target_validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - "durationSeconds": duration.as_secs_f64(), - }), - )?; - - let mut bundle_json: serde_json::Value = serde_json::from_slice( - &fs::read(rir_dir.join("bundle.json")) - .map_err(|e| format!("read base bundle.json failed: {e}"))?, - ) - .map_err(|e| format!("parse base bundle.json failed: {e}"))?; - bundle_json["deltaValidationTime"] = serde_json::Value::String( - target_validation_time - .format(&Rfc3339) - .map_err(|e| format!("format delta validation time failed: {e}"))?, - ); - bundle_json["deltaCcrSha256"] = serde_json::Value::String(sha256_hex(&delta_ccr_bytes)); - bundle_json["deltaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len() as u64); - bundle_json["deltaVapCount"] = serde_json::Value::from(delta_vap_rows.len() as u64); - bundle_json["hasAspa"] = serde_json::Value::Bool( - bundle_json - .get("hasAspa") - .and_then(|v| v.as_bool()) - .unwrap_or(false) - || !delta_vap_rows.is_empty(), - ); - bundle_json["hasRouterKey"] = serde_json::Value::Bool( - bundle_json - .get("hasRouterKey") - .and_then(|v| v.as_bool()) - .unwrap_or(false) - || delta_verify.router_key_count > 0, - ); - write_json(&rir_dir.join("bundle.json"), &bundle_json)?; - - let mut verification_json: serde_json::Value = serde_json::from_slice( - &fs::read(rir_dir.join("verification.json")) - .map_err(|e| format!("read base verification.json failed: {e}"))?, - ) - .map_err(|e| format!("parse base verification.json failed: {e}"))?; - verification_json["delta"] = serde_json::json!({ - "validationTime": target_validation_time - .format(&Rfc3339) - .map_err(|e| format!("format delta validation time failed: {e}"))?, - "ccr": { - "path": "delta.ccr", - "sha256": sha256_hex(&delta_ccr_bytes), - "stateHashesOk": delta_verify.state_hashes_ok, - "manifestInstances": delta_verify.manifest_instances, - "roaVrpCount": delta_verify.roa_vrp_count, - "aspaPayloadSets": delta_verify.aspa_payload_sets, - "routerKeyCount": delta_verify.router_key_count, - }, - "compareViews": { - "vrpsSelfMatch": true, - "vapsSelfMatch": true, - "deltaVrpCount": delta_vrp_rows.len(), - "deltaVapCount": delta_vap_rows.len(), - }, - "capture": { - "captureId": capture.capture_id, - "rrdpRepoCount": capture.rrdp_repo_count, - "rsyncModuleCount": 
capture.rsync_module_count, - "selfReplayOk": true, - } - }); - write_json(&rir_dir.join("verification.json"), &verification_json)?; - - let bundle_manifest = build_single_rir_bundle_manifest( - "20260330-v1", - "ours", - &rir_normalized, - &base_validation_time, - Some(&target_validation_time), - bundle_json["hasAspa"].as_bool().unwrap_or(false), - )?; - write_json(&out_root.join("bundle-manifest.json"), &bundle_manifest)?; - - let _ = fs::remove_dir_all(&target_store_dir); - let _ = fs::remove_dir_all(&self_replay_dir); - - Ok(out_root.clone()) -} - -fn main() -> Result<(), String> { - let args = parse_args(&std::env::args().collect::>())?; - let out = run(args)?; - println!("{}", out.display()); - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_args_requires_required_flags() { - let argv = vec![ - "replay_bundle_capture_delta".to_string(), - "--rir".to_string(), - "apnic".to_string(), - "--base-bundle-dir".to_string(), - "base".to_string(), - "--out-dir".to_string(), - "out".to_string(), - ]; - let args = parse_args(&argv).expect("parse"); - assert_eq!(args.rir.as_deref(), Some("apnic")); - assert_eq!(args.base_bundle_dir.as_deref(), Some(Path::new("base"))); - assert_eq!(args.out_dir.as_deref(), Some(Path::new("out"))); - } - - #[test] - fn parse_args_rejects_missing_requireds() { - let err = parse_args(&["replay_bundle_capture_delta".to_string()]).unwrap_err(); - assert!(err.contains("--rir is required"), "{err}"); - } -} diff --git a/src/bin/replay_bundle_capture_sequence.rs b/src/bin/replay_bundle_capture_sequence.rs deleted file mode 100644 index 91544ca..0000000 --- a/src/bin/replay_bundle_capture_sequence.rs +++ /dev/null @@ -1,1005 +0,0 @@ -use rpki::bundle::{ - BaseBundleStateMetadataV2, BundleManifestEntryV2, BundleManifestV2, DeltaSequenceMetadataV2, - DeltaStepMetadataV2, RecordingHttpFetcher, RecordingRsyncFetcher, RirBundleMetadataV2, - build_vap_compare_rows, build_vrp_compare_rows, sha256_hex, write_current_replay_state_locks, - write_json, write_live_base_replay_bundle_inputs, write_live_delta_replay_step_inputs, - write_vap_csv, write_vrp_csv, -}; -use rpki::ccr::{ - CcrVerifySummary, build_ccr_from_run, decode_content_info, verify_content_info, write_ccr_file, -}; -use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig}; -use rpki::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher}; -use rpki::policy::Policy; -use rpki::storage::RocksStore; -use rpki::sync::rrdp::Fetcher; -use rpki::validation::run_tree_from_tal::{ - run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit, - run_tree_from_tal_and_ta_der_payload_replay_serial_audit, - run_tree_from_tal_and_ta_der_serial_audit, -}; -use rpki::validation::tree::TreeRunConfig; -use std::fs; -use std::path::{Path, PathBuf}; -use std::time::{Duration, Instant}; -use time::format_description::well_known::Rfc3339; - -#[derive(Debug, Default, PartialEq, Eq)] -struct Args { - rir: Option, - out_dir: Option, - tal_path: Option, - ta_path: Option, - base_validation_time: Option, - delta_count: usize, - delta_interval_secs: u64, - http_timeout_secs: u64, - rsync_timeout_secs: u64, - rsync_mirror_root: Option, - max_depth: Option, - max_instances: Option, - trust_anchor: Option, - keep_db: bool, - capture_inputs_only: bool, -} - -fn usage() -> &'static str { - "Usage: replay_bundle_capture_sequence --rir --out-dir --tal-path --ta-path [--base-validation-time ] [--delta-count ] [--delta-interval-secs ] [--http-timeout-secs ] [--rsync-timeout-secs ] [--rsync-mirror-root ] 
[--max-depth ] [--max-instances ] [--trust-anchor ] [--keep-db] [--capture-inputs-only]" -} - -fn parse_args(argv: &[String]) -> Result { - let mut args = Args { - delta_count: 5, - delta_interval_secs: 600, - http_timeout_secs: 20, - rsync_timeout_secs: 60, - ..Args::default() - }; - let mut i = 1usize; - while i < argv.len() { - match argv[i].as_str() { - "--help" | "-h" => return Err(usage().to_string()), - "--rir" => { - i += 1; - args.rir = Some(argv.get(i).ok_or("--rir requires a value")?.clone()); - } - "--out-dir" => { - i += 1; - args.out_dir = Some(PathBuf::from( - argv.get(i).ok_or("--out-dir requires a value")?, - )); - } - "--tal-path" => { - i += 1; - args.tal_path = Some(PathBuf::from( - argv.get(i).ok_or("--tal-path requires a value")?, - )); - } - "--ta-path" => { - i += 1; - args.ta_path = Some(PathBuf::from( - argv.get(i).ok_or("--ta-path requires a value")?, - )); - } - "--base-validation-time" => { - i += 1; - let value = argv - .get(i) - .ok_or("--base-validation-time requires a value")?; - args.base_validation_time = Some( - time::OffsetDateTime::parse(value, &Rfc3339) - .map_err(|e| format!("invalid --base-validation-time: {e}"))?, - ); - } - "--delta-count" => { - i += 1; - args.delta_count = argv - .get(i) - .ok_or("--delta-count requires a value")? - .parse() - .map_err(|e| format!("invalid --delta-count: {e}"))?; - } - "--delta-interval-secs" => { - i += 1; - args.delta_interval_secs = argv - .get(i) - .ok_or("--delta-interval-secs requires a value")? - .parse() - .map_err(|e| format!("invalid --delta-interval-secs: {e}"))?; - } - "--http-timeout-secs" => { - i += 1; - args.http_timeout_secs = argv - .get(i) - .ok_or("--http-timeout-secs requires a value")? - .parse() - .map_err(|e| format!("invalid --http-timeout-secs: {e}"))?; - } - "--rsync-timeout-secs" => { - i += 1; - args.rsync_timeout_secs = argv - .get(i) - .ok_or("--rsync-timeout-secs requires a value")? - .parse() - .map_err(|e| format!("invalid --rsync-timeout-secs: {e}"))?; - } - "--rsync-mirror-root" => { - i += 1; - args.rsync_mirror_root = Some(PathBuf::from( - argv.get(i).ok_or("--rsync-mirror-root requires a value")?, - )); - } - "--max-depth" => { - i += 1; - args.max_depth = Some( - argv.get(i) - .ok_or("--max-depth requires a value")? - .parse() - .map_err(|e| format!("invalid --max-depth: {e}"))?, - ); - } - "--max-instances" => { - i += 1; - args.max_instances = Some( - argv.get(i) - .ok_or("--max-instances requires a value")? - .parse() - .map_err(|e| format!("invalid --max-instances: {e}"))?, - ); - } - "--trust-anchor" => { - i += 1; - args.trust_anchor = Some( - argv.get(i) - .ok_or("--trust-anchor requires a value")? 
- .clone(), - ); - } - "--keep-db" => args.keep_db = true, - "--capture-inputs-only" => args.capture_inputs_only = true, - other => return Err(format!("unknown argument: {other}\n{}", usage())), - } - i += 1; - } - - if args.rir.is_none() { - return Err(format!("--rir is required\n{}", usage())); - } - if args.out_dir.is_none() { - return Err(format!("--out-dir is required\n{}", usage())); - } - if args.tal_path.is_none() { - return Err(format!("--tal-path is required\n{}", usage())); - } - if args.ta_path.is_none() { - return Err(format!("--ta-path is required\n{}", usage())); - } - Ok(args) -} - -fn write_v2_top_readme( - path: &Path, - rir: &str, - delta_count: usize, - delta_interval_secs: u64, -) -> Result<(), String> { - if let Some(parent) = path.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; - } - fs::write( - path, - format!( - "# Ours Multi-Delta Replay Bundle\n\n- RIR: `{rir}`\n- Schema: `20260401-v2`\n- Configured delta steps: `{delta_count}`\n- Configured interval seconds: `{delta_interval_secs}`\n" - ), - ) - .map_err(|e| format!("write readme failed: {}: {e}", path.display())) -} - -fn write_v2_rir_readme( - path: &Path, - rir: &str, - base_validation_time: &str, - delta_count: usize, - delta_interval_secs: u64, -) -> Result<(), String> { - if let Some(parent) = path.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; - } - fs::write( - path, - format!( - "# {rir} multi-delta live replay bundle\n\n- `base-locks.json.validationTime` = `{base_validation_time}`\n- `delta-steps/` contains `{delta_count}` ordered target steps\n- configured interval seconds = `{delta_interval_secs}`\n" - ), - ) - .map_err(|e| format!("write rir readme failed: {}: {e}", path.display())) -} - -fn ensure_recorded_target_snapshots_for_locks( - store: &RocksStore, - previous_locks_path: &Path, - http: &RecordingHttpFetcher, -) -> Result<(), String> { - let previous_locks: serde_json::Value = - serde_json::from_slice(&fs::read(previous_locks_path).map_err(|e| { - format!( - "read previous locks failed: {}: {e}", - previous_locks_path.display() - ) - })?) - .map_err(|e| { - format!( - "parse previous locks failed: {}: {e}", - previous_locks_path.display() - ) - })?; - let previous_rrdp = previous_locks - .get("rrdp") - .and_then(|v| v.as_object()) - .cloned() - .unwrap_or_default(); - - for (notify_uri, base_lock) in previous_rrdp { - let Some(base_transport) = base_lock.get("transport").and_then(|v| v.as_str()) else { - continue; - }; - if base_transport != "rrdp" { - continue; - } - let Some(base_session) = base_lock.get("session").and_then(|v| v.as_str()) else { - continue; - }; - let Some(base_serial) = base_lock.get("serial").and_then(|v| v.as_u64()) else { - continue; - }; - let Some(record) = store - .get_rrdp_source_record(¬ify_uri) - .map_err(|e| format!("read rrdp source record failed for {notify_uri}: {e}"))? 
- else { - continue; - }; - let Some(target_session) = record.last_session_id.as_deref() else { - continue; - }; - let Some(target_serial) = record.last_serial else { - continue; - }; - if target_session != base_session || target_serial <= base_serial { - continue; - } - let Some(snapshot_uri) = record.last_snapshot_uri.as_deref() else { - continue; - }; - if step_http_has_snapshot(http, snapshot_uri) { - continue; - } - if let Err(err) = http.fetch(snapshot_uri) { - eprintln!( - "[sequence] warning: fetch target snapshot failed notify_uri={} snapshot_uri={} err={}", - notify_uri, snapshot_uri, err - ); - } - } - Ok(()) -} - -fn step_http_has_snapshot( - http: &RecordingHttpFetcher, - snapshot_uri: &str, -) -> bool { - http.snapshot_responses().contains_key(snapshot_uri) -} - -fn run(args: Args) -> Result { - let rir = args.rir.as_ref().unwrap(); - let rir_normalized = rir.to_ascii_lowercase(); - let trust_anchor = args - .trust_anchor - .clone() - .unwrap_or_else(|| rir_normalized.clone()); - let out_root = args.out_dir.as_ref().unwrap(); - if out_root.exists() { - fs::remove_dir_all(out_root) - .map_err(|e| format!("remove old out dir failed: {}: {e}", out_root.display()))?; - } - let rir_dir = out_root.join(&rir_normalized); - let delta_steps_root = rir_dir.join("delta-steps"); - fs::create_dir_all(&delta_steps_root).map_err(|e| { - format!( - "create delta steps dir failed: {}: {e}", - delta_steps_root.display() - ) - })?; - - let tal_bytes = - fs::read(args.tal_path.as_ref().unwrap()).map_err(|e| format!("read tal failed: {e}"))?; - let ta_bytes = - fs::read(args.ta_path.as_ref().unwrap()).map_err(|e| format!("read ta failed: {e}"))?; - fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?; - fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?; - - let base_validation_time = args - .base_validation_time - .unwrap_or_else(time::OffsetDateTime::now_utc); - - let work_db_dir = out_root - .join(".tmp") - .join(format!("{rir}-sequence-work-db")); - let base_self_replay_dir = out_root - .join(".tmp") - .join(format!("{rir}-sequence-base-self-replay-db")); - let _ = fs::remove_dir_all(&work_db_dir); - let _ = fs::remove_dir_all(&base_self_replay_dir); - if let Some(parent) = work_db_dir.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create tmp dir failed: {}: {e}", parent.display()))?; - } - - let store = RocksStore::open(&work_db_dir).map_err(|e| format!("open work db failed: {e}"))?; - let base_http = RecordingHttpFetcher::new( - BlockingHttpFetcher::new(HttpFetcherConfig { - timeout: Duration::from_secs(args.http_timeout_secs), - ..HttpFetcherConfig::default() - }) - .map_err(|e| format!("create base http fetcher failed: {e}"))?, - ); - let base_rsync_inner = SystemRsyncFetcher::new(SystemRsyncConfig { - timeout: Duration::from_secs(args.rsync_timeout_secs), - mirror_root: args.rsync_mirror_root.clone(), - ..SystemRsyncConfig::default() - }); - let base_rsync = if args.capture_inputs_only { - RecordingRsyncFetcher::new_without_objects(base_rsync_inner) - } else { - RecordingRsyncFetcher::new(base_rsync_inner) - }; - eprintln!("[sequence] base live run start rir={rir_normalized}"); - let started = Instant::now(); - let base_out = run_tree_from_tal_and_ta_der_serial_audit( - &store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &base_http, - &base_rsync, - base_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: 
false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("live base run failed: {e}"))?; - let base_duration = started.elapsed(); - eprintln!( - "[sequence] base live run done rir={} duration_s={:.3}", - rir_normalized, - base_duration.as_secs_f64() - ); - - eprintln!("[sequence] base input materialization start rir={rir_normalized}"); - let base_capture = write_live_base_replay_bundle_inputs( - &rir_dir, - &rir_normalized, - base_validation_time, - &base_out.publication_points, - &store, - &base_http.snapshot_responses(), - &base_rsync.snapshot_fetches(), - )?; - eprintln!( - "[sequence] base input materialization done rir={} rrdp_repos={} rsync_modules={}", - rir_normalized, base_capture.rrdp_repo_count, base_capture.rsync_module_count - ); - let base_ccr_path = rir_dir.join("base.ccr"); - let base_vrps_path = rir_dir.join("base-vrps.csv"); - let base_vaps_path = rir_dir.join("base-vaps.csv"); - let (base_ccr_sha256, base_vrp_rows, base_vap_rows, base_verify, base_self_replay_ok) = - if args.capture_inputs_only { - eprintln!("[sequence] base output generation skipped rir={rir_normalized}"); - ( - String::new(), - std::collections::BTreeSet::::new(), - std::collections::BTreeSet::::new(), - CcrVerifySummary { - content_type_oid: String::new(), - version: 0, - produced_at_rfc3339_utc: String::new(), - state_hashes_ok: false, - manifest_instances: 0, - roa_payload_sets: 0, - roa_vrp_count: 0, - aspa_payload_sets: 0, - trust_anchor_ski_count: 0, - router_key_sets: 0, - router_key_count: 0, - }, - false, - ) - } else { - eprintln!("[sequence] base CCR/self-replay start rir={rir_normalized}"); - let base_ccr = build_ccr_from_run( - &store, - &[base_out.discovery.trust_anchor.clone()], - &base_out.tree.vrps, - &base_out.tree.aspas, - &base_out.tree.router_keys, - base_validation_time, - ) - .map_err(|e| format!("build base ccr failed: {e}"))?; - write_ccr_file(&base_ccr_path, &base_ccr) - .map_err(|e| format!("write base ccr failed: {e}"))?; - let base_ccr_bytes = fs::read(&base_ccr_path) - .map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr_path.display()))?; - let base_decoded = decode_content_info(&base_ccr_bytes) - .map_err(|e| format!("decode base ccr failed: {e}"))?; - let base_verify = verify_content_info(&base_decoded) - .map_err(|e| format!("verify base ccr failed: {e}"))?; - let base_vrp_rows = build_vrp_compare_rows(&base_out.tree.vrps, &trust_anchor); - let base_vap_rows = build_vap_compare_rows(&base_out.tree.aspas, &trust_anchor); - let (base_ccr_vrps, base_ccr_vaps) = - rpki::bundle::decode_ccr_compare_views(&base_decoded, &trust_anchor)?; - if base_vrp_rows != base_ccr_vrps { - return Err("base-vrps compare view does not match base.ccr".to_string()); - } - if base_vap_rows != base_ccr_vaps { - return Err("base-vaps compare view does not match base.ccr".to_string()); - } - write_vrp_csv(&base_vrps_path, &base_vrp_rows)?; - write_vap_csv(&base_vaps_path, &base_vap_rows)?; - let base_replay_store = RocksStore::open(&base_self_replay_dir) - .map_err(|e| format!("open base self replay db failed: {e}"))?; - let base_replay_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( - &base_replay_store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &rir_dir.join("base-payload-archive"), - &rir_dir.join("base-locks.json"), - base_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - 
.map_err(|e| format!("base self replay failed: {e}"))?; - if build_vrp_compare_rows(&base_replay_out.tree.vrps, &trust_anchor) != base_vrp_rows { - return Err("base self replay VRP compare view mismatch".to_string()); - } - if build_vap_compare_rows(&base_replay_out.tree.aspas, &trust_anchor) != base_vap_rows { - return Err("base self replay VAP compare view mismatch".to_string()); - } - let output = ( - sha256_hex(&base_ccr_bytes), - base_vrp_rows, - base_vap_rows, - base_verify, - true, - ); - eprintln!("[sequence] base CCR/self-replay done rir={rir_normalized}"); - output - }; - fs::create_dir_all(rir_dir.join("timings")) - .map_err(|e| format!("create timings dir failed: {e}"))?; - write_json( - &rir_dir.join("timings").join("base-produce.json"), - &serde_json::json!({ - "mode": "base", - "validationTime": base_validation_time.format(&Rfc3339).map_err(|e| format!("format base validation time failed: {e}"))?, - "durationSeconds": base_duration.as_secs_f64(), - }), - )?; - - let mut steps_json = Vec::new(); - let mut delta_steps = Vec::new(); - let mut previous_locks_path = rir_dir.join("base-locks.json"); - let mut previous_ref = "base".to_string(); - let sequence_self_replay_dir = out_root - .join(".tmp") - .join(format!("{rir}-sequence-self-replay-db")); - let _ = fs::remove_dir_all(&sequence_self_replay_dir); - let sequence_replay_store = if args.capture_inputs_only { - None - } else { - let store = RocksStore::open(&sequence_self_replay_dir) - .map_err(|e| format!("open sequence self replay db failed: {e}"))?; - let _base_replay = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( - &store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &rir_dir.join("base-payload-archive"), - &rir_dir.join("base-locks.json"), - base_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("sequence base self replay failed: {e}"))?; - Some(store) - }; - let mut all_steps_self_replay_ok = true; - - for step_index in 1..=args.delta_count { - if step_index > 1 && args.delta_interval_secs > 0 { - std::thread::sleep(Duration::from_secs(args.delta_interval_secs)); - } - let step_id = format!("step-{step_index:03}"); - let step_dir = delta_steps_root.join(&step_id); - fs::create_dir_all(&step_dir) - .map_err(|e| format!("create step dir failed: {}: {e}", step_dir.display()))?; - let step_validation_time = time::OffsetDateTime::now_utc(); - eprintln!( - "[sequence] step live run start rir={} step={}", - rir_normalized, step_id - ); - let step_http = RecordingHttpFetcher::new( - BlockingHttpFetcher::new(HttpFetcherConfig { - timeout: Duration::from_secs(args.http_timeout_secs), - ..HttpFetcherConfig::default() - }) - .map_err(|e| format!("create step http fetcher failed: {e}"))?, - ); - let step_rsync_inner = SystemRsyncFetcher::new(SystemRsyncConfig { - timeout: Duration::from_secs(args.rsync_timeout_secs), - mirror_root: args.rsync_mirror_root.clone(), - ..SystemRsyncConfig::default() - }); - let step_rsync = if args.capture_inputs_only { - RecordingRsyncFetcher::new_without_objects(step_rsync_inner) - } else { - RecordingRsyncFetcher::new(step_rsync_inner) - }; - let started = Instant::now(); - let step_out = run_tree_from_tal_and_ta_der_serial_audit( - &store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &step_http, - &step_rsync, - step_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: 
args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("live delta step {step_id} failed: {e}"))?; - let step_duration = started.elapsed(); - eprintln!( - "[sequence] step live run done rir={} step={} duration_s={:.3}", - rir_normalized, - step_id, - step_duration.as_secs_f64() - ); - ensure_recorded_target_snapshots_for_locks(&store, &previous_locks_path, &step_http)?; - - eprintln!( - "[sequence] step output generation phase start rir={} step={}", - rir_normalized, step_id - ); - let delta_ccr_path = step_dir.join("delta.ccr"); - let delta_vrps_path = step_dir.join("record-delta.csv"); - let delta_vaps_path = step_dir.join("record-delta-vaps.csv"); - let (delta_ccr_sha256, delta_vrp_rows, delta_vap_rows, delta_verify, step_self_replay_ok) = - if args.capture_inputs_only { - eprintln!( - "[sequence] step CCR/self-replay skipped rir={} step={}", - rir_normalized, step_id - ); - ( - String::new(), - std::collections::BTreeSet::::new(), - std::collections::BTreeSet::::new(), - CcrVerifySummary { - content_type_oid: String::new(), - version: 0, - produced_at_rfc3339_utc: String::new(), - state_hashes_ok: false, - manifest_instances: 0, - roa_payload_sets: 0, - roa_vrp_count: 0, - aspa_payload_sets: 0, - trust_anchor_ski_count: 0, - router_key_sets: 0, - router_key_count: 0, - }, - false, - ) - } else { - eprintln!( - "[sequence] step CCR/self-replay start rir={} step={}", - rir_normalized, step_id - ); - let delta_ccr = build_ccr_from_run( - &store, - &[step_out.discovery.trust_anchor.clone()], - &step_out.tree.vrps, - &step_out.tree.aspas, - &step_out.tree.router_keys, - step_validation_time, - ) - .map_err(|e| format!("build step ccr failed for {step_id}: {e}"))?; - write_ccr_file(&delta_ccr_path, &delta_ccr) - .map_err(|e| format!("write step ccr failed for {step_id}: {e}"))?; - let delta_ccr_bytes = fs::read(&delta_ccr_path).map_err(|e| { - format!("read step ccr failed: {}: {e}", delta_ccr_path.display()) - })?; - let delta_decoded = decode_content_info(&delta_ccr_bytes) - .map_err(|e| format!("decode step ccr failed for {step_id}: {e}"))?; - let delta_verify = verify_content_info(&delta_decoded) - .map_err(|e| format!("verify step ccr failed for {step_id}: {e}"))?; - let delta_vrp_rows = build_vrp_compare_rows(&step_out.tree.vrps, &trust_anchor); - let delta_vap_rows = build_vap_compare_rows(&step_out.tree.aspas, &trust_anchor); - let (ccr_vrps, ccr_vaps) = - rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?; - if delta_vrp_rows != ccr_vrps { - return Err(format!( - "{step_id} VRP compare view does not match delta.ccr" - )); - } - if delta_vap_rows != ccr_vaps { - return Err(format!( - "{step_id} VAP compare view does not match delta.ccr" - )); - } - write_vrp_csv(&delta_vrps_path, &delta_vrp_rows)?; - write_vap_csv(&delta_vaps_path, &delta_vap_rows)?; - let step_replay_out = - run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit( - sequence_replay_store - .as_ref() - .expect("sequence replay store"), - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &step_dir.join("payload-delta-archive"), - &previous_locks_path, - &step_dir.join("locks-delta.json"), - step_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("sequence self replay failed for {step_id}: {e}"))?; - let step_self_replay_ok = - 
build_vrp_compare_rows(&step_replay_out.tree.vrps, &trust_anchor) - == delta_vrp_rows - && build_vap_compare_rows(&step_replay_out.tree.aspas, &trust_anchor) - == delta_vap_rows; - let output = ( - sha256_hex(&delta_ccr_bytes), - delta_vrp_rows, - delta_vap_rows, - delta_verify, - step_self_replay_ok, - ); - eprintln!( - "[sequence] step CCR/self-replay done rir={} step={}", - rir_normalized, step_id - ); - output - }; - - eprintln!( - "[sequence] step input materialization start rir={} step={}", - rir_normalized, step_id - ); - let delta_capture = write_live_delta_replay_step_inputs( - &step_dir, - &rir_normalized, - &previous_locks_path, - step_validation_time, - &step_out.publication_points, - &store, - &step_http.snapshot_responses(), - &step_rsync.snapshot_fetches(), - )?; - let target_lock_capture_id = format!("{rir_normalized}-target-{step_id}"); - write_current_replay_state_locks( - &step_dir.join("target-locks.json"), - &target_lock_capture_id, - step_validation_time, - &step_out.publication_points, - &store, - )?; - eprintln!( - "[sequence] step input materialization done rir={} step={} rrdp_repos={} rsync_modules={}", - rir_normalized, - step_id, - delta_capture.rrdp_repo_count, - delta_capture.rsync_module_count - ); - write_json( - &step_dir.join("verification.json"), - &serde_json::json!({ - "index": step_index, - "id": step_id, - "validationTime": step_validation_time.format(&Rfc3339).map_err(|e| format!("format validation time failed: {e}"))?, - "capture": { - "captureId": delta_capture.capture_id, - "rrdpRepoCount": delta_capture.rrdp_repo_count, - "rsyncModuleCount": delta_capture.rsync_module_count, - }, - "ccr": { - "path": "delta.ccr", - "sha256": delta_ccr_sha256.clone(), - "stateHashesOk": delta_verify.state_hashes_ok, - "manifestInstances": delta_verify.manifest_instances, - "roaVrpCount": delta_verify.roa_vrp_count, - "aspaPayloadSets": delta_verify.aspa_payload_sets, - "routerKeyCount": delta_verify.router_key_count, - }, - "compareViews": { - "vrpCount": delta_vrp_rows.len(), - "vapCount": delta_vap_rows.len(), - }, - "selfReplayOk": serde_json::Value::Null, - "timings": { - "durationSeconds": step_duration.as_secs_f64(), - } - }), - )?; - fs::create_dir_all(step_dir.join("timings")) - .map_err(|e| format!("create step timings dir failed: {e}"))?; - write_json( - &step_dir.join("timings").join("delta-produce.json"), - &serde_json::json!({ - "mode": "delta", - "stepIndex": step_index, - "validationTime": step_validation_time.format(&Rfc3339).map_err(|e| format!("format validation time failed: {e}"))?, - "durationSeconds": step_duration.as_secs_f64(), - }), - )?; - - delta_steps.push(DeltaStepMetadataV2 { - index: step_index, - id: step_id.clone(), - relative_path: format!("delta-steps/{step_id}"), - base_ref: previous_ref.clone(), - validation_time: step_validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - delta_ccr_sha256: delta_ccr_sha256.clone(), - vrp_count: delta_vrp_rows.len(), - vap_count: delta_vap_rows.len(), - relative_archive_path: format!("delta-steps/{step_id}/payload-delta-archive"), - relative_transition_locks_path: format!("delta-steps/{step_id}/locks-delta.json"), - relative_target_locks_path: format!("delta-steps/{step_id}/target-locks.json"), - relative_ccr_path: format!("delta-steps/{step_id}/delta.ccr"), - relative_vrps_path: format!("delta-steps/{step_id}/record-delta.csv"), - relative_vaps_path: format!("delta-steps/{step_id}/record-delta-vaps.csv"), - has_aspa: !delta_vap_rows.is_empty(), - 
has_router_key: delta_verify.router_key_count > 0, - }); - all_steps_self_replay_ok &= step_self_replay_ok; - - steps_json.push(serde_json::json!({ - "index": step_index, - "id": step_id, - "validationTime": step_validation_time.format(&Rfc3339).map_err(|e| format!("format validation time failed: {e}"))?, - "capture": { - "captureId": delta_capture.capture_id, - "rrdpRepoCount": delta_capture.rrdp_repo_count, - "rsyncModuleCount": delta_capture.rsync_module_count, - }, - "ccr": { - "path": format!("delta-steps/{step_id}/delta.ccr"), - "sha256": delta_ccr_sha256, - "stateHashesOk": delta_verify.state_hashes_ok, - "manifestInstances": delta_verify.manifest_instances, - "roaVrpCount": delta_verify.roa_vrp_count, - "aspaPayloadSets": delta_verify.aspa_payload_sets, - "routerKeyCount": delta_verify.router_key_count, - }, - "compareViews": { - "vrpCount": delta_vrp_rows.len(), - "vapCount": delta_vap_rows.len(), - }, - "selfReplayOk": if args.capture_inputs_only { serde_json::Value::Null } else { serde_json::Value::Bool(step_self_replay_ok) } - })); - - previous_locks_path = step_dir.join("target-locks.json"); - previous_ref = step_id; - } - - let metadata = RirBundleMetadataV2 { - schema_version: "20260401-v2".to_string(), - bundle_producer: "ours".to_string(), - rir: rir_normalized.clone(), - tal_sha256: sha256_hex(&tal_bytes), - ta_cert_sha256: sha256_hex(&ta_bytes), - has_any_aspa: !base_vap_rows.is_empty() || delta_steps.iter().any(|step| step.has_aspa), - has_any_router_key: base_verify.router_key_count > 0 - || delta_steps.iter().any(|step| step.has_router_key), - base: BaseBundleStateMetadataV2 { - validation_time: base_validation_time - .format(&Rfc3339) - .map_err(|e| format!("format base validation time failed: {e}"))?, - ccr_sha256: base_ccr_sha256.clone(), - vrp_count: base_vrp_rows.len(), - vap_count: base_vap_rows.len(), - relative_archive_path: "base-payload-archive".to_string(), - relative_locks_path: "base-locks.json".to_string(), - relative_ccr_path: "base.ccr".to_string(), - relative_vrps_path: "base-vrps.csv".to_string(), - relative_vaps_path: "base-vaps.csv".to_string(), - }, - delta_sequence: DeltaSequenceMetadataV2 { - configured_delta_count: args.delta_count, - configured_interval_seconds: args.delta_interval_secs, - steps: delta_steps.clone(), - }, - }; - write_json(&rir_dir.join("bundle.json"), &metadata)?; - write_json( - &rir_dir.join("verification.json"), - &serde_json::json!({ - "base": { - "validationTime": metadata.base.validation_time, - "ccr": { - "path": "base.ccr", - "sha256": metadata.base.ccr_sha256, - "stateHashesOk": base_verify.state_hashes_ok, - "manifestInstances": base_verify.manifest_instances, - "roaVrpCount": base_verify.roa_vrp_count, - "aspaPayloadSets": base_verify.aspa_payload_sets, - "routerKeyCount": base_verify.router_key_count, - }, - "compareViews": { - "baseVrpCount": metadata.base.vrp_count, - "baseVapCount": metadata.base.vap_count, - }, - "capture": { - "captureId": base_capture.capture_id, - "rrdpRepoCount": base_capture.rrdp_repo_count, - "rsyncModuleCount": base_capture.rsync_module_count, - "selfReplayOk": if args.capture_inputs_only { serde_json::Value::Null } else { serde_json::Value::Bool(base_self_replay_ok) }, - } - }, - "steps": steps_json, - "summary": { - "baseSelfReplayOk": if args.capture_inputs_only { serde_json::Value::Null } else { serde_json::Value::Bool(base_self_replay_ok) }, - "stepCount": args.delta_count, - "allStepsSelfReplayOk": if args.capture_inputs_only { serde_json::Value::Null } else { 
serde_json::Value::Bool(all_steps_self_replay_ok) }, - } - }), - )?; - write_v2_top_readme( - &out_root.join("README.md"), - &rir_normalized, - args.delta_count, - args.delta_interval_secs, - )?; - write_v2_rir_readme( - &rir_dir.join("README.md"), - &rir_normalized, - &metadata.base.validation_time, - args.delta_count, - args.delta_interval_secs, - )?; - - let bundle_manifest = BundleManifestV2 { - schema_version: "20260401-v2".to_string(), - bundle_producer: "ours".to_string(), - recorded_at_rfc3339_utc: time::OffsetDateTime::now_utc() - .format(&Rfc3339) - .map_err(|e| format!("format recorded_at failed: {e}"))?, - rirs: vec![rir_normalized.clone()], - per_rir_bundles: vec![BundleManifestEntryV2 { - rir: rir_normalized.clone(), - relative_path: rir_normalized, - base_validation_time: metadata.base.validation_time.clone(), - step_count: metadata.delta_sequence.steps.len(), - first_delta_validation_time: metadata - .delta_sequence - .steps - .first() - .map(|step| step.validation_time.clone()), - last_delta_validation_time: metadata - .delta_sequence - .steps - .last() - .map(|step| step.validation_time.clone()), - has_aspa: metadata.has_any_aspa, - }], - }; - write_json(&out_root.join("bundle-manifest.json"), &bundle_manifest)?; - - if !args.keep_db { - drop(sequence_replay_store); - drop(store); - let _ = fs::remove_dir_all(&work_db_dir); - let _ = fs::remove_dir_all(&base_self_replay_dir); - let _ = fs::remove_dir_all(&sequence_self_replay_dir); - let tmp_dir = out_root.join(".tmp"); - if tmp_dir.is_dir() { - let is_empty = fs::read_dir(&tmp_dir) - .map_err(|e| format!("read tmp dir failed: {}: {e}", tmp_dir.display()))? - .next() - .is_none(); - if is_empty { - let _ = fs::remove_dir(&tmp_dir); - } - } - } - - Ok(out_root.clone()) -} - -fn main() -> Result<(), String> { - let args = parse_args(&std::env::args().collect::>())?; - let out = run(args)?; - println!("{}", out.display()); - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_args_defaults_delta_sequence_parameters() { - let argv = vec![ - "replay_bundle_capture_sequence".to_string(), - "--rir".to_string(), - "apnic".to_string(), - "--out-dir".to_string(), - "out".to_string(), - "--tal-path".to_string(), - "tal".to_string(), - "--ta-path".to_string(), - "ta".to_string(), - ]; - let args = parse_args(&argv).expect("parse"); - assert_eq!(args.delta_count, 5); - assert_eq!(args.delta_interval_secs, 600); - assert!(!args.keep_db); - } - - #[test] - fn parse_args_accepts_overrides_and_keep_db() { - let argv = vec![ - "replay_bundle_capture_sequence".to_string(), - "--rir".to_string(), - "apnic".to_string(), - "--out-dir".to_string(), - "out".to_string(), - "--tal-path".to_string(), - "tal".to_string(), - "--ta-path".to_string(), - "ta".to_string(), - "--delta-count".to_string(), - "2".to_string(), - "--delta-interval-secs".to_string(), - "0".to_string(), - "--keep-db".to_string(), - ]; - let args = parse_args(&argv).expect("parse"); - assert_eq!(args.delta_count, 2); - assert_eq!(args.delta_interval_secs, 0); - assert!(args.keep_db); - } -} diff --git a/src/bin/replay_bundle_record.rs b/src/bin/replay_bundle_record.rs deleted file mode 100644 index 0e8d329..0000000 --- a/src/bin/replay_bundle_record.rs +++ /dev/null @@ -1,833 +0,0 @@ -use rpki::bundle::{ - BundleManifest, BundleManifestEntry, RirBundleMetadata, build_vap_compare_rows, - build_vrp_compare_rows, decode_ccr_compare_views, write_vap_csv, write_vrp_csv, -}; -use rpki::ccr::{build_ccr_from_run, decode_content_info, verify_content_info, 
write_ccr_file};
-use rpki::policy::Policy;
-use rpki::storage::RocksStore;
-use rpki::validation::run_tree_from_tal::{
-    run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit,
-    run_tree_from_tal_and_ta_der_payload_replay_serial_audit,
-};
-use rpki::validation::tree::TreeRunConfig;
-use sha2::Digest;
-use std::fs;
-use std::path::{Path, PathBuf};
-use std::time::Instant;
-use time::format_description::well_known::Rfc3339;
-
-#[derive(Debug, Default, PartialEq, Eq)]
-struct Args {
-    rir: Option<String>,
-    out_dir: Option<PathBuf>,
-    tal_path: Option<PathBuf>,
-    ta_path: Option<PathBuf>,
-    payload_replay_archive: Option<PathBuf>,
-    payload_replay_locks: Option<PathBuf>,
-    payload_delta_archive: Option<PathBuf>,
-    payload_delta_locks: Option<PathBuf>,
-    validation_time: Option<time::OffsetDateTime>,
-    max_depth: Option<usize>,
-    max_instances: Option<usize>,
-    trust_anchor: Option<String>,
-}
-
-fn usage() -> &'static str {
-    "Usage: replay_bundle_record --rir <rir> --out-dir <dir> --tal-path <path> --ta-path <path> --payload-replay-archive <dir> --payload-replay-locks <path> [--payload-delta-archive <dir> --payload-delta-locks <path>] [--validation-time <rfc3339>] [--max-depth <n>] [--max-instances <n>] [--trust-anchor <name>]"
-}
-
-fn parse_args(argv: &[String]) -> Result<Args, String> {
-    let mut args = Args::default();
-    let mut i = 1usize;
-    while i < argv.len() {
-        match argv[i].as_str() {
-            "--help" | "-h" => return Err(usage().to_string()),
-            "--rir" => {
-                i += 1;
-                args.rir = Some(argv.get(i).ok_or("--rir requires a value")?.clone());
-            }
-            "--out-dir" => {
-                i += 1;
-                args.out_dir = Some(PathBuf::from(
-                    argv.get(i).ok_or("--out-dir requires a value")?,
-                ));
-            }
-            "--tal-path" => {
-                i += 1;
-                args.tal_path = Some(PathBuf::from(
-                    argv.get(i).ok_or("--tal-path requires a value")?,
-                ));
-            }
-            "--ta-path" => {
-                i += 1;
-                args.ta_path = Some(PathBuf::from(
-                    argv.get(i).ok_or("--ta-path requires a value")?,
-                ));
-            }
-            "--payload-replay-archive" => {
-                i += 1;
-                args.payload_replay_archive = Some(PathBuf::from(
-                    argv.get(i)
-                        .ok_or("--payload-replay-archive requires a value")?,
-                ));
-            }
-            "--payload-replay-locks" => {
-                i += 1;
-                args.payload_replay_locks = Some(PathBuf::from(
-                    argv.get(i)
-                        .ok_or("--payload-replay-locks requires a value")?,
-                ));
-            }
-            "--payload-delta-archive" => {
-                i += 1;
-                args.payload_delta_archive = Some(PathBuf::from(
-                    argv.get(i)
-                        .ok_or("--payload-delta-archive requires a value")?,
-                ));
-            }
-            "--payload-delta-locks" => {
-                i += 1;
-                args.payload_delta_locks = Some(PathBuf::from(
-                    argv.get(i)
-                        .ok_or("--payload-delta-locks requires a value")?,
-                ));
-            }
-            "--validation-time" => {
-                i += 1;
-                let value = argv.get(i).ok_or("--validation-time requires a value")?;
-                args.validation_time = Some(
-                    time::OffsetDateTime::parse(value, &Rfc3339)
-                        .map_err(|e| format!("invalid --validation-time: {e}"))?,
-                );
-            }
-            "--max-depth" => {
-                i += 1;
-                args.max_depth = Some(
-                    argv.get(i)
-                        .ok_or("--max-depth requires a value")?
-                        .parse()
-                        .map_err(|e| format!("invalid --max-depth: {e}"))?,
-                );
-            }
-            "--max-instances" => {
-                i += 1;
-                args.max_instances = Some(
-                    argv.get(i)
-                        .ok_or("--max-instances requires a value")?
-                        .parse()
-                        .map_err(|e| format!("invalid --max-instances: {e}"))?,
-                );
-            }
-            "--trust-anchor" => {
-                i += 1;
-                args.trust_anchor = Some(
-                    argv.get(i)
-                        .ok_or("--trust-anchor requires a value")?
- .clone(), - ); - } - other => return Err(format!("unknown argument: {other}\n{}", usage())), - } - i += 1; - } - - if args.rir.is_none() { - return Err(format!("--rir is required\n{}", usage())); - } - if args.out_dir.is_none() { - return Err(format!("--out-dir is required\n{}", usage())); - } - if args.tal_path.is_none() { - return Err(format!("--tal-path is required\n{}", usage())); - } - if args.ta_path.is_none() { - return Err(format!("--ta-path is required\n{}", usage())); - } - if args.payload_replay_archive.is_none() { - return Err(format!("--payload-replay-archive is required\n{}", usage())); - } - if args.payload_replay_locks.is_none() { - return Err(format!("--payload-replay-locks is required\n{}", usage())); - } - Ok(args) -} - -fn load_validation_time(path: &Path) -> Result { - let json: serde_json::Value = serde_json::from_slice( - &fs::read(path).map_err(|e| format!("read locks failed: {}: {e}", path.display()))?, - ) - .map_err(|e| format!("parse locks failed: {}: {e}", path.display()))?; - let value = json - .get("validationTime") - .or_else(|| json.get("validation_time")) - .and_then(|v| v.as_str()) - .ok_or_else(|| format!("validationTime missing in {}", path.display()))?; - time::OffsetDateTime::parse(value, &Rfc3339) - .map_err(|e| format!("invalid validationTime in {}: {e}", path.display())) -} - -fn sha256_hex(bytes: &[u8]) -> String { - hex::encode(sha2::Sha256::digest(bytes)) -} - -fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> { - fs::create_dir_all(dst) - .map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?; - for entry in - fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))? - { - let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", src.display()))?; - let ty = entry - .file_type() - .map_err(|e| format!("file_type failed: {}: {e}", entry.path().display()))?; - let to = dst.join(entry.file_name()); - if ty.is_dir() { - copy_dir_all(&entry.path(), &to)?; - } else if ty.is_file() { - if let Some(parent) = to.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; - } - fs::copy(entry.path(), &to).map_err(|e| { - format!( - "copy failed: {} -> {}: {e}", - entry.path().display(), - to.display() - ) - })?; - } - } - Ok(()) -} - -fn write_json(path: &Path, value: &impl serde::Serialize) -> Result<(), String> { - if let Some(parent) = path.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; - } - let bytes = serde_json::to_vec_pretty(value).map_err(|e| e.to_string())?; - fs::write(path, bytes).map_err(|e| format!("write json failed: {}: {e}", path.display())) -} - -fn write_top_readme(path: &Path, rir: &str) -> Result<(), String> { - fs::write( - path, - format!( - "# Ours Replay Bundle\n\nThis run contains one per-RIR bundle generated by `ours`.\n\n- RIR: `{rir}`\n- Reference result format: `CCR`\n" - ), - ) - .map_err(|e| format!("write readme failed: {}: {e}", path.display())) -} - -fn write_rir_readme(path: &Path, rir: &str, base_validation_time: &str) -> Result<(), String> { - fs::write( - path, - format!( - "# {rir} replay bundle\n\n- `tal.tal` and `ta.cer` are the direct replay inputs.\n- `base-locks.json.validationTime` = `{base_validation_time}`.\n- `base.ccr` is the authoritative reference result.\n- `base-vrps.csv` and `base-vaps.csv` are compare views derived from `base.ccr`.\n" - ), - ) - .map_err(|e| format!("write rir readme failed: {}: {e}", 
path.display()))
-}
-
-fn write_timing_json(
-    path: &Path,
-    mode: &str,
-    validation_time: &time::OffsetDateTime,
-    duration: std::time::Duration,
-) -> Result<(), String> {
-    write_json(
-        path,
-        &serde_json::json!({
-            "mode": mode,
-            "validationTime": validation_time
-                .format(&Rfc3339)
-                .map_err(|e| format!("format validation time failed: {e}"))?,
-            "durationSeconds": duration.as_secs_f64(),
-        }),
-    )
-}
-
-fn rewrite_delta_base_locks_sha(
-    delta_root: &Path,
-    emitted_base_locks_sha256: &str,
-) -> Result<(), String> {
-    let delta_locks = delta_root.join("locks-delta.json");
-    if delta_locks.is_file() {
-        let mut json: serde_json::Value = serde_json::from_slice(
-            &fs::read(&delta_locks)
-                .map_err(|e| format!("read delta locks failed: {}: {e}", delta_locks.display()))?,
-        )
-        .map_err(|e| format!("parse delta locks failed: {}: {e}", delta_locks.display()))?;
-        json.as_object_mut()
-            .ok_or_else(|| format!("delta locks must be object: {}", delta_locks.display()))?
-            .insert(
-                "baseLocksSha256".to_string(),
-                serde_json::Value::String(emitted_base_locks_sha256.to_string()),
-            );
-        write_json(&delta_locks, &json)?;
-    }
-
-    let archive_root = delta_root.join("payload-delta-archive");
-    if archive_root.is_dir() {
-        for path in walk_json_files_named(&archive_root, "base.json")? {
-            let mut json: serde_json::Value = serde_json::from_slice(
-                &fs::read(&path)
-                    .map_err(|e| format!("read base.json failed: {}: {e}", path.display()))?,
-            )
-            .map_err(|e| format!("parse base.json failed: {}: {e}", path.display()))?;
-            json.as_object_mut()
-                .ok_or_else(|| format!("base.json must be object: {}", path.display()))?
-                .insert(
-                    "baseLocksSha256".to_string(),
-                    serde_json::Value::String(emitted_base_locks_sha256.to_string()),
-                );
-            write_json(&path, &json)?;
-        }
-    }
-    Ok(())
-}
-
-fn walk_json_files_named(root: &Path, name: &str) -> Result<Vec<PathBuf>, String> {
-    let mut out = Vec::new();
-    if !root.is_dir() {
-        return Ok(out);
-    }
-    let mut stack = vec![root.to_path_buf()];
-    while let Some(dir) = stack.pop() {
-        for entry in
-            fs::read_dir(&dir).map_err(|e| format!("read_dir failed: {}: {e}", dir.display()))?
- { - let entry = - entry.map_err(|e| format!("read_dir entry failed: {}: {e}", dir.display()))?; - let path = entry.path(); - let ty = entry - .file_type() - .map_err(|e| format!("file_type failed: {}: {e}", path.display()))?; - if ty.is_dir() { - stack.push(path); - } else if ty.is_file() && entry.file_name() == name { - out.push(path); - } - } - } - Ok(out) -} - -fn run(args: Args) -> Result { - let rir = args.rir.as_ref().unwrap(); - let rir_normalized = rir.to_ascii_lowercase(); - let out_root = args.out_dir.as_ref().unwrap(); - let tal_path = args.tal_path.as_ref().unwrap(); - let ta_path = args.ta_path.as_ref().unwrap(); - let replay_archive = args.payload_replay_archive.as_ref().unwrap(); - let replay_locks = args.payload_replay_locks.as_ref().unwrap(); - let trust_anchor = args - .trust_anchor - .clone() - .unwrap_or_else(|| rir_normalized.clone()); - - let base_validation_time = match args.validation_time { - Some(value) => value, - None => load_validation_time(replay_locks)?, - }; - let delta_validation_time = match args.payload_delta_locks.as_ref() { - Some(path) => Some(load_validation_time(path)?), - None => None, - }; - - let run_root = out_root; - let rir_dir = run_root.join(&rir_normalized); - fs::create_dir_all(&rir_dir) - .map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?; - - let tal_bytes = - fs::read(tal_path).map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?; - let ta_bytes = - fs::read(ta_path).map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?; - - let db_dir = run_root.join(".tmp").join(format!("{rir}-base-db")); - if db_dir.exists() { - fs::remove_dir_all(&db_dir) - .map_err(|e| format!("remove old db failed: {}: {e}", db_dir.display()))?; - } - if let Some(parent) = db_dir.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create db parent failed: {}: {e}", parent.display()))?; - } - let store = RocksStore::open(&db_dir).map_err(|e| format!("open rocksdb failed: {e}"))?; - - let base_started = Instant::now(); - let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( - &store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - replay_archive, - replay_locks, - base_validation_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("base replay failed: {e}"))?; - let base_duration = base_started.elapsed(); - - let ccr = build_ccr_from_run( - &store, - &[out.discovery.trust_anchor.clone()], - &out.tree.vrps, - &out.tree.aspas, - &out.tree.router_keys, - base_validation_time, - ) - .map_err(|e| format!("build ccr failed: {e}"))?; - - let base_ccr_path = rir_dir.join("base.ccr"); - write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?; - let ccr_bytes = fs::read(&base_ccr_path) - .map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?; - let decoded = - decode_content_info(&ccr_bytes).map_err(|e| format!("decode written ccr failed: {e}"))?; - let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?; - - let vrp_rows = build_vrp_compare_rows(&out.tree.vrps, &trust_anchor); - let vap_rows = build_vap_compare_rows(&out.tree.aspas, &trust_anchor); - let (ccr_vrps, ccr_vaps) = decode_ccr_compare_views(&decoded, &trust_anchor)?; - if vrp_rows != ccr_vrps { - return Err("base-vrps compare view does not match base.ccr".to_string()); - } - if vap_rows 
!= ccr_vaps { - return Err("base-vaps compare view does not match base.ccr".to_string()); - } - - let base_vrps_csv = rir_dir.join("base-vrps.csv"); - let base_vaps_csv = rir_dir.join("base-vaps.csv"); - write_vrp_csv(&base_vrps_csv, &vrp_rows)?; - write_vap_csv(&base_vaps_csv, &vap_rows)?; - - copy_dir_all(replay_archive, &rir_dir.join("base-payload-archive"))?; - let mut base_locks_json: serde_json::Value = serde_json::from_slice( - &fs::read(replay_locks) - .map_err(|e| format!("read base locks failed: {}: {e}", replay_locks.display()))?, - ) - .map_err(|e| format!("parse base locks failed: {}: {e}", replay_locks.display()))?; - base_locks_json["validationTime"] = serde_json::Value::String( - base_validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - ); - let emitted_base_locks_path = rir_dir.join("base-locks.json"); - write_json(&emitted_base_locks_path, &base_locks_json)?; - let emitted_base_locks_sha256 = - sha256_hex(&fs::read(&emitted_base_locks_path).map_err(|e| { - format!( - "read emitted base locks failed: {}: {e}", - emitted_base_locks_path.display() - ) - })?); - - if let Some(delta_archive) = args.payload_delta_archive.as_ref() { - copy_dir_all(delta_archive, &rir_dir.join("payload-delta-archive"))?; - } - if let Some(delta_locks) = args.payload_delta_locks.as_ref() { - let mut delta_json: serde_json::Value = serde_json::from_slice( - &fs::read(delta_locks) - .map_err(|e| format!("read delta locks failed: {}: {e}", delta_locks.display()))?, - ) - .map_err(|e| format!("parse delta locks failed: {}: {e}", delta_locks.display()))?; - if let Some(delta_time) = delta_validation_time.as_ref() { - delta_json - .as_object_mut() - .ok_or_else(|| "delta locks json must be an object".to_string())? 
- .insert( - "validationTime".to_string(), - serde_json::Value::String( - delta_time - .format(&Rfc3339) - .map_err(|e| format!("format delta validation time failed: {e}"))?, - ), - ); - } - write_json(&rir_dir.join("locks-delta.json"), &delta_json)?; - } - if args.payload_delta_archive.is_some() && args.payload_delta_locks.is_some() { - rewrite_delta_base_locks_sha(&rir_dir, &emitted_base_locks_sha256)?; - } - - fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?; - fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?; - - let mut metadata = RirBundleMetadata { - schema_version: "20260330-v1".to_string(), - bundle_producer: "ours".to_string(), - rir: rir_normalized.clone(), - base_validation_time: base_validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - delta_validation_time: delta_validation_time.as_ref().map(|value| { - value - .format(&Rfc3339) - .expect("delta validation time must format") - }), - tal_sha256: sha256_hex(&tal_bytes), - ta_cert_sha256: sha256_hex(&ta_bytes), - base_ccr_sha256: sha256_hex(&ccr_bytes), - delta_ccr_sha256: None, - has_aspa: !vap_rows.is_empty(), - has_router_key: verify.router_key_count > 0, - base_vrp_count: vrp_rows.len(), - base_vap_count: vap_rows.len(), - delta_vrp_count: None, - delta_vap_count: None, - }; - - fs::create_dir_all(rir_dir.join("timings")) - .map_err(|e| format!("create timings dir failed: {e}"))?; - write_timing_json( - &rir_dir.join("timings").join("base-produce.json"), - "base", - &base_validation_time, - base_duration, - )?; - - let mut verification = serde_json::json!({ - "base": { - "validationTime": metadata.base_validation_time, - "ccr": { - "path": "base.ccr", - "sha256": metadata.base_ccr_sha256, - "stateHashesOk": verify.state_hashes_ok, - "manifestInstances": verify.manifest_instances, - "roaVrpCount": verify.roa_vrp_count, - "aspaPayloadSets": verify.aspa_payload_sets, - "routerKeyCount": verify.router_key_count, - }, - "compareViews": { - "vrpsSelfMatch": true, - "vapsSelfMatch": true, - "baseVrpCount": metadata.base_vrp_count, - "baseVapCount": metadata.base_vap_count, - } - } - }); - - if let (Some(delta_archive), Some(delta_locks), Some(delta_time)) = ( - args.payload_delta_archive.as_ref(), - args.payload_delta_locks.as_ref(), - delta_validation_time.as_ref(), - ) { - let delta_db_dir = run_root.join(".tmp").join(format!("{rir}-delta-db")); - if delta_db_dir.exists() { - fs::remove_dir_all(&delta_db_dir).map_err(|e| { - format!( - "remove old delta db failed: {}: {e}", - delta_db_dir.display() - ) - })?; - } - if let Some(parent) = delta_db_dir.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create delta db parent failed: {}: {e}", parent.display()))?; - } - let delta_store = RocksStore::open(&delta_db_dir) - .map_err(|e| format!("open delta rocksdb failed: {e}"))?; - let delta_started = Instant::now(); - let delta_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( - &delta_store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - replay_archive, - replay_locks, - delta_archive, - delta_locks, - base_validation_time, - *delta_time, - &TreeRunConfig { - max_depth: args.max_depth, - max_instances: args.max_instances, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("delta replay failed: {e}"))?; - let delta_duration = delta_started.elapsed(); - - let delta_ccr = build_ccr_from_run( - 
&delta_store, - &[delta_out.discovery.trust_anchor.clone()], - &delta_out.tree.vrps, - &delta_out.tree.aspas, - &delta_out.tree.router_keys, - *delta_time, - ) - .map_err(|e| format!("build delta ccr failed: {e}"))?; - - let delta_ccr_path = rir_dir.join("delta.ccr"); - write_ccr_file(&delta_ccr_path, &delta_ccr) - .map_err(|e| format!("write delta ccr failed: {e}"))?; - let delta_ccr_bytes = fs::read(&delta_ccr_path).map_err(|e| { - format!( - "read written delta ccr failed: {}: {e}", - delta_ccr_path.display() - ) - })?; - let delta_decoded = decode_content_info(&delta_ccr_bytes) - .map_err(|e| format!("decode written delta ccr failed: {e}"))?; - let delta_verify = verify_content_info(&delta_decoded) - .map_err(|e| format!("verify delta ccr failed: {e}"))?; - - let delta_vrp_rows = build_vrp_compare_rows(&delta_out.tree.vrps, &trust_anchor); - let delta_vap_rows = build_vap_compare_rows(&delta_out.tree.aspas, &trust_anchor); - let (delta_ccr_vrps, delta_ccr_vaps) = - decode_ccr_compare_views(&delta_decoded, &trust_anchor)?; - if delta_vrp_rows != delta_ccr_vrps { - return Err("record-delta.csv compare view does not match delta.ccr".to_string()); - } - if delta_vap_rows != delta_ccr_vaps { - return Err("record-delta-vaps.csv compare view does not match delta.ccr".to_string()); - } - write_vrp_csv(&rir_dir.join("record-delta.csv"), &delta_vrp_rows)?; - write_vap_csv(&rir_dir.join("record-delta-vaps.csv"), &delta_vap_rows)?; - write_timing_json( - &rir_dir.join("timings").join("delta-produce.json"), - "delta", - delta_time, - delta_duration, - )?; - - metadata.delta_ccr_sha256 = Some(sha256_hex(&delta_ccr_bytes)); - metadata.delta_vrp_count = Some(delta_vrp_rows.len()); - metadata.delta_vap_count = Some(delta_vap_rows.len()); - metadata.has_aspa = metadata.has_aspa || !delta_vap_rows.is_empty(); - metadata.has_router_key = metadata.has_router_key || delta_verify.router_key_count > 0; - - verification["delta"] = serde_json::json!({ - "validationTime": delta_time - .format(&Rfc3339) - .map_err(|e| format!("format delta validation time failed: {e}"))?, - "ccr": { - "path": "delta.ccr", - "sha256": metadata.delta_ccr_sha256.clone().expect("delta sha must exist"), - "stateHashesOk": delta_verify.state_hashes_ok, - "manifestInstances": delta_verify.manifest_instances, - "roaVrpCount": delta_verify.roa_vrp_count, - "aspaPayloadSets": delta_verify.aspa_payload_sets, - "routerKeyCount": delta_verify.router_key_count, - }, - "compareViews": { - "vrpsSelfMatch": true, - "vapsSelfMatch": true, - "deltaVrpCount": metadata.delta_vrp_count, - "deltaVapCount": metadata.delta_vap_count, - } - }); - - let _ = fs::remove_dir_all(&delta_db_dir); - } - - write_json(&rir_dir.join("bundle.json"), &metadata)?; - write_json(&rir_dir.join("verification.json"), &verification)?; - write_top_readme(&run_root.join("README.md"), rir)?; - write_rir_readme( - &rir_dir.join("README.md"), - rir, - &metadata.base_validation_time, - )?; - - let bundle_manifest = BundleManifest { - schema_version: "20260330-v1".to_string(), - bundle_producer: "ours".to_string(), - recorded_at_rfc3339_utc: time::OffsetDateTime::now_utc() - .format(&Rfc3339) - .map_err(|e| format!("format recorded_at failed: {e}"))?, - rirs: vec![rir_normalized.clone()], - per_rir_bundles: vec![BundleManifestEntry { - rir: rir_normalized.clone(), - relative_path: rir_normalized, - base_validation_time: metadata.base_validation_time.clone(), - delta_validation_time: metadata.delta_validation_time.clone(), - has_aspa: metadata.has_aspa, - }], - }; - 
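// A minimal standalone sketch (not from the deleted file) of the
// digest-then-embed pattern behind baseLocksSha256 earlier in this function:
// hash the emitted base locks exactly as written to disk, then record that
// digest in the dependent delta locks. Assumes the sha2 and serde_json
// crates; link_delta_to_base and both path arguments are illustrative names,
// not APIs from this repository.
use sha2::{Digest, Sha256};

fn sha256_hex(bytes: &[u8]) -> String {
    let mut hasher = Sha256::new();
    hasher.update(bytes);
    hasher.finalize().iter().map(|b| format!("{b:02x}")).collect()
}

fn link_delta_to_base(
    base_locks: &std::path::Path,
    delta_locks: &std::path::Path,
) -> Result<(), String> {
    // Read the base locks verbatim so the digest matches the on-disk bytes.
    let base_bytes = std::fs::read(base_locks).map_err(|e| e.to_string())?;
    let mut delta: serde_json::Value =
        serde_json::from_slice(&std::fs::read(delta_locks).map_err(|e| e.to_string())?)
            .map_err(|e| e.to_string())?;
    // Embed the digest so a replay can detect a base/delta mismatch.
    delta["baseLocksSha256"] = serde_json::Value::String(sha256_hex(&base_bytes));
    let out = serde_json::to_vec_pretty(&delta).map_err(|e| e.to_string())?;
    std::fs::write(delta_locks, out).map_err(|e| e.to_string())
}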
write_json(&run_root.join("bundle-manifest.json"), &bundle_manifest)?; - - let _ = fs::remove_dir_all(&db_dir); - - Ok(run_root.clone()) -} - -fn main() -> Result<(), String> { - let args = parse_args(&std::env::args().collect::>())?; - let out = run(args)?; - println!("{}", out.display()); - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use tempfile::tempdir; - - fn skip_heavy_blackbox_test() -> bool { - std::env::var_os("RPKI_SKIP_HEAVY_BLACKBOX_TESTS").is_some() - } - - #[test] - fn parse_args_requires_required_flags() { - let argv = vec![ - "replay_bundle_record".to_string(), - "--rir".to_string(), - "apnic".to_string(), - "--out-dir".to_string(), - "out".to_string(), - "--tal-path".to_string(), - "tal".to_string(), - "--ta-path".to_string(), - "ta".to_string(), - "--payload-replay-archive".to_string(), - "archive".to_string(), - "--payload-replay-locks".to_string(), - "locks.json".to_string(), - ]; - let args = parse_args(&argv).expect("parse"); - assert_eq!(args.rir.as_deref(), Some("apnic")); - assert_eq!(args.out_dir.as_deref(), Some(Path::new("out"))); - } - - #[test] - fn parse_args_rejects_missing_requireds() { - let argv = vec!["replay_bundle_record".to_string()]; - let err = parse_args(&argv).unwrap_err(); - assert!(err.contains("--rir is required"), "{err}"); - } - - #[test] - fn load_validation_time_reads_top_level_validation_time() { - let dir = tempdir().expect("tempdir"); - let path = dir.path().join("locks.json"); - std::fs::write(&path, r#"{"validationTime":"2026-03-16T11:49:15+08:00"}"#) - .expect("write locks"); - let got = load_validation_time(&path).expect("load validation time"); - assert_eq!( - got.format(&Rfc3339).expect("format"), - "2026-03-16T11:49:15+08:00" - ); - } - - #[test] - fn copy_dir_all_copies_nested_tree() { - let dir = tempdir().expect("tempdir"); - let src = dir.path().join("src"); - let dst = dir.path().join("dst"); - std::fs::create_dir_all(src.join("sub")).expect("mkdir"); - std::fs::write(src.join("a.txt"), b"a").expect("write a"); - std::fs::write(src.join("sub").join("b.txt"), b"b").expect("write b"); - copy_dir_all(&src, &dst).expect("copy dir"); - assert_eq!(std::fs::read(dst.join("a.txt")).expect("read a"), b"a"); - assert_eq!( - std::fs::read(dst.join("sub").join("b.txt")).expect("read b"), - b"b" - ); - } - - #[test] - fn run_base_bundle_record_smoke_root_only_apnic() { - if skip_heavy_blackbox_test() { - return; - } - let tal_path = PathBuf::from("tests/fixtures/tal/apnic-rfc7730-https.tal"); - let ta_path = PathBuf::from("tests/fixtures/ta/apnic-ta.cer"); - let replay_archive = PathBuf::from( - "/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3/apnic/base-payload-archive", - ); - let replay_locks = PathBuf::from( - "/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3/apnic/base-locks.json", - ); - let delta_archive = PathBuf::from( - "/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3/apnic/payload-delta-archive", - ); - let delta_locks = PathBuf::from( - "/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3/apnic/locks-delta.json", - ); - let required = [ - tal_path.as_path(), - ta_path.as_path(), - replay_archive.as_path(), - replay_locks.as_path(), - delta_archive.as_path(), - delta_locks.as_path(), - ]; - if let Some(missing) = required.iter().find(|path| !path.exists()) { - eprintln!( - "skipping replay_bundle_record smoke test; fixture 
missing: {}",
-                missing.display()
-            );
-            return;
-        }
-
-        let dir = tempdir().expect("tempdir");
-        let out_dir = dir.path().join("bundle");
-        let out = run(Args {
-            rir: Some("apnic".to_string()),
-            out_dir: Some(out_dir.clone()),
-            tal_path: Some(tal_path),
-            ta_path: Some(ta_path),
-            payload_replay_archive: Some(replay_archive),
-            payload_replay_locks: Some(replay_locks),
-            payload_delta_archive: Some(delta_archive),
-            payload_delta_locks: Some(delta_locks),
-            validation_time: None,
-            max_depth: Some(0),
-            max_instances: Some(1),
-            trust_anchor: Some("apnic".to_string()),
-        })
-        .expect("run bundle record");
-        assert_eq!(out, out_dir);
-        assert!(out_dir.join("bundle-manifest.json").is_file());
-        assert!(out_dir.join("README.md").is_file());
-        assert!(out_dir.join("apnic").join("bundle.json").is_file());
-        assert!(out_dir.join("apnic").join("tal.tal").is_file());
-        assert!(out_dir.join("apnic").join("ta.cer").is_file());
-        assert!(out_dir.join("apnic").join("base-payload-archive").is_dir());
-        assert!(out_dir.join("apnic").join("base-locks.json").is_file());
-        assert!(out_dir.join("apnic").join("base.ccr").is_file());
-        assert!(out_dir.join("apnic").join("base-vrps.csv").is_file());
-        assert!(out_dir.join("apnic").join("base-vaps.csv").is_file());
-        assert!(out_dir.join("apnic").join("delta.ccr").is_file());
-        assert!(out_dir.join("apnic").join("record-delta.csv").is_file());
-        assert!(
-            out_dir
-                .join("apnic")
-                .join("record-delta-vaps.csv")
-                .is_file()
-        );
-        assert!(out_dir.join("apnic").join("verification.json").is_file());
-        let bundle_json: serde_json::Value = serde_json::from_slice(
-            &std::fs::read(out_dir.join("apnic").join("bundle.json")).expect("read bundle.json"),
-        )
-        .expect("parse bundle.json");
-        assert_eq!(bundle_json["bundleProducer"], "ours");
-        assert_eq!(bundle_json["rir"], "apnic");
-        assert!(bundle_json.get("baseVrpCount").is_some());
-        assert!(bundle_json.get("baseCcrSha256").is_some());
-        assert!(bundle_json.get("deltaVrpCount").is_some());
-        assert!(bundle_json.get("deltaCcrSha256").is_some());
-        let base_locks_bytes = std::fs::read(out_dir.join("apnic").join("base-locks.json"))
-            .expect("read emitted base locks");
-        let expected_base_locks_sha = sha256_hex(&base_locks_bytes);
-        let delta_locks_json: serde_json::Value = serde_json::from_slice(
-            &std::fs::read(out_dir.join("apnic").join("locks-delta.json"))
-                .expect("read delta locks"),
-        )
-        .expect("parse delta locks");
-        assert_eq!(delta_locks_json["baseLocksSha256"], expected_base_locks_sha);
-    }
-}
diff --git a/src/bin/replay_bundle_refresh_sequence_outputs.rs b/src/bin/replay_bundle_refresh_sequence_outputs.rs
deleted file mode 100644
index ff7ff85..0000000
--- a/src/bin/replay_bundle_refresh_sequence_outputs.rs
+++ /dev/null
@@ -1,1060 +0,0 @@
-use std::collections::{BTreeMap, BTreeSet};
-use std::fs;
-use std::path::{Path, PathBuf};
-
-use rpki::bundle::{
-    build_vap_compare_rows, build_vrp_compare_rows, sha256_hex, write_json, write_vap_csv,
-    write_vrp_csv,
-};
-use rpki::ccr::{build_ccr_from_run, decode_content_info, verify_content_info, write_ccr_file};
-use rpki::policy::Policy;
-use rpki::replay::archive::canonical_rsync_module;
-use rpki::storage::RocksStore;
-use rpki::validation::run_tree_from_tal::{
-    run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit,
-    run_tree_from_tal_and_ta_der_payload_replay_serial_audit,
-};
-use rpki::validation::tree::TreeRunConfig;
-use serde::{Deserialize, Serialize};
-use time::format_description::well_known::Rfc3339;
-
-fn usage() -> &'static str {
-    "Usage: replay_bundle_refresh_sequence_outputs --rir-dir <rir-dir> [--keep-db]"
-}
-
-#[derive(Default)]
-struct Args {
-    rir_dir: Option<PathBuf>,
-    keep_db: bool,
-}
-
-fn parse_args() -> Result<Args, String> {
-    let mut args = Args::default();
-    let argv: Vec<String> = std::env::args().skip(1).collect();
-    let mut i = 0;
-    while i < argv.len() {
-        match argv[i].as_str() {
-            "--rir-dir" => {
-                i += 1;
-                args.rir_dir = Some(PathBuf::from(
-                    argv.get(i).ok_or("--rir-dir requires a value")?,
-                ));
-            }
-            "--keep-db" => {
-                args.keep_db = true;
-            }
-            "--help" | "-h" => {
-                return Err(usage().to_string());
-            }
-            other => return Err(format!("unknown argument: {other}\n{}", usage())),
-        }
-        i += 1;
-    }
-    if args.rir_dir.is_none() {
-        return Err(format!("--rir-dir is required\n{}", usage()));
-    }
-    Ok(args)
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct RirBundleMetadataV2Serde {
-    schema_version: String,
-    bundle_producer: String,
-    rir: String,
-    tal_sha256: String,
-    ta_cert_sha256: String,
-    has_any_aspa: bool,
-    has_any_router_key: bool,
-    base: BaseBundleStateMetadataV2Serde,
-    delta_sequence: DeltaSequenceMetadataV2Serde,
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct BaseBundleStateMetadataV2Serde {
-    validation_time: String,
-    ccr_sha256: String,
-    vrp_count: usize,
-    vap_count: usize,
-    relative_archive_path: String,
-    relative_locks_path: String,
-    relative_ccr_path: String,
-    relative_vrps_path: String,
-    relative_vaps_path: String,
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct DeltaSequenceMetadataV2Serde {
-    configured_delta_count: usize,
-    configured_interval_seconds: u64,
-    steps: Vec<DeltaStepMetadataV2Serde>,
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct DeltaStepMetadataV2Serde {
-    index: usize,
-    id: String,
-    relative_path: String,
-    base_ref: String,
-    validation_time: String,
-    delta_ccr_sha256: String,
-    vrp_count: usize,
-    vap_count: usize,
-    relative_archive_path: String,
-    relative_transition_locks_path: String,
-    relative_target_locks_path: String,
-    relative_ccr_path: String,
-    relative_vrps_path: String,
-    relative_vaps_path: String,
-    has_aspa: bool,
-    has_router_key: bool,
-}
-
-#[derive(Debug, Deserialize, Serialize)]
-#[serde(rename_all = "camelCase")]
-struct VerificationV2 {
-    base: serde_json::Value,
-    steps: Vec<serde_json::Value>,
-    summary: serde_json::Value,
-}
-
-fn parse_time(value: &str) -> Result<time::OffsetDateTime, String> {
-    time::OffsetDateTime::parse(value, &Rfc3339)
-        .map_err(|e| format!("invalid RFC3339 time `{value}`: {e}"))
-}
-
-fn path_join(root: &Path, relative: &str) -> PathBuf {
-    root.join(relative)
-}
-
-fn is_failed_fetch_source(source: &str) -> bool {
-    source == "failed_fetch_no_cache"
-}
-
-fn current_module_objects_from_store(
-    store: &RocksStore,
-    module_uri: &str,
-) -> Result<BTreeMap<String, Vec<u8>>, String> {
-    let entries = store
-        .list_repository_view_entries_with_prefix(module_uri)
-        .map_err(|e| format!("list repository view failed for {module_uri}: {e}"))?;
-    let mut out = BTreeMap::new();
-    for entry in entries {
-        if entry.state != rpki::storage::RepositoryViewState::Present {
-            continue;
-        }
-        let bytes = store
-            .load_current_object_bytes_by_uri(&entry.rsync_uri)
-            .map_err(|e| format!("load current object failed for {}: {e}", entry.rsync_uri))?
-            .ok_or_else(|| format!("current object missing for {}", entry.rsync_uri))?;
-        out.insert(entry.rsync_uri, bytes);
-    }
-    Ok(out)
-}
-
-fn rsync_bucket_dir(capture_root: &Path, module_uri: &str) -> PathBuf {
-    capture_root
-        .join("rsync")
-        .join("modules")
-        .join(sha256_hex(module_uri.as_bytes()))
-}
-
-fn materialize_rsync_module_from_store(
-    capture_root: &Path,
-    module_uri: &str,
-    objects: &BTreeMap<String, Vec<u8>>,
-) -> Result<Vec<String>, String> {
-    let bucket_dir = rsync_bucket_dir(capture_root, module_uri);
-    let tree_root = bucket_dir.join("tree");
-    if tree_root.exists() {
-        fs::remove_dir_all(&tree_root)
-            .map_err(|e| format!("remove old rsync tree failed: {}: {e}", tree_root.display()))?;
-    }
-    let relative_root = module_uri
-        .strip_prefix("rsync://")
-        .ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))?
-        .trim_end_matches('/');
-    fs::create_dir_all(tree_root.join(relative_root)).map_err(|e| {
-        format!(
-            "create rsync tree root failed: {}: {e}",
-            tree_root.join(relative_root).display()
-        )
-    })?;
-    for (uri, bytes) in objects {
-        let rel = uri
-            .strip_prefix(module_uri)
-            .ok_or_else(|| format!("object uri {uri} does not belong to module {module_uri}"))?;
-        let path = tree_root.join(relative_root).join(rel);
-        if let Some(parent) = path.parent() {
-            fs::create_dir_all(parent).map_err(|e| {
-                format!(
-                    "create rsync object parent failed: {}: {e}",
-                    parent.display()
-                )
-            })?;
-        }
-        fs::write(&path, bytes)
-            .map_err(|e| format!("write rsync object failed: {}: {e}", path.display()))?;
-    }
-    Ok(objects.keys().cloned().collect())
-}
-
-fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
-    fs::create_dir_all(dst)
-        .map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
-    for entry in
-        fs::read_dir(src).map_err(|e| format!("read directory failed: {}: {e}", src.display()))?
- { - let entry = entry.map_err(|e| format!("read entry failed: {}: {e}", src.display()))?; - let file_type = entry - .file_type() - .map_err(|e| format!("read file type failed: {}: {e}", entry.path().display()))?; - let target = dst.join(entry.file_name()); - if file_type.is_dir() { - copy_dir_all(&entry.path(), &target)?; - } else if file_type.is_file() { - fs::copy(entry.path(), &target).map_err(|e| { - format!( - "copy file failed: {} -> {}: {e}", - entry.path().display(), - target.display() - ) - })?; - } - } - Ok(()) -} - -fn load_json(path: &Path) -> Result { - serde_json::from_slice( - &fs::read(path).map_err(|e| format!("read json failed: {}: {e}", path.display()))?, - ) - .map_err(|e| format!("parse json failed: {}: {e}", path.display())) -} - -fn write_json_value(path: &Path, value: &serde_json::Value) -> Result<(), String> { - if let Some(parent) = path.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create json parent failed: {}: {e}", parent.display()))?; - } - fs::write( - path, - serde_json::to_vec_pretty(value).map_err(|e| format!("serialize json failed: {e}"))?, - ) - .map_err(|e| format!("write json failed: {}: {e}", path.display())) -} - -fn base_capture_root_from_locks(archive_root: &Path, locks_path: &Path) -> Result { - let value = load_json(locks_path)?; - let capture = value - .get("capture") - .and_then(|v| v.as_str()) - .ok_or_else(|| format!("missing capture in {}", locks_path.display()))?; - Ok(archive_root.join("v1").join("captures").join(capture)) -} - -fn keep_rsync_module(pp: &rpki::audit::PublicationPointAudit) -> Result, String> { - if is_failed_fetch_source(&pp.source) { - return Ok(None); - } - let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| { - format!( - "canonicalize rsync module failed for {}: {e}", - pp.rsync_base_uri - ) - })?; - if pp.rrdp_notification_uri.is_none() || pp.repo_sync_source.as_deref() == Some("rsync") { - return Ok(Some(module_uri)); - } - Ok(None) -} - -fn repair_base_inputs( - archive_root: &Path, - locks_path: &Path, - publication_points: &[rpki::audit::PublicationPointAudit], - store: &RocksStore, - verification: &mut VerificationV2, -) -> Result<(), String> { - let capture_root = base_capture_root_from_locks(archive_root, locks_path)?; - let mut locks = load_json(locks_path)?; - - let candidate_modules: BTreeSet = publication_points - .iter() - .filter_map(|pp| keep_rsync_module(pp).transpose()) - .collect::, _>>()? - .into_iter() - .collect(); - - let old_modules: Vec = locks - .get("rsync") - .and_then(|v| v.as_object()) - .map(|m| m.keys().cloned().collect()) - .unwrap_or_default(); - - if let Some(rrdp_obj) = locks.get_mut("rrdp").and_then(|v| v.as_object_mut()) { - for pp in publication_points { - let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() else { - continue; - }; - let lock_value = match store - .get_rrdp_source_record(notify_uri) - .map_err(|e| format!("read rrdp source record failed for {notify_uri}: {e}"))? 
- { - Some(record) - if record.last_session_id.is_some() - && record.last_serial.is_some() - && record.last_snapshot_uri.is_some() - && record.last_snapshot_hash.is_some() => - { - serde_json::json!({ - "transport": "rrdp", - "session": record.last_session_id, - "serial": record.last_serial, - }) - } - _ => serde_json::json!({ - "transport": "rsync", - "session": null, - "serial": null, - }), - }; - rrdp_obj.insert(notify_uri.to_string(), lock_value); - } - let repos_dir = capture_root.join("rrdp").join("repos"); - if repos_dir.exists() { - for entry in fs::read_dir(&repos_dir) - .map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))? - { - let entry = entry.map_err(|e| { - format!("read rrdp repo entry failed: {}: {e}", repos_dir.display()) - })?; - let meta = entry.path().join("meta.json"); - if !meta.exists() { - continue; - } - let meta_value = load_json(&meta)?; - let notify_uri = match meta_value.get("rpkiNotify").and_then(|v| v.as_str()) { - Some(value) => value.to_string(), - None => continue, - }; - if rrdp_obj.contains_key(¬ify_uri) { - continue; - } - let lock_value = match store - .get_rrdp_source_record(¬ify_uri) - .map_err(|e| format!("read rrdp source record failed for {notify_uri}: {e}"))? - { - Some(record) - if record.last_session_id.is_some() - && record.last_serial.is_some() - && record.last_snapshot_uri.is_some() - && record.last_snapshot_hash.is_some() => - { - serde_json::json!({ - "transport": "rrdp", - "session": record.last_session_id, - "serial": record.last_serial, - }) - } - _ => serde_json::json!({ - "transport": "rsync", - "session": null, - "serial": null, - }), - }; - rrdp_obj.insert(notify_uri, lock_value); - } - } - } - - let mut final_modules = serde_json::Map::new(); - for module_uri in candidate_modules { - let objects = current_module_objects_from_store(store, &module_uri)?; - if objects.is_empty() { - continue; - } - let _files = materialize_rsync_module_from_store(&capture_root, &module_uri, &objects)?; - final_modules.insert( - module_uri, - serde_json::json!({ - "transport": "rsync" - }), - ); - } - - for module_uri in old_modules { - if !final_modules.contains_key(&module_uri) { - let bucket_dir = rsync_bucket_dir(&capture_root, &module_uri); - let _ = fs::remove_dir_all(bucket_dir); - } - } - - if let Some(rsync_value) = locks.get_mut("rsync") { - *rsync_value = serde_json::Value::Object(final_modules.clone()); - } - write_json_value(locks_path, &locks)?; - verification.base["capture"]["rrdpRepoCount"] = serde_json::Value::from( - locks - .get("rrdp") - .and_then(|v| v.as_object()) - .map(|m| m.len()) - .unwrap_or(0), - ); - verification.base["capture"]["rsyncModuleCount"] = serde_json::Value::from(final_modules.len()); - Ok(()) -} - -fn repair_target_locks( - locks_path: &Path, - previous_locks_path: &Path, - publication_points: &[rpki::audit::PublicationPointAudit], - store: &RocksStore, -) -> Result<(), String> { - let mut locks = load_json(locks_path)?; - let previous_locks = load_json(previous_locks_path)?; - - if let Some(rrdp_obj) = locks.get_mut("rrdp").and_then(|v| v.as_object_mut()) { - for pp in publication_points { - let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() else { - continue; - }; - let mut lock_value = match store - .get_rrdp_source_record(notify_uri) - .map_err(|e| format!("read rrdp source record failed for {notify_uri}: {e}"))? 
- { - Some(record) - if record.last_session_id.is_some() - && record.last_serial.is_some() - && record.last_snapshot_uri.is_some() - && record.last_snapshot_hash.is_some() => - { - serde_json::json!({ - "transport": "rrdp", - "session": record.last_session_id, - "serial": record.last_serial, - }) - } - _ => serde_json::json!({ - "transport": "rsync", - "session": null, - "serial": null, - }), - }; - let previous_transport = previous_locks - .get("rrdp") - .and_then(|v| v.get(notify_uri)) - .and_then(|v| v.get("transport")) - .and_then(|v| v.as_str()); - if previous_transport != Some("rrdp") { - lock_value = serde_json::json!({ - "transport": "rsync", - "session": null, - "serial": null, - }); - } - rrdp_obj.insert(notify_uri.to_string(), lock_value); - } - } - - let candidate_modules: BTreeSet = publication_points - .iter() - .filter_map(|pp| keep_rsync_module(pp).transpose()) - .collect::, _>>()? - .into_iter() - .collect(); - let mut final_modules = serde_json::Map::new(); - for module_uri in candidate_modules { - let objects = current_module_objects_from_store(store, &module_uri)?; - if objects.is_empty() { - continue; - } - final_modules.insert( - module_uri, - serde_json::json!({ - "transport": "rsync" - }), - ); - } - if let Some(rsync_value) = locks.get_mut("rsync") { - *rsync_value = serde_json::Value::Object(final_modules); - } - - write_json_value(locks_path, &locks) -} - -fn repair_delta_step_inputs( - step_dir: &Path, - base_archive_root: &Path, - base_locks_path: &Path, - previous_locks_path: &Path, - publication_points: &[rpki::audit::PublicationPointAudit], - store: &RocksStore, - step_verification: &mut serde_json::Value, -) -> Result<(), String> { - let locks_path = step_dir.join("locks-delta.json"); - let mut locks = load_json(&locks_path)?; - let previous_locks = load_json(previous_locks_path)?; - let capture = locks - .get("capture") - .and_then(|v| v.as_str()) - .ok_or_else(|| format!("missing capture in {}", locks_path.display()))? - .to_string(); - let capture_root = step_dir - .join("payload-delta-archive") - .join("v1") - .join("captures") - .join(capture); - - if let Some(rrdp_obj) = locks.get_mut("rrdp").and_then(|v| v.as_object_mut()) { - let repos_dir = capture_root.join("rrdp").join("repos"); - if repos_dir.exists() { - for entry in fs::read_dir(&repos_dir) - .map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))? 
- { - let entry = entry.map_err(|e| { - format!("read rrdp repo entry failed: {}: {e}", repos_dir.display()) - })?; - let meta = entry.path().join("meta.json"); - if !meta.exists() { - continue; - } - let meta_value = load_json(&meta)?; - let notify_uri = match meta_value.get("rpkiNotify").and_then(|v| v.as_str()) { - Some(value) => value.to_string(), - None => continue, - }; - if rrdp_obj.contains_key(¬ify_uri) { - continue; - } - let transition_path = entry.path().join("transition.json"); - let lock_value = if transition_path.exists() { - let transition = load_json(&transition_path)?; - serde_json::json!({ - "kind": transition.get("kind").cloned().unwrap_or(serde_json::Value::String("fallback-rsync".to_string())), - "base": transition.get("base").cloned().unwrap_or(serde_json::json!({ - "transport": "rsync", - "session": null, - "serial": null - })), - "target": transition.get("target").cloned().unwrap_or(serde_json::json!({ - "transport": "rsync", - "session": null, - "serial": null - })), - "delta_count": transition.get("delta_count").cloned().unwrap_or(serde_json::Value::from(0)), - "deltas": transition.get("deltas").cloned().unwrap_or(serde_json::Value::Array(vec![])), - }) - } else { - serde_json::json!({ - "kind": "fallback-rsync", - "base": {"transport":"rsync","session":null,"serial":null}, - "target": {"transport":"rsync","session":null,"serial":null}, - "delta_count": 0, - "deltas": [] - }) - }; - rrdp_obj.insert(notify_uri, lock_value); - } - } - for (notify_uri, entry) in rrdp_obj.iter_mut() { - let previous_transport = previous_locks - .get("rrdp") - .and_then(|v| v.get(notify_uri)) - .and_then(|v| v.get("transport")) - .and_then(|v| v.as_str()); - if previous_transport != Some("rrdp") { - let fallback = serde_json::json!({ - "kind": "fallback-rsync", - "base": {"transport":"rsync","session":null,"serial":null}, - "target": {"transport":"rsync","session":null,"serial":null}, - "delta_count": 0, - "deltas": [] - }); - *entry = fallback.clone(); - let bucket_dir = capture_root - .join("rrdp") - .join("repos") - .join(sha256_hex(notify_uri.as_bytes())); - if bucket_dir.exists() { - write_json(&bucket_dir.join("transition.json"), &fallback)?; - } - } - } - } - - let candidate_modules: BTreeSet = publication_points - .iter() - .filter_map(|pp| keep_rsync_module(pp).transpose()) - .collect::, _>>()? 
- .into_iter() - .collect(); - - let old_modules: Vec = locks - .get("rsync") - .and_then(|v| v.as_object()) - .map(|m| m.keys().cloned().collect()) - .unwrap_or_default(); - - let mut final_modules = serde_json::Map::new(); - for module_uri in candidate_modules { - let objects = current_module_objects_from_store(store, &module_uri)?; - if objects.is_empty() { - continue; - } - let files = materialize_rsync_module_from_store(&capture_root, &module_uri, &objects)?; - let bucket_dir = rsync_bucket_dir(&capture_root, &module_uri); - write_json( - &bucket_dir.join("files.json"), - &serde_json::json!({ - "version": 1, - "module": module_uri, - "fileCount": files.len(), - "files": files, - }), - )?; - final_modules.insert( - module_uri, - serde_json::json!({ - "file_count": objects.len(), - "overlay_only": true - }), - ); - } - - for module_uri in old_modules { - if !final_modules.contains_key(&module_uri) { - let bucket_dir = rsync_bucket_dir(&capture_root, &module_uri); - let _ = fs::remove_dir_all(bucket_dir); - } - } - - if let Some(rsync_value) = locks.get_mut("rsync") { - *rsync_value = serde_json::Value::Object(final_modules.clone()); - } - - let base_capture_root = base_capture_root_from_locks(base_archive_root, base_locks_path)?; - if let Some(rrdp_obj) = locks.get("rrdp").and_then(|v| v.as_object()) { - for (notify_uri, entry) in rrdp_obj { - let kind = entry.get("kind").and_then(|v| v.as_str()).unwrap_or(""); - if kind != "unchanged" { - continue; - } - let session = entry - .get("target") - .and_then(|v| v.get("session")) - .and_then(|v| v.as_str()) - .or_else(|| { - entry - .get("base") - .and_then(|v| v.get("session")) - .and_then(|v| v.as_str()) - }); - let Some(session) = session else { continue }; - let bucket_hash = sha256_hex(notify_uri.as_bytes()); - let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash); - let session_dir = bucket_dir.join(session); - if session_dir.exists() { - continue; - } - let base_bucket_dir = base_capture_root - .join("rrdp") - .join("repos") - .join(&bucket_hash); - let base_session_dir = base_bucket_dir.join(session); - if !base_session_dir.exists() { - continue; - } - fs::create_dir_all(&bucket_dir).map_err(|e| { - format!( - "create delta rrdp repo dir failed: {}: {e}", - bucket_dir.display() - ) - })?; - let base_meta = base_bucket_dir.join("meta.json"); - if !bucket_dir.join("meta.json").exists() && base_meta.exists() { - fs::copy(&base_meta, bucket_dir.join("meta.json")).map_err(|e| { - format!( - "copy base repo meta failed: {} -> {}: {e}", - base_meta.display(), - bucket_dir.join("meta.json").display() - ) - })?; - } - copy_dir_all(&base_session_dir, &session_dir)?; - } - } - - write_json_value(&locks_path, &locks)?; - - step_verification["capture"]["rrdpRepoCount"] = serde_json::Value::from( - locks - .get("rrdp") - .and_then(|v| v.as_object()) - .map(|m| m.len()) - .unwrap_or(0), - ); - step_verification["capture"]["rsyncModuleCount"] = serde_json::Value::from(final_modules.len()); - Ok(()) -} - -fn rewrite_delta_base_hash(step_dir: &Path, previous_locks_path: &Path) -> Result<(), String> { - let previous_locks_bytes = fs::read(previous_locks_path).map_err(|e| { - format!( - "read previous locks failed for delta base hash rewrite: {}: {e}", - previous_locks_path.display() - ) - })?; - let previous_locks_sha256 = sha256_hex(&previous_locks_bytes); - let locks_path = step_dir.join("locks-delta.json"); - let mut locks = load_json(&locks_path)?; - let previous_locks = serde_json::from_slice::(&previous_locks_bytes) - 
.map_err(|e| { - format!( - "parse previous locks failed: {}: {e}", - previous_locks_path.display() - ) - })?; - locks["baseLocksSha256"] = serde_json::Value::String(previous_locks_sha256.clone()); - let capture = locks - .get("capture") - .and_then(|v| v.as_str()) - .map(|s| s.to_string()) - .ok_or_else(|| format!("missing capture in {}", locks_path.display()))?; - write_json_value(&locks_path, &locks)?; - - let base_meta_path = step_dir - .join("payload-delta-archive") - .join("v1") - .join("captures") - .join(&capture) - .join("base.json"); - let mut base_meta = load_json(&base_meta_path)?; - base_meta["baseLocksSha256"] = serde_json::Value::String(previous_locks_sha256); - write_json_value(&base_meta_path, &base_meta)?; - - if let Some(rrdp_obj) = locks.get_mut("rrdp").and_then(|v| v.as_object_mut()) { - for (notify_uri, entry) in rrdp_obj.iter_mut() { - let previous_transport = previous_locks - .get("rrdp") - .and_then(|v| v.get(notify_uri)) - .and_then(|v| v.get("transport")) - .and_then(|v| v.as_str()); - if previous_transport != Some("rrdp") { - let fallback = serde_json::json!({ - "kind": "fallback-rsync", - "base": {"transport":"rsync","session":null,"serial":null}, - "target": {"transport":"rsync","session":null,"serial":null}, - "delta_count": 0, - "deltas": [] - }); - *entry = fallback.clone(); - let bucket_dir = step_dir - .join("payload-delta-archive") - .join("v1") - .join("captures") - .join(&capture) - .join("rrdp") - .join("repos") - .join(sha256_hex(notify_uri.as_bytes())); - if bucket_dir.exists() { - write_json(&bucket_dir.join("transition.json"), &fallback)?; - } - } - } - } - write_json_value(&locks_path, &locks)?; - Ok(()) -} - -fn main() { - if let Err(err) = real_main() { - eprintln!("{err}"); - std::process::exit(1); - } -} - -fn real_main() -> Result<(), String> { - let args = parse_args()?; - let rir_dir = args.rir_dir.unwrap(); - let bundle_json_path = rir_dir.join("bundle.json"); - let verification_path = rir_dir.join("verification.json"); - - let mut bundle: RirBundleMetadataV2Serde = - serde_json::from_slice(&fs::read(&bundle_json_path).map_err(|e| { - format!( - "read bundle.json failed: {}: {e}", - bundle_json_path.display() - ) - })?) - .map_err(|e| { - format!( - "parse bundle.json failed: {}: {e}", - bundle_json_path.display() - ) - })?; - - let mut verification: VerificationV2 = - serde_json::from_slice(&fs::read(&verification_path).map_err(|e| { - format!( - "read verification.json failed: {}: {e}", - verification_path.display() - ) - })?) 
- .map_err(|e| { - format!( - "parse verification.json failed: {}: {e}", - verification_path.display() - ) - })?; - - let tal_bytes = fs::read(rir_dir.join("tal.tal")) - .map_err(|e| format!("read tal.tal failed: {}: {e}", rir_dir.display()))?; - let ta_bytes = fs::read(rir_dir.join("ta.cer")) - .map_err(|e| format!("read ta.cer failed: {}: {e}", rir_dir.display()))?; - - let tmp_root = rir_dir.parent().unwrap_or(&rir_dir).join(".tmp-refresh"); - let work_db = tmp_root.join(format!("{}-work-db", bundle.rir)); - if work_db.exists() { - fs::remove_dir_all(&work_db) - .map_err(|e| format!("remove old refresh db failed: {}: {e}", work_db.display()))?; - } - if let Some(parent) = work_db.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create refresh db parent failed: {}: {e}", parent.display()))?; - } - let store = - RocksStore::open(&work_db).map_err(|e| format!("open refresh rocksdb failed: {e}"))?; - - let base_archive = path_join(&rir_dir, &bundle.base.relative_archive_path); - let base_locks = path_join(&rir_dir, &bundle.base.relative_locks_path); - let base_ccr = path_join(&rir_dir, &bundle.base.relative_ccr_path); - let base_vrps = path_join(&rir_dir, &bundle.base.relative_vrps_path); - let base_vaps = path_join(&rir_dir, &bundle.base.relative_vaps_path); - let base_validation_time = parse_time(&bundle.base.validation_time)?; - - let base_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( - &store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &base_archive, - &base_locks, - base_validation_time, - &TreeRunConfig { - max_depth: None, - max_instances: None, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("base replay failed: {e}"))?; - - let base_ccr_content = build_ccr_from_run( - &store, - &[base_out.discovery.trust_anchor.clone()], - &base_out.tree.vrps, - &base_out.tree.aspas, - &base_out.tree.router_keys, - base_validation_time, - ) - .map_err(|e| format!("build base ccr failed: {e}"))?; - write_ccr_file(&base_ccr, &base_ccr_content) - .map_err(|e| format!("write base ccr failed: {}: {e}", base_ccr.display()))?; - let base_ccr_bytes = fs::read(&base_ccr) - .map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr.display()))?; - let base_decoded = - decode_content_info(&base_ccr_bytes).map_err(|e| format!("decode base ccr failed: {e}"))?; - let base_verify = - verify_content_info(&base_decoded).map_err(|e| format!("verify base ccr failed: {e}"))?; - let base_vrp_rows = build_vrp_compare_rows(&base_out.tree.vrps, &bundle.rir); - let base_vap_rows = build_vap_compare_rows(&base_out.tree.aspas, &bundle.rir); - write_vrp_csv(&base_vrps, &base_vrp_rows)?; - write_vap_csv(&base_vaps, &base_vap_rows)?; - bundle.base.ccr_sha256 = sha256_hex(&base_ccr_bytes); - bundle.base.vrp_count = base_vrp_rows.len(); - bundle.base.vap_count = base_vap_rows.len(); - - verification.base["ccr"]["sha256"] = serde_json::Value::String(bundle.base.ccr_sha256.clone()); - verification.base["ccr"]["stateHashesOk"] = - serde_json::Value::Bool(base_verify.state_hashes_ok); - verification.base["ccr"]["manifestInstances"] = - serde_json::Value::from(base_verify.manifest_instances); - verification.base["ccr"]["roaVrpCount"] = serde_json::Value::from(base_vrp_rows.len()); - verification.base["ccr"]["aspaPayloadSets"] = serde_json::Value::from(base_vap_rows.len()); - verification.base["ccr"]["routerKeyCount"] = - serde_json::Value::from(base_verify.router_key_count); - 
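// A minimal sketch (not from the deleted file) of the RFC3339 round-trip that
// parse_time performs on every validationTime above, assuming the time crate
// with the "parsing" and "formatting" features; the sample timestamp is
// illustrative.
use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime;

fn parse_time(value: &str) -> Result<OffsetDateTime, String> {
    OffsetDateTime::parse(value, &Rfc3339)
        .map_err(|e| format!("invalid RFC3339 time `{value}`: {e}"))
}

fn main() -> Result<(), String> {
    let t = parse_time("2026-03-16T11:49:15+08:00")?;
    // OffsetDateTime keeps the parsed UTC offset, so formatting back with
    // Rfc3339 reproduces "+08:00" rather than normalizing to "Z".
    println!("{}", t.format(&Rfc3339).map_err(|e| e.to_string())?);
    Ok(())
}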
verification.base["compareViews"]["baseVrpCount"] = - serde_json::Value::from(base_vrp_rows.len()); - verification.base["compareViews"]["baseVapCount"] = - serde_json::Value::from(base_vap_rows.len()); - verification.base["capture"]["selfReplayOk"] = serde_json::Value::Bool(true); - repair_base_inputs( - &base_archive, - &base_locks, - &base_out.publication_points, - &store, - &mut verification, - )?; - - let mut previous_locks_path = base_locks.clone(); - let mut any_aspa = !base_vap_rows.is_empty(); - let mut all_steps_self_replay_ok = true; - for (idx, step) in bundle.delta_sequence.steps.iter_mut().enumerate() { - let step_dir = path_join(&rir_dir, &step.relative_path); - rewrite_delta_base_hash(&step_dir, &previous_locks_path)?; - let delta_archive = path_join(&rir_dir, &step.relative_archive_path); - let delta_locks = path_join(&rir_dir, &step.relative_transition_locks_path); - let delta_ccr = path_join(&rir_dir, &step.relative_ccr_path); - let delta_vrps = path_join(&rir_dir, &step.relative_vrps_path); - let delta_vaps = path_join(&rir_dir, &step.relative_vaps_path); - let target_locks = path_join(&rir_dir, &step.relative_target_locks_path); - let delta_validation_time = parse_time(&step.validation_time)?; - - let delta_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit( - &store, - &Policy::default(), - &tal_bytes, - &ta_bytes, - None, - &delta_archive, - &previous_locks_path, - &delta_locks, - delta_validation_time, - &TreeRunConfig { - max_depth: None, - max_instances: None, - compact_audit: false, - persist_vcir: true, - build_ccr_accumulator: true, - }, - ) - .map_err(|e| format!("delta step {} replay failed: {e}", step.id))?; - - let delta_ccr_content = build_ccr_from_run( - &store, - &[delta_out.discovery.trust_anchor.clone()], - &delta_out.tree.vrps, - &delta_out.tree.aspas, - &delta_out.tree.router_keys, - delta_validation_time, - ) - .map_err(|e| format!("build delta ccr failed for {}: {e}", step.id))?; - write_ccr_file(&delta_ccr, &delta_ccr_content) - .map_err(|e| format!("write delta ccr failed: {}: {e}", delta_ccr.display()))?; - let delta_ccr_bytes = fs::read(&delta_ccr) - .map_err(|e| format!("read delta ccr failed: {}: {e}", delta_ccr.display()))?; - let delta_decoded = decode_content_info(&delta_ccr_bytes) - .map_err(|e| format!("decode delta ccr failed for {}: {e}", step.id))?; - let delta_verify = verify_content_info(&delta_decoded) - .map_err(|e| format!("verify delta ccr failed for {}: {e}", step.id))?; - let delta_vrp_rows = build_vrp_compare_rows(&delta_out.tree.vrps, &bundle.rir); - let delta_vap_rows = build_vap_compare_rows(&delta_out.tree.aspas, &bundle.rir); - write_vrp_csv(&delta_vrps, &delta_vrp_rows)?; - write_vap_csv(&delta_vaps, &delta_vap_rows)?; - step.delta_ccr_sha256 = sha256_hex(&delta_ccr_bytes); - step.vrp_count = delta_vrp_rows.len(); - step.vap_count = delta_vap_rows.len(); - step.has_aspa = !delta_vap_rows.is_empty(); - any_aspa |= step.has_aspa; - - if let Some(step_verification) = verification.steps.get_mut(idx) { - step_verification["ccr"]["sha256"] = - serde_json::Value::String(step.delta_ccr_sha256.clone()); - step_verification["ccr"]["stateHashesOk"] = - serde_json::Value::Bool(delta_verify.state_hashes_ok); - step_verification["ccr"]["manifestInstances"] = - serde_json::Value::from(delta_verify.manifest_instances); - step_verification["ccr"]["roaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len()); - step_verification["ccr"]["aspaPayloadSets"] = - serde_json::Value::from(delta_vap_rows.len()); - 
step_verification["ccr"]["routerKeyCount"] = - serde_json::Value::from(delta_verify.router_key_count); - step_verification["compareViews"]["vrpCount"] = - serde_json::Value::from(delta_vrp_rows.len()); - step_verification["compareViews"]["vapCount"] = - serde_json::Value::from(delta_vap_rows.len()); - step_verification["selfReplayOk"] = serde_json::Value::Bool(true); - } - let step_verification_path = - path_join(&rir_dir, &step.relative_path).join("verification.json"); - let mut step_verification_json: serde_json::Value = - serde_json::from_slice(&fs::read(&step_verification_path).map_err(|e| { - format!( - "read step verification failed: {}: {e}", - step_verification_path.display() - ) - })?) - .map_err(|e| { - format!( - "parse step verification failed: {}: {e}", - step_verification_path.display() - ) - })?; - step_verification_json["ccr"]["sha256"] = - serde_json::Value::String(step.delta_ccr_sha256.clone()); - step_verification_json["ccr"]["stateHashesOk"] = - serde_json::Value::Bool(delta_verify.state_hashes_ok); - step_verification_json["ccr"]["manifestInstances"] = - serde_json::Value::from(delta_verify.manifest_instances); - step_verification_json["ccr"]["roaVrpCount"] = - serde_json::Value::from(delta_vrp_rows.len()); - step_verification_json["ccr"]["aspaPayloadSets"] = - serde_json::Value::from(delta_vap_rows.len()); - step_verification_json["ccr"]["routerKeyCount"] = - serde_json::Value::from(delta_verify.router_key_count); - step_verification_json["compareViews"]["vrpCount"] = - serde_json::Value::from(delta_vrp_rows.len()); - step_verification_json["compareViews"]["vapCount"] = - serde_json::Value::from(delta_vap_rows.len()); - step_verification_json["selfReplayOk"] = serde_json::Value::Bool(true); - repair_delta_step_inputs( - &step_dir, - &base_archive, - &base_locks, - &previous_locks_path, - &delta_out.publication_points, - &store, - &mut step_verification_json, - )?; - write_json(&step_verification_path, &step_verification_json)?; - all_steps_self_replay_ok &= true; - - repair_target_locks( - &target_locks, - &previous_locks_path, - &delta_out.publication_points, - &store, - )?; - - previous_locks_path = target_locks; - } - - bundle.has_any_aspa = any_aspa; - verification.summary["baseSelfReplayOk"] = serde_json::Value::Bool(true); - verification.summary["allStepsSelfReplayOk"] = - serde_json::Value::Bool(all_steps_self_replay_ok); - write_json(&bundle_json_path, &bundle)?; - write_json(&verification_path, &verification)?; - - if !args.keep_db && work_db.exists() { - fs::remove_dir_all(&work_db) - .map_err(|e| format!("remove refresh db failed: {}: {e}", work_db.display()))?; - if tmp_root.exists() - && fs::read_dir(&tmp_root) - .map_err(|e| format!("read_dir failed: {}: {e}", tmp_root.display()))? 
- .next() - .is_none() - { - let _ = fs::remove_dir(&tmp_root); - } - } - - println!("{}", rir_dir.display()); - Ok(()) -} diff --git a/src/bundle/live_capture.rs b/src/bundle/live_capture.rs deleted file mode 100644 index b03c100..0000000 --- a/src/bundle/live_capture.rs +++ /dev/null @@ -1,1722 +0,0 @@ -use std::collections::{BTreeMap, BTreeSet}; -use std::fs; -use std::path::{Path, PathBuf}; -use std::sync::{Arc, Mutex}; - -use serde::Serialize; -use time::format_description::well_known::Rfc3339; - -use crate::audit::PublicationPointAudit; -use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher}; -use crate::replay::archive::{ - ReplayArchiveIndex, ReplayRrdpLock, ReplayTransport, canonical_rsync_module, sha256_hex, -}; -use crate::storage::{RocksStore, RrdpSourceRecord}; -use crate::sync::rrdp::Fetcher; -use crate::sync::rrdp::{NotificationDeltaRef, parse_notification}; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct RecordedHttpResponse { - pub uri: String, - pub bytes: Vec, - pub fetched_at_rfc3339_utc: String, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct RecordedRsyncFetch { - pub requested_base_uri: String, - pub objects: Vec<(String, Vec)>, - pub fetched_at_rfc3339_utc: String, -} - -#[derive(Clone, Default)] -pub struct RecordingHttpFetcher { - inner: F, - responses: Arc>>, -} - -impl RecordingHttpFetcher { - pub fn new(inner: F) -> Self { - Self { - inner, - responses: Arc::new(Mutex::new(BTreeMap::new())), - } - } - - pub fn snapshot_responses(&self) -> BTreeMap { - self.responses.lock().expect("http recorder lock").clone() - } -} - -impl Fetcher for RecordingHttpFetcher { - fn fetch(&self, uri: &str) -> Result, String> { - let bytes = self.inner.fetch(uri)?; - let fetched_at_rfc3339_utc = time::OffsetDateTime::now_utc() - .format(&Rfc3339) - .unwrap_or_else(|_| "".to_string()); - self.responses.lock().expect("http recorder lock").insert( - uri.to_string(), - RecordedHttpResponse { - uri: uri.to_string(), - bytes: bytes.clone(), - fetched_at_rfc3339_utc, - }, - ); - Ok(bytes) - } -} - -#[derive(Clone, Default)] -pub struct RecordingRsyncFetcher { - inner: F, - fetches: Arc>>, - capture_objects: bool, -} - -impl RecordingRsyncFetcher { - pub fn new(inner: F) -> Self { - Self { - inner, - fetches: Arc::new(Mutex::new(BTreeMap::new())), - capture_objects: true, - } - } - - pub fn new_without_objects(inner: F) -> Self { - Self { - inner, - fetches: Arc::new(Mutex::new(BTreeMap::new())), - capture_objects: false, - } - } - - pub fn snapshot_fetches(&self) -> BTreeMap { - self.fetches.lock().expect("rsync recorder lock").clone() - } -} - -impl RsyncFetcher for RecordingRsyncFetcher { - fn fetch_objects( - &self, - rsync_base_uri: &str, - ) -> Result)>, RsyncFetchError> { - let objects = self.inner.fetch_objects(rsync_base_uri)?; - let fetched_at_rfc3339_utc = time::OffsetDateTime::now_utc() - .format(&Rfc3339) - .unwrap_or_else(|_| "".to_string()); - self.fetches.lock().expect("rsync recorder lock").insert( - rsync_base_uri.to_string(), - RecordedRsyncFetch { - requested_base_uri: rsync_base_uri.to_string(), - objects: objects.clone(), - fetched_at_rfc3339_utc, - }, - ); - Ok(objects) - } - - fn visit_objects( - &self, - rsync_base_uri: &str, - visitor: &mut dyn FnMut(String, Vec) -> Result<(), String>, - ) -> Result<(usize, u64), RsyncFetchError> { - let fetched_at_rfc3339_utc = time::OffsetDateTime::now_utc() - .format(&Rfc3339) - .unwrap_or_else(|_| "".to_string()); - - if self.capture_objects { - let mut recorded = Vec::new(); - let result = self - .inner - 
.visit_objects(rsync_base_uri, &mut |uri, bytes| { - recorded.push((uri.clone(), bytes.clone())); - visitor(uri, bytes) - })?; - self.fetches.lock().expect("rsync recorder lock").insert( - rsync_base_uri.to_string(), - RecordedRsyncFetch { - requested_base_uri: rsync_base_uri.to_string(), - objects: recorded, - fetched_at_rfc3339_utc, - }, - ); - Ok(result) - } else { - self.inner.visit_objects(rsync_base_uri, visitor) - } - } - - fn dedup_key(&self, rsync_base_uri: &str) -> String { - self.inner.dedup_key(rsync_base_uri) - } -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct LiveBaseCaptureSummary { - pub archive_root: PathBuf, - pub locks_path: PathBuf, - pub capture_id: String, - pub rrdp_repo_count: usize, - pub rsync_module_count: usize, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct CaptureMetaJson { - version: u32, - #[serde(rename = "captureId")] - capture_id: String, - #[serde(rename = "createdAt")] - created_at: String, - notes: String, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct RepoMetaJson { - version: u32, - #[serde(rename = "rpkiNotify")] - rpki_notify: String, - #[serde(rename = "createdAt")] - created_at: String, - #[serde(rename = "lastSeenAt")] - last_seen_at: String, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct ModuleMetaJson { - version: u32, - module: String, - #[serde(rename = "createdAt")] - created_at: String, - #[serde(rename = "lastSeenAt")] - last_seen_at: String, -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)] -#[serde(rename_all = "snake_case")] -enum TransportJson { - Rrdp, - Rsync, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct RrdpLockJson { - transport: TransportJson, - session: Option, - serial: Option, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct RsyncLockJson { - transport: TransportJson, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct ReplayLocksJson { - version: u32, - capture: String, - #[serde(rename = "validationTime")] - validation_time: String, - rrdp: BTreeMap, - rsync: BTreeMap, -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize)] -#[serde(rename_all = "kebab-case")] -enum DeltaKindJson { - Unchanged, - Delta, - FallbackRsync, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct DeltaBaseMetaJson { - version: u32, - #[serde(rename = "baseCapture")] - base_capture: String, - #[serde(rename = "baseLocksSha256")] - base_locks_sha256: String, - #[serde(rename = "createdAt")] - created_at: String, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct DeltaStateJson { - transport: TransportJson, - session: Option, - serial: Option, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct DeltaRrdpEntryJson { - kind: DeltaKindJson, - base: DeltaStateJson, - target: DeltaStateJson, - #[serde(rename = "delta_count")] - delta_count: usize, - deltas: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct DeltaRsyncEntryJson { - #[serde(rename = "file_count")] - file_count: usize, - #[serde(rename = "overlay_only")] - overlay_only: bool, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct DeltaTransitionJson { - kind: DeltaKindJson, - base: DeltaStateJson, - target: DeltaStateJson, - #[serde(rename = "delta_count")] - delta_count: usize, - deltas: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct ReplayDeltaLocksJson { - version: u32, - capture: String, - #[serde(rename = "baseCapture")] - base_capture: String, - #[serde(rename = "baseLocksSha256")] 
- base_locks_sha256: String, - #[serde(rename = "validationTime")] - validation_time: String, - rrdp: BTreeMap, - rsync: BTreeMap, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -struct ReplayDeltaRsyncFilesJson { - version: u32, - module: String, - #[serde(rename = "fileCount")] - file_count: usize, - files: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct LiveDeltaCaptureSummary { - pub archive_root: PathBuf, - pub locks_path: PathBuf, - pub capture_id: String, - pub rrdp_repo_count: usize, - pub rsync_module_count: usize, -} - -fn write_json(path: &Path, value: &impl Serialize) -> Result<(), String> { - if let Some(parent) = path.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; - } - let bytes = serde_json::to_vec_pretty(value).map_err(|e| e.to_string())?; - fs::write(path, bytes).map_err(|e| format!("write json failed: {}: {e}", path.display())) -} - -fn write_bytes(path: &Path, bytes: &[u8]) -> Result<(), String> { - if let Some(parent) = path.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; - } - fs::write(path, bytes).map_err(|e| format!("write file failed: {}: {e}", path.display())) -} - -fn base_capture_id(rir: &str, recorded_at_utc: &time::OffsetDateTime) -> String { - let ts = recorded_at_utc - .format(&Rfc3339) - .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()) - .replace(':', "") - .replace('-', ""); - format!("{rir}-base-{}", ts.replace('+', "_")) -} - -fn rrdp_repo_lock_and_record( - store: &RocksStore, - notify_uri: &str, -) -> Result, String> { - store - .get_rrdp_source_record(notify_uri) - .map_err(|e| format!("read rrdp source record failed for {notify_uri}: {e}")) -} - -fn rrdp_repo_is_replayable(record: &RrdpSourceRecord) -> bool { - record.last_session_id.is_some() - && record.last_serial.is_some() - && record.last_snapshot_uri.is_some() - && record.last_snapshot_hash.is_some() -} - -fn collect_current_state_locks( - publication_points: &[PublicationPointAudit], - store: &RocksStore, -) -> Result< - ( - BTreeMap, - BTreeMap, - ), - String, -> { - let mut rrdp_locks = BTreeMap::new(); - let mut rsync_locks = BTreeMap::new(); - let mut seen_modules = BTreeSet::new(); - let mut seen_rrdp = BTreeSet::new(); - - for pp in publication_points { - if pp.source == "failed_fetch_no_cache" { - continue; - } - let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| { - format!( - "canonicalize rsync module failed for {}: {e}", - pp.rsync_base_uri - ) - })?; - if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() { - if !seen_rrdp.insert(notify_uri.to_string()) { - continue; - } - if let Some(source_record) = rrdp_repo_lock_and_record(store, notify_uri)? 
{ - if rrdp_repo_is_replayable(&source_record) { - rrdp_locks.insert( - notify_uri.to_string(), - RrdpLockJson { - transport: TransportJson::Rrdp, - session: source_record.last_session_id.clone(), - serial: source_record.last_serial, - }, - ); - continue; - } - } - rrdp_locks.insert( - notify_uri.to_string(), - RrdpLockJson { - transport: TransportJson::Rsync, - session: None, - serial: None, - }, - ); - if seen_modules.insert(module_uri.clone()) { - rsync_locks.insert( - module_uri.clone(), - RsyncLockJson { - transport: TransportJson::Rsync, - }, - ); - } - } else if seen_modules.insert(module_uri.clone()) { - rsync_locks.insert( - module_uri.clone(), - RsyncLockJson { - transport: TransportJson::Rsync, - }, - ); - } - } - - Ok((rrdp_locks, rsync_locks)) -} - -pub fn write_current_replay_state_locks( - output_path: &Path, - capture_id: &str, - validation_time: time::OffsetDateTime, - publication_points: &[PublicationPointAudit], - store: &RocksStore, -) -> Result<(), String> { - let (rrdp_locks, rsync_locks) = collect_current_state_locks(publication_points, store)?; - let locks = ReplayLocksJson { - version: 1, - capture: capture_id.to_string(), - validation_time: validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - rrdp: rrdp_locks, - rsync: rsync_locks, - }; - write_json(output_path, &locks) -} - -fn materialize_rrdp_repo( - capture_root: &Path, - record: &RrdpSourceRecord, - notification_bytes: &[u8], - snapshot_bytes: &[u8], -) -> Result<(), String> { - let notify_uri = &record.notify_uri; - let session = record - .last_session_id - .as_deref() - .ok_or_else(|| format!("missing last_session_id for {notify_uri}"))?; - let serial = record - .last_serial - .ok_or_else(|| format!("missing last_serial for {notify_uri}"))?; - let snapshot_hash = record - .last_snapshot_hash - .as_deref() - .ok_or_else(|| format!("missing last_snapshot_hash for {notify_uri}"))?; - let bucket_hash = sha256_hex(notify_uri.as_bytes()); - let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash); - let session_dir = bucket_dir.join(session); - let created_at = record.first_seen_at.clone(); - let last_seen_at = record.last_seen_at.clone(); - write_json( - &bucket_dir.join("meta.json"), - &RepoMetaJson { - version: 1, - rpki_notify: notify_uri.clone(), - created_at: created_at.rfc3339_utc.clone(), - last_seen_at: last_seen_at.rfc3339_utc.clone(), - }, - )?; - write_bytes( - &session_dir.join(format!("notification-{serial}.xml")), - notification_bytes, - )?; - write_bytes( - &session_dir.join(format!("snapshot-{serial}-{snapshot_hash}.xml")), - snapshot_bytes, - )?; - Ok(()) -} - -fn materialize_rsync_module( - capture_root: &Path, - module_uri: &str, - objects: &BTreeMap>, - created_at: &str, - last_seen_at: &str, -) -> Result<(), String> { - let bucket_hash = sha256_hex(module_uri.as_bytes()); - let bucket_dir = capture_root - .join("rsync") - .join("modules") - .join(&bucket_hash); - write_json( - &bucket_dir.join("meta.json"), - &ModuleMetaJson { - version: 1, - module: module_uri.to_string(), - created_at: created_at.to_string(), - last_seen_at: last_seen_at.to_string(), - }, - )?; - let without_scheme = module_uri - .strip_prefix("rsync://") - .ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))?; - let relative_root = without_scheme.trim_end_matches('/'); - fs::create_dir_all(bucket_dir.join("tree").join(relative_root)).map_err(|e| { - format!( - "create rsync tree root failed: {}: {e}", - 
bucket_dir.join("tree").join(relative_root).display() - ) - })?; - for (uri, bytes) in objects { - let rel = uri - .strip_prefix(module_uri) - .ok_or_else(|| format!("object uri {uri} does not belong to module {module_uri}"))?; - let path = bucket_dir.join("tree").join(relative_root).join(rel); - write_bytes(&path, bytes)?; - } - Ok(()) -} - -fn current_module_objects_from_store( - store: &RocksStore, - module_uri: &str, -) -> Result>, String> { - let entries = store - .list_repository_view_entries_with_prefix(module_uri) - .map_err(|e| format!("list repository view failed for {module_uri}: {e}"))?; - let mut out = BTreeMap::new(); - for entry in entries { - if entry.state != crate::storage::RepositoryViewState::Present { - continue; - } - let bytes = store - .load_current_object_bytes_by_uri(&entry.rsync_uri) - .map_err(|e| format!("load current object failed for {}: {e}", entry.rsync_uri))? - .ok_or_else(|| format!("current object missing for {}", entry.rsync_uri))?; - out.insert(entry.rsync_uri, bytes); - } - Ok(out) -} - -pub fn write_live_base_replay_bundle_inputs( - rir_dir: &Path, - rir: &str, - validation_time: time::OffsetDateTime, - publication_points: &[PublicationPointAudit], - store: &RocksStore, - http_records: &BTreeMap, - rsync_records: &BTreeMap, -) -> Result { - let recorded_at = time::OffsetDateTime::now_utc(); - let capture_id = base_capture_id(rir, &recorded_at); - let archive_root = rir_dir.join("base-payload-archive"); - let capture_root = archive_root.join("v1").join("captures").join(&capture_id); - write_json( - &capture_root.join("capture.json"), - &CaptureMetaJson { - version: 1, - capture_id: capture_id.clone(), - created_at: recorded_at - .format(&Rfc3339) - .map_err(|e| format!("format createdAt failed: {e}"))?, - notes: format!("recorded by ours live base recorder for {rir}"), - }, - )?; - - let mut rrdp_locks = BTreeMap::new(); - let mut rsync_locks = BTreeMap::new(); - let mut seen_modules = BTreeSet::new(); - let mut seen_rrdp = BTreeSet::new(); - - for pp in publication_points { - if pp.source == "failed_fetch_no_cache" { - continue; - } - let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| { - format!( - "canonicalize rsync module failed for {}: {e}", - pp.rsync_base_uri - ) - })?; - if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() { - if !seen_rrdp.insert(notify_uri.to_string()) { - continue; - } - if let Some(source_record) = rrdp_repo_lock_and_record(store, notify_uri)? { - if rrdp_repo_is_replayable(&source_record) { - let notification_bytes = &http_records - .get(notify_uri) - .ok_or_else(|| { - format!("missing recorded notification body for {notify_uri}") - })? - .bytes; - let snapshot_uri = source_record - .last_snapshot_uri - .as_deref() - .ok_or_else(|| format!("missing last_snapshot_uri for {notify_uri}"))?; - let snapshot_bytes = &http_records - .get(snapshot_uri) - .ok_or_else(|| { - format!("missing recorded snapshot body for {snapshot_uri}") - })? 
- .bytes; - materialize_rrdp_repo( - &capture_root, - &source_record, - notification_bytes, - snapshot_bytes, - )?; - rrdp_locks.insert( - notify_uri.to_string(), - RrdpLockJson { - transport: TransportJson::Rrdp, - session: source_record.last_session_id.clone(), - serial: source_record.last_serial, - }, - ); - continue; - } - } - - rrdp_locks.insert( - notify_uri.to_string(), - RrdpLockJson { - transport: TransportJson::Rsync, - session: None, - serial: None, - }, - ); - if seen_modules.insert(module_uri.clone()) { - rsync_locks.insert( - module_uri.clone(), - RsyncLockJson { - transport: TransportJson::Rsync, - }, - ); - } - } else if seen_modules.insert(module_uri.clone()) { - rsync_locks.insert( - module_uri.clone(), - RsyncLockJson { - transport: TransportJson::Rsync, - }, - ); - } - } - - let mut rsync_objects_by_module: BTreeMap>> = BTreeMap::new(); - let mut rsync_times_by_module: BTreeMap = BTreeMap::new(); - for fetch in rsync_records.values() { - let module_uri = canonical_rsync_module(&fetch.requested_base_uri) - .map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?; - let objects = rsync_objects_by_module - .entry(module_uri.clone()) - .or_default(); - let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| { - ( - fetch.fetched_at_rfc3339_utc.clone(), - fetch.fetched_at_rfc3339_utc.clone(), - ) - }); - if fetch.fetched_at_rfc3339_utc < times.0 { - times.0 = fetch.fetched_at_rfc3339_utc.clone(); - } - if fetch.fetched_at_rfc3339_utc > times.1 { - times.1 = fetch.fetched_at_rfc3339_utc.clone(); - } - for (uri, bytes) in &fetch.objects { - objects.insert(uri.clone(), bytes.clone()); - } - } - - for module_uri in rsync_locks.keys() { - let owned_objects; - let objects = if let Some(objects) = rsync_objects_by_module.get(module_uri) { - objects - } else { - owned_objects = current_module_objects_from_store(store, module_uri)?; - &owned_objects - }; - let (created_at, last_seen_at) = rsync_times_by_module - .get(module_uri) - .cloned() - .unwrap_or_else(|| { - let now = recorded_at - .format(&Rfc3339) - .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()); - (now.clone(), now) - }); - materialize_rsync_module( - &capture_root, - module_uri, - objects, - &created_at, - &last_seen_at, - )?; - } - - let locks = ReplayLocksJson { - version: 1, - capture: capture_id.clone(), - validation_time: validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - rrdp: rrdp_locks, - rsync: rsync_locks, - }; - let locks_path = rir_dir.join("base-locks.json"); - write_json(&locks_path, &locks)?; - - ReplayArchiveIndex::load_allow_missing_rsync_modules(&archive_root, &locks_path) - .map_err(|e| format!("replay archive self-validate failed: {e}"))?; - - Ok(LiveBaseCaptureSummary { - archive_root, - locks_path, - capture_id, - rrdp_repo_count: locks.rrdp.len(), - rsync_module_count: locks.rsync.len(), - }) -} - -fn target_rrdp_state_from_record(record: &RrdpSourceRecord) -> Option { - Some(DeltaStateJson { - transport: TransportJson::Rrdp, - session: Some(record.last_session_id.clone()?), - serial: record.last_serial, - }) -} - -fn fallback_rsync_state() -> DeltaStateJson { - DeltaStateJson { - transport: TransportJson::Rsync, - session: None, - serial: None, - } -} - -fn delta_state_from_base_lock(lock: Option<&ReplayRrdpLock>) -> DeltaStateJson { - match lock { - Some(lock) if lock.transport == ReplayTransport::Rrdp => DeltaStateJson { - transport: TransportJson::Rrdp, - session: lock.session.clone(), - serial: 
lock.serial, - }, - _ => fallback_rsync_state(), - } -} - -fn write_delta_repo_meta( - bucket_dir: &Path, - notify_uri: &str, - created_at: &str, - last_seen_at: &str, -) -> Result<(), String> { - write_json( - &bucket_dir.join("meta.json"), - &RepoMetaJson { - version: 1, - rpki_notify: notify_uri.to_string(), - created_at: created_at.to_string(), - last_seen_at: last_seen_at.to_string(), - }, - ) -} - -fn write_delta_transition( - bucket_dir: &Path, - transition: &DeltaTransitionJson, -) -> Result<(), String> { - write_json(&bucket_dir.join("transition.json"), transition) -} - -fn notification_deltas_after_serial( - notification_bytes: &[u8], - base_serial: u64, - target_serial: u64, -) -> Result, String> { - let notification = parse_notification(notification_bytes) - .map_err(|e| format!("parse notification failed: {e}"))?; - if notification.serial != target_serial { - return Err(format!( - "target notification serial mismatch: expected {target_serial}, actual {}", - notification.serial - )); - } - Ok(notification - .deltas - .into_iter() - .filter(|d| d.serial > base_serial && d.serial <= target_serial) - .collect()) -} - -pub fn write_live_delta_replay_step_inputs( - step_dir: &Path, - rir: &str, - previous_locks_path: &Path, - target_validation_time: time::OffsetDateTime, - publication_points: &[PublicationPointAudit], - store: &RocksStore, - http_records: &BTreeMap, - rsync_records: &BTreeMap, -) -> Result { - let previous_locks: crate::replay::archive::ReplayLocks = - serde_json::from_slice(&fs::read(previous_locks_path).map_err(|e| { - format!( - "read previous locks failed: {}: {e}", - previous_locks_path.display() - ) - })?) - .map_err(|e| { - format!( - "parse previous locks failed: {}: {e}", - previous_locks_path.display() - ) - })?; - let previous_locks_bytes = fs::read(previous_locks_path).map_err(|e| { - format!( - "read previous locks bytes failed: {}: {e}", - previous_locks_path.display() - ) - })?; - let previous_locks_sha256 = sha256_hex(&previous_locks_bytes); - - let recorded_at = time::OffsetDateTime::now_utc(); - let capture_id = format!( - "{rir}-delta-{}", - recorded_at - .format(&Rfc3339) - .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()) - .replace(':', "") - .replace('-', "") - .replace('+', "_") - ); - let archive_root = step_dir.join("payload-delta-archive"); - let capture_root = archive_root.join("v1").join("captures").join(&capture_id); - write_json( - &capture_root.join("capture.json"), - &CaptureMetaJson { - version: 1, - capture_id: capture_id.clone(), - created_at: recorded_at - .format(&Rfc3339) - .map_err(|e| format!("format createdAt failed: {e}"))?, - notes: format!("recorded by ours live delta recorder for {rir}"), - }, - )?; - write_json( - &capture_root.join("base.json"), - &DeltaBaseMetaJson { - version: 1, - base_capture: previous_locks.capture.clone(), - base_locks_sha256: previous_locks_sha256.clone(), - created_at: recorded_at - .format(&Rfc3339) - .map_err(|e| format!("format createdAt failed: {e}"))?, - }, - )?; - - let mut rsync_objects_by_module: BTreeMap>> = BTreeMap::new(); - let mut rsync_times_by_module: BTreeMap = BTreeMap::new(); - for fetch in rsync_records.values() { - let module_uri = canonical_rsync_module(&fetch.requested_base_uri) - .map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?; - let objects = rsync_objects_by_module - .entry(module_uri.clone()) - .or_default(); - let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| { - ( - fetch.fetched_at_rfc3339_utc.clone(), - 
fetch.fetched_at_rfc3339_utc.clone(), - ) - }); - if fetch.fetched_at_rfc3339_utc < times.0 { - times.0 = fetch.fetched_at_rfc3339_utc.clone(); - } - if fetch.fetched_at_rfc3339_utc > times.1 { - times.1 = fetch.fetched_at_rfc3339_utc.clone(); - } - for (uri, bytes) in &fetch.objects { - objects.insert(uri.clone(), bytes.clone()); - } - } - - let mut delta_rrdp_locks = BTreeMap::new(); - let mut delta_rsync_locks = BTreeMap::new(); - let mut seen_notifications = BTreeSet::new(); - let mut needed_modules = BTreeSet::new(); - - for pp in publication_points { - if pp.source == "failed_fetch_no_cache" { - continue; - } - let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| { - format!( - "canonicalize rsync module failed for {}: {e}", - pp.rsync_base_uri - ) - })?; - if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() { - if !seen_notifications.insert(notify_uri.to_string()) { - continue; - } - let base_lock = previous_locks.rrdp.get(notify_uri); - let target_record = store.get_rrdp_source_record(notify_uri).map_err(|e| { - format!("read target rrdp source record failed for {notify_uri}: {e}") - })?; - - let bucket_hash = sha256_hex(notify_uri.as_bytes()); - let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash); - let (created_at, last_seen_at) = target_record - .as_ref() - .map(|record| { - ( - record.first_seen_at.rfc3339_utc.clone(), - record.last_seen_at.rfc3339_utc.clone(), - ) - }) - .unwrap_or_else(|| { - let now = recorded_at - .format(&Rfc3339) - .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()); - (now.clone(), now) - }); - write_delta_repo_meta(&bucket_dir, notify_uri, &created_at, &last_seen_at)?; - - let fallback_entry = || DeltaRrdpEntryJson { - kind: DeltaKindJson::FallbackRsync, - base: fallback_rsync_state(), - target: fallback_rsync_state(), - delta_count: 0, - deltas: Vec::new(), - }; - - let entry = if let (Some(base_lock), Some(target_record), Some(target_state)) = ( - base_lock, - target_record.as_ref(), - target_record - .as_ref() - .and_then(target_rrdp_state_from_record), - ) { - if base_lock.transport == ReplayTransport::Rrdp - && base_lock.session.as_deref() == target_record.last_session_id.as_deref() - && target_record.last_serial == base_lock.serial - { - let transition = DeltaTransitionJson { - kind: DeltaKindJson::Unchanged, - base: delta_state_from_base_lock(Some(base_lock)), - target: target_state.clone(), - delta_count: 0, - deltas: Vec::new(), - }; - write_delta_transition(&bucket_dir, &transition)?; - DeltaRrdpEntryJson { - kind: DeltaKindJson::Unchanged, - base: transition.base, - target: transition.target, - delta_count: 0, - deltas: Vec::new(), - } - } else if base_lock.transport == ReplayTransport::Rrdp - && base_lock.session.as_deref() == target_record.last_session_id.as_deref() - && target_record - .last_serial - .zip(base_lock.serial) - .is_some_and(|(target, base)| target > base) - { - let notification_bytes = http_records - .get(notify_uri) - .map(|record| record.bytes.as_slice()) - .ok_or_else(|| { - format!("missing recorded target notification body for {notify_uri}") - })?; - let base_serial = base_lock.serial.expect("checked above"); - let target_serial = target_record.last_serial.expect("checked above"); - let deltas = notification_deltas_after_serial( - notification_bytes, - base_serial, - target_serial, - )?; - let mut all_present = true; - let session = target_record - .last_session_id - .as_deref() - .ok_or_else(|| format!("missing target session for {notify_uri}"))?; - let 
session_dir = bucket_dir.join(session); - let notification_path = - session_dir.join(format!("notification-target-{target_serial}.xml")); - write_bytes(¬ification_path, notification_bytes)?; - let target_notification = - parse_notification(notification_bytes).map_err(|e| { - format!("parse target notification failed for {notify_uri}: {e}") - })?; - let snapshot_hash_hex = hex::encode(target_notification.snapshot_hash_sha256); - if let Some(snapshot_bytes) = http_records - .get(&target_notification.snapshot_uri) - .map(|record| record.bytes.as_slice()) - { - let snapshot_path = session_dir.join(format!( - "snapshot-target-{target_serial}-{snapshot_hash_hex}.xml" - )); - write_bytes(&snapshot_path, snapshot_bytes)?; - } - let deltas_dir = session_dir.join("deltas"); - let mut delta_serials = Vec::new(); - for dref in &deltas { - if let Some(delta_bytes) = http_records - .get(&dref.uri) - .map(|record| record.bytes.as_slice()) - { - let hash = hex::encode(dref.hash_sha256); - let path = - deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash)); - write_bytes(&path, delta_bytes)?; - delta_serials.push(dref.serial); - } else { - all_present = false; - break; - } - } - if all_present && !delta_serials.is_empty() { - let transition = DeltaTransitionJson { - kind: DeltaKindJson::Delta, - base: delta_state_from_base_lock(Some(base_lock)), - target: target_state.clone(), - delta_count: delta_serials.len(), - deltas: delta_serials.clone(), - }; - write_delta_transition(&bucket_dir, &transition)?; - DeltaRrdpEntryJson { - kind: DeltaKindJson::Delta, - base: transition.base, - target: transition.target, - delta_count: transition.delta_count, - deltas: transition.deltas, - } - } else { - needed_modules.insert(module_uri.clone()); - let transition = DeltaTransitionJson { - kind: DeltaKindJson::FallbackRsync, - base: fallback_rsync_state(), - target: fallback_rsync_state(), - delta_count: 0, - deltas: Vec::new(), - }; - write_delta_transition(&bucket_dir, &transition)?; - fallback_entry() - } - } else { - needed_modules.insert(module_uri.clone()); - let transition = DeltaTransitionJson { - kind: DeltaKindJson::FallbackRsync, - base: fallback_rsync_state(), - target: fallback_rsync_state(), - delta_count: 0, - deltas: Vec::new(), - }; - write_delta_transition(&bucket_dir, &transition)?; - fallback_entry() - } - } else { - needed_modules.insert(module_uri.clone()); - let transition = DeltaTransitionJson { - kind: DeltaKindJson::FallbackRsync, - base: fallback_rsync_state(), - target: fallback_rsync_state(), - delta_count: 0, - deltas: Vec::new(), - }; - write_delta_transition(&bucket_dir, &transition)?; - fallback_entry() - }; - delta_rrdp_locks.insert(notify_uri.to_string(), entry); - } else { - needed_modules.insert(module_uri); - } - } - - for module_uri in needed_modules { - let owned_objects; - let objects = if let Some(objects) = rsync_objects_by_module.get(&module_uri) { - objects - } else { - owned_objects = current_module_objects_from_store(store, &module_uri)?; - &owned_objects - }; - let (created_at, last_seen_at) = rsync_times_by_module - .get(&module_uri) - .cloned() - .unwrap_or_else(|| { - let now = recorded_at - .format(&Rfc3339) - .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()); - (now.clone(), now) - }); - let bucket_hash = sha256_hex(module_uri.as_bytes()); - let bucket_dir = capture_root - .join("rsync") - .join("modules") - .join(&bucket_hash); - materialize_rsync_module( - &capture_root, - &module_uri, - objects, - &created_at, - &last_seen_at, - )?; - let files = 
objects.keys().cloned().collect::>(); - write_json( - &bucket_dir.join("files.json"), - &ReplayDeltaRsyncFilesJson { - version: 1, - module: module_uri.clone(), - file_count: files.len(), - files: files.clone(), - }, - )?; - delta_rsync_locks.insert( - module_uri, - DeltaRsyncEntryJson { - file_count: files.len(), - overlay_only: true, - }, - ); - } - - let locks = ReplayDeltaLocksJson { - version: 1, - capture: capture_id.clone(), - base_capture: previous_locks.capture.clone(), - base_locks_sha256: previous_locks_sha256, - validation_time: target_validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - rrdp: delta_rrdp_locks, - rsync: delta_rsync_locks, - }; - let locks_path = step_dir.join("locks-delta.json"); - write_json(&locks_path, &locks)?; - - crate::replay::delta_archive::ReplayDeltaArchiveIndex::load(&archive_root, &locks_path) - .map_err(|e| format!("delta archive self-validate failed: {e}"))?; - - Ok(LiveDeltaCaptureSummary { - archive_root, - locks_path, - capture_id, - rrdp_repo_count: locks.rrdp.len(), - rsync_module_count: locks.rsync.len(), - }) -} - -pub fn write_live_delta_replay_bundle_inputs( - rir_dir: &Path, - rir: &str, - target_validation_time: time::OffsetDateTime, - publication_points: &[PublicationPointAudit], - store: &RocksStore, - http_records: &BTreeMap, - rsync_records: &BTreeMap, -) -> Result { - let base_archive_root = rir_dir.join("base-payload-archive"); - let base_locks_path = rir_dir.join("base-locks.json"); - let base_index = - ReplayArchiveIndex::load_allow_missing_rsync_modules(&base_archive_root, &base_locks_path) - .map_err(|e| format!("load base replay index failed: {e}"))?; - let base_locks_bytes = fs::read(&base_locks_path) - .map_err(|e| format!("read base locks failed: {}: {e}", base_locks_path.display()))?; - let base_locks_sha256 = sha256_hex(&base_locks_bytes); - - let recorded_at = time::OffsetDateTime::now_utc(); - let capture_id = format!( - "{rir}-delta-{}", - recorded_at - .format(&Rfc3339) - .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()) - .replace(':', "") - .replace('-', "") - .replace('+', "_") - ); - let archive_root = rir_dir.join("payload-delta-archive"); - let capture_root = archive_root.join("v1").join("captures").join(&capture_id); - write_json( - &capture_root.join("capture.json"), - &CaptureMetaJson { - version: 1, - capture_id: capture_id.clone(), - created_at: recorded_at - .format(&Rfc3339) - .map_err(|e| format!("format createdAt failed: {e}"))?, - notes: format!("recorded by ours live delta recorder for {rir}"), - }, - )?; - write_json( - &capture_root.join("base.json"), - &DeltaBaseMetaJson { - version: 1, - base_capture: base_index.locks.capture.clone(), - base_locks_sha256: base_locks_sha256.clone(), - created_at: recorded_at - .format(&Rfc3339) - .map_err(|e| format!("format createdAt failed: {e}"))?, - }, - )?; - - let mut rsync_objects_by_module: BTreeMap>> = BTreeMap::new(); - let mut rsync_times_by_module: BTreeMap = BTreeMap::new(); - for fetch in rsync_records.values() { - let module_uri = canonical_rsync_module(&fetch.requested_base_uri) - .map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?; - let objects = rsync_objects_by_module - .entry(module_uri.clone()) - .or_default(); - let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| { - ( - fetch.fetched_at_rfc3339_utc.clone(), - fetch.fetched_at_rfc3339_utc.clone(), - ) - }); - if fetch.fetched_at_rfc3339_utc < times.0 { - times.0 = 
fetch.fetched_at_rfc3339_utc.clone(); - } - if fetch.fetched_at_rfc3339_utc > times.1 { - times.1 = fetch.fetched_at_rfc3339_utc.clone(); - } - for (uri, bytes) in &fetch.objects { - objects.insert(uri.clone(), bytes.clone()); - } - } - - let mut delta_rrdp_locks = BTreeMap::new(); - let mut delta_rsync_locks = BTreeMap::new(); - let mut seen_notifications = BTreeSet::new(); - let mut needed_modules = BTreeSet::new(); - - for pp in publication_points { - let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| { - format!( - "canonicalize rsync module failed for {}: {e}", - pp.rsync_base_uri - ) - })?; - if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() { - if !seen_notifications.insert(notify_uri.to_string()) { - continue; - } - let base_lock = base_index.rrdp_lock(notify_uri); - let target_record = store.get_rrdp_source_record(notify_uri).map_err(|e| { - format!("read target rrdp source record failed for {notify_uri}: {e}") - })?; - - let bucket_hash = sha256_hex(notify_uri.as_bytes()); - let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash); - let (created_at, last_seen_at) = target_record - .as_ref() - .map(|record| { - ( - record.first_seen_at.rfc3339_utc.clone(), - record.last_seen_at.rfc3339_utc.clone(), - ) - }) - .unwrap_or_else(|| { - let now = recorded_at - .format(&Rfc3339) - .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()); - (now.clone(), now) - }); - write_delta_repo_meta(&bucket_dir, notify_uri, &created_at, &last_seen_at)?; - - let fallback_entry = || DeltaRrdpEntryJson { - kind: DeltaKindJson::FallbackRsync, - base: fallback_rsync_state(), - target: fallback_rsync_state(), - delta_count: 0, - deltas: Vec::new(), - }; - - let entry = if let (Some(base_lock), Some(target_record), Some(target_state)) = ( - base_lock, - target_record.as_ref(), - target_record - .as_ref() - .and_then(target_rrdp_state_from_record), - ) { - if base_lock.transport == ReplayTransport::Rrdp - && base_lock.session.as_deref() == target_record.last_session_id.as_deref() - && target_record.last_serial == base_lock.serial - { - let transition = DeltaTransitionJson { - kind: DeltaKindJson::Unchanged, - base: delta_state_from_base_lock(Some(base_lock)), - target: target_state.clone(), - delta_count: 0, - deltas: Vec::new(), - }; - write_delta_transition(&bucket_dir, &transition)?; - DeltaRrdpEntryJson { - kind: DeltaKindJson::Unchanged, - base: transition.base, - target: transition.target, - delta_count: 0, - deltas: Vec::new(), - } - } else if base_lock.transport == ReplayTransport::Rrdp - && base_lock.session.as_deref() == target_record.last_session_id.as_deref() - && target_record - .last_serial - .zip(base_lock.serial) - .is_some_and(|(target, base)| target > base) - { - let notification_bytes = http_records - .get(notify_uri) - .map(|record| record.bytes.as_slice()) - .ok_or_else(|| { - format!("missing recorded target notification body for {notify_uri}") - })?; - let base_serial = base_lock.serial.expect("checked above"); - let target_serial = target_record.last_serial.expect("checked above"); - let deltas = notification_deltas_after_serial( - notification_bytes, - base_serial, - target_serial, - )?; - let mut all_present = true; - let session = target_record - .last_session_id - .as_deref() - .ok_or_else(|| format!("missing target session for {notify_uri}"))?; - let session_dir = bucket_dir.join(session); - let notification_path = - session_dir.join(format!("notification-target-{target_serial}.xml")); - write_bytes(¬ification_path, 
notification_bytes)?; - let target_notification = - parse_notification(notification_bytes).map_err(|e| { - format!("parse target notification failed for {notify_uri}: {e}") - })?; - let snapshot_hash_hex = hex::encode(target_notification.snapshot_hash_sha256); - if let Some(snapshot_bytes) = http_records - .get(&target_notification.snapshot_uri) - .map(|record| record.bytes.as_slice()) - { - let snapshot_path = session_dir.join(format!( - "snapshot-target-{target_serial}-{snapshot_hash_hex}.xml" - )); - write_bytes(&snapshot_path, snapshot_bytes)?; - } - let deltas_dir = session_dir.join("deltas"); - let mut delta_serials = Vec::new(); - for dref in &deltas { - if let Some(delta_bytes) = http_records - .get(&dref.uri) - .map(|record| record.bytes.as_slice()) - { - let hash = hex::encode(dref.hash_sha256); - let path = - deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash)); - write_bytes(&path, delta_bytes)?; - delta_serials.push(dref.serial); - } else { - all_present = false; - break; - } - } - if all_present && !delta_serials.is_empty() { - let transition = DeltaTransitionJson { - kind: DeltaKindJson::Delta, - base: delta_state_from_base_lock(Some(base_lock)), - target: target_state.clone(), - delta_count: delta_serials.len(), - deltas: delta_serials.clone(), - }; - write_delta_transition(&bucket_dir, &transition)?; - DeltaRrdpEntryJson { - kind: DeltaKindJson::Delta, - base: transition.base, - target: transition.target, - delta_count: transition.delta_count, - deltas: transition.deltas, - } - } else { - needed_modules.insert(module_uri.clone()); - let transition = DeltaTransitionJson { - kind: DeltaKindJson::FallbackRsync, - base: fallback_rsync_state(), - target: fallback_rsync_state(), - delta_count: 0, - deltas: Vec::new(), - }; - write_delta_transition(&bucket_dir, &transition)?; - fallback_entry() - } - } else { - needed_modules.insert(module_uri.clone()); - let transition = DeltaTransitionJson { - kind: DeltaKindJson::FallbackRsync, - base: fallback_rsync_state(), - target: fallback_rsync_state(), - delta_count: 0, - deltas: Vec::new(), - }; - write_delta_transition(&bucket_dir, &transition)?; - fallback_entry() - } - } else { - needed_modules.insert(module_uri.clone()); - let transition = DeltaTransitionJson { - kind: DeltaKindJson::FallbackRsync, - base: fallback_rsync_state(), - target: fallback_rsync_state(), - delta_count: 0, - deltas: Vec::new(), - }; - write_delta_transition(&bucket_dir, &transition)?; - fallback_entry() - }; - delta_rrdp_locks.insert(notify_uri.to_string(), entry); - } else { - needed_modules.insert(module_uri); - } - } - - for module_uri in needed_modules { - let owned_objects; - let objects = if let Some(objects) = rsync_objects_by_module.get(&module_uri) { - objects - } else { - owned_objects = current_module_objects_from_store(store, &module_uri)?; - &owned_objects - }; - let (created_at, last_seen_at) = rsync_times_by_module - .get(&module_uri) - .cloned() - .unwrap_or_else(|| { - let now = recorded_at - .format(&Rfc3339) - .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()); - (now.clone(), now) - }); - let bucket_hash = sha256_hex(module_uri.as_bytes()); - let bucket_dir = capture_root - .join("rsync") - .join("modules") - .join(&bucket_hash); - materialize_rsync_module( - &capture_root, - &module_uri, - objects, - &created_at, - &last_seen_at, - )?; - let files = objects.keys().cloned().collect::>(); - write_json( - &bucket_dir.join("files.json"), - &ReplayDeltaRsyncFilesJson { - version: 1, - module: module_uri.clone(), - file_count: 
files.len(),
-                files: files.clone(),
-            },
-        )?;
-        delta_rsync_locks.insert(
-            module_uri,
-            DeltaRsyncEntryJson {
-                file_count: files.len(),
-                overlay_only: true,
-            },
-        );
-    }
-
-    let locks = ReplayDeltaLocksJson {
-        version: 1,
-        capture: capture_id.clone(),
-        base_capture: base_index.locks.capture.clone(),
-        base_locks_sha256: base_locks_sha256.clone(),
-        validation_time: target_validation_time
-            .format(&Rfc3339)
-            .map_err(|e| format!("format validation time failed: {e}"))?,
-        rrdp: delta_rrdp_locks,
-        rsync: delta_rsync_locks,
-    };
-    let locks_path = rir_dir.join("locks-delta.json");
-    write_json(&locks_path, &locks)?;
-
-    crate::replay::delta_archive::ReplayDeltaArchiveIndex::load(&archive_root, &locks_path)
-        .map_err(|e| format!("delta archive self-validate failed: {e}"))?;
-
-    Ok(LiveDeltaCaptureSummary {
-        archive_root,
-        locks_path,
-        capture_id,
-        rrdp_repo_count: locks.rrdp.len(),
-        rsync_module_count: locks.rsync.len(),
-    })
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::fetch::rsync::RsyncFetcher;
-    use crate::storage::{PackTime, RrdpSourceRecord, RrdpSourceSyncState};
-    use crate::sync::rrdp::Fetcher;
-
-    #[derive(Clone)]
-    struct DummyHttpFetcher {
-        map: BTreeMap<String, Vec<u8>>,
-    }
-
-    impl Fetcher for DummyHttpFetcher {
-        fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
-            self.map
-                .get(uri)
-                .cloned()
-                .ok_or_else(|| format!("not found: {uri}"))
-        }
-    }
-
-    #[derive(Clone)]
-    struct DummyRsyncFetcher {
-        objects: Vec<(String, Vec<u8>)>,
-    }
-
-    impl RsyncFetcher for DummyRsyncFetcher {
-        fn fetch_objects(
-            &self,
-            _rsync_base_uri: &str,
-        ) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
-            Ok(self.objects.clone())
-        }
-    }
-
-    fn minimal_notification(
-        notify_uri: &str,
-        snapshot_uri: &str,
-        session: &str,
-        serial: u64,
-    ) -> Vec<u8> {
-        format!(
-            r#""#,
-            sha256_hex(b"")
-        )
-        .replace("notification.xml", notify_uri)
-        .into_bytes()
-    }
-
-    #[test]
-    fn recording_http_fetcher_records_successful_responses() {
-        let fetcher = RecordingHttpFetcher::new(DummyHttpFetcher {
-            map: BTreeMap::from([("https://example.test/a".to_string(), b"abc".to_vec())]),
-        });
-        let got = fetcher.fetch("https://example.test/a").expect("fetch");
-        assert_eq!(got, b"abc");
-        let snapshot = fetcher.snapshot_responses();
-        assert_eq!(snapshot.len(), 1);
-        assert_eq!(snapshot["https://example.test/a"].bytes, b"abc");
-    }
-
-    #[test]
-    fn recording_rsync_fetcher_records_object_sets() {
-        let fetcher = RecordingRsyncFetcher::new(DummyRsyncFetcher {
-            objects: vec![(
-                "rsync://example.test/repo/a.roa".to_string(),
-                b"roa".to_vec(),
-            )],
-        });
-        let got = fetcher
-            .fetch_objects("rsync://example.test/repo/")
-            .expect("fetch objects");
-        assert_eq!(got.len(), 1);
-        let snapshot = fetcher.snapshot_fetches();
-        assert_eq!(snapshot.len(), 1);
-        assert_eq!(
-            snapshot["rsync://example.test/repo/"].objects[0].0,
-            "rsync://example.test/repo/a.roa"
-        );
-    }
-
-    #[test]
-    fn write_live_base_replay_bundle_inputs_materializes_archive_and_locks() {
-        let td = tempfile::tempdir().expect("tempdir");
-        let rir_dir = td.path().join("apnic");
-        std::fs::create_dir_all(&rir_dir).expect("mkdir");
-        let store = RocksStore::open(&td.path().join("db")).expect("open rocksdb");
-
-        let notify_uri = "https://rrdp.example.test/notification.xml";
-        let snapshot_uri = "https://rrdp.example.test/snapshot.xml";
-        let session = "11111111-1111-1111-1111-111111111111";
-        let serial = 42u64;
-        store
-            .put_rrdp_source_record(&RrdpSourceRecord {
-                notify_uri: notify_uri.to_string(),
-                last_session_id: Some(session.to_string()),
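-                // Populates exactly the fields rrdp_repo_is_replayable()
-                // requires: session id, serial, snapshot uri and snapshot hash.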
last_serial: Some(serial), - first_seen_at: PackTime { - rfc3339_utc: "2026-03-30T00:00:00Z".to_string(), - }, - last_seen_at: PackTime { - rfc3339_utc: "2026-03-30T00:00:01Z".to_string(), - }, - last_sync_at: Some(PackTime { - rfc3339_utc: "2026-03-30T00:00:01Z".to_string(), - }), - sync_state: RrdpSourceSyncState::SnapshotOnly, - last_snapshot_uri: Some(snapshot_uri.to_string()), - last_snapshot_hash: Some(sha256_hex(b"")), - last_error: None, - }) - .expect("put source"); - - let publication_points = vec![ - PublicationPointAudit { - rsync_base_uri: "rsync://rsync.example.test/repo/".to_string(), - manifest_rsync_uri: "rsync://rsync.example.test/repo/manifest.mft".to_string(), - publication_point_rsync_uri: "rsync://rsync.example.test/repo/".to_string(), - rrdp_notification_uri: Some(notify_uri.to_string()), - source: "fresh".to_string(), - repo_sync_source: None, - repo_sync_phase: None, - repo_sync_duration_ms: None, - repo_sync_error: None, - repo_terminal_state: "fresh".to_string(), - this_update_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(), - next_update_rfc3339_utc: "2026-03-30T01:00:00Z".to_string(), - verified_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(), - warnings: Vec::new(), - objects: Vec::new(), - node_id: None, - parent_node_id: None, - discovered_from: None, - }, - PublicationPointAudit { - rsync_base_uri: "rsync://rsync-only.example.test/repo/".to_string(), - manifest_rsync_uri: "rsync://rsync-only.example.test/repo/manifest.mft".to_string(), - publication_point_rsync_uri: "rsync://rsync-only.example.test/repo/".to_string(), - rrdp_notification_uri: None, - source: "fresh".to_string(), - repo_sync_source: None, - repo_sync_phase: None, - repo_sync_duration_ms: None, - repo_sync_error: None, - repo_terminal_state: "fresh".to_string(), - this_update_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(), - next_update_rfc3339_utc: "2026-03-30T01:00:00Z".to_string(), - verified_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(), - warnings: Vec::new(), - objects: Vec::new(), - node_id: None, - parent_node_id: None, - discovered_from: None, - }, - ]; - - let http_records = BTreeMap::from([ - ( - notify_uri.to_string(), - RecordedHttpResponse { - uri: notify_uri.to_string(), - bytes: minimal_notification(notify_uri, snapshot_uri, session, serial), - fetched_at_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(), - }, - ), - ( - snapshot_uri.to_string(), - RecordedHttpResponse { - uri: snapshot_uri.to_string(), - bytes: b"".to_vec(), - fetched_at_rfc3339_utc: "2026-03-30T00:00:01Z".to_string(), - }, - ), - ]); - let rsync_records = BTreeMap::from([( - "rsync://rsync-only.example.test/repo/".to_string(), - RecordedRsyncFetch { - requested_base_uri: "rsync://rsync-only.example.test/repo/".to_string(), - objects: vec![( - "rsync://rsync-only.example.test/repo/a.roa".to_string(), - b"roa".to_vec(), - )], - fetched_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(), - }, - )]); - - let summary = write_live_base_replay_bundle_inputs( - &rir_dir, - "apnic", - time::OffsetDateTime::parse("2026-03-30T00:00:03Z", &Rfc3339).expect("time"), - &publication_points, - &store, - &http_records, - &rsync_records, - ) - .expect("write live bundle inputs"); - - assert!(summary.archive_root.is_dir()); - assert!(summary.locks_path.is_file()); - let locks: serde_json::Value = - serde_json::from_slice(&std::fs::read(&summary.locks_path).expect("read locks")) - .expect("parse locks"); - assert_eq!(locks["validationTime"], "2026-03-30T00:00:03Z"); - assert_eq!(locks["rrdp"][notify_uri]["transport"], 
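-            // TransportJson is serialized with rename_all = "snake_case", so
-            // the transport values in the locks JSON are "rrdp" / "rsync":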
"rrdp"); - assert_eq!( - locks["rsync"]["rsync://rsync-only.example.test/repo/"]["transport"], - "rsync" - ); - - let index = ReplayArchiveIndex::load_allow_missing_rsync_modules( - rir_dir.join("base-payload-archive"), - rir_dir.join("base-locks.json"), - ) - .expect("load emitted replay archive"); - assert!(index.rrdp_repo(notify_uri).is_some()); - assert!( - index - .resolve_rsync_module_for_base_uri("rsync://rsync-only.example.test/repo/sub") - .is_ok() - ); - } -} diff --git a/src/bundle/mod.rs b/src/bundle/mod.rs deleted file mode 100644 index fb3dea7..0000000 --- a/src/bundle/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -pub mod compare_view; -pub mod live_capture; -pub mod record_io; -pub mod spec; - -pub use compare_view::{ - VapCompareRow, VrpCompareRow, build_vap_compare_rows, build_vrp_compare_rows, - canonical_vrp_prefix, decode_ccr_compare_views, write_vap_csv, write_vrp_csv, -}; -pub use live_capture::{ - LiveBaseCaptureSummary, LiveDeltaCaptureSummary, RecordedHttpResponse, RecordedRsyncFetch, - RecordingHttpFetcher, RecordingRsyncFetcher, write_current_replay_state_locks, - write_live_base_replay_bundle_inputs, write_live_delta_replay_bundle_inputs, - write_live_delta_replay_step_inputs, -}; -pub use record_io::{ - build_single_rir_bundle_manifest, copy_dir_all, load_validation_time, sha256_hex, write_bytes, - write_json, write_live_bundle_rir_readme, write_live_bundle_top_readme, write_timing_json, -}; -pub use spec::{ - BaseBundleStateMetadataV2, BundleManifestEntryV2, BundleManifestV2, DeltaSequenceMetadataV2, - DeltaStepMetadataV2, RirBundleMetadataV2, -}; -pub use spec::{BundleManifest, BundleManifestEntry, RirBundleMetadata}; diff --git a/src/bundle/record_io.rs b/src/bundle/record_io.rs deleted file mode 100644 index bac9658..0000000 --- a/src/bundle/record_io.rs +++ /dev/null @@ -1,274 +0,0 @@ -use std::fs; -use std::path::Path; - -use serde::Serialize; -use sha2::Digest; -use time::format_description::well_known::Rfc3339; - -use super::{BundleManifest, BundleManifestEntry}; - -pub fn sha256_hex(bytes: &[u8]) -> String { - hex::encode(sha2::Sha256::digest(bytes)) -} - -pub fn write_json(path: &Path, value: &impl Serialize) -> Result<(), String> { - if let Some(parent) = path.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; - } - let bytes = serde_json::to_vec_pretty(value).map_err(|e| e.to_string())?; - fs::write(path, bytes).map_err(|e| format!("write json failed: {}: {e}", path.display())) -} - -pub fn write_timing_json( - path: &Path, - mode: &str, - validation_time: &time::OffsetDateTime, - duration: std::time::Duration, -) -> Result<(), String> { - write_json( - path, - &serde_json::json!({ - "mode": mode, - "validationTime": validation_time - .format(&Rfc3339) - .map_err(|e| format!("format validation time failed: {e}"))?, - "durationSeconds": duration.as_secs_f64(), - }), - ) -} - -pub fn write_live_bundle_top_readme(path: &Path, rir: &str) -> Result<(), String> { - if let Some(parent) = path.parent() { - fs::create_dir_all(parent) - .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; - } - fs::write( - path, - format!( - "# Ours Live Replay Bundle\n\nThis run contains one per-RIR bundle recorded online by `ours`.\n\n- RIR: `{rir}`\n- Reference result format: `CCR`\n" - ), - ) - .map_err(|e| format!("write readme failed: {}: {e}", path.display())) -} - -pub fn write_live_bundle_rir_readme( - path: &Path, - rir: &str, - base_validation_time: &str, -) -> Result<(), String> { - 
) -> Result<(), String> {
-    if let Some(parent) = path.parent() {
-        fs::create_dir_all(parent)
-            .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
-    }
-    fs::write(
-        path,
-        format!(
-            "# {rir} live replay bundle\n\n- `tal.tal` and `ta.cer` are the actual live run inputs.\n- `base-locks.json.validationTime` = `{base_validation_time}`.\n- `base.ccr` is the authoritative reference result.\n- `base-vrps.csv` and `base-vaps.csv` are compare views derived from `base.ccr`.\n"
-        ),
-    )
-    .map_err(|e| format!("write rir readme failed: {}: {e}", path.display()))
-}
-
-pub fn write_bytes(path: &Path, bytes: &[u8]) -> Result<(), String> {
-    if let Some(parent) = path.parent() {
-        fs::create_dir_all(parent)
-            .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
-    }
-    fs::write(path, bytes).map_err(|e| format!("write file failed: {}: {e}", path.display()))
-}
-
-pub fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
-    fs::create_dir_all(dst)
-        .map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
-    for entry in
-        fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))?
-    {
-        let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", src.display()))?;
-        let ty = entry
-            .file_type()
-            .map_err(|e| format!("file_type failed: {}: {e}", entry.path().display()))?;
-        let to = dst.join(entry.file_name());
-        if ty.is_dir() {
-            copy_dir_all(&entry.path(), &to)?;
-        } else if ty.is_file() {
-            if let Some(parent) = to.parent() {
-                fs::create_dir_all(parent)
-                    .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
-            }
-            fs::copy(entry.path(), &to).map_err(|e| {
-                format!(
-                    "copy failed: {} -> {}: {e}",
-                    entry.path().display(),
-                    to.display()
-                )
-            })?;
-        }
-    }
-    Ok(())
-}
-
-pub fn load_validation_time(path: &Path) -> Result<time::OffsetDateTime, String> {
-    let json: serde_json::Value = serde_json::from_slice(
-        &fs::read(path).map_err(|e| format!("read json failed: {}: {e}", path.display()))?,
-    )
-    .map_err(|e| format!("parse json failed: {}: {e}", path.display()))?;
-    let value = json
-        .get("validationTime")
-        .or_else(|| json.get("validation_time"))
-        .and_then(|v| v.as_str())
-        .ok_or_else(|| format!("validationTime missing in {}", path.display()))?;
-    time::OffsetDateTime::parse(value, &Rfc3339)
-        .map_err(|e| format!("invalid validationTime in {}: {e}", path.display()))
-}
-
-pub fn build_single_rir_bundle_manifest(
-    schema_version: &str,
-    bundle_producer: &str,
-    rir: &str,
-    base_validation_time: &time::OffsetDateTime,
-    delta_validation_time: Option<&time::OffsetDateTime>,
-    has_aspa: bool,
-) -> Result<BundleManifest, String> {
-    Ok(BundleManifest {
-        schema_version: schema_version.to_string(),
-        bundle_producer: bundle_producer.to_string(),
-        recorded_at_rfc3339_utc: time::OffsetDateTime::now_utc()
-            .format(&Rfc3339)
-            .map_err(|e| format!("format recorded_at failed: {e}"))?,
-        rirs: vec![rir.to_string()],
-        per_rir_bundles: vec![BundleManifestEntry {
-            rir: rir.to_string(),
-            relative_path: rir.to_string(),
-            base_validation_time: base_validation_time
-                .format(&Rfc3339)
-                .map_err(|e| format!("format base validation time failed: {e}"))?,
-            delta_validation_time: match delta_validation_time {
-                Some(value) => Some(
-                    value
-                        .format(&Rfc3339)
-                        .map_err(|e| format!("format delta validation time failed: {e}"))?,
-                ),
-                None => None,
-            },
-            has_aspa,
-        }],
-    })
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use tempfile::tempdir;
-
-    #[test]
-    fn load_validation_time_reads_validation_time_field() {
-        let td = tempdir().expect("tempdir");
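-        // Minimal input for load_validation_time. A real base-locks.json as
-        // emitted by the live recorder also carries "version", "capture" and
-        // the "rrdp"/"rsync" lock maps (see live_capture::ReplayLocksJson), e.g.:
-        //   {"version":1,"capture":"apnic-base-<timestamp>",
-        //    "validationTime":"2026-04-01T00:00:00Z","rrdp":{...},"rsync":{...}}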
- let path = td.path().join("locks.json"); - fs::write(&path, r#"{"validationTime":"2026-04-01T00:00:00Z"}"#).expect("write"); - let parsed = load_validation_time(&path).expect("load"); - assert_eq!( - parsed.format(&Rfc3339).expect("format"), - "2026-04-01T00:00:00Z" - ); - } - - #[test] - fn copy_dir_all_copies_nested_files() { - let td = tempdir().expect("tempdir"); - let src = td.path().join("src"); - let dst = td.path().join("dst"); - fs::create_dir_all(src.join("nested")).expect("mkdir"); - fs::write(src.join("root.txt"), b"root").expect("write root"); - fs::write(src.join("nested/child.txt"), b"child").expect("write child"); - copy_dir_all(&src, &dst).expect("copy"); - assert_eq!(fs::read(dst.join("root.txt")).expect("read root"), b"root"); - assert_eq!( - fs::read(dst.join("nested/child.txt")).expect("read child"), - b"child" - ); - } - - #[test] - fn build_single_rir_bundle_manifest_formats_times() { - let base = time::OffsetDateTime::parse("2026-04-01T00:00:00Z", &Rfc3339).expect("base"); - let delta = time::OffsetDateTime::parse("2026-04-01T00:10:00Z", &Rfc3339).expect("delta"); - let manifest = build_single_rir_bundle_manifest( - "20260330-v1", - "ours", - "apnic", - &base, - Some(&delta), - true, - ) - .expect("manifest"); - assert_eq!(manifest.schema_version, "20260330-v1"); - assert_eq!(manifest.rirs, vec!["apnic".to_string()]); - assert_eq!( - manifest.per_rir_bundles[0].base_validation_time, - "2026-04-01T00:00:00Z" - ); - assert_eq!( - manifest.per_rir_bundles[0].delta_validation_time.as_deref(), - Some("2026-04-01T00:10:00Z") - ); - } - - #[test] - fn write_json_and_write_bytes_create_parent_directories() { - let td = tempdir().expect("tempdir"); - let json_path = td.path().join("nested/meta/data.json"); - write_json(&json_path, &serde_json::json!({"ok": true})).expect("write json"); - let json: serde_json::Value = - serde_json::from_slice(&fs::read(&json_path).expect("read json")).expect("parse"); - assert_eq!(json["ok"], true); - - let bytes_path = td.path().join("nested/raw/file.bin"); - write_bytes(&bytes_path, b"payload").expect("write bytes"); - assert_eq!(fs::read(&bytes_path).expect("read bytes"), b"payload"); - } - - #[test] - fn write_timing_and_readmes_emit_expected_text() { - let td = tempdir().expect("tempdir"); - let timing_path = td.path().join("timings/base-produce.json"); - let validation_time = - time::OffsetDateTime::parse("2026-04-01T00:00:00Z", &Rfc3339).expect("time"); - write_timing_json( - &timing_path, - "base", - &validation_time, - std::time::Duration::from_secs_f64(1.25), - ) - .expect("write timing"); - let timing: serde_json::Value = - serde_json::from_slice(&fs::read(&timing_path).expect("read timing")).expect("parse"); - assert_eq!(timing["mode"], "base"); - assert_eq!(timing["validationTime"], "2026-04-01T00:00:00Z"); - assert_eq!(timing["durationSeconds"], 1.25); - - let top_readme = td.path().join("README.md"); - write_live_bundle_top_readme(&top_readme, "apnic").expect("write top readme"); - let top_text = fs::read_to_string(&top_readme).expect("read top readme"); - assert!(top_text.contains("RIR: `apnic`")); - assert!(top_text.contains("Reference result format: `CCR`")); - - let rir_readme = td.path().join("apnic/README.md"); - write_live_bundle_rir_readme(&rir_readme, "apnic", "2026-04-01T00:00:00Z") - .expect("write rir readme"); - let rir_text = fs::read_to_string(&rir_readme).expect("read rir readme"); - assert!(rir_text.contains("base-locks.json.validationTime")); - assert!(rir_text.contains("base-vrps.csv")); - 
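-        // Mirrors write_live_bundle_rir_readme's template: the README names the
-        // base-locks.json validationTime plus the base-vrps.csv / base-vaps.csv
-        // compare views derived from base.ccr.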
assert!(rir_text.contains("base-vaps.csv")); - } - - #[test] - fn build_single_rir_bundle_manifest_supports_none_delta_time() { - let base = time::OffsetDateTime::parse("2026-04-01T00:00:00Z", &Rfc3339).expect("base"); - let manifest = - build_single_rir_bundle_manifest("20260330-v1", "ours", "afrinic", &base, None, false) - .expect("manifest"); - assert_eq!(manifest.per_rir_bundles[0].delta_validation_time, None); - assert!(!manifest.per_rir_bundles[0].has_aspa); - } -} diff --git a/src/bundle/spec.rs b/src/bundle/spec.rs deleted file mode 100644 index a389aab..0000000 --- a/src/bundle/spec.rs +++ /dev/null @@ -1,185 +0,0 @@ -use serde::Serialize; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct BundleManifest { - #[serde(rename = "schemaVersion")] - pub schema_version: String, - #[serde(rename = "bundleProducer")] - pub bundle_producer: String, - #[serde(rename = "recordedAt")] - pub recorded_at_rfc3339_utc: String, - pub rirs: Vec, - #[serde(rename = "perRirBundles")] - pub per_rir_bundles: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct BundleManifestEntry { - pub rir: String, - pub relative_path: String, - #[serde(rename = "baseValidationTime")] - pub base_validation_time: String, - #[serde( - rename = "deltaValidationTime", - skip_serializing_if = "Option::is_none" - )] - pub delta_validation_time: Option, - #[serde(rename = "hasAspa")] - pub has_aspa: bool, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct RirBundleMetadata { - #[serde(rename = "schemaVersion")] - pub schema_version: String, - #[serde(rename = "bundleProducer")] - pub bundle_producer: String, - pub rir: String, - #[serde(rename = "baseValidationTime")] - pub base_validation_time: String, - #[serde( - rename = "deltaValidationTime", - skip_serializing_if = "Option::is_none" - )] - pub delta_validation_time: Option, - #[serde(rename = "talSha256")] - pub tal_sha256: String, - #[serde(rename = "taCertSha256")] - pub ta_cert_sha256: String, - #[serde(rename = "baseCcrSha256")] - pub base_ccr_sha256: String, - #[serde(rename = "deltaCcrSha256", skip_serializing_if = "Option::is_none")] - pub delta_ccr_sha256: Option, - #[serde(rename = "hasAspa")] - pub has_aspa: bool, - #[serde(rename = "hasRouterKey")] - pub has_router_key: bool, - #[serde(rename = "baseVrpCount")] - pub base_vrp_count: usize, - #[serde(rename = "baseVapCount")] - pub base_vap_count: usize, - #[serde(rename = "deltaVrpCount", skip_serializing_if = "Option::is_none")] - pub delta_vrp_count: Option, - #[serde(rename = "deltaVapCount", skip_serializing_if = "Option::is_none")] - pub delta_vap_count: Option, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct BundleManifestV2 { - #[serde(rename = "schemaVersion")] - pub schema_version: String, - #[serde(rename = "bundleProducer")] - pub bundle_producer: String, - #[serde(rename = "recordedAt")] - pub recorded_at_rfc3339_utc: String, - pub rirs: Vec, - #[serde(rename = "perRirBundles")] - pub per_rir_bundles: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct BundleManifestEntryV2 { - pub rir: String, - pub relative_path: String, - #[serde(rename = "baseValidationTime")] - pub base_validation_time: String, - #[serde(rename = "stepCount")] - pub step_count: usize, - #[serde( - rename = "firstDeltaValidationTime", - skip_serializing_if = "Option::is_none" - )] - pub first_delta_validation_time: Option, - #[serde( - rename = "lastDeltaValidationTime", - skip_serializing_if = "Option::is_none" - )] - pub 
last_delta_validation_time: Option, - #[serde(rename = "hasAspa")] - pub has_aspa: bool, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct BaseBundleStateMetadataV2 { - #[serde(rename = "validationTime")] - pub validation_time: String, - #[serde(rename = "ccrSha256")] - pub ccr_sha256: String, - #[serde(rename = "vrpCount")] - pub vrp_count: usize, - #[serde(rename = "vapCount")] - pub vap_count: usize, - #[serde(rename = "relativeArchivePath")] - pub relative_archive_path: String, - #[serde(rename = "relativeLocksPath")] - pub relative_locks_path: String, - #[serde(rename = "relativeCcrPath")] - pub relative_ccr_path: String, - #[serde(rename = "relativeVrpsPath")] - pub relative_vrps_path: String, - #[serde(rename = "relativeVapsPath")] - pub relative_vaps_path: String, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct DeltaStepMetadataV2 { - pub index: usize, - pub id: String, - #[serde(rename = "relativePath")] - pub relative_path: String, - #[serde(rename = "baseRef")] - pub base_ref: String, - #[serde(rename = "validationTime")] - pub validation_time: String, - #[serde(rename = "deltaCcrSha256")] - pub delta_ccr_sha256: String, - #[serde(rename = "vrpCount")] - pub vrp_count: usize, - #[serde(rename = "vapCount")] - pub vap_count: usize, - #[serde(rename = "relativeArchivePath")] - pub relative_archive_path: String, - #[serde(rename = "relativeTransitionLocksPath")] - pub relative_transition_locks_path: String, - #[serde(rename = "relativeTargetLocksPath")] - pub relative_target_locks_path: String, - #[serde(rename = "relativeCcrPath")] - pub relative_ccr_path: String, - #[serde(rename = "relativeVrpsPath")] - pub relative_vrps_path: String, - #[serde(rename = "relativeVapsPath")] - pub relative_vaps_path: String, - #[serde(rename = "hasAspa")] - pub has_aspa: bool, - #[serde(rename = "hasRouterKey")] - pub has_router_key: bool, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct DeltaSequenceMetadataV2 { - #[serde(rename = "configuredDeltaCount")] - pub configured_delta_count: usize, - #[serde(rename = "configuredIntervalSeconds")] - pub configured_interval_seconds: u64, - pub steps: Vec, -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct RirBundleMetadataV2 { - #[serde(rename = "schemaVersion")] - pub schema_version: String, - #[serde(rename = "bundleProducer")] - pub bundle_producer: String, - pub rir: String, - #[serde(rename = "talSha256")] - pub tal_sha256: String, - #[serde(rename = "taCertSha256")] - pub ta_cert_sha256: String, - #[serde(rename = "hasAnyAspa")] - pub has_any_aspa: bool, - #[serde(rename = "hasAnyRouterKey")] - pub has_any_router_key: bool, - pub base: BaseBundleStateMetadataV2, - #[serde(rename = "deltaSequence")] - pub delta_sequence: DeltaSequenceMetadataV2, -} diff --git a/src/bundle/compare_view.rs b/src/ccr/compare_view.rs similarity index 100% rename from src/bundle/compare_view.rs rename to src/ccr/compare_view.rs diff --git a/src/ccr/mod.rs b/src/ccr/mod.rs index e93f9b7..36f3c5a 100644 --- a/src/ccr/mod.rs +++ b/src/ccr/mod.rs @@ -2,6 +2,8 @@ pub mod accumulator; #[cfg(feature = "full")] pub mod build; +#[cfg(feature = "full")] +pub mod compare_view; pub mod decode; pub mod dump; pub mod encode; @@ -20,6 +22,11 @@ pub use build::{ build_manifest_state_from_vcirs, build_manifest_state_from_vcirs_with_breakdown, build_roa_payload_state, build_router_key_state_from_runtime, build_trust_anchor_state, }; +#[cfg(feature = "full")] +pub use compare_view::{ + VapCompareRow, VrpCompareRow, 
+    canonical_vrp_prefix, decode_ccr_compare_views, write_vap_csv, write_vrp_csv,
+};
 pub use decode::{CcrDecodeError, decode_content_info};
 pub use dump::{CcrDumpError, dump_content_info_json, dump_content_info_json_value};
 pub use encode::{CcrEncodeError, encode_content_info};
diff --git a/src/cli.rs b/src/cli.rs
index e1c20a1..bf46b8d 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -10,7 +10,7 @@ use crate::audit::{
     AspaOutput, AuditRepoSyncStats, AuditReportV2, AuditRunMeta, AuditWarning, TreeSummary,
     VrpOutput, format_roa_ip_prefix,
 };
-use crate::bundle::canonical_vrp_prefix;
+use crate::ccr::canonical_vrp_prefix;
 use crate::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
 use crate::fetch::rsync::LocalDirRsyncFetcher;
 use crate::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
diff --git a/src/lib.rs b/src/lib.rs
index 41986d8..6d4417a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -12,9 +12,6 @@ pub mod audit_downloads;
 pub mod audit_trace;
 #[cfg(feature = "full")]
 pub mod blob_store;
-#[cfg(feature = "full")]
-pub mod bundle;
-#[cfg(feature = "full")]
 pub mod cli;
 #[cfg(feature = "full")]
 pub mod current_repo_index;
diff --git a/tests/test_multi_tal_parallel_m2.rs b/tests/test_multi_tal_parallel_m2.rs
index bbc49cf..fa050f9 100644
--- a/tests/test_multi_tal_parallel_m2.rs
+++ b/tests/test_multi_tal_parallel_m2.rs
@@ -1,7 +1,7 @@
 use std::collections::BTreeSet;
 use std::path::{Path, PathBuf};
 
-use rpki::bundle::{VapCompareRow, VrpCompareRow, decode_ccr_compare_views};
+use rpki::ccr::{VapCompareRow, VrpCompareRow, decode_ccr_compare_views};
 
 fn fixture(rel: &str) -> PathBuf {
     PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(rel)
diff --git a/tests/test_parallel_phase1_transport_offline_r5.rs b/tests/test_parallel_phase1_transport_offline_r5.rs
index ce4f5df..46c6d6e 100644
--- a/tests/test_parallel_phase1_transport_offline_r5.rs
+++ b/tests/test_parallel_phase1_transport_offline_r5.rs
@@ -64,10 +64,9 @@ fn offline_default_parallel_and_configured_phase2_match_compare_views() {
         rpki::ccr::decode_content_info(&configured_ccr_bytes).expect("decode configured ccr");
 
     let (default_vrps, default_vaps) =
-        rpki::bundle::decode_ccr_compare_views(&default_ccr, "apnic")
-            .expect("default compare view");
+        rpki::ccr::decode_ccr_compare_views(&default_ccr, "apnic").expect("default compare view");
     let (configured_vrps, configured_vaps) =
-        rpki::bundle::decode_ccr_compare_views(&configured_ccr, "apnic")
+        rpki::ccr::decode_ccr_compare_views(&configured_ccr, "apnic")
             .expect("configured compare view");
 
     assert_eq!(
@@ -96,8 +95,7 @@ fn offline_default_parallel_emits_online_ccr_accumulator_output() {
     let (report, ccr_bytes) = run_offline_case(None);
     let ccr = rpki::ccr::decode_content_info(&ccr_bytes).expect("decode ccr");
-    let (_vrps, _vaps) =
-        rpki::bundle::decode_ccr_compare_views(&ccr, "apnic").expect("compare view");
+    let (_vrps, _vaps) = rpki::ccr::decode_ccr_compare_views(&ccr, "apnic").expect("compare view");
     assert!(
         report["publication_points"]
             .as_array()