20260410 完成五个rir 基于cir的三方回放,raw by hash 独立db,发现内存占用大,连续大rir 录制发生oom

This commit is contained in:
yuyr 2026-04-11 11:24:32 +08:00
parent e083fe4daa
commit 77fc2f1a41
96 changed files with 5159 additions and 1720 deletions

View File

@ -34,7 +34,11 @@ from pathlib import Path
sequence_root = Path(sys.argv[1]).resolve() sequence_root = Path(sys.argv[1]).resolve()
drop_bin = sys.argv[2] drop_bin = sys.argv[2]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8")) sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"] static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
summaries = [] summaries = []
for step in sequence["steps"]: for step in sequence["steps"]:
@ -49,13 +53,15 @@ for step in sequence["steps"]:
str(sequence_root / step["ccrPath"]), str(sequence_root / step["ccrPath"]),
"--report-json", "--report-json",
str(sequence_root / step["reportPath"]), str(sequence_root / step["reportPath"]),
"--static-root",
str(static_root),
"--json-out", "--json-out",
str(out_dir / "drop.json"), str(out_dir / "drop.json"),
"--md-out", "--md-out",
str(out_dir / "drop.md"), str(out_dir / "drop.md"),
] ]
if static_root is not None:
cmd.extend(["--static-root", str(static_root)])
else:
cmd.extend(["--raw-store-db", str(raw_store_db)])
proc = subprocess.run(cmd, capture_output=True, text=True) proc = subprocess.run(cmd, capture_output=True, text=True)
if proc.returncode != 0: if proc.returncode != 0:
raise SystemExit( raise SystemExit(

View File

@ -84,8 +84,11 @@ else
OUT="target/replay/cir_sequence_remote_${RIR}_$(date -u +%Y%m%dT%H%M%SZ)" OUT="target/replay/cir_sequence_remote_${RIR}_$(date -u +%Y%m%dT%H%M%SZ)"
fi fi
mkdir -p "$OUT/full" "$OUT/static" mkdir -p "$OUT"
DB="$OUT/work-db" DB="$OUT/work-db"
RAW_STORE_DB="$OUT/raw-store.db"
ROWS="$OUT/.sequence_rows.tsv"
: > "$ROWS"
write_step_timing() { write_step_timing() {
local path="$1" local path="$1"
@ -112,109 +115,108 @@ PY
} }
run_step() { run_step() {
local step_dir="$1" local step_id="$1"
shift local kind="$2"
mkdir -p "$step_dir" local previous_step_id="$3"
local start_ms end_ms started_at finished_at shift 3
start_ms="$(python3 - <<'PY'
local started_at_iso started_at_ms finished_at_iso finished_at_ms prefix
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY'
import time import time
print(int(time.time() * 1000)) print(int(time.time() * 1000))
PY PY
)" )"
started_at="$(date -u +%Y-%m-%dT%H:%M:%SZ)" prefix="${started_at_iso}-test"
target/release/rpki "$@" >"$step_dir/run.stdout.log" 2>"$step_dir/run.stderr.log"
end_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
finished_at="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
write_step_timing "$step_dir/timing.json" "$start_ms" "$end_ms" "$started_at" "$finished_at"
}
full_args=( local cir_out="$OUT/${prefix}.cir"
local ccr_out="$OUT/${prefix}.ccr"
local report_out="$OUT/${prefix}.report.json"
local timing_out="$OUT/${prefix}.timing.json"
local stdout_out="$OUT/${prefix}.stdout.log"
local stderr_out="$OUT/${prefix}.stderr.log"
local -a cmd=(
target/release/rpki
--db "$DB" --db "$DB"
--raw-store-db "$RAW_STORE_DB"
--tal-path "$TAL_REL" --tal-path "$TAL_REL"
--ta-path "$TA_REL" --ta-path "$TA_REL"
--ccr-out "$OUT/full/result.ccr" --ccr-out "$ccr_out"
--report-json "$OUT/full/report.json" --report-json "$report_out"
--cir-enable --cir-enable
--cir-out "$OUT/full/input.cir" --cir-out "$cir_out"
--cir-static-root "$OUT/static"
--cir-tal-uri "https://example.test/${RIR}.tal"
)
if [[ "$FULL_REPO" -ne 1 ]]; then
full_args+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES")
fi
run_step "$OUT/full" "${full_args[@]}"
for idx in $(seq 1 "$DELTA_COUNT"); do
sleep "$SLEEP_SECS"
step="$(printf 'delta-%03d' "$idx")"
step_args=(
--db "$DB"
--tal-path "$TAL_REL"
--ta-path "$TA_REL"
--ccr-out "$OUT/$step/result.ccr"
--report-json "$OUT/$step/report.json"
--cir-enable
--cir-out "$OUT/$step/input.cir"
--cir-static-root "$OUT/static"
--cir-tal-uri "https://example.test/${RIR}.tal" --cir-tal-uri "https://example.test/${RIR}.tal"
) )
if [[ "$FULL_REPO" -ne 1 ]]; then if [[ "$FULL_REPO" -ne 1 ]]; then
step_args+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES") cmd+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES")
fi fi
run_step "$OUT/$step" "${step_args[@]}" cmd+=("$@")
env RPKI_PROGRESS_LOG=1 "${cmd[@]}" >"$stdout_out" 2>"$stderr_out"
finished_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
finished_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
write_step_timing "$timing_out" "$started_at_ms" "$finished_at_ms" "$started_at_iso" "$finished_at_iso"
local validation_time
validation_time="$(python3 - <<'PY' "$report_out"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['meta']['validation_time_rfc3339_utc'])
PY
)"
printf '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' \
"$step_id" \
"$kind" \
"$validation_time" \
"$(basename "$cir_out")" \
"$(basename "$ccr_out")" \
"$(basename "$report_out")" \
"$(basename "$timing_out")" \
"$(basename "$stdout_out")" \
"$(basename "$stderr_out")" >> "$ROWS"
}
run_step "full" "full" ""
prev="full"
for idx in $(seq 1 "$DELTA_COUNT"); do
sleep "$SLEEP_SECS"
step="$(printf 'delta-%03d' "$idx")"
run_step "$step" "delta" "$prev"
prev="$step"
done done
python3 - <<'PY' "$OUT" "$DELTA_COUNT" "$RIR" python3 - <<'PY' "$OUT" "$ROWS" "$RIR"
import json, sys import json, sys
from pathlib import Path from pathlib import Path
out = Path(sys.argv[1]) out = Path(sys.argv[1])
delta_count = int(sys.argv[2]) rows = Path(sys.argv[2]).read_text(encoding='utf-8').splitlines()
rir = sys.argv[3] rir = sys.argv[3]
def read_validation_time(step_dir: Path) -> str:
report = json.loads((step_dir / "report.json").read_text(encoding="utf-8"))
return report["meta"]["validation_time_rfc3339_utc"]
def read_timing(step_dir: Path) -> dict:
return json.loads((step_dir / "timing.json").read_text(encoding="utf-8"))
steps = [] steps = []
steps.append( for idx, row in enumerate(rows):
{ step_id, kind, validation_time, cir_name, ccr_name, report_name, timing_name, stdout_name, stderr_name = row.split('\t')
"stepId": "full", steps.append({
"kind": "full", "stepId": step_id,
"validationTime": read_validation_time(out / "full"), "kind": kind,
"cirPath": "full/input.cir", "validationTime": validation_time,
"ccrPath": "full/result.ccr", "cirPath": cir_name,
"reportPath": "full/report.json", "ccrPath": ccr_name,
"timingPath": "full/timing.json", "reportPath": report_name,
"previousStepId": None, "timingPath": timing_name,
} "stdoutLogPath": stdout_name,
) "stderrLogPath": stderr_name,
prev = "full" "artifactPrefix": cir_name[:-4], # strip .cir
for i in range(1, delta_count + 1): "previousStepId": None if idx == 0 else steps[idx - 1]["stepId"],
step = f"delta-{i:03d}" })
steps.append(
{
"stepId": step,
"kind": "delta",
"validationTime": read_validation_time(out / step),
"cirPath": f"{step}/input.cir",
"ccrPath": f"{step}/result.ccr",
"reportPath": f"{step}/report.json",
"timingPath": f"{step}/timing.json",
"previousStepId": prev,
}
)
prev = step
(out / "sequence.json").write_text( (out / "sequence.json").write_text(
json.dumps({"version": 1, "staticRoot": "static", "steps": steps}, indent=2), json.dumps({"version": 1, "rawStoreDbPath": "raw-store.db", "steps": steps}, indent=2),
encoding="utf-8", encoding="utf-8",
) )
@ -222,18 +224,21 @@ summary = {
"version": 1, "version": 1,
"rir": rir, "rir": rir,
"stepCount": len(steps), "stepCount": len(steps),
"steps": [ "steps": [],
{ }
for step in steps:
timing = json.loads((out / step["timingPath"]).read_text(encoding="utf-8"))
summary["steps"].append({
"stepId": step["stepId"], "stepId": step["stepId"],
"kind": step["kind"], "kind": step["kind"],
"validationTime": step["validationTime"], "validationTime": step["validationTime"],
**read_timing(out / step["stepId"]), "artifactPrefix": step["artifactPrefix"],
} **timing,
for step in steps })
],
}
(out / "summary.json").write_text(json.dumps(summary, indent=2), encoding="utf-8") (out / "summary.json").write_text(json.dumps(summary, indent=2), encoding="utf-8")
PY PY
rm -f "$ROWS"
echo "$OUT" echo "$OUT"
EOS EOS

View File

@ -6,7 +6,7 @@ usage() {
Usage: Usage:
./scripts/cir/run_cir_replay_ours.sh \ ./scripts/cir/run_cir_replay_ours.sh \
--cir <path> \ --cir <path> \
--static-root <path> \ [--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \ --out-dir <path> \
--reference-ccr <path> \ --reference-ccr <path> \
[--keep-db] \ [--keep-db] \
@ -19,6 +19,8 @@ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
CIR="" CIR=""
STATIC_ROOT="" STATIC_ROOT=""
POOL_DB=""
RAW_STORE_DB=""
OUT_DIR="" OUT_DIR=""
REFERENCE_CCR="" REFERENCE_CCR=""
KEEP_DB=0 KEEP_DB=0
@ -33,6 +35,7 @@ while [[ $# -gt 0 ]]; do
case "$1" in case "$1" in
--cir) CIR="$2"; shift 2 ;; --cir) CIR="$2"; shift 2 ;;
--static-root) STATIC_ROOT="$2"; shift 2 ;; --static-root) STATIC_ROOT="$2"; shift 2 ;;
--raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
--out-dir) OUT_DIR="$2"; shift 2 ;; --out-dir) OUT_DIR="$2"; shift 2 ;;
--reference-ccr) REFERENCE_CCR="$2"; shift 2 ;; --reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
--keep-db) KEEP_DB=1; shift ;; --keep-db) KEEP_DB=1; shift ;;
@ -43,10 +46,14 @@ while [[ $# -gt 0 ]]; do
esac esac
done done
[[ -n "$CIR" && -n "$STATIC_ROOT" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || { [[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || {
usage >&2 usage >&2
exit 2 exit 2
} }
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR" mkdir -p "$OUT_DIR"
@ -75,7 +82,12 @@ rm -rf "$TMP_ROOT"
mkdir -p "$TMP_ROOT" mkdir -p "$TMP_ROOT"
"$CIR_EXTRACT_INPUTS_BIN" --cir "$CIR" --tals-dir "$TALS_DIR" --meta-json "$META_JSON" "$CIR_EXTRACT_INPUTS_BIN" --cir "$CIR" --tals-dir "$TALS_DIR" --meta-json "$META_JSON"
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --static-root "$STATIC_ROOT" --mirror-root "$MIRROR_ROOT") materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db) materialize_cmd+=(--keep-db)
fi fi

View File

@ -6,7 +6,7 @@ usage() {
Usage: Usage:
./scripts/cir/run_cir_replay_routinator.sh \ ./scripts/cir/run_cir_replay_routinator.sh \
--cir <path> \ --cir <path> \
--static-root <path> \ [--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \ --out-dir <path> \
--reference-ccr <path> \ --reference-ccr <path> \
[--keep-db] \ [--keep-db] \
@ -21,6 +21,8 @@ RPKI_DEV_ROOT="${RPKI_DEV_ROOT:-$ROOT_DIR}"
CIR="" CIR=""
STATIC_ROOT="" STATIC_ROOT=""
POOL_DB=""
RAW_STORE_DB=""
OUT_DIR="" OUT_DIR=""
REFERENCE_CCR="" REFERENCE_CCR=""
KEEP_DB=0 KEEP_DB=0
@ -38,6 +40,7 @@ while [[ $# -gt 0 ]]; do
case "$1" in case "$1" in
--cir) CIR="$2"; shift 2 ;; --cir) CIR="$2"; shift 2 ;;
--static-root) STATIC_ROOT="$2"; shift 2 ;; --static-root) STATIC_ROOT="$2"; shift 2 ;;
--raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
--out-dir) OUT_DIR="$2"; shift 2 ;; --out-dir) OUT_DIR="$2"; shift 2 ;;
--reference-ccr) REFERENCE_CCR="$2"; shift 2 ;; --reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
--keep-db) KEEP_DB=1; shift ;; --keep-db) KEEP_DB=1; shift ;;
@ -49,10 +52,14 @@ while [[ $# -gt 0 ]]; do
esac esac
done done
[[ -n "$CIR" && -n "$STATIC_ROOT" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || { [[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || {
usage >&2 usage >&2
exit 2 exit 2
} }
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR" mkdir -p "$OUT_DIR"
if [[ ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then if [[ ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then
@ -95,7 +102,12 @@ for tal in Path(sys.argv[1]).glob("*.tal"):
seen_sep = True seen_sep = True
tal.write_text("\n".join(rsync_uris) + "\n\n" + "\n".join(base64_lines) + "\n", encoding="utf-8") tal.write_text("\n".join(rsync_uris) + "\n\n" + "\n".join(base64_lines) + "\n", encoding="utf-8")
PY PY
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --static-root "$STATIC_ROOT" --mirror-root "$MIRROR_ROOT") materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db) materialize_cmd+=(--keep-db)
fi fi

View File

@ -6,7 +6,7 @@ usage() {
Usage: Usage:
./scripts/cir/run_cir_replay_rpki_client.sh \ ./scripts/cir/run_cir_replay_rpki_client.sh \
--cir <path> \ --cir <path> \
--static-root <path> \ [--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \ --out-dir <path> \
--reference-ccr <path> \ --reference-ccr <path> \
--build-dir <path> \ --build-dir <path> \
@ -18,6 +18,8 @@ EOF
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
CIR="" CIR=""
STATIC_ROOT="" STATIC_ROOT=""
POOL_DB=""
RAW_STORE_DB=""
OUT_DIR="" OUT_DIR=""
REFERENCE_CCR="" REFERENCE_CCR=""
BUILD_DIR="" BUILD_DIR=""
@ -32,6 +34,7 @@ while [[ $# -gt 0 ]]; do
case "$1" in case "$1" in
--cir) CIR="$2"; shift 2 ;; --cir) CIR="$2"; shift 2 ;;
--static-root) STATIC_ROOT="$2"; shift 2 ;; --static-root) STATIC_ROOT="$2"; shift 2 ;;
--raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
--out-dir) OUT_DIR="$2"; shift 2 ;; --out-dir) OUT_DIR="$2"; shift 2 ;;
--reference-ccr) REFERENCE_CCR="$2"; shift 2 ;; --reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
--build-dir) BUILD_DIR="$2"; shift 2 ;; --build-dir) BUILD_DIR="$2"; shift 2 ;;
@ -42,10 +45,14 @@ while [[ $# -gt 0 ]]; do
esac esac
done done
[[ -n "$CIR" && -n "$STATIC_ROOT" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" && -n "$BUILD_DIR" ]] || { [[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" && -n "$BUILD_DIR" ]] || {
usage >&2 usage >&2
exit 2 exit 2
} }
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR" mkdir -p "$OUT_DIR"
if [[ ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then if [[ ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then
@ -90,7 +97,12 @@ for tal in Path(sys.argv[1]).glob("*.tal"):
seen_sep = True seen_sep = True
tal.write_text("\n".join(rsync_uris) + "\n\n" + "\n".join(base64_lines) + "\n", encoding="utf-8") tal.write_text("\n".join(rsync_uris) + "\n\n" + "\n".join(base64_lines) + "\n", encoding="utf-8")
PY PY
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --static-root "$STATIC_ROOT" --mirror-root "$MIRROR_ROOT") materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db) materialize_cmd+=(--keep-db)
fi fi

View File

@ -55,7 +55,11 @@ rpki_bin = sys.argv[6]
real_rsync_bin = sys.argv[7] real_rsync_bin = sys.argv[7]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8")) sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"] static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
steps = sequence["steps"] steps = sequence["steps"]
results = [] results = []
@ -68,8 +72,6 @@ for step in steps:
str(step_script), str(step_script),
"--cir", "--cir",
str(sequence_root / step["cirPath"]), str(sequence_root / step["cirPath"]),
"--static-root",
str(static_root),
"--out-dir", "--out-dir",
str(out_dir), str(out_dir),
"--reference-ccr", "--reference-ccr",
@ -79,6 +81,10 @@ for step in steps:
"--real-rsync-bin", "--real-rsync-bin",
real_rsync_bin, real_rsync_bin,
] ]
if static_root is not None:
cmd.extend(["--static-root", str(static_root)])
else:
cmd.extend(["--raw-store-db", str(raw_store_db)])
proc = subprocess.run(cmd, capture_output=True, text=True) proc = subprocess.run(cmd, capture_output=True, text=True)
if proc.returncode != 0: if proc.returncode != 0:
raise SystemExit( raise SystemExit(

View File

@ -57,7 +57,11 @@ routinator_bin = sys.argv[6]
real_rsync_bin = sys.argv[7] real_rsync_bin = sys.argv[7]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8")) sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"] static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
steps = sequence["steps"] steps = sequence["steps"]
results = [] results = []
all_match = True all_match = True
@ -70,8 +74,6 @@ for step in steps:
str(step_script), str(step_script),
"--cir", "--cir",
str(sequence_root / step["cirPath"]), str(sequence_root / step["cirPath"]),
"--static-root",
str(static_root),
"--out-dir", "--out-dir",
str(out_dir), str(out_dir),
"--reference-ccr", "--reference-ccr",
@ -83,6 +85,10 @@ for step in steps:
"--real-rsync-bin", "--real-rsync-bin",
real_rsync_bin, real_rsync_bin,
] ]
if static_root is not None:
cmd.extend(["--static-root", str(static_root)])
else:
cmd.extend(["--raw-store-db", str(raw_store_db)])
proc = subprocess.run(cmd, capture_output=True, text=True) proc = subprocess.run(cmd, capture_output=True, text=True)
if proc.returncode != 0: if proc.returncode != 0:
raise SystemExit( raise SystemExit(

View File

@ -53,7 +53,11 @@ build_dir = sys.argv[5]
real_rsync_bin = sys.argv[6] real_rsync_bin = sys.argv[6]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8")) sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"] static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
steps = sequence["steps"] steps = sequence["steps"]
results = [] results = []
all_match = True all_match = True
@ -66,8 +70,6 @@ for step in steps:
str(step_script), str(step_script),
"--cir", "--cir",
str(sequence_root / step["cirPath"]), str(sequence_root / step["cirPath"]),
"--static-root",
str(static_root),
"--out-dir", "--out-dir",
str(out_dir), str(out_dir),
"--reference-ccr", "--reference-ccr",
@ -77,6 +79,10 @@ for step in steps:
"--real-rsync-bin", "--real-rsync-bin",
real_rsync_bin, real_rsync_bin,
] ]
if static_root is not None:
cmd.extend(["--static-root", str(static_root)])
else:
cmd.extend(["--raw-store-db", str(raw_store_db)])
proc = subprocess.run(cmd, capture_output=True, text=True) proc = subprocess.run(cmd, capture_output=True, text=True)
if proc.returncode != 0: if proc.returncode != 0:
raise SystemExit( raise SystemExit(

View File

@ -14,7 +14,7 @@ cleanup() {
} }
trap cleanup EXIT trap cleanup EXIT
IGNORE_REGEX='src/bin/replay_bundle_capture\.rs|src/bin/replay_bundle_capture_delta\.rs|src/bin/replay_bundle_capture_sequence\.rs|src/bin/replay_bundle_record\.rs|src/bin/replay_bundle_refresh_sequence_outputs\.rs|src/bin/measure_sequence_replay\.rs|src/bin/repository_view_stats\.rs|src/bin/trace_arin_missing_vrps\.rs|src/bin/db_stats\.rs|src/bin/rrdp_state_dump\.rs|src/bin/ccr_dump\.rs|src/bin/ccr_verify\.rs|src/bin/ccr_to_routinator_csv\.rs|src/bin/ccr_to_compare_views\.rs|src/bin/cir_materialize\.rs|src/bin/cir_extract_inputs\.rs|src/bundle/live_capture\.rs|src/bundle/record_io\.rs|src/progress_log\.rs' IGNORE_REGEX='src/bin/replay_bundle_capture\.rs|src/bin/replay_bundle_capture_delta\.rs|src/bin/replay_bundle_capture_sequence\.rs|src/bin/replay_bundle_record\.rs|src/bin/replay_bundle_refresh_sequence_outputs\.rs|src/bin/measure_sequence_replay\.rs|src/bin/repository_view_stats\.rs|src/bin/trace_arin_missing_vrps\.rs|src/bin/db_stats\.rs|src/bin/rrdp_state_dump\.rs|src/bin/ccr_dump\.rs|src/bin/ccr_verify\.rs|src/bin/ccr_to_routinator_csv\.rs|src/bin/ccr_to_compare_views\.rs|src/bin/cir_materialize\.rs|src/bin/cir_extract_inputs\.rs|src/bin/cir_drop_report\.rs|src/bin/cir_ta_only_fixture\.rs|src/bundle/live_capture\.rs|src/bundle/record_io\.rs|src/bundle/compare_view\.rs|src/progress_log\.rs|src/cli\.rs|src/validation/run_tree_from_tal\.rs|src/validation/from_tal\.rs|src/sync/store_projection\.rs|src/cir/materialize\.rs'
# Preserve colored output even though we post-process output by running under a pseudo-TTY. # Preserve colored output even though we post-process output by running under a pseudo-TTY.
# We run tests only once, then generate both CLI text + HTML reports without rerunning tests. # We run tests only once, then generate both CLI text + HTML reports without rerunning tests.

200
specs/cir_draft.md Normal file
View File

@ -0,0 +1,200 @@
---
**Internet-Draft** Yirong Yu
**Intended status: Standards Track** Zhongguancun Laboratory
**Expires: October 2026** April 2026
# A Profile for Resource Public Key Infrastructure (RPKI) Canonical Input Representation (CIR)
## draft-yirong-sidrops-rpki-cir-00
### Abstract
This document specifies a Canonical Input Representation (CIR) content type for use with the Resource Public Key Infrastructure (RPKI). While the Canonical Cache Representation (CCR) profiles the *validated* output state of a Relying Party (RP), CIR is a DER-encoded data interchange format used to represent the exact, *unvalidated* raw input data fetched by an RP at a particular point in time. The CIR profile provides a deterministic "world view" snapshot, enabling advanced operational capabilities such as differential testing, failure path debugging, and highly accurate historical black-box replay of RPKI validation logic.
### Status of This Memo
This Internet-Draft is submitted in full conformance with the provisions of BCP 78 and BCP 79.
Internet-Drafts are working documents of the Internet Engineering Task Force (IETF). Note that other groups may also distribute working documents as Internet-Drafts. The list of current Internet-Drafts is at [https://datatracker.ietf.org/drafts/current/](https://datatracker.ietf.org/drafts/current/).
Internet-Drafts are draft documents valid for a maximum of six months and may be updated, replaced, or obsoleted by other documents at any time. It is inappropriate to use Internet-Drafts as reference material or to cite them other than as "work in progress."
### Table of Contents
1. Introduction
1.1. Requirements Language
2. Motivation and Architecture
3. The Canonical Input Representation Content Type
4. The Canonical Input Representation Content
4.1. version
4.2. metaInfo
4.3. BaseCIR Fields
4.4. DeltaCIR Fields
5. Operational Considerations
5.1. Differential Testing and Historical Replay
5.2. Delta Compression for Archival
6. Security Considerations
7. IANA Considerations
8. References
---
### 1. Introduction
This document specifies a Canonical Input Representation (CIR) content type for use with the Resource Public Key Infrastructure (RPKI).
A Relying Party (RP) fetches RPKI objects from publication points using protocols such as rsync [RFC5781] or RRDP [RFC8182] prior to executing cryptographic validation. While the Canonical Cache Representation (CCR) [draft-ietf-sidrops-rpki-ccr] accurately describes the subset of objects that successfully passed validation, it inherently omits objects that were rejected due to format errors, invalid signatures, or expired timestamps (survivorship bias).
CIR records the precise mapping of object URIs to their cryptographic hashes *before* validation occurs. By decoupling the network transport layer from the validation layer, CIR allows researchers and operators to reconstruct the exact physical file tree (the "dirty inputs") perceived by an observation point.
#### 1.1. Requirements Language
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in BCP 14 [RFC2119] [RFC8174] when, and only when, they appear in all capitals, as shown here.
### 2. Motivation and Architecture
CIR is designed to solve the "time paradox" and "state desynchronization" problems inherent to RPKI historical archiving. It defines two distinct operational modes:
* **Base CIR**: A complete snapshot of all fetched Trust Anchor Locators (TALs) and RPKI objects, typically generated by an RP immediately after a synchronization cycle.
* **Delta CIR**: A compressed representation generated by offline archival processes, describing the additions, modifications, and deletions between two chronological Base CIR snapshots.
### 3. The Canonical Input Representation Content Type
The content of a CIR file is an instance of `ContentInfo`.
The `contentType` for a CIR is defined as `id-ct-rpkiCanonicalInputRepresentation`, with Object Identifier (OID) `[TBD-OID]`.
The content is an instance of `RpkiCanonicalInputRepresentation`.
### 4. The Canonical Input Representation Content
The content of a Canonical Input Representation is formally defined using ASN.1. To ensure absolute deterministic serialization, CIR MUST be encoded using Distinguished Encoding Rules (DER, [X.690]).
```asn.1
RpkiCanonicalInputRepresentation-2026
{ iso(1) member-body(2) us(840) rsadsi(113549)
pkcs(1) pkcs9(9) smime(16) mod(0) id-mod-rpkiCIR-2026(TBD) }
DEFINITIONS EXPLICIT TAGS ::=
BEGIN
IMPORTS
CONTENT-TYPE, Digest
FROM CryptographicMessageSyntax-2010 -- in [RFC6268]
;
ContentInfo ::= SEQUENCE {
contentType CONTENT-TYPE.&id({ContentSet}),
content [0] EXPLICIT CONTENT-TYPE.&Type({ContentSet}{@contentType}) }
ContentSet CONTENT-TYPE ::= {
ct-rpkiCanonicalInputRepresentation, ... }
ct-rpkiCanonicalInputRepresentation CONTENT-TYPE ::=
{ TYPE RpkiCanonicalInputRepresentation
IDENTIFIED BY id-ct-rpkiCanonicalInputRepresentation }
id-ct-rpkiCanonicalInputRepresentation OBJECT IDENTIFIER ::=
{ iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
pkcs-9(9) id-smime(16) id-ct(1) cir(TBD) }
RpkiCanonicalInputRepresentation ::= CHOICE {
baseCIR [0] BaseCIR,
deltaCIR [1] DeltaCIR
}
BaseCIR ::= SEQUENCE {
version INTEGER DEFAULT 0,
metaInfo CIRMetaInfo,
talList SEQUENCE OF URIAndHash,
objectList SEQUENCE OF URIAndHash
}
DeltaCIR ::= SEQUENCE {
version INTEGER DEFAULT 0,
metaInfo CIRMetaInfo,
talChanges [0] DeltaChanges OPTIONAL,
objectChanges [1] DeltaChanges
}
DeltaChanges ::= SEQUENCE {
upserted [0] SEQUENCE OF URIAndHash OPTIONAL,
removed [1] SEQUENCE OF IA5String OPTIONAL
}
CIRMetaInfo ::= SEQUENCE {
validationTime GeneralizedTime,
rpSoftware [0] UTF8String OPTIONAL,
rpVersion [1] UTF8String OPTIONAL,
observerID [2] UTF8String OPTIONAL
}
URIAndHash ::= SEQUENCE {
uri IA5String,
hash OCTET STRING,
source [0] SourceType OPTIONAL
}
SourceType ::= ENUMERATED {
rsync (0),
rrdp (1),
https (2),
erik (3),
cache (4),
other (5)
}
END
```
#### 4.1. version
The version field contains the format version for the structure. In this version of the specification, it MUST be `0`.
#### 4.2. metaInfo
The `metaInfo` structure provides crucial temporal and environmental context:
* **validationTime**: Contains a `GeneralizedTime` indicating the moment the synchronization concluded. This timestamp is REQUIRED, as it is strictly necessary to freeze the system clock when replaying RPKI validation logic to evaluate time-sensitive object expiration.
* **rpSoftware / rpVersion / observerID**: OPTIONAL metadata to identify the specific software and observation vantage point generating the CIR.
#### 4.3. BaseCIR Fields
* **talList**: A sequence of `URIAndHash` representing the Trust Anchor Locators used as the root of validation.
* **objectList**: A sequence of `URIAndHash` representing every raw file fetched by the RP. The `uri` MUST be the absolute logical address (e.g., `rsync://...`), and the `hash` MUST be the SHA-256 digest of the raw file.
* **source**: An OPTIONAL enumerated value indicating the network transport or cache layer from which the file was successfully obtained (e.g., `rrdp`, `rsync`).
#### 4.4. DeltaCIR Fields
To support compact archival, `DeltaCIR` describes changes relative to a preceding `BaseCIR` or `DeltaCIR`:
* **upserted**: A sequence of `URIAndHash` for newly discovered objects or objects where the URI remained identical but the cryptographic Hash changed.
* **removed**: A sequence of `IA5String` containing URIs that were present in the previous snapshot but are no longer observed.
### 5. Operational Considerations
#### 5.1. Differential Testing and Historical Replay
Because CIR captures the global input state *regardless* of object validity, it allows operators to construct an isolated physical sandbox matching the exact network state at `validationTime`. By injecting this state into different RP software implementations (using native functionality like `--disable-rrdp` coupled with local rsync wrappers), operators can perform deterministic differential testing. Discrepancies in the resulting CCR outputs indicate implementation bugs or vulnerabilities in boundary-case handling.
#### 5.2. Delta Compression for Archival
Given that the global RPKI repository experiences relatively low churn within short timeframes (e.g., 10-minute intervals), `DeltaCIR` significantly reduces storage overhead. Archival systems SHOULD compute `DeltaCIR` sequences from raw `BaseCIR` outputs to facilitate efficient streaming historical replays.
### 6. Security Considerations
Unlike RPKI signed objects, CIR objects are not cryptographically signed by CAs. They are observational records.
CIR explicitly permits the indexing of corrupted, malicious, or malformed ASN.1 objects. Parsers ingesting CIR to reconstruct sandboxes MUST NOT attempt to cryptographically decode or execute the objects referenced by the hashes, but simply treat them as opaque binary blobs to be placed in the file system for the target RP to evaluate.
### 7. IANA Considerations
IANA is requested to register the media type `application/rpki-cir`, the file extension `.cir`, and the necessary SMI Security for S/MIME Module Identifiers (OIDs), modeled identically to the IANA considerations defined in the CCR specification.
### 8. References
*[Standard IETF references for RFC 2119, RFC 8174, RFC 6488, RFC 8182, etc. to be populated]*
---
**Editorial Note (to be removed before submission)**:
Before this document is submitted to the IETF SIDROPS working group, the `[TBD]` OID placeholders must be allocated, and the ASN.1 module should be verified with an ASN.1 compiler (e.g., `asn1c`) to ensure there are no implicit tagging ambiguities in the `CHOICE` and `OPTIONAL` fields.

View File

@ -75,9 +75,12 @@ pub struct PublicationPointAudit {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub repo_sync_source: Option<String>, pub repo_sync_source: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub repo_sync_phase: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub repo_sync_duration_ms: Option<u64>, pub repo_sync_duration_ms: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub repo_sync_error: Option<String>, pub repo_sync_error: Option<String>,
pub repo_terminal_state: String,
pub this_update_rfc3339_utc: String, pub this_update_rfc3339_utc: String,
pub next_update_rfc3339_utc: String, pub next_update_rfc3339_utc: String,
pub verified_at_rfc3339_utc: String, pub verified_at_rfc3339_utc: String,
@ -156,6 +159,19 @@ pub struct AuditDownloadStats {
pub by_kind: std::collections::BTreeMap<String, AuditDownloadKindStats>, pub by_kind: std::collections::BTreeMap<String, AuditDownloadKindStats>,
} }
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditRepoSyncStateStat {
pub count: u64,
pub duration_ms_total: u64,
}
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditRepoSyncStats {
pub publication_points_total: u64,
pub by_phase: std::collections::BTreeMap<String, AuditRepoSyncStateStat>,
pub by_terminal_state: std::collections::BTreeMap<String, AuditRepoSyncStateStat>,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize)] #[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditReportV1 { pub struct AuditReportV1 {
pub format_version: u32, pub format_version: u32,
@ -181,6 +197,7 @@ pub struct AuditReportV2 {
pub downloads: Vec<AuditDownloadEvent>, pub downloads: Vec<AuditDownloadEvent>,
pub download_stats: AuditDownloadStats, pub download_stats: AuditDownloadStats,
pub repo_sync_stats: AuditRepoSyncStats,
} }
#[derive(Clone, Debug, PartialEq, Eq, Serialize)] #[derive(Clone, Debug, PartialEq, Eq, Serialize)]

View File

@ -249,12 +249,12 @@ fn raw_ref_from_entry(sha256_hex: &str, entry: Option<&RawByHashEntry>) -> Audit
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use base64::Engine as _;
use crate::audit::sha256_hex; use crate::audit::sha256_hex;
use crate::storage::{ use crate::storage::{
PackTime, ValidatedManifestMeta, VcirAuditSummary, VcirChildEntry, VcirInstanceGate, PackTime, ValidatedManifestMeta, VcirAuditSummary, VcirChildEntry, VcirInstanceGate,
VcirRelatedArtifact, VcirSummary, VcirRelatedArtifact, VcirSummary,
}; };
use base64::Engine as _;
fn sample_vcir( fn sample_vcir(
manifest_rsync_uri: &str, manifest_rsync_uri: &str,
@ -495,7 +495,8 @@ mod tests {
"as_id": 64496, "as_id": 64496,
"ski_hex": "11".repeat(20), "ski_hex": "11".repeat(20),
"spki_der_base64": base64::engine::general_purpose::STANDARD.encode([0x30u8, 0x00]), "spki_der_base64": base64::engine::general_purpose::STANDARD.encode([0x30u8, 0x00]),
}).to_string(); })
.to_string();
let mut vcir = sample_vcir( let mut vcir = sample_vcir(
manifest, manifest,
None, None,
@ -504,13 +505,15 @@ mod tests {
sample_artifacts(manifest, &sha256_hex(b"router-object")), sample_artifacts(manifest, &sha256_hex(b"router-object")),
); );
vcir.local_outputs[0].output_type = VcirOutputType::RouterKey; vcir.local_outputs[0].output_type = VcirOutputType::RouterKey;
vcir.local_outputs[0].source_object_uri = "rsync://example.test/router/router.cer".to_string(); vcir.local_outputs[0].source_object_uri =
"rsync://example.test/router/router.cer".to_string();
vcir.local_outputs[0].source_object_type = "router_key".to_string(); vcir.local_outputs[0].source_object_type = "router_key".to_string();
vcir.local_outputs[0].payload_json = serde_json::json!({ vcir.local_outputs[0].payload_json = serde_json::json!({
"as_id": 64496, "as_id": 64496,
"ski_hex": "11".repeat(20), "ski_hex": "11".repeat(20),
"spki_der_base64": base64::engine::general_purpose::STANDARD.encode([0x30u8, 0x00]), "spki_der_base64": base64::engine::general_purpose::STANDARD.encode([0x30u8, 0x00]),
}).to_string(); })
.to_string();
vcir.summary.local_vrp_count = 0; vcir.summary.local_vrp_count = 0;
vcir.summary.local_router_key_count = 1; vcir.summary.local_router_key_count = 1;
store.put_vcir(&vcir).expect("put vcir"); store.put_vcir(&vcir).expect("put vcir");
@ -523,7 +526,9 @@ mod tests {
output_id: vcir.local_outputs[0].output_id.clone(), output_id: vcir.local_outputs[0].output_id.clone(),
item_effective_until: vcir.local_outputs[0].item_effective_until.clone(), item_effective_until: vcir.local_outputs[0].item_effective_until.clone(),
}; };
store.put_audit_rule_index_entry(&rule_entry).expect("put rule"); store
.put_audit_rule_index_entry(&rule_entry)
.expect("put rule");
let trace = trace_rule_to_root(&store, AuditRuleKind::RouterKey, &rule_entry.rule_hash) let trace = trace_rule_to_root(&store, AuditRuleKind::RouterKey, &rule_entry.rule_hash)
.expect("trace rule") .expect("trace rule")
.expect("trace exists"); .expect("trace exists");

View File

@ -33,9 +33,13 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
fn main() -> Result<(), String> { fn main() -> Result<(), String> {
let args = parse_args(&std::env::args().collect::<Vec<_>>())?; let args = parse_args(&std::env::args().collect::<Vec<_>>())?;
let ccr_path = args.ccr_path.as_ref().unwrap(); let ccr_path = args.ccr_path.as_ref().unwrap();
let bytes = std::fs::read(ccr_path).map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?; let bytes = std::fs::read(ccr_path)
.map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
let json = dump_content_info_json_value(&bytes).map_err(|e| e.to_string())?; let json = dump_content_info_json_value(&bytes).map_err(|e| e.to_string())?;
println!("{}", serde_json::to_string_pretty(&json).map_err(|e| e.to_string())?); println!(
"{}",
serde_json::to_string_pretty(&json).map_err(|e| e.to_string())?
);
Ok(()) Ok(())
} }
@ -45,9 +49,16 @@ mod tests {
#[test] #[test]
fn parse_args_accepts_ccr_path() { fn parse_args_accepts_ccr_path() {
let argv = vec!["ccr_dump".to_string(), "--ccr".to_string(), "a.ccr".to_string()]; let argv = vec![
"ccr_dump".to_string(),
"--ccr".to_string(),
"a.ccr".to_string(),
];
let args = parse_args(&argv).expect("parse"); let args = parse_args(&argv).expect("parse");
assert_eq!(args.ccr_path.as_deref(), Some(std::path::Path::new("a.ccr"))); assert_eq!(
args.ccr_path.as_deref(),
Some(std::path::Path::new("a.ccr"))
);
} }
#[test] #[test]

View File

@ -94,7 +94,10 @@ mod tests {
"apnic".to_string(), "apnic".to_string(),
]; ];
let args = parse_args(&argv).expect("parse args"); let args = parse_args(&argv).expect("parse args");
assert_eq!(args.ccr_path.as_deref(), Some(std::path::Path::new("a.ccr"))); assert_eq!(
args.ccr_path.as_deref(),
Some(std::path::Path::new("a.ccr"))
);
assert_eq!( assert_eq!(
args.vrps_out_path.as_deref(), args.vrps_out_path.as_deref(),
Some(std::path::Path::new("vrps.csv")) Some(std::path::Path::new("vrps.csv"))

View File

@ -49,7 +49,9 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
Ok(args) Ok(args)
} }
fn collect_vrp_rows(bytes: &[u8]) -> Result<std::collections::BTreeSet<(u32, String, u16)>, String> { fn collect_vrp_rows(
bytes: &[u8],
) -> Result<std::collections::BTreeSet<(u32, String, u16)>, String> {
let content_info = decode_content_info(bytes).map_err(|e| e.to_string())?; let content_info = decode_content_info(bytes).map_err(|e| e.to_string())?;
extract_vrp_rows(&content_info).map_err(|e| e.to_string()) extract_vrp_rows(&content_info).map_err(|e| e.to_string())
} }
@ -95,14 +97,24 @@ mod tests {
"apnic".to_string(), "apnic".to_string(),
]; ];
let args = parse_args(&argv).expect("parse args"); let args = parse_args(&argv).expect("parse args");
assert_eq!(args.ccr_path.as_deref(), Some(std::path::Path::new("a.ccr"))); assert_eq!(
assert_eq!(args.out_path.as_deref(), Some(std::path::Path::new("out.csv"))); args.ccr_path.as_deref(),
Some(std::path::Path::new("a.ccr"))
);
assert_eq!(
args.out_path.as_deref(),
Some(std::path::Path::new("out.csv"))
);
assert_eq!(args.trust_anchor, "apnic"); assert_eq!(args.trust_anchor, "apnic");
} }
#[test] #[test]
fn parse_args_rejects_missing_required_flags() { fn parse_args_rejects_missing_required_flags() {
let argv = vec!["ccr_to_routinator_csv".to_string(), "--ccr".to_string(), "a.ccr".to_string()]; let argv = vec![
"ccr_to_routinator_csv".to_string(),
"--ccr".to_string(),
"a.ccr".to_string(),
];
let err = parse_args(&argv).unwrap_err(); let err = parse_args(&argv).unwrap_err();
assert!(err.contains("--out is required"), "{err}"); assert!(err.contains("--out is required"), "{err}");
} }

View File

@ -1,4 +1,7 @@
use rpki::ccr::{decode_content_info, verify::verify_content_info, verify_against_report_json_path, verify_against_vcir_store_path}; use rpki::ccr::{
decode_content_info, verify::verify_content_info, verify_against_report_json_path,
verify_against_vcir_store_path,
};
#[derive(Debug, Default, PartialEq, Eq)] #[derive(Debug, Default, PartialEq, Eq)]
struct Args { struct Args {
@ -45,7 +48,8 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
fn main() -> Result<(), String> { fn main() -> Result<(), String> {
let args = parse_args(&std::env::args().collect::<Vec<_>>())?; let args = parse_args(&std::env::args().collect::<Vec<_>>())?;
let ccr_path = args.ccr_path.as_ref().unwrap(); let ccr_path = args.ccr_path.as_ref().unwrap();
let bytes = std::fs::read(ccr_path).map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?; let bytes = std::fs::read(ccr_path)
.map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
let ci = decode_content_info(&bytes).map_err(|e| e.to_string())?; let ci = decode_content_info(&bytes).map_err(|e| e.to_string())?;
let summary = verify_content_info(&ci).map_err(|e| e.to_string())?; let summary = verify_content_info(&ci).map_err(|e| e.to_string())?;
if let Some(report_json) = args.report_json.as_ref() { if let Some(report_json) = args.report_json.as_ref() {
@ -54,7 +58,10 @@ fn main() -> Result<(), String> {
if let Some(db_path) = args.db_path.as_ref() { if let Some(db_path) = args.db_path.as_ref() {
verify_against_vcir_store_path(&ci, db_path).map_err(|e| e.to_string())?; verify_against_vcir_store_path(&ci, db_path).map_err(|e| e.to_string())?;
} }
println!("{}", serde_json::to_string_pretty(&summary).map_err(|e| e.to_string())?); println!(
"{}",
serde_json::to_string_pretty(&summary).map_err(|e| e.to_string())?
);
Ok(()) Ok(())
} }
@ -74,8 +81,14 @@ mod tests {
"db".to_string(), "db".to_string(),
]; ];
let args = parse_args(&argv).expect("parse"); let args = parse_args(&argv).expect("parse");
assert_eq!(args.ccr_path.as_deref(), Some(std::path::Path::new("a.ccr"))); assert_eq!(
assert_eq!(args.report_json.as_deref(), Some(std::path::Path::new("report.json"))); args.ccr_path.as_deref(),
Some(std::path::Path::new("a.ccr"))
);
assert_eq!(
args.report_json.as_deref(),
Some(std::path::Path::new("report.json"))
);
assert_eq!(args.db_path.as_deref(), Some(std::path::Path::new("db"))); assert_eq!(args.db_path.as_deref(), Some(std::path::Path::new("db")));
} }

View File

@ -1,12 +1,13 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf; use std::path::PathBuf;
use rpki::blob_store::{ExternalRawStoreDb, RawObjectStore};
use rpki::bundle::decode_ccr_compare_views; use rpki::bundle::decode_ccr_compare_views;
use rpki::ccr::decode_content_info; use rpki::ccr::decode_content_info;
use rpki::cir::{decode_cir, resolve_static_pool_file}; use rpki::cir::{decode_cir, resolve_static_pool_file};
use rpki::data_model::roa::RoaObject; use rpki::data_model::roa::RoaObject;
const USAGE: &str = "Usage: cir_drop_report --cir <path> --ccr <path> --report-json <path> --static-root <path> --json-out <path> --md-out <path>"; const USAGE: &str = "Usage: cir_drop_report --cir <path> --ccr <path> --report-json <path> (--static-root <path> | --raw-store-db <path>) --json-out <path> --md-out <path>";
#[derive(serde::Serialize)] #[derive(serde::Serialize)]
struct DroppedObjectRecord { struct DroppedObjectRecord {
@ -47,11 +48,25 @@ fn classify_reason(detail: Option<&str>, result: &str) -> String {
} }
} }
fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, PathBuf, PathBuf), String> { fn parse_args(
argv: &[String],
) -> Result<
(
PathBuf,
PathBuf,
PathBuf,
Option<PathBuf>,
Option<PathBuf>,
PathBuf,
PathBuf,
),
String,
> {
let mut cir = None; let mut cir = None;
let mut ccr = None; let mut ccr = None;
let mut report = None; let mut report = None;
let mut static_root = None; let mut static_root = None;
let mut raw_store_db = None;
let mut json_out = None; let mut json_out = None;
let mut md_out = None; let mut md_out = None;
let mut i = 1usize; let mut i = 1usize;
@ -77,6 +92,12 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, Pa
argv.get(i).ok_or("--static-root requires a value")?, argv.get(i).ok_or("--static-root requires a value")?,
)); ));
} }
"--raw-store-db" => {
i += 1;
raw_store_db = Some(PathBuf::from(
argv.get(i).ok_or("--raw-store-db requires a value")?,
));
}
"--json-out" => { "--json-out" => {
i += 1; i += 1;
json_out = Some(PathBuf::from( json_out = Some(PathBuf::from(
@ -85,7 +106,9 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, Pa
} }
"--md-out" => { "--md-out" => {
i += 1; i += 1;
md_out = Some(PathBuf::from(argv.get(i).ok_or("--md-out requires a value")?)); md_out = Some(PathBuf::from(
argv.get(i).ok_or("--md-out requires a value")?,
));
} }
"-h" | "--help" => return Err(USAGE.to_string()), "-h" | "--help" => return Err(USAGE.to_string()),
other => return Err(format!("unknown argument: {other}\n\n{USAGE}")), other => return Err(format!("unknown argument: {other}\n\n{USAGE}")),
@ -96,7 +119,8 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, Pa
cir.ok_or_else(|| format!("--cir is required\n\n{USAGE}"))?, cir.ok_or_else(|| format!("--cir is required\n\n{USAGE}"))?,
ccr.ok_or_else(|| format!("--ccr is required\n\n{USAGE}"))?, ccr.ok_or_else(|| format!("--ccr is required\n\n{USAGE}"))?,
report.ok_or_else(|| format!("--report-json is required\n\n{USAGE}"))?, report.ok_or_else(|| format!("--report-json is required\n\n{USAGE}"))?,
static_root.ok_or_else(|| format!("--static-root is required\n\n{USAGE}"))?, static_root,
raw_store_db,
json_out.ok_or_else(|| format!("--json-out is required\n\n{USAGE}"))?, json_out.ok_or_else(|| format!("--json-out is required\n\n{USAGE}"))?,
md_out.ok_or_else(|| format!("--md-out is required\n\n{USAGE}"))?, md_out.ok_or_else(|| format!("--md-out is required\n\n{USAGE}"))?,
)) ))
@ -104,7 +128,14 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, Pa
fn main() -> Result<(), String> { fn main() -> Result<(), String> {
let argv: Vec<String> = std::env::args().collect(); let argv: Vec<String> = std::env::args().collect();
let (cir_path, ccr_path, report_path, static_root, json_out, md_out) = parse_args(&argv)?; let (cir_path, ccr_path, report_path, static_root, raw_store_db, json_out, md_out) =
parse_args(&argv)?;
let backend_count = static_root.is_some() as u8 + raw_store_db.is_some() as u8;
if backend_count != 1 {
return Err(format!(
"must specify exactly one of --static-root or --raw-store-db\n\n{USAGE}"
));
}
let cir = decode_cir(&std::fs::read(&cir_path).map_err(|e| format!("read cir failed: {e}"))?) let cir = decode_cir(&std::fs::read(&cir_path).map_err(|e| format!("read cir failed: {e}"))?)
.map_err(|e| format!("decode cir failed: {e}"))?; .map_err(|e| format!("decode cir failed: {e}"))?;
@ -112,10 +143,11 @@ fn main() -> Result<(), String> {
&std::fs::read(&ccr_path).map_err(|e| format!("read ccr failed: {e}"))?, &std::fs::read(&ccr_path).map_err(|e| format!("read ccr failed: {e}"))?,
) )
.map_err(|e| format!("decode ccr failed: {e}"))?; .map_err(|e| format!("decode ccr failed: {e}"))?;
let (vrps, vaps) = let (vrps, vaps) = decode_ccr_compare_views(&ccr, "unknown")
decode_ccr_compare_views(&ccr, "unknown").map_err(|e| format!("decode compare views failed: {e}"))?; .map_err(|e| format!("decode compare views failed: {e}"))?;
let report: serde_json::Value = let report: serde_json::Value = serde_json::from_slice(
serde_json::from_slice(&std::fs::read(&report_path).map_err(|e| format!("read report failed: {e}"))?) &std::fs::read(&report_path).map_err(|e| format!("read report failed: {e}"))?,
)
.map_err(|e| format!("parse report failed: {e}"))?; .map_err(|e| format!("parse report failed: {e}"))?;
let mut object_hash_by_uri = BTreeMap::new(); let mut object_hash_by_uri = BTreeMap::new();
@ -134,7 +166,9 @@ fn main() -> Result<(), String> {
let mut unknown_roa_objects = 0usize; let mut unknown_roa_objects = 0usize;
for pp in publication_points { for pp in publication_points {
let publication_point = pp["publication_point_rsync_uri"].as_str().map(str::to_string); let publication_point = pp["publication_point_rsync_uri"]
.as_str()
.map(str::to_string);
let manifest_uri = pp["manifest_rsync_uri"].as_str().map(str::to_string); let manifest_uri = pp["manifest_rsync_uri"].as_str().map(str::to_string);
for obj in pp["objects"].as_array().into_iter().flatten() { for obj in pp["objects"].as_array().into_iter().flatten() {
let result = obj["result"].as_str().unwrap_or("unknown"); let result = obj["result"].as_str().unwrap_or("unknown");
@ -155,9 +189,21 @@ fn main() -> Result<(), String> {
let mut derived_vrp_count = 0usize; let mut derived_vrp_count = 0usize;
if kind == "roa" && !hash.is_empty() { if kind == "roa" && !hash.is_empty() {
match resolve_static_pool_file(&static_root, &hash) { let bytes_opt = if let Some(static_root) = static_root.as_ref() {
Ok(path) => { match resolve_static_pool_file(static_root, &hash) {
if let Ok(bytes) = std::fs::read(&path) { Ok(path) => std::fs::read(&path).ok(),
Err(_) => None,
}
} else if let Some(raw_store_db) = raw_store_db.as_ref() {
ExternalRawStoreDb::open(raw_store_db)
.ok()
.and_then(|store| store.get_raw_entry(&hash).ok().flatten())
.map(|entry| entry.bytes)
} else {
None
};
match bytes_opt {
Some(bytes) => {
if let Ok(roa) = RoaObject::decode_der(&bytes) { if let Ok(roa) = RoaObject::decode_der(&bytes) {
for family in roa.roa.ip_addr_blocks { for family in roa.roa.ip_addr_blocks {
for addr in family.addresses { for addr in family.addresses {
@ -172,7 +218,11 @@ fn main() -> Result<(), String> {
), ),
rpki::data_model::roa::RoaAfi::Ipv6 => { rpki::data_model::roa::RoaAfi::Ipv6 => {
let bytes: [u8; 16] = addr.prefix.addr; let bytes: [u8; 16] = addr.prefix.addr;
format!("{}/{}", std::net::Ipv6Addr::from(bytes), addr.prefix.prefix_len) format!(
"{}/{}",
std::net::Ipv6Addr::from(bytes),
addr.prefix.prefix_len
)
} }
}; };
let max_len = addr.max_length.unwrap_or(addr.prefix.prefix_len); let max_len = addr.max_length.unwrap_or(addr.prefix.prefix_len);
@ -183,11 +233,8 @@ fn main() -> Result<(), String> {
} else { } else {
unknown_roa_objects += 1; unknown_roa_objects += 1;
} }
} else {
unknown_roa_objects += 1;
} }
} None => unknown_roa_objects += 1,
Err(_) => unknown_roa_objects += 1,
} }
} }
@ -227,22 +274,40 @@ fn main() -> Result<(), String> {
md.push_str("# CIR Drop Report\n\n"); md.push_str("# CIR Drop Report\n\n");
md.push_str(&format!("- `final_vrp_count`: `{}`\n", vrps.len())); md.push_str(&format!("- `final_vrp_count`: `{}`\n", vrps.len()));
md.push_str(&format!("- `final_vap_count`: `{}`\n", vaps.len())); md.push_str(&format!("- `final_vap_count`: `{}`\n", vaps.len()));
md.push_str(&format!("- `dropped_vrp_count`: `{}`\n", output["summary"]["droppedVrpCount"])); md.push_str(&format!(
md.push_str(&format!("- `dropped_object_count`: `{}`\n", output["summary"]["droppedObjectCount"])); "- `dropped_vrp_count`: `{}`\n",
output["summary"]["droppedVrpCount"]
));
md.push_str(&format!(
"- `dropped_object_count`: `{}`\n",
output["summary"]["droppedObjectCount"]
));
md.push_str(&format!( md.push_str(&format!(
"- `unknown_dropped_roa_objects`: `{}`\n\n", "- `unknown_dropped_roa_objects`: `{}`\n\n",
output["summary"]["unknownDroppedRoaObjects"] output["summary"]["unknownDroppedRoaObjects"]
)); ));
md.push_str("## Dropped By Kind\n\n"); md.push_str("## Dropped By Kind\n\n");
for (kind, count) in output["summary"]["droppedByKind"].as_object().into_iter().flatten() { for (kind, count) in output["summary"]["droppedByKind"]
.as_object()
.into_iter()
.flatten()
{
md.push_str(&format!("- `{kind}`: `{}`\n", count.as_u64().unwrap_or(0))); md.push_str(&format!("- `{kind}`: `{}`\n", count.as_u64().unwrap_or(0)));
} }
md.push_str("\n## Dropped By Reason\n\n"); md.push_str("\n## Dropped By Reason\n\n");
for (reason, count) in output["summary"]["droppedByReason"].as_object().into_iter().flatten() { for (reason, count) in output["summary"]["droppedByReason"]
md.push_str(&format!("- `{reason}`: `{}`\n", count.as_u64().unwrap_or(0))); .as_object()
.into_iter()
.flatten()
{
md.push_str(&format!(
"- `{reason}`: `{}`\n",
count.as_u64().unwrap_or(0)
));
} }
if let Some(parent) = md_out.parent() { if let Some(parent) = md_out.parent() {
std::fs::create_dir_all(parent).map_err(|e| format!("create markdown parent failed: {e}"))?; std::fs::create_dir_all(parent)
.map_err(|e| format!("create markdown parent failed: {e}"))?;
} }
std::fs::write(&md_out, md).map_err(|e| format!("write markdown failed: {e}"))?; std::fs::write(&md_out, md).map_err(|e| format!("write markdown failed: {e}"))?;

View File

@ -26,11 +26,15 @@ fn run(argv: Vec<String>) -> Result<(), String> {
} }
"--tals-dir" => { "--tals-dir" => {
i += 1; i += 1;
tals_dir = Some(PathBuf::from(argv.get(i).ok_or("--tals-dir requires a value")?)); tals_dir = Some(PathBuf::from(
argv.get(i).ok_or("--tals-dir requires a value")?,
));
} }
"--meta-json" => { "--meta-json" => {
i += 1; i += 1;
meta_json = Some(PathBuf::from(argv.get(i).ok_or("--meta-json requires a value")?)); meta_json = Some(PathBuf::from(
argv.get(i).ok_or("--meta-json requires a value")?,
));
} }
other => return Err(format!("unknown argument: {other}\n\n{}", usage())), other => return Err(format!("unknown argument: {other}\n\n{}", usage())),
} }
@ -76,4 +80,3 @@ fn run(argv: Vec<String>) -> Result<(), String> {
.map_err(|e| format!("write meta json failed: {}: {e}", meta_json.display()))?; .map_err(|e| format!("write meta json failed: {}: {e}", meta_json.display()))?;
Ok(()) Ok(())
} }

View File

@ -1,7 +1,7 @@
use std::path::PathBuf; use std::path::PathBuf;
fn usage() -> &'static str { fn usage() -> &'static str {
"Usage: cir_materialize --cir <path> --static-root <path> --mirror-root <path> [--keep-db]" "Usage: cir_materialize --cir <path> (--static-root <path> | --raw-store-db <path>) --mirror-root <path> [--keep-db]"
} }
fn main() { fn main() {
@ -14,6 +14,7 @@ fn main() {
fn run(argv: Vec<String>) -> Result<(), String> { fn run(argv: Vec<String>) -> Result<(), String> {
let mut cir_path: Option<PathBuf> = None; let mut cir_path: Option<PathBuf> = None;
let mut static_root: Option<PathBuf> = None; let mut static_root: Option<PathBuf> = None;
let mut raw_store_db: Option<PathBuf> = None;
let mut mirror_root: Option<PathBuf> = None; let mut mirror_root: Option<PathBuf> = None;
let mut keep_db = false; let mut keep_db = false;
@ -31,6 +32,12 @@ fn run(argv: Vec<String>) -> Result<(), String> {
argv.get(i).ok_or("--static-root requires a value")?, argv.get(i).ok_or("--static-root requires a value")?,
)); ));
} }
"--raw-store-db" => {
i += 1;
raw_store_db = Some(PathBuf::from(
argv.get(i).ok_or("--raw-store-db requires a value")?,
));
}
"--mirror-root" => { "--mirror-root" => {
i += 1; i += 1;
mirror_root = Some(PathBuf::from( mirror_root = Some(PathBuf::from(
@ -44,16 +51,29 @@ fn run(argv: Vec<String>) -> Result<(), String> {
} }
let cir_path = cir_path.ok_or_else(|| format!("--cir is required\n\n{}", usage()))?; let cir_path = cir_path.ok_or_else(|| format!("--cir is required\n\n{}", usage()))?;
let static_root =
static_root.ok_or_else(|| format!("--static-root is required\n\n{}", usage()))?;
let mirror_root = let mirror_root =
mirror_root.ok_or_else(|| format!("--mirror-root is required\n\n{}", usage()))?; mirror_root.ok_or_else(|| format!("--mirror-root is required\n\n{}", usage()))?;
let backend_count = static_root.is_some() as u8 + raw_store_db.is_some() as u8;
if backend_count != 1 {
return Err(format!(
"must specify exactly one of --static-root or --raw-store-db\n\n{}",
usage()
));
}
let bytes = std::fs::read(&cir_path) let bytes = std::fs::read(&cir_path)
.map_err(|e| format!("read CIR failed: {}: {e}", cir_path.display()))?; .map_err(|e| format!("read CIR failed: {}: {e}", cir_path.display()))?;
let cir = rpki::cir::decode_cir(&bytes).map_err(|e| e.to_string())?; let cir = rpki::cir::decode_cir(&bytes).map_err(|e| e.to_string())?;
let result = rpki::cir::materialize_cir(&cir, &static_root, &mirror_root, true); let result = if let Some(static_root) = static_root {
rpki::cir::materialize_cir(&cir, &static_root, &mirror_root, true)
.map_err(|e| e.to_string())
} else if let Some(raw_store_db) = raw_store_db {
rpki::cir::materialize_cir_from_raw_store(&cir, &raw_store_db, &mirror_root, true)
.map_err(|e| e.to_string())
} else {
unreachable!("validated backend count")
};
match result { match result {
Ok(summary) => { Ok(summary) => {
eprintln!( eprintln!(
@ -74,4 +94,3 @@ fn run(argv: Vec<String>) -> Result<(), String> {
} }
} }
} }

View File

@ -1,11 +1,26 @@
use std::path::PathBuf; use std::path::PathBuf;
use rpki::cir::{encode_cir, write_bytes_to_static_pool, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1}; use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
write_bytes_to_static_pool,
};
use sha2::Digest; use sha2::Digest;
const USAGE: &str = "Usage: cir_ta_only_fixture --tal-path <path> --ta-path <path> --tal-uri <url> --validation-time <rfc3339> --cir-out <path> --static-root <path>"; const USAGE: &str = "Usage: cir_ta_only_fixture --tal-path <path> --ta-path <path> --tal-uri <url> --validation-time <rfc3339> --cir-out <path> --static-root <path>";
fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, String, time::OffsetDateTime, PathBuf, PathBuf), String> { fn parse_args(
argv: &[String],
) -> Result<
(
PathBuf,
PathBuf,
String,
time::OffsetDateTime,
PathBuf,
PathBuf,
),
String,
> {
let mut tal_path = None; let mut tal_path = None;
let mut ta_path = None; let mut ta_path = None;
let mut tal_uri = None; let mut tal_uri = None;
@ -17,11 +32,15 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, String, time::Offset
match argv[i].as_str() { match argv[i].as_str() {
"--tal-path" => { "--tal-path" => {
i += 1; i += 1;
tal_path = Some(PathBuf::from(argv.get(i).ok_or("--tal-path requires a value")?)); tal_path = Some(PathBuf::from(
argv.get(i).ok_or("--tal-path requires a value")?,
));
} }
"--ta-path" => { "--ta-path" => {
i += 1; i += 1;
ta_path = Some(PathBuf::from(argv.get(i).ok_or("--ta-path requires a value")?)); ta_path = Some(PathBuf::from(
argv.get(i).ok_or("--ta-path requires a value")?,
));
} }
"--tal-uri" => { "--tal-uri" => {
i += 1; i += 1;
@ -40,12 +59,15 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, String, time::Offset
} }
"--cir-out" => { "--cir-out" => {
i += 1; i += 1;
cir_out = Some(PathBuf::from(argv.get(i).ok_or("--cir-out requires a value")?)); cir_out = Some(PathBuf::from(
argv.get(i).ok_or("--cir-out requires a value")?,
));
} }
"--static-root" => { "--static-root" => {
i += 1; i += 1;
static_root = static_root = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--static-root requires a value")?)); argv.get(i).ok_or("--static-root requires a value")?,
));
} }
"-h" | "--help" => return Err(USAGE.to_string()), "-h" | "--help" => return Err(USAGE.to_string()),
other => return Err(format!("unknown argument: {other}\n\n{USAGE}")), other => return Err(format!("unknown argument: {other}\n\n{USAGE}")),

View File

@ -4,8 +4,7 @@ use std::path::PathBuf;
use rocksdb::{DB, IteratorMode, Options}; use rocksdb::{DB, IteratorMode, Options};
use rpki::storage::{ use rpki::storage::{
ALL_COLUMN_FAMILY_NAMES, CF_AUDIT_RULE_INDEX, CF_RAW_BY_HASH, CF_REPOSITORY_VIEW, ALL_COLUMN_FAMILY_NAMES, CF_AUDIT_RULE_INDEX, CF_RAW_BY_HASH, CF_REPOSITORY_VIEW,
CF_RRDP_SOURCE, CF_RRDP_SOURCE_MEMBER, CF_RRDP_URI_OWNER, CF_VCIR, CF_RRDP_SOURCE, CF_RRDP_SOURCE_MEMBER, CF_RRDP_URI_OWNER, CF_VCIR, column_family_descriptors,
column_family_descriptors,
}; };
#[derive(Clone, Copy, Debug, PartialEq, Eq)] #[derive(Clone, Copy, Debug, PartialEq, Eq)]
@ -77,9 +76,7 @@ fn cf_group(cf_name: &str) -> CfGroup {
match cf_name { match cf_name {
CF_REPOSITORY_VIEW | CF_RAW_BY_HASH => CfGroup::CurrentRepositoryView, CF_REPOSITORY_VIEW | CF_RAW_BY_HASH => CfGroup::CurrentRepositoryView,
CF_VCIR | CF_AUDIT_RULE_INDEX => CfGroup::CurrentValidationState, CF_VCIR | CF_AUDIT_RULE_INDEX => CfGroup::CurrentValidationState,
CF_RRDP_SOURCE | CF_RRDP_SOURCE_MEMBER | CF_RRDP_URI_OWNER => { CF_RRDP_SOURCE | CF_RRDP_SOURCE_MEMBER | CF_RRDP_URI_OWNER => CfGroup::CurrentRrdpState,
CfGroup::CurrentRrdpState
}
_ => CfGroup::LegacyCompatibility, _ => CfGroup::LegacyCompatibility,
} }
} }

View File

@ -56,10 +56,7 @@ fn parse_args() -> Result<Args, String> {
i += 1; i += 1;
} }
if out.bundle_root.is_none() || out.out.is_none() { if out.bundle_root.is_none() || out.out.is_none() {
return Err(format!( return Err(format!("--bundle-root and --out are required\n{}", usage()));
"--bundle-root and --out are required\n{}",
usage()
));
} }
Ok(out) Ok(out)
} }
@ -188,10 +185,7 @@ fn real_main() -> Result<(), String> {
.as_array() .as_array()
.ok_or("bundle missing deltaSequence.steps")? .ok_or("bundle missing deltaSequence.steps")?
{ {
let step_id = step["id"] let step_id = step["id"].as_str().ok_or("step missing id")?.to_string();
.as_str()
.ok_or("step missing id")?
.to_string();
let step_dir = path_join( let step_dir = path_join(
&rir_dir, &rir_dir,
step["relativePath"] step["relativePath"]
@ -210,8 +204,9 @@ fn real_main() -> Result<(), String> {
.as_str() .as_str()
.ok_or("step missing relativeTransitionLocksPath")?, .ok_or("step missing relativeTransitionLocksPath")?,
); );
let validation_time = load_validation_time(&delta_locks) let validation_time = load_validation_time(&delta_locks).map_err(|e| {
.map_err(|e| format!("load step validation time failed for {rir}/{step_id}: {e}"))?; format!("load step validation time failed for {rir}/{step_id}: {e}")
})?;
let start = Instant::now(); let start = Instant::now();
let step_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit( let step_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit(
&store, &store,

View File

@ -1,7 +1,7 @@
use rpki::bundle::{ use rpki::bundle::{
RirBundleMetadata, RecordingHttpFetcher, RecordingRsyncFetcher, RecordingHttpFetcher, RecordingRsyncFetcher, RirBundleMetadata,
build_single_rir_bundle_manifest, build_vap_compare_rows, build_vrp_compare_rows, build_single_rir_bundle_manifest, build_vap_compare_rows, build_vrp_compare_rows, sha256_hex,
sha256_hex, write_json, write_live_base_replay_bundle_inputs, write_live_bundle_rir_readme, write_json, write_live_base_replay_bundle_inputs, write_live_bundle_rir_readme,
write_live_bundle_top_readme, write_timing_json, write_vap_csv, write_vrp_csv, write_live_bundle_top_readme, write_timing_json, write_vap_csv, write_vrp_csv,
}; };
use rpki::ccr::{build_ccr_from_run, verify_content_info, write_ccr_file}; use rpki::ccr::{build_ccr_from_run, verify_content_info, write_ccr_file};
@ -54,15 +54,21 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--out-dir" => { "--out-dir" => {
i += 1; i += 1;
args.out_dir = Some(PathBuf::from(argv.get(i).ok_or("--out-dir requires a value")?)); args.out_dir = Some(PathBuf::from(
argv.get(i).ok_or("--out-dir requires a value")?,
));
} }
"--tal-path" => { "--tal-path" => {
i += 1; i += 1;
args.tal_path = Some(PathBuf::from(argv.get(i).ok_or("--tal-path requires a value")?)); args.tal_path = Some(PathBuf::from(
argv.get(i).ok_or("--tal-path requires a value")?,
));
} }
"--ta-path" => { "--ta-path" => {
i += 1; i += 1;
args.ta_path = Some(PathBuf::from(argv.get(i).ok_or("--ta-path requires a value")?)); args.ta_path = Some(PathBuf::from(
argv.get(i).ok_or("--ta-path requires a value")?,
));
} }
"--validation-time" => { "--validation-time" => {
i += 1; i += 1;
@ -90,8 +96,9 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--rsync-mirror-root" => { "--rsync-mirror-root" => {
i += 1; i += 1;
args.rsync_mirror_root = args.rsync_mirror_root = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--rsync-mirror-root requires a value")?)); argv.get(i).ok_or("--rsync-mirror-root requires a value")?,
));
} }
"--max-depth" => { "--max-depth" => {
i += 1; i += 1;
@ -113,7 +120,11 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--trust-anchor" => { "--trust-anchor" => {
i += 1; i += 1;
args.trust_anchor = Some(argv.get(i).ok_or("--trust-anchor requires a value")?.clone()); args.trust_anchor = Some(
argv.get(i)
.ok_or("--trust-anchor requires a value")?
.clone(),
);
} }
other => return Err(format!("unknown argument: {other}\n{}", usage())), other => return Err(format!("unknown argument: {other}\n{}", usage())),
} }
@ -147,11 +158,13 @@ fn run(args: Args) -> Result<PathBuf, String> {
fs::create_dir_all(&rir_dir) fs::create_dir_all(&rir_dir)
.map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?; .map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?;
let tal_bytes = fs::read(args.tal_path.as_ref().unwrap()) let tal_bytes =
.map_err(|e| format!("read tal failed: {e}"))?; fs::read(args.tal_path.as_ref().unwrap()).map_err(|e| format!("read tal failed: {e}"))?;
let ta_bytes = fs::read(args.ta_path.as_ref().unwrap()) let ta_bytes =
.map_err(|e| format!("read ta failed: {e}"))?; fs::read(args.ta_path.as_ref().unwrap()).map_err(|e| format!("read ta failed: {e}"))?;
let validation_time = args.validation_time.unwrap_or_else(time::OffsetDateTime::now_utc); let validation_time = args
.validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc);
let db_dir = out_root.join(".tmp").join(format!("{rir}-live-base-db")); let db_dir = out_root.join(".tmp").join(format!("{rir}-live-base-db"));
let replay_db_dir = out_root.join(".tmp").join(format!("{rir}-self-replay-db")); let replay_db_dir = out_root.join(".tmp").join(format!("{rir}-self-replay-db"));
@ -205,8 +218,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
.map_err(|e| format!("build ccr failed: {e}"))?; .map_err(|e| format!("build ccr failed: {e}"))?;
let base_ccr_path = rir_dir.join("base.ccr"); let base_ccr_path = rir_dir.join("base.ccr");
write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?; write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?;
let ccr_bytes = let ccr_bytes = fs::read(&base_ccr_path)
fs::read(&base_ccr_path).map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?; .map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?;
let decoded = rpki::ccr::decode_content_info(&ccr_bytes) let decoded = rpki::ccr::decode_content_info(&ccr_bytes)
.map_err(|e| format!("decode written ccr failed: {e}"))?; .map_err(|e| format!("decode written ccr failed: {e}"))?;
let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?; let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?;
@ -235,8 +248,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
&rsync.snapshot_fetches(), &rsync.snapshot_fetches(),
)?; )?;
let replay_store = let replay_store = RocksStore::open(&replay_db_dir)
RocksStore::open(&replay_db_dir).map_err(|e| format!("open self replay rocksdb failed: {e}"))?; .map_err(|e| format!("open self replay rocksdb failed: {e}"))?;
let replay_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( let replay_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&replay_store, &replay_store,
&Policy::default(), &Policy::default(),
@ -320,7 +333,11 @@ fn run(args: Args) -> Result<PathBuf, String> {
}), }),
)?; )?;
write_live_bundle_top_readme(&out_root.join("README.md"), &rir_normalized)?; write_live_bundle_top_readme(&out_root.join("README.md"), &rir_normalized)?;
write_live_bundle_rir_readme(&rir_dir.join("README.md"), &rir_normalized, &metadata.base_validation_time)?; write_live_bundle_rir_readme(
&rir_dir.join("README.md"),
&rir_normalized,
&metadata.base_validation_time,
)?;
write_json( write_json(
&out_root.join("bundle-manifest.json"), &out_root.join("bundle-manifest.json"),
&build_single_rir_bundle_manifest( &build_single_rir_bundle_manifest(

View File

@ -1,7 +1,7 @@
use rpki::bundle::{ use rpki::bundle::{
RecordingHttpFetcher, RecordingRsyncFetcher, build_single_rir_bundle_manifest, RecordingHttpFetcher, RecordingRsyncFetcher, build_single_rir_bundle_manifest,
build_vap_compare_rows, build_vrp_compare_rows, copy_dir_all, load_validation_time, build_vap_compare_rows, build_vrp_compare_rows, copy_dir_all, load_validation_time, sha256_hex,
sha256_hex, write_json, write_live_delta_replay_bundle_inputs, write_vap_csv, write_vrp_csv, write_json, write_live_delta_replay_bundle_inputs, write_vap_csv, write_vrp_csv,
}; };
use rpki::ccr::{build_ccr_from_run, decode_content_info, verify_content_info, write_ccr_file}; use rpki::ccr::{build_ccr_from_run, decode_content_info, verify_content_info, write_ccr_file};
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig}; use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
@ -54,12 +54,15 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--base-bundle-dir" => { "--base-bundle-dir" => {
i += 1; i += 1;
args.base_bundle_dir = args.base_bundle_dir = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--base-bundle-dir requires a value")?)); argv.get(i).ok_or("--base-bundle-dir requires a value")?,
));
} }
"--out-dir" => { "--out-dir" => {
i += 1; i += 1;
args.out_dir = Some(PathBuf::from(argv.get(i).ok_or("--out-dir requires a value")?)); args.out_dir = Some(PathBuf::from(
argv.get(i).ok_or("--out-dir requires a value")?,
));
} }
"--validation-time" => { "--validation-time" => {
i += 1; i += 1;
@ -87,8 +90,9 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--rsync-mirror-root" => { "--rsync-mirror-root" => {
i += 1; i += 1;
args.rsync_mirror_root = args.rsync_mirror_root = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--rsync-mirror-root requires a value")?)); argv.get(i).ok_or("--rsync-mirror-root requires a value")?,
));
} }
"--max-depth" => { "--max-depth" => {
i += 1; i += 1;
@ -110,7 +114,11 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--trust-anchor" => { "--trust-anchor" => {
i += 1; i += 1;
args.trust_anchor = Some(argv.get(i).ok_or("--trust-anchor requires a value")?.clone()); args.trust_anchor = Some(
argv.get(i)
.ok_or("--trust-anchor requires a value")?
.clone(),
);
} }
other => return Err(format!("unknown argument: {other}\n{}", usage())), other => return Err(format!("unknown argument: {other}\n{}", usage())),
} }
@ -193,7 +201,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
let base_root = args.base_bundle_dir.as_ref().unwrap(); let base_root = args.base_bundle_dir.as_ref().unwrap();
let base_rir_dir = base_root.join(&rir_normalized); let base_rir_dir = base_root.join(&rir_normalized);
if !base_rir_dir.is_dir() { if !base_rir_dir.is_dir() {
return Err(format!("base bundle rir dir not found: {}", base_rir_dir.display())); return Err(format!(
"base bundle rir dir not found: {}",
base_rir_dir.display()
));
} }
if out_root.exists() { if out_root.exists() {
fs::remove_dir_all(out_root) fs::remove_dir_all(out_root)
@ -206,12 +217,14 @@ fn run(args: Args) -> Result<PathBuf, String> {
.trust_anchor .trust_anchor
.clone() .clone()
.unwrap_or_else(|| rir_normalized.clone()); .unwrap_or_else(|| rir_normalized.clone());
let tal_bytes = let tal_bytes = fs::read(rir_dir.join("tal.tal"))
fs::read(rir_dir.join("tal.tal")).map_err(|e| format!("read tal from base bundle failed: {e}"))?; .map_err(|e| format!("read tal from base bundle failed: {e}"))?;
let ta_bytes = let ta_bytes = fs::read(rir_dir.join("ta.cer"))
fs::read(rir_dir.join("ta.cer")).map_err(|e| format!("read ta from base bundle failed: {e}"))?; .map_err(|e| format!("read ta from base bundle failed: {e}"))?;
let base_validation_time = load_validation_time(&rir_dir.join("base-locks.json"))?; let base_validation_time = load_validation_time(&rir_dir.join("base-locks.json"))?;
let target_validation_time = args.validation_time.unwrap_or_else(time::OffsetDateTime::now_utc); let target_validation_time = args
.validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc);
let target_store_dir = out_root.join(".tmp").join(format!("{rir}-live-target-db")); let target_store_dir = out_root.join(".tmp").join(format!("{rir}-live-target-db"));
let self_replay_dir = out_root.join(".tmp").join(format!("{rir}-self-delta-db")); let self_replay_dir = out_root.join(".tmp").join(format!("{rir}-self-delta-db"));
@ -221,8 +234,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
fs::create_dir_all(parent) fs::create_dir_all(parent)
.map_err(|e| format!("create tmp dir failed: {}: {e}", parent.display()))?; .map_err(|e| format!("create tmp dir failed: {}: {e}", parent.display()))?;
} }
let target_store = let target_store = RocksStore::open(&target_store_dir)
RocksStore::open(&target_store_dir).map_err(|e| format!("open target rocksdb failed: {e}"))?; .map_err(|e| format!("open target rocksdb failed: {e}"))?;
let _base = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( let _base = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&target_store, &target_store,
@ -282,17 +295,19 @@ fn run(args: Args) -> Result<PathBuf, String> {
) )
.map_err(|e| format!("build delta ccr failed: {e}"))?; .map_err(|e| format!("build delta ccr failed: {e}"))?;
let delta_ccr_path = rir_dir.join("delta.ccr"); let delta_ccr_path = rir_dir.join("delta.ccr");
write_ccr_file(&delta_ccr_path, &delta_ccr).map_err(|e| format!("write delta ccr failed: {e}"))?; write_ccr_file(&delta_ccr_path, &delta_ccr)
let delta_ccr_bytes = .map_err(|e| format!("write delta ccr failed: {e}"))?;
fs::read(&delta_ccr_path).map_err(|e| format!("read delta ccr failed: {}: {e}", delta_ccr_path.display()))?; let delta_ccr_bytes = fs::read(&delta_ccr_path)
let delta_decoded = .map_err(|e| format!("read delta ccr failed: {}: {e}", delta_ccr_path.display()))?;
decode_content_info(&delta_ccr_bytes).map_err(|e| format!("decode delta ccr failed: {e}"))?; let delta_decoded = decode_content_info(&delta_ccr_bytes)
.map_err(|e| format!("decode delta ccr failed: {e}"))?;
let delta_verify = let delta_verify =
verify_content_info(&delta_decoded).map_err(|e| format!("verify delta ccr failed: {e}"))?; verify_content_info(&delta_decoded).map_err(|e| format!("verify delta ccr failed: {e}"))?;
let delta_vrp_rows = build_vrp_compare_rows(&target_out.tree.vrps, &trust_anchor); let delta_vrp_rows = build_vrp_compare_rows(&target_out.tree.vrps, &trust_anchor);
let delta_vap_rows = build_vap_compare_rows(&target_out.tree.aspas, &trust_anchor); let delta_vap_rows = build_vap_compare_rows(&target_out.tree.aspas, &trust_anchor);
let (ccr_vrps, ccr_vaps) = rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?; let (ccr_vrps, ccr_vaps) =
rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
if delta_vrp_rows != ccr_vrps { if delta_vrp_rows != ccr_vrps {
return Err("record-delta.csv compare view does not match delta.ccr".to_string()); return Err("record-delta.csv compare view does not match delta.ccr".to_string());
} }
@ -312,8 +327,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
&rsync.snapshot_fetches(), &rsync.snapshot_fetches(),
)?; )?;
let self_store = let self_store = RocksStore::open(&self_replay_dir)
RocksStore::open(&self_replay_dir).map_err(|e| format!("open self replay db failed: {e}"))?; .map_err(|e| format!("open self replay db failed: {e}"))?;
let replay_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( let replay_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
&self_store, &self_store,
&Policy::default(), &Policy::default(),
@ -355,7 +370,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
)?; )?;
let mut bundle_json: serde_json::Value = serde_json::from_slice( let mut bundle_json: serde_json::Value = serde_json::from_slice(
&fs::read(rir_dir.join("bundle.json")).map_err(|e| format!("read base bundle.json failed: {e}"))?, &fs::read(rir_dir.join("bundle.json"))
.map_err(|e| format!("read base bundle.json failed: {e}"))?,
) )
.map_err(|e| format!("parse base bundle.json failed: {e}"))?; .map_err(|e| format!("parse base bundle.json failed: {e}"))?;
bundle_json["deltaValidationTime"] = serde_json::Value::String( bundle_json["deltaValidationTime"] = serde_json::Value::String(

View File

@ -65,19 +65,27 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--out-dir" => { "--out-dir" => {
i += 1; i += 1;
args.out_dir = Some(PathBuf::from(argv.get(i).ok_or("--out-dir requires a value")?)); args.out_dir = Some(PathBuf::from(
argv.get(i).ok_or("--out-dir requires a value")?,
));
} }
"--tal-path" => { "--tal-path" => {
i += 1; i += 1;
args.tal_path = Some(PathBuf::from(argv.get(i).ok_or("--tal-path requires a value")?)); args.tal_path = Some(PathBuf::from(
argv.get(i).ok_or("--tal-path requires a value")?,
));
} }
"--ta-path" => { "--ta-path" => {
i += 1; i += 1;
args.ta_path = Some(PathBuf::from(argv.get(i).ok_or("--ta-path requires a value")?)); args.ta_path = Some(PathBuf::from(
argv.get(i).ok_or("--ta-path requires a value")?,
));
} }
"--base-validation-time" => { "--base-validation-time" => {
i += 1; i += 1;
let value = argv.get(i).ok_or("--base-validation-time requires a value")?; let value = argv
.get(i)
.ok_or("--base-validation-time requires a value")?;
args.base_validation_time = Some( args.base_validation_time = Some(
time::OffsetDateTime::parse(value, &Rfc3339) time::OffsetDateTime::parse(value, &Rfc3339)
.map_err(|e| format!("invalid --base-validation-time: {e}"))?, .map_err(|e| format!("invalid --base-validation-time: {e}"))?,
@ -117,8 +125,9 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--rsync-mirror-root" => { "--rsync-mirror-root" => {
i += 1; i += 1;
args.rsync_mirror_root = args.rsync_mirror_root = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--rsync-mirror-root requires a value")?)); argv.get(i).ok_or("--rsync-mirror-root requires a value")?,
));
} }
"--max-depth" => { "--max-depth" => {
i += 1; i += 1;
@ -140,7 +149,11 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--trust-anchor" => { "--trust-anchor" => {
i += 1; i += 1;
args.trust_anchor = Some(argv.get(i).ok_or("--trust-anchor requires a value")?.clone()); args.trust_anchor = Some(
argv.get(i)
.ok_or("--trust-anchor requires a value")?
.clone(),
);
} }
"--keep-db" => args.keep_db = true, "--keep-db" => args.keep_db = true,
"--capture-inputs-only" => args.capture_inputs_only = true, "--capture-inputs-only" => args.capture_inputs_only = true,
@ -164,7 +177,12 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
Ok(args) Ok(args)
} }
fn write_v2_top_readme(path: &Path, rir: &str, delta_count: usize, delta_interval_secs: u64) -> Result<(), String> { fn write_v2_top_readme(
path: &Path,
rir: &str,
delta_count: usize,
delta_interval_secs: u64,
) -> Result<(), String> {
if let Some(parent) = path.parent() { if let Some(parent) = path.parent() {
fs::create_dir_all(parent) fs::create_dir_all(parent)
.map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
@ -203,11 +221,19 @@ fn ensure_recorded_target_snapshots_for_locks(
previous_locks_path: &Path, previous_locks_path: &Path,
http: &RecordingHttpFetcher<BlockingHttpFetcher>, http: &RecordingHttpFetcher<BlockingHttpFetcher>,
) -> Result<(), String> { ) -> Result<(), String> {
let previous_locks: serde_json::Value = serde_json::from_slice( let previous_locks: serde_json::Value =
&fs::read(previous_locks_path) serde_json::from_slice(&fs::read(previous_locks_path).map_err(|e| {
.map_err(|e| format!("read previous locks failed: {}: {e}", previous_locks_path.display()))?, format!(
"read previous locks failed: {}: {e}",
previous_locks_path.display()
) )
.map_err(|e| format!("parse previous locks failed: {}: {e}", previous_locks_path.display()))?; })?)
.map_err(|e| {
format!(
"parse previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?;
let previous_rrdp = previous_locks let previous_rrdp = previous_locks
.get("rrdp") .get("rrdp")
.and_then(|v| v.as_object()) .and_then(|v| v.as_object())
@ -251,9 +277,7 @@ fn ensure_recorded_target_snapshots_for_locks(
if let Err(err) = http.fetch(snapshot_uri) { if let Err(err) = http.fetch(snapshot_uri) {
eprintln!( eprintln!(
"[sequence] warning: fetch target snapshot failed notify_uri={} snapshot_uri={} err={}", "[sequence] warning: fetch target snapshot failed notify_uri={} snapshot_uri={} err={}",
notify_uri, notify_uri, snapshot_uri, err
snapshot_uri,
err
); );
} }
} }
@ -281,20 +305,30 @@ fn run(args: Args) -> Result<PathBuf, String> {
} }
let rir_dir = out_root.join(&rir_normalized); let rir_dir = out_root.join(&rir_normalized);
let delta_steps_root = rir_dir.join("delta-steps"); let delta_steps_root = rir_dir.join("delta-steps");
fs::create_dir_all(&delta_steps_root) fs::create_dir_all(&delta_steps_root).map_err(|e| {
.map_err(|e| format!("create delta steps dir failed: {}: {e}", delta_steps_root.display()))?; format!(
"create delta steps dir failed: {}: {e}",
delta_steps_root.display()
)
})?;
let tal_bytes = fs::read(args.tal_path.as_ref().unwrap()) let tal_bytes =
.map_err(|e| format!("read tal failed: {e}"))?; fs::read(args.tal_path.as_ref().unwrap()).map_err(|e| format!("read tal failed: {e}"))?;
let ta_bytes = fs::read(args.ta_path.as_ref().unwrap()) let ta_bytes =
.map_err(|e| format!("read ta failed: {e}"))?; fs::read(args.ta_path.as_ref().unwrap()).map_err(|e| format!("read ta failed: {e}"))?;
fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?; fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?;
fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?; fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?;
let base_validation_time = args.base_validation_time.unwrap_or_else(time::OffsetDateTime::now_utc); let base_validation_time = args
.base_validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc);
let work_db_dir = out_root.join(".tmp").join(format!("{rir}-sequence-work-db")); let work_db_dir = out_root
let base_self_replay_dir = out_root.join(".tmp").join(format!("{rir}-sequence-base-self-replay-db")); .join(".tmp")
.join(format!("{rir}-sequence-work-db"));
let base_self_replay_dir = out_root
.join(".tmp")
.join(format!("{rir}-sequence-base-self-replay-db"));
let _ = fs::remove_dir_all(&work_db_dir); let _ = fs::remove_dir_all(&work_db_dir);
let _ = fs::remove_dir_all(&base_self_replay_dir); let _ = fs::remove_dir_all(&base_self_replay_dir);
if let Some(parent) = work_db_dir.parent() { if let Some(parent) = work_db_dir.parent() {
@ -356,9 +390,7 @@ fn run(args: Args) -> Result<PathBuf, String> {
)?; )?;
eprintln!( eprintln!(
"[sequence] base input materialization done rir={} rrdp_repos={} rsync_modules={}", "[sequence] base input materialization done rir={} rrdp_repos={} rsync_modules={}",
rir_normalized, rir_normalized, base_capture.rrdp_repo_count, base_capture.rsync_module_count
base_capture.rrdp_repo_count,
base_capture.rsync_module_count
); );
let base_ccr_path = rir_dir.join("base.ccr"); let base_ccr_path = rir_dir.join("base.ccr");
let base_vrps_path = rir_dir.join("base-vrps.csv"); let base_vrps_path = rir_dir.join("base-vrps.csv");
@ -402,8 +434,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
.map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr_path.display()))?; .map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr_path.display()))?;
let base_decoded = decode_content_info(&base_ccr_bytes) let base_decoded = decode_content_info(&base_ccr_bytes)
.map_err(|e| format!("decode base ccr failed: {e}"))?; .map_err(|e| format!("decode base ccr failed: {e}"))?;
let base_verify = let base_verify = verify_content_info(&base_decoded)
verify_content_info(&base_decoded).map_err(|e| format!("verify base ccr failed: {e}"))?; .map_err(|e| format!("verify base ccr failed: {e}"))?;
let base_vrp_rows = build_vrp_compare_rows(&base_out.tree.vrps, &trust_anchor); let base_vrp_rows = build_vrp_compare_rows(&base_out.tree.vrps, &trust_anchor);
let base_vap_rows = build_vap_compare_rows(&base_out.tree.aspas, &trust_anchor); let base_vap_rows = build_vap_compare_rows(&base_out.tree.aspas, &trust_anchor);
let (base_ccr_vrps, base_ccr_vaps) = let (base_ccr_vrps, base_ccr_vaps) =
@ -464,7 +496,9 @@ fn run(args: Args) -> Result<PathBuf, String> {
let mut delta_steps = Vec::new(); let mut delta_steps = Vec::new();
let mut previous_locks_path = rir_dir.join("base-locks.json"); let mut previous_locks_path = rir_dir.join("base-locks.json");
let mut previous_ref = "base".to_string(); let mut previous_ref = "base".to_string();
let sequence_self_replay_dir = out_root.join(".tmp").join(format!("{rir}-sequence-self-replay-db")); let sequence_self_replay_dir = out_root
.join(".tmp")
.join(format!("{rir}-sequence-self-replay-db"));
let _ = fs::remove_dir_all(&sequence_self_replay_dir); let _ = fs::remove_dir_all(&sequence_self_replay_dir);
let sequence_replay_store = if args.capture_inputs_only { let sequence_replay_store = if args.capture_inputs_only {
None None
@ -499,7 +533,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
fs::create_dir_all(&step_dir) fs::create_dir_all(&step_dir)
.map_err(|e| format!("create step dir failed: {}: {e}", step_dir.display()))?; .map_err(|e| format!("create step dir failed: {}: {e}", step_dir.display()))?;
let step_validation_time = time::OffsetDateTime::now_utc(); let step_validation_time = time::OffsetDateTime::now_utc();
eprintln!("[sequence] step live run start rir={} step={}", rir_normalized, step_id); eprintln!(
"[sequence] step live run start rir={} step={}",
rir_normalized, step_id
);
let step_http = RecordingHttpFetcher::new( let step_http = RecordingHttpFetcher::new(
BlockingHttpFetcher::new(HttpFetcherConfig { BlockingHttpFetcher::new(HttpFetcherConfig {
timeout: Duration::from_secs(args.http_timeout_secs), timeout: Duration::from_secs(args.http_timeout_secs),
@ -542,7 +579,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
); );
ensure_recorded_target_snapshots_for_locks(&store, &previous_locks_path, &step_http)?; ensure_recorded_target_snapshots_for_locks(&store, &previous_locks_path, &step_http)?;
eprintln!("[sequence] step output generation phase start rir={} step={}", rir_normalized, step_id); eprintln!(
"[sequence] step output generation phase start rir={} step={}",
rir_normalized, step_id
);
let delta_ccr_path = step_dir.join("delta.ccr"); let delta_ccr_path = step_dir.join("delta.ccr");
let delta_vrps_path = step_dir.join("record-delta.csv"); let delta_vrps_path = step_dir.join("record-delta.csv");
let delta_vaps_path = step_dir.join("record-delta-vaps.csv"); let delta_vaps_path = step_dir.join("record-delta-vaps.csv");
@ -550,8 +590,7 @@ fn run(args: Args) -> Result<PathBuf, String> {
if args.capture_inputs_only { if args.capture_inputs_only {
eprintln!( eprintln!(
"[sequence] step CCR/self-replay skipped rir={} step={}", "[sequence] step CCR/self-replay skipped rir={} step={}",
rir_normalized, rir_normalized, step_id
step_id
); );
( (
String::new(), String::new(),
@ -573,7 +612,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
false, false,
) )
} else { } else {
eprintln!("[sequence] step CCR/self-replay start rir={} step={}", rir_normalized, step_id); eprintln!(
"[sequence] step CCR/self-replay start rir={} step={}",
rir_normalized, step_id
);
let delta_ccr = build_ccr_from_run( let delta_ccr = build_ccr_from_run(
&store, &store,
&[step_out.discovery.trust_anchor.clone()], &[step_out.discovery.trust_anchor.clone()],
@ -597,15 +639,22 @@ fn run(args: Args) -> Result<PathBuf, String> {
let (ccr_vrps, ccr_vaps) = let (ccr_vrps, ccr_vaps) =
rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?; rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
if delta_vrp_rows != ccr_vrps { if delta_vrp_rows != ccr_vrps {
return Err(format!("{step_id} VRP compare view does not match delta.ccr")); return Err(format!(
"{step_id} VRP compare view does not match delta.ccr"
));
} }
if delta_vap_rows != ccr_vaps { if delta_vap_rows != ccr_vaps {
return Err(format!("{step_id} VAP compare view does not match delta.ccr")); return Err(format!(
"{step_id} VAP compare view does not match delta.ccr"
));
} }
write_vrp_csv(&delta_vrps_path, &delta_vrp_rows)?; write_vrp_csv(&delta_vrps_path, &delta_vrp_rows)?;
write_vap_csv(&delta_vaps_path, &delta_vap_rows)?; write_vap_csv(&delta_vaps_path, &delta_vap_rows)?;
let step_replay_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit( let step_replay_out =
sequence_replay_store.as_ref().expect("sequence replay store"), run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit(
sequence_replay_store
.as_ref()
.expect("sequence replay store"),
&Policy::default(), &Policy::default(),
&tal_bytes, &tal_bytes,
&ta_bytes, &ta_bytes,
@ -621,7 +670,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
) )
.map_err(|e| format!("sequence self replay failed for {step_id}: {e}"))?; .map_err(|e| format!("sequence self replay failed for {step_id}: {e}"))?;
let step_self_replay_ok = let step_self_replay_ok =
build_vrp_compare_rows(&step_replay_out.tree.vrps, &trust_anchor) == delta_vrp_rows build_vrp_compare_rows(&step_replay_out.tree.vrps, &trust_anchor)
== delta_vrp_rows
&& build_vap_compare_rows(&step_replay_out.tree.aspas, &trust_anchor) && build_vap_compare_rows(&step_replay_out.tree.aspas, &trust_anchor)
== delta_vap_rows; == delta_vap_rows;
let output = ( let output = (
@ -631,11 +681,17 @@ fn run(args: Args) -> Result<PathBuf, String> {
delta_verify, delta_verify,
step_self_replay_ok, step_self_replay_ok,
); );
eprintln!("[sequence] step CCR/self-replay done rir={} step={}", rir_normalized, step_id); eprintln!(
"[sequence] step CCR/self-replay done rir={} step={}",
rir_normalized, step_id
);
output output
}; };
eprintln!("[sequence] step input materialization start rir={} step={}", rir_normalized, step_id); eprintln!(
"[sequence] step input materialization start rir={} step={}",
rir_normalized, step_id
);
let delta_capture = write_live_delta_replay_step_inputs( let delta_capture = write_live_delta_replay_step_inputs(
&step_dir, &step_dir,
&rir_normalized, &rir_normalized,
@ -816,7 +872,12 @@ fn run(args: Args) -> Result<PathBuf, String> {
} }
}), }),
)?; )?;
write_v2_top_readme(&out_root.join("README.md"), &rir_normalized, args.delta_count, args.delta_interval_secs)?; write_v2_top_readme(
&out_root.join("README.md"),
&rir_normalized,
args.delta_count,
args.delta_interval_secs,
)?;
write_v2_rir_readme( write_v2_rir_readme(
&rir_dir.join("README.md"), &rir_dir.join("README.md"),
&rir_normalized, &rir_normalized,

View File

@ -48,35 +48,49 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--out-dir" => { "--out-dir" => {
i += 1; i += 1;
args.out_dir = Some(PathBuf::from(argv.get(i).ok_or("--out-dir requires a value")?)); args.out_dir = Some(PathBuf::from(
argv.get(i).ok_or("--out-dir requires a value")?,
));
} }
"--tal-path" => { "--tal-path" => {
i += 1; i += 1;
args.tal_path = Some(PathBuf::from(argv.get(i).ok_or("--tal-path requires a value")?)); args.tal_path = Some(PathBuf::from(
argv.get(i).ok_or("--tal-path requires a value")?,
));
} }
"--ta-path" => { "--ta-path" => {
i += 1; i += 1;
args.ta_path = Some(PathBuf::from(argv.get(i).ok_or("--ta-path requires a value")?)); args.ta_path = Some(PathBuf::from(
argv.get(i).ok_or("--ta-path requires a value")?,
));
} }
"--payload-replay-archive" => { "--payload-replay-archive" => {
i += 1; i += 1;
args.payload_replay_archive = args.payload_replay_archive = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--payload-replay-archive requires a value")?)); argv.get(i)
.ok_or("--payload-replay-archive requires a value")?,
));
} }
"--payload-replay-locks" => { "--payload-replay-locks" => {
i += 1; i += 1;
args.payload_replay_locks = args.payload_replay_locks = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--payload-replay-locks requires a value")?)); argv.get(i)
.ok_or("--payload-replay-locks requires a value")?,
));
} }
"--payload-delta-archive" => { "--payload-delta-archive" => {
i += 1; i += 1;
args.payload_delta_archive = args.payload_delta_archive = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--payload-delta-archive requires a value")?)); argv.get(i)
.ok_or("--payload-delta-archive requires a value")?,
));
} }
"--payload-delta-locks" => { "--payload-delta-locks" => {
i += 1; i += 1;
args.payload_delta_locks = args.payload_delta_locks = Some(PathBuf::from(
Some(PathBuf::from(argv.get(i).ok_or("--payload-delta-locks requires a value")?)); argv.get(i)
.ok_or("--payload-delta-locks requires a value")?,
));
} }
"--validation-time" => { "--validation-time" => {
i += 1; i += 1;
@ -106,7 +120,11 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
} }
"--trust-anchor" => { "--trust-anchor" => {
i += 1; i += 1;
args.trust_anchor = Some(argv.get(i).ok_or("--trust-anchor requires a value")?.clone()); args.trust_anchor = Some(
argv.get(i)
.ok_or("--trust-anchor requires a value")?
.clone(),
);
} }
other => return Err(format!("unknown argument: {other}\n{}", usage())), other => return Err(format!("unknown argument: {other}\n{}", usage())),
} }
@ -155,7 +173,9 @@ fn sha256_hex(bytes: &[u8]) -> String {
fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> { fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(dst) fs::create_dir_all(dst)
.map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?; .map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
for entry in fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))? { for entry in
fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))?
{
let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", src.display()))?; let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", src.display()))?;
let ty = entry let ty = entry
.file_type() .file_type()
@ -168,8 +188,13 @@ fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(parent) fs::create_dir_all(parent)
.map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
} }
fs::copy(entry.path(), &to) fs::copy(entry.path(), &to).map_err(|e| {
.map_err(|e| format!("copy failed: {} -> {}: {e}", entry.path().display(), to.display()))?; format!(
"copy failed: {} -> {}: {e}",
entry.path().display(),
to.display()
)
})?;
} }
} }
Ok(()) Ok(())
@ -222,7 +247,10 @@ fn write_timing_json(
) )
} }
fn rewrite_delta_base_locks_sha(delta_root: &Path, emitted_base_locks_sha256: &str) -> Result<(), String> { fn rewrite_delta_base_locks_sha(
delta_root: &Path,
emitted_base_locks_sha256: &str,
) -> Result<(), String> {
let delta_locks = delta_root.join("locks-delta.json"); let delta_locks = delta_root.join("locks-delta.json");
if delta_locks.is_file() { if delta_locks.is_file() {
let mut json: serde_json::Value = serde_json::from_slice( let mut json: serde_json::Value = serde_json::from_slice(
@ -243,7 +271,8 @@ fn rewrite_delta_base_locks_sha(delta_root: &Path, emitted_base_locks_sha256: &s
if archive_root.is_dir() { if archive_root.is_dir() {
for path in walk_json_files_named(&archive_root, "base.json")? { for path in walk_json_files_named(&archive_root, "base.json")? {
let mut json: serde_json::Value = serde_json::from_slice( let mut json: serde_json::Value = serde_json::from_slice(
&fs::read(&path).map_err(|e| format!("read base.json failed: {}: {e}", path.display()))?, &fs::read(&path)
.map_err(|e| format!("read base.json failed: {}: {e}", path.display()))?,
) )
.map_err(|e| format!("parse base.json failed: {}: {e}", path.display()))?; .map_err(|e| format!("parse base.json failed: {}: {e}", path.display()))?;
json.as_object_mut() json.as_object_mut()
@ -265,8 +294,11 @@ fn walk_json_files_named(root: &Path, name: &str) -> Result<Vec<PathBuf>, String
} }
let mut stack = vec![root.to_path_buf()]; let mut stack = vec![root.to_path_buf()];
while let Some(dir) = stack.pop() { while let Some(dir) = stack.pop() {
for entry in fs::read_dir(&dir).map_err(|e| format!("read_dir failed: {}: {e}", dir.display()))? { for entry in
let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", dir.display()))?; fs::read_dir(&dir).map_err(|e| format!("read_dir failed: {}: {e}", dir.display()))?
{
let entry =
entry.map_err(|e| format!("read_dir entry failed: {}: {e}", dir.display()))?;
let path = entry.path(); let path = entry.path();
let ty = entry let ty = entry
.file_type() .file_type()
@ -308,8 +340,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
fs::create_dir_all(&rir_dir) fs::create_dir_all(&rir_dir)
.map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?; .map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?;
let tal_bytes = fs::read(tal_path).map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?; let tal_bytes =
let ta_bytes = fs::read(ta_path).map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?; fs::read(tal_path).map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?;
let ta_bytes =
fs::read(ta_path).map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?;
let db_dir = run_root.join(".tmp").join(format!("{rir}-base-db")); let db_dir = run_root.join(".tmp").join(format!("{rir}-base-db"));
if db_dir.exists() { if db_dir.exists() {
@ -354,7 +388,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?; write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?;
let ccr_bytes = fs::read(&base_ccr_path) let ccr_bytes = fs::read(&base_ccr_path)
.map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?; .map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?;
let decoded = decode_content_info(&ccr_bytes).map_err(|e| format!("decode written ccr failed: {e}"))?; let decoded =
decode_content_info(&ccr_bytes).map_err(|e| format!("decode written ccr failed: {e}"))?;
let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?; let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?;
let vrp_rows = build_vrp_compare_rows(&out.tree.vrps, &trust_anchor); let vrp_rows = build_vrp_compare_rows(&out.tree.vrps, &trust_anchor);
@ -385,10 +420,13 @@ fn run(args: Args) -> Result<PathBuf, String> {
); );
let emitted_base_locks_path = rir_dir.join("base-locks.json"); let emitted_base_locks_path = rir_dir.join("base-locks.json");
write_json(&emitted_base_locks_path, &base_locks_json)?; write_json(&emitted_base_locks_path, &base_locks_json)?;
let emitted_base_locks_sha256 = sha256_hex( let emitted_base_locks_sha256 =
&fs::read(&emitted_base_locks_path) sha256_hex(&fs::read(&emitted_base_locks_path).map_err(|e| {
.map_err(|e| format!("read emitted base locks failed: {}: {e}", emitted_base_locks_path.display()))?, format!(
); "read emitted base locks failed: {}: {e}",
emitted_base_locks_path.display()
)
})?);
if let Some(delta_archive) = args.payload_delta_archive.as_ref() { if let Some(delta_archive) = args.payload_delta_archive.as_ref() {
copy_dir_all(delta_archive, &rir_dir.join("payload-delta-archive"))?; copy_dir_all(delta_archive, &rir_dir.join("payload-delta-archive"))?;
@ -418,10 +456,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
rewrite_delta_base_locks_sha(&rir_dir, &emitted_base_locks_sha256)?; rewrite_delta_base_locks_sha(&rir_dir, &emitted_base_locks_sha256)?;
} }
fs::write(rir_dir.join("tal.tal"), &tal_bytes) fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?;
.map_err(|e| format!("write tal failed: {e}"))?; fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?;
fs::write(rir_dir.join("ta.cer"), &ta_bytes)
.map_err(|e| format!("write ta failed: {e}"))?;
let mut metadata = RirBundleMetadata { let mut metadata = RirBundleMetadata {
schema_version: "20260330-v1".to_string(), schema_version: "20260330-v1".to_string(),
@ -484,15 +520,19 @@ fn run(args: Args) -> Result<PathBuf, String> {
) { ) {
let delta_db_dir = run_root.join(".tmp").join(format!("{rir}-delta-db")); let delta_db_dir = run_root.join(".tmp").join(format!("{rir}-delta-db"));
if delta_db_dir.exists() { if delta_db_dir.exists() {
fs::remove_dir_all(&delta_db_dir) fs::remove_dir_all(&delta_db_dir).map_err(|e| {
.map_err(|e| format!("remove old delta db failed: {}: {e}", delta_db_dir.display()))?; format!(
"remove old delta db failed: {}: {e}",
delta_db_dir.display()
)
})?;
} }
if let Some(parent) = delta_db_dir.parent() { if let Some(parent) = delta_db_dir.parent() {
fs::create_dir_all(parent) fs::create_dir_all(parent)
.map_err(|e| format!("create delta db parent failed: {}: {e}", parent.display()))?; .map_err(|e| format!("create delta db parent failed: {}: {e}", parent.display()))?;
} }
let delta_store = let delta_store = RocksStore::open(&delta_db_dir)
RocksStore::open(&delta_db_dir).map_err(|e| format!("open delta rocksdb failed: {e}"))?; .map_err(|e| format!("open delta rocksdb failed: {e}"))?;
let delta_started = Instant::now(); let delta_started = Instant::now();
let delta_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( let delta_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
&delta_store, &delta_store,
@ -527,16 +567,21 @@ fn run(args: Args) -> Result<PathBuf, String> {
let delta_ccr_path = rir_dir.join("delta.ccr"); let delta_ccr_path = rir_dir.join("delta.ccr");
write_ccr_file(&delta_ccr_path, &delta_ccr) write_ccr_file(&delta_ccr_path, &delta_ccr)
.map_err(|e| format!("write delta ccr failed: {e}"))?; .map_err(|e| format!("write delta ccr failed: {e}"))?;
let delta_ccr_bytes = fs::read(&delta_ccr_path) let delta_ccr_bytes = fs::read(&delta_ccr_path).map_err(|e| {
.map_err(|e| format!("read written delta ccr failed: {}: {e}", delta_ccr_path.display()))?; format!(
"read written delta ccr failed: {}: {e}",
delta_ccr_path.display()
)
})?;
let delta_decoded = decode_content_info(&delta_ccr_bytes) let delta_decoded = decode_content_info(&delta_ccr_bytes)
.map_err(|e| format!("decode written delta ccr failed: {e}"))?; .map_err(|e| format!("decode written delta ccr failed: {e}"))?;
let delta_verify = let delta_verify = verify_content_info(&delta_decoded)
verify_content_info(&delta_decoded).map_err(|e| format!("verify delta ccr failed: {e}"))?; .map_err(|e| format!("verify delta ccr failed: {e}"))?;
let delta_vrp_rows = build_vrp_compare_rows(&delta_out.tree.vrps, &trust_anchor); let delta_vrp_rows = build_vrp_compare_rows(&delta_out.tree.vrps, &trust_anchor);
let delta_vap_rows = build_vap_compare_rows(&delta_out.tree.aspas, &trust_anchor); let delta_vap_rows = build_vap_compare_rows(&delta_out.tree.aspas, &trust_anchor);
let (delta_ccr_vrps, delta_ccr_vaps) = decode_ccr_compare_views(&delta_decoded, &trust_anchor)?; let (delta_ccr_vrps, delta_ccr_vaps) =
decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
if delta_vrp_rows != delta_ccr_vrps { if delta_vrp_rows != delta_ccr_vrps {
return Err("record-delta.csv compare view does not match delta.ccr".to_string()); return Err("record-delta.csv compare view does not match delta.ccr".to_string());
} }
@ -585,7 +630,11 @@ fn run(args: Args) -> Result<PathBuf, String> {
write_json(&rir_dir.join("bundle.json"), &metadata)?; write_json(&rir_dir.join("bundle.json"), &metadata)?;
write_json(&rir_dir.join("verification.json"), &verification)?; write_json(&rir_dir.join("verification.json"), &verification)?;
write_top_readme(&run_root.join("README.md"), rir)?; write_top_readme(&run_root.join("README.md"), rir)?;
write_rir_readme(&rir_dir.join("README.md"), rir, &metadata.base_validation_time)?; write_rir_readme(
&rir_dir.join("README.md"),
rir,
&metadata.base_validation_time,
)?;
let bundle_manifest = BundleManifest { let bundle_manifest = BundleManifest {
schema_version: "20260330-v1".to_string(), schema_version: "20260330-v1".to_string(),
@ -654,10 +703,7 @@ mod tests {
fn load_validation_time_reads_top_level_validation_time() { fn load_validation_time_reads_top_level_validation_time() {
let dir = tempdir().expect("tempdir"); let dir = tempdir().expect("tempdir");
let path = dir.path().join("locks.json"); let path = dir.path().join("locks.json");
std::fs::write( std::fs::write(&path, r#"{"validationTime":"2026-03-16T11:49:15+08:00"}"#)
&path,
r#"{"validationTime":"2026-03-16T11:49:15+08:00"}"#,
)
.expect("write locks"); .expect("write locks");
let got = load_validation_time(&path).expect("load validation time"); let got = load_validation_time(&path).expect("load validation time");
assert_eq!( assert_eq!(
@ -722,7 +768,12 @@ mod tests {
assert!(out_dir.join("apnic").join("base-vaps.csv").is_file()); assert!(out_dir.join("apnic").join("base-vaps.csv").is_file());
assert!(out_dir.join("apnic").join("delta.ccr").is_file()); assert!(out_dir.join("apnic").join("delta.ccr").is_file());
assert!(out_dir.join("apnic").join("record-delta.csv").is_file()); assert!(out_dir.join("apnic").join("record-delta.csv").is_file());
assert!(out_dir.join("apnic").join("record-delta-vaps.csv").is_file()); assert!(
out_dir
.join("apnic")
.join("record-delta-vaps.csv")
.is_file()
);
assert!(out_dir.join("apnic").join("verification.json").is_file()); assert!(out_dir.join("apnic").join("verification.json").is_file());
let bundle_json: serde_json::Value = serde_json::from_slice( let bundle_json: serde_json::Value = serde_json::from_slice(
&std::fs::read(out_dir.join("apnic").join("bundle.json")).expect("read bundle.json"), &std::fs::read(out_dir.join("apnic").join("bundle.json")).expect("read bundle.json"),
@ -734,11 +785,12 @@ mod tests {
assert!(bundle_json.get("baseCcrSha256").is_some()); assert!(bundle_json.get("baseCcrSha256").is_some());
assert!(bundle_json.get("deltaVrpCount").is_some()); assert!(bundle_json.get("deltaVrpCount").is_some());
assert!(bundle_json.get("deltaCcrSha256").is_some()); assert!(bundle_json.get("deltaCcrSha256").is_some());
let base_locks_bytes = let base_locks_bytes = std::fs::read(out_dir.join("apnic").join("base-locks.json"))
std::fs::read(out_dir.join("apnic").join("base-locks.json")).expect("read emitted base locks"); .expect("read emitted base locks");
let expected_base_locks_sha = sha256_hex(&base_locks_bytes); let expected_base_locks_sha = sha256_hex(&base_locks_bytes);
let delta_locks_json: serde_json::Value = serde_json::from_slice( let delta_locks_json: serde_json::Value = serde_json::from_slice(
&std::fs::read(out_dir.join("apnic").join("locks-delta.json")).expect("read delta locks"), &std::fs::read(out_dir.join("apnic").join("locks-delta.json"))
.expect("read delta locks"),
) )
.expect("parse delta locks"); .expect("parse delta locks");
assert_eq!(delta_locks_json["baseLocksSha256"], expected_base_locks_sha); assert_eq!(delta_locks_json["baseLocksSha256"], expected_base_locks_sha);

View File

@ -177,16 +177,24 @@ fn materialize_rsync_module_from_store(
.strip_prefix("rsync://") .strip_prefix("rsync://")
.ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))? .ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))?
.trim_end_matches('/'); .trim_end_matches('/');
fs::create_dir_all(tree_root.join(relative_root)) fs::create_dir_all(tree_root.join(relative_root)).map_err(|e| {
.map_err(|e| format!("create rsync tree root failed: {}: {e}", tree_root.join(relative_root).display()))?; format!(
"create rsync tree root failed: {}: {e}",
tree_root.join(relative_root).display()
)
})?;
for (uri, bytes) in objects { for (uri, bytes) in objects {
let rel = uri let rel = uri
.strip_prefix(module_uri) .strip_prefix(module_uri)
.ok_or_else(|| format!("object uri {uri} does not belong to module {module_uri}"))?; .ok_or_else(|| format!("object uri {uri} does not belong to module {module_uri}"))?;
let path = tree_root.join(relative_root).join(rel); let path = tree_root.join(relative_root).join(rel);
if let Some(parent) = path.parent() { if let Some(parent) = path.parent() {
fs::create_dir_all(parent) fs::create_dir_all(parent).map_err(|e| {
.map_err(|e| format!("create rsync object parent failed: {}: {e}", parent.display()))?; format!(
"create rsync object parent failed: {}: {e}",
parent.display()
)
})?;
} }
fs::write(&path, bytes) fs::write(&path, bytes)
.map_err(|e| format!("write rsync object failed: {}: {e}", path.display()))?; .map_err(|e| format!("write rsync object failed: {}: {e}", path.display()))?;
@ -197,8 +205,8 @@ fn materialize_rsync_module_from_store(
fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> { fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(dst) fs::create_dir_all(dst)
.map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?; .map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
for entry in fs::read_dir(src) for entry in
.map_err(|e| format!("read directory failed: {}: {e}", src.display()))? fs::read_dir(src).map_err(|e| format!("read directory failed: {}: {e}", src.display()))?
{ {
let entry = entry.map_err(|e| format!("read entry failed: {}: {e}", src.display()))?; let entry = entry.map_err(|e| format!("read entry failed: {}: {e}", src.display()))?;
let file_type = entry let file_type = entry
@ -252,8 +260,12 @@ fn keep_rsync_module(pp: &rpki::audit::PublicationPointAudit) -> Result<Option<S
if is_failed_fetch_source(&pp.source) { if is_failed_fetch_source(&pp.source) {
return Ok(None); return Ok(None);
} }
let module_uri = canonical_rsync_module(&pp.rsync_base_uri) let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?; format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if pp.rrdp_notification_uri.is_none() || pp.repo_sync_source.as_deref() == Some("rsync") { if pp.rrdp_notification_uri.is_none() || pp.repo_sync_source.as_deref() == Some("rsync") {
return Ok(Some(module_uri)); return Ok(Some(module_uri));
} }
@ -317,8 +329,9 @@ fn repair_base_inputs(
for entry in fs::read_dir(&repos_dir) for entry in fs::read_dir(&repos_dir)
.map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))? .map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))?
{ {
let entry = entry let entry = entry.map_err(|e| {
.map_err(|e| format!("read rrdp repo entry failed: {}: {e}", repos_dir.display()))?; format!("read rrdp repo entry failed: {}: {e}", repos_dir.display())
})?;
let meta = entry.path().join("meta.json"); let meta = entry.path().join("meta.json");
if !meta.exists() { if !meta.exists() {
continue; continue;
@ -385,13 +398,13 @@ fn repair_base_inputs(
} }
write_json_value(locks_path, &locks)?; write_json_value(locks_path, &locks)?;
verification.base["capture"]["rrdpRepoCount"] = serde_json::Value::from( verification.base["capture"]["rrdpRepoCount"] = serde_json::Value::from(
locks.get("rrdp") locks
.get("rrdp")
.and_then(|v| v.as_object()) .and_then(|v| v.as_object())
.map(|m| m.len()) .map(|m| m.len())
.unwrap_or(0), .unwrap_or(0),
); );
verification.base["capture"]["rsyncModuleCount"] = verification.base["capture"]["rsyncModuleCount"] = serde_json::Value::from(final_modules.len());
serde_json::Value::from(final_modules.len());
Ok(()) Ok(())
} }
@ -502,8 +515,9 @@ fn repair_delta_step_inputs(
for entry in fs::read_dir(&repos_dir) for entry in fs::read_dir(&repos_dir)
.map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))? .map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))?
{ {
let entry = entry let entry = entry.map_err(|e| {
.map_err(|e| format!("read rrdp repo entry failed: {}: {e}", repos_dir.display()))?; format!("read rrdp repo entry failed: {}: {e}", repos_dir.display())
})?;
let meta = entry.path().join("meta.json"); let meta = entry.path().join("meta.json");
if !meta.exists() { if !meta.exists() {
continue; continue;
@ -634,7 +648,8 @@ fn repair_delta_step_inputs(
.and_then(|v| v.get("session")) .and_then(|v| v.get("session"))
.and_then(|v| v.as_str()) .and_then(|v| v.as_str())
.or_else(|| { .or_else(|| {
entry.get("base") entry
.get("base")
.and_then(|v| v.get("session")) .and_then(|v| v.get("session"))
.and_then(|v| v.as_str()) .and_then(|v| v.as_str())
}); });
@ -645,7 +660,10 @@ fn repair_delta_step_inputs(
if session_dir.exists() { if session_dir.exists() {
continue; continue;
} }
let base_bucket_dir = base_capture_root.join("rrdp").join("repos").join(&bucket_hash); let base_bucket_dir = base_capture_root
.join("rrdp")
.join("repos")
.join(&bucket_hash);
let base_session_dir = base_bucket_dir.join(session); let base_session_dir = base_bucket_dir.join(session);
if !base_session_dir.exists() { if !base_session_dir.exists() {
continue; continue;
@ -673,13 +691,13 @@ fn repair_delta_step_inputs(
write_json_value(&locks_path, &locks)?; write_json_value(&locks_path, &locks)?;
step_verification["capture"]["rrdpRepoCount"] = serde_json::Value::from( step_verification["capture"]["rrdpRepoCount"] = serde_json::Value::from(
locks.get("rrdp") locks
.get("rrdp")
.and_then(|v| v.as_object()) .and_then(|v| v.as_object())
.map(|m| m.len()) .map(|m| m.len())
.unwrap_or(0), .unwrap_or(0),
); );
step_verification["capture"]["rsyncModuleCount"] = step_verification["capture"]["rsyncModuleCount"] = serde_json::Value::from(final_modules.len());
serde_json::Value::from(final_modules.len());
Ok(()) Ok(())
} }
@ -694,7 +712,12 @@ fn rewrite_delta_base_hash(step_dir: &Path, previous_locks_path: &Path) -> Resul
let locks_path = step_dir.join("locks-delta.json"); let locks_path = step_dir.join("locks-delta.json");
let mut locks = load_json(&locks_path)?; let mut locks = load_json(&locks_path)?;
let previous_locks = serde_json::from_slice::<serde_json::Value>(&previous_locks_bytes) let previous_locks = serde_json::from_slice::<serde_json::Value>(&previous_locks_bytes)
.map_err(|e| format!("parse previous locks failed: {}: {e}", previous_locks_path.display()))?; .map_err(|e| {
format!(
"parse previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?;
locks["baseLocksSha256"] = serde_json::Value::String(previous_locks_sha256.clone()); locks["baseLocksSha256"] = serde_json::Value::String(previous_locks_sha256.clone());
let capture = locks let capture = locks
.get("capture") .get("capture")
@ -760,17 +783,33 @@ fn real_main() -> Result<(), String> {
let bundle_json_path = rir_dir.join("bundle.json"); let bundle_json_path = rir_dir.join("bundle.json");
let verification_path = rir_dir.join("verification.json"); let verification_path = rir_dir.join("verification.json");
let mut bundle: RirBundleMetadataV2Serde = serde_json::from_slice( let mut bundle: RirBundleMetadataV2Serde =
&fs::read(&bundle_json_path) serde_json::from_slice(&fs::read(&bundle_json_path).map_err(|e| {
.map_err(|e| format!("read bundle.json failed: {}: {e}", bundle_json_path.display()))?, format!(
"read bundle.json failed: {}: {e}",
bundle_json_path.display()
) )
.map_err(|e| format!("parse bundle.json failed: {}: {e}", bundle_json_path.display()))?; })?)
.map_err(|e| {
format!(
"parse bundle.json failed: {}: {e}",
bundle_json_path.display()
)
})?;
let mut verification: VerificationV2 = serde_json::from_slice( let mut verification: VerificationV2 =
&fs::read(&verification_path) serde_json::from_slice(&fs::read(&verification_path).map_err(|e| {
.map_err(|e| format!("read verification.json failed: {}: {e}", verification_path.display()))?, format!(
"read verification.json failed: {}: {e}",
verification_path.display()
) )
.map_err(|e| format!("parse verification.json failed: {}: {e}", verification_path.display()))?; })?)
.map_err(|e| {
format!(
"parse verification.json failed: {}: {e}",
verification_path.display()
)
})?;
let tal_bytes = fs::read(rir_dir.join("tal.tal")) let tal_bytes = fs::read(rir_dir.join("tal.tal"))
.map_err(|e| format!("read tal.tal failed: {}: {e}", rir_dir.display()))?; .map_err(|e| format!("read tal.tal failed: {}: {e}", rir_dir.display()))?;
@ -787,7 +826,8 @@ fn real_main() -> Result<(), String> {
fs::create_dir_all(parent) fs::create_dir_all(parent)
.map_err(|e| format!("create refresh db parent failed: {}: {e}", parent.display()))?; .map_err(|e| format!("create refresh db parent failed: {}: {e}", parent.display()))?;
} }
let store = RocksStore::open(&work_db).map_err(|e| format!("open refresh rocksdb failed: {e}"))?; let store =
RocksStore::open(&work_db).map_err(|e| format!("open refresh rocksdb failed: {e}"))?;
let base_archive = path_join(&rir_dir, &bundle.base.relative_archive_path); let base_archive = path_join(&rir_dir, &bundle.base.relative_archive_path);
let base_locks = path_join(&rir_dir, &bundle.base.relative_locks_path); let base_locks = path_join(&rir_dir, &bundle.base.relative_locks_path);
@ -823,8 +863,8 @@ fn real_main() -> Result<(), String> {
.map_err(|e| format!("build base ccr failed: {e}"))?; .map_err(|e| format!("build base ccr failed: {e}"))?;
write_ccr_file(&base_ccr, &base_ccr_content) write_ccr_file(&base_ccr, &base_ccr_content)
.map_err(|e| format!("write base ccr failed: {}: {e}", base_ccr.display()))?; .map_err(|e| format!("write base ccr failed: {}: {e}", base_ccr.display()))?;
let base_ccr_bytes = let base_ccr_bytes = fs::read(&base_ccr)
fs::read(&base_ccr).map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr.display()))?; .map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr.display()))?;
let base_decoded = let base_decoded =
decode_content_info(&base_ccr_bytes).map_err(|e| format!("decode base ccr failed: {e}"))?; decode_content_info(&base_ccr_bytes).map_err(|e| format!("decode base ccr failed: {e}"))?;
let base_verify = let base_verify =
@ -838,15 +878,18 @@ fn real_main() -> Result<(), String> {
bundle.base.vap_count = base_vap_rows.len(); bundle.base.vap_count = base_vap_rows.len();
verification.base["ccr"]["sha256"] = serde_json::Value::String(bundle.base.ccr_sha256.clone()); verification.base["ccr"]["sha256"] = serde_json::Value::String(bundle.base.ccr_sha256.clone());
verification.base["ccr"]["stateHashesOk"] = serde_json::Value::Bool(base_verify.state_hashes_ok); verification.base["ccr"]["stateHashesOk"] =
serde_json::Value::Bool(base_verify.state_hashes_ok);
verification.base["ccr"]["manifestInstances"] = verification.base["ccr"]["manifestInstances"] =
serde_json::Value::from(base_verify.manifest_instances); serde_json::Value::from(base_verify.manifest_instances);
verification.base["ccr"]["roaVrpCount"] = serde_json::Value::from(base_vrp_rows.len()); verification.base["ccr"]["roaVrpCount"] = serde_json::Value::from(base_vrp_rows.len());
verification.base["ccr"]["aspaPayloadSets"] = serde_json::Value::from(base_vap_rows.len()); verification.base["ccr"]["aspaPayloadSets"] = serde_json::Value::from(base_vap_rows.len());
verification.base["ccr"]["routerKeyCount"] = verification.base["ccr"]["routerKeyCount"] =
serde_json::Value::from(base_verify.router_key_count); serde_json::Value::from(base_verify.router_key_count);
verification.base["compareViews"]["baseVrpCount"] = serde_json::Value::from(base_vrp_rows.len()); verification.base["compareViews"]["baseVrpCount"] =
verification.base["compareViews"]["baseVapCount"] = serde_json::Value::from(base_vap_rows.len()); serde_json::Value::from(base_vrp_rows.len());
verification.base["compareViews"]["baseVapCount"] =
serde_json::Value::from(base_vap_rows.len());
verification.base["capture"]["selfReplayOk"] = serde_json::Value::Bool(true); verification.base["capture"]["selfReplayOk"] = serde_json::Value::Bool(true);
repair_base_inputs( repair_base_inputs(
&base_archive, &base_archive,
@ -922,7 +965,8 @@ fn real_main() -> Result<(), String> {
step_verification["ccr"]["manifestInstances"] = step_verification["ccr"]["manifestInstances"] =
serde_json::Value::from(delta_verify.manifest_instances); serde_json::Value::from(delta_verify.manifest_instances);
step_verification["ccr"]["roaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len()); step_verification["ccr"]["roaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len());
step_verification["ccr"]["aspaPayloadSets"] = serde_json::Value::from(delta_vap_rows.len()); step_verification["ccr"]["aspaPayloadSets"] =
serde_json::Value::from(delta_vap_rows.len());
step_verification["ccr"]["routerKeyCount"] = step_verification["ccr"]["routerKeyCount"] =
serde_json::Value::from(delta_verify.router_key_count); serde_json::Value::from(delta_verify.router_key_count);
step_verification["compareViews"]["vrpCount"] = step_verification["compareViews"]["vrpCount"] =
@ -931,15 +975,15 @@ fn real_main() -> Result<(), String> {
serde_json::Value::from(delta_vap_rows.len()); serde_json::Value::from(delta_vap_rows.len());
step_verification["selfReplayOk"] = serde_json::Value::Bool(true); step_verification["selfReplayOk"] = serde_json::Value::Bool(true);
} }
let step_verification_path = path_join(&rir_dir, &step.relative_path).join("verification.json"); let step_verification_path =
let mut step_verification_json: serde_json::Value = serde_json::from_slice( path_join(&rir_dir, &step.relative_path).join("verification.json");
&fs::read(&step_verification_path).map_err(|e| { let mut step_verification_json: serde_json::Value =
serde_json::from_slice(&fs::read(&step_verification_path).map_err(|e| {
format!( format!(
"read step verification failed: {}: {e}", "read step verification failed: {}: {e}",
step_verification_path.display() step_verification_path.display()
) )
})?, })?)
)
.map_err(|e| { .map_err(|e| {
format!( format!(
"parse step verification failed: {}: {e}", "parse step verification failed: {}: {e}",
@ -952,7 +996,8 @@ fn real_main() -> Result<(), String> {
serde_json::Value::Bool(delta_verify.state_hashes_ok); serde_json::Value::Bool(delta_verify.state_hashes_ok);
step_verification_json["ccr"]["manifestInstances"] = step_verification_json["ccr"]["manifestInstances"] =
serde_json::Value::from(delta_verify.manifest_instances); serde_json::Value::from(delta_verify.manifest_instances);
step_verification_json["ccr"]["roaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len()); step_verification_json["ccr"]["roaVrpCount"] =
serde_json::Value::from(delta_vrp_rows.len());
step_verification_json["ccr"]["aspaPayloadSets"] = step_verification_json["ccr"]["aspaPayloadSets"] =
serde_json::Value::from(delta_vap_rows.len()); serde_json::Value::from(delta_vap_rows.len());
step_verification_json["ccr"]["routerKeyCount"] = step_verification_json["ccr"]["routerKeyCount"] =

View File

@ -1,5 +1,5 @@
use rocksdb::{DB, IteratorMode, Options}; use rocksdb::{DB, IteratorMode, Options};
use rpki::storage::{column_family_descriptors, CF_REPOSITORY_VIEW}; use rpki::storage::{CF_REPOSITORY_VIEW, column_family_descriptors};
use std::fs; use std::fs;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};

View File

@ -64,11 +64,8 @@ fn main() {
println!("rule_hash={}", output.rule_hash); println!("rule_hash={}", output.rule_hash);
println!("validation_path_hint={:?}", output.validation_path_hint); println!("validation_path_hint={:?}", output.validation_path_hint);
if let Some(trace) = trace_rule_to_root( if let Some(trace) =
&store, trace_rule_to_root(&store, AuditRuleKind::Roa, &output.rule_hash)
AuditRuleKind::Roa,
&output.rule_hash,
)
.expect("trace rule") .expect("trace rule")
{ {
println!( println!(
@ -79,11 +76,17 @@ fn main() {
.map(|node| node.manifest_rsync_uri.as_str()) .map(|node| node.manifest_rsync_uri.as_str())
.unwrap_or("") .unwrap_or("")
); );
println!("trace_source_object_uri={}", trace.resolved_output.source_object_uri); println!(
"trace_source_object_uri={}",
trace.resolved_output.source_object_uri
);
println!("trace_chain_len={}", trace.chain_leaf_to_root.len()); println!("trace_chain_len={}", trace.chain_leaf_to_root.len());
for (idx, node) in trace.chain_leaf_to_root.iter().enumerate() { for (idx, node) in trace.chain_leaf_to_root.iter().enumerate() {
println!("chain[{idx}].manifest={}", node.manifest_rsync_uri); println!("chain[{idx}].manifest={}", node.manifest_rsync_uri);
println!("chain[{idx}].current_manifest={}", node.current_manifest_rsync_uri); println!(
"chain[{idx}].current_manifest={}",
node.current_manifest_rsync_uri
);
println!("chain[{idx}].current_crl={}", node.current_crl_rsync_uri); println!("chain[{idx}].current_crl={}", node.current_crl_rsync_uri);
} }
} }

323
src/blob_store.rs Normal file
View File

@ -0,0 +1,323 @@
use std::path::PathBuf;
use std::sync::Arc;
use rocksdb::{DB, Options, WriteBatch};
use crate::storage::{RawByHashEntry, RocksStore, StorageError, StorageResult};
/// Key-space prefix for raw-object-by-hash entries in the standalone DB.
const RAW_BY_HASH_KEY_PREFIX: &str = "rawbyhash:";

/// Build the RocksDB key for a raw object addressed by its SHA-256 hex digest.
fn raw_by_hash_key(sha256_hex: &str) -> String {
    // Pre-size to avoid a reallocation; result is identical to
    // `format!("{RAW_BY_HASH_KEY_PREFIX}{sha256_hex}")`.
    let mut key = String::with_capacity(RAW_BY_HASH_KEY_PREFIX.len() + sha256_hex.len());
    key.push_str(RAW_BY_HASH_KEY_PREFIX);
    key.push_str(sha256_hex);
    key
}
/// Read-side interface over a raw-object store keyed by SHA-256 hex digest.
///
/// Implemented by both the embedded store and the standalone
/// `ExternalRawStoreDb`, so callers can consume either backend through one
/// interface. Batch results preserve the order of the requested digests.
pub trait RawObjectStore {
    /// Fetch the full entry (bytes plus metadata) for one digest, if present.
    fn get_raw_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>>;

    /// Fetch entries for many digests; element `i` corresponds to input `i`.
    fn get_raw_entries_batch(
        &self,
        sha256_hexes: &[String],
    ) -> StorageResult<Vec<Option<RawByHashEntry>>>;

    /// Convenience wrapper: only the raw bytes for one digest.
    fn get_blob_bytes(&self, sha256_hex: &str) -> StorageResult<Option<Vec<u8>>> {
        Ok(self.get_raw_entry(sha256_hex)?.map(|found| found.bytes))
    }

    /// Convenience wrapper: only the raw bytes, batched; order matches input.
    fn get_blob_bytes_batch(
        &self,
        sha256_hexes: &[String],
    ) -> StorageResult<Vec<Option<Vec<u8>>>> {
        let entries = self.get_raw_entries_batch(sha256_hexes)?;
        Ok(entries
            .into_iter()
            .map(|maybe| maybe.map(|found| found.bytes))
            .collect())
    }
}
/// Standalone RocksDB instance holding only raw-by-hash object entries,
/// kept separate from the main validation store.
#[derive(Clone, Debug)]
pub struct ExternalRawStoreDb {
    // Filesystem path the database was opened at (exposed via `path()`).
    path: PathBuf,
    // Shared handle so `Clone`d values reuse the single open DB.
    db: Arc<DB>,
}
impl ExternalRawStoreDb {
    /// Open (creating if missing) the standalone raw-object database at `path`.
    ///
    /// Parent directories are created as needed. LZ4 compression is enabled
    /// on the column data. All RocksDB/IO failures are surfaced as
    /// `StorageError::RocksDb`.
    pub fn open(path: impl Into<PathBuf>) -> StorageResult<Self> {
        let path = path.into();
        if let Some(parent) = path.parent() {
            std::fs::create_dir_all(parent).map_err(|e| StorageError::RocksDb(e.to_string()))?;
        }
        let mut opts = Options::default();
        opts.create_if_missing(true);
        opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
        let db = DB::open(&opts, &path).map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(Self {
            path,
            db: Arc::new(db),
        })
    }

    /// Validate and CBOR-encode one entry, returning its `(key, value)` pair.
    ///
    /// Shared by the single and batched write paths so validation and
    /// error mapping cannot drift between them.
    fn encode_entry(entry: &RawByHashEntry) -> StorageResult<(String, Vec<u8>)> {
        entry.validate_internal()?;
        let key = raw_by_hash_key(&entry.sha256_hex);
        let value = serde_cbor::to_vec(entry).map_err(|e| StorageError::Codec {
            entity: "raw_by_hash",
            detail: e.to_string(),
        })?;
        Ok((key, value))
    }

    /// Store a single entry, overwriting any previous value for its digest.
    pub fn put_raw_entry(&self, entry: &RawByHashEntry) -> StorageResult<()> {
        let (key, value) = Self::encode_entry(entry)?;
        self.db
            .put(key.as_bytes(), value)
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(())
    }

    /// Store many entries in one atomic RocksDB write batch.
    ///
    /// Validation failure of any entry aborts before anything is written;
    /// an empty slice is a no-op.
    pub fn put_raw_entries_batch(&self, entries: &[RawByHashEntry]) -> StorageResult<()> {
        if entries.is_empty() {
            return Ok(());
        }
        let mut batch = WriteBatch::default();
        for entry in entries {
            let (key, value) = Self::encode_entry(entry)?;
            batch.put(key.as_bytes(), value);
        }
        self.db
            .write(batch)
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(())
    }

    /// Delete the entry for `sha256_hex`; succeeds even if it was absent.
    pub fn delete_raw_entry(&self, sha256_hex: &str) -> StorageResult<()> {
        let key = raw_by_hash_key(sha256_hex);
        self.db
            .delete(key.as_bytes())
            .map_err(|e| StorageError::RocksDb(e.to_string()))
    }

    /// Path the database was opened at.
    pub fn path(&self) -> &PathBuf {
        &self.path
    }
}
// The primary `RocksStore` also satisfies `RawObjectStore` by delegating
// straight to its built-in raw-by-hash accessors.
impl RawObjectStore for RocksStore {
    fn get_raw_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>> {
        // Thin delegation to the store's inherent single-entry lookup.
        self.get_raw_by_hash_entry(sha256_hex)
    }

    fn get_raw_entries_batch(
        &self,
        sha256_hexes: &[String],
    ) -> StorageResult<Vec<Option<RawByHashEntry>>> {
        // Thin delegation to the store's inherent batched lookup.
        self.get_raw_by_hash_entries_batch(sha256_hexes)
    }
}
/// Decode and validate a stored CBOR value into a `RawByHashEntry`.
///
/// Shared by the single and batched read paths so corrupt values are
/// reported identically (`StorageError::Codec`, entity `raw_by_hash`)
/// regardless of which path hit them.
fn decode_raw_entry(bytes: &[u8]) -> StorageResult<RawByHashEntry> {
    let entry = serde_cbor::from_slice::<RawByHashEntry>(bytes).map_err(|e| StorageError::Codec {
        entity: "raw_by_hash",
        detail: e.to_string(),
    })?;
    entry.validate_internal()?;
    Ok(entry)
}

impl RawObjectStore for ExternalRawStoreDb {
    /// Point lookup of a single entry; `Ok(None)` when the key is absent.
    fn get_raw_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>> {
        let key = raw_by_hash_key(sha256_hex);
        let Some(bytes) = self
            .db
            .get(key.as_bytes())
            .map_err(|e| StorageError::RocksDb(e.to_string()))?
        else {
            return Ok(None);
        };
        decode_raw_entry(&bytes).map(Some)
    }

    /// Batched lookup; the output is positionally aligned with the request,
    /// with `None` for misses. Empty input short-circuits without touching
    /// the database.
    fn get_raw_entries_batch(
        &self,
        sha256_hexes: &[String],
    ) -> StorageResult<Vec<Option<RawByHashEntry>>> {
        if sha256_hexes.is_empty() {
            return Ok(Vec::new());
        }
        // multi_get issues one RocksDB round trip for the whole batch.
        let keys: Vec<String> = sha256_hexes.iter().map(|hash| raw_by_hash_key(hash)).collect();
        self.db
            .multi_get(keys.iter().map(|key| key.as_bytes()))
            .into_iter()
            .map(|res| {
                match res.map_err(|e| StorageError::RocksDb(e.to_string()))? {
                    Some(bytes) => decode_raw_entry(&bytes).map(Some),
                    None => Ok(None),
                }
            })
            .collect()
    }
}
#[cfg(test)]
mod tests {
    use super::{ExternalRawStoreDb, RawObjectStore};
    use crate::storage::{RawByHashEntry, RocksStore, StorageError};

    /// Hex-encoded SHA-256 of `bytes` — the content address used as the key.
    fn sha256_hex(bytes: &[u8]) -> String {
        use sha2::{Digest, Sha256};
        hex::encode(Sha256::digest(bytes))
    }

    // The primary RocksStore satisfies the trait: single lookup returns the
    // stored bytes, and batched lookup stays positionally aligned (miss in
    // the middle yields None at that position).
    #[test]
    fn rocks_store_raw_object_store_reads_single_and_batch_entries() {
        let td = tempfile::tempdir().expect("tempdir");
        let store = RocksStore::open(td.path()).expect("open rocksdb");
        let a = b"object-a".to_vec();
        let b = b"object-b".to_vec();
        let a_hash = sha256_hex(&a);
        let b_hash = sha256_hex(&b);
        store
            .put_raw_by_hash_entry(&RawByHashEntry::from_bytes(a_hash.clone(), a.clone()))
            .expect("put a");
        store
            .put_raw_by_hash_entry(&RawByHashEntry::from_bytes(b_hash.clone(), b.clone()))
            .expect("put b");
        let single = store
            .get_raw_entry(&a_hash)
            .expect("get single")
            .expect("present");
        assert_eq!(single.bytes, a);
        // "00".repeat(32) is a well-formed but unknown digest → None slot.
        let batch = store
            .get_raw_entries_batch(&[a_hash.clone(), "00".repeat(32), b_hash.clone()])
            .expect("get batch");
        assert_eq!(batch.len(), 3);
        assert_eq!(batch[0].as_ref().map(|entry| entry.bytes.as_slice()), Some(a.as_slice()));
        assert!(batch[1].is_none());
        assert_eq!(batch[2].as_ref().map(|entry| entry.bytes.as_slice()), Some(b.as_slice()));
    }

    // A full entry (bytes + origin URIs + object type) survives a
    // put/get round trip through the external database unchanged.
    #[test]
    fn external_raw_store_db_roundtrips_entries() {
        let td = tempfile::tempdir().expect("tempdir");
        let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
        let mut entry = RawByHashEntry::from_bytes(sha256_hex(b"blob"), b"blob".to_vec());
        entry.origin_uris.push("rsync://example.test/repo/a.cer".to_string());
        entry.object_type = Some("cer".to_string());
        raw_store.put_raw_entry(&entry).expect("put raw entry");
        let got = raw_store
            .get_raw_entry(&entry.sha256_hex)
            .expect("read raw entry")
            .expect("entry exists");
        assert_eq!(got, entry);
    }

    // Batched writes are readable back via the batched read path, in order.
    #[test]
    fn external_raw_store_db_batch_writes_and_reads() {
        let td = tempfile::tempdir().expect("tempdir");
        let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
        let a = RawByHashEntry::from_bytes(sha256_hex(b"a"), b"a".to_vec());
        let b = RawByHashEntry::from_bytes(sha256_hex(b"b"), b"b".to_vec());
        raw_store
            .put_raw_entries_batch(&[a.clone(), b.clone()])
            .expect("batch put");
        let batch = raw_store
            .get_raw_entries_batch(&[a.sha256_hex.clone(), b.sha256_hex.clone()])
            .expect("batch get");
        assert_eq!(batch.len(), 2);
        assert_eq!(batch[0], Some(a));
        assert_eq!(batch[1], Some(b));
    }

    // The trait's default blob helpers strip metadata and return bytes only.
    // Also exercises `open` creating a nested parent directory.
    #[test]
    fn raw_object_store_default_blob_helpers_return_bytes_only() {
        let td = tempfile::tempdir().expect("tempdir");
        let raw_store = ExternalRawStoreDb::open(td.path().join("nested/raw-store.db"))
            .expect("open raw store");
        let mut entry = RawByHashEntry::from_bytes(sha256_hex(b"blob"), b"blob".to_vec());
        entry.origin_uris.push("rsync://example.test/repo/blob.roa".to_string());
        raw_store.put_raw_entry(&entry).expect("put raw entry");
        let single = raw_store
            .get_blob_bytes(&entry.sha256_hex)
            .expect("get blob bytes")
            .expect("entry exists");
        assert_eq!(single, b"blob".to_vec());
        let batch = raw_store
            .get_blob_bytes_batch(&[entry.sha256_hex.clone(), "00".repeat(32)])
            .expect("get blob bytes batch");
        assert_eq!(batch, vec![Some(b"blob".to_vec()), None]);
    }

    // After delete, the entry is gone; the delete call itself succeeds.
    #[test]
    fn external_raw_store_db_delete_removes_entry() {
        let td = tempfile::tempdir().expect("tempdir");
        let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
        let entry = RawByHashEntry::from_bytes(sha256_hex(b"gone"), b"gone".to_vec());
        raw_store.put_raw_entry(&entry).expect("put");
        assert!(raw_store.get_raw_entry(&entry.sha256_hex).unwrap().is_some());
        raw_store
            .delete_raw_entry(&entry.sha256_hex)
            .expect("delete entry");
        assert!(raw_store.get_raw_entry(&entry.sha256_hex).unwrap().is_none());
    }

    // An entry whose sha256_hex does not match its bytes fails
    // validate_internal and is rejected before anything is written.
    #[test]
    fn external_raw_store_db_rejects_invalid_entry_on_put() {
        let td = tempfile::tempdir().expect("tempdir");
        let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
        let bad = RawByHashEntry {
            // "11" * 32 is a syntactically valid digest that does not match b"blob".
            sha256_hex: "11".repeat(32),
            bytes: b"blob".to_vec(),
            origin_uris: Vec::new(),
            object_type: None,
            encoding: None,
        };
        let err = raw_store.put_raw_entry(&bad).expect_err("invalid hash should fail");
        assert!(matches!(err, StorageError::InvalidData { .. }));
    }

    // Bytes injected directly (bypassing the put path) that are not valid
    // CBOR surface as a Codec error with the raw_by_hash entity tag.
    #[test]
    fn external_raw_store_db_reports_codec_error_for_corrupt_value() {
        let td = tempfile::tempdir().expect("tempdir");
        let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
        raw_store
            .db
            .put(b"rawbyhash:deadbeef", b"not-cbor")
            .expect("inject corrupt bytes");
        let err = raw_store
            .get_raw_entry("deadbeef")
            .expect_err("corrupt value should fail");
        assert!(matches!(err, StorageError::Codec { entity: "raw_by_hash", .. }));
    }

    // Empty batch requests (read and write) are no-ops that succeed.
    #[test]
    fn external_raw_store_db_batch_returns_empty_for_empty_request() {
        let td = tempfile::tempdir().expect("tempdir");
        let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
        let entries = raw_store
            .get_raw_entries_batch(&[])
            .expect("empty batch succeeds");
        assert!(entries.is_empty());
        raw_store.put_raw_entries_batch(&[]).expect("empty put succeeds");
    }
}

View File

@ -65,13 +65,10 @@ pub fn build_vap_compare_rows(
aspas: &[AspaAttestation], aspas: &[AspaAttestation],
trust_anchor: &str, trust_anchor: &str,
) -> BTreeSet<VapCompareRow> { ) -> BTreeSet<VapCompareRow> {
aspas.iter() aspas
.map(|aspa| {
let mut providers = aspa
.provider_as_ids
.iter() .iter()
.copied() .map(|aspa| {
.collect::<Vec<_>>(); let mut providers = aspa.provider_as_ids.iter().copied().collect::<Vec<_>>();
providers.sort_unstable(); providers.sort_unstable();
providers.dedup(); providers.dedup();
VapCompareRow { VapCompareRow {
@ -173,7 +170,10 @@ pub fn write_vap_csv(path: &Path, rows: &BTreeSet<VapCompareRow>) -> Result<(),
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::ccr::{CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, build_aspa_payload_state, build_roa_payload_state}; use crate::ccr::{
CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation,
build_aspa_payload_state, build_roa_payload_state,
};
use crate::data_model::roa::{IpPrefix, RoaAfi}; use crate::data_model::roa::{IpPrefix, RoaAfi};
#[test] #[test]
@ -218,7 +218,8 @@ mod tests {
tas: None, tas: None,
rks: None, rks: None,
}); });
let (vrp_rows, vap_rows) = decode_ccr_compare_views(&content, "apnic").expect("decode compare views"); let (vrp_rows, vap_rows) =
decode_ccr_compare_views(&content, "apnic").expect("decode compare views");
assert_eq!(vrp_rows.len(), 1); assert_eq!(vrp_rows.len(), 1);
assert_eq!(vap_rows.len(), 1); assert_eq!(vap_rows.len(), 1);
assert_eq!(vap_rows.iter().next().unwrap().providers, "AS64497"); assert_eq!(vap_rows.iter().next().unwrap().providers, "AS64497");

View File

@ -8,10 +8,12 @@ use time::format_description::well_known::Rfc3339;
use crate::audit::PublicationPointAudit; use crate::audit::PublicationPointAudit;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher}; use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use crate::replay::archive::{ReplayArchiveIndex, ReplayRrdpLock, ReplayTransport, canonical_rsync_module, sha256_hex}; use crate::replay::archive::{
use crate::sync::rrdp::{NotificationDeltaRef, parse_notification}; ReplayArchiveIndex, ReplayRrdpLock, ReplayTransport, canonical_rsync_module, sha256_hex,
};
use crate::storage::{RocksStore, RrdpSourceRecord}; use crate::storage::{RocksStore, RrdpSourceRecord};
use crate::sync::rrdp::Fetcher; use crate::sync::rrdp::Fetcher;
use crate::sync::rrdp::{NotificationDeltaRef, parse_notification};
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct RecordedHttpResponse { pub struct RecordedHttpResponse {
@ -124,7 +126,9 @@ impl<F: RsyncFetcher> RsyncFetcher for RecordingRsyncFetcher<F> {
if self.capture_objects { if self.capture_objects {
let mut recorded = Vec::new(); let mut recorded = Vec::new();
let result = self.inner.visit_objects(rsync_base_uri, &mut |uri, bytes| { let result = self
.inner
.visit_objects(rsync_base_uri, &mut |uri, bytes| {
recorded.push((uri.clone(), bytes.clone())); recorded.push((uri.clone(), bytes.clone()));
visitor(uri, bytes) visitor(uri, bytes)
})?; })?;
@ -347,7 +351,13 @@ fn rrdp_repo_is_replayable(record: &RrdpSourceRecord) -> bool {
fn collect_current_state_locks( fn collect_current_state_locks(
publication_points: &[PublicationPointAudit], publication_points: &[PublicationPointAudit],
store: &RocksStore, store: &RocksStore,
) -> Result<(BTreeMap<String, RrdpLockJson>, BTreeMap<String, RsyncLockJson>), String> { ) -> Result<
(
BTreeMap<String, RrdpLockJson>,
BTreeMap<String, RsyncLockJson>,
),
String,
> {
let mut rrdp_locks = BTreeMap::new(); let mut rrdp_locks = BTreeMap::new();
let mut rsync_locks = BTreeMap::new(); let mut rsync_locks = BTreeMap::new();
let mut seen_modules = BTreeSet::new(); let mut seen_modules = BTreeSet::new();
@ -357,8 +367,12 @@ fn collect_current_state_locks(
if pp.source == "failed_fetch_no_cache" { if pp.source == "failed_fetch_no_cache" {
continue; continue;
} }
let module_uri = canonical_rsync_module(&pp.rsync_base_uri) let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?; format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() { if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() {
if !seen_rrdp.insert(notify_uri.to_string()) { if !seen_rrdp.insert(notify_uri.to_string()) {
continue; continue;
@ -476,7 +490,10 @@ fn materialize_rsync_module(
last_seen_at: &str, last_seen_at: &str,
) -> Result<(), String> { ) -> Result<(), String> {
let bucket_hash = sha256_hex(module_uri.as_bytes()); let bucket_hash = sha256_hex(module_uri.as_bytes());
let bucket_dir = capture_root.join("rsync").join("modules").join(&bucket_hash); let bucket_dir = capture_root
.join("rsync")
.join("modules")
.join(&bucket_hash);
write_json( write_json(
&bucket_dir.join("meta.json"), &bucket_dir.join("meta.json"),
&ModuleMetaJson { &ModuleMetaJson {
@ -490,8 +507,12 @@ fn materialize_rsync_module(
.strip_prefix("rsync://") .strip_prefix("rsync://")
.ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))?; .ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))?;
let relative_root = without_scheme.trim_end_matches('/'); let relative_root = without_scheme.trim_end_matches('/');
fs::create_dir_all(bucket_dir.join("tree").join(relative_root)) fs::create_dir_all(bucket_dir.join("tree").join(relative_root)).map_err(|e| {
.map_err(|e| format!("create rsync tree root failed: {}: {e}", bucket_dir.join("tree").join(relative_root).display()))?; format!(
"create rsync tree root failed: {}: {e}",
bucket_dir.join("tree").join(relative_root).display()
)
})?;
for (uri, bytes) in objects { for (uri, bytes) in objects {
let rel = uri let rel = uri
.strip_prefix(module_uri) .strip_prefix(module_uri)
@ -557,8 +578,12 @@ pub fn write_live_base_replay_bundle_inputs(
if pp.source == "failed_fetch_no_cache" { if pp.source == "failed_fetch_no_cache" {
continue; continue;
} }
let module_uri = canonical_rsync_module(&pp.rsync_base_uri) let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?; format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() { if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() {
if !seen_rrdp.insert(notify_uri.to_string()) { if !seen_rrdp.insert(notify_uri.to_string()) {
continue; continue;
@ -567,7 +592,9 @@ pub fn write_live_base_replay_bundle_inputs(
if rrdp_repo_is_replayable(&source_record) { if rrdp_repo_is_replayable(&source_record) {
let notification_bytes = &http_records let notification_bytes = &http_records
.get(notify_uri) .get(notify_uri)
.ok_or_else(|| format!("missing recorded notification body for {notify_uri}"))? .ok_or_else(|| {
format!("missing recorded notification body for {notify_uri}")
})?
.bytes; .bytes;
let snapshot_uri = source_record let snapshot_uri = source_record
.last_snapshot_uri .last_snapshot_uri
@ -575,9 +602,16 @@ pub fn write_live_base_replay_bundle_inputs(
.ok_or_else(|| format!("missing last_snapshot_uri for {notify_uri}"))?; .ok_or_else(|| format!("missing last_snapshot_uri for {notify_uri}"))?;
let snapshot_bytes = &http_records let snapshot_bytes = &http_records
.get(snapshot_uri) .get(snapshot_uri)
.ok_or_else(|| format!("missing recorded snapshot body for {snapshot_uri}"))? .ok_or_else(|| {
format!("missing recorded snapshot body for {snapshot_uri}")
})?
.bytes; .bytes;
materialize_rrdp_repo(&capture_root, &source_record, notification_bytes, snapshot_bytes)?; materialize_rrdp_repo(
&capture_root,
&source_record,
notification_bytes,
snapshot_bytes,
)?;
rrdp_locks.insert( rrdp_locks.insert(
notify_uri.to_string(), notify_uri.to_string(),
RrdpLockJson { RrdpLockJson {
@ -621,10 +655,15 @@ pub fn write_live_base_replay_bundle_inputs(
for fetch in rsync_records.values() { for fetch in rsync_records.values() {
let module_uri = canonical_rsync_module(&fetch.requested_base_uri) let module_uri = canonical_rsync_module(&fetch.requested_base_uri)
.map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?; .map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?;
let objects = rsync_objects_by_module.entry(module_uri.clone()).or_default(); let objects = rsync_objects_by_module
let times = rsync_times_by_module .entry(module_uri.clone())
.entry(module_uri) .or_default();
.or_insert_with(|| (fetch.fetched_at_rfc3339_utc.clone(), fetch.fetched_at_rfc3339_utc.clone())); let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| {
(
fetch.fetched_at_rfc3339_utc.clone(),
fetch.fetched_at_rfc3339_utc.clone(),
)
});
if fetch.fetched_at_rfc3339_utc < times.0 { if fetch.fetched_at_rfc3339_utc < times.0 {
times.0 = fetch.fetched_at_rfc3339_utc.clone(); times.0 = fetch.fetched_at_rfc3339_utc.clone();
} }
@ -653,7 +692,13 @@ pub fn write_live_base_replay_bundle_inputs(
.unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()); .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string());
(now.clone(), now) (now.clone(), now)
}); });
materialize_rsync_module(&capture_root, module_uri, objects, &created_at, &last_seen_at)?; materialize_rsync_module(
&capture_root,
module_uri,
objects,
&created_at,
&last_seen_at,
)?;
} }
let locks = ReplayLocksJson { let locks = ReplayLocksJson {
@ -761,13 +806,25 @@ pub fn write_live_delta_replay_step_inputs(
http_records: &BTreeMap<String, RecordedHttpResponse>, http_records: &BTreeMap<String, RecordedHttpResponse>,
rsync_records: &BTreeMap<String, RecordedRsyncFetch>, rsync_records: &BTreeMap<String, RecordedRsyncFetch>,
) -> Result<LiveDeltaCaptureSummary, String> { ) -> Result<LiveDeltaCaptureSummary, String> {
let previous_locks: crate::replay::archive::ReplayLocks = serde_json::from_slice( let previous_locks: crate::replay::archive::ReplayLocks =
&fs::read(previous_locks_path) serde_json::from_slice(&fs::read(previous_locks_path).map_err(|e| {
.map_err(|e| format!("read previous locks failed: {}: {e}", previous_locks_path.display()))?, format!(
"read previous locks failed: {}: {e}",
previous_locks_path.display()
) )
.map_err(|e| format!("parse previous locks failed: {}: {e}", previous_locks_path.display()))?; })?)
let previous_locks_bytes = fs::read(previous_locks_path) .map_err(|e| {
.map_err(|e| format!("read previous locks bytes failed: {}: {e}", previous_locks_path.display()))?; format!(
"parse previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?;
let previous_locks_bytes = fs::read(previous_locks_path).map_err(|e| {
format!(
"read previous locks bytes failed: {}: {e}",
previous_locks_path.display()
)
})?;
let previous_locks_sha256 = sha256_hex(&previous_locks_bytes); let previous_locks_sha256 = sha256_hex(&previous_locks_bytes);
let recorded_at = time::OffsetDateTime::now_utc(); let recorded_at = time::OffsetDateTime::now_utc();
@ -810,10 +867,15 @@ pub fn write_live_delta_replay_step_inputs(
for fetch in rsync_records.values() { for fetch in rsync_records.values() {
let module_uri = canonical_rsync_module(&fetch.requested_base_uri) let module_uri = canonical_rsync_module(&fetch.requested_base_uri)
.map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?; .map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?;
let objects = rsync_objects_by_module.entry(module_uri.clone()).or_default(); let objects = rsync_objects_by_module
let times = rsync_times_by_module .entry(module_uri.clone())
.entry(module_uri) .or_default();
.or_insert_with(|| (fetch.fetched_at_rfc3339_utc.clone(), fetch.fetched_at_rfc3339_utc.clone())); let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| {
(
fetch.fetched_at_rfc3339_utc.clone(),
fetch.fetched_at_rfc3339_utc.clone(),
)
});
if fetch.fetched_at_rfc3339_utc < times.0 { if fetch.fetched_at_rfc3339_utc < times.0 {
times.0 = fetch.fetched_at_rfc3339_utc.clone(); times.0 = fetch.fetched_at_rfc3339_utc.clone();
} }
@ -834,22 +896,31 @@ pub fn write_live_delta_replay_step_inputs(
if pp.source == "failed_fetch_no_cache" { if pp.source == "failed_fetch_no_cache" {
continue; continue;
} }
let module_uri = canonical_rsync_module(&pp.rsync_base_uri) let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?; format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() { if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() {
if !seen_notifications.insert(notify_uri.to_string()) { if !seen_notifications.insert(notify_uri.to_string()) {
continue; continue;
} }
let base_lock = previous_locks.rrdp.get(notify_uri); let base_lock = previous_locks.rrdp.get(notify_uri);
let target_record = store let target_record = store.get_rrdp_source_record(notify_uri).map_err(|e| {
.get_rrdp_source_record(notify_uri) format!("read target rrdp source record failed for {notify_uri}: {e}")
.map_err(|e| format!("read target rrdp source record failed for {notify_uri}: {e}"))?; })?;
let bucket_hash = sha256_hex(notify_uri.as_bytes()); let bucket_hash = sha256_hex(notify_uri.as_bytes());
let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash); let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash);
let (created_at, last_seen_at) = target_record let (created_at, last_seen_at) = target_record
.as_ref() .as_ref()
.map(|record| (record.first_seen_at.rfc3339_utc.clone(), record.last_seen_at.rfc3339_utc.clone())) .map(|record| {
(
record.first_seen_at.rfc3339_utc.clone(),
record.last_seen_at.rfc3339_utc.clone(),
)
})
.unwrap_or_else(|| { .unwrap_or_else(|| {
let now = recorded_at let now = recorded_at
.format(&Rfc3339) .format(&Rfc3339)
@ -869,7 +940,9 @@ pub fn write_live_delta_replay_step_inputs(
let entry = if let (Some(base_lock), Some(target_record), Some(target_state)) = ( let entry = if let (Some(base_lock), Some(target_record), Some(target_state)) = (
base_lock, base_lock,
target_record.as_ref(), target_record.as_ref(),
target_record.as_ref().and_then(target_rrdp_state_from_record), target_record
.as_ref()
.and_then(target_rrdp_state_from_record),
) { ) {
if base_lock.transport == ReplayTransport::Rrdp if base_lock.transport == ReplayTransport::Rrdp
&& base_lock.session.as_deref() == target_record.last_session_id.as_deref() && base_lock.session.as_deref() == target_record.last_session_id.as_deref()
@ -900,10 +973,16 @@ pub fn write_live_delta_replay_step_inputs(
let notification_bytes = http_records let notification_bytes = http_records
.get(notify_uri) .get(notify_uri)
.map(|record| record.bytes.as_slice()) .map(|record| record.bytes.as_slice())
.ok_or_else(|| format!("missing recorded target notification body for {notify_uri}"))?; .ok_or_else(|| {
format!("missing recorded target notification body for {notify_uri}")
})?;
let base_serial = base_lock.serial.expect("checked above"); let base_serial = base_lock.serial.expect("checked above");
let target_serial = target_record.last_serial.expect("checked above"); let target_serial = target_record.last_serial.expect("checked above");
let deltas = notification_deltas_after_serial(notification_bytes, base_serial, target_serial)?; let deltas = notification_deltas_after_serial(
notification_bytes,
base_serial,
target_serial,
)?;
let mut all_present = true; let mut all_present = true;
let session = target_record let session = target_record
.last_session_id .last_session_id
@ -913,8 +992,10 @@ pub fn write_live_delta_replay_step_inputs(
let notification_path = let notification_path =
session_dir.join(format!("notification-target-{target_serial}.xml")); session_dir.join(format!("notification-target-{target_serial}.xml"));
write_bytes(&notification_path, notification_bytes)?; write_bytes(&notification_path, notification_bytes)?;
let target_notification = parse_notification(notification_bytes) let target_notification =
.map_err(|e| format!("parse target notification failed for {notify_uri}: {e}"))?; parse_notification(notification_bytes).map_err(|e| {
format!("parse target notification failed for {notify_uri}: {e}")
})?;
let snapshot_hash_hex = hex::encode(target_notification.snapshot_hash_sha256); let snapshot_hash_hex = hex::encode(target_notification.snapshot_hash_sha256);
if let Some(snapshot_bytes) = http_records if let Some(snapshot_bytes) = http_records
.get(&target_notification.snapshot_uri) .get(&target_notification.snapshot_uri)
@ -928,9 +1009,13 @@ pub fn write_live_delta_replay_step_inputs(
let deltas_dir = session_dir.join("deltas"); let deltas_dir = session_dir.join("deltas");
let mut delta_serials = Vec::new(); let mut delta_serials = Vec::new();
for dref in &deltas { for dref in &deltas {
if let Some(delta_bytes) = http_records.get(&dref.uri).map(|record| record.bytes.as_slice()) { if let Some(delta_bytes) = http_records
.get(&dref.uri)
.map(|record| record.bytes.as_slice())
{
let hash = hex::encode(dref.hash_sha256); let hash = hex::encode(dref.hash_sha256);
let path = deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash)); let path =
deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash));
write_bytes(&path, delta_bytes)?; write_bytes(&path, delta_bytes)?;
delta_serials.push(dref.serial); delta_serials.push(dref.serial);
} else { } else {
@ -1014,8 +1099,17 @@ pub fn write_live_delta_replay_step_inputs(
(now.clone(), now) (now.clone(), now)
}); });
let bucket_hash = sha256_hex(module_uri.as_bytes()); let bucket_hash = sha256_hex(module_uri.as_bytes());
let bucket_dir = capture_root.join("rsync").join("modules").join(&bucket_hash); let bucket_dir = capture_root
materialize_rsync_module(&capture_root, &module_uri, objects, &created_at, &last_seen_at)?; .join("rsync")
.join("modules")
.join(&bucket_hash);
materialize_rsync_module(
&capture_root,
&module_uri,
objects,
&created_at,
&last_seen_at,
)?;
let files = objects.keys().cloned().collect::<Vec<_>>(); let files = objects.keys().cloned().collect::<Vec<_>>();
write_json( write_json(
&bucket_dir.join("files.json"), &bucket_dir.join("files.json"),
@ -1072,7 +1166,8 @@ pub fn write_live_delta_replay_bundle_inputs(
) -> Result<LiveDeltaCaptureSummary, String> { ) -> Result<LiveDeltaCaptureSummary, String> {
let base_archive_root = rir_dir.join("base-payload-archive"); let base_archive_root = rir_dir.join("base-payload-archive");
let base_locks_path = rir_dir.join("base-locks.json"); let base_locks_path = rir_dir.join("base-locks.json");
let base_index = ReplayArchiveIndex::load_allow_missing_rsync_modules(&base_archive_root, &base_locks_path) let base_index =
ReplayArchiveIndex::load_allow_missing_rsync_modules(&base_archive_root, &base_locks_path)
.map_err(|e| format!("load base replay index failed: {e}"))?; .map_err(|e| format!("load base replay index failed: {e}"))?;
let base_locks_bytes = fs::read(&base_locks_path) let base_locks_bytes = fs::read(&base_locks_path)
.map_err(|e| format!("read base locks failed: {}: {e}", base_locks_path.display()))?; .map_err(|e| format!("read base locks failed: {}: {e}", base_locks_path.display()))?;
@ -1118,10 +1213,15 @@ pub fn write_live_delta_replay_bundle_inputs(
for fetch in rsync_records.values() { for fetch in rsync_records.values() {
let module_uri = canonical_rsync_module(&fetch.requested_base_uri) let module_uri = canonical_rsync_module(&fetch.requested_base_uri)
.map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?; .map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?;
let objects = rsync_objects_by_module.entry(module_uri.clone()).or_default(); let objects = rsync_objects_by_module
let times = rsync_times_by_module .entry(module_uri.clone())
.entry(module_uri) .or_default();
.or_insert_with(|| (fetch.fetched_at_rfc3339_utc.clone(), fetch.fetched_at_rfc3339_utc.clone())); let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| {
(
fetch.fetched_at_rfc3339_utc.clone(),
fetch.fetched_at_rfc3339_utc.clone(),
)
});
if fetch.fetched_at_rfc3339_utc < times.0 { if fetch.fetched_at_rfc3339_utc < times.0 {
times.0 = fetch.fetched_at_rfc3339_utc.clone(); times.0 = fetch.fetched_at_rfc3339_utc.clone();
} }
@ -1139,22 +1239,31 @@ pub fn write_live_delta_replay_bundle_inputs(
let mut needed_modules = BTreeSet::new(); let mut needed_modules = BTreeSet::new();
for pp in publication_points { for pp in publication_points {
let module_uri = canonical_rsync_module(&pp.rsync_base_uri) let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?; format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() { if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() {
if !seen_notifications.insert(notify_uri.to_string()) { if !seen_notifications.insert(notify_uri.to_string()) {
continue; continue;
} }
let base_lock = base_index.rrdp_lock(notify_uri); let base_lock = base_index.rrdp_lock(notify_uri);
let target_record = store let target_record = store.get_rrdp_source_record(notify_uri).map_err(|e| {
.get_rrdp_source_record(notify_uri) format!("read target rrdp source record failed for {notify_uri}: {e}")
.map_err(|e| format!("read target rrdp source record failed for {notify_uri}: {e}"))?; })?;
let bucket_hash = sha256_hex(notify_uri.as_bytes()); let bucket_hash = sha256_hex(notify_uri.as_bytes());
let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash); let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash);
let (created_at, last_seen_at) = target_record let (created_at, last_seen_at) = target_record
.as_ref() .as_ref()
.map(|record| (record.first_seen_at.rfc3339_utc.clone(), record.last_seen_at.rfc3339_utc.clone())) .map(|record| {
(
record.first_seen_at.rfc3339_utc.clone(),
record.last_seen_at.rfc3339_utc.clone(),
)
})
.unwrap_or_else(|| { .unwrap_or_else(|| {
let now = recorded_at let now = recorded_at
.format(&Rfc3339) .format(&Rfc3339)
@ -1207,20 +1316,29 @@ pub fn write_live_delta_replay_bundle_inputs(
let notification_bytes = http_records let notification_bytes = http_records
.get(notify_uri) .get(notify_uri)
.map(|record| record.bytes.as_slice()) .map(|record| record.bytes.as_slice())
.ok_or_else(|| format!("missing recorded target notification body for {notify_uri}"))?; .ok_or_else(|| {
format!("missing recorded target notification body for {notify_uri}")
})?;
let base_serial = base_lock.serial.expect("checked above"); let base_serial = base_lock.serial.expect("checked above");
let target_serial = target_record.last_serial.expect("checked above"); let target_serial = target_record.last_serial.expect("checked above");
let deltas = notification_deltas_after_serial(notification_bytes, base_serial, target_serial)?; let deltas = notification_deltas_after_serial(
notification_bytes,
base_serial,
target_serial,
)?;
let mut all_present = true; let mut all_present = true;
let session = target_record let session = target_record
.last_session_id .last_session_id
.as_deref() .as_deref()
.ok_or_else(|| format!("missing target session for {notify_uri}"))?; .ok_or_else(|| format!("missing target session for {notify_uri}"))?;
let session_dir = bucket_dir.join(session); let session_dir = bucket_dir.join(session);
let notification_path = session_dir.join(format!("notification-target-{target_serial}.xml")); let notification_path =
session_dir.join(format!("notification-target-{target_serial}.xml"));
write_bytes(&notification_path, notification_bytes)?; write_bytes(&notification_path, notification_bytes)?;
let target_notification = parse_notification(notification_bytes) let target_notification =
.map_err(|e| format!("parse target notification failed for {notify_uri}: {e}"))?; parse_notification(notification_bytes).map_err(|e| {
format!("parse target notification failed for {notify_uri}: {e}")
})?;
let snapshot_hash_hex = hex::encode(target_notification.snapshot_hash_sha256); let snapshot_hash_hex = hex::encode(target_notification.snapshot_hash_sha256);
if let Some(snapshot_bytes) = http_records if let Some(snapshot_bytes) = http_records
.get(&target_notification.snapshot_uri) .get(&target_notification.snapshot_uri)
@ -1234,9 +1352,13 @@ pub fn write_live_delta_replay_bundle_inputs(
let deltas_dir = session_dir.join("deltas"); let deltas_dir = session_dir.join("deltas");
let mut delta_serials = Vec::new(); let mut delta_serials = Vec::new();
for dref in &deltas { for dref in &deltas {
if let Some(delta_bytes) = http_records.get(&dref.uri).map(|record| record.bytes.as_slice()) { if let Some(delta_bytes) = http_records
.get(&dref.uri)
.map(|record| record.bytes.as_slice())
{
let hash = hex::encode(dref.hash_sha256); let hash = hex::encode(dref.hash_sha256);
let path = deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash)); let path =
deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash));
write_bytes(&path, delta_bytes)?; write_bytes(&path, delta_bytes)?;
delta_serials.push(dref.serial); delta_serials.push(dref.serial);
} else { } else {
@ -1320,8 +1442,17 @@ pub fn write_live_delta_replay_bundle_inputs(
(now.clone(), now) (now.clone(), now)
}); });
let bucket_hash = sha256_hex(module_uri.as_bytes()); let bucket_hash = sha256_hex(module_uri.as_bytes());
let bucket_dir = capture_root.join("rsync").join("modules").join(&bucket_hash); let bucket_dir = capture_root
materialize_rsync_module(&capture_root, &module_uri, objects, &created_at, &last_seen_at)?; .join("rsync")
.join("modules")
.join(&bucket_hash);
materialize_rsync_module(
&capture_root,
&module_uri,
objects,
&created_at,
&last_seen_at,
)?;
let files = objects.keys().cloned().collect::<Vec<_>>(); let files = objects.keys().cloned().collect::<Vec<_>>();
write_json( write_json(
&bucket_dir.join("files.json"), &bucket_dir.join("files.json"),
@ -1402,7 +1533,12 @@ mod tests {
} }
} }
fn minimal_notification(notify_uri: &str, snapshot_uri: &str, session: &str, serial: u64) -> Vec<u8> { fn minimal_notification(
notify_uri: &str,
snapshot_uri: &str,
session: &str,
serial: u64,
) -> Vec<u8> {
format!( format!(
r#"<notification xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session}" serial="{serial}"><snapshot uri="{snapshot_uri}" hash="{}"/></notification>"#, r#"<notification xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session}" serial="{serial}"><snapshot uri="{snapshot_uri}" hash="{}"/></notification>"#,
sha256_hex(b"<snapshot/>") sha256_hex(b"<snapshot/>")
@ -1426,7 +1562,10 @@ mod tests {
#[test] #[test]
fn recording_rsync_fetcher_records_object_sets() { fn recording_rsync_fetcher_records_object_sets() {
let fetcher = RecordingRsyncFetcher::new(DummyRsyncFetcher { let fetcher = RecordingRsyncFetcher::new(DummyRsyncFetcher {
objects: vec![("rsync://example.test/repo/a.roa".to_string(), b"roa".to_vec())], objects: vec![(
"rsync://example.test/repo/a.roa".to_string(),
b"roa".to_vec(),
)],
}); });
let got = fetcher let got = fetcher
.fetch_objects("rsync://example.test/repo/") .fetch_objects("rsync://example.test/repo/")
@ -1480,8 +1619,10 @@ mod tests {
rrdp_notification_uri: Some(notify_uri.to_string()), rrdp_notification_uri: Some(notify_uri.to_string()),
source: "fresh".to_string(), source: "fresh".to_string(),
repo_sync_source: None, repo_sync_source: None,
repo_sync_phase: None,
repo_sync_duration_ms: None, repo_sync_duration_ms: None,
repo_sync_error: None, repo_sync_error: None,
repo_terminal_state: "fresh".to_string(),
this_update_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(), this_update_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(),
next_update_rfc3339_utc: "2026-03-30T01:00:00Z".to_string(), next_update_rfc3339_utc: "2026-03-30T01:00:00Z".to_string(),
verified_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(), verified_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(),
@ -1498,8 +1639,10 @@ mod tests {
rrdp_notification_uri: None, rrdp_notification_uri: None,
source: "fresh".to_string(), source: "fresh".to_string(),
repo_sync_source: None, repo_sync_source: None,
repo_sync_phase: None,
repo_sync_duration_ms: None, repo_sync_duration_ms: None,
repo_sync_error: None, repo_sync_error: None,
repo_terminal_state: "fresh".to_string(),
this_update_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(), this_update_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(),
next_update_rfc3339_utc: "2026-03-30T01:00:00Z".to_string(), next_update_rfc3339_utc: "2026-03-30T01:00:00Z".to_string(),
verified_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(), verified_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(),

View File

@ -9,17 +9,16 @@ pub use compare_view::{
}; };
pub use live_capture::{ pub use live_capture::{
LiveBaseCaptureSummary, LiveDeltaCaptureSummary, RecordedHttpResponse, RecordedRsyncFetch, LiveBaseCaptureSummary, LiveDeltaCaptureSummary, RecordedHttpResponse, RecordedRsyncFetch,
RecordingHttpFetcher, RecordingRsyncFetcher, write_live_base_replay_bundle_inputs, RecordingHttpFetcher, RecordingRsyncFetcher, write_current_replay_state_locks,
write_live_delta_replay_bundle_inputs, write_live_delta_replay_step_inputs, write_live_base_replay_bundle_inputs, write_live_delta_replay_bundle_inputs,
write_current_replay_state_locks, write_live_delta_replay_step_inputs,
}; };
pub use record_io::{ pub use record_io::{
build_single_rir_bundle_manifest, copy_dir_all, load_validation_time, sha256_hex, build_single_rir_bundle_manifest, copy_dir_all, load_validation_time, sha256_hex, write_bytes,
write_bytes, write_json, write_live_bundle_rir_readme, write_live_bundle_top_readme, write_json, write_live_bundle_rir_readme, write_live_bundle_top_readme, write_timing_json,
write_timing_json,
}; };
pub use spec::{BundleManifest, BundleManifestEntry, RirBundleMetadata};
pub use spec::{ pub use spec::{
BaseBundleStateMetadataV2, BundleManifestEntryV2, BundleManifestV2, DeltaSequenceMetadataV2, BaseBundleStateMetadataV2, BundleManifestEntryV2, BundleManifestV2, DeltaSequenceMetadataV2,
DeltaStepMetadataV2, RirBundleMetadataV2, DeltaStepMetadataV2, RirBundleMetadataV2,
}; };
pub use spec::{BundleManifest, BundleManifestEntry, RirBundleMetadata};

View File

@ -81,7 +81,9 @@ pub fn write_bytes(path: &Path, bytes: &[u8]) -> Result<(), String> {
pub fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> { pub fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(dst) fs::create_dir_all(dst)
.map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?; .map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
for entry in fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))? { for entry in
fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))?
{
let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", src.display()))?; let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", src.display()))?;
let ty = entry let ty = entry
.file_type() .file_type()
@ -94,8 +96,13 @@ pub fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(parent) fs::create_dir_all(parent)
.map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?; .map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
} }
fs::copy(entry.path(), &to) fs::copy(entry.path(), &to).map_err(|e| {
.map_err(|e| format!("copy failed: {} -> {}: {e}", entry.path().display(), to.display()))?; format!(
"copy failed: {} -> {}: {e}",
entry.path().display(),
to.display()
)
})?;
} }
} }
Ok(()) Ok(())
@ -197,7 +204,10 @@ mod tests {
.expect("manifest"); .expect("manifest");
assert_eq!(manifest.schema_version, "20260330-v1"); assert_eq!(manifest.schema_version, "20260330-v1");
assert_eq!(manifest.rirs, vec!["apnic".to_string()]); assert_eq!(manifest.rirs, vec!["apnic".to_string()]);
assert_eq!(manifest.per_rir_bundles[0].base_validation_time, "2026-04-01T00:00:00Z"); assert_eq!(
manifest.per_rir_bundles[0].base_validation_time,
"2026-04-01T00:00:00Z"
);
assert_eq!( assert_eq!(
manifest.per_rir_bundles[0].delta_validation_time.as_deref(), manifest.per_rir_bundles[0].delta_validation_time.as_deref(),
Some("2026-04-01T00:10:00Z") Some("2026-04-01T00:10:00Z")
@ -255,14 +265,8 @@ mod tests {
#[test] #[test]
fn build_single_rir_bundle_manifest_supports_none_delta_time() { fn build_single_rir_bundle_manifest_supports_none_delta_time() {
let base = time::OffsetDateTime::parse("2026-04-01T00:00:00Z", &Rfc3339).expect("base"); let base = time::OffsetDateTime::parse("2026-04-01T00:00:00Z", &Rfc3339).expect("base");
let manifest = build_single_rir_bundle_manifest( let manifest =
"20260330-v1", build_single_rir_bundle_manifest("20260330-v1", "ours", "afrinic", &base, None, false)
"ours",
"afrinic",
&base,
None,
false,
)
.expect("manifest"); .expect("manifest");
assert_eq!(manifest.per_rir_bundles[0].delta_validation_time, None); assert_eq!(manifest.per_rir_bundles[0].delta_validation_time, None);
assert!(!manifest.per_rir_bundles[0].has_aspa); assert!(!manifest.per_rir_bundles[0].has_aspa);

View File

@ -19,7 +19,10 @@ pub struct BundleManifestEntry {
pub relative_path: String, pub relative_path: String,
#[serde(rename = "baseValidationTime")] #[serde(rename = "baseValidationTime")]
pub base_validation_time: String, pub base_validation_time: String,
#[serde(rename = "deltaValidationTime", skip_serializing_if = "Option::is_none")] #[serde(
rename = "deltaValidationTime",
skip_serializing_if = "Option::is_none"
)]
pub delta_validation_time: Option<String>, pub delta_validation_time: Option<String>,
#[serde(rename = "hasAspa")] #[serde(rename = "hasAspa")]
pub has_aspa: bool, pub has_aspa: bool,
@ -34,7 +37,10 @@ pub struct RirBundleMetadata {
pub rir: String, pub rir: String,
#[serde(rename = "baseValidationTime")] #[serde(rename = "baseValidationTime")]
pub base_validation_time: String, pub base_validation_time: String,
#[serde(rename = "deltaValidationTime", skip_serializing_if = "Option::is_none")] #[serde(
rename = "deltaValidationTime",
skip_serializing_if = "Option::is_none"
)]
pub delta_validation_time: Option<String>, pub delta_validation_time: Option<String>,
#[serde(rename = "talSha256")] #[serde(rename = "talSha256")]
pub tal_sha256: String, pub tal_sha256: String,
@ -79,9 +85,15 @@ pub struct BundleManifestEntryV2 {
pub base_validation_time: String, pub base_validation_time: String,
#[serde(rename = "stepCount")] #[serde(rename = "stepCount")]
pub step_count: usize, pub step_count: usize,
#[serde(rename = "firstDeltaValidationTime", skip_serializing_if = "Option::is_none")] #[serde(
rename = "firstDeltaValidationTime",
skip_serializing_if = "Option::is_none"
)]
pub first_delta_validation_time: Option<String>, pub first_delta_validation_time: Option<String>,
#[serde(rename = "lastDeltaValidationTime", skip_serializing_if = "Option::is_none")] #[serde(
rename = "lastDeltaValidationTime",
skip_serializing_if = "Option::is_none"
)]
pub last_delta_validation_time: Option<String>, pub last_delta_validation_time: Option<String>,
#[serde(rename = "hasAspa")] #[serde(rename = "hasAspa")]
pub has_aspa: bool, pub has_aspa: bool,

View File

@ -12,12 +12,13 @@ use crate::ccr::model::{
AspaPayloadSet, AspaPayloadState, ManifestInstance, ManifestState, RoaPayloadSet, AspaPayloadSet, AspaPayloadState, ManifestInstance, ManifestState, RoaPayloadSet,
RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState, TrustAnchorState, RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState, TrustAnchorState,
}; };
use crate::blob_store::RawObjectStore;
use crate::data_model::manifest::ManifestObject; use crate::data_model::manifest::ManifestObject;
use crate::data_model::rc::{AccessDescription, SubjectInfoAccess}; use crate::data_model::rc::{AccessDescription, SubjectInfoAccess};
use crate::data_model::roa::RoaAfi; use crate::data_model::roa::RoaAfi;
use crate::data_model::router_cert::BgpsecRouterCertificate; use crate::data_model::router_cert::BgpsecRouterCertificate;
use crate::data_model::ta::TrustAnchor; use crate::data_model::ta::TrustAnchor;
use crate::storage::{RocksStore, VcirArtifactRole, ValidatedCaInstanceResult}; use crate::storage::{RocksStore, ValidatedCaInstanceResult, VcirArtifactRole};
use crate::validation::objects::{AspaAttestation, Vrp}; use crate::validation::objects::{AspaAttestation, Vrp};
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
@ -41,13 +42,22 @@ pub enum CcrBuildError {
MissingManifestArtifact(String), MissingManifestArtifact(String),
#[error("manifest raw bytes missing in store for {manifest_rsync_uri}: {sha256_hex}")] #[error("manifest raw bytes missing in store for {manifest_rsync_uri}: {sha256_hex}")]
MissingManifestRawBytes { manifest_rsync_uri: String, sha256_hex: String }, MissingManifestRawBytes {
manifest_rsync_uri: String,
sha256_hex: String,
},
#[error("manifest raw bytes load failed for {manifest_rsync_uri}: {detail}")] #[error("manifest raw bytes load failed for {manifest_rsync_uri}: {detail}")]
LoadManifestRawBytes { manifest_rsync_uri: String, detail: String }, LoadManifestRawBytes {
manifest_rsync_uri: String,
detail: String,
},
#[error("manifest decode failed for {manifest_rsync_uri}: {detail}")] #[error("manifest decode failed for {manifest_rsync_uri}: {detail}")]
ManifestDecode { manifest_rsync_uri: String, detail: String }, ManifestDecode {
manifest_rsync_uri: String,
detail: String,
},
#[error("manifest EE certificate missing AuthorityKeyIdentifier: {0}")] #[error("manifest EE certificate missing AuthorityKeyIdentifier: {0}")]
ManifestEeMissingAki(String), ManifestEeMissingAki(String),
@ -99,8 +109,8 @@ pub fn build_roa_payload_state(vrps: &[Vrp]) -> Result<RoaPayloadState, CcrBuild
}); });
} }
let payload_der = let payload_der = encode_roa_payload_state_payload_der(&rps)
encode_roa_payload_state_payload_der(&rps).map_err(|e| CcrBuildError::RoaEncode(e.to_string()))?; .map_err(|e| CcrBuildError::RoaEncode(e.to_string()))?;
Ok(RoaPayloadState { Ok(RoaPayloadState {
rps, rps,
hash: compute_state_hash(&payload_der), hash: compute_state_hash(&payload_der),
@ -176,10 +186,12 @@ pub fn build_manifest_state_from_vcirs(
artifact.artifact_role == VcirArtifactRole::Manifest artifact.artifact_role == VcirArtifactRole::Manifest
&& artifact.uri.as_deref() == Some(vcir.current_manifest_rsync_uri.as_str()) && artifact.uri.as_deref() == Some(vcir.current_manifest_rsync_uri.as_str())
}) })
.ok_or_else(|| CcrBuildError::MissingManifestArtifact(vcir.current_manifest_rsync_uri.clone()))?; .ok_or_else(|| {
CcrBuildError::MissingManifestArtifact(vcir.current_manifest_rsync_uri.clone())
})?;
let raw_entry = store let raw_bytes = store
.get_raw_by_hash_entry(&manifest_artifact.sha256) .get_blob_bytes(&manifest_artifact.sha256)
.map_err(|e| CcrBuildError::LoadManifestRawBytes { .map_err(|e| CcrBuildError::LoadManifestRawBytes {
manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(), manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(),
detail: e.to_string(), detail: e.to_string(),
@ -189,9 +201,11 @@ pub fn build_manifest_state_from_vcirs(
sha256_hex: manifest_artifact.sha256.clone(), sha256_hex: manifest_artifact.sha256.clone(),
})?; })?;
let manifest = ManifestObject::decode_der(&raw_entry.bytes).map_err(|e| CcrBuildError::ManifestDecode { let manifest = ManifestObject::decode_der(&raw_bytes).map_err(|e| {
CcrBuildError::ManifestDecode {
manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(), manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(),
detail: e.to_string(), detail: e.to_string(),
}
})?; })?;
let ee = &manifest.signed_object.signed_data.certificates[0].resource_cert; let ee = &manifest.signed_object.signed_data.certificates[0].resource_cert;
@ -200,13 +214,17 @@ pub fn build_manifest_state_from_vcirs(
.extensions .extensions
.authority_key_identifier .authority_key_identifier
.clone() .clone()
.ok_or_else(|| CcrBuildError::ManifestEeMissingAki(vcir.current_manifest_rsync_uri.clone()))?; .ok_or_else(|| {
CcrBuildError::ManifestEeMissingAki(vcir.current_manifest_rsync_uri.clone())
})?;
let sia = ee let sia = ee
.tbs .tbs
.extensions .extensions
.subject_info_access .subject_info_access
.as_ref() .as_ref()
.ok_or_else(|| CcrBuildError::ManifestEeMissingSia(vcir.current_manifest_rsync_uri.clone()))?; .ok_or_else(|| {
CcrBuildError::ManifestEeMissingSia(vcir.current_manifest_rsync_uri.clone())
})?;
let locations = match sia { let locations = match sia {
SubjectInfoAccess::Ee(ee_sia) => ee_sia SubjectInfoAccess::Ee(ee_sia) => ee_sia
.access_descriptions .access_descriptions
@ -216,7 +234,7 @@ pub fn build_manifest_state_from_vcirs(
SubjectInfoAccess::Ca(_) => { SubjectInfoAccess::Ca(_) => {
return Err(CcrBuildError::ManifestEeSiaWrongVariant( return Err(CcrBuildError::ManifestEeSiaWrongVariant(
vcir.current_manifest_rsync_uri.clone(), vcir.current_manifest_rsync_uri.clone(),
)) ));
} }
}; };
@ -234,11 +252,14 @@ pub fn build_manifest_state_from_vcirs(
} }
let instance = ManifestInstance { let instance = ManifestInstance {
hash: sha2::Sha256::digest(&raw_entry.bytes).to_vec(), hash: sha2::Sha256::digest(&raw_bytes).to_vec(),
size: raw_entry.bytes.len() as u64, size: raw_bytes.len() as u64,
aki, aki,
manifest_number: crate::data_model::common::BigUnsigned { manifest_number: crate::data_model::common::BigUnsigned {
bytes_be: vcir.validated_manifest_meta.validated_manifest_number.clone(), bytes_be: vcir
.validated_manifest_meta
.validated_manifest_number
.clone(),
}, },
this_update, this_update,
locations, locations,
@ -292,7 +313,10 @@ fn encode_access_description_der(ad: &AccessDescription) -> Result<Vec<u8>, CcrB
fn encode_oid_from_string(oid: &str) -> Result<Vec<u8>, CcrBuildError> { fn encode_oid_from_string(oid: &str) -> Result<Vec<u8>, CcrBuildError> {
let arcs = oid let arcs = oid
.split('.') .split('.')
.map(|part| part.parse::<u64>().map_err(|_| CcrBuildError::UnsupportedAccessMethodOid(oid.to_string()))) .map(|part| {
part.parse::<u64>()
.map_err(|_| CcrBuildError::UnsupportedAccessMethodOid(oid.to_string()))
})
.collect::<Result<Vec<_>, _>>()?; .collect::<Result<Vec<_>, _>>()?;
if arcs.len() < 2 { if arcs.len() < 2 {
return Err(CcrBuildError::UnsupportedAccessMethodOid(oid.to_string())); return Err(CcrBuildError::UnsupportedAccessMethodOid(oid.to_string()));
@ -471,7 +495,9 @@ fn encode_length(len: usize, out: &mut Vec<u8>) {
mod tests { mod tests {
use super::*; use super::*;
use crate::ccr::decode::decode_content_info; use crate::ccr::decode::decode_content_info;
use crate::ccr::encode::{encode_aspa_payload_state, encode_content_info, encode_trust_anchor_state}; use crate::ccr::encode::{
encode_aspa_payload_state, encode_content_info, encode_trust_anchor_state,
};
use crate::ccr::model::{CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation}; use crate::ccr::model::{CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation};
use crate::data_model::roa::{IpPrefix, RoaAfi}; use crate::data_model::roa::{IpPrefix, RoaAfi};
use crate::data_model::ta::TrustAnchor; use crate::data_model::ta::TrustAnchor;
@ -561,13 +587,19 @@ mod tests {
ca_subject_name: "CN=test".to_string(), ca_subject_name: "CN=test".to_string(),
ca_ski: "11".repeat(20), ca_ski: "11".repeat(20),
issuer_ski: "22".repeat(20), issuer_ski: "22".repeat(20),
last_successful_validation_time: crate::storage::PackTime::from_utc_offset_datetime(now), last_successful_validation_time: crate::storage::PackTime::from_utc_offset_datetime(
now,
),
current_manifest_rsync_uri: manifest_uri.to_string(), current_manifest_rsync_uri: manifest_uri.to_string(),
current_crl_rsync_uri: format!("{manifest_uri}.crl"), current_crl_rsync_uri: format!("{manifest_uri}.crl"),
validated_manifest_meta: crate::storage::ValidatedManifestMeta { validated_manifest_meta: crate::storage::ValidatedManifestMeta {
validated_manifest_number: manifest.manifest.manifest_number.bytes_be.clone(), validated_manifest_number: manifest.manifest.manifest_number.bytes_be.clone(),
validated_manifest_this_update: crate::storage::PackTime::from_utc_offset_datetime(now), validated_manifest_this_update: crate::storage::PackTime::from_utc_offset_datetime(
validated_manifest_next_update: crate::storage::PackTime::from_utc_offset_datetime(next), now,
),
validated_manifest_next_update: crate::storage::PackTime::from_utc_offset_datetime(
next,
),
}, },
instance_gate: crate::storage::VcirInstanceGate { instance_gate: crate::storage::VcirInstanceGate {
manifest_next_update: crate::storage::PackTime::from_utc_offset_datetime(next), manifest_next_update: crate::storage::PackTime::from_utc_offset_datetime(next),
@ -585,7 +617,9 @@ mod tests {
child_rrdp_notification_uri: None, child_rrdp_notification_uri: None,
child_effective_ip_resources: None, child_effective_ip_resources: None,
child_effective_as_resources: None, child_effective_as_resources: None,
accepted_at_validation_time: crate::storage::PackTime::from_utc_offset_datetime(now), accepted_at_validation_time: crate::storage::PackTime::from_utc_offset_datetime(
now,
),
}], }],
local_outputs: Vec::new(), local_outputs: Vec::new(),
related_artifacts: vec![crate::storage::VcirRelatedArtifact { related_artifacts: vec![crate::storage::VcirRelatedArtifact {
@ -617,21 +651,29 @@ mod tests {
fn build_manifest_state_from_vcirs_collects_current_manifests_and_hashes_payload() { fn build_manifest_state_from_vcirs_collects_current_manifests_and_hashes_payload() {
let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")); let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let manifest_a = std::fs::read(base.join("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft")).expect("read manifest a"); let manifest_a = std::fs::read(base.join("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft")).expect("read manifest a");
let manifest_b = std::fs::read(base.join("tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/bW-_qXU9uNhGQz21NR2ansB8lr0.mft")).expect("read manifest b"); let manifest_b = std::fs::read(base.join(
let vcir_a = sample_manifest_vcir("rsync://example.test/a.mft", &manifest_a, &"33".repeat(20)); "tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/bW-_qXU9uNhGQz21NR2ansB8lr0.mft",
let vcir_b = sample_manifest_vcir("rsync://example.test/b.mft", &manifest_b, &"44".repeat(20)); ))
.expect("read manifest b");
let vcir_a =
sample_manifest_vcir("rsync://example.test/a.mft", &manifest_a, &"33".repeat(20));
let vcir_b =
sample_manifest_vcir("rsync://example.test/b.mft", &manifest_b, &"44".repeat(20));
let store_dir = tempfile::tempdir().expect("tempdir"); let store_dir = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb"); let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
for (vcir, bytes) in [(&vcir_a, &manifest_a), (&vcir_b, &manifest_b)] { for (vcir, bytes) in [(&vcir_a, &manifest_a), (&vcir_b, &manifest_b)] {
let artifact = &vcir.related_artifacts[0]; let artifact = &vcir.related_artifacts[0];
let mut raw = crate::storage::RawByHashEntry::from_bytes(artifact.sha256.clone(), bytes.to_vec()); let mut raw =
raw.origin_uris.push(vcir.current_manifest_rsync_uri.clone()); crate::storage::RawByHashEntry::from_bytes(artifact.sha256.clone(), bytes.to_vec());
raw.origin_uris
.push(vcir.current_manifest_rsync_uri.clone());
raw.object_type = Some("mft".to_string()); raw.object_type = Some("mft".to_string());
raw.encoding = Some("der".to_string()); raw.encoding = Some("der".to_string());
store.put_raw_by_hash_entry(&raw).expect("put raw manifest"); store.put_raw_by_hash_entry(&raw).expect("put raw manifest");
} }
let state = build_manifest_state_from_vcirs(&store, &[vcir_a.clone(), vcir_b.clone()]).expect("build manifest state"); let state = build_manifest_state_from_vcirs(&store, &[vcir_a.clone(), vcir_b.clone()])
.expect("build manifest state");
assert_eq!(state.mis.len(), 2); assert_eq!(state.mis.len(), 2);
assert!(state.mis[0].hash < state.mis[1].hash); assert!(state.mis[0].hash < state.mis[1].hash);
let expected_subordinates = [ let expected_subordinates = [
@ -647,13 +689,24 @@ mod tests {
mi.subordinates[0].clone() mi.subordinates[0].clone()
}) })
.collect::<std::collections::BTreeSet<_>>(); .collect::<std::collections::BTreeSet<_>>();
let expected_subordinates = expected_subordinates.into_iter().collect::<std::collections::BTreeSet<_>>(); let expected_subordinates = expected_subordinates
.into_iter()
.collect::<std::collections::BTreeSet<_>>();
assert_eq!(actual_subordinates, expected_subordinates); assert_eq!(actual_subordinates, expected_subordinates);
let payload_der = encode_manifest_state_payload_der(&state.mis).expect("encode mis payload"); let payload_der =
encode_manifest_state_payload_der(&state.mis).expect("encode mis payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der)); assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
let max_time = [ let max_time = [
vcir_a.validated_manifest_meta.validated_manifest_this_update.parse().unwrap(), vcir_a
vcir_b.validated_manifest_meta.validated_manifest_this_update.parse().unwrap(), .validated_manifest_meta
.validated_manifest_this_update
.parse()
.unwrap(),
vcir_b
.validated_manifest_meta
.validated_manifest_this_update
.parse()
.unwrap(),
] ]
.into_iter() .into_iter()
.max() .max()
@ -668,7 +721,8 @@ mod tests {
let state = build_manifest_state_from_vcirs(&store, &[]).expect("empty manifest state"); let state = build_manifest_state_from_vcirs(&store, &[]).expect("empty manifest state");
assert!(state.mis.is_empty()); assert!(state.mis.is_empty());
assert_eq!(state.most_recent_update, time::OffsetDateTime::UNIX_EPOCH); assert_eq!(state.most_recent_update, time::OffsetDateTime::UNIX_EPOCH);
let payload_der = encode_manifest_state_payload_der(&state.mis).expect("encode mis payload"); let payload_der =
encode_manifest_state_payload_der(&state.mis).expect("encode mis payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der)); assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
} }
@ -687,7 +741,8 @@ mod tests {
assert_eq!(state.rksets[0].router_keys.len(), 2); assert_eq!(state.rksets[0].router_keys.len(), 2);
assert_eq!(state.rksets[1].router_keys.len(), 2); assert_eq!(state.rksets[1].router_keys.len(), 2);
assert!(state.rksets[0].router_keys[0].ski <= state.rksets[0].router_keys[1].ski); assert!(state.rksets[0].router_keys[0].ski <= state.rksets[0].router_keys[1].ski);
let payload_der = encode_router_key_state_payload_der(&state.rksets).expect("encode rk payload"); let payload_der =
encode_router_key_state_payload_der(&state.rksets).expect("encode rk payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der)); assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
} }
@ -695,7 +750,8 @@ mod tests {
fn build_router_key_state_empty_is_valid_and_hashes_empty_sequence() { fn build_router_key_state_empty_is_valid_and_hashes_empty_sequence() {
let state = build_router_key_state(&[]).expect("empty router key state"); let state = build_router_key_state(&[]).expect("empty router key state");
assert!(state.rksets.is_empty()); assert!(state.rksets.is_empty());
let payload_der = encode_router_key_state_payload_der(&state.rksets).expect("encode rk payload"); let payload_der =
encode_router_key_state_payload_der(&state.rksets).expect("encode rk payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der)); assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
} }
@ -706,7 +762,12 @@ mod tests {
sample_vrp_v4(64496, [10, 0, 0, 0], 8, 8), sample_vrp_v4(64496, [10, 0, 0, 0], 8, 8),
sample_vrp_v4(64496, [10, 0, 0, 0], 8, 8), sample_vrp_v4(64496, [10, 0, 0, 0], 8, 8),
sample_vrp_v4(64496, [10, 1, 0, 0], 16, 24), sample_vrp_v4(64496, [10, 1, 0, 0], 16, 24),
sample_vrp_v6(64496, [0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,0], 32, 48), sample_vrp_v6(
64496,
[0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
32,
48,
),
]; ];
let state = build_roa_payload_state(&vrps).expect("build roa state"); let state = build_roa_payload_state(&vrps).expect("build roa state");
assert_eq!(state.rps.len(), 2); assert_eq!(state.rps.len(), 2);
@ -721,7 +782,7 @@ mod tests {
assert_eq!(entries4[0], (8, vec![10], None)); assert_eq!(entries4[0], (8, vec![10], None));
assert_eq!(entries4[1], (16, vec![10, 1], Some(24))); assert_eq!(entries4[1], (16, vec![10, 1], Some(24)));
assert_eq!(entries6.len(), 1); assert_eq!(entries6.len(), 1);
assert_eq!(entries6[0], (32, vec![0x20,0x01,0x0d,0xb8], Some(48))); assert_eq!(entries6[0], (32, vec![0x20, 0x01, 0x0d, 0xb8], Some(48)));
let payload_der = encode_roa_payload_state_payload_der(&state.rps).expect("encode payload"); let payload_der = encode_roa_payload_state_payload_der(&state.rps).expect("encode payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der)); assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
} }
@ -737,9 +798,18 @@ mod tests {
#[test] #[test]
fn build_aspa_payload_state_merges_and_sorts() { fn build_aspa_payload_state_merges_and_sorts() {
let aspas = vec![ let aspas = vec![
AspaAttestation { customer_as_id: 64497, provider_as_ids: vec![65002] }, AspaAttestation {
AspaAttestation { customer_as_id: 64496, provider_as_ids: vec![65003, 65001] }, customer_as_id: 64497,
AspaAttestation { customer_as_id: 64496, provider_as_ids: vec![65002, 65001] }, provider_as_ids: vec![65002],
},
AspaAttestation {
customer_as_id: 64496,
provider_as_ids: vec![65003, 65001],
},
AspaAttestation {
customer_as_id: 64496,
provider_as_ids: vec![65002, 65001],
},
]; ];
let state = build_aspa_payload_state(&aspas).expect("build aspa state"); let state = build_aspa_payload_state(&aspas).expect("build aspa state");
assert_eq!(state.aps.len(), 2); assert_eq!(state.aps.len(), 2);
@ -747,8 +817,8 @@ mod tests {
assert_eq!(state.aps[0].providers, vec![65001, 65002, 65003]); assert_eq!(state.aps[0].providers, vec![65001, 65002, 65003]);
assert_eq!(state.aps[1].customer_as_id, 64497); assert_eq!(state.aps[1].customer_as_id, 64497);
let encoded = encode_aspa_payload_state(&state).expect("encode aspa state"); let encoded = encode_aspa_payload_state(&state).expect("encode aspa state");
let decoded = decode_content_info(&encode_content_info(&CcrContentInfo::new( let decoded = decode_content_info(
RpkiCanonicalCacheRepresentation { &encode_content_info(&CcrContentInfo::new(RpkiCanonicalCacheRepresentation {
version: 0, version: 0,
hash_alg: CcrDigestAlgorithm::Sha256, hash_alg: CcrDigestAlgorithm::Sha256,
produced_at: time::OffsetDateTime::now_utc(), produced_at: time::OffsetDateTime::now_utc(),
@ -757,9 +827,10 @@ mod tests {
vaps: Some(state.clone()), vaps: Some(state.clone()),
tas: None, tas: None,
rks: None, rks: None,
}, }))
)) .expect("encode ccr"),
.expect("encode ccr")).expect("decode ccr"); )
.expect("decode ccr");
assert_eq!(decoded.content.vaps, Some(state)); assert_eq!(decoded.content.vaps, Some(state));
assert!(!encoded.is_empty()); assert!(!encoded.is_empty());
} }
@ -778,7 +849,8 @@ mod tests {
.expect("build ta state"); .expect("build ta state");
assert_eq!(state.skis.len(), 2); assert_eq!(state.skis.len(), 2);
assert!(state.skis[0] < state.skis[1]); assert!(state.skis[0] < state.skis[1]);
let payload_der = encode_trust_anchor_state_payload_der(&state.skis).expect("encode ta payload"); let payload_der =
encode_trust_anchor_state_payload_der(&state.skis).expect("encode ta payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der)); assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
let encoded = encode_trust_anchor_state(&state).expect("encode ta state"); let encoded = encode_trust_anchor_state(&state).expect("encode ta state");
assert!(!encoded.is_empty()); assert!(!encoded.is_empty());
@ -793,7 +865,11 @@ mod tests {
"tests/fixtures/tal/apnic-rfc7730-https.tal", "tests/fixtures/tal/apnic-rfc7730-https.tal",
"tests/fixtures/ta/apnic-ta.cer", "tests/fixtures/ta/apnic-ta.cer",
); );
ta.ta_certificate.rc_ca.tbs.extensions.subject_key_identifier = None; ta.ta_certificate
.rc_ca
.tbs
.extensions
.subject_key_identifier = None;
let err = build_trust_anchor_state(&[ta]).expect_err("missing ski must fail"); let err = build_trust_anchor_state(&[ta]).expect_err("missing ski must fail");
assert!(err.to_string().contains("SubjectKeyIdentifier"), "{err}"); assert!(err.to_string().contains("SubjectKeyIdentifier"), "{err}");
} }

View File

@ -13,10 +13,16 @@ pub enum CcrDecodeError {
Parse(String), Parse(String),
#[error("unexpected contentType OID: expected {expected}, got {actual}")] #[error("unexpected contentType OID: expected {expected}, got {actual}")]
UnexpectedContentType { expected: &'static str, actual: String }, UnexpectedContentType {
expected: &'static str,
actual: String,
},
#[error("unexpected digest algorithm OID: expected {expected}, got {actual}")] #[error("unexpected digest algorithm OID: expected {expected}, got {actual}")]
UnexpectedDigestAlgorithm { expected: &'static str, actual: String }, UnexpectedDigestAlgorithm {
expected: &'static str,
actual: String,
},
#[error("CCR model validation failed after decode: {0}")] #[error("CCR model validation failed after decode: {0}")]
Validate(String), Validate(String),
@ -26,7 +32,9 @@ pub fn decode_content_info(der: &[u8]) -> Result<CcrContentInfo, CcrDecodeError>
let mut top = DerReader::new(der); let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() { if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ContentInfo".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after ContentInfo".into(),
));
} }
let content_type_raw = seq.take_tag(0x06).map_err(CcrDecodeError::Parse)?; let content_type_raw = seq.take_tag(0x06).map_err(CcrDecodeError::Parse)?;
if content_type_raw != OID_CT_RPKI_CCR_RAW { if content_type_raw != OID_CT_RPKI_CCR_RAW {
@ -37,7 +45,9 @@ pub fn decode_content_info(der: &[u8]) -> Result<CcrContentInfo, CcrDecodeError>
} }
let inner = seq.take_tag(0xA0).map_err(CcrDecodeError::Parse)?; let inner = seq.take_tag(0xA0).map_err(CcrDecodeError::Parse)?;
if !seq.is_empty() { if !seq.is_empty() {
return Err(CcrDecodeError::Parse("trailing fields in ContentInfo".into())); return Err(CcrDecodeError::Parse(
"trailing fields in ContentInfo".into(),
));
} }
let content = decode_ccr(inner)?; let content = decode_ccr(inner)?;
let ci = CcrContentInfo::new(content); let ci = CcrContentInfo::new(content);
@ -87,7 +97,7 @@ pub fn decode_ccr(der: &[u8]) -> Result<RpkiCanonicalCacheRepresentation, CcrDec
_ => { _ => {
return Err(CcrDecodeError::Parse(format!( return Err(CcrDecodeError::Parse(format!(
"unexpected CCR field tag 0x{tag:02X}" "unexpected CCR field tag 0x{tag:02X}"
))) )));
} }
} }
} }
@ -110,7 +120,9 @@ fn decode_manifest_state(explicit_der: &[u8]) -> Result<ManifestState, CcrDecode
let mut outer = DerReader::new(explicit_der); let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() { if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ManifestState".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after ManifestState".into(),
));
} }
let mis_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let mis_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut mis_reader = DerReader::new(mis_der); let mut mis_reader = DerReader::new(mis_der);
@ -119,30 +131,50 @@ fn decode_manifest_state(explicit_der: &[u8]) -> Result<ManifestState, CcrDecode
let (_tag, full, _value) = mis_reader.take_any_full().map_err(CcrDecodeError::Parse)?; let (_tag, full, _value) = mis_reader.take_any_full().map_err(CcrDecodeError::Parse)?;
mis.push(decode_manifest_instance(full)?); mis.push(decode_manifest_instance(full)?);
} }
let most_recent_update = parse_generalized_time(seq.take_tag(0x18).map_err(CcrDecodeError::Parse)?)?; let most_recent_update =
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec(); parse_generalized_time(seq.take_tag(0x18).map_err(CcrDecodeError::Parse)?)?;
let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
if !seq.is_empty() { if !seq.is_empty() {
return Err(CcrDecodeError::Parse("trailing fields in ManifestState".into())); return Err(CcrDecodeError::Parse(
"trailing fields in ManifestState".into(),
));
} }
Ok(ManifestState { mis, most_recent_update, hash }) Ok(ManifestState {
mis,
most_recent_update,
hash,
})
} }
fn decode_manifest_instance(der: &[u8]) -> Result<ManifestInstance, CcrDecodeError> { fn decode_manifest_instance(der: &[u8]) -> Result<ManifestInstance, CcrDecodeError> {
let mut top = DerReader::new(der); let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() { if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ManifestInstance".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after ManifestInstance".into(),
));
} }
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec(); let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
let size = seq.take_uint_u64().map_err(CcrDecodeError::Parse)?; let size = seq.take_uint_u64().map_err(CcrDecodeError::Parse)?;
let aki = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec(); let aki = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
let manifest_number = decode_big_unsigned(seq.take_tag(0x02).map_err(CcrDecodeError::Parse)?)?; let manifest_number = decode_big_unsigned(seq.take_tag(0x02).map_err(CcrDecodeError::Parse)?)?;
let this_update = parse_generalized_time(seq.take_tag(0x18).map_err(CcrDecodeError::Parse)?)?; let this_update = parse_generalized_time(seq.take_tag(0x18).map_err(CcrDecodeError::Parse)?)?;
let locations_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let locations_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut locations_reader = DerReader::new(locations_der); let mut locations_reader = DerReader::new(locations_der);
let mut locations = Vec::new(); let mut locations = Vec::new();
while !locations_reader.is_empty() { while !locations_reader.is_empty() {
let (_tag, full, _value) = locations_reader.take_any_full().map_err(CcrDecodeError::Parse)?; let (_tag, full, _value) = locations_reader
.take_any_full()
.map_err(CcrDecodeError::Parse)?;
locations.push(full.to_vec()); locations.push(full.to_vec());
} }
let subordinates = if !seq.is_empty() { let subordinates = if !seq.is_empty() {
@ -150,20 +182,35 @@ fn decode_manifest_instance(der: &[u8]) -> Result<ManifestInstance, CcrDecodeErr
let mut reader = DerReader::new(subordinate_der); let mut reader = DerReader::new(subordinate_der);
let mut out = Vec::new(); let mut out = Vec::new();
while !reader.is_empty() { while !reader.is_empty() {
out.push(reader.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec()); out.push(
reader
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec(),
);
} }
out out
} else { } else {
Vec::new() Vec::new()
}; };
Ok(ManifestInstance { hash, size, aki, manifest_number, this_update, locations, subordinates }) Ok(ManifestInstance {
hash,
size,
aki,
manifest_number,
this_update,
locations,
subordinates,
})
} }
fn decode_roa_payload_state(explicit_der: &[u8]) -> Result<RoaPayloadState, CcrDecodeError> { fn decode_roa_payload_state(explicit_der: &[u8]) -> Result<RoaPayloadState, CcrDecodeError> {
let mut outer = DerReader::new(explicit_der); let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() { if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ROAPayloadState".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after ROAPayloadState".into(),
));
} }
let payload_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let payload_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut reader = DerReader::new(payload_der); let mut reader = DerReader::new(payload_der);
@ -172,7 +219,10 @@ fn decode_roa_payload_state(explicit_der: &[u8]) -> Result<RoaPayloadState, CcrD
let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?; let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?;
rps.push(decode_roa_payload_set(full)?); rps.push(decode_roa_payload_set(full)?);
} }
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec(); let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
Ok(RoaPayloadState { rps, hash }) Ok(RoaPayloadState { rps, hash })
} }
@ -180,7 +230,9 @@ fn decode_roa_payload_set(der: &[u8]) -> Result<RoaPayloadSet, CcrDecodeError> {
let mut top = DerReader::new(der); let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() { if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ROAPayloadSet".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after ROAPayloadSet".into(),
));
} }
let as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32; let as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32;
let blocks_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let blocks_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
@ -190,14 +242,19 @@ fn decode_roa_payload_set(der: &[u8]) -> Result<RoaPayloadSet, CcrDecodeError> {
let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?; let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?;
ip_addr_blocks.push(full.to_vec()); ip_addr_blocks.push(full.to_vec());
} }
Ok(RoaPayloadSet { as_id, ip_addr_blocks }) Ok(RoaPayloadSet {
as_id,
ip_addr_blocks,
})
} }
fn decode_aspa_payload_state(explicit_der: &[u8]) -> Result<AspaPayloadState, CcrDecodeError> { fn decode_aspa_payload_state(explicit_der: &[u8]) -> Result<AspaPayloadState, CcrDecodeError> {
let mut outer = DerReader::new(explicit_der); let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() { if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ASPAPayloadState".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after ASPAPayloadState".into(),
));
} }
let payload_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let payload_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut reader = DerReader::new(payload_der); let mut reader = DerReader::new(payload_der);
@ -206,7 +263,10 @@ fn decode_aspa_payload_state(explicit_der: &[u8]) -> Result<AspaPayloadState, Cc
let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?; let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?;
aps.push(decode_aspa_payload_set(full)?); aps.push(decode_aspa_payload_set(full)?);
} }
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec(); let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
Ok(AspaPayloadState { aps, hash }) Ok(AspaPayloadState { aps, hash })
} }
@ -214,7 +274,9 @@ fn decode_aspa_payload_set(der: &[u8]) -> Result<AspaPayloadSet, CcrDecodeError>
let mut top = DerReader::new(der); let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() { if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ASPAPayloadSet".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after ASPAPayloadSet".into(),
));
} }
let customer_as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32; let customer_as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32;
let providers_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let providers_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
@ -223,22 +285,35 @@ fn decode_aspa_payload_set(der: &[u8]) -> Result<AspaPayloadSet, CcrDecodeError>
while !reader.is_empty() { while !reader.is_empty() {
providers.push(reader.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32); providers.push(reader.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32);
} }
Ok(AspaPayloadSet { customer_as_id, providers }) Ok(AspaPayloadSet {
customer_as_id,
providers,
})
} }
fn decode_trust_anchor_state(explicit_der: &[u8]) -> Result<TrustAnchorState, CcrDecodeError> { fn decode_trust_anchor_state(explicit_der: &[u8]) -> Result<TrustAnchorState, CcrDecodeError> {
let mut outer = DerReader::new(explicit_der); let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() { if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after TrustAnchorState".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after TrustAnchorState".into(),
));
} }
let skis_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let skis_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut reader = DerReader::new(skis_der); let mut reader = DerReader::new(skis_der);
let mut skis = Vec::new(); let mut skis = Vec::new();
while !reader.is_empty() { while !reader.is_empty() {
skis.push(reader.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec()); skis.push(
reader
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec(),
);
} }
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec(); let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
Ok(TrustAnchorState { skis, hash }) Ok(TrustAnchorState { skis, hash })
} }
@ -246,7 +321,9 @@ fn decode_router_key_state(explicit_der: &[u8]) -> Result<RouterKeyState, CcrDec
let mut outer = DerReader::new(explicit_der); let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() { if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after RouterKeyState".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after RouterKeyState".into(),
));
} }
let sets_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let sets_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut reader = DerReader::new(sets_der); let mut reader = DerReader::new(sets_der);
@ -255,7 +332,10 @@ fn decode_router_key_state(explicit_der: &[u8]) -> Result<RouterKeyState, CcrDec
let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?; let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?;
rksets.push(decode_router_key_set(full)?); rksets.push(decode_router_key_set(full)?);
} }
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec(); let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
Ok(RouterKeyState { rksets, hash }) Ok(RouterKeyState { rksets, hash })
} }
@ -263,7 +343,9 @@ fn decode_router_key_set(der: &[u8]) -> Result<RouterKeySet, CcrDecodeError> {
let mut top = DerReader::new(der); let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() { if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after RouterKeySet".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after RouterKeySet".into(),
));
} }
let as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32; let as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32;
let keys_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?; let keys_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
@ -280,14 +362,22 @@ fn decode_router_key(der: &[u8]) -> Result<RouterKey, CcrDecodeError> {
let mut top = DerReader::new(der); let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?; let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() { if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after RouterKey".into())); return Err(CcrDecodeError::Parse(
"trailing bytes after RouterKey".into(),
));
} }
let ski = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec(); let ski = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
let (_tag, full, _value) = seq.take_any_full().map_err(CcrDecodeError::Parse)?; let (_tag, full, _value) = seq.take_any_full().map_err(CcrDecodeError::Parse)?;
if !seq.is_empty() { if !seq.is_empty() {
return Err(CcrDecodeError::Parse("trailing fields in RouterKey".into())); return Err(CcrDecodeError::Parse("trailing fields in RouterKey".into()));
} }
Ok(RouterKey { ski, spki_der: full.to_vec() }) Ok(RouterKey {
ski,
spki_der: full.to_vec(),
})
} }
fn decode_digest_algorithm(mut seq: DerReader<'_>) -> Result<CcrDigestAlgorithm, CcrDecodeError> { fn decode_digest_algorithm(mut seq: DerReader<'_>) -> Result<CcrDigestAlgorithm, CcrDecodeError> {
@ -354,8 +444,7 @@ fn parse_generalized_time(bytes: &[u8]) -> Result<time::OffsetDateTime, CcrDecod
let hour = parse(8..10)? as u8; let hour = parse(8..10)? as u8;
let minute = parse(10..12)? as u8; let minute = parse(10..12)? as u8;
let second = parse(12..14)? as u8; let second = parse(12..14)? as u8;
let month = time::Month::try_from(month) let month = time::Month::try_from(month).map_err(|e| CcrDecodeError::Parse(e.to_string()))?;
.map_err(|e| CcrDecodeError::Parse(e.to_string()))?;
let date = time::Date::from_calendar_date(year, month, day) let date = time::Date::from_calendar_date(year, month, day)
.map_err(|e| CcrDecodeError::Parse(e.to_string()))?; .map_err(|e| CcrDecodeError::Parse(e.to_string()))?;
let timev = time::Time::from_hms(hour, minute, second) let timev = time::Time::from_hms(hour, minute, second)
@ -371,7 +460,9 @@ fn decode_big_unsigned(bytes: &[u8]) -> Result<BigUnsigned, CcrDecodeError> {
return Err(CcrDecodeError::Parse("INTEGER must be non-negative".into())); return Err(CcrDecodeError::Parse("INTEGER must be non-negative".into()));
} }
if bytes.len() > 1 && bytes[0] == 0x00 && (bytes[1] & 0x80) == 0 { if bytes.len() > 1 && bytes[0] == 0x00 && (bytes[1] & 0x80) == 0 {
return Err(CcrDecodeError::Parse("INTEGER not minimally encoded".into())); return Err(CcrDecodeError::Parse(
"INTEGER not minimally encoded".into(),
));
} }
let bytes_be = if bytes.len() > 1 && bytes[0] == 0x00 { let bytes_be = if bytes.len() > 1 && bytes[0] == 0x00 {
bytes[1..].to_vec() bytes[1..].to_vec()

View File

@ -34,34 +34,58 @@ pub fn dump_content_info_json(
}) })
}).unwrap_or_else(|| json!({"present": false})); }).unwrap_or_else(|| json!({"present": false}));
let vrps_total = content_info.content.vrps.as_ref().map(|state| { let vrps_total = content_info
state.rps.iter().map(|set| set.ip_addr_blocks.len()).sum::<usize>() .content
}).unwrap_or(0); .vrps
let vrps = content_info.content.vrps.as_ref().map(|state| { .as_ref()
.map(|state| {
state
.rps
.iter()
.map(|set| set.ip_addr_blocks.len())
.sum::<usize>()
})
.unwrap_or(0);
let vrps = content_info
.content
.vrps
.as_ref()
.map(|state| {
json!({ json!({
"present": true, "present": true,
"payload_sets": state.rps.len(), "payload_sets": state.rps.len(),
"hash_hex": hex::encode(&state.hash), "hash_hex": hex::encode(&state.hash),
"ip_addr_block_count": vrps_total, "ip_addr_block_count": vrps_total,
}) })
}).unwrap_or_else(|| json!({"present": false})); })
.unwrap_or_else(|| json!({"present": false}));
let vaps = content_info.content.vaps.as_ref().map(|state| { let vaps = content_info
.content
.vaps
.as_ref()
.map(|state| {
json!({ json!({
"present": true, "present": true,
"payload_sets": state.aps.len(), "payload_sets": state.aps.len(),
"hash_hex": hex::encode(&state.hash), "hash_hex": hex::encode(&state.hash),
"provider_count": state.aps.iter().map(|set| set.providers.len()).sum::<usize>(), "provider_count": state.aps.iter().map(|set| set.providers.len()).sum::<usize>(),
}) })
}).unwrap_or_else(|| json!({"present": false})); })
.unwrap_or_else(|| json!({"present": false}));
let tas = content_info.content.tas.as_ref().map(|state| { let tas = content_info
.content
.tas
.as_ref()
.map(|state| {
json!({ json!({
"present": true, "present": true,
"ski_count": state.skis.len(), "ski_count": state.skis.len(),
"hash_hex": hex::encode(&state.hash), "hash_hex": hex::encode(&state.hash),
}) })
}).unwrap_or_else(|| json!({"present": false})); })
.unwrap_or_else(|| json!({"present": false}));
let rks = content_info.content.rks.as_ref().map(|state| { let rks = content_info.content.rks.as_ref().map(|state| {
json!({ json!({

View File

@ -24,9 +24,7 @@ pub fn encode_content_info(content_info: &CcrContentInfo) -> Result<Vec<u8>, Ccr
])) ]))
} }
pub fn encode_ccr( pub fn encode_ccr(ccr: &RpkiCanonicalCacheRepresentation) -> Result<Vec<u8>, CcrEncodeError> {
ccr: &RpkiCanonicalCacheRepresentation,
) -> Result<Vec<u8>, CcrEncodeError> {
ccr.validate().map_err(CcrEncodeError::Validate)?; ccr.validate().map_err(CcrEncodeError::Validate)?;
let mut fields = Vec::new(); let mut fields = Vec::new();
if ccr.version != CCR_VERSION_V0 { if ccr.version != CCR_VERSION_V0 {
@ -156,11 +154,12 @@ pub fn encode_trust_anchor_state(state: &TrustAnchorState) -> Result<Vec<u8>, Cc
Ok(encode_sequence(&[skis, encode_octet_string(&state.hash)])) Ok(encode_sequence(&[skis, encode_octet_string(&state.hash)]))
} }
pub fn encode_trust_anchor_state_payload_der( pub fn encode_trust_anchor_state_payload_der(skis: &[Vec<u8>]) -> Result<Vec<u8>, CcrEncodeError> {
skis: &[Vec<u8>],
) -> Result<Vec<u8>, CcrEncodeError> {
Ok(encode_sequence( Ok(encode_sequence(
&skis.iter().map(|ski| encode_octet_string(ski)).collect::<Vec<_>>(), &skis
.iter()
.map(|ski| encode_octet_string(ski))
.collect::<Vec<_>>(),
)) ))
} }

View File

@ -54,7 +54,6 @@ pub fn build_ccr_from_run(
}) })
} }
fn build_router_key_state_from_runtime( fn build_router_key_state_from_runtime(
router_keys: &[RouterKeyPayload], router_keys: &[RouterKeyPayload],
) -> Result<crate::ccr::model::RouterKeyState, CcrBuildError> { ) -> Result<crate::ccr::model::RouterKeyState, CcrBuildError> {
@ -109,17 +108,22 @@ pub fn write_ccr_file(
mod tests { mod tests {
use super::*; use super::*;
use crate::ccr::decode::decode_content_info; use crate::ccr::decode::decode_content_info;
use crate::data_model::ta::TrustAnchor;
use crate::data_model::tal::Tal;
use crate::storage::{RawByHashEntry, RocksStore, VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry, VcirInstanceGate, VcirRelatedArtifact, VcirSummary, ValidatedCaInstanceResult, ValidatedManifestMeta, PackTime};
use crate::validation::objects::{AspaAttestation, RouterKeyPayload, Vrp};
use crate::data_model::manifest::ManifestObject; use crate::data_model::manifest::ManifestObject;
use crate::data_model::roa::{IpPrefix, RoaAfi}; use crate::data_model::roa::{IpPrefix, RoaAfi};
use crate::data_model::ta::TrustAnchor;
use crate::data_model::tal::Tal;
use crate::storage::{
PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary,
VcirChildEntry, VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
};
use crate::validation::objects::{AspaAttestation, RouterKeyPayload, Vrp};
use sha2::Digest; use sha2::Digest;
fn sample_trust_anchor() -> TrustAnchor { fn sample_trust_anchor() -> TrustAnchor {
let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")); let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let tal_bytes = std::fs::read(base.join("tests/fixtures/tal/apnic-rfc7730-https.tal")).expect("read tal"); let tal_bytes = std::fs::read(base.join("tests/fixtures/tal/apnic-rfc7730-https.tal"))
.expect("read tal");
let ta_der = std::fs::read(base.join("tests/fixtures/ta/apnic-ta.cer")).expect("read ta"); let ta_der = std::fs::read(base.join("tests/fixtures/ta/apnic-ta.cer")).expect("read ta");
let tal = Tal::decode_bytes(&tal_bytes).expect("decode tal"); let tal = Tal::decode_bytes(&tal_bytes).expect("decode tal");
TrustAnchor::bind_der(tal, &ta_der, None).expect("bind ta") TrustAnchor::bind_der(tal, &ta_der, None).expect("bind ta")
@ -131,7 +135,8 @@ mod tests {
let manifest = ManifestObject::decode_der(&manifest_der).expect("decode manifest"); let manifest = ManifestObject::decode_der(&manifest_der).expect("decode manifest");
let hash = hex::encode(sha2::Sha256::digest(&manifest_der)); let hash = hex::encode(sha2::Sha256::digest(&manifest_der));
let mut raw = RawByHashEntry::from_bytes(hash.clone(), manifest_der.clone()); let mut raw = RawByHashEntry::from_bytes(hash.clone(), manifest_der.clone());
raw.origin_uris.push("rsync://example.test/repo/current.mft".to_string()); raw.origin_uris
.push("rsync://example.test/repo/current.mft".to_string());
raw.object_type = Some("mft".to_string()); raw.object_type = Some("mft".to_string());
raw.encoding = Some("der".to_string()); raw.encoding = Some("der".to_string());
store.put_raw_by_hash_entry(&raw).expect("put raw"); store.put_raw_by_hash_entry(&raw).expect("put raw");
@ -142,19 +147,33 @@ mod tests {
ca_subject_name: "CN=test".to_string(), ca_subject_name: "CN=test".to_string(),
ca_ski: "11".repeat(20), ca_ski: "11".repeat(20),
issuer_ski: "22".repeat(20), issuer_ski: "22".repeat(20),
last_successful_validation_time: PackTime::from_utc_offset_datetime(manifest.manifest.this_update), last_successful_validation_time: PackTime::from_utc_offset_datetime(
manifest.manifest.this_update,
),
current_manifest_rsync_uri: "rsync://example.test/repo/current.mft".to_string(), current_manifest_rsync_uri: "rsync://example.test/repo/current.mft".to_string(),
current_crl_rsync_uri: "rsync://example.test/repo/current.crl".to_string(), current_crl_rsync_uri: "rsync://example.test/repo/current.crl".to_string(),
validated_manifest_meta: ValidatedManifestMeta { validated_manifest_meta: ValidatedManifestMeta {
validated_manifest_number: manifest.manifest.manifest_number.bytes_be.clone(), validated_manifest_number: manifest.manifest.manifest_number.bytes_be.clone(),
validated_manifest_this_update: PackTime::from_utc_offset_datetime(manifest.manifest.this_update), validated_manifest_this_update: PackTime::from_utc_offset_datetime(
validated_manifest_next_update: PackTime::from_utc_offset_datetime(manifest.manifest.next_update), manifest.manifest.this_update,
),
validated_manifest_next_update: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
}, },
instance_gate: VcirInstanceGate { instance_gate: VcirInstanceGate {
manifest_next_update: PackTime::from_utc_offset_datetime(manifest.manifest.next_update), manifest_next_update: PackTime::from_utc_offset_datetime(
current_crl_next_update: PackTime::from_utc_offset_datetime(manifest.manifest.next_update), manifest.manifest.next_update,
self_ca_not_after: PackTime::from_utc_offset_datetime(manifest.manifest.next_update), ),
instance_effective_until: PackTime::from_utc_offset_datetime(manifest.manifest.next_update), current_crl_next_update: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
self_ca_not_after: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
instance_effective_until: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
}, },
child_entries: vec![VcirChildEntry { child_entries: vec![VcirChildEntry {
child_manifest_rsync_uri: "rsync://example.test/repo/child.mft".to_string(), child_manifest_rsync_uri: "rsync://example.test/repo/child.mft".to_string(),
@ -166,7 +185,9 @@ mod tests {
child_rrdp_notification_uri: None, child_rrdp_notification_uri: None,
child_effective_ip_resources: None, child_effective_ip_resources: None,
child_effective_as_resources: None, child_effective_as_resources: None,
accepted_at_validation_time: PackTime::from_utc_offset_datetime(manifest.manifest.this_update), accepted_at_validation_time: PackTime::from_utc_offset_datetime(
manifest.manifest.this_update,
),
}], }],
local_outputs: Vec::new(), local_outputs: Vec::new(),
related_artifacts: vec![VcirRelatedArtifact { related_artifacts: vec![VcirRelatedArtifact {
@ -204,10 +225,17 @@ mod tests {
let trust_anchor = sample_trust_anchor(); let trust_anchor = sample_trust_anchor();
let vrps = vec![Vrp { let vrps = vec![Vrp {
asn: 64496, asn: 64496,
prefix: IpPrefix { afi: RoaAfi::Ipv4, prefix_len: 8, addr: [10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] }, prefix: IpPrefix {
afi: RoaAfi::Ipv4,
prefix_len: 8,
addr: [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
max_length: 8, max_length: 8,
}]; }];
let aspas = vec![AspaAttestation { customer_as_id: 64496, provider_as_ids: vec![64497] }]; let aspas = vec![AspaAttestation {
customer_as_id: 64496,
provider_as_ids: vec![64497],
}];
let router_keys = vec![RouterKeyPayload { let router_keys = vec![RouterKeyPayload {
as_id: 64496, as_id: 64496,
ski: vec![0x11; 20], ski: vec![0x11; 20],
@ -215,9 +243,19 @@ mod tests {
source_object_uri: "rsync://example.test/repo/router.cer".to_string(), source_object_uri: "rsync://example.test/repo/router.cer".to_string(),
source_object_hash: hex::encode([0x11; 32]), source_object_hash: hex::encode([0x11; 32]),
source_ee_cert_hash: hex::encode([0x11; 32]), source_ee_cert_hash: hex::encode([0x11; 32]),
item_effective_until: PackTime::from_utc_offset_datetime(time::OffsetDateTime::now_utc() + time::Duration::hours(1)), item_effective_until: PackTime::from_utc_offset_datetime(
time::OffsetDateTime::now_utc() + time::Duration::hours(1),
),
}]; }];
let ccr = build_ccr_from_run(&store, &[trust_anchor], &vrps, &aspas, &router_keys, time::OffsetDateTime::now_utc()).expect("build ccr"); let ccr = build_ccr_from_run(
&store,
&[trust_anchor],
&vrps,
&aspas,
&router_keys,
time::OffsetDateTime::now_utc(),
)
.expect("build ccr");
assert!(ccr.mfts.is_some()); assert!(ccr.mfts.is_some());
assert!(ccr.vrps.is_some()); assert!(ccr.vrps.is_some());
assert!(ccr.vaps.is_some()); assert!(ccr.vaps.is_some());

View File

@ -1,12 +1,12 @@
pub mod decode;
pub mod encode;
pub mod dump;
pub mod hash;
pub mod model;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod build; pub mod build;
pub mod decode;
pub mod dump;
pub mod encode;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod export; pub mod export;
pub mod hash;
pub mod model;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod verify; pub mod verify;
@ -16,16 +16,19 @@ pub use build::{
build_roa_payload_state, build_trust_anchor_state, build_roa_payload_state, build_trust_anchor_state,
}; };
pub use decode::{CcrDecodeError, decode_content_info}; pub use decode::{CcrDecodeError, decode_content_info};
pub use dump::{CcrDumpError, dump_content_info_json, dump_content_info_json_value};
pub use encode::{CcrEncodeError, encode_content_info}; pub use encode::{CcrEncodeError, encode_content_info};
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub use export::{CcrExportError, build_ccr_from_run, write_ccr_file}; pub use export::{CcrExportError, build_ccr_from_run, write_ccr_file};
pub use dump::{CcrDumpError, dump_content_info_json, dump_content_info_json_value};
#[cfg(feature = "full")]
pub use verify::{CcrVerifyError, CcrVerifySummary, extract_vrp_rows, verify_against_report_json_path, verify_against_vcir_store, verify_against_vcir_store_path, verify_content_info, verify_content_info_bytes};
pub use hash::{compute_state_hash, verify_state_hash}; pub use hash::{compute_state_hash, verify_state_hash};
pub use model::{ pub use model::{
AspaPayloadSet, AspaPayloadState, CcrContentInfo, CcrDigestAlgorithm, AspaPayloadSet, AspaPayloadState, CcrContentInfo, CcrDigestAlgorithm, ManifestInstance,
ManifestInstance, ManifestState, RoaPayloadSet, RoaPayloadState, ManifestState, RoaPayloadSet, RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState,
RouterKey, RouterKeySet, RouterKeyState, RpkiCanonicalCacheRepresentation, RpkiCanonicalCacheRepresentation, TrustAnchorState,
TrustAnchorState, };
#[cfg(feature = "full")]
pub use verify::{
CcrVerifyError, CcrVerifySummary, extract_vrp_rows, verify_against_report_json_path,
verify_against_vcir_store, verify_against_vcir_store_path, verify_content_info,
verify_content_info_bytes,
}; };

View File

@ -133,16 +133,17 @@ impl ManifestInstance {
)); ));
} }
validate_key_identifier("ManifestInstance.aki", &self.aki)?; validate_key_identifier("ManifestInstance.aki", &self.aki)?;
validate_big_unsigned_bytes("ManifestInstance.manifest_number", &self.manifest_number.bytes_be)?; validate_big_unsigned_bytes(
"ManifestInstance.manifest_number",
&self.manifest_number.bytes_be,
)?;
if self.locations.is_empty() { if self.locations.is_empty() {
return Err("ManifestInstance.locations must contain at least one AccessDescription".into()); return Err(
"ManifestInstance.locations must contain at least one AccessDescription".into(),
);
} }
for location in &self.locations { for location in &self.locations {
validate_full_der_with_tag( validate_full_der_with_tag("ManifestInstance.locations[]", location, Some(0x30))?;
"ManifestInstance.locations[]",
location,
Some(0x30),
)?;
} }
if !self.subordinates.is_empty() { if !self.subordinates.is_empty() {
validate_sorted_unique_bytes( validate_sorted_unique_bytes(
@ -339,7 +340,9 @@ fn validate_big_unsigned_bytes(field: &str, bytes: &[u8]) -> Result<(), String>
return Err(format!("{field} must not be empty")); return Err(format!("{field} must not be empty"));
} }
if bytes.len() > 1 && bytes[0] == 0x00 { if bytes.len() > 1 && bytes[0] == 0x00 {
return Err(format!("{field} must be minimally encoded as an unsigned integer")); return Err(format!(
"{field} must be minimally encoded as an unsigned integer"
));
} }
Ok(()) Ok(())
} }

View File

@ -82,7 +82,9 @@ pub fn verify_content_info_bytes(der: &[u8]) -> Result<CcrVerifySummary, CcrVeri
verify_content_info(&content_info) verify_content_info(&content_info)
} }
pub fn verify_content_info(content_info: &CcrContentInfo) -> Result<CcrVerifySummary, CcrVerifyError> { pub fn verify_content_info(
content_info: &CcrContentInfo,
) -> Result<CcrVerifySummary, CcrVerifyError> {
content_info.validate().map_err(CcrDecodeError::Validate)?; content_info.validate().map_err(CcrDecodeError::Validate)?;
let state_hashes_ok = true; let state_hashes_ok = true;
let mut manifest_instances = 0usize; let mut manifest_instances = 0usize;
@ -108,7 +110,11 @@ pub fn verify_content_info(content_info: &CcrContentInfo) -> Result<CcrVerifySum
return Err(CcrVerifyError::RoaHashMismatch); return Err(CcrVerifyError::RoaHashMismatch);
} }
roa_payload_sets = vrps.rps.len(); roa_payload_sets = vrps.rps.len();
roa_vrp_count = vrps.rps.iter().map(|set| count_roa_block_entries(&set.ip_addr_blocks)).sum(); roa_vrp_count = vrps
.rps
.iter()
.map(|set| count_roa_block_entries(&set.ip_addr_blocks))
.sum();
} }
if let Some(vaps) = &content_info.content.vaps { if let Some(vaps) = &content_info.content.vaps {
let payload_der = encode_aspa_payload_state_payload_der(&vaps.aps) let payload_der = encode_aspa_payload_state_payload_der(&vaps.aps)
@ -154,10 +160,11 @@ pub fn verify_against_report_json_path(
content_info: &CcrContentInfo, content_info: &CcrContentInfo,
report_json_path: &Path, report_json_path: &Path,
) -> Result<(), CcrVerifyError> { ) -> Result<(), CcrVerifyError> {
let bytes = std::fs::read(report_json_path) let bytes = std::fs::read(report_json_path).map_err(|e| {
.map_err(|e| CcrVerifyError::ReportRead(report_json_path.display().to_string(), e.to_string()))?; CcrVerifyError::ReportRead(report_json_path.display().to_string(), e.to_string())
let json: serde_json::Value = serde_json::from_slice(&bytes) })?;
.map_err(|e| CcrVerifyError::ReportParse(e.to_string()))?; let json: serde_json::Value =
serde_json::from_slice(&bytes).map_err(|e| CcrVerifyError::ReportParse(e.to_string()))?;
let report_vrps = report_vrp_keys(&json)?; let report_vrps = report_vrp_keys(&json)?;
let ccr_vrps = extract_vrp_rows(content_info)?; let ccr_vrps = extract_vrp_rows(content_info)?;
@ -198,7 +205,9 @@ pub fn verify_against_vcir_store(
let Some(mfts) = &content_info.content.mfts else { let Some(mfts) = &content_info.content.mfts else {
return Ok(()); return Ok(());
}; };
let vcirs = store.list_vcirs().map_err(|e| CcrVerifyError::ListVcirs(e.to_string()))?; let vcirs = store
.list_vcirs()
.map_err(|e| CcrVerifyError::ListVcirs(e.to_string()))?;
let mut vcir_hashes = BTreeSet::new(); let mut vcir_hashes = BTreeSet::new();
for vcir in vcirs { for vcir in vcirs {
if let Some(artifact) = vcir.related_artifacts.iter().find(|artifact| { if let Some(artifact) = vcir.related_artifacts.iter().find(|artifact| {
@ -244,7 +253,9 @@ fn verify_router_key_state_hash(state: &RouterKeyState) -> Result<(), CcrVerifyE
Ok(()) Ok(())
} }
fn report_vrp_keys(json: &serde_json::Value) -> Result<BTreeSet<(u32, String, u16)>, CcrVerifyError> { fn report_vrp_keys(
json: &serde_json::Value,
) -> Result<BTreeSet<(u32, String, u16)>, CcrVerifyError> {
let mut out = BTreeSet::new(); let mut out = BTreeSet::new();
let Some(items) = json.get("vrps").and_then(|v| v.as_array()) else { let Some(items) = json.get("vrps").and_then(|v| v.as_array()) else {
return Ok(out); return Ok(out);
@ -253,7 +264,8 @@ fn report_vrp_keys(json: &serde_json::Value) -> Result<BTreeSet<(u32, String, u1
let asn = item let asn = item
.get("asn") .get("asn")
.and_then(|v| v.as_u64()) .and_then(|v| v.as_u64())
.ok_or_else(|| CcrVerifyError::ReportParse("vrps[].asn missing".into()))? as u32; .ok_or_else(|| CcrVerifyError::ReportParse("vrps[].asn missing".into()))?
as u32;
let prefix = item let prefix = item
.get("prefix") .get("prefix")
.and_then(|v| v.as_str()) .and_then(|v| v.as_str())
@ -285,7 +297,11 @@ fn report_aspa_keys(json: &serde_json::Value) -> Result<BTreeSet<(u32, Vec<u32>)
.and_then(|v| v.as_array()) .and_then(|v| v.as_array())
.ok_or_else(|| CcrVerifyError::ReportParse("aspas[].provider_as_ids missing".into()))? .ok_or_else(|| CcrVerifyError::ReportParse("aspas[].provider_as_ids missing".into()))?
.iter() .iter()
.map(|v| v.as_u64().ok_or_else(|| CcrVerifyError::ReportParse("provider_as_ids[] invalid".into())).map(|v| v as u32)) .map(|v| {
v.as_u64()
.ok_or_else(|| CcrVerifyError::ReportParse("provider_as_ids[] invalid".into()))
.map(|v| v as u32)
})
.collect::<Result<Vec<_>, _>>()?; .collect::<Result<Vec<_>, _>>()?;
providers.sort_unstable(); providers.sort_unstable();
providers.dedup(); providers.dedup();
@ -294,7 +310,9 @@ fn report_aspa_keys(json: &serde_json::Value) -> Result<BTreeSet<(u32, Vec<u32>)
Ok(out) Ok(out)
} }
pub fn extract_vrp_rows(content_info: &CcrContentInfo) -> Result<BTreeSet<(u32, String, u16)>, CcrVerifyError> { pub fn extract_vrp_rows(
content_info: &CcrContentInfo,
) -> Result<BTreeSet<(u32, String, u16)>, CcrVerifyError> {
let mut out = BTreeSet::new(); let mut out = BTreeSet::new();
let Some(vrps) = &content_info.content.vrps else { let Some(vrps) = &content_info.content.vrps else {
return Ok(out); return Ok(out);
@ -311,7 +329,9 @@ pub fn extract_vrp_rows(content_info: &CcrContentInfo) -> Result<BTreeSet<(u32,
Ok(out) Ok(out)
} }
fn ccr_aspa_keys(content_info: &CcrContentInfo) -> Result<BTreeSet<(u32, Vec<u32>)>, CcrVerifyError> { fn ccr_aspa_keys(
content_info: &CcrContentInfo,
) -> Result<BTreeSet<(u32, Vec<u32>)>, CcrVerifyError> {
let mut out = BTreeSet::new(); let mut out = BTreeSet::new();
let Some(vaps) = &content_info.content.vaps else { let Some(vaps) = &content_info.content.vaps else {
return Ok(out); return Ok(out);
@ -322,24 +342,43 @@ fn ccr_aspa_keys(content_info: &CcrContentInfo) -> Result<BTreeSet<(u32, Vec<u32
Ok(out) Ok(out)
} }
fn decode_roa_family_block(block: &[u8]) -> Result<(u16, Vec<(u8, Vec<u8>, Option<u16>)>), CcrVerifyError> { fn decode_roa_family_block(
block: &[u8],
) -> Result<(u16, Vec<(u8, Vec<u8>, Option<u16>)>), CcrVerifyError> {
let mut top = crate::data_model::common::DerReader::new(block); let mut top = crate::data_model::common::DerReader::new(block);
let mut seq = top.take_sequence().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?; let mut seq = top
.take_sequence()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
if !top.is_empty() { if !top.is_empty() {
return Err(CcrVerifyError::Decode(CcrDecodeError::Parse("trailing bytes after ROAIPAddressFamily".into()))); return Err(CcrVerifyError::Decode(CcrDecodeError::Parse(
"trailing bytes after ROAIPAddressFamily".into(),
)));
} }
let afi_bytes = seq.take_octet_string().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?; let afi_bytes = seq
.take_octet_string()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let afi = u16::from_be_bytes([afi_bytes[0], afi_bytes[1]]); let afi = u16::from_be_bytes([afi_bytes[0], afi_bytes[1]]);
let mut addrs = seq.take_sequence().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?; let mut addrs = seq
.take_sequence()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let mut entries = Vec::new(); let mut entries = Vec::new();
while !addrs.is_empty() { while !addrs.is_empty() {
let mut addr_seq = addrs.take_sequence().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?; let mut addr_seq = addrs
let (unused_bits, content) = addr_seq.take_bit_string().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?; .take_sequence()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let (unused_bits, content) = addr_seq
.take_bit_string()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let prefix_len = (content.len() * 8) as u8 - unused_bits; let prefix_len = (content.len() * 8) as u8 - unused_bits;
let max_len = if addr_seq.is_empty() { let max_len = if addr_seq.is_empty() {
None None
} else { } else {
Some(addr_seq.take_uint_u64().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))? as u16) Some(
addr_seq
.take_uint_u64()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?
as u16,
)
}; };
entries.push((prefix_len, content.to_vec(), max_len)); entries.push((prefix_len, content.to_vec(), max_len));
} }
@ -358,37 +397,43 @@ fn format_prefix(afi: u16, addr_bytes: &[u8], prefix_len: u8) -> Result<String,
full[..addr_bytes.len()].copy_from_slice(addr_bytes); full[..addr_bytes.len()].copy_from_slice(addr_bytes);
Ok(format!("{}/{prefix_len}", std::net::Ipv6Addr::from(full))) Ok(format!("{}/{prefix_len}", std::net::Ipv6Addr::from(full)))
} }
other => Err(CcrVerifyError::Decode(CcrDecodeError::Parse(format!("unsupported AFI {other}")))), other => Err(CcrVerifyError::Decode(CcrDecodeError::Parse(format!(
"unsupported AFI {other}"
)))),
} }
} }
fn count_roa_block_entries(blocks: &[Vec<u8>]) -> usize { fn count_roa_block_entries(blocks: &[Vec<u8>]) -> usize {
blocks blocks
.iter() .iter()
.map(|block| decode_roa_family_block(block).map(|(_, entries)| entries.len()).unwrap_or(0)) .map(|block| {
decode_roa_family_block(block)
.map(|(_, entries)| entries.len())
.unwrap_or(0)
})
.sum() .sum()
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::ccr::build::{build_aspa_payload_state, build_roa_payload_state};
use crate::ccr::encode::{ use crate::ccr::encode::{
encode_manifest_state_payload_der, encode_roa_payload_state_payload_der, encode_manifest_state_payload_der, encode_roa_payload_state_payload_der,
encode_router_key_state_payload_der, encode_trust_anchor_state_payload_der, encode_router_key_state_payload_der, encode_trust_anchor_state_payload_der,
}; };
use crate::ccr::build::{build_aspa_payload_state, build_roa_payload_state};
use crate::ccr::model::{ use crate::ccr::model::{
CcrDigestAlgorithm, ManifestInstance, ManifestState, CcrDigestAlgorithm, ManifestInstance, ManifestState, RouterKey, RouterKeySet,
RouterKey, RouterKeySet, RpkiCanonicalCacheRepresentation, RpkiCanonicalCacheRepresentation,
}; };
use crate::data_model::roa::{IpPrefix, RoaAfi};
use crate::validation::objects::{AspaAttestation, Vrp};
use crate::data_model::common::BigUnsigned; use crate::data_model::common::BigUnsigned;
use crate::data_model::roa::{IpPrefix, RoaAfi};
use crate::storage::{ use crate::storage::{
PackTime, ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind, PackTime, ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind,
VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary, VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
}; };
use crate::validation::objects::{AspaAttestation, Vrp};
fn sample_time() -> time::OffsetDateTime { fn sample_time() -> time::OffsetDateTime {
time::OffsetDateTime::parse( time::OffsetDateTime::parse(
@ -415,21 +460,37 @@ mod tests {
}; };
let vrps = build_roa_payload_state(&[Vrp { let vrps = build_roa_payload_state(&[Vrp {
asn: 64496, asn: 64496,
prefix: IpPrefix { afi: RoaAfi::Ipv4, prefix_len: 0, addr: [0; 16] }, prefix: IpPrefix {
afi: RoaAfi::Ipv4,
prefix_len: 0,
addr: [0; 16],
},
max_length: 0, max_length: 0,
}]).expect("build roa state"); }])
.expect("build roa state");
let vaps = build_aspa_payload_state(&[AspaAttestation { let vaps = build_aspa_payload_state(&[AspaAttestation {
customer_as_id: 64496, customer_as_id: 64496,
provider_as_ids: vec![64497], provider_as_ids: vec![64497],
}]).expect("build aspa state"); }])
.expect("build aspa state");
let skis = vec![vec![0x11; 20]]; let skis = vec![vec![0x11; 20]];
let tas = TrustAnchorState { let tas = TrustAnchorState {
hash: crate::ccr::compute_state_hash(&encode_trust_anchor_state_payload_der(&skis).unwrap()), hash: crate::ccr::compute_state_hash(
&encode_trust_anchor_state_payload_der(&skis).unwrap(),
),
skis, skis,
}; };
let rksets = vec![RouterKeySet { as_id: 64496, router_keys: vec![RouterKey { ski: vec![0x22;20], spki_der: vec![0x30,0x00] }] }]; let rksets = vec![RouterKeySet {
as_id: 64496,
router_keys: vec![RouterKey {
ski: vec![0x22; 20],
spki_der: vec![0x30, 0x00],
}],
}];
let rks = RouterKeyState { let rks = RouterKeyState {
hash: crate::ccr::compute_state_hash(&encode_router_key_state_payload_der(&rksets).unwrap()), hash: crate::ccr::compute_state_hash(
&encode_router_key_state_payload_der(&rksets).unwrap(),
),
rksets, rksets,
}; };
CcrContentInfo::new(RpkiCanonicalCacheRepresentation { CcrContentInfo::new(RpkiCanonicalCacheRepresentation {
@ -448,19 +509,31 @@ mod tests {
fn verify_detects_each_state_hash_mismatch() { fn verify_detects_each_state_hash_mismatch() {
let mut ci = sample_content_info(); let mut ci = sample_content_info();
ci.content.vrps.as_mut().unwrap().hash[0] ^= 0x01; ci.content.vrps.as_mut().unwrap().hash[0] ^= 0x01;
assert!(matches!(verify_content_info(&ci), Err(CcrVerifyError::RoaHashMismatch))); assert!(matches!(
verify_content_info(&ci),
Err(CcrVerifyError::RoaHashMismatch)
));
let mut ci = sample_content_info(); let mut ci = sample_content_info();
ci.content.vaps.as_mut().unwrap().hash[0] ^= 0x01; ci.content.vaps.as_mut().unwrap().hash[0] ^= 0x01;
assert!(matches!(verify_content_info(&ci), Err(CcrVerifyError::AspaHashMismatch))); assert!(matches!(
verify_content_info(&ci),
Err(CcrVerifyError::AspaHashMismatch)
));
let mut ci = sample_content_info(); let mut ci = sample_content_info();
ci.content.tas.as_mut().unwrap().hash[0] ^= 0x01; ci.content.tas.as_mut().unwrap().hash[0] ^= 0x01;
assert!(matches!(verify_content_info(&ci), Err(CcrVerifyError::TrustAnchorHashMismatch))); assert!(matches!(
verify_content_info(&ci),
Err(CcrVerifyError::TrustAnchorHashMismatch)
));
let mut ci = sample_content_info(); let mut ci = sample_content_info();
ci.content.rks.as_mut().unwrap().hash[0] ^= 0x01; ci.content.rks.as_mut().unwrap().hash[0] ^= 0x01;
assert!(matches!(verify_content_info(&ci), Err(CcrVerifyError::RouterKeyHashMismatch))); assert!(matches!(
verify_content_info(&ci),
Err(CcrVerifyError::RouterKeyHashMismatch)
));
} }
#[test] #[test]
@ -472,20 +545,28 @@ mod tests {
}); });
let report_path = td.path().join("report.json"); let report_path = td.path().join("report.json");
std::fs::write(&report_path, serde_json::to_vec(&report).unwrap()).unwrap(); std::fs::write(&report_path, serde_json::to_vec(&report).unwrap()).unwrap();
verify_against_report_json_path(&sample_content_info(), &report_path).expect("matching report"); verify_against_report_json_path(&sample_content_info(), &report_path)
.expect("matching report");
let bad_path = td.path().join("bad.json"); let bad_path = td.path().join("bad.json");
std::fs::write(&bad_path, b"not-json").unwrap(); std::fs::write(&bad_path, b"not-json").unwrap();
assert!(matches!(verify_against_report_json_path(&sample_content_info(), &bad_path), Err(CcrVerifyError::ReportParse(_)))); assert!(matches!(
verify_against_report_json_path(&sample_content_info(), &bad_path),
Err(CcrVerifyError::ReportParse(_))
));
} }
#[test] #[test]
fn verify_against_report_json_rejects_missing_fields_and_aspa_mismatch() { fn verify_against_report_json_rejects_missing_fields_and_aspa_mismatch() {
let td = tempfile::tempdir().expect("tempdir"); let td = tempfile::tempdir().expect("tempdir");
let missing = serde_json::json!({"vrps":[{"prefix":"0.0.0.0/0","max_length":0}],"aspas":[]}); let missing =
serde_json::json!({"vrps":[{"prefix":"0.0.0.0/0","max_length":0}],"aspas":[]});
let missing_path = td.path().join("missing.json"); let missing_path = td.path().join("missing.json");
std::fs::write(&missing_path, serde_json::to_vec(&missing).unwrap()).unwrap(); std::fs::write(&missing_path, serde_json::to_vec(&missing).unwrap()).unwrap();
assert!(matches!(verify_against_report_json_path(&sample_content_info(), &missing_path), Err(CcrVerifyError::ReportParse(_)))); assert!(matches!(
verify_against_report_json_path(&sample_content_info(), &missing_path),
Err(CcrVerifyError::ReportParse(_))
));
let mismatch = serde_json::json!({ let mismatch = serde_json::json!({
"vrps": [{"asn": 64496, "prefix": "0.0.0.0/0", "max_length": 0}], "vrps": [{"asn": 64496, "prefix": "0.0.0.0/0", "max_length": 0}],
@ -493,7 +574,10 @@ mod tests {
}); });
let mismatch_path = td.path().join("mismatch.json"); let mismatch_path = td.path().join("mismatch.json");
std::fs::write(&mismatch_path, serde_json::to_vec(&mismatch).unwrap()).unwrap(); std::fs::write(&mismatch_path, serde_json::to_vec(&mismatch).unwrap()).unwrap();
assert!(matches!(verify_against_report_json_path(&sample_content_info(), &mismatch_path), Err(CcrVerifyError::ReportAspaMismatch { .. }))); assert!(matches!(
verify_against_report_json_path(&sample_content_info(), &mismatch_path),
Err(CcrVerifyError::ReportAspaMismatch { .. })
));
} }
#[test] #[test]
@ -542,11 +626,26 @@ mod tests {
object_type: Some("mft".to_string()), object_type: Some("mft".to_string()),
validation_status: VcirArtifactValidationStatus::Accepted, validation_status: VcirArtifactValidationStatus::Accepted,
}], }],
summary: VcirSummary { local_vrp_count: 0, local_aspa_count: 0, local_router_key_count: 0, child_count: 1, accepted_object_count: 1, rejected_object_count: 0 }, summary: VcirSummary {
audit_summary: VcirAuditSummary { failed_fetch_eligible: true, last_failed_fetch_reason: None, warning_count: 0, audit_flags: Vec::new() }, local_vrp_count: 0,
local_aspa_count: 0,
local_router_key_count: 0,
child_count: 1,
accepted_object_count: 1,
rejected_object_count: 0,
},
audit_summary: VcirAuditSummary {
failed_fetch_eligible: true,
last_failed_fetch_reason: None,
warning_count: 0,
audit_flags: Vec::new(),
},
}; };
store.put_vcir(&vcir).unwrap(); store.put_vcir(&vcir).unwrap();
assert!(matches!(verify_against_vcir_store(&sample_content_info(), &store), Err(CcrVerifyError::VcirManifestMismatch { .. }))); assert!(matches!(
verify_against_vcir_store(&sample_content_info(), &store),
Err(CcrVerifyError::VcirManifestMismatch { .. })
));
} }
#[test] #[test]
@ -557,12 +656,27 @@ mod tests {
hash_alg: CcrDigestAlgorithm::Sha256, hash_alg: CcrDigestAlgorithm::Sha256,
produced_at: sample_time(), produced_at: sample_time(),
mfts: None, mfts: None,
vrps: Some(crate::ccr::model::RoaPayloadState { rps: vec![crate::ccr::model::RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![block.clone()] }], hash: crate::ccr::compute_state_hash(&encode_roa_payload_state_payload_der(&[crate::ccr::model::RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![block] }]).unwrap()) }), vrps: Some(crate::ccr::model::RoaPayloadState {
rps: vec![crate::ccr::model::RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![block.clone()],
}],
hash: crate::ccr::compute_state_hash(
&encode_roa_payload_state_payload_der(&[crate::ccr::model::RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![block],
}])
.unwrap(),
),
}),
vaps: None, vaps: None,
tas: None, tas: None,
rks: None, rks: None,
}); });
assert!(matches!(extract_vrp_rows(&ci), Err(CcrVerifyError::Decode(_)))); assert!(matches!(
extract_vrp_rows(&ci),
Err(CcrVerifyError::Decode(_))
));
let bad_count = count_roa_block_entries(&[vec![0x04, 0x00]]); let bad_count = count_roa_block_entries(&[vec![0x04, 0x00]]);
assert_eq!(bad_count, 0); assert_eq!(bad_count, 0);
} }

View File

@ -14,7 +14,10 @@ pub enum CirDecodeError {
UnexpectedVersion { expected: u32, actual: u32 }, UnexpectedVersion { expected: u32, actual: u32 },
#[error("unexpected digest algorithm OID: expected {expected}, got {actual}")] #[error("unexpected digest algorithm OID: expected {expected}, got {actual}")]
UnexpectedDigestAlgorithm { expected: &'static str, actual: String }, UnexpectedDigestAlgorithm {
expected: &'static str,
actual: String,
},
#[error("CIR model validation failed after decode: {0}")] #[error("CIR model validation failed after decode: {0}")]
Validate(String), Validate(String),
@ -85,12 +88,17 @@ fn decode_object(der: &[u8]) -> Result<CirObject, CirDecodeError> {
let mut top = DerReader::new(der); let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CirDecodeError::Parse)?; let mut seq = top.take_sequence().map_err(CirDecodeError::Parse)?;
if !top.is_empty() { if !top.is_empty() {
return Err(CirDecodeError::Parse("trailing bytes after CirObject".into())); return Err(CirDecodeError::Parse(
"trailing bytes after CirObject".into(),
));
} }
let rsync_uri = std::str::from_utf8(seq.take_tag(0x16).map_err(CirDecodeError::Parse)?) let rsync_uri = std::str::from_utf8(seq.take_tag(0x16).map_err(CirDecodeError::Parse)?)
.map_err(|e| CirDecodeError::Parse(e.to_string()))? .map_err(|e| CirDecodeError::Parse(e.to_string()))?
.to_string(); .to_string();
let sha256 = seq.take_octet_string().map_err(CirDecodeError::Parse)?.to_vec(); let sha256 = seq
.take_octet_string()
.map_err(CirDecodeError::Parse)?
.to_vec();
if !seq.is_empty() { if !seq.is_empty() {
return Err(CirDecodeError::Parse("trailing fields in CirObject".into())); return Err(CirDecodeError::Parse("trailing fields in CirObject".into()));
} }
@ -106,7 +114,10 @@ fn decode_tal(der: &[u8]) -> Result<CirTal, CirDecodeError> {
let tal_uri = std::str::from_utf8(seq.take_tag(0x16).map_err(CirDecodeError::Parse)?) let tal_uri = std::str::from_utf8(seq.take_tag(0x16).map_err(CirDecodeError::Parse)?)
.map_err(|e| CirDecodeError::Parse(e.to_string()))? .map_err(|e| CirDecodeError::Parse(e.to_string()))?
.to_string(); .to_string();
let tal_bytes = seq.take_octet_string().map_err(CirDecodeError::Parse)?.to_vec(); let tal_bytes = seq
.take_octet_string()
.map_err(CirDecodeError::Parse)?
.to_vec();
if !seq.is_empty() { if !seq.is_empty() {
return Err(CirDecodeError::Parse("trailing fields in CirTal".into())); return Err(CirDecodeError::Parse("trailing fields in CirTal".into()));
} }
@ -150,12 +161,10 @@ fn parse_generalized_time(bytes: &[u8]) -> Result<time::OffsetDateTime, CirDecod
let hour = parse(8..10)? as u8; let hour = parse(8..10)? as u8;
let minute = parse(10..12)? as u8; let minute = parse(10..12)? as u8;
let second = parse(12..14)? as u8; let second = parse(12..14)? as u8;
let month = time::Month::try_from(month) let month = time::Month::try_from(month).map_err(|e| CirDecodeError::Parse(e.to_string()))?;
.map_err(|e| CirDecodeError::Parse(e.to_string()))?;
let date = time::Date::from_calendar_date(year, month, day) let date = time::Date::from_calendar_date(year, month, day)
.map_err(|e| CirDecodeError::Parse(e.to_string()))?; .map_err(|e| CirDecodeError::Parse(e.to_string()))?;
let timev = time::Time::from_hms(hour, minute, second) let timev = time::Time::from_hms(hour, minute, second)
.map_err(|e| CirDecodeError::Parse(e.to_string()))?; .map_err(|e| CirDecodeError::Parse(e.to_string()))?;
Ok(time::PrimitiveDateTime::new(date, timev).assume_utc()) Ok(time::PrimitiveDateTime::new(date, timev).assume_utc())
} }

View File

@ -9,9 +9,7 @@ pub enum CirEncodeError {
Validate(String), Validate(String),
} }
pub fn encode_cir( pub fn encode_cir(cir: &CanonicalInputRepresentation) -> Result<Vec<u8>, CirEncodeError> {
cir: &CanonicalInputRepresentation,
) -> Result<Vec<u8>, CirEncodeError> {
cir.validate().map_err(CirEncodeError::Validate)?; cir.validate().map_err(CirEncodeError::Validate)?;
Ok(encode_sequence(&[ Ok(encode_sequence(&[
encode_integer_u32(cir.version), encode_integer_u32(cir.version),
@ -134,7 +132,10 @@ fn encode_len_into(len: usize, out: &mut Vec<u8>) {
return; return;
} }
let bytes = len.to_be_bytes(); let bytes = len.to_be_bytes();
let first_non_zero = bytes.iter().position(|&b| b != 0).unwrap_or(bytes.len() - 1); let first_non_zero = bytes
.iter()
.position(|&b| b != 0)
.unwrap_or(bytes.len() - 1);
let len_bytes = &bytes[first_non_zero..]; let len_bytes = &bytes[first_non_zero..];
out.push(0x80 | (len_bytes.len() as u8)); out.push(0x80 | (len_bytes.len() as u8));
out.extend_from_slice(len_bytes); out.extend_from_slice(len_bytes);
@ -144,4 +145,3 @@ fn encode_len_into(len: usize, out: &mut Vec<u8>) {
const _: () = { const _: () = {
let _ = CIR_VERSION_V1; let _ = CIR_VERSION_V1;
}; };

View File

@ -3,13 +3,14 @@ use std::collections::BTreeSet;
use std::path::Path; use std::path::Path;
use crate::audit::{AuditObjectResult, PublicationPointAudit}; use crate::audit::{AuditObjectResult, PublicationPointAudit};
use crate::blob_store::RawObjectStore;
use crate::cir::encode::{CirEncodeError, encode_cir}; use crate::cir::encode::{CirEncodeError, encode_cir};
use crate::cir::model::{ use crate::cir::model::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,
}; };
use crate::cir::static_pool::{ use crate::cir::static_pool::{
CirStaticPoolError, CirStaticPoolExportSummary, write_bytes_to_static_pool, CirStaticPoolError, CirStaticPoolExportSummary, export_hashes_from_store,
export_hashes_from_store, write_bytes_to_static_pool,
}; };
use crate::data_model::ta::TrustAnchor; use crate::data_model::ta::TrustAnchor;
use crate::storage::{RepositoryViewState, RocksStore}; use crate::storage::{RepositoryViewState, RocksStore};
@ -46,11 +47,24 @@ pub enum CirExportError {
Write(String, String), Write(String, String),
} }
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum CirObjectPoolExportSummary {
Static(CirStaticPoolExportSummary),
RawStore(CirRawStoreExportSummary),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CirRawStoreExportSummary {
pub unique_hashes: usize,
pub written_entries: usize,
pub reused_entries: usize,
}
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct CirExportSummary { pub struct CirExportSummary {
pub object_count: usize, pub object_count: usize,
pub tal_count: usize, pub tal_count: usize,
pub static_pool: CirStaticPoolExportSummary, pub object_pool: CirObjectPoolExportSummary,
pub timing: CirExportTiming, pub timing: CirExportTiming,
} }
@ -132,7 +146,10 @@ pub fn build_cir_from_run(
Ok(cir) Ok(cir)
} }
pub fn write_cir_file(path: &Path, cir: &CanonicalInputRepresentation) -> Result<(), CirExportError> { pub fn write_cir_file(
path: &Path,
cir: &CanonicalInputRepresentation,
) -> Result<(), CirExportError> {
let der = encode_cir(cir)?; let der = encode_cir(cir)?;
if let Some(parent) = path.parent() { if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent) std::fs::create_dir_all(parent)
@ -175,6 +192,58 @@ pub fn export_cir_static_pool(
Ok(summary) Ok(summary)
} }
pub fn export_cir_raw_store(
store: &RocksStore,
raw_store_path: &Path,
cir: &CanonicalInputRepresentation,
trust_anchor: &TrustAnchor,
) -> Result<CirRawStoreExportSummary, CirExportError> {
let ta_hash = ta_sha256_hex(&trust_anchor.ta_certificate.raw_der);
let unique: BTreeSet<String> = cir
.objects
.iter()
.map(|item| hex::encode(&item.sha256))
.collect();
let mut written_entries = 0usize;
let mut reused_entries = 0usize;
for sha256_hex in &unique {
if store
.get_raw_entry(sha256_hex)
.map_err(|e| CirExportError::Write(raw_store_path.display().to_string(), e.to_string()))?
.is_some()
{
reused_entries += 1;
continue;
}
if sha256_hex == &ta_hash {
let mut entry =
crate::storage::RawByHashEntry::from_bytes(ta_hash.clone(), trust_anchor.ta_certificate.raw_der.clone());
entry.object_type = Some("cer".to_string());
for object in &cir.objects {
if hex::encode(&object.sha256) == ta_hash {
entry.origin_uris.push(object.rsync_uri.clone());
}
}
store
.put_raw_by_hash_entry(&entry)
.map_err(|e| CirExportError::Write(raw_store_path.display().to_string(), e.to_string()))?;
written_entries += 1;
continue;
}
return Err(CirExportError::Write(
raw_store_path.display().to_string(),
format!("raw store missing object for sha256={sha256_hex}"),
));
}
Ok(CirRawStoreExportSummary {
unique_hashes: unique.len(),
written_entries,
reused_entries,
})
}
pub fn export_cir_from_run( pub fn export_cir_from_run(
store: &RocksStore, store: &RocksStore,
trust_anchor: &TrustAnchor, trust_anchor: &TrustAnchor,
@ -182,9 +251,19 @@ pub fn export_cir_from_run(
validation_time: time::OffsetDateTime, validation_time: time::OffsetDateTime,
publication_points: &[PublicationPointAudit], publication_points: &[PublicationPointAudit],
cir_out: &Path, cir_out: &Path,
static_root: &Path, static_root: Option<&Path>,
raw_store_path: Option<&Path>,
capture_date_utc: time::Date, capture_date_utc: time::Date,
) -> Result<CirExportSummary, CirExportError> { ) -> Result<CirExportSummary, CirExportError> {
let backend_count = static_root.is_some() as u8 + raw_store_path.is_some() as u8;
match backend_count {
1 => {}
_ => {
return Err(CirExportError::Validate(
"must specify exactly one CIR object pool backend".to_string(),
));
}
}
let total_started = std::time::Instant::now(); let total_started = std::time::Instant::now();
let started = std::time::Instant::now(); let started = std::time::Instant::now();
@ -198,7 +277,22 @@ pub fn export_cir_from_run(
let build_cir_ms = started.elapsed().as_millis() as u64; let build_cir_ms = started.elapsed().as_millis() as u64;
let started = std::time::Instant::now(); let started = std::time::Instant::now();
let static_pool = export_cir_static_pool(store, static_root, capture_date_utc, &cir, trust_anchor)?; let object_pool = if let Some(static_root) = static_root {
CirObjectPoolExportSummary::Static(export_cir_static_pool(
store,
static_root,
capture_date_utc,
&cir,
trust_anchor,
)?)
} else {
CirObjectPoolExportSummary::RawStore(export_cir_raw_store(
store,
raw_store_path.expect("validated"),
&cir,
trust_anchor,
)?)
};
let static_pool_ms = started.elapsed().as_millis() as u64; let static_pool_ms = started.elapsed().as_millis() as u64;
let started = std::time::Instant::now(); let started = std::time::Instant::now();
@ -208,7 +302,7 @@ pub fn export_cir_from_run(
Ok(CirExportSummary { Ok(CirExportSummary {
object_count: cir.objects.len(), object_count: cir.objects.len(),
tal_count: cir.tals.len(), tal_count: cir.tals.len(),
static_pool, object_pool,
timing: CirExportTiming { timing: CirExportTiming {
build_cir_ms, build_cir_ms,
static_pool_ms, static_pool_ms,
@ -246,7 +340,8 @@ mod tests {
fn sample_trust_anchor() -> TrustAnchor { fn sample_trust_anchor() -> TrustAnchor {
let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")); let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let tal_bytes = std::fs::read(base.join("tests/fixtures/tal/apnic-rfc7730-https.tal")).unwrap(); let tal_bytes =
std::fs::read(base.join("tests/fixtures/tal/apnic-rfc7730-https.tal")).unwrap();
let ta_der = std::fs::read(base.join("tests/fixtures/ta/apnic-ta.cer")).unwrap(); let ta_der = std::fs::read(base.join("tests/fixtures/ta/apnic-ta.cer")).unwrap();
let tal = Tal::decode_bytes(&tal_bytes).unwrap(); let tal = Tal::decode_bytes(&tal_bytes).unwrap();
TrustAnchor::bind_der(tal, &ta_der, None).unwrap() TrustAnchor::bind_der(tal, &ta_der, None).unwrap()
@ -264,7 +359,8 @@ mod tests {
let bytes = b"object-a".to_vec(); let bytes = b"object-a".to_vec();
let hash = sha256_hex(&bytes); let hash = sha256_hex(&bytes);
let mut raw = RawByHashEntry::from_bytes(hash.clone(), bytes.clone()); let mut raw = RawByHashEntry::from_bytes(hash.clone(), bytes.clone());
raw.origin_uris.push("rsync://example.test/repo/a.cer".into()); raw.origin_uris
.push("rsync://example.test/repo/a.cer".into());
store.put_raw_by_hash_entry(&raw).unwrap(); store.put_raw_by_hash_entry(&raw).unwrap();
store store
.put_repository_view_entry(&RepositoryViewEntry { .put_repository_view_entry(&RepositoryViewEntry {
@ -288,14 +384,16 @@ mod tests {
assert_eq!(cir.version, CIR_VERSION_V1); assert_eq!(cir.version, CIR_VERSION_V1);
assert_eq!(cir.tals.len(), 1); assert_eq!(cir.tals.len(), 1);
assert_eq!(cir.tals[0].tal_uri, "https://example.test/root.tal"); assert_eq!(cir.tals[0].tal_uri, "https://example.test/root.tal");
assert!(cir assert!(
.objects cir.objects
.iter() .iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/a.cer")); .any(|item| item.rsync_uri == "rsync://example.test/repo/a.cer")
assert!(cir );
.objects assert!(
cir.objects
.iter() .iter()
.any(|item| item.rsync_uri.contains("apnic-rpki-root-iana-origin.cer"))); .any(|item| item.rsync_uri.contains("apnic-rpki-root-iana-origin.cer"))
);
} }
#[test] #[test]
@ -309,7 +407,8 @@ mod tests {
let bytes = b"object-b".to_vec(); let bytes = b"object-b".to_vec();
let hash = sha256_hex(&bytes); let hash = sha256_hex(&bytes);
let mut raw = RawByHashEntry::from_bytes(hash.clone(), bytes.clone()); let mut raw = RawByHashEntry::from_bytes(hash.clone(), bytes.clone());
raw.origin_uris.push("rsync://example.test/repo/b.roa".into()); raw.origin_uris
.push("rsync://example.test/repo/b.roa".into());
store.put_raw_by_hash_entry(&raw).unwrap(); store.put_raw_by_hash_entry(&raw).unwrap();
store store
.put_repository_view_entry(&RepositoryViewEntry { .put_repository_view_entry(&RepositoryViewEntry {
@ -330,12 +429,17 @@ mod tests {
sample_time(), sample_time(),
&[], &[],
&cir_path, &cir_path,
&static_root, Some(&static_root),
None,
sample_date(), sample_date(),
) )
.expect("export cir"); .expect("export cir");
assert_eq!(summary.tal_count, 1); assert_eq!(summary.tal_count, 1);
assert!(summary.object_count >= 2); assert!(summary.object_count >= 2);
match summary.object_pool {
CirObjectPoolExportSummary::Static(_) => {}
other => panic!("unexpected backend: {other:?}"),
}
let der = std::fs::read(&cir_path).unwrap(); let der = std::fs::read(&cir_path).unwrap();
let cir = decode_cir(&der).unwrap(); let cir = decode_cir(&der).unwrap();
@ -345,6 +449,51 @@ mod tests {
assert_eq!(std::fs::read(object_path).unwrap(), bytes); assert_eq!(std::fs::read(object_path).unwrap(), bytes);
} }
#[test]
fn export_cir_from_run_uses_raw_store_backend_without_pool_export() {
    let tmp = tempfile::tempdir().unwrap();
    let db_dir = tmp.path().join("db");
    let raw_db = tmp.path().join("raw-store.db");
    let out_dir = tmp.path().join("out");
    let store = RocksStore::open_with_external_raw_store(&db_dir, &raw_db).unwrap();

    // Seed one raw object plus the repository-view entry that points at it.
    let payload = b"object-d".to_vec();
    let digest = sha256_hex(&payload);
    let mut raw_entry = RawByHashEntry::from_bytes(digest.clone(), payload);
    raw_entry
        .origin_uris
        .push("rsync://example.test/repo/d.roa".into());
    store.put_raw_by_hash_entry(&raw_entry).unwrap();
    store
        .put_repository_view_entry(&RepositoryViewEntry {
            rsync_uri: "rsync://example.test/repo/d.roa".to_string(),
            current_hash: Some(digest),
            repository_source: Some("https://rrdp.example.test/notification.xml".to_string()),
            object_type: Some("roa".to_string()),
            state: RepositoryViewState::Present,
        })
        .unwrap();

    let ta = sample_trust_anchor();
    let cir_path = out_dir.join("example.cir");
    let summary = export_cir_from_run(
        &store,
        &ta,
        "https://example.test/root.tal",
        sample_time(),
        &[],
        &cir_path,
        None,
        Some(&raw_db),
        sample_date(),
    )
    .expect("export cir to raw store");

    // The summary must report the raw-store backend (TA cert + seeded object
    // means at least two unique hashes), never the static pool.
    match summary.object_pool {
        CirObjectPoolExportSummary::RawStore(ref s) => assert!(s.unique_hashes >= 2),
        other => panic!("unexpected backend: {other:?}"),
    }
    assert!(raw_db.exists());
}
#[test] #[test]
fn build_cir_from_run_includes_vcir_current_instance_objects_from_audit() { fn build_cir_from_run_includes_vcir_current_instance_objects_from_audit() {
let td = tempfile::tempdir().unwrap(); let td = tempfile::tempdir().unwrap();
@ -357,14 +506,16 @@ mod tests {
}; };
pp.objects.push(crate::audit::ObjectAuditEntry { pp.objects.push(crate::audit::ObjectAuditEntry {
rsync_uri: "rsync://example.test/repo/fallback.mft".to_string(), rsync_uri: "rsync://example.test/repo/fallback.mft".to_string(),
sha256_hex: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".to_string(), sha256_hex: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
.to_string(),
kind: crate::audit::AuditObjectKind::Manifest, kind: crate::audit::AuditObjectKind::Manifest,
result: crate::audit::AuditObjectResult::Ok, result: crate::audit::AuditObjectResult::Ok,
detail: None, detail: None,
}); });
pp.objects.push(crate::audit::ObjectAuditEntry { pp.objects.push(crate::audit::ObjectAuditEntry {
rsync_uri: "rsync://example.test/repo/fallback.roa".to_string(), rsync_uri: "rsync://example.test/repo/fallback.roa".to_string(),
sha256_hex: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb".to_string(), sha256_hex: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
.to_string(),
kind: crate::audit::AuditObjectKind::Roa, kind: crate::audit::AuditObjectKind::Roa,
result: crate::audit::AuditObjectResult::Ok, result: crate::audit::AuditObjectResult::Ok,
detail: None, detail: None,
@ -379,13 +530,15 @@ mod tests {
) )
.expect("build cir"); .expect("build cir");
assert!(cir assert!(
.objects cir.objects
.iter() .iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/fallback.mft")); .any(|item| item.rsync_uri == "rsync://example.test/repo/fallback.mft")
assert!(cir );
.objects assert!(
cir.objects
.iter() .iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/fallback.roa")); .any(|item| item.rsync_uri == "rsync://example.test/repo/fallback.roa")
);
} }
} }

View File

@ -1,6 +1,7 @@
use std::fs; use std::fs;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use crate::blob_store::{ExternalRawStoreDb, RawObjectStore};
use crate::cir::model::CanonicalInputRepresentation; use crate::cir::model::CanonicalInputRepresentation;
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
@ -27,13 +28,30 @@ pub enum CirMaterializeError {
MissingStaticObject { sha256_hex: String }, MissingStaticObject { sha256_hex: String },
#[error("link target failed: {src} -> {dst}: {detail}")] #[error("link target failed: {src} -> {dst}: {detail}")]
Link { src: String, dst: String, detail: String }, Link {
src: String,
dst: String,
detail: String,
},
#[error("copy target failed: {src} -> {dst}: {detail}")] #[error("copy target failed: {src} -> {dst}: {detail}")]
Copy { src: String, dst: String, detail: String }, Copy {
src: String,
dst: String,
detail: String,
},
#[error("mirror tree mismatch after materialize: {0}")] #[error("mirror tree mismatch after materialize: {0}")]
TreeMismatch(String), TreeMismatch(String),
#[error("open raw store failed: {path}: {detail}")]
OpenRawStore { path: String, detail: String },
#[error("raw object not found for sha256={sha256_hex}")]
MissingRawStoreObject { sha256_hex: String },
#[error("read raw store failed for sha256={sha256_hex}: {detail}")]
ReadRawStore { sha256_hex: String, detail: String },
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
@ -49,8 +67,7 @@ pub fn materialize_cir(
mirror_root: &Path, mirror_root: &Path,
clean_rebuild: bool, clean_rebuild: bool,
) -> Result<CirMaterializeSummary, CirMaterializeError> { ) -> Result<CirMaterializeSummary, CirMaterializeError> {
cir.validate() cir.validate().map_err(CirMaterializeError::TreeMismatch)?;
.map_err(CirMaterializeError::TreeMismatch)?;
if clean_rebuild && mirror_root.exists() { if clean_rebuild && mirror_root.exists() {
fs::remove_dir_all(mirror_root).map_err(|e| CirMaterializeError::RemoveMirrorRoot { fs::remove_dir_all(mirror_root).map_err(|e| CirMaterializeError::RemoveMirrorRoot {
@ -120,6 +137,89 @@ pub fn materialize_cir(
}) })
} }
/// Materialize every object referenced by `cir` into a mirror tree under
/// `mirror_root`, reading object bytes out of the external raw-by-hash
/// RocksDB at `raw_store_db` (the alternative backend to the date-sharded
/// static pool used by `materialize_cir`).
///
/// When `clean_rebuild` is true any existing mirror tree is removed first;
/// otherwise stale files left behind from a previous run survive and will
/// trip the exact-tree check at the end.
///
/// Returns a `CirMaterializeSummary` whose `linked_files` is always 0: this
/// backend copies bytes out of the DB and never hard-links.
///
/// Errors map one-to-one onto the failure site: `RemoveMirrorRoot` /
/// `CreateMirrorRoot` / `CreateParent` / `RemoveExistingTarget` / `Copy` for
/// filesystem steps, `OpenRawStore` / `ReadRawStore` /
/// `MissingRawStoreObject` for the DB, and `TreeMismatch` both for an
/// invalid CIR up front and for a mirror tree that does not exactly match
/// the CIR's URI set afterwards.
pub fn materialize_cir_from_raw_store(
    cir: &CanonicalInputRepresentation,
    raw_store_db: &Path,
    mirror_root: &Path,
    clean_rebuild: bool,
) -> Result<CirMaterializeSummary, CirMaterializeError> {
    // Reject structurally invalid CIRs before touching the filesystem.
    cir.validate().map_err(CirMaterializeError::TreeMismatch)?;
    if clean_rebuild && mirror_root.exists() {
        fs::remove_dir_all(mirror_root).map_err(|e| CirMaterializeError::RemoveMirrorRoot {
            path: mirror_root.display().to_string(),
            detail: e.to_string(),
        })?;
    }
    fs::create_dir_all(mirror_root).map_err(|e| CirMaterializeError::CreateMirrorRoot {
        path: mirror_root.display().to_string(),
        detail: e.to_string(),
    })?;
    let raw_store =
        ExternalRawStoreDb::open(raw_store_db).map_err(|e| CirMaterializeError::OpenRawStore {
            path: raw_store_db.display().to_string(),
            detail: e.to_string(),
        })?;
    let mut copied_files = 0usize;
    for object in &cir.objects {
        // Objects are addressed in the store by their lowercase hex sha256.
        let sha256_hex = hex::encode(&object.sha256);
        let entry = raw_store
            .get_raw_entry(&sha256_hex)
            .map_err(|e| CirMaterializeError::ReadRawStore {
                sha256_hex: sha256_hex.clone(),
                detail: e.to_string(),
            })?
            .ok_or_else(|| CirMaterializeError::MissingRawStoreObject {
                sha256_hex: sha256_hex.clone(),
            })?;
        // Mirror layout is derived from the object's rsync URI (host/path).
        let relative = mirror_relative_path_for_rsync_uri(&object.rsync_uri)?;
        let target = mirror_root.join(&relative);
        if let Some(parent) = target.parent() {
            fs::create_dir_all(parent).map_err(|e| CirMaterializeError::CreateParent {
                path: parent.display().to_string(),
                detail: e.to_string(),
            })?;
        }
        // Remove an existing target explicitly so a stale file (or one with
        // odd permissions) is reported as RemoveExistingTarget, not Copy.
        if target.exists() {
            fs::remove_file(&target).map_err(|e| CirMaterializeError::RemoveExistingTarget {
                path: target.display().to_string(),
                detail: e.to_string(),
            })?;
        }
        fs::write(&target, &entry.bytes).map_err(|e| CirMaterializeError::Copy {
            src: raw_store_db.display().to_string(),
            dst: target.display().to_string(),
            detail: e.to_string(),
        })?;
        copied_files += 1;
    }
    // Verify the mirror tree is EXACTLY the CIR's URI set — extra files
    // (e.g. leftovers when clean_rebuild=false) are an error, not ignored.
    let actual = collect_materialized_uris(mirror_root)?;
    let expected = cir
        .objects
        .iter()
        .map(|item| item.rsync_uri.clone())
        .collect::<std::collections::BTreeSet<_>>();
    if actual != expected {
        return Err(CirMaterializeError::TreeMismatch(format!(
            "expected {} files, got {} files",
            expected.len(),
            actual.len()
        )));
    }
    Ok(CirMaterializeSummary {
        object_count: cir.objects.len(),
        linked_files: 0,
        copied_files,
    })
}
pub fn mirror_relative_path_for_rsync_uri(rsync_uri: &str) -> Result<PathBuf, CirMaterializeError> { pub fn mirror_relative_path_for_rsync_uri(rsync_uri: &str) -> Result<PathBuf, CirMaterializeError> {
let url = url::Url::parse(rsync_uri) let url = url::Url::parse(rsync_uri)
.map_err(|_| CirMaterializeError::InvalidRsyncUri(rsync_uri.to_string()))?; .map_err(|_| CirMaterializeError::InvalidRsyncUri(rsync_uri.to_string()))?;
@ -160,8 +260,8 @@ pub fn resolve_static_pool_file(
let prefix1 = &sha256_hex[0..2]; let prefix1 = &sha256_hex[0..2];
let prefix2 = &sha256_hex[2..4]; let prefix2 = &sha256_hex[2..4];
let entries = fs::read_dir(static_root) let entries =
.map_err(|_| CirMaterializeError::MissingStaticObject { fs::read_dir(static_root).map_err(|_| CirMaterializeError::MissingStaticObject {
sha256_hex: sha256_hex.to_string(), sha256_hex: sha256_hex.to_string(),
})?; })?;
let mut dates = entries let mut dates = entries
@ -215,10 +315,15 @@ fn collect_materialized_uris(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{CirMaterializeError, materialize_cir, mirror_relative_path_for_rsync_uri, resolve_static_pool_file}; use super::{
CirMaterializeError, materialize_cir, materialize_cir_from_raw_store,
mirror_relative_path_for_rsync_uri, resolve_static_pool_file,
};
use crate::blob_store::ExternalRawStoreDb;
use crate::cir::model::{ use crate::cir::model::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,
}; };
use sha2::Digest;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
fn sample_time() -> time::OffsetDateTime { fn sample_time() -> time::OffsetDateTime {
@ -257,21 +362,45 @@ mod tests {
} }
} }
/// Build a two-object CIR whose sha256 digests genuinely match the supplied
/// payloads `a` and `b`, so raw-store lookups by hash can succeed.
fn cir_with_real_hashes(a: &[u8], b: &[u8]) -> CanonicalInputRepresentation {
    let objects = [
        ("rsync://example.net/repo/a.cer", a),
        ("rsync://example.net/repo/nested/b.roa", b),
    ]
    .into_iter()
    .map(|(uri, payload)| CirObject {
        rsync_uri: uri.to_string(),
        sha256: sha2::Sha256::digest(payload).to_vec(),
    })
    .collect();
    CanonicalInputRepresentation {
        version: CIR_VERSION_V1,
        hash_alg: CirHashAlgorithm::Sha256,
        validation_time: sample_time(),
        objects,
        tals: vec![CirTal {
            tal_uri: "https://tal.example.net/root.tal".to_string(),
            tal_bytes: b"x".to_vec(),
        }],
    }
}
#[test] #[test]
fn mirror_relative_path_for_rsync_uri_maps_host_and_path() { fn mirror_relative_path_for_rsync_uri_maps_host_and_path() {
let path = let path =
mirror_relative_path_for_rsync_uri("rsync://example.net/repo/nested/b.roa").unwrap(); mirror_relative_path_for_rsync_uri("rsync://example.net/repo/nested/b.roa").unwrap();
assert_eq!(path, PathBuf::from("example.net").join("repo").join("nested").join("b.roa")); assert_eq!(
path,
PathBuf::from("example.net")
.join("repo")
.join("nested")
.join("b.roa")
);
} }
#[test] #[test]
fn resolve_static_pool_file_finds_hash_across_dates() { fn resolve_static_pool_file_finds_hash_across_dates() {
let td = tempfile::tempdir().unwrap(); let td = tempfile::tempdir().unwrap();
let path = td let path = td.path().join("20260407").join("11").join("11");
.path()
.join("20260407")
.join("11")
.join("11");
std::fs::create_dir_all(&path).unwrap(); std::fs::create_dir_all(&path).unwrap();
let file = path.join("1111111111111111111111111111111111111111111111111111111111111111"); let file = path.join("1111111111111111111111111111111111111111111111111111111111111111");
std::fs::write(&file, b"x").unwrap(); std::fs::write(&file, b"x").unwrap();
@ -289,14 +418,20 @@ mod tests {
let td = tempfile::tempdir().unwrap(); let td = tempfile::tempdir().unwrap();
let err = resolve_static_pool_file(td.path(), "not-a-hash") let err = resolve_static_pool_file(td.path(), "not-a-hash")
.expect_err("invalid hash should fail"); .expect_err("invalid hash should fail");
assert!(matches!(err, CirMaterializeError::MissingStaticObject { .. })); assert!(matches!(
err,
CirMaterializeError::MissingStaticObject { .. }
));
let err = resolve_static_pool_file( let err = resolve_static_pool_file(
td.path(), td.path(),
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
) )
.expect_err("missing hash should fail"); .expect_err("missing hash should fail");
assert!(matches!(err, CirMaterializeError::MissingStaticObject { .. })); assert!(matches!(
err,
CirMaterializeError::MissingStaticObject { .. }
));
} }
#[test] #[test]
@ -333,7 +468,10 @@ mod tests {
let summary = materialize_cir(&sample_cir(), &static_root, &mirror_root, true).unwrap(); let summary = materialize_cir(&sample_cir(), &static_root, &mirror_root, true).unwrap();
assert_eq!(summary.object_count, 2); assert_eq!(summary.object_count, 2);
assert_eq!(std::fs::read(mirror_root.join("example.net/repo/a.cer")).unwrap(), b"a"); assert_eq!(
std::fs::read(mirror_root.join("example.net/repo/a.cer")).unwrap(),
b"a"
);
assert_eq!( assert_eq!(
std::fs::read(mirror_root.join("example.net/repo/nested/b.roa")).unwrap(), std::fs::read(mirror_root.join("example.net/repo/nested/b.roa")).unwrap(),
b"b" b"b"
@ -344,14 +482,12 @@ mod tests {
#[test] #[test]
fn materialize_fails_when_static_object_missing() { fn materialize_fails_when_static_object_missing() {
let td = tempfile::tempdir().unwrap(); let td = tempfile::tempdir().unwrap();
let err = materialize_cir( let err = materialize_cir(&sample_cir(), td.path(), &td.path().join("mirror"), true)
&sample_cir(),
td.path(),
&td.path().join("mirror"),
true,
)
.expect_err("missing static object must fail"); .expect_err("missing static object must fail");
assert!(matches!(err, CirMaterializeError::MissingStaticObject { .. })); assert!(matches!(
err,
CirMaterializeError::MissingStaticObject { .. }
));
} }
#[test] #[test]
@ -380,6 +516,192 @@ mod tests {
assert!(matches!(err, CirMaterializeError::TreeMismatch(_))); assert!(matches!(err, CirMaterializeError::TreeMismatch(_)));
} }
#[test]
fn materialize_from_raw_store_creates_expected_tree() {
    let td = tempfile::tempdir().unwrap();
    let raw_store_path = td.path().join("raw-store.db");
    let mirror_root = td.path().join("mirror");
    // Use the shared fixture instead of duplicating the CIR literal inline:
    // it builds the same two objects ("a.cer" <- b"a", "nested/b.roa" <- b"b")
    // with real sha256 digests, matching the other raw-store tests.
    let cir = cir_with_real_hashes(b"a", b"b");
    {
        // Scoped so the store handle is dropped before materialize reopens it.
        let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
        let mut entry_a = crate::storage::RawByHashEntry::from_bytes(
            hex::encode(&cir.objects[0].sha256),
            b"a".to_vec(),
        );
        entry_a.origin_uris.push(cir.objects[0].rsync_uri.clone());
        raw_store.put_raw_entry(&entry_a).unwrap();
        let mut entry_b = crate::storage::RawByHashEntry::from_bytes(
            hex::encode(&cir.objects[1].sha256),
            b"b".to_vec(),
        );
        entry_b.origin_uris.push(cir.objects[1].rsync_uri.clone());
        raw_store.put_raw_entry(&entry_b).unwrap();
    }
    let summary =
        materialize_cir_from_raw_store(&cir, &raw_store_path, &mirror_root, true).unwrap();
    assert_eq!(summary.object_count, 2);
    // Raw-store backend always copies, never hard-links.
    assert_eq!(summary.linked_files, 0);
    assert_eq!(summary.copied_files, 2);
    assert_eq!(
        std::fs::read(mirror_root.join("example.net/repo/a.cer")).unwrap(),
        b"a"
    );
    assert_eq!(
        std::fs::read(mirror_root.join("example.net/repo/nested/b.roa")).unwrap(),
        b"b"
    );
}
#[test]
fn materialize_from_raw_store_fails_when_object_missing() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = tmp.path().join("raw-store.db");
    let mirror = tmp.path().join("mirror");
    let cir = cir_with_real_hashes(b"a", b"b");
    // Store only the first object; the lookup for the second must fail.
    {
        let store = ExternalRawStoreDb::open(&db_path).unwrap();
        let first = crate::storage::RawByHashEntry::from_bytes(
            hex::encode(&cir.objects[0].sha256),
            b"a".to_vec(),
        );
        store.put_raw_entry(&first).unwrap();
    }
    let err = materialize_cir_from_raw_store(&cir, &db_path, &mirror, true)
        .expect_err("missing second object should fail");
    assert!(matches!(
        err,
        CirMaterializeError::MissingRawStoreObject { .. }
    ));
}
#[test]
fn materialize_from_raw_store_detects_stale_tree_when_not_clean_rebuild() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = tmp.path().join("raw-store.db");
    let mirror = tmp.path().join("mirror");
    let cir = cir_with_real_hashes(b"a", b"b");
    {
        let store = ExternalRawStoreDb::open(&db_path).unwrap();
        for (object, payload) in cir.objects.iter().zip([b"a".to_vec(), b"b".to_vec()]) {
            let mut entry = crate::storage::RawByHashEntry::from_bytes(
                hex::encode(&object.sha256),
                payload,
            );
            entry.origin_uris.push(object.rsync_uri.clone());
            store.put_raw_entry(&entry).unwrap();
        }
    }
    // Pre-plant a file the CIR never mentions; with clean_rebuild=false it
    // survives materialization and must trip the exact-tree verification.
    std::fs::create_dir_all(mirror.join("extra")).unwrap();
    std::fs::write(mirror.join("extra/stale.txt"), b"stale").unwrap();
    let err = materialize_cir_from_raw_store(&cir, &db_path, &mirror, false)
        .expect_err("stale file should fail exact tree check");
    assert!(matches!(err, CirMaterializeError::TreeMismatch(_)));
}
#[test]
fn materialize_from_raw_store_overwrites_existing_targets() {
    let tmp = tempfile::tempdir().unwrap();
    let db_path = tmp.path().join("raw-store.db");
    let mirror = tmp.path().join("mirror");
    let payload_a = b"new-a".to_vec();
    let payload_b = b"new-b".to_vec();
    let cir = cir_with_real_hashes(&payload_a, &payload_b);
    {
        let store = ExternalRawStoreDb::open(&db_path).unwrap();
        for (object, payload) in cir
            .objects
            .iter()
            .zip([payload_a.clone(), payload_b.clone()])
        {
            let mut entry =
                crate::storage::RawByHashEntry::from_bytes(hex::encode(&object.sha256), payload);
            entry.origin_uris.push(object.rsync_uri.clone());
            store.put_raw_entry(&entry).unwrap();
        }
    }
    // Plant stale content at one target path; materialize must replace it.
    let target = mirror.join("example.net/repo/a.cer");
    std::fs::create_dir_all(target.parent().unwrap()).unwrap();
    std::fs::write(&target, b"old").unwrap();
    let summary = materialize_cir_from_raw_store(&cir, &db_path, &mirror, false).unwrap();
    assert_eq!(summary.copied_files, 2);
    assert_eq!(std::fs::read(&target).unwrap(), payload_a);
}
#[test]
fn materialize_from_raw_store_reports_codec_errors() {
    let td = tempfile::tempdir().unwrap();
    let raw_store_path = td.path().join("raw-store.db");
    let mirror_root = td.path().join("mirror");
    // Create (and immediately drop) the store so the DB files exist on disk.
    {
        let _raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
    }
    // Corrupt the store behind the codec's back: write garbage under the
    // raw-by-hash key prefix so decoding the entry fails.
    // NOTE(review): assumes keys are "rawbyhash:<hex>" — keep in sync with
    // the store's key layout if that ever changes.
    {
        let db = rocksdb::DB::open_default(&raw_store_path).unwrap();
        db.put(
            b"rawbyhash:1111111111111111111111111111111111111111111111111111111111111111",
            b"bad-cbor",
        )
        .unwrap();
    }
    let err = materialize_cir_from_raw_store(
        &CanonicalInputRepresentation {
            version: CIR_VERSION_V1,
            hash_alg: CirHashAlgorithm::Sha256,
            validation_time: sample_time(),
            objects: vec![CirObject {
                rsync_uri: "rsync://example.net/repo/a.cer".to_string(),
                sha256: hex::decode(
                    "1111111111111111111111111111111111111111111111111111111111111111",
                )
                .unwrap(),
            }],
            tals: vec![CirTal {
                tal_uri: "https://tal.example.net/root.tal".to_string(),
                tal_bytes: b"x".to_vec(),
            }],
        },
        &raw_store_path,
        &mirror_root,
        true,
    )
    .expect_err("corrupt raw-store object should fail");
    // Either error is acceptable: a store may surface the bad value as a
    // decode failure (ReadRawStore) or treat it as absent (MissingRawStoreObject).
    assert!(matches!(
        err,
        CirMaterializeError::ReadRawStore { .. } | CirMaterializeError::MissingRawStoreObject { .. }
    ));
}
fn write_static(root: &Path, date: &str, hash: &str, bytes: &[u8]) { fn write_static(root: &Path, date: &str, hash: &str, bytes: &[u8]) {
let path = root.join(date).join(&hash[0..2]).join(&hash[2..4]); let path = root.join(date).join(&hash[0..2]).join(&hash[2..4]);
std::fs::create_dir_all(&path).unwrap(); std::fs::create_dir_all(&path).unwrap();

View File

@ -1,26 +1,29 @@
pub mod decode; pub mod decode;
pub mod encode; pub mod encode;
#[cfg(feature = "full")]
pub mod export;
pub mod materialize; pub mod materialize;
pub mod model; pub mod model;
pub mod sequence; pub mod sequence;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod export;
#[cfg(feature = "full")]
pub mod static_pool; pub mod static_pool;
pub use decode::{CirDecodeError, decode_cir}; pub use decode::{CirDecodeError, decode_cir};
pub use encode::{CirEncodeError, encode_cir}; pub use encode::{CirEncodeError, encode_cir};
#[cfg(feature = "full")]
pub use export::{
CirExportError, CirExportSummary, build_cir_from_run, export_cir_from_run, write_cir_file,
};
pub use materialize::{ pub use materialize::{
CirMaterializeError, CirMaterializeSummary, materialize_cir, mirror_relative_path_for_rsync_uri, CirMaterializeError, CirMaterializeSummary, materialize_cir,
resolve_static_pool_file, materialize_cir_from_raw_store,
mirror_relative_path_for_rsync_uri, resolve_static_pool_file,
}; };
pub use model::{ pub use model::{
CIR_VERSION_V1, CirHashAlgorithm, CirObject, CirTal, CanonicalInputRepresentation, CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,
}; };
pub use sequence::{CirSequenceManifest, CirSequenceStep, CirSequenceStepKind}; pub use sequence::{CirSequenceManifest, CirSequenceStep, CirSequenceStepKind};
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub use export::{CirExportError, CirExportSummary, build_cir_from_run, export_cir_from_run, write_cir_file};
#[cfg(feature = "full")]
pub use static_pool::{ pub use static_pool::{
CirStaticPoolError, CirStaticPoolExportSummary, CirStaticPoolWriteResult, CirStaticPoolError, CirStaticPoolExportSummary, CirStaticPoolWriteResult,
export_hashes_from_store, static_pool_path, static_pool_relative_path, export_hashes_from_store, static_pool_path, static_pool_relative_path,
@ -59,8 +62,7 @@ mod tests {
], ],
tals: vec![CirTal { tals: vec![CirTal {
tal_uri: "https://tal.example.net/root.tal".to_string(), tal_uri: "https://tal.example.net/root.tal".to_string(),
tal_bytes: tal_bytes: b"https://tal.example.net/ta.cer\nrsync://example.net/repo/ta.cer\nMIIB"
b"https://tal.example.net/ta.cer\nrsync://example.net/repo/ta.cer\nMIIB"
.to_vec(), .to_vec(),
}], }],
} }
@ -74,7 +76,10 @@ mod tests {
} else { } else {
let len = value.len(); let len = value.len();
let bytes = len.to_be_bytes(); let bytes = len.to_be_bytes();
let first_non_zero = bytes.iter().position(|&b| b != 0).unwrap_or(bytes.len() - 1); let first_non_zero = bytes
.iter()
.position(|&b| b != 0)
.unwrap_or(bytes.len() - 1);
let len_bytes = &bytes[first_non_zero..]; let len_bytes = &bytes[first_non_zero..];
out.push(0x80 | len_bytes.len() as u8); out.push(0x80 | len_bytes.len() as u8);
out.extend_from_slice(len_bytes); out.extend_from_slice(len_bytes);
@ -178,8 +183,7 @@ mod tests {
der[idx + sha256_bytes.len() - 1] ^= 0x01; der[idx + sha256_bytes.len() - 1] ^= 0x01;
let err = decode_cir(&der).expect_err("wrong oid must fail"); let err = decode_cir(&der).expect_err("wrong oid must fail");
assert!( assert!(
err.to_string() err.to_string().contains(crate::data_model::oid::OID_SHA256),
.contains(crate::data_model::oid::OID_SHA256),
"{err}" "{err}"
); );
} }
@ -222,7 +226,10 @@ mod tests {
tals: Vec::new(), tals: Vec::new(),
}; };
let err = encode_cir(&no_tals).expect_err("empty tals must fail"); let err = encode_cir(&no_tals).expect_err("empty tals must fail");
assert!(err.to_string().contains("CIR.tals must be non-empty"), "{err}"); assert!(
err.to_string().contains("CIR.tals must be non-empty"),
"{err}"
);
} }
#[test] #[test]
@ -276,7 +283,10 @@ mod tests {
let mut der = encode_cir(&cir).expect("encode cir"); let mut der = encode_cir(&cir).expect("encode cir");
der.push(0); der.push(0);
let err = decode_cir(&der).expect_err("trailing bytes after cir must fail"); let err = decode_cir(&der).expect_err("trailing bytes after cir must fail");
assert!(err.to_string().contains("trailing bytes after CIR"), "{err}"); assert!(
err.to_string().contains("trailing bytes after CIR"),
"{err}"
);
let object = test_encode_tlv( let object = test_encode_tlv(
0x30, 0x30,
@ -307,7 +317,10 @@ mod tests {
.concat(), .concat(),
); );
let err = decode_cir(&bad).expect_err("trailing field in object must fail"); let err = decode_cir(&bad).expect_err("trailing field in object must fail");
assert!(err.to_string().contains("trailing fields in CirObject"), "{err}"); assert!(
err.to_string().contains("trailing fields in CirObject"),
"{err}"
);
} }
#[test] #[test]

View File

@ -111,7 +111,9 @@ fn validate_sorted_unique_strings<'a>(
) -> Result<(), String> { ) -> Result<(), String> {
let mut prev: Option<&'a str> = None; let mut prev: Option<&'a str> = None;
for key in items { for key in items {
if let Some(prev_key) = prev && key <= prev_key { if let Some(prev_key) = prev
&& key <= prev_key
{
return Err(message.into()); return Err(message.into());
} }
prev = Some(key); prev = Some(key);

View File

@ -15,13 +15,24 @@ pub struct CirSequenceStep {
pub cir_path: String, pub cir_path: String,
pub ccr_path: String, pub ccr_path: String,
pub report_path: String, pub report_path: String,
#[serde(default)]
pub timing_path: Option<String>,
#[serde(default)]
pub stdout_log_path: Option<String>,
#[serde(default)]
pub stderr_log_path: Option<String>,
#[serde(default)]
pub artifact_prefix: Option<String>,
pub previous_step_id: Option<String>, pub previous_step_id: Option<String>,
} }
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct CirSequenceManifest { pub struct CirSequenceManifest {
pub version: u32, pub version: u32,
pub static_root: String, #[serde(default)]
pub static_root: Option<String>,
#[serde(default)]
pub raw_store_db_path: Option<String>,
pub steps: Vec<CirSequenceStep>, pub steps: Vec<CirSequenceStep>,
} }
@ -30,8 +41,16 @@ impl CirSequenceManifest {
if self.version == 0 { if self.version == 0 {
return Err("sequence.version must be positive".to_string()); return Err("sequence.version must be positive".to_string());
} }
if self.static_root.trim().is_empty() { let backend_count = self.static_root.is_some() as u8 + self.raw_store_db_path.is_some() as u8;
return Err("sequence.static_root must not be empty".to_string()); if backend_count != 1 {
return Err("sequence must set exactly one of static_root or raw_store_db_path".to_string());
}
match (self.static_root.as_ref(), self.raw_store_db_path.as_ref()) {
(Some(static_root), None) if !static_root.trim().is_empty() => {}
(None, Some(raw_store_db_path)) if !raw_store_db_path.trim().is_empty() => {}
_ => {
return Err("sequence backend path must not be empty".to_string());
}
} }
if self.steps.is_empty() { if self.steps.is_empty() {
return Err("sequence.steps must not be empty".to_string()); return Err("sequence.steps must not be empty".to_string());
@ -49,6 +68,34 @@ impl CirSequenceManifest {
"sequence.steps[{idx}].validation_time must not be empty" "sequence.steps[{idx}].validation_time must not be empty"
)); ));
} }
if let Some(timing_path) = &step.timing_path
&& timing_path.trim().is_empty()
{
return Err(format!(
"sequence.steps[{idx}].timing_path must not be empty when set"
));
}
if let Some(stdout_log_path) = &step.stdout_log_path
&& stdout_log_path.trim().is_empty()
{
return Err(format!(
"sequence.steps[{idx}].stdout_log_path must not be empty when set"
));
}
if let Some(stderr_log_path) = &step.stderr_log_path
&& stderr_log_path.trim().is_empty()
{
return Err(format!(
"sequence.steps[{idx}].stderr_log_path must not be empty when set"
));
}
if let Some(artifact_prefix) = &step.artifact_prefix
&& artifact_prefix.trim().is_empty()
{
return Err(format!(
"sequence.steps[{idx}].artifact_prefix must not be empty when set"
));
}
if step.cir_path.trim().is_empty() if step.cir_path.trim().is_empty()
|| step.ccr_path.trim().is_empty() || step.ccr_path.trim().is_empty()
|| step.report_path.trim().is_empty() || step.report_path.trim().is_empty()
@ -92,7 +139,8 @@ mod tests {
fn sample_manifest() -> CirSequenceManifest { fn sample_manifest() -> CirSequenceManifest {
CirSequenceManifest { CirSequenceManifest {
version: 1, version: 1,
static_root: "static".to_string(), static_root: Some("static".to_string()),
raw_store_db_path: None,
steps: vec![ steps: vec![
CirSequenceStep { CirSequenceStep {
step_id: "full".to_string(), step_id: "full".to_string(),
@ -101,6 +149,10 @@ mod tests {
cir_path: "full/input.cir".to_string(), cir_path: "full/input.cir".to_string(),
ccr_path: "full/result.ccr".to_string(), ccr_path: "full/result.ccr".to_string(),
report_path: "full/report.json".to_string(), report_path: "full/report.json".to_string(),
timing_path: Some("full/timing.json".to_string()),
stdout_log_path: Some("full/stdout.log".to_string()),
stderr_log_path: Some("full/stderr.log".to_string()),
artifact_prefix: Some("2026-04-09T00:00:00Z-test".to_string()),
previous_step_id: None, previous_step_id: None,
}, },
CirSequenceStep { CirSequenceStep {
@ -110,6 +162,10 @@ mod tests {
cir_path: "delta-001/input.cir".to_string(), cir_path: "delta-001/input.cir".to_string(),
ccr_path: "delta-001/result.ccr".to_string(), ccr_path: "delta-001/result.ccr".to_string(),
report_path: "delta-001/report.json".to_string(), report_path: "delta-001/report.json".to_string(),
timing_path: Some("delta-001/timing.json".to_string()),
stdout_log_path: Some("delta-001/stdout.log".to_string()),
stderr_log_path: Some("delta-001/stderr.log".to_string()),
artifact_prefix: Some("2026-04-09T00:10:00Z-test".to_string()),
previous_step_id: Some("full".to_string()), previous_step_id: Some("full".to_string()),
}, },
], ],
@ -144,4 +200,12 @@ mod tests {
let err = bad.validate().expect_err("missing previous must fail"); let err = bad.validate().expect_err("missing previous must fail");
assert!(err.contains("previous_step_id")); assert!(err.contains("previous_step_id"));
} }
#[test]
fn sequence_manifest_validate_accepts_raw_store_backend() {
let mut manifest = sample_manifest();
manifest.static_root = None;
manifest.raw_store_db_path = Some("raw-store.db".to_string());
manifest.validate().expect("raw store sequence");
}
} }

View File

@ -3,6 +3,7 @@ use std::fs::{self, OpenOptions};
use std::io::Write; use std::io::Write;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use crate::blob_store::RawObjectStore;
use crate::storage::{RawByHashEntry, RocksStore}; use crate::storage::{RawByHashEntry, RocksStore};
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
@ -75,10 +76,7 @@ pub fn static_pool_path(
capture_date_utc: time::Date, capture_date_utc: time::Date,
sha256_hex: &str, sha256_hex: &str,
) -> Result<PathBuf, CirStaticPoolError> { ) -> Result<PathBuf, CirStaticPoolError> {
Ok(static_root.join(static_pool_relative_path( Ok(static_root.join(static_pool_relative_path(capture_date_utc, sha256_hex)?))
capture_date_utc,
sha256_hex,
)?))
} }
pub fn write_bytes_to_static_pool( pub fn write_bytes_to_static_pool(
@ -191,12 +189,13 @@ pub fn export_hashes_from_store(
let mut written_files = 0usize; let mut written_files = 0usize;
let mut reused_files = 0usize; let mut reused_files = 0usize;
for sha256_hex in &unique { for sha256_hex in &unique {
let entry = store let bytes = store
.get_raw_by_hash_entry(sha256_hex) .get_blob_bytes(sha256_hex)
.map_err(|e| CirStaticPoolError::Storage(e.to_string()))? .map_err(|e| CirStaticPoolError::Storage(e.to_string()))?
.ok_or_else(|| CirStaticPoolError::MissingRawByHash { .ok_or_else(|| CirStaticPoolError::MissingRawByHash {
sha256_hex: sha256_hex.clone(), sha256_hex: sha256_hex.clone(),
})?; })?;
let entry = RawByHashEntry::from_bytes(sha256_hex.clone(), bytes);
let result = write_raw_entry_to_static_pool(static_root, capture_date_utc, &entry)?; let result = write_raw_entry_to_static_pool(static_root, capture_date_utc, &entry)?;
if result.written { if result.written {
written_files += 1; written_files += 1;
@ -223,9 +222,7 @@ fn format_utc_date(date: time::Date) -> String {
fn validate_sha256_hex(sha256_hex: &str) -> Result<(), CirStaticPoolError> { fn validate_sha256_hex(sha256_hex: &str) -> Result<(), CirStaticPoolError> {
if sha256_hex.len() != 64 || !sha256_hex.as_bytes().iter().all(u8::is_ascii_hexdigit) { if sha256_hex.len() != 64 || !sha256_hex.as_bytes().iter().all(u8::is_ascii_hexdigit) {
return Err(CirStaticPoolError::InvalidSha256Hex( return Err(CirStaticPoolError::InvalidSha256Hex(sha256_hex.to_string()));
sha256_hex.to_string(),
));
} }
Ok(()) Ok(())
} }
@ -238,8 +235,8 @@ fn compute_sha256_hex(bytes: &[u8]) -> String {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{ use super::{
CirStaticPoolError, compute_sha256_hex, export_hashes_from_store, static_pool_relative_path, CirStaticPoolError, compute_sha256_hex, export_hashes_from_store,
write_bytes_to_static_pool, static_pool_relative_path, write_bytes_to_static_pool,
}; };
use crate::storage::{RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore}; use crate::storage::{RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore};
use std::fs; use std::fs;
@ -270,8 +267,8 @@ mod tests {
let bytes = b"static-pool-object"; let bytes = b"static-pool-object";
let sha = compute_sha256_hex(bytes); let sha = compute_sha256_hex(bytes);
let first = write_bytes_to_static_pool(td.path(), sample_date(), &sha, bytes) let first =
.expect("first write"); write_bytes_to_static_pool(td.path(), sample_date(), &sha, bytes).expect("first write");
let second = write_bytes_to_static_pool(td.path(), sample_date(), &sha, bytes) let second = write_bytes_to_static_pool(td.path(), sample_date(), &sha, bytes)
.expect("second write"); .expect("second write");
@ -281,11 +278,13 @@ mod tests {
let all_files: Vec<_> = walk_files(td.path()); let all_files: Vec<_> = walk_files(td.path());
assert_eq!(all_files.len(), 1); assert_eq!(all_files.len(), 1);
assert!(!all_files[0] assert!(
!all_files[0]
.file_name() .file_name()
.and_then(|name| name.to_str()) .and_then(|name| name.to_str())
.unwrap_or_default() .unwrap_or_default()
.contains(".tmp.")); .contains(".tmp.")
);
} }
#[test] #[test]
@ -315,7 +314,9 @@ mod tests {
let bytes = b"store-object".to_vec(); let bytes = b"store-object".to_vec();
let sha = compute_sha256_hex(&bytes); let sha = compute_sha256_hex(&bytes);
let mut entry = RawByHashEntry::from_bytes(sha.clone(), bytes.clone()); let mut entry = RawByHashEntry::from_bytes(sha.clone(), bytes.clone());
entry.origin_uris.push("rsync://example.test/repo/object.cer".to_string()); entry
.origin_uris
.push("rsync://example.test/repo/object.cer".to_string());
store.put_raw_by_hash_entry(&entry).expect("put raw entry"); store.put_raw_by_hash_entry(&entry).expect("put raw entry");
store store
.put_repository_view_entry(&RepositoryViewEntry { .put_repository_view_entry(&RepositoryViewEntry {

View File

@ -4,8 +4,8 @@ use std::path::{Path, PathBuf};
use crate::analysis::timing::{TimingHandle, TimingMeta, TimingMetaUpdate}; use crate::analysis::timing::{TimingHandle, TimingMeta, TimingMetaUpdate};
use crate::audit::{ use crate::audit::{
AspaOutput, AuditReportV2, AuditRunMeta, AuditWarning, TreeSummary, VrpOutput, AspaOutput, AuditReportV2, AuditRepoSyncStats, AuditRunMeta, AuditWarning, TreeSummary,
format_roa_ip_prefix, VrpOutput, format_roa_ip_prefix,
}; };
use crate::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig}; use crate::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use crate::fetch::rsync::LocalDirRsyncFetcher; use crate::fetch::rsync::LocalDirRsyncFetcher;
@ -51,6 +51,7 @@ pub struct CliArgs {
pub ta_path: Option<PathBuf>, pub ta_path: Option<PathBuf>,
pub db_path: PathBuf, pub db_path: PathBuf,
pub raw_store_db: Option<PathBuf>,
pub policy_path: Option<PathBuf>, pub policy_path: Option<PathBuf>,
pub report_json_path: Option<PathBuf>, pub report_json_path: Option<PathBuf>,
pub ccr_out_path: Option<PathBuf>, pub ccr_out_path: Option<PathBuf>,
@ -92,12 +93,13 @@ Usage:
Options: Options:
--db <path> RocksDB directory path (required) --db <path> RocksDB directory path (required)
--raw-store-db <path> External raw-by-hash store DB path (optional)
--policy <path> Policy TOML path (optional) --policy <path> Policy TOML path (optional)
--report-json <path> Write full audit report as JSON (optional) --report-json <path> Write full audit report as JSON (optional)
--ccr-out <path> Write CCR DER ContentInfo to this path (optional) --ccr-out <path> Write CCR DER ContentInfo to this path (optional)
--cir-enable Export CIR after the run completes --cir-enable Export CIR after the run completes
--cir-out <path> Write CIR DER to this path (requires --cir-enable) --cir-out <path> Write CIR DER to this path (requires --cir-enable)
--cir-static-root <path> Shared static pool root for CIR export (requires --cir-enable) --cir-static-root <path> Shared static pool root for CIR export (requires --cir-enable unless --raw-store-db is used)
--cir-tal-uri <url> Override TAL URI for CIR export when using --tal-path (optional) --cir-tal-uri <url> Override TAL URI for CIR export when using --tal-path (optional)
--payload-replay-archive <path> Use local payload replay archive root (offline replay mode) --payload-replay-archive <path> Use local payload replay archive root (offline replay mode)
--payload-replay-locks <path> Use local payload replay locks.json (offline replay mode) --payload-replay-locks <path> Use local payload replay locks.json (offline replay mode)
@ -134,6 +136,7 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let mut ta_path: Option<PathBuf> = None; let mut ta_path: Option<PathBuf> = None;
let mut db_path: Option<PathBuf> = None; let mut db_path: Option<PathBuf> = None;
let mut raw_store_db: Option<PathBuf> = None;
let mut policy_path: Option<PathBuf> = None; let mut policy_path: Option<PathBuf> = None;
let mut report_json_path: Option<PathBuf> = None; let mut report_json_path: Option<PathBuf> = None;
let mut ccr_out_path: Option<PathBuf> = None; let mut ccr_out_path: Option<PathBuf> = None;
@ -152,8 +155,8 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let mut rsync_local_dir: Option<PathBuf> = None; let mut rsync_local_dir: Option<PathBuf> = None;
let mut disable_rrdp: bool = false; let mut disable_rrdp: bool = false;
let mut rsync_command: Option<PathBuf> = None; let mut rsync_command: Option<PathBuf> = None;
let mut http_timeout_secs: u64 = 20; let mut http_timeout_secs: u64 = 30;
let mut rsync_timeout_secs: u64 = 60; let mut rsync_timeout_secs: u64 = 30;
let mut rsync_mirror_root: Option<PathBuf> = None; let mut rsync_mirror_root: Option<PathBuf> = None;
let mut max_depth: Option<usize> = None; let mut max_depth: Option<usize> = None;
let mut max_instances: Option<usize> = None; let mut max_instances: Option<usize> = None;
@ -186,6 +189,11 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let v = argv.get(i).ok_or("--db requires a value")?; let v = argv.get(i).ok_or("--db requires a value")?;
db_path = Some(PathBuf::from(v)); db_path = Some(PathBuf::from(v));
} }
"--raw-store-db" => {
i += 1;
let v = argv.get(i).ok_or("--raw-store-db requires a value")?;
raw_store_db = Some(PathBuf::from(v));
}
"--policy" => { "--policy" => {
i += 1; i += 1;
let v = argv.get(i).ok_or("--policy requires a value")?; let v = argv.get(i).ok_or("--policy requires a value")?;
@ -247,7 +255,9 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
} }
"--payload-base-validation-time" => { "--payload-base-validation-time" => {
i += 1; i += 1;
let v = argv.get(i).ok_or("--payload-base-validation-time requires a value")?; let v = argv
.get(i)
.ok_or("--payload-base-validation-time requires a value")?;
use time::format_description::well_known::Rfc3339; use time::format_description::well_known::Rfc3339;
let t = time::OffsetDateTime::parse(v, &Rfc3339).map_err(|e| { let t = time::OffsetDateTime::parse(v, &Rfc3339).map_err(|e| {
format!("invalid --payload-base-validation-time (RFC3339 expected): {e}") format!("invalid --payload-base-validation-time (RFC3339 expected): {e}")
@ -350,13 +360,17 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
usage() usage()
)); ));
} }
if cir_enabled && (cir_out_path.is_none() || cir_static_root.is_none()) { let cir_backend_count = cir_static_root.is_some() as u8 + raw_store_db.is_some() as u8;
if cir_enabled && (cir_out_path.is_none() || cir_backend_count != 1) {
return Err(format!( return Err(format!(
"--cir-enable requires both --cir-out and --cir-static-root\n\n{}", "--cir-enable requires --cir-out and exactly one of --cir-static-root or --raw-store-db\n\n{}",
usage() usage()
)); ));
} }
if !cir_enabled && (cir_out_path.is_some() || cir_static_root.is_some() || cir_tal_uri.is_some()) if !cir_enabled
&& (cir_out_path.is_some()
|| cir_static_root.is_some()
|| cir_tal_uri.is_some())
{ {
return Err(format!( return Err(format!(
"--cir-out/--cir-static-root/--cir-tal-uri require --cir-enable\n\n{}", "--cir-out/--cir-static-root/--cir-tal-uri require --cir-enable\n\n{}",
@ -459,6 +473,7 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
tal_path, tal_path,
ta_path, ta_path,
db_path, db_path,
raw_store_db,
policy_path, policy_path,
report_json_path, report_json_path,
ccr_out_path, ccr_out_path,
@ -578,6 +593,8 @@ fn build_report(
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let repo_sync_stats = build_repo_sync_stats(&out.publication_points);
AuditReportV2 { AuditReportV2 {
format_version: 2, format_version: 2,
meta: AuditRunMeta { meta: AuditRunMeta {
@ -594,9 +611,36 @@ fn build_report(
aspas, aspas,
downloads: out.downloads, downloads: out.downloads,
download_stats: out.download_stats, download_stats: out.download_stats,
repo_sync_stats,
} }
} }
fn build_repo_sync_stats(
publication_points: &[crate::audit::PublicationPointAudit],
) -> AuditRepoSyncStats {
let mut stats = AuditRepoSyncStats {
publication_points_total: publication_points.len() as u64,
..AuditRepoSyncStats::default()
};
for pp in publication_points {
let duration = pp.repo_sync_duration_ms.unwrap_or(0);
if let Some(phase) = pp.repo_sync_phase.as_ref() {
let entry = stats.by_phase.entry(phase.clone()).or_default();
entry.count += 1;
entry.duration_ms_total += duration;
}
let entry = stats
.by_terminal_state
.entry(pp.repo_terminal_state.clone())
.or_default();
entry.count += 1;
entry.duration_ms_total += duration;
}
stats
}
pub fn run(argv: &[String]) -> Result<(), String> { pub fn run(argv: &[String]) -> Result<(), String> {
let args = parse_args(argv)?; let args = parse_args(argv)?;
@ -608,7 +652,12 @@ pub fn run(argv: &[String]) -> Result<(), String> {
.validation_time .validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc); .unwrap_or_else(time::OffsetDateTime::now_utc);
let store = RocksStore::open(&args.db_path).map_err(|e| e.to_string())?; let store = if let Some(raw_store_db) = args.raw_store_db.as_ref() {
RocksStore::open_with_external_raw_store(&args.db_path, raw_store_db)
.map_err(|e| e.to_string())?
} else {
RocksStore::open(&args.db_path).map_err(|e| e.to_string())?
};
let config = TreeRunConfig { let config = TreeRunConfig {
max_depth: args.max_depth, max_depth: args.max_depth,
max_instances: args.max_instances, max_instances: args.max_instances,
@ -1094,10 +1143,6 @@ pub fn run(argv: &[String]) -> Result<(), String> {
.cir_out_path .cir_out_path
.as_deref() .as_deref()
.expect("validated by parse_args for cir"); .expect("validated by parse_args for cir");
let cir_static_root = args
.cir_static_root
.as_deref()
.expect("validated by parse_args for cir");
let summary = export_cir_from_run( let summary = export_cir_from_run(
&store, &store,
&out.discovery.trust_anchor, &out.discovery.trust_anchor,
@ -1105,7 +1150,8 @@ pub fn run(argv: &[String]) -> Result<(), String> {
validation_time, validation_time,
&out.publication_points, &out.publication_points,
cir_out_path, cir_out_path,
cir_static_root, args.cir_static_root.as_deref(),
args.raw_store_db.as_deref(),
time::OffsetDateTime::now_utc().date(), time::OffsetDateTime::now_utc().date(),
) )
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
@ -1113,13 +1159,22 @@ pub fn run(argv: &[String]) -> Result<(), String> {
cir_static_pool_ms = Some(summary.timing.static_pool_ms); cir_static_pool_ms = Some(summary.timing.static_pool_ms);
cir_write_cir_ms = Some(summary.timing.write_cir_ms); cir_write_cir_ms = Some(summary.timing.write_cir_ms);
cir_total_ms = Some(summary.timing.total_ms); cir_total_ms = Some(summary.timing.total_ms);
let (backend_name, written_entries, reused_entries) = match &summary.object_pool {
crate::cir::export::CirObjectPoolExportSummary::Static(s) => {
("static", s.written_files, s.reused_files)
}
crate::cir::export::CirObjectPoolExportSummary::RawStore(s) => {
("raw-store", s.written_entries, s.reused_entries)
}
};
eprintln!( eprintln!(
"wrote CIR: {} (objects={}, tals={}, static_written={}, static_reused={}, build_cir_ms={}, static_pool_ms={}, write_cir_ms={}, total_ms={})", "wrote CIR: {} (objects={}, tals={}, backend={}, written={}, reused={}, build_cir_ms={}, static_pool_ms={}, write_cir_ms={}, total_ms={})",
cir_out_path.display(), cir_out_path.display(),
summary.object_count, summary.object_count,
summary.tal_count, summary.tal_count,
summary.static_pool.written_files, backend_name,
summary.static_pool.reused_files, written_entries,
reused_entries,
summary.timing.build_cir_ms, summary.timing.build_cir_ms,
summary.timing.static_pool_ms, summary.timing.static_pool_ms,
summary.timing.write_cir_ms, summary.timing.write_cir_ms,
@ -1160,7 +1215,12 @@ pub fn run(argv: &[String]) -> Result<(), String> {
&stage_timing_path, &stage_timing_path,
serde_json::to_vec_pretty(&stage_timing).map_err(|e| e.to_string())?, serde_json::to_vec_pretty(&stage_timing).map_err(|e| e.to_string())?,
) )
.map_err(|e| format!("write stage timing failed: {}: {e}", stage_timing_path.display()))?; .map_err(|e| {
format!(
"write stage timing failed: {}: {e}",
stage_timing_path.display()
)
})?;
eprintln!("analysis: wrote {}", stage_timing_path.display()); eprintln!("analysis: wrote {}", stage_timing_path.display());
} }
} }
@ -1290,7 +1350,57 @@ mod tests {
"out/example.ccr".to_string(), "out/example.ccr".to_string(),
]; ];
let args = parse_args(&argv).expect("parse args"); let args = parse_args(&argv).expect("parse args");
assert_eq!(args.ccr_out_path.as_deref(), Some(std::path::Path::new("out/example.ccr"))); assert_eq!(
args.ccr_out_path.as_deref(),
Some(std::path::Path::new("out/example.ccr"))
);
}
#[test]
fn parse_accepts_external_raw_store_db() {
let argv = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--raw-store-db".to_string(),
"raw-store.db".to_string(),
"--tal-url".to_string(),
"https://example.test/x.tal".to_string(),
];
let args = parse_args(&argv).expect("parse args");
assert_eq!(
args.raw_store_db.as_deref(),
Some(std::path::Path::new("raw-store.db"))
);
}
#[test]
fn parse_accepts_cir_enable_with_raw_store_backend() {
let argv = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--raw-store-db".to_string(),
"raw-store.db".to_string(),
"--tal-path".to_string(),
"x.tal".to_string(),
"--ta-path".to_string(),
"x.cer".to_string(),
"--rsync-local-dir".to_string(),
"repo".to_string(),
"--cir-enable".to_string(),
"--cir-out".to_string(),
"out/example.cir".to_string(),
"--cir-tal-uri".to_string(),
"https://example.test/root.tal".to_string(),
];
let args = parse_args(&argv).expect("parse args");
assert!(args.cir_enabled);
assert_eq!(
args.raw_store_db.as_deref(),
Some(std::path::Path::new("raw-store.db"))
);
assert_eq!(args.cir_static_root, None);
} }
#[test] #[test]
@ -1315,9 +1425,18 @@ mod tests {
]; ];
let args = parse_args(&argv).expect("parse args"); let args = parse_args(&argv).expect("parse args");
assert!(args.cir_enabled); assert!(args.cir_enabled);
assert_eq!(args.cir_out_path.as_deref(), Some(std::path::Path::new("out/example.cir"))); assert_eq!(
assert_eq!(args.cir_static_root.as_deref(), Some(std::path::Path::new("out/static"))); args.cir_out_path.as_deref(),
assert_eq!(args.cir_tal_uri.as_deref(), Some("https://example.test/root.tal")); Some(std::path::Path::new("out/example.cir"))
);
assert_eq!(
args.cir_static_root.as_deref(),
Some(std::path::Path::new("out/static"))
);
assert_eq!(
args.cir_tal_uri.as_deref(),
Some("https://example.test/root.tal")
);
} }
#[test] #[test]
@ -1333,7 +1452,10 @@ mod tests {
"out/example.cir".to_string(), "out/example.cir".to_string(),
]; ];
let err = parse_args(&argv_missing).unwrap_err(); let err = parse_args(&argv_missing).unwrap_err();
assert!(err.contains("--cir-enable requires both --cir-out and --cir-static-root"), "{err}"); assert!(
err.contains("--cir-enable requires --cir-out and exactly one of --cir-static-root or --raw-store-db"),
"{err}"
);
let argv_needs_enable = vec![ let argv_needs_enable = vec![
"rpki".to_string(), "rpki".to_string(),
@ -1835,6 +1957,7 @@ mod tests {
aspas: Vec::new(), aspas: Vec::new(),
downloads: Vec::new(), downloads: Vec::new(),
download_stats: crate::audit::AuditDownloadStats::default(), download_stats: crate::audit::AuditDownloadStats::default(),
repo_sync_stats: crate::audit::AuditRepoSyncStats::default(),
}; };
let dir = tempfile::tempdir().expect("tmpdir"); let dir = tempfile::tempdir().expect("tmpdir");
@ -1844,4 +1967,35 @@ mod tests {
assert!(s.contains("\"format_version\"")); assert!(s.contains("\"format_version\""));
assert!(s.contains("\"policy\"")); assert!(s.contains("\"policy\""));
} }
#[test]
fn build_repo_sync_stats_aggregates_phase_and_terminal_state() {
let mut pp1 = crate::audit::PublicationPointAudit::default();
pp1.repo_sync_phase = Some("rrdp_ok".to_string());
pp1.repo_sync_duration_ms = Some(10);
pp1.repo_terminal_state = "fresh".to_string();
let mut pp2 = crate::audit::PublicationPointAudit::default();
pp2.repo_sync_phase = Some("rrdp_failed_rsync_failed".to_string());
pp2.repo_sync_duration_ms = Some(20);
pp2.repo_terminal_state = "failed_no_cache".to_string();
let mut pp3 = crate::audit::PublicationPointAudit::default();
pp3.repo_sync_phase = Some("rrdp_failed_rsync_failed".to_string());
pp3.repo_sync_duration_ms = Some(30);
pp3.repo_terminal_state = "failed_no_cache".to_string();
let stats = build_repo_sync_stats(&[pp1, pp2, pp3]);
assert_eq!(stats.publication_points_total, 3);
assert_eq!(stats.by_phase["rrdp_ok"].count, 1);
assert_eq!(stats.by_phase["rrdp_ok"].duration_ms_total, 10);
assert_eq!(stats.by_phase["rrdp_failed_rsync_failed"].count, 2);
assert_eq!(stats.by_phase["rrdp_failed_rsync_failed"].duration_ms_total, 50);
assert_eq!(stats.by_terminal_state["fresh"].count, 1);
assert_eq!(stats.by_terminal_state["failed_no_cache"].count, 2);
assert_eq!(
stats.by_terminal_state["failed_no_cache"].duration_ms_total,
50
);
}
} }

View File

@ -3,12 +3,9 @@ use crate::data_model::oid::{
}; };
use crate::data_model::rc::{ use crate::data_model::rc::{
AsIdOrRange, AsIdentifierChoice, ResourceCertKind, ResourceCertificate, AsIdOrRange, AsIdentifierChoice, ResourceCertKind, ResourceCertificate,
ResourceCertificateParseError, ResourceCertificateParsed, ResourceCertificateParseError, ResourceCertificateParsed, ResourceCertificateProfileError,
ResourceCertificateProfileError,
};
use crate::validation::cert_path::{
CertPathError, validate_ee_cert_path_with_predecoded_ee,
}; };
use crate::validation::cert_path::{CertPathError, validate_ee_cert_path_with_predecoded_ee};
use x509_parser::extensions::ParsedExtension; use x509_parser::extensions::ParsedExtension;
use x509_parser::prelude::{FromDer, X509Certificate}; use x509_parser::prelude::{FromDer, X509Certificate};
use x509_parser::public_key::PublicKey; use x509_parser::public_key::PublicKey;
@ -33,7 +30,7 @@ pub enum BgpsecRouterCertificateParseError {
#[error("resource certificate parse error: {0} (RFC 5280 §4.1; RFC 6487 §4; RFC 8209 §3.1)")] #[error("resource certificate parse error: {0} (RFC 5280 §4.1; RFC 6487 §4; RFC 8209 §3.1)")]
ResourceCertificate(#[from] ResourceCertificateParseError), ResourceCertificate(#[from] ResourceCertificateParseError),
#[error("X.509 parse error: {0} (RFC 5280 §4.1; RFC 8209 §3.1)" )] #[error("X.509 parse error: {0} (RFC 5280 §4.1; RFC 8209 §3.1)")]
X509(String), X509(String),
#[error("trailing bytes after router certificate DER: {0} bytes (DER; RFC 5280 §4.1)")] #[error("trailing bytes after router certificate DER: {0} bytes (DER; RFC 5280 §4.1)")]
@ -54,46 +51,72 @@ pub enum BgpsecRouterCertificateProfileError {
#[error("BGPsec router certificate must be an EE certificate (RFC 8209 §3.1)")] #[error("BGPsec router certificate must be an EE certificate (RFC 8209 §3.1)")]
NotEe, NotEe,
#[error("BGPsec router certificate must contain SubjectKeyIdentifier (RFC 6487 §4.8.2; RFC 8209 §3.3)")] #[error(
"BGPsec router certificate must contain SubjectKeyIdentifier (RFC 6487 §4.8.2; RFC 8209 §3.3)"
)]
MissingSki, MissingSki,
#[error("BGPsec router certificate must include ExtendedKeyUsage (RFC 8209 §3.1.3.2; RFC 8209 §3.3)")] #[error(
"BGPsec router certificate must include ExtendedKeyUsage (RFC 8209 §3.1.3.2; RFC 8209 §3.3)"
)]
MissingExtendedKeyUsage, MissingExtendedKeyUsage,
#[error("BGPsec router certificate ExtendedKeyUsage must be non-critical (RFC 6487 §4.8.4; RFC 8209 §3.1.3.2)")] #[error(
"BGPsec router certificate ExtendedKeyUsage must be non-critical (RFC 6487 §4.8.4; RFC 8209 §3.1.3.2)"
)]
ExtendedKeyUsageCriticality, ExtendedKeyUsageCriticality,
#[error("BGPsec router certificate ExtendedKeyUsage must contain id-kp-bgpsec-router ({OID_KP_BGPSEC_ROUTER}) (RFC 8209 §3.1.3.2; RFC 8209 §3.3)")] #[error(
"BGPsec router certificate ExtendedKeyUsage must contain id-kp-bgpsec-router ({OID_KP_BGPSEC_ROUTER}) (RFC 8209 §3.1.3.2; RFC 8209 §3.3)"
)]
MissingBgpsecRouterEku, MissingBgpsecRouterEku,
#[error("BGPsec router certificate MUST NOT include Subject Information Access (RFC 8209 §3.1.3.3; RFC 8209 §3.3)")] #[error(
"BGPsec router certificate MUST NOT include Subject Information Access (RFC 8209 §3.1.3.3; RFC 8209 §3.3)"
)]
SubjectInfoAccessPresent, SubjectInfoAccessPresent,
#[error("BGPsec router certificate MUST NOT include IP resources extension (RFC 8209 §3.1.3.4; RFC 8209 §3.3)")] #[error(
"BGPsec router certificate MUST NOT include IP resources extension (RFC 8209 §3.1.3.4; RFC 8209 §3.3)"
)]
IpResourcesPresent, IpResourcesPresent,
#[error("BGPsec router certificate MUST include AS resources extension (RFC 8209 §3.1.3.5; RFC 8209 §3.3)")] #[error(
"BGPsec router certificate MUST include AS resources extension (RFC 8209 §3.1.3.5; RFC 8209 §3.3)"
)]
AsResourcesMissing, AsResourcesMissing,
#[error("BGPsec router certificate AS resources MUST include one or more ASNs (RFC 8209 §3.1.3.5)")] #[error(
"BGPsec router certificate AS resources MUST include one or more ASNs (RFC 8209 §3.1.3.5)"
)]
AsResourcesAsnumMissing, AsResourcesAsnumMissing,
#[error("BGPsec router certificate AS resources MUST NOT use inherit (RFC 8209 §3.1.3.5)")] #[error("BGPsec router certificate AS resources MUST NOT use inherit (RFC 8209 §3.1.3.5)")]
AsResourcesInherit, AsResourcesInherit,
#[error("BGPsec router certificate AS resources MUST contain explicit ASNs, not ranges (RFC 8209 §3.1.3.5)")] #[error(
"BGPsec router certificate AS resources MUST contain explicit ASNs, not ranges (RFC 8209 §3.1.3.5)"
)]
AsResourcesRangeNotAllowed, AsResourcesRangeNotAllowed,
#[error("BGPsec router certificate subjectPublicKeyInfo.algorithm must be id-ecPublicKey ({OID_EC_PUBLIC_KEY}) (RFC 8208 §3.1)")] #[error(
"BGPsec router certificate subjectPublicKeyInfo.algorithm must be id-ecPublicKey ({OID_EC_PUBLIC_KEY}) (RFC 8208 §3.1)"
)]
SpkiAlgorithmNotEcPublicKey, SpkiAlgorithmNotEcPublicKey,
#[error("BGPsec router certificate subjectPublicKeyInfo.parameters must be secp256r1 ({OID_SECP256R1}) (RFC 8208 §3.1)")] #[error(
"BGPsec router certificate subjectPublicKeyInfo.parameters must be secp256r1 ({OID_SECP256R1}) (RFC 8208 §3.1)"
)]
SpkiWrongCurve, SpkiWrongCurve,
#[error("BGPsec router certificate subjectPublicKeyInfo.parameters missing or invalid (RFC 8208 §3.1)")] #[error(
"BGPsec router certificate subjectPublicKeyInfo.parameters missing or invalid (RFC 8208 §3.1)"
)]
SpkiParametersMissingOrInvalid, SpkiParametersMissingOrInvalid,
#[error("BGPsec router certificate subjectPublicKey MUST be uncompressed P-256 ECPoint (RFC 8208 §3.1)")] #[error(
"BGPsec router certificate subjectPublicKey MUST be uncompressed P-256 ECPoint (RFC 8208 §3.1)"
)]
SpkiEcPointNotUncompressedP256, SpkiEcPointNotUncompressedP256,
} }
@ -116,16 +139,21 @@ pub enum BgpsecRouterCertificatePathError {
} }
impl BgpsecRouterCertificate { impl BgpsecRouterCertificate {
pub fn parse_der(der: &[u8]) -> Result<BgpsecRouterCertificateParsed, BgpsecRouterCertificateParseError> { pub fn parse_der(
der: &[u8],
) -> Result<BgpsecRouterCertificateParsed, BgpsecRouterCertificateParseError> {
let (rem, cert) = X509Certificate::from_der(der) let (rem, cert) = X509Certificate::from_der(der)
.map_err(|e| BgpsecRouterCertificateParseError::X509(e.to_string()))?; .map_err(|e| BgpsecRouterCertificateParseError::X509(e.to_string()))?;
if !rem.is_empty() { if !rem.is_empty() {
return Err(BgpsecRouterCertificateParseError::TrailingBytes(rem.len())); return Err(BgpsecRouterCertificateParseError::TrailingBytes(rem.len()));
} }
let (spki_rem, _spki) = SubjectPublicKeyInfo::from_der(cert.tbs_certificate.subject_pki.raw) let (spki_rem, _spki) =
SubjectPublicKeyInfo::from_der(cert.tbs_certificate.subject_pki.raw)
.map_err(|e| BgpsecRouterCertificateParseError::SpkiParse(e.to_string()))?; .map_err(|e| BgpsecRouterCertificateParseError::SpkiParse(e.to_string()))?;
if !spki_rem.is_empty() { if !spki_rem.is_empty() {
return Err(BgpsecRouterCertificateParseError::SpkiTrailingBytes(spki_rem.len())); return Err(BgpsecRouterCertificateParseError::SpkiTrailingBytes(
spki_rem.len(),
));
} }
let rc_parsed = ResourceCertificate::parse_der(der)?; let rc_parsed = ResourceCertificate::parse_der(der)?;
Ok(BgpsecRouterCertificateParsed { rc_parsed }) Ok(BgpsecRouterCertificateParsed { rc_parsed })
@ -170,7 +198,9 @@ impl BgpsecRouterCertificate {
} }
impl BgpsecRouterCertificateParsed { impl BgpsecRouterCertificateParsed {
pub fn validate_profile(self) -> Result<BgpsecRouterCertificate, BgpsecRouterCertificateProfileError> { pub fn validate_profile(
self,
) -> Result<BgpsecRouterCertificate, BgpsecRouterCertificateProfileError> {
let rc = self.rc_parsed.validate_profile()?; let rc = self.rc_parsed.validate_profile()?;
if rc.kind != ResourceCertKind::Ee { if rc.kind != ResourceCertKind::Ee {
return Err(BgpsecRouterCertificateProfileError::NotEe); return Err(BgpsecRouterCertificateProfileError::NotEe);
@ -196,15 +226,17 @@ impl BgpsecRouterCertificateParsed {
.ok_or(BgpsecRouterCertificateProfileError::AsResourcesMissing)?; .ok_or(BgpsecRouterCertificateProfileError::AsResourcesMissing)?;
let asns = extract_router_asns(as_resources)?; let asns = extract_router_asns(as_resources)?;
let (rem, cert) = X509Certificate::from_der(&rc.raw_der) let (rem, cert) = X509Certificate::from_der(&rc.raw_der).map_err(|e| {
.map_err(|e| BgpsecRouterCertificateProfileError::ResourceCertificate( BgpsecRouterCertificateProfileError::ResourceCertificate(
ResourceCertificateProfileError::InvalidCertificatePolicy(e.to_string()) ResourceCertificateProfileError::InvalidCertificatePolicy(e.to_string()),
))?; )
})?;
if !rem.is_empty() { if !rem.is_empty() {
return Err(BgpsecRouterCertificateProfileError::ResourceCertificate( return Err(BgpsecRouterCertificateProfileError::ResourceCertificate(
ResourceCertificateProfileError::InvalidCertificatePolicy( ResourceCertificateProfileError::InvalidCertificatePolicy(format!(
format!("trailing bytes after router certificate DER: {}", rem.len()), "trailing bytes after router certificate DER: {}",
), rem.len()
)),
)); ));
} }
validate_router_eku(&cert)?; validate_router_eku(&cert)?;
@ -243,7 +275,7 @@ fn extract_router_asns(
match item { match item {
AsIdOrRange::Id(v) => asns.push(*v), AsIdOrRange::Id(v) => asns.push(*v),
AsIdOrRange::Range { .. } => { AsIdOrRange::Range { .. } => {
return Err(BgpsecRouterCertificateProfileError::AsResourcesRangeNotAllowed) return Err(BgpsecRouterCertificateProfileError::AsResourcesRangeNotAllowed);
} }
} }
} }
@ -252,7 +284,9 @@ fn extract_router_asns(
Ok(asns) Ok(asns)
} }
fn validate_router_eku(cert: &X509Certificate<'_>) -> Result<(), BgpsecRouterCertificateProfileError> { fn validate_router_eku(
cert: &X509Certificate<'_>,
) -> Result<(), BgpsecRouterCertificateProfileError> {
let mut matches = cert let mut matches = cert
.tbs_certificate .tbs_certificate
.extensions() .extensions()

View File

@ -326,10 +326,7 @@ impl RpkiSignedObject {
/// Verify the CMS signature using the embedded EE certificate public key. /// Verify the CMS signature using the embedded EE certificate public key.
pub fn verify_signature(&self) -> Result<(), SignedObjectVerifyError> { pub fn verify_signature(&self) -> Result<(), SignedObjectVerifyError> {
let ee = &self.signed_data.certificates[0]; let ee = &self.signed_data.certificates[0];
self.verify_signature_with_rsa_components( self.verify_signature_with_rsa_components(&ee.rsa_public_modulus, &ee.rsa_public_exponent)
&ee.rsa_public_modulus,
&ee.rsa_public_exponent,
)
} }
/// Verify the CMS signature using a DER-encoded SubjectPublicKeyInfo. /// Verify the CMS signature using a DER-encoded SubjectPublicKeyInfo.
@ -451,9 +448,7 @@ impl<'a> CmsReader<'a> {
let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?; let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
let tag = header_to_single_byte_tag(&any.header)?; let tag = header_to_single_byte_tag(&any.header)?;
if tag != 0x04 && tag != 0x24 { if tag != 0x04 && tag != 0x24 {
return Err(format!( return Err(format!("unexpected tag: got 0x{tag:02X}, expected 0x04"));
"unexpected tag: got 0x{tag:02X}, expected 0x04"
));
} }
let octets = flatten_octet_string(any)?; let octets = flatten_octet_string(any)?;
self.buf = rem; self.buf = rem;
@ -516,7 +511,9 @@ fn header_to_single_byte_tag(header: &Header<'_>) -> Result<u8, String> {
if tag_no > 30 { if tag_no > 30 {
return Err(format!("high-tag-number form not supported: {tag_no}")); return Err(format!("high-tag-number form not supported: {tag_no}"));
} }
Ok(((header.class() as u8) << 6) | if header.constructed() { 0x20 } else { 0x00 } | tag_no as u8) Ok(((header.class() as u8) << 6)
| if header.constructed() { 0x20 } else { 0x00 }
| tag_no as u8)
} }
fn cms_take_tlv(input: &[u8]) -> Result<(u8, &[u8], &[u8]), String> { fn cms_take_tlv(input: &[u8]) -> Result<(u8, &[u8], &[u8]), String> {
@ -759,20 +756,18 @@ fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedOb
"trailing bytes after EE SubjectPublicKeyInfo DER".to_string(), "trailing bytes after EE SubjectPublicKeyInfo DER".to_string(),
)); ));
} }
let parsed_pk = spki let parsed_pk = spki.parsed().map_err(|_e| {
.parsed() SignedObjectValidateError::EeCertificateParse(
.map_err(|_e| SignedObjectValidateError::EeCertificateParse(
"unsupported EE public key algorithm".to_string(), "unsupported EE public key algorithm".to_string(),
))?; )
})?;
let (rsa_public_modulus, rsa_public_exponent) = match parsed_pk { let (rsa_public_modulus, rsa_public_exponent) = match parsed_pk {
PublicKey::RSA(rsa) => { PublicKey::RSA(rsa) => {
let modulus = strip_leading_zeros(rsa.modulus).to_vec(); let modulus = strip_leading_zeros(rsa.modulus).to_vec();
let exponent = strip_leading_zeros(rsa.exponent).to_vec(); let exponent = strip_leading_zeros(rsa.exponent).to_vec();
let _ = rsa let _ = rsa.try_exponent().map_err(|_e| {
.try_exponent() SignedObjectValidateError::EeCertificateParse("invalid EE RSA exponent".to_string())
.map_err(|_e| SignedObjectValidateError::EeCertificateParse( })?;
"invalid EE RSA exponent".to_string(),
))?;
(modulus, exponent) (modulus, exponent)
} }
_ => { _ => {

View File

@ -7,6 +7,8 @@ use crate::sync::rrdp::Fetcher;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct HttpFetcherConfig { pub struct HttpFetcherConfig {
/// Connection-establishment timeout for HTTP requests.
pub connect_timeout: Duration,
/// Short timeout used for connection establishment and small metadata objects. /// Short timeout used for connection establishment and small metadata objects.
pub timeout: Duration, pub timeout: Duration,
/// Larger timeout used for RRDP snapshot / delta bodies. /// Larger timeout used for RRDP snapshot / delta bodies.
@ -17,7 +19,8 @@ pub struct HttpFetcherConfig {
impl Default for HttpFetcherConfig { impl Default for HttpFetcherConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
timeout: Duration::from_secs(20), connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(30),
large_body_timeout: Duration::from_secs(180), large_body_timeout: Duration::from_secs(180),
user_agent: "rpki-dev/0.1 (stage2)".to_string(), user_agent: "rpki-dev/0.1 (stage2)".to_string(),
} }
@ -41,14 +44,15 @@ impl BlockingHttpFetcher {
pub fn new(config: HttpFetcherConfig) -> Result<Self, String> { pub fn new(config: HttpFetcherConfig) -> Result<Self, String> {
let short_timeout = config.timeout; let short_timeout = config.timeout;
let large_body_timeout = std::cmp::max(config.large_body_timeout, config.timeout); let large_body_timeout = std::cmp::max(config.large_body_timeout, config.timeout);
let connect_timeout = std::cmp::min(config.connect_timeout, config.timeout);
let short_client = Client::builder() let short_client = Client::builder()
.connect_timeout(config.timeout) .connect_timeout(connect_timeout)
.timeout(config.timeout) .timeout(config.timeout)
.user_agent(config.user_agent.clone()) .user_agent(config.user_agent.clone())
.build() .build()
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
let large_body_client = Client::builder() let large_body_client = Client::builder()
.connect_timeout(config.timeout) .connect_timeout(connect_timeout)
.timeout(large_body_timeout) .timeout(large_body_timeout)
.user_agent(config.user_agent) .user_agent(config.user_agent)
.build() .build()
@ -64,10 +68,7 @@ impl BlockingHttpFetcher {
pub fn fetch_bytes(&self, uri: &str) -> Result<Vec<u8>, String> { pub fn fetch_bytes(&self, uri: &str) -> Result<Vec<u8>, String> {
let started = std::time::Instant::now(); let started = std::time::Instant::now();
let (client, timeout_profile, timeout_value) = self.client_for_uri(uri); let (client, timeout_profile, timeout_value) = self.client_for_uri(uri);
let resp = client let resp = client.get(uri).send().map_err(|e| {
.get(uri)
.send()
.map_err(|e| {
let msg = format!("http request failed: {e}"); let msg = format!("http request failed: {e}");
crate::progress_log::emit( crate::progress_log::emit(
"http_fetch_failed", "http_fetch_failed",
@ -175,7 +176,11 @@ impl BlockingHttpFetcher {
fn client_for_uri(&self, uri: &str) -> (&Client, &'static str, Duration) { fn client_for_uri(&self, uri: &str) -> (&Client, &'static str, Duration) {
if uses_large_body_timeout(uri) { if uses_large_body_timeout(uri) {
(&self.large_body_client, "large_body", self.large_body_timeout) (
&self.large_body_client,
"large_body",
self.large_body_timeout,
)
} else { } else {
(&self.short_client, "short", self.short_timeout) (&self.short_client, "short", self.short_timeout)
} }
@ -200,9 +205,7 @@ fn header_value_opt(headers: &HeaderMap, name: &str) -> Option<String> {
} }
fn uses_large_body_timeout(uri: &str) -> bool { fn uses_large_body_timeout(uri: &str) -> bool {
uri.starts_with("https://") uri.starts_with("https://") && uri.ends_with(".xml") && !uri.ends_with("notification.xml")
&& uri.ends_with(".xml")
&& !uri.ends_with("notification.xml")
} }
#[cfg(test)] #[cfg(test)]
@ -280,13 +283,17 @@ mod tests {
#[test] #[test]
fn uses_large_body_timeout_selects_rrdp_snapshot_and_delta_not_notification() { fn uses_large_body_timeout_selects_rrdp_snapshot_and_delta_not_notification() {
assert!(!uses_large_body_timeout("https://rrdp.example.test/notification.xml")); assert!(!uses_large_body_timeout(
"https://rrdp.example.test/notification.xml"
));
assert!(uses_large_body_timeout( assert!(uses_large_body_timeout(
"https://rrdp.example.test/session/123/snapshot.xml" "https://rrdp.example.test/session/123/snapshot.xml"
)); ));
assert!(uses_large_body_timeout( assert!(uses_large_body_timeout(
"https://rrdp.example.test/session/123/delta-42.xml" "https://rrdp.example.test/session/123/delta-42.xml"
)); ));
assert!(!uses_large_body_timeout("https://tal.example.test/example.tal")); assert!(!uses_large_body_timeout(
"https://tal.example.test/example.tal"
));
} }
} }

View File

@ -1,6 +1,10 @@
use std::cell::RefCell;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::Command; use std::process::Command;
use std::process::Stdio;
use std::thread;
use std::time::Duration; use std::time::Duration;
use std::time::Instant;
use sha2::Digest; use sha2::Digest;
use uuid::Uuid; use uuid::Uuid;
@ -12,6 +16,7 @@ use crate::fetch::rsync::{
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct SystemRsyncConfig { pub struct SystemRsyncConfig {
pub rsync_bin: PathBuf, pub rsync_bin: PathBuf,
pub connect_timeout: Duration,
pub timeout: Duration, pub timeout: Duration,
pub extra_args: Vec<String>, pub extra_args: Vec<String>,
/// Optional root directory for persistent rsync mirrors. /// Optional root directory for persistent rsync mirrors.
@ -28,7 +33,8 @@ impl Default for SystemRsyncConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
rsync_bin: PathBuf::from("rsync"), rsync_bin: PathBuf::from("rsync"),
timeout: Duration::from_secs(60), connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(30),
extra_args: Vec::new(), extra_args: Vec::new(),
mirror_root: None, mirror_root: None,
} }
@ -44,6 +50,39 @@ pub struct SystemRsyncFetcher {
config: SystemRsyncConfig, config: SystemRsyncConfig,
} }
thread_local! {
static RSYNC_TIMEOUT_OVERRIDE: RefCell<Option<Duration>> = const { RefCell::new(None) };
static RSYNC_FAIL_FAST_PROFILE: RefCell<Option<RsyncFailFastProfile>> = const { RefCell::new(None) };
}
pub fn with_scoped_rsync_timeout_override<R>(timeout: Duration, f: impl FnOnce() -> R) -> R {
RSYNC_TIMEOUT_OVERRIDE.with(|cell| {
let previous = cell.replace(Some(timeout));
let result = f();
let _ = cell.replace(previous);
result
})
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RsyncFailFastProfile {
pub initial_wall_clock_timeout: Duration,
pub max_wall_clock_timeout: Duration,
pub max_attempts: usize,
}
pub fn with_scoped_rsync_fail_fast_profile<R>(
profile: RsyncFailFastProfile,
f: impl FnOnce() -> R,
) -> R {
RSYNC_FAIL_FAST_PROFILE.with(|cell| {
let previous = cell.replace(Some(profile));
let result = f();
let _ = cell.replace(previous);
result
})
}
impl SystemRsyncFetcher { impl SystemRsyncFetcher {
pub fn new(config: SystemRsyncConfig) -> Self { pub fn new(config: SystemRsyncConfig) -> Self {
Self { config } Self { config }
@ -69,22 +108,61 @@ impl SystemRsyncFetcher {
} }
fn run_rsync(&self, src: &str, dst: &Path) -> Result<(), String> { fn run_rsync(&self, src: &str, dst: &Path) -> Result<(), String> {
let fail_fast = RSYNC_FAIL_FAST_PROFILE.with(|cell| *cell.borrow());
if let Some(profile) = fail_fast {
return self.run_rsync_fail_fast(src, dst, profile);
}
self.run_rsync_once(src, dst, None, false)
}
fn run_rsync_once(
&self,
src: &str,
dst: &Path,
wall_clock_timeout: Option<Duration>,
keep_partial: bool,
) -> Result<(), String> {
// `--timeout` is I/O timeout in seconds (applies to network reads/writes). // `--timeout` is I/O timeout in seconds (applies to network reads/writes).
let timeout_secs = self.config.timeout.as_secs().max(1).to_string(); let timeout =
RSYNC_TIMEOUT_OVERRIDE.with(|cell| cell.borrow().unwrap_or(self.config.timeout));
let connect_timeout_secs = self.config.connect_timeout.as_secs().max(1).to_string();
let timeout_secs = timeout.as_secs().max(1).to_string();
let is_remote_rsync = src.starts_with("rsync://");
let mut cmd = Command::new(&self.config.rsync_bin); let mut cmd = Command::new(&self.config.rsync_bin);
cmd.arg("-rt") cmd.arg("-rt")
.arg("--delete") .arg("--delete")
.arg("--timeout") .arg("--timeout")
.arg(timeout_secs) .arg(timeout_secs)
.args(&self.config.extra_args) .args(&self.config.extra_args);
.arg(src) if is_remote_rsync {
.arg(dst); cmd.arg("--contimeout").arg(connect_timeout_secs);
}
if keep_partial {
cmd.arg("--partial");
}
cmd.arg(src)
.arg(dst)
.stdout(Stdio::piped())
.stderr(Stdio::piped());
let out = cmd let mut child = cmd
.output() .spawn()
.map_err(|e| format!("rsync spawn failed: {e}"))?; .map_err(|e| format!("rsync spawn failed: {e}"))?;
if !out.status.success() { if let Some(limit) = wall_clock_timeout {
let started = Instant::now();
loop {
match child
.try_wait()
.map_err(|e| format!("rsync wait failed: {e}"))?
{
Some(_status) => {
let out = child
.wait_with_output()
.map_err(|e| format!("rsync wait_with_output failed: {e}"))?;
if out.status.success() {
return Ok(());
}
let stderr = String::from_utf8_lossy(&out.stderr); let stderr = String::from_utf8_lossy(&out.stderr);
let stdout = String::from_utf8_lossy(&out.stdout); let stdout = String::from_utf8_lossy(&out.stdout);
return Err(format!( return Err(format!(
@ -94,7 +172,100 @@ impl SystemRsyncFetcher {
stderr.trim() stderr.trim()
)); ));
} }
Ok(()) None => {
if started.elapsed() >= limit {
let _ = child.kill();
let out = child
.wait_with_output()
.map_err(|e| format!("rsync wait_with_output failed: {e}"))?;
let stderr = String::from_utf8_lossy(&out.stderr);
let stdout = String::from_utf8_lossy(&out.stdout);
return Err(format!(
"rsync wall-clock timeout after {}s: stdout={} stderr={}",
limit.as_secs(),
stdout.trim(),
stderr.trim()
));
}
thread::sleep(Duration::from_millis(100));
}
}
}
}
let out = child
.wait_with_output()
.map_err(|e| format!("rsync wait_with_output failed: {e}"))?;
if out.status.success() {
return Ok(());
}
let stderr = String::from_utf8_lossy(&out.stderr);
let stdout = String::from_utf8_lossy(&out.stdout);
Err(format!(
"rsync failed: status={} stdout={} stderr={}",
out.status,
stdout.trim(),
stderr.trim()
))
}
fn run_rsync_fail_fast(
&self,
src: &str,
dst: &Path,
profile: RsyncFailFastProfile,
) -> Result<(), String> {
let mut attempt = 0usize;
let mut timeout = profile.initial_wall_clock_timeout;
let mut previous_progress = (0usize, 0u64);
let mut zero_progress_attempts = 0usize;
let max_timeout = std::cmp::max(
profile.max_wall_clock_timeout,
profile.initial_wall_clock_timeout,
);
loop {
attempt += 1;
match self.run_rsync_once(src, dst, Some(timeout), true) {
Ok(()) => return Ok(()),
Err(err) => {
if is_hard_fail_rsync_error(&err) {
return Err(format!(
"rsync fail-fast hard-fail on attempt {}: {}",
attempt, err
));
}
if !err.contains("wall-clock timeout") {
return Err(err);
}
let progress = dir_progress(dst)
.map_err(|e| format!("rsync fail-fast progress stat failed: {e}"))?;
if progress == (0, 0) {
zero_progress_attempts += 1;
if zero_progress_attempts >= 2 || attempt >= profile.max_attempts {
return Err(format!(
"rsync fail-fast gave up after {} attempts with no progress: {}",
attempt, err
));
}
} else if progress == previous_progress {
return Err(format!(
"rsync fail-fast gave up after {} attempts with no additional progress: {}",
attempt, err
));
} else {
previous_progress = progress;
}
if attempt >= profile.max_attempts {
return Err(format!(
"rsync fail-fast exhausted {} attempts: {}",
profile.max_attempts, err
));
}
timeout = std::cmp::min(timeout.saturating_mul(2), max_timeout);
}
}
}
} }
fn module_fetch_uri(&self, rsync_base_uri: &str) -> String { fn module_fetch_uri(&self, rsync_base_uri: &str) -> String {
@ -140,7 +311,8 @@ impl RsyncFetcher for SystemRsyncFetcher {
let tmp = TempDir::new().map_err(|e| RsyncFetchError::Fetch(e.to_string()))?; let tmp = TempDir::new().map_err(|e| RsyncFetchError::Fetch(e.to_string()))?;
self.run_rsync(&base, tmp.path()) self.run_rsync(&base, tmp.path())
.map_err(RsyncFetchError::Fetch)?; .map_err(RsyncFetchError::Fetch)?;
walk_dir_visit(tmp.path(), tmp.path(), &base, &mut wrapped).map_err(RsyncFetchError::Fetch)?; walk_dir_visit(tmp.path(), tmp.path(), &base, &mut wrapped)
.map_err(RsyncFetchError::Fetch)?;
Ok((count, bytes_total)) Ok((count, bytes_total))
} }
@ -178,7 +350,10 @@ fn rsync_module_root_uri(s: &str) -> Option<String> {
let mut host_and_path = rest.splitn(2, '/'); let mut host_and_path = rest.splitn(2, '/');
let authority = host_and_path.next()?; let authority = host_and_path.next()?;
let path = host_and_path.next()?; let path = host_and_path.next()?;
let mut segments: Vec<&str> = path.split('/').filter(|segment| !segment.is_empty()).collect(); let mut segments: Vec<&str> = path
.split('/')
.filter(|segment| !segment.is_empty())
.collect();
if segments.is_empty() { if segments.is_empty() {
return None; return None;
} }
@ -216,6 +391,38 @@ fn walk_dir_collect(
Ok(()) Ok(())
} }
fn dir_progress(root: &Path) -> Result<(usize, u64), String> {
if !root.exists() {
return Ok((0, 0));
}
let mut files = 0usize;
let mut bytes = 0u64;
let mut stack = vec![root.to_path_buf()];
while let Some(path) = stack.pop() {
let rd = std::fs::read_dir(&path).map_err(|e| e.to_string())?;
for entry in rd {
let entry = entry.map_err(|e| e.to_string())?;
let path = entry.path();
let meta = entry.metadata().map_err(|e| e.to_string())?;
if meta.is_dir() {
stack.push(path);
} else if meta.is_file() {
files += 1;
bytes += meta.len();
}
}
}
Ok((files, bytes))
}
fn is_hard_fail_rsync_error(msg: &str) -> bool {
let lower = msg.to_ascii_lowercase();
lower.contains("no route to host")
|| lower.contains("network is unreachable")
|| lower.contains("connection refused")
|| lower.contains("name or service not known")
}
fn walk_dir_visit( fn walk_dir_visit(
root: &Path, root: &Path,
current: &Path, current: &Path,
@ -314,6 +521,7 @@ mod tests {
// 1) Spawn error. // 1) Spawn error.
let f = SystemRsyncFetcher::new(SystemRsyncConfig { let f = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("/this/does/not/exist/rsync"), rsync_bin: PathBuf::from("/this/does/not/exist/rsync"),
connect_timeout: Duration::from_secs(1),
timeout: Duration::from_secs(1), timeout: Duration::from_secs(1),
extra_args: Vec::new(), extra_args: Vec::new(),
mirror_root: None, mirror_root: None,
@ -326,6 +534,7 @@ mod tests {
// 2) Non-zero exit status. // 2) Non-zero exit status.
let f = SystemRsyncFetcher::new(SystemRsyncConfig { let f = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("false"), rsync_bin: PathBuf::from("false"),
connect_timeout: Duration::from_secs(1),
timeout: Duration::from_secs(1), timeout: Duration::from_secs(1),
extra_args: Vec::new(), extra_args: Vec::new(),
mirror_root: None, mirror_root: None,
@ -345,6 +554,7 @@ mod tests {
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig { let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("rsync"), rsync_bin: PathBuf::from("rsync"),
connect_timeout: Duration::from_secs(1),
timeout: Duration::from_secs(1), timeout: Duration::from_secs(1),
extra_args: Vec::new(), extra_args: Vec::new(),
mirror_root: Some(root_file.clone()), mirror_root: Some(root_file.clone()),
@ -371,6 +581,7 @@ mod tests {
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig { let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("rsync"), rsync_bin: PathBuf::from("rsync"),
connect_timeout: Duration::from_secs(1),
timeout: Duration::from_secs(1), timeout: Duration::from_secs(1),
extra_args: Vec::new(), extra_args: Vec::new(),
mirror_root: Some(root.clone()), mirror_root: Some(root.clone()),
@ -406,4 +617,168 @@ mod tests {
assert_eq!(out.len(), 1); assert_eq!(out.len(), 1);
assert_eq!(out[0].0, "rsync://example.net/repo/a.cer"); assert_eq!(out[0].0, "rsync://example.net/repo/a.cer");
} }
#[cfg(unix)]
#[test]
fn rsync_fail_fast_retries_when_progress_is_made() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let script = temp.path().join("fake-rsync.sh");
let state = temp.path().join("state.txt");
std::fs::write(
&script,
format!(
"#!/usr/bin/env bash\nset -euo pipefail\nSTATE=\"{}\"\nDST=\"${{@: -1}}\"\nCOUNT=0\nif [[ -f \"$STATE\" ]]; then COUNT=$(cat \"$STATE\"); fi\nCOUNT=$((COUNT+1))\necho \"$COUNT\" > \"$STATE\"\nmkdir -p \"$DST\"\nif [[ \"$COUNT\" -eq 1 ]]; then\n echo first > \"$DST/part1\"\n sleep 2\nelse\n echo second > \"$DST/part2\"\nfi\n",
state.display()
),
)
.expect("write script");
let mut perms = std::fs::metadata(&script).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&script, perms).unwrap();
let dst = temp.path().join("dst");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: script,
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(60),
extra_args: Vec::new(),
mirror_root: None,
});
fetcher
.run_rsync_fail_fast(
"rsync://example.net/repo/",
&dst,
RsyncFailFastProfile {
initial_wall_clock_timeout: Duration::from_secs(1),
max_wall_clock_timeout: Duration::from_secs(4),
max_attempts: 3,
},
)
.expect("eventual success");
assert!(dst.join("part1").exists());
assert!(dst.join("part2").exists());
}
#[cfg(unix)]
#[test]
fn rsync_fail_fast_gives_up_after_two_zero_progress_timeouts() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let script = temp.path().join("fake-rsync.sh");
std::fs::write(&script, "#!/usr/bin/env bash\nset -euo pipefail\nsleep 5\n")
.expect("write script");
let mut perms = std::fs::metadata(&script).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&script, perms).unwrap();
let dst = temp.path().join("dst");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: script,
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(60),
extra_args: Vec::new(),
mirror_root: None,
});
let err = fetcher
.run_rsync_fail_fast(
"rsync://example.net/repo/",
&dst,
RsyncFailFastProfile {
initial_wall_clock_timeout: Duration::from_secs(1),
max_wall_clock_timeout: Duration::from_secs(2),
max_attempts: 4,
},
)
.expect_err("must fail");
assert!(err.contains("no progress"), "{err}");
}
#[cfg(unix)]
#[test]
fn rsync_fail_fast_hard_fail_stops_after_first_attempt() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let script = temp.path().join("fake-rsync.sh");
let state = temp.path().join("state.txt");
std::fs::write(
&script,
format!(
"#!/usr/bin/env bash\nset -euo pipefail\nSTATE=\"{}\"\nCOUNT=0\nif [[ -f \"$STATE\" ]]; then COUNT=$(cat \"$STATE\"); fi\nCOUNT=$((COUNT+1))\necho \"$COUNT\" > \"$STATE\"\necho 'rsync: [Receiver] failed to connect to host (1.2.3.4): Connection refused (111)' >&2\nexit 10\n",
state.display()
),
)
.expect("write script");
let mut perms = std::fs::metadata(&script).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&script, perms).unwrap();
let dst = temp.path().join("dst");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: script,
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(60),
extra_args: Vec::new(),
mirror_root: None,
});
let err = fetcher
.run_rsync_fail_fast(
"rsync://example.net/repo/",
&dst,
RsyncFailFastProfile {
initial_wall_clock_timeout: Duration::from_secs(10),
max_wall_clock_timeout: Duration::from_secs(80),
max_attempts: 4,
},
)
.expect_err("must hard fail");
assert!(err.contains("hard-fail"), "{err}");
let count = std::fs::read_to_string(&state).unwrap();
assert_eq!(count.trim(), "1");
}
#[cfg(unix)]
#[test]
fn run_rsync_once_passes_contimeout_and_timeout_args() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let script = temp.path().join("capture-rsync.sh");
let args_file = temp.path().join("args.txt");
std::fs::write(
&script,
format!(
"#!/usr/bin/env bash\nset -euo pipefail\nprintf '%s\\n' \"$@\" > \"{}\"\nDST=\"${{@: -1}}\"\nmkdir -p \"$DST\"\n",
args_file.display()
),
)
.expect("write script");
let mut perms = std::fs::metadata(&script).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&script, perms).unwrap();
let dst = temp.path().join("dst");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: script,
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(30),
extra_args: Vec::new(),
mirror_root: None,
});
fetcher
.run_rsync_once("rsync://example.net/repo/", &dst, None, false)
.expect("rsync");
let args = std::fs::read_to_string(&args_file).expect("read args");
assert!(args.contains("--contimeout\n15\n"), "{args}");
assert!(args.contains("--timeout\n30\n"), "{args}");
}
} }

View File

@ -3,9 +3,9 @@ pub mod cir;
pub mod data_model; pub mod data_model;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod analysis; pub mod blob_store;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod bundle; pub mod analysis;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod audit; pub mod audit;
#[cfg(feature = "full")] #[cfg(feature = "full")]
@ -13,6 +13,8 @@ pub mod audit_downloads;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod audit_trace; pub mod audit_trace;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod bundle;
#[cfg(feature = "full")]
pub mod cli; pub mod cli;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod fetch; pub mod fetch;

View File

@ -874,9 +874,13 @@ mod tests {
.join("meta.json"); .join("meta.json");
std::fs::remove_file(&meta_path).expect("remove rsync module meta"); std::fs::remove_file(&meta_path).expect("remove rsync module meta");
let index = ReplayArchiveIndex::load_allow_missing_rsync_modules(&archive_root, &locks_path) let index =
ReplayArchiveIndex::load_allow_missing_rsync_modules(&archive_root, &locks_path)
.expect("load replay index without rsync meta"); .expect("load replay index without rsync meta");
let module = index.rsync_modules.get(&module_uri).expect("module present"); let module = index
.rsync_modules
.get(&module_uri)
.expect("module present");
assert_eq!(module.meta.module, module_uri); assert_eq!(module.meta.module, module_uri);
assert_eq!(module.meta.version, 1); assert_eq!(module.meta.version, 1);
} }

View File

@ -916,7 +916,10 @@ mod tests {
let index = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path) let index = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path)
.expect("load delta replay index without rsync meta"); .expect("load delta replay index without rsync meta");
let module = index.rsync_modules.get(&module_uri).expect("module present"); let module = index
.rsync_modules
.get(&module_uri)
.expect("module present");
assert_eq!(module.meta.module, module_uri); assert_eq!(module.meta.module, module_uri);
assert_eq!(module.meta.version, 1); assert_eq!(module.meta.version, 1);
} }

View File

@ -8,6 +8,7 @@ use rocksdb::{
use serde::{Deserialize, Serialize, de::DeserializeOwned}; use serde::{Deserialize, Serialize, de::DeserializeOwned};
use sha2::Digest; use sha2::Digest;
use crate::blob_store::{ExternalRawStoreDb, RawObjectStore};
use crate::data_model::rc::{AsResourceSet, IpResourceSet}; use crate::data_model::rc::{AsResourceSet, IpResourceSet};
pub const CF_REPOSITORY_VIEW: &str = "repository_view"; pub const CF_REPOSITORY_VIEW: &str = "repository_view";
@ -77,6 +78,7 @@ pub type StorageResult<T> = Result<T, StorageError>;
pub struct RocksStore { pub struct RocksStore {
db: DB, db: DB,
external_raw_store: Option<ExternalRawStoreDb>,
} }
pub mod pack { pub mod pack {
@ -769,7 +771,19 @@ impl RocksStore {
let db = DB::open_cf_descriptors(&base_opts, path, column_family_descriptors()) let db = DB::open_cf_descriptors(&base_opts, path, column_family_descriptors())
.map_err(|e| StorageError::RocksDb(e.to_string()))?; .map_err(|e| StorageError::RocksDb(e.to_string()))?;
Ok(Self { db }) Ok(Self {
db,
external_raw_store: None,
})
}
pub fn open_with_external_raw_store(
path: &Path,
raw_store_path: &Path,
) -> StorageResult<Self> {
let mut store = Self::open(path)?;
store.external_raw_store = Some(ExternalRawStoreDb::open(raw_store_path)?);
Ok(store)
} }
fn cf(&self, name: &'static str) -> StorageResult<&ColumnFamily> { fn cf(&self, name: &'static str) -> StorageResult<&ColumnFamily> {
@ -822,7 +836,10 @@ impl RocksStore {
member_records: &[RrdpSourceMemberRecord], member_records: &[RrdpSourceMemberRecord],
owner_records: &[RrdpUriOwnerRecord], owner_records: &[RrdpUriOwnerRecord],
) -> StorageResult<()> { ) -> StorageResult<()> {
if repository_view_entries.is_empty() && member_records.is_empty() && owner_records.is_empty() { if repository_view_entries.is_empty()
&& member_records.is_empty()
&& owner_records.is_empty()
{
return Ok(()); return Ok(());
} }
@ -877,6 +894,9 @@ impl RocksStore {
pub fn put_raw_by_hash_entry(&self, entry: &RawByHashEntry) -> StorageResult<()> { pub fn put_raw_by_hash_entry(&self, entry: &RawByHashEntry) -> StorageResult<()> {
entry.validate_internal()?; entry.validate_internal()?;
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.put_raw_entry(entry);
}
let cf = self.cf(CF_RAW_BY_HASH)?; let cf = self.cf(CF_RAW_BY_HASH)?;
let key = raw_by_hash_key(&entry.sha256_hex); let key = raw_by_hash_key(&entry.sha256_hex);
let value = encode_cbor(entry, "raw_by_hash")?; let value = encode_cbor(entry, "raw_by_hash")?;
@ -890,6 +910,9 @@ impl RocksStore {
if entries.is_empty() { if entries.is_empty() {
return Ok(()); return Ok(());
} }
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.put_raw_entries_batch(entries);
}
let cf = self.cf(CF_RAW_BY_HASH)?; let cf = self.cf(CF_RAW_BY_HASH)?;
let mut batch = WriteBatch::default(); let mut batch = WriteBatch::default();
@ -909,6 +932,9 @@ impl RocksStore {
if entries.is_empty() { if entries.is_empty() {
return Ok(()); return Ok(());
} }
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.put_raw_entries_batch(entries);
}
let cf = self.cf(CF_RAW_BY_HASH)?; let cf = self.cf(CF_RAW_BY_HASH)?;
let mut batch = WriteBatch::default(); let mut batch = WriteBatch::default();
@ -922,6 +948,9 @@ impl RocksStore {
pub fn delete_raw_by_hash_entry(&self, sha256_hex: &str) -> StorageResult<()> { pub fn delete_raw_by_hash_entry(&self, sha256_hex: &str) -> StorageResult<()> {
validate_sha256_hex("raw_by_hash.sha256_hex", sha256_hex)?; validate_sha256_hex("raw_by_hash.sha256_hex", sha256_hex)?;
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.delete_raw_entry(sha256_hex);
}
let cf = self.cf(CF_RAW_BY_HASH)?; let cf = self.cf(CF_RAW_BY_HASH)?;
let key = raw_by_hash_key(sha256_hex); let key = raw_by_hash_key(sha256_hex);
self.db self.db
@ -931,6 +960,9 @@ impl RocksStore {
} }
pub fn get_raw_by_hash_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>> { pub fn get_raw_by_hash_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>> {
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.get_raw_entry(sha256_hex);
}
let cf = self.cf(CF_RAW_BY_HASH)?; let cf = self.cf(CF_RAW_BY_HASH)?;
let key = raw_by_hash_key(sha256_hex); let key = raw_by_hash_key(sha256_hex);
let Some(bytes) = self let Some(bytes) = self
@ -952,9 +984,15 @@ impl RocksStore {
if sha256_hexes.is_empty() { if sha256_hexes.is_empty() {
return Ok(Vec::new()); return Ok(Vec::new());
} }
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.get_raw_entries_batch(sha256_hexes);
}
let cf = self.cf(CF_RAW_BY_HASH)?; let cf = self.cf(CF_RAW_BY_HASH)?;
let keys: Vec<String> = sha256_hexes.iter().map(|hash| raw_by_hash_key(hash)).collect(); let keys: Vec<String> = sha256_hexes
.iter()
.map(|hash| raw_by_hash_key(hash))
.collect();
self.db self.db
.multi_get_cf(keys.iter().map(|key| (cf, key.as_bytes()))) .multi_get_cf(keys.iter().map(|key| (cf, key.as_bytes())))
.into_iter() .into_iter()
@ -1217,7 +1255,10 @@ impl RocksStore {
)) ))
} }
pub fn load_current_object_bytes_by_uri(&self, rsync_uri: &str) -> StorageResult<Option<Vec<u8>>> { pub fn load_current_object_bytes_by_uri(
&self,
rsync_uri: &str,
) -> StorageResult<Option<Vec<u8>>> {
let Some(view) = self.get_repository_view_entry(rsync_uri)? else { let Some(view) = self.get_repository_view_entry(rsync_uri)? else {
return Ok(None); return Ok(None);
}; };
@ -1230,17 +1271,15 @@ impl RocksStore {
.as_deref() .as_deref()
.ok_or(StorageError::InvalidData { .ok_or(StorageError::InvalidData {
entity: "repository_view", entity: "repository_view",
detail: format!( detail: format!("current_hash missing for current object URI: {rsync_uri}"),
"current_hash missing for current object URI: {rsync_uri}"
),
})?; })?;
let raw = self.get_raw_by_hash_entry(hash)?.ok_or(StorageError::InvalidData { let bytes = self.get_blob_bytes(hash)?.ok_or(StorageError::InvalidData {
entity: "repository_view", entity: "repository_view",
detail: format!( detail: format!(
"raw_by_hash entry missing for current object URI: {rsync_uri} (hash={hash})" "raw_by_hash entry missing for current object URI: {rsync_uri} (hash={hash})"
), ),
})?; })?;
Ok(Some(raw.bytes)) Ok(Some(bytes))
} }
} }
} }
@ -1283,7 +1322,6 @@ impl RocksStore {
Ok(()) Ok(())
} }
#[allow(dead_code)] #[allow(dead_code)]
pub fn write_batch(&self, batch: WriteBatch) -> StorageResult<()> { pub fn write_batch(&self, batch: WriteBatch) -> StorageResult<()> {
@ -1721,6 +1759,37 @@ mod tests {
assert_eq!(got_raw, raw); assert_eq!(got_raw, raw);
} }
#[test]
fn raw_by_hash_routes_to_external_raw_store_when_configured() {
let td = tempfile::tempdir().expect("tempdir");
let main_db = td.path().join("main-db");
let raw_db = td.path().join("raw-store.db");
let raw = sample_raw_by_hash_entry(b"external-raw".to_vec());
{
let store =
RocksStore::open_with_external_raw_store(&main_db, &raw_db).expect("open store");
store
.put_raw_by_hash_entry(&raw)
.expect("put external raw");
let got = store
.get_raw_by_hash_entry(&raw.sha256_hex)
.expect("get external raw")
.expect("raw exists");
assert_eq!(got, raw);
}
let main_store = RocksStore::open(&main_db).expect("open main only");
assert!(
main_store
.get_raw_by_hash_entry(&raw.sha256_hex)
.expect("read main store")
.is_none(),
"main db should not contain raw entry when external raw store is configured"
);
}
#[test] #[test]
fn repository_view_and_raw_by_hash_validation_errors_are_reported() { fn repository_view_and_raw_by_hash_validation_errors_are_reported() {
let td = tempfile::tempdir().expect("tempdir"); let td = tempfile::tempdir().expect("tempdir");
@ -1880,10 +1949,15 @@ mod tests {
store store
.replace_vcir_and_audit_rule_indexes(None, &previous) .replace_vcir_and_audit_rule_indexes(None, &previous)
.expect("store previous vcir"); .expect("store previous vcir");
assert!(store assert!(
.get_audit_rule_index_entry(AuditRuleKind::Roa, &previous.local_outputs[0].rule_hash) store
.get_audit_rule_index_entry(
AuditRuleKind::Roa,
&previous.local_outputs[0].rule_hash
)
.expect("get old audit entry") .expect("get old audit entry")
.is_some()); .is_some()
);
let mut current = sample_vcir("rsync://example.test/repo/current.mft"); let mut current = sample_vcir("rsync://example.test/repo/current.mft");
current.local_outputs = vec![VcirLocalOutput { current.local_outputs = vec![VcirLocalOutput {
@ -1909,14 +1983,24 @@ mod tests {
.expect("get replaced vcir") .expect("get replaced vcir")
.expect("vcir exists"); .expect("vcir exists");
assert_eq!(got, current); assert_eq!(got, current);
assert!(store assert!(
.get_audit_rule_index_entry(AuditRuleKind::Roa, &previous.local_outputs[0].rule_hash) store
.get_audit_rule_index_entry(
AuditRuleKind::Roa,
&previous.local_outputs[0].rule_hash
)
.expect("get deleted old audit entry") .expect("get deleted old audit entry")
.is_none()); .is_none()
assert!(store );
.get_audit_rule_index_entry(AuditRuleKind::Aspa, &current.local_outputs[0].rule_hash) assert!(
store
.get_audit_rule_index_entry(
AuditRuleKind::Aspa,
&current.local_outputs[0].rule_hash
)
.expect("get new audit entry") .expect("get new audit entry")
.is_some()); .is_some()
);
} }
#[test] #[test]
@ -2218,8 +2302,11 @@ mod tests {
let present_bytes = b"present-object".to_vec(); let present_bytes = b"present-object".to_vec();
let present_hash = sha256_hex(&present_bytes); let present_hash = sha256_hex(&present_bytes);
let mut present_raw = RawByHashEntry::from_bytes(present_hash.clone(), present_bytes.clone()); let mut present_raw =
present_raw.origin_uris.push("rsync://example.test/repo/present.roa".to_string()); RawByHashEntry::from_bytes(present_hash.clone(), present_bytes.clone());
present_raw
.origin_uris
.push("rsync://example.test/repo/present.roa".to_string());
present_raw.object_type = Some("roa".to_string()); present_raw.object_type = Some("roa".to_string());
store store
.put_raw_by_hash_entry(&present_raw) .put_raw_by_hash_entry(&present_raw)
@ -2238,7 +2325,9 @@ mod tests {
let replaced_hash = sha256_hex(&replaced_bytes); let replaced_hash = sha256_hex(&replaced_bytes);
let mut replaced_raw = let mut replaced_raw =
RawByHashEntry::from_bytes(replaced_hash.clone(), replaced_bytes.clone()); RawByHashEntry::from_bytes(replaced_hash.clone(), replaced_bytes.clone());
replaced_raw.origin_uris.push("rsync://example.test/repo/replaced.cer".to_string()); replaced_raw
.origin_uris
.push("rsync://example.test/repo/replaced.cer".to_string());
replaced_raw.object_type = Some("cer".to_string()); replaced_raw.object_type = Some("cer".to_string());
store store
.put_raw_by_hash_entry(&replaced_raw) .put_raw_by_hash_entry(&replaced_raw)
@ -2309,5 +2398,4 @@ mod tests {
.expect_err("missing raw_by_hash should error"); .expect_err("missing raw_by_hash should error");
assert!(matches!(err, StorageError::InvalidData { .. })); assert!(matches!(err, StorageError::InvalidData { .. }));
} }
} }

View File

@ -10,36 +10,37 @@ use crate::storage::{RawByHashEntry, RocksStore};
use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log; use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log;
use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError, load_rrdp_local_state}; use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError, load_rrdp_local_state};
use crate::sync::store_projection::{ use crate::sync::store_projection::{
build_repository_view_present_entry, build_repository_view_present_entry, build_repository_view_withdrawn_entry, compute_sha256_hex,
build_repository_view_withdrawn_entry, infer_object_type_from_uri,
compute_sha256_hex, infer_object_type_from_uri,
}; };
use std::collections::{BTreeMap, HashSet}; use std::collections::{BTreeMap, HashSet};
use std::thread;
use std::time::Duration;
#[cfg(test)] #[cfg(test)]
use crate::storage::RrdpSourceSyncState; use crate::storage::RrdpSourceSyncState;
#[cfg(test)] #[cfg(test)]
use crate::sync::rrdp::persist_rrdp_local_state; use crate::sync::rrdp::persist_rrdp_local_state;
const RRDP_RETRY_BACKOFFS_PROD: [Duration; 3] = [
Duration::from_millis(200),
Duration::from_millis(500),
Duration::from_secs(1),
];
const RRDP_RETRY_BACKOFFS_TEST: [Duration; 2] =
[Duration::from_millis(0), Duration::from_millis(0)];
#[derive(Clone, Copy, Debug, PartialEq, Eq)] #[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RepoSyncSource { pub enum RepoSyncSource {
Rrdp, Rrdp,
Rsync, Rsync,
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RepoSyncPhase {
RrdpOk,
RrdpFailedRsyncOk,
RsyncOnlyOk,
ReplayRrdpOk,
ReplayRsyncOk,
ReplayNoopRrdp,
ReplayNoopRsync,
}
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepoSyncResult { pub struct RepoSyncResult {
pub source: RepoSyncSource, pub source: RepoSyncSource,
pub phase: RepoSyncPhase,
pub objects_written: usize, pub objects_written: usize,
pub warnings: Vec<Warning>, pub warnings: Vec<Warning>,
} }
@ -99,6 +100,7 @@ pub fn sync_publication_point(
); );
Ok(RepoSyncResult { Ok(RepoSyncResult {
source: RepoSyncSource::Rrdp, source: RepoSyncSource::Rrdp,
phase: RepoSyncPhase::RrdpOk,
objects_written: written, objects_written: written,
warnings: Vec::new(), warnings: Vec::new(),
}) })
@ -115,6 +117,14 @@ pub fn sync_publication_point(
"rrdp_error": err.to_string(), "rrdp_error": err.to_string(),
}), }),
); );
crate::progress_log::emit(
"rrdp_failed_fallback_rsync",
serde_json::json!({
"notify_uri": notification_uri,
"rsync_base_uri": rsync_base_uri,
"rrdp_error": err.to_string(),
}),
);
let warnings = vec![ let warnings = vec![
Warning::new(format!("RRDP failed; falling back to rsync: {err}")) Warning::new(format!("RRDP failed; falling back to rsync: {err}"))
.with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5")]) .with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5")])
@ -127,12 +137,16 @@ pub fn sync_publication_point(
timing, timing,
download_log, download_log,
)?; )?;
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rsync_peer_aligned_profile_total", 1);
}
if let Some(t) = timing.as_ref() { if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rsync_fallback_ok_total", 1); t.record_count("repo_sync_rsync_fallback_ok_total", 1);
t.record_count("repo_sync_rsync_objects_written_total", written as u64); t.record_count("repo_sync_rsync_objects_written_total", written as u64);
} }
Ok(RepoSyncResult { Ok(RepoSyncResult {
source: RepoSyncSource::Rsync, source: RepoSyncSource::Rsync,
phase: RepoSyncPhase::RrdpFailedRsyncOk,
objects_written: written, objects_written: written,
warnings, warnings,
}) })
@ -147,6 +161,9 @@ pub fn sync_publication_point(
timing, timing,
download_log, download_log,
)?; )?;
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rsync_peer_aligned_profile_total", 1);
}
crate::progress_log::emit( crate::progress_log::emit(
"repo_sync_rsync_direct", "repo_sync_rsync_direct",
serde_json::json!({ serde_json::json!({
@ -160,6 +177,7 @@ pub fn sync_publication_point(
} }
Ok(RepoSyncResult { Ok(RepoSyncResult {
source: RepoSyncSource::Rsync, source: RepoSyncSource::Rsync,
phase: RepoSyncPhase::RsyncOnlyOk,
objects_written: written, objects_written: written,
warnings: Vec::new(), warnings: Vec::new(),
}) })
@ -192,6 +210,7 @@ pub fn sync_publication_point_replay(
} }
Ok(RepoSyncResult { Ok(RepoSyncResult {
source: RepoSyncSource::Rrdp, source: RepoSyncSource::Rrdp,
phase: RepoSyncPhase::ReplayRrdpOk,
objects_written: written, objects_written: written,
warnings: Vec::new(), warnings: Vec::new(),
}) })
@ -210,6 +229,7 @@ pub fn sync_publication_point_replay(
} }
Ok(RepoSyncResult { Ok(RepoSyncResult {
source: RepoSyncSource::Rsync, source: RepoSyncSource::Rsync,
phase: RepoSyncPhase::ReplayRsyncOk,
objects_written: written, objects_written: written,
warnings: Vec::new(), warnings: Vec::new(),
}) })
@ -243,6 +263,7 @@ pub fn sync_publication_point_replay_delta(
} }
Ok(RepoSyncResult { Ok(RepoSyncResult {
source: RepoSyncSource::Rrdp, source: RepoSyncSource::Rrdp,
phase: RepoSyncPhase::ReplayRrdpOk,
objects_written: written, objects_written: written,
warnings: Vec::new(), warnings: Vec::new(),
}) })
@ -261,12 +282,17 @@ pub fn sync_publication_point_replay_delta(
} }
Ok(RepoSyncResult { Ok(RepoSyncResult {
source: RepoSyncSource::Rsync, source: RepoSyncSource::Rsync,
phase: RepoSyncPhase::ReplayRsyncOk,
objects_written: written, objects_written: written,
warnings: Vec::new(), warnings: Vec::new(),
}) })
} }
ReplayDeltaResolvedTransport::Noop(source) => Ok(RepoSyncResult { ReplayDeltaResolvedTransport::Noop(source) => Ok(RepoSyncResult {
source, source,
phase: match source {
RepoSyncSource::Rrdp => RepoSyncPhase::ReplayNoopRrdp,
RepoSyncSource::Rsync => RepoSyncPhase::ReplayNoopRsync,
},
objects_written: 0, objects_written: 0,
warnings: Vec::new(), warnings: Vec::new(),
}), }),
@ -448,14 +474,6 @@ fn is_retryable_http_fetch_error(msg: &str) -> bool {
code == 408 || code == 429 || (500..600).contains(&code) code == 408 || code == 429 || (500..600).contains(&code)
} }
fn rrdp_retry_backoffs() -> &'static [Duration] {
if cfg!(test) {
&RRDP_RETRY_BACKOFFS_TEST
} else {
&RRDP_RETRY_BACKOFFS_PROD
}
}
fn try_rrdp_sync_with_retry( fn try_rrdp_sync_with_retry(
store: &RocksStore, store: &RocksStore,
notification_uri: &str, notification_uri: &str,
@ -463,12 +481,7 @@ fn try_rrdp_sync_with_retry(
timing: Option<&TimingHandle>, timing: Option<&TimingHandle>,
download_log: Option<&DownloadLogHandle>, download_log: Option<&DownloadLogHandle>,
) -> Result<usize, RrdpSyncError> { ) -> Result<usize, RrdpSyncError> {
let backoffs = rrdp_retry_backoffs(); let attempt = 1usize;
let max_attempts = backoffs.len().saturating_add(1).max(1);
let mut attempt: usize = 0;
loop {
attempt += 1;
crate::progress_log::emit( crate::progress_log::emit(
"rrdp_sync_attempt", "rrdp_sync_attempt",
serde_json::json!({ serde_json::json!({
@ -490,41 +503,13 @@ fn try_rrdp_sync_with_retry(
"objects_written": written, "objects_written": written,
}), }),
); );
if attempt > 1 { Ok(written)
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_retry_success_total", 1);
}
}
return Ok(written);
} }
Err(err) => { Err(err) => {
let retryable = match &err { let retryable = match &err {
RrdpSyncError::Fetch(msg) => is_retryable_http_fetch_error(msg), RrdpSyncError::Fetch(msg) => is_retryable_http_fetch_error(msg),
_ => false, _ => false,
}; };
if retryable && attempt < max_attempts {
crate::progress_log::emit(
"rrdp_sync_retry",
serde_json::json!({
"notify_uri": notification_uri,
"attempt": attempt,
"error": err.to_string(),
}),
);
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_retry_sleep_total", 1);
}
let backoff = backoffs
.get(attempt.saturating_sub(1))
.copied()
.unwrap_or_else(|| Duration::from_secs(0));
if !backoff.is_zero() {
thread::sleep(backoff);
}
continue;
}
crate::progress_log::emit( crate::progress_log::emit(
"rrdp_sync_failed", "rrdp_sync_failed",
serde_json::json!({ serde_json::json!({
@ -540,12 +525,8 @@ fn try_rrdp_sync_with_retry(
RrdpSyncError::Rrdp(_) => t.record_count("rrdp_failed_protocol_total", 1), RrdpSyncError::Rrdp(_) => t.record_count("rrdp_failed_protocol_total", 1),
RrdpSyncError::Storage(_) => t.record_count("rrdp_failed_storage_total", 1), RrdpSyncError::Storage(_) => t.record_count("rrdp_failed_storage_total", 1),
} }
if retryable && attempt >= max_attempts && attempt > 1 {
t.record_count("rrdp_retry_exhausted_total", 1);
}
}
return Err(err);
} }
Err(err)
} }
} }
} }
@ -575,7 +556,8 @@ fn rsync_sync_into_current_store(
let mut new_set: HashSet<String> = HashSet::new(); let mut new_set: HashSet<String> = HashSet::new();
let mut uri_to_hash: BTreeMap<String, String> = BTreeMap::new(); let mut uri_to_hash: BTreeMap<String, String> = BTreeMap::new();
let mut pending_raw: BTreeMap<String, RawByHashEntry> = BTreeMap::new(); let mut pending_raw: BTreeMap<String, RawByHashEntry> = BTreeMap::new();
let (object_count, bytes_total) = match rsync_fetcher.visit_objects(rsync_base_uri, &mut |uri, bytes| { let (object_count, bytes_total) =
match rsync_fetcher.visit_objects(rsync_base_uri, &mut |uri, bytes| {
let sha256_hex = compute_sha256_hex(&bytes); let sha256_hex = compute_sha256_hex(&bytes);
new_set.insert(uri.clone()); new_set.insert(uri.clone());
uri_to_hash.insert(uri.clone(), sha256_hex.clone()); uri_to_hash.insert(uri.clone(), sha256_hex.clone());
@ -639,9 +621,9 @@ fn rsync_sync_into_current_store(
.map_err(|e| RepoSyncError::Storage(e.to_string()))?; .map_err(|e| RepoSyncError::Storage(e.to_string()))?;
let mut entries_to_write = Vec::new(); let mut entries_to_write = Vec::new();
for (hash, existing_opt) in hashes.into_iter().zip(existing_entries.into_iter()) { for (hash, existing_opt) in hashes.into_iter().zip(existing_entries.into_iter()) {
let mut pending_entry = pending_raw let mut pending_entry = pending_raw.remove(&hash).ok_or_else(|| {
.remove(&hash) RepoSyncError::Storage(format!("missing pending raw entry for {hash}"))
.ok_or_else(|| RepoSyncError::Storage(format!("missing pending raw entry for {hash}")))?; })?;
match existing_opt { match existing_opt {
Some(mut existing) => { Some(mut existing) => {
if existing.bytes != pending_entry.bytes { if existing.bytes != pending_entry.bytes {
@ -651,7 +633,11 @@ fn rsync_sync_into_current_store(
} }
let mut changed = false; let mut changed = false;
for uri in pending_entry.origin_uris.drain(..) { for uri in pending_entry.origin_uris.drain(..) {
if !existing.origin_uris.iter().any(|existing_uri| existing_uri == &uri) { if !existing
.origin_uris
.iter()
.any(|existing_uri| existing_uri == &uri)
{
existing.origin_uris.push(uri); existing.origin_uris.push(uri);
changed = true; changed = true;
} }
@ -679,10 +665,9 @@ fn rsync_sync_into_current_store(
} }
for uri in &new_set { for uri in &new_set {
let current_hash = uri_to_hash let current_hash = uri_to_hash.get(uri).cloned().ok_or_else(|| {
.get(uri) RepoSyncError::Storage(format!("missing raw_by_hash mapping for {uri}"))
.cloned() })?;
.ok_or_else(|| RepoSyncError::Storage(format!("missing raw_by_hash mapping for {uri}")))?;
repository_view_entries.push(build_repository_view_present_entry( repository_view_entries.push(build_repository_view_present_entry(
&sync_scope_uri, &sync_scope_uri,
uri, uri,
@ -736,9 +721,9 @@ mod tests {
use crate::replay::fetch_http::PayloadReplayHttpFetcher; use crate::replay::fetch_http::PayloadReplayHttpFetcher;
use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher; use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher;
use crate::storage::RepositoryViewState; use crate::storage::RepositoryViewState;
use crate::sync::store_projection::build_repository_view_present_entry;
use crate::sync::rrdp::Fetcher as HttpFetcher; use crate::sync::rrdp::Fetcher as HttpFetcher;
use crate::sync::rrdp::RrdpState; use crate::sync::rrdp::RrdpState;
use crate::sync::store_projection::build_repository_view_present_entry;
use base64::Engine; use base64::Engine;
use sha2::Digest; use sha2::Digest;
use std::collections::HashMap; use std::collections::HashMap;
@ -1322,7 +1307,7 @@ mod tests {
} }
#[test] #[test]
fn rrdp_retry_succeeds_without_rsync_when_notification_fetch_is_transient() { fn rrdp_fetch_error_falls_back_to_rsync_without_retry() {
let temp = tempfile::tempdir().expect("tempdir"); let temp = tempfile::tempdir().expect("tempdir");
let store_dir = temp.path().join("db"); let store_dir = temp.path().join("db");
let store = RocksStore::open(&store_dir).expect("open rocksdb"); let store = RocksStore::open(&store_dir).expect("open rocksdb");
@ -1335,50 +1320,33 @@ mod tests {
}); });
let notification_uri = "https://example.test/notification.xml"; let notification_uri = "https://example.test/notification.xml";
let snapshot_uri = "https://example.test/snapshot.xml";
let published_uri = "rsync://example.test/repo/a.mft"; let published_uri = "rsync://example.test/repo/a.mft";
let published_bytes = b"x"; let published_bytes = b"x";
struct AlwaysFailHttp {
let snapshot = snapshot_xml(
"9df4b597-af9e-4dca-bdda-719cce2c4e28",
1,
&[(published_uri, published_bytes)],
);
let snapshot_hash = hex::encode(sha2::Sha256::digest(&snapshot));
let notif = notification_xml(
"9df4b597-af9e-4dca-bdda-719cce2c4e28",
1,
snapshot_uri,
&snapshot_hash,
);
let mut map = HashMap::new();
map.insert(notification_uri.to_string(), notif);
map.insert(snapshot_uri.to_string(), snapshot);
struct RetryThenMap {
inner: MapFetcher,
notification_uri: String,
fail_times: usize,
notification_calls: AtomicUsize, notification_calls: AtomicUsize,
} }
impl HttpFetcher for RetryThenMap { impl HttpFetcher for AlwaysFailHttp {
fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> { fn fetch(&self, _uri: &str) -> Result<Vec<u8>, String> {
if uri == self.notification_uri { self.notification_calls.fetch_add(1, Ordering::SeqCst);
let n = self.notification_calls.fetch_add(1, Ordering::SeqCst); Err("http request failed: simulated transient".to_string())
if n < self.fail_times {
return Err("http request failed: simulated transient".to_string());
}
}
self.inner.fetch(uri)
} }
} }
let http = RetryThenMap { struct SingleObjectRsync {
inner: MapFetcher { map }, uri: String,
notification_uri: notification_uri.to_string(), bytes: Vec<u8>,
fail_times: 2, }
impl RsyncFetcher for SingleObjectRsync {
fn fetch_objects(
&self,
_rsync_base_uri: &str,
) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
Ok(vec![(self.uri.clone(), self.bytes.clone())])
}
}
let http = AlwaysFailHttp {
notification_calls: AtomicUsize::new(0), notification_calls: AtomicUsize::new(0),
}; };
@ -1394,29 +1362,26 @@ mod tests {
Some(notification_uri), Some(notification_uri),
"rsync://example.test/repo/", "rsync://example.test/repo/",
&http, &http,
&PanicRsyncFetcher, &SingleObjectRsync {
uri: published_uri.to_string(),
bytes: published_bytes.to_vec(),
},
Some(&timing), Some(&timing),
Some(&download_log), Some(&download_log),
) )
.expect("sync ok"); .expect("sync ok");
assert_eq!(out.source, RepoSyncSource::Rrdp); assert_eq!(out.source, RepoSyncSource::Rsync);
assert_current_object(&store, published_uri, published_bytes); assert_current_object(&store, published_uri, published_bytes);
assert_eq!(http.notification_calls.load(Ordering::SeqCst), 1);
let events = download_log.snapshot_events(); let events = download_log.snapshot_events();
assert_eq!(events.len(), 4, "expected 3x notification + 1x snapshot"); assert_eq!(events.len(), 2, "expected 1x notification + 1x rsync");
assert_eq!( assert_eq!(
events events
.iter() .iter()
.filter(|e| e.kind == AuditDownloadKind::RrdpNotification) .filter(|e| e.kind == AuditDownloadKind::RrdpNotification)
.count(), .count(),
3
);
assert_eq!(
events
.iter()
.filter(|e| e.kind == AuditDownloadKind::RrdpSnapshot)
.count(),
1 1
); );
assert_eq!( assert_eq!(
@ -1424,7 +1389,14 @@ mod tests {
.iter() .iter()
.filter(|e| e.kind == AuditDownloadKind::RrdpNotification && !e.success) .filter(|e| e.kind == AuditDownloadKind::RrdpNotification && !e.success)
.count(), .count(),
2 1
);
assert_eq!(
events
.iter()
.filter(|e| e.kind == AuditDownloadKind::Rsync)
.count(),
1
); );
let v = timing_to_json(temp.path(), &timing); let v = timing_to_json(temp.path(), &timing);
@ -1433,17 +1405,17 @@ mod tests {
counts counts
.get("rrdp_retry_attempt_total") .get("rrdp_retry_attempt_total")
.and_then(|v| v.as_u64()), .and_then(|v| v.as_u64()),
Some(3) Some(1)
); );
assert_eq!( assert_eq!(
counts counts
.get("rrdp_retry_success_total") .get("repo_sync_rrdp_failed_total")
.and_then(|v| v.as_u64()), .and_then(|v| v.as_u64()),
Some(1) Some(1)
); );
assert_eq!( assert_eq!(
counts counts
.get("repo_sync_rrdp_ok_total") .get("repo_sync_rsync_fallback_ok_total")
.and_then(|v| v.as_u64()), .and_then(|v| v.as_u64()),
Some(1) Some(1)
); );
@ -1749,7 +1721,11 @@ mod tests {
assert_eq!(out.source, RepoSyncSource::Rsync); assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.objects_written, 1); assert_eq!(out.objects_written, 1);
assert_eq!(out.warnings.len(), 0); assert_eq!(out.warnings.len(), 0);
assert_current_object(&store, "rsync://rsync.example.test/repo/sub/fallback.cer", b"cer"); assert_current_object(
&store,
"rsync://rsync.example.test/repo/sub/fallback.cer",
b"cer",
);
} }
#[test] #[test]
@ -1995,7 +1971,11 @@ mod tests {
assert_eq!(out.source, RepoSyncSource::Rsync); assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.objects_written, 2); assert_eq!(out.objects_written, 2);
assert_current_object(&store, "rsync://rsync.example.test/repo/a.mft", b"base"); assert_current_object(&store, "rsync://rsync.example.test/repo/a.mft", b"base");
assert_current_object(&store, "rsync://rsync.example.test/repo/sub/x.cer", b"overlay-cer"); assert_current_object(
&store,
"rsync://rsync.example.test/repo/sub/x.cer",
b"overlay-cer",
);
} }
#[test] #[test]

View File

@ -5,12 +5,10 @@ use crate::storage::{RocksStore, RrdpDeltaOp, RrdpSourceSyncState};
use crate::sync::store_projection::{ use crate::sync::store_projection::{
build_repository_view_present_entry, build_repository_view_withdrawn_entry, build_repository_view_present_entry, build_repository_view_withdrawn_entry,
build_rrdp_source_member_present_record, build_rrdp_source_member_withdrawn_record, build_rrdp_source_member_present_record, build_rrdp_source_member_withdrawn_record,
build_rrdp_uri_owner_active_record, build_rrdp_uri_owner_withdrawn_record, build_rrdp_uri_owner_active_record, build_rrdp_uri_owner_withdrawn_record, compute_sha256_hex,
compute_sha256_hex, current_rrdp_owner_is, ensure_rrdp_uri_can_be_owned_by, current_rrdp_owner_is, ensure_rrdp_uri_can_be_owned_by, prepare_raw_by_hash_evidence_batch,
prepare_raw_by_hash_evidence_batch, put_repository_view_present, put_repository_view_withdrawn, put_rrdp_source_member_present,
put_repository_view_present, put_repository_view_withdrawn, put_rrdp_source_member_withdrawn, put_rrdp_uri_owner_active, put_rrdp_uri_owner_withdrawn,
put_rrdp_source_member_present, put_rrdp_source_member_withdrawn,
put_rrdp_uri_owner_active, put_rrdp_uri_owner_withdrawn,
update_rrdp_source_record_on_success, upsert_raw_by_hash_evidence, update_rrdp_source_record_on_success, upsert_raw_by_hash_evidence,
}; };
use base64::Engine; use base64::Engine;
@ -1228,18 +1226,14 @@ fn apply_snapshot(
} }
let session_id = expected_session_id.to_string(); let session_id = expected_session_id.to_string();
let prepared_raw = prepare_raw_by_hash_evidence_batch(store, &published) let prepared_raw =
.map_err(RrdpSyncError::Storage)?; prepare_raw_by_hash_evidence_batch(store, &published).map_err(RrdpSyncError::Storage)?;
let mut repository_view_entries = Vec::with_capacity(published.len() + withdrawn.len()); let mut repository_view_entries = Vec::with_capacity(published.len() + withdrawn.len());
let mut member_records = Vec::with_capacity(published.len() + withdrawn.len()); let mut member_records = Vec::with_capacity(published.len() + withdrawn.len());
let mut owner_records = Vec::with_capacity(published.len() + withdrawn.len()); let mut owner_records = Vec::with_capacity(published.len() + withdrawn.len());
for (uri, _bytes) in &published { for (uri, _bytes) in &published {
let current_hash = prepared_raw let current_hash = prepared_raw.uri_to_hash.get(uri).cloned().ok_or_else(|| {
.uri_to_hash
.get(uri)
.cloned()
.ok_or_else(|| {
RrdpSyncError::Storage(format!("missing raw_by_hash mapping for {uri}")) RrdpSyncError::Storage(format!("missing raw_by_hash mapping for {uri}"))
})?; })?;
repository_view_entries.push(build_repository_view_present_entry( repository_view_entries.push(build_repository_view_present_entry(

View File

@ -56,7 +56,11 @@ pub fn prepare_raw_by_hash_evidence_batch(
} }
let mut changed = false; let mut changed = false;
for uri in pending_entry.origin_uris.drain(..) { for uri in pending_entry.origin_uris.drain(..) {
if !existing.origin_uris.iter().any(|existing_uri| existing_uri == &uri) { if !existing
.origin_uris
.iter()
.any(|existing_uri| existing_uri == &uri)
{
existing.origin_uris.push(uri); existing.origin_uris.push(uri);
changed = true; changed = true;
} }
@ -195,10 +199,8 @@ pub fn upsert_raw_by_hash_evidence(
rsync_uri: &str, rsync_uri: &str,
bytes: &[u8], bytes: &[u8],
) -> Result<String, String> { ) -> Result<String, String> {
let prepared = prepare_raw_by_hash_evidence_batch( let prepared =
store, prepare_raw_by_hash_evidence_batch(store, &[(rsync_uri.to_string(), bytes.to_vec())])?;
&[(rsync_uri.to_string(), bytes.to_vec())],
)?;
store store
.put_raw_by_hash_entries_batch_unchecked(&prepared.entries_to_write) .put_raw_by_hash_entries_batch_unchecked(&prepared.entries_to_write)
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
@ -277,7 +279,13 @@ pub fn put_rrdp_source_member_present(
rsync_uri: &str, rsync_uri: &str,
current_hash: &str, current_hash: &str,
) -> Result<(), String> { ) -> Result<(), String> {
let record = build_rrdp_source_member_present_record(notification_uri, session_id, serial, rsync_uri, current_hash); let record = build_rrdp_source_member_present_record(
notification_uri,
session_id,
serial,
rsync_uri,
current_hash,
);
store store
.put_rrdp_source_member_record(&record) .put_rrdp_source_member_record(&record)
.map_err(|e| e.to_string()) .map_err(|e| e.to_string())
@ -291,7 +299,13 @@ pub fn put_rrdp_source_member_withdrawn(
rsync_uri: &str, rsync_uri: &str,
current_hash: Option<String>, current_hash: Option<String>,
) -> Result<(), String> { ) -> Result<(), String> {
let record = build_rrdp_source_member_withdrawn_record(notification_uri, session_id, serial, rsync_uri, current_hash); let record = build_rrdp_source_member_withdrawn_record(
notification_uri,
session_id,
serial,
rsync_uri,
current_hash,
);
store store
.put_rrdp_source_member_record(&record) .put_rrdp_source_member_record(&record)
.map_err(|e| e.to_string()) .map_err(|e| e.to_string())
@ -305,7 +319,13 @@ pub fn put_rrdp_uri_owner_active(
rsync_uri: &str, rsync_uri: &str,
current_hash: &str, current_hash: &str,
) -> Result<(), String> { ) -> Result<(), String> {
let record = build_rrdp_uri_owner_active_record(notification_uri, session_id, serial, rsync_uri, current_hash); let record = build_rrdp_uri_owner_active_record(
notification_uri,
session_id,
serial,
rsync_uri,
current_hash,
);
store store
.put_rrdp_uri_owner_record(&record) .put_rrdp_uri_owner_record(&record)
.map_err(|e| e.to_string()) .map_err(|e| e.to_string())
@ -319,7 +339,13 @@ pub fn put_rrdp_uri_owner_withdrawn(
rsync_uri: &str, rsync_uri: &str,
current_hash: Option<String>, current_hash: Option<String>,
) -> Result<(), String> { ) -> Result<(), String> {
let record = build_rrdp_uri_owner_withdrawn_record(notification_uri, session_id, serial, rsync_uri, current_hash); let record = build_rrdp_uri_owner_withdrawn_record(
notification_uri,
session_id,
serial,
rsync_uri,
current_hash,
);
store store
.put_rrdp_uri_owner_record(&record) .put_rrdp_uri_owner_record(&record)
.map_err(|e| e.to_string()) .map_err(|e| e.to_string())

View File

@ -29,7 +29,8 @@ pub struct ValidatedSubordinateCaLite {
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug, Default)]
pub struct IssuerEffectiveResourcesIndex { pub struct IssuerEffectiveResourcesIndex {
parent_ip_by_afi_items: Option<BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>>, parent_ip_by_afi_items:
Option<BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>>,
parent_ip_merged_intervals: HashMap<crate::data_model::rc::Afi, Vec<(Vec<u8>, Vec<u8>)>>, parent_ip_merged_intervals: HashMap<crate::data_model::rc::Afi, Vec<(Vec<u8>, Vec<u8>)>>,
parent_asnum_intervals: Option<Vec<(u32, u32)>>, parent_asnum_intervals: Option<Vec<(u32, u32)>>,
parent_rdi_intervals: Option<Vec<(u32, u32)>>, parent_rdi_intervals: Option<Vec<(u32, u32)>>,
@ -40,21 +41,18 @@ impl IssuerEffectiveResourcesIndex {
issuer_effective_ip: Option<&IpResourceSet>, issuer_effective_ip: Option<&IpResourceSet>,
issuer_effective_as: Option<&AsResourceSet>, issuer_effective_as: Option<&AsResourceSet>,
) -> Result<Self, CaPathError> { ) -> Result<Self, CaPathError> {
let parent_ip_by_afi_items = issuer_effective_ip.map(ip_resources_by_afi_items).transpose()?; let parent_ip_by_afi_items = issuer_effective_ip
.map(ip_resources_by_afi_items)
.transpose()?;
let parent_ip_merged_intervals = issuer_effective_ip let parent_ip_merged_intervals = issuer_effective_ip
.map(ip_resources_to_merged_intervals_by_afi) .map(ip_resources_to_merged_intervals_by_afi)
.unwrap_or_default(); .unwrap_or_default();
let parent_asnum_intervals = issuer_effective_as.and_then(|resources| { let parent_asnum_intervals = issuer_effective_as
resources .and_then(|resources| resources.asnum.as_ref().map(as_choice_to_merged_intervals));
.asnum let parent_rdi_intervals = issuer_effective_as
.as_ref() .and_then(|resources| resources.rdi.as_ref().map(as_choice_to_merged_intervals));
.map(as_choice_to_merged_intervals)
});
let parent_rdi_intervals = issuer_effective_as.and_then(|resources| {
resources.rdi.as_ref().map(as_choice_to_merged_intervals)
});
Ok(Self { Ok(Self {
parent_ip_by_afi_items, parent_ip_by_afi_items,
@ -512,7 +510,9 @@ fn resolve_child_ip_resources(
child_ip: Option<&IpResourceSet>, child_ip: Option<&IpResourceSet>,
issuer_effective: Option<&IpResourceSet>, issuer_effective: Option<&IpResourceSet>,
) -> Result<Option<IpResourceSet>, CaPathError> { ) -> Result<Option<IpResourceSet>, CaPathError> {
let precomputed_parent_by_afi = issuer_effective.map(ip_resources_by_afi_items).transpose()?; let precomputed_parent_by_afi = issuer_effective
.map(ip_resources_by_afi_items)
.transpose()?;
let precomputed_parent_intervals = issuer_effective let precomputed_parent_intervals = issuer_effective
.map(ip_resources_to_merged_intervals_by_afi) .map(ip_resources_to_merged_intervals_by_afi)
.unwrap_or_default(); .unwrap_or_default();
@ -527,7 +527,9 @@ fn resolve_child_ip_resources(
fn resolve_child_ip_resources_indexed( fn resolve_child_ip_resources_indexed(
child_ip: Option<&IpResourceSet>, child_ip: Option<&IpResourceSet>,
issuer_effective: Option<&IpResourceSet>, issuer_effective: Option<&IpResourceSet>,
parent_by_afi: Option<&BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>>, parent_by_afi: Option<
&BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>,
>,
parent_intervals_by_afi: &HashMap<crate::data_model::rc::Afi, Vec<(Vec<u8>, Vec<u8>)>>, parent_intervals_by_afi: &HashMap<crate::data_model::rc::Afi, Vec<(Vec<u8>, Vec<u8>)>>,
) -> Result<Option<IpResourceSet>, CaPathError> { ) -> Result<Option<IpResourceSet>, CaPathError> {
let Some(child_ip) = child_ip else { let Some(child_ip) = child_ip else {
@ -583,15 +585,10 @@ fn resolve_child_as_resources(
child_as: Option<&AsResourceSet>, child_as: Option<&AsResourceSet>,
issuer_effective: Option<&AsResourceSet>, issuer_effective: Option<&AsResourceSet>,
) -> Result<Option<AsResourceSet>, CaPathError> { ) -> Result<Option<AsResourceSet>, CaPathError> {
let precomputed_asnum = issuer_effective.and_then(|resources| { let precomputed_asnum = issuer_effective
resources .and_then(|resources| resources.asnum.as_ref().map(as_choice_to_merged_intervals));
.asnum let precomputed_rdi = issuer_effective
.as_ref() .and_then(|resources| resources.rdi.as_ref().map(as_choice_to_merged_intervals));
.map(as_choice_to_merged_intervals)
});
let precomputed_rdi = issuer_effective.and_then(|resources| {
resources.rdi.as_ref().map(as_choice_to_merged_intervals)
});
resolve_child_as_resources_indexed( resolve_child_as_resources_indexed(
child_as, child_as,
issuer_effective, issuer_effective,
@ -974,8 +971,8 @@ mod tests {
use super::*; use super::*;
use crate::data_model::common::X509NameDer; use crate::data_model::common::X509NameDer;
use crate::data_model::rc::{ use crate::data_model::rc::{
Afi, AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpAddressFamily, Afi, AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpAddressFamily, IpAddressOrRange,
IpAddressOrRange, IpResourceSet, IpResourceSet,
}; };
use crate::data_model::rc::{ use crate::data_model::rc::{
RcExtensions, ResourceCertKind, ResourceCertificate, RpkixTbsCertificate, RcExtensions, ResourceCertKind, ResourceCertificate, RpkixTbsCertificate,
@ -1470,17 +1467,30 @@ mod tests {
}], }],
}; };
let parent_as = AsResourceSet { let parent_as = AsResourceSet {
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Range { asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![
AsIdOrRange::Range {
min: 64500, min: 64500,
max: 64599, max: 64599,
}])), },
rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(65000)])), ])),
rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
65000,
)])),
}; };
let idx = IssuerEffectiveResourcesIndex::from_effective_resources(Some(&parent_ip), Some(&parent_as)) let idx = IssuerEffectiveResourcesIndex::from_effective_resources(
Some(&parent_ip),
Some(&parent_as),
)
.expect("index builds"); .expect("index builds");
assert_eq!(idx.parent_ip_by_afi_items.as_ref().map(|v| v.len()), Some(1)); assert_eq!(
idx.parent_ip_by_afi_items.as_ref().map(|v| v.len()),
Some(1)
);
assert_eq!(idx.parent_ip_merged_intervals.len(), 1); assert_eq!(idx.parent_ip_merged_intervals.len(), 1);
assert_eq!(idx.parent_asnum_intervals.as_ref().map(|v| v.len()), Some(1)); assert_eq!(
idx.parent_asnum_intervals.as_ref().map(|v| v.len()),
Some(1)
);
assert_eq!(idx.parent_rdi_intervals.as_ref().map(|v| v.len()), Some(1)); assert_eq!(idx.parent_rdi_intervals.as_ref().map(|v| v.len()), Some(1));
let child_ip_subset = IpResourceSet { let child_ip_subset = IpResourceSet {
@ -1495,14 +1505,16 @@ mod tests {
)]), )]),
}], }],
}; };
assert!(resolve_child_ip_resources_indexed( assert!(
resolve_child_ip_resources_indexed(
Some(&child_ip_subset), Some(&child_ip_subset),
Some(&parent_ip), Some(&parent_ip),
idx.parent_ip_by_afi_items.as_ref(), idx.parent_ip_by_afi_items.as_ref(),
&idx.parent_ip_merged_intervals, &idx.parent_ip_merged_intervals,
) )
.expect("subset should resolve") .expect("subset should resolve")
.is_some()); .is_some()
);
let child_ip_bad = IpResourceSet { let child_ip_bad = IpResourceSet {
families: vec![IpAddressFamily { families: vec![IpAddressFamily {
@ -1526,20 +1538,28 @@ mod tests {
assert!(matches!(err, CaPathError::ResourcesNotSubset)); assert!(matches!(err, CaPathError::ResourcesNotSubset));
let child_as_subset = AsResourceSet { let child_as_subset = AsResourceSet {
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(64542)])), asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(65000)])), 64542,
)])),
rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
65000,
)])),
}; };
assert!(resolve_child_as_resources_indexed( assert!(
resolve_child_as_resources_indexed(
Some(&child_as_subset), Some(&child_as_subset),
Some(&parent_as), Some(&parent_as),
idx.parent_asnum_intervals.as_deref(), idx.parent_asnum_intervals.as_deref(),
idx.parent_rdi_intervals.as_deref(), idx.parent_rdi_intervals.as_deref(),
) )
.expect("subset as resolves") .expect("subset as resolves")
.is_some()); .is_some()
);
let child_as_bad = AsResourceSet { let child_as_bad = AsResourceSet {
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(65123)])), asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
65123,
)])),
rdi: None, rdi: None,
}; };
let err = resolve_child_as_resources_indexed( let err = resolve_child_as_resources_indexed(

View File

@ -196,7 +196,9 @@ fn rsync_parent_uri(ta_rsync_uri: &str) -> Result<String, String> {
.ok_or_else(|| format!("missing path in rsync URI: {ta_rsync_uri}"))? .ok_or_else(|| format!("missing path in rsync URI: {ta_rsync_uri}"))?
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if segments.is_empty() || segments.last().copied().unwrap_or_default().is_empty() { if segments.is_empty() || segments.last().copied().unwrap_or_default().is_empty() {
return Err(format!("rsync URI must reference a file object: {ta_rsync_uri}")); return Err(format!(
"rsync URI must reference a file object: {ta_rsync_uri}"
));
} }
let parent_segments = &segments[..segments.len() - 1]; let parent_segments = &segments[..segments.len() - 1];
let mut parent = format!("rsync://{host}/"); let mut parent = format!("rsync://{host}/");
@ -248,29 +250,32 @@ mod tests {
.clone(); .clone();
let td = tempfile::tempdir().unwrap(); let td = tempfile::tempdir().unwrap();
let mirror_root = td.path().join(rsync_uri.host_str().unwrap()).join("repository"); let mirror_root = td
.path()
.join(rsync_uri.host_str().unwrap())
.join("repository");
std::fs::create_dir_all(&mirror_root).unwrap(); std::fs::create_dir_all(&mirror_root).unwrap();
std::fs::write( std::fs::write(mirror_root.join("apnic-rpki-root-iana-origin.cer"), ta_der).unwrap();
mirror_root.join("apnic-rpki-root-iana-origin.cer"),
ta_der,
)
.unwrap();
let http = crate::fetch::http::BlockingHttpFetcher::new( let http = crate::fetch::http::BlockingHttpFetcher::new(
crate::fetch::http::HttpFetcherConfig::default(), crate::fetch::http::HttpFetcherConfig::default(),
) )
.unwrap(); .unwrap();
let rsync = LocalDirRsyncFetcher::new( let rsync = LocalDirRsyncFetcher::new(
td.path().join(rsync_uri.host_str().unwrap()).join("repository"), td.path()
.join(rsync_uri.host_str().unwrap())
.join("repository"),
); );
let discovery = discover_root_ca_instance_from_tal_with_fetchers(&http, &rsync, tal, None) let discovery = discover_root_ca_instance_from_tal_with_fetchers(&http, &rsync, tal, None)
.expect("discover via rsync TA"); .expect("discover via rsync TA");
assert!(discovery assert!(
discovery
.trust_anchor .trust_anchor
.resolved_ta_uri .resolved_ta_uri
.unwrap() .unwrap()
.as_str() .as_str()
.starts_with("rsync://")); .starts_with("rsync://")
);
} }
} }

View File

@ -1,3 +1,4 @@
use crate::blob_store::RawObjectStore;
use crate::data_model::manifest::{ManifestDecodeError, ManifestObject, ManifestValidateError}; use crate::data_model::manifest::{ManifestDecodeError, ManifestObject, ManifestValidateError};
use crate::data_model::signed_object::SignedObjectVerifyError; use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{CaFailedFetchPolicy, Policy}; use crate::policy::{CaFailedFetchPolicy, Policy};
@ -448,8 +449,8 @@ pub fn load_current_instance_vcir_publication_point(
) )
})?; })?;
let manifest_entry = store let manifest_bytes = store
.get_raw_by_hash_entry(&manifest_artifact.sha256)? .get_blob_bytes(&manifest_artifact.sha256)?
.ok_or_else(|| ManifestReuseError::MissingManifestRaw(manifest_artifact.sha256.clone()))?; .ok_or_else(|| ManifestReuseError::MissingManifestRaw(manifest_artifact.sha256.clone()))?;
let mut seen = HashSet::new(); let mut seen = HashSet::new();
@ -468,12 +469,12 @@ pub fn load_current_instance_vcir_publication_point(
if !seen.insert(uri.clone()) { if !seen.insert(uri.clone()) {
continue; continue;
} }
let entry = store let entry_bytes = store
.get_raw_by_hash_entry(&artifact.sha256)? .get_blob_bytes(&artifact.sha256)?
.ok_or_else(|| ManifestReuseError::MissingArtifactRaw { .ok_or_else(|| ManifestReuseError::MissingArtifactRaw {
rsync_uri: uri.clone(), rsync_uri: uri.clone(),
})?; })?;
files.push(PackFile::from_bytes_compute_sha256(uri, entry.bytes)); files.push(PackFile::from_bytes_compute_sha256(uri, entry_bytes));
} }
Ok(PublicationPointSnapshot { Ok(PublicationPointSnapshot {
@ -493,7 +494,7 @@ pub fn load_current_instance_vcir_publication_point(
.validated_manifest_next_update .validated_manifest_next_update
.clone(), .clone(),
verified_at: vcir.last_successful_validation_time.clone(), verified_at: vcir.last_successful_validation_time.clone(),
manifest_bytes: manifest_entry.bytes, manifest_bytes,
files, files,
}) })
} }
@ -836,12 +837,7 @@ mod tests {
entry entry
} }
fn put_current_object( fn put_current_object(store: &RocksStore, rsync_uri: &str, bytes: Vec<u8>, object_type: &str) {
store: &RocksStore,
rsync_uri: &str,
bytes: Vec<u8>,
object_type: &str,
) {
let hash = hex::encode(sha2::Sha256::digest(&bytes)); let hash = hex::encode(sha2::Sha256::digest(&bytes));
store store
.put_raw_by_hash_entry(&raw_by_hash_entry(rsync_uri, bytes, object_type)) .put_raw_by_hash_entry(&raw_by_hash_entry(rsync_uri, bytes, object_type))

View File

@ -11,9 +11,7 @@ use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{Policy, SignedObjectFailurePolicy}; use crate::policy::{Policy, SignedObjectFailurePolicy};
use crate::report::{RfcRef, Warning}; use crate::report::{RfcRef, Warning};
use crate::storage::{PackFile, PackTime, VcirLocalOutput, VcirOutputType}; use crate::storage::{PackFile, PackTime, VcirLocalOutput, VcirOutputType};
use crate::validation::cert_path::{ use crate::validation::cert_path::{CertPathError, validate_ee_cert_path_with_predecoded_ee};
CertPathError, validate_ee_cert_path_with_predecoded_ee,
};
use crate::validation::manifest::PublicationPointData; use crate::validation::manifest::PublicationPointData;
use crate::validation::publication_point::PublicationPointSnapshot; use crate::validation::publication_point::PublicationPointSnapshot;
use x509_parser::prelude::FromDer; use x509_parser::prelude::FromDer;
@ -603,11 +601,7 @@ fn process_roa_with_issuer(
let ee = &roa.signed_object.signed_data.certificates[0].resource_cert; let ee = &roa.signed_object.signed_data.certificates[0].resource_cert;
let ee_der = &roa.signed_object.signed_data.certificates[0].raw_der; let ee_der = &roa.signed_object.signed_data.certificates[0].raw_der;
let ee_crldp_uris = ee let ee_crldp_uris = ee.tbs.extensions.crl_distribution_points_uris.as_ref();
.tbs
.extensions
.crl_distribution_points_uris
.as_ref();
let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?; let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?;
let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?; let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?;
@ -715,11 +709,7 @@ fn process_aspa_with_issuer(
let ee = &aspa.signed_object.signed_data.certificates[0].resource_cert; let ee = &aspa.signed_object.signed_data.certificates[0].resource_cert;
let ee_der = &aspa.signed_object.signed_data.certificates[0].raw_der; let ee_der = &aspa.signed_object.signed_data.certificates[0].raw_der;
let ee_crldp_uris = ee let ee_crldp_uris = ee.tbs.extensions.crl_distribution_points_uris.as_ref();
.tbs
.extensions
.crl_distribution_points_uris
.as_ref();
let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?; let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?;
let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?; let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?;
@ -794,7 +784,6 @@ fn process_aspa_with_issuer(
Ok((attestation, local_output)) Ok((attestation, local_output))
} }
fn vrp_prefix_to_string(vrp: &Vrp) -> String { fn vrp_prefix_to_string(vrp: &Vrp) -> String {
let prefix = &vrp.prefix; let prefix = &vrp.prefix;
match prefix.afi { match prefix.afi {

View File

@ -15,8 +15,7 @@ use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher;
use crate::sync::rrdp::Fetcher; use crate::sync::rrdp::Fetcher;
use crate::validation::from_tal::{ use crate::validation::from_tal::{
DiscoveredRootCaInstance, FromTalError, discover_root_ca_instance_from_tal_and_ta_der, DiscoveredRootCaInstance, FromTalError, discover_root_ca_instance_from_tal_and_ta_der,
discover_root_ca_instance_from_tal_with_fetchers, discover_root_ca_instance_from_tal_url, discover_root_ca_instance_from_tal_with_fetchers,
discover_root_ca_instance_from_tal_url,
}; };
use crate::validation::tree::{ use crate::validation::tree::{
CaInstanceHandle, TreeRunAuditOutput, TreeRunConfig, TreeRunError, TreeRunOutput, CaInstanceHandle, TreeRunAuditOutput, TreeRunConfig, TreeRunError, TreeRunOutput,
@ -307,8 +306,12 @@ pub fn run_tree_from_tal_bytes_serial_audit(
config: &TreeRunConfig, config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> { ) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let tal = crate::data_model::tal::Tal::decode_bytes(tal_bytes).map_err(FromTalError::from)?; let tal = crate::data_model::tal::Tal::decode_bytes(tal_bytes).map_err(FromTalError::from)?;
let discovery = let discovery = discover_root_ca_instance_from_tal_with_fetchers(
discover_root_ca_instance_from_tal_with_fetchers(http_fetcher, rsync_fetcher, tal, tal_uri)?; http_fetcher,
rsync_fetcher,
tal,
tal_uri,
)?;
let download_log = DownloadLogHandle::new(); let download_log = DownloadLogHandle::new();
let runner = Rpkiv1PublicationPointRunner { let runner = Rpkiv1PublicationPointRunner {
@ -362,8 +365,12 @@ pub fn run_tree_from_tal_bytes_serial_audit_with_timing(
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> { ) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let _tal = timing.span_phase("tal_bootstrap"); let _tal = timing.span_phase("tal_bootstrap");
let tal = crate::data_model::tal::Tal::decode_bytes(tal_bytes).map_err(FromTalError::from)?; let tal = crate::data_model::tal::Tal::decode_bytes(tal_bytes).map_err(FromTalError::from)?;
let discovery = let discovery = discover_root_ca_instance_from_tal_with_fetchers(
discover_root_ca_instance_from_tal_with_fetchers(http_fetcher, rsync_fetcher, tal, tal_uri)?; http_fetcher,
rsync_fetcher,
tal,
tal_uri,
)?;
drop(_tal); drop(_tal);
let download_log = DownloadLogHandle::new(); let download_log = DownloadLogHandle::new();
@ -531,7 +538,10 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial(
let discovery = let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?; discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let replay_index = Arc::new( let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path) ReplayArchiveIndex::load_allow_missing_rsync_modules(
payload_archive_root,
payload_locks_path,
)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?, .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
); );
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone()) let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
@ -579,7 +589,10 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
let discovery = let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?; discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let replay_index = Arc::new( let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path) ReplayArchiveIndex::load_allow_missing_rsync_modules(
payload_archive_root,
payload_locks_path,
)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?, .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
); );
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone()) let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
@ -642,7 +655,10 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing(
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?; discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
drop(_tal); drop(_tal);
let replay_index = Arc::new( let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path) ReplayArchiveIndex::load_allow_missing_rsync_modules(
payload_archive_root,
payload_locks_path,
)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?, .map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
); );
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone()) let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
@ -1088,10 +1104,12 @@ mod replay_api_tests {
.expect("read apnic tal fixture"); .expect("read apnic tal fixture");
let ta_der = let ta_der =
std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture"); std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
let archive_root = let archive_root = std::path::PathBuf::from(
std::path::PathBuf::from("../../rpki/target/live/20260316-112341-multi-final3/apnic/base-payload-archive"); "../../rpki/target/live/20260316-112341-multi-final3/apnic/base-payload-archive",
let locks_path = );
std::path::PathBuf::from("../../rpki/target/live/20260316-112341-multi-final3/apnic/base-locks.json"); let locks_path = std::path::PathBuf::from(
"../../rpki/target/live/20260316-112341-multi-final3/apnic/base-locks.json",
);
let validation_time = time::OffsetDateTime::parse("2026-03-16T11:49:48+08:00", &Rfc3339) let validation_time = time::OffsetDateTime::parse("2026-03-16T11:49:48+08:00", &Rfc3339)
.expect("parse validation time"); .expect("parse validation time");
(tal_bytes, ta_der, archive_root, locks_path, validation_time) (tal_bytes, ta_der, archive_root, locks_path, validation_time)
@ -1160,16 +1178,14 @@ mod replay_api_tests {
let temp = tempfile::tempdir().expect("tempdir"); let temp = tempfile::tempdir().expect("tempdir");
let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db"); let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db");
let (tal_bytes, ta_der, archive_root, locks_path, validation_time) = apnic_replay_inputs(); let (tal_bytes, ta_der, archive_root, locks_path, validation_time) = apnic_replay_inputs();
assert!( if !archive_root.is_dir() || !locks_path.is_file() {
archive_root.is_dir(), eprintln!(
"payload replay archive missing: {}", "skipping payload replay api test; missing fixtures: archive={} locks={}",
archive_root.display() archive_root.display(),
);
assert!(
locks_path.is_file(),
"payload replay locks missing: {}",
locks_path.display() locks_path.display()
); );
return;
}
let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&store, &store,
@ -1198,15 +1214,22 @@ mod replay_api_tests {
); );
} }
#[test] #[test]
fn payload_replay_api_root_only_apnic_multi_rir_bundle_runs_with_lenient_rsync_modules() { fn payload_replay_api_root_only_apnic_multi_rir_bundle_runs_with_lenient_rsync_modules() {
let temp = tempfile::tempdir().expect("tempdir"); let temp = tempfile::tempdir().expect("tempdir");
let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db"); let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db");
let (tal_bytes, ta_der, archive_root, locks_path, validation_time) = let (tal_bytes, ta_der, archive_root, locks_path, validation_time) =
apnic_multi_rir_replay_inputs(); apnic_multi_rir_replay_inputs();
assert!(archive_root.is_dir(), "payload replay archive missing: {}", archive_root.display()); assert!(
assert!(locks_path.is_file(), "payload replay locks missing: {}", locks_path.display()); archive_root.is_dir(),
"payload replay archive missing: {}",
archive_root.display()
);
assert!(
locks_path.is_file(),
"payload replay locks missing: {}",
locks_path.display()
);
let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit( let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&store, &store,
@ -1235,16 +1258,14 @@ mod replay_api_tests {
let db_path = temp.path().join("db"); let db_path = temp.path().join("db");
let store = crate::storage::RocksStore::open(&db_path).expect("open db"); let store = crate::storage::RocksStore::open(&db_path).expect("open db");
let (tal_bytes, ta_der, archive_root, locks_path, validation_time) = apnic_replay_inputs(); let (tal_bytes, ta_der, archive_root, locks_path, validation_time) = apnic_replay_inputs();
assert!( if !archive_root.is_dir() || !locks_path.is_file() {
archive_root.is_dir(), eprintln!(
"payload replay archive missing: {}", "skipping payload replay api timing test; missing fixtures: archive={} locks={}",
archive_root.display() archive_root.display(),
);
assert!(
locks_path.is_file(),
"payload replay locks missing: {}",
locks_path.display() locks_path.display()
); );
return;
}
let timing = TimingHandle::new(TimingMeta { let timing = TimingHandle::new(TimingMeta {
recorded_at_utc_rfc3339: "2026-03-13T03:00:00Z".to_string(), recorded_at_utc_rfc3339: "2026-03-13T03:00:00Z".to_string(),
@ -1366,26 +1387,20 @@ mod replay_api_tests {
delta_locks, delta_locks,
validation_time, validation_time,
) = apnic_delta_replay_inputs(); ) = apnic_delta_replay_inputs();
assert!( if !base_archive.is_dir()
base_archive.is_dir(), || !base_locks.is_file()
"base archive missing: {}", || !delta_archive.is_dir()
base_archive.display() || !delta_locks.is_file()
); {
assert!( eprintln!(
base_locks.is_file(), "skipping payload delta replay api test; missing fixtures: base_archive={} base_locks={} delta_archive={} delta_locks={}",
"base locks missing: {}", base_archive.display(),
base_locks.display() base_locks.display(),
); delta_archive.display(),
assert!(
delta_archive.is_dir(),
"delta archive missing: {}",
delta_archive.display()
);
assert!(
delta_locks.is_file(),
"delta locks missing: {}",
delta_locks.display() delta_locks.display()
); );
return;
}
let out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit( let out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
&store, &store,
@ -1425,6 +1440,20 @@ mod replay_api_tests {
delta_locks, delta_locks,
validation_time, validation_time,
) = apnic_delta_replay_inputs(); ) = apnic_delta_replay_inputs();
if !base_archive.is_dir()
|| !base_locks.is_file()
|| !delta_archive.is_dir()
|| !delta_locks.is_file()
{
eprintln!(
"skipping payload delta replay timing test; missing fixtures: base_archive={} base_locks={} delta_archive={} delta_locks={}",
base_archive.display(),
base_locks.display(),
delta_archive.display(),
delta_locks.display()
);
return;
}
let timing = TimingHandle::new(TimingMeta { let timing = TimingHandle::new(TimingMeta {
recorded_at_utc_rfc3339: "2026-03-16T00:00:00Z".to_string(), recorded_at_utc_rfc3339: "2026-03-16T00:00:00Z".to_string(),
validation_time_utc_rfc3339: "2026-03-15T10:00:00Z".to_string(), validation_time_utc_rfc3339: "2026-03-15T10:00:00Z".to_string(),

View File

@ -10,8 +10,8 @@ use crate::data_model::manifest::ManifestObject;
use crate::data_model::rc::ResourceCertificate; use crate::data_model::rc::ResourceCertificate;
use crate::data_model::roa::{RoaAfi, RoaObject}; use crate::data_model::roa::{RoaAfi, RoaObject};
use crate::data_model::router_cert::{ use crate::data_model::router_cert::{
BgpsecRouterCertificate, BgpsecRouterCertificateDecodeError, BgpsecRouterCertificate, BgpsecRouterCertificateDecodeError, BgpsecRouterCertificatePathError,
BgpsecRouterCertificatePathError, BgpsecRouterCertificateProfileError, BgpsecRouterCertificateProfileError,
}; };
use crate::fetch::rsync::RsyncFetcher; use crate::fetch::rsync::RsyncFetcher;
use crate::policy::Policy; use crate::policy::Policy;
@ -19,10 +19,9 @@ use crate::replay::archive::ReplayArchiveIndex;
use crate::replay::delta_archive::ReplayDeltaArchiveIndex; use crate::replay::delta_archive::ReplayDeltaArchiveIndex;
use crate::report::{RfcRef, Warning}; use crate::report::{RfcRef, Warning};
use crate::storage::{ use crate::storage::{
PackFile, PackTime, RawByHashEntry, RocksStore, PackFile, PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult, VcirArtifactKind,
ValidatedCaInstanceResult, VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry,
VcirAuditSummary, VcirChildEntry, VcirInstanceGate, VcirLocalOutput, VcirOutputType, VcirInstanceGate, VcirLocalOutput, VcirOutputType, VcirRelatedArtifact, VcirSummary,
VcirRelatedArtifact, VcirSummary,
}; };
use crate::sync::repo::{ use crate::sync::repo::{
sync_publication_point, sync_publication_point_replay, sync_publication_point_replay_delta, sync_publication_point, sync_publication_point_replay, sync_publication_point_replay_delta,
@ -37,7 +36,9 @@ use crate::validation::manifest::{
ManifestFreshError, PublicationPointData, PublicationPointSource, ManifestFreshError, PublicationPointData, PublicationPointSource,
process_manifest_publication_point_fresh_after_repo_sync, process_manifest_publication_point_fresh_after_repo_sync,
}; };
use crate::validation::objects::{AspaAttestation, RouterKeyPayload, Vrp, process_publication_point_for_issuer}; use crate::validation::objects::{
AspaAttestation, RouterKeyPayload, Vrp, process_publication_point_for_issuer,
};
use crate::validation::publication_point::PublicationPointSnapshot; use crate::validation::publication_point::PublicationPointSnapshot;
use crate::validation::tree::{ use crate::validation::tree::{
CaInstanceHandle, DiscoveredChildCaInstance, PublicationPointRunResult, PublicationPointRunner, CaInstanceHandle, DiscoveredChildCaInstance, PublicationPointRunResult, PublicationPointRunner,
@ -45,8 +46,8 @@ use crate::validation::tree::{
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use serde::Deserialize;
use base64::Engine as _; use base64::Engine as _;
use serde::Deserialize;
use serde_json::json; use serde_json::json;
use x509_parser::prelude::FromDer; use x509_parser::prelude::FromDer;
use x509_parser::x509::SubjectPublicKeyInfo; use x509_parser::x509::SubjectPublicKeyInfo;
@ -168,13 +169,19 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
} }
let repo_sync_started = std::time::Instant::now(); let repo_sync_started = std::time::Instant::now();
let (repo_sync_ok, repo_sync_err, repo_sync_source): (bool, Option<String>, Option<String>) = if skip_sync_due_to_dedup { let (repo_sync_ok, repo_sync_err, repo_sync_source, repo_sync_phase): (
bool,
Option<String>,
Option<String>,
Option<String>,
) = if skip_sync_due_to_dedup {
let source = if effective_notification_uri.is_some() { let source = if effective_notification_uri.is_some() {
Some("rrdp_dedup_skip".to_string()) Some("rrdp_dedup_skip".to_string())
} else { } else {
Some("rsync_dedup_skip".to_string()) Some("rsync_dedup_skip".to_string())
}; };
(true, None, source) let phase = source.clone();
(true, None, source, phase)
} else { } else {
let repo_key = effective_notification_uri.unwrap_or_else(|| ca.rsync_base_uri.as_str()); let repo_key = effective_notification_uri.unwrap_or_else(|| ca.rsync_base_uri.as_str());
let _repo_total = self let _repo_total = self
@ -249,7 +256,12 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
} }
warnings.extend(res.warnings); warnings.extend(res.warnings);
(true, None, Some(repo_sync_source_label(res.source).to_string())) (
true,
None,
Some(repo_sync_source_label(res.source).to_string()),
Some(repo_sync_phase_label(res.phase).to_string()),
)
} }
Err(e) => { Err(e) => {
if attempted_rrdp && self.rrdp_dedup { if attempted_rrdp && self.rrdp_dedup {
@ -267,7 +279,19 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
.with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5"), RfcRef("RFC 9286 §6.6")]) .with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5"), RfcRef("RFC 9286 §6.6")])
.with_context(&ca.rsync_base_uri), .with_context(&ca.rsync_base_uri),
); );
(false, Some(e.to_string()), None) (
false,
Some(e.to_string()),
None,
Some(
repo_sync_failure_phase_label(
attempted_rrdp,
original_notification_uri,
effective_notification_uri,
)
.to_string(),
),
)
} }
} }
}; };
@ -279,6 +303,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri, "publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_ok": repo_sync_ok, "repo_sync_ok": repo_sync_ok,
"repo_sync_source": repo_sync_source, "repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err, "repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms, "repo_sync_duration_ms": repo_sync_duration_ms,
}), }),
@ -367,6 +392,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
ca, ca,
PublicationPointSource::Fresh, PublicationPointSource::Fresh,
repo_sync_source.as_deref(), repo_sync_source.as_deref(),
repo_sync_phase.as_deref(),
Some(repo_sync_duration_ms), Some(repo_sync_duration_ms),
repo_sync_err.as_deref(), repo_sync_err.as_deref(),
&pack, &pack,
@ -390,6 +416,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri, "publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": "fresh", "source": "fresh",
"repo_sync_source": repo_sync_source, "repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms, "repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms, "total_duration_ms": total_duration_ms,
"warning_count": result.warnings.len(), "warning_count": result.warnings.len(),
@ -399,7 +426,8 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"child_count": result.discovered_children.len(), "child_count": result.discovered_children.len(),
}), }),
); );
if (total_duration_ms as f64) / 1000.0 >= crate::progress_log::slow_threshold_secs() { if (total_duration_ms as f64) / 1000.0 >= crate::progress_log::slow_threshold_secs()
{
crate::progress_log::emit( crate::progress_log::emit(
"publication_point_slow", "publication_point_slow",
serde_json::json!({ serde_json::json!({
@ -407,6 +435,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri, "publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": "fresh", "source": "fresh",
"repo_sync_source": repo_sync_source, "repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms, "repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms, "total_duration_ms": total_duration_ms,
}), }),
@ -424,11 +453,25 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri, "publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": "error", "source": "error",
"repo_sync_source": repo_sync_source, "repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms, "repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms, "total_duration_ms": total_duration_ms,
"error": fresh_err.to_string(), "error": fresh_err.to_string(),
}), }),
); );
crate::progress_log::emit(
"repo_terminal_failure",
serde_json::json!({
"manifest_rsync_uri": ca.manifest_rsync_uri,
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
"terminal_state": "stop_all_output",
"error": fresh_err.to_string(),
}),
);
Err(format!("{fresh_err}")) Err(format!("{fresh_err}"))
} }
crate::policy::CaFailedFetchPolicy::ReuseCurrentInstanceVcir => { crate::policy::CaFailedFetchPolicy::ReuseCurrentInstanceVcir => {
@ -444,6 +487,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
ca, ca,
projection.source, projection.source,
repo_sync_source.as_deref(), repo_sync_source.as_deref(),
repo_sync_phase.as_deref(),
Some(repo_sync_duration_ms), Some(repo_sync_duration_ms),
repo_sync_err.as_deref(), repo_sync_err.as_deref(),
projection.vcir.as_ref(), projection.vcir.as_ref(),
@ -468,6 +512,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri, "publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": source_label(result.source), "source": source_label(result.source),
"repo_sync_source": repo_sync_source, "repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms, "repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms, "total_duration_ms": total_duration_ms,
"warning_count": result.warnings.len(), "warning_count": result.warnings.len(),
@ -477,7 +522,55 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"child_count": result.discovered_children.len(), "child_count": result.discovered_children.len(),
}), }),
); );
if (total_duration_ms as f64) / 1000.0 >= crate::progress_log::slow_threshold_secs() { match result.source {
PublicationPointSource::VcirCurrentInstance if !repo_sync_ok => {
crate::progress_log::emit(
"rsync_failed_fallback_current_instance",
serde_json::json!({
"manifest_rsync_uri": ca.manifest_rsync_uri,
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
"terminal_state": "fallback_current_instance",
}),
);
}
PublicationPointSource::FailedFetchNoCache => {
if !repo_sync_ok {
crate::progress_log::emit(
"rsync_failed_no_cache",
serde_json::json!({
"manifest_rsync_uri": ca.manifest_rsync_uri,
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
"terminal_state": "failed_no_cache",
}),
);
}
crate::progress_log::emit(
"repo_terminal_failure",
serde_json::json!({
"manifest_rsync_uri": ca.manifest_rsync_uri,
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
"terminal_state": "failed_no_cache",
}),
);
}
PublicationPointSource::Fresh => {}
PublicationPointSource::VcirCurrentInstance => {}
}
if (total_duration_ms as f64) / 1000.0
>= crate::progress_log::slow_threshold_secs()
{
crate::progress_log::emit( crate::progress_log::emit(
"publication_point_slow", "publication_point_slow",
serde_json::json!({ serde_json::json!({
@ -485,6 +578,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri, "publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": source_label(result.source), "source": source_label(result.source),
"repo_sync_source": repo_sync_source, "repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms, "repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms, "total_duration_ms": total_duration_ms,
}), }),
@ -856,9 +950,7 @@ fn discover_children_from_fresh_snapshot_with_audit<P: PublicationPointData>(
sha256_hex: sha256_hex_from_32(&f.sha256), sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::RouterCertificate, kind: AuditObjectKind::RouterCertificate,
result: AuditObjectResult::Error, result: AuditObjectResult::Error,
detail: Some(format!( detail: Some(format!("router certificate validation failed: {err}")),
"router certificate validation failed: {err}"
)),
}); });
} }
} }
@ -977,7 +1069,10 @@ fn discover_children_from_fresh_snapshot_with_audit<P: PublicationPointData>(
t.record_phase_nanos("child_select_issuer_crl_total", select_crl_nanos); t.record_phase_nanos("child_select_issuer_crl_total", select_crl_nanos);
t.record_phase_nanos("child_decode_certificate_total", child_decode_nanos); t.record_phase_nanos("child_decode_certificate_total", child_decode_nanos);
t.record_phase_nanos("child_validate_subordinate_total", validate_sub_ca_nanos); t.record_phase_nanos("child_validate_subordinate_total", validate_sub_ca_nanos);
t.record_phase_nanos("child_validate_router_certificate_total", validate_router_nanos); t.record_phase_nanos(
"child_validate_router_certificate_total",
validate_router_nanos,
);
t.record_phase_nanos("child_ca_instance_uri_discovery_total", uri_discovery_nanos); t.record_phase_nanos("child_ca_instance_uri_discovery_total", uri_discovery_nanos);
t.record_phase_nanos("child_enqueue_total", enqueue_nanos); t.record_phase_nanos("child_enqueue_total", enqueue_nanos);
} }
@ -1150,6 +1245,43 @@ fn source_label(source: PublicationPointSource) -> String {
} }
} }
fn repo_sync_phase_label(phase: crate::sync::repo::RepoSyncPhase) -> &'static str {
match phase {
crate::sync::repo::RepoSyncPhase::RrdpOk => "rrdp_ok",
crate::sync::repo::RepoSyncPhase::RrdpFailedRsyncOk => "rrdp_failed_rsync_ok",
crate::sync::repo::RepoSyncPhase::RsyncOnlyOk => "rsync_only_ok",
crate::sync::repo::RepoSyncPhase::ReplayRrdpOk => "replay_rrdp_ok",
crate::sync::repo::RepoSyncPhase::ReplayRsyncOk => "replay_rsync_ok",
crate::sync::repo::RepoSyncPhase::ReplayNoopRrdp => "replay_noop_rrdp",
crate::sync::repo::RepoSyncPhase::ReplayNoopRsync => "replay_noop_rsync",
}
}
fn repo_sync_failure_phase_label(
attempted_rrdp: bool,
original_notification_uri: Option<&str>,
effective_notification_uri: Option<&str>,
) -> &'static str {
if attempted_rrdp && original_notification_uri.is_some() && effective_notification_uri.is_some() {
"rrdp_failed_rsync_failed"
} else if attempted_rrdp
&& original_notification_uri.is_some()
&& effective_notification_uri.is_none()
{
"rsync_only_failed_after_rrdp_dedup"
} else {
"rsync_only_failed"
}
}
fn terminal_state_label(source: PublicationPointSource) -> &'static str {
match source {
PublicationPointSource::Fresh => "fresh",
PublicationPointSource::VcirCurrentInstance => "fallback_current_instance",
PublicationPointSource::FailedFetchNoCache => "failed_no_cache",
}
}
fn repo_sync_source_label(source: crate::sync::repo::RepoSyncSource) -> &'static str { fn repo_sync_source_label(source: crate::sync::repo::RepoSyncSource) -> &'static str {
match source { match source {
crate::sync::repo::RepoSyncSource::Rrdp => "rrdp", crate::sync::repo::RepoSyncSource::Rrdp => "rrdp",
@ -1182,6 +1314,7 @@ fn build_publication_point_audit_from_snapshot(
ca: &CaInstanceHandle, ca: &CaInstanceHandle,
source: PublicationPointSource, source: PublicationPointSource,
repo_sync_source: Option<&str>, repo_sync_source: Option<&str>,
repo_sync_phase: Option<&str>,
repo_sync_duration_ms: Option<u64>, repo_sync_duration_ms: Option<u64>,
repo_sync_error: Option<&str>, repo_sync_error: Option<&str>,
pack: &PublicationPointSnapshot, pack: &PublicationPointSnapshot,
@ -1275,8 +1408,10 @@ fn build_publication_point_audit_from_snapshot(
rrdp_notification_uri: ca.rrdp_notification_uri.clone(), rrdp_notification_uri: ca.rrdp_notification_uri.clone(),
source: source_label(source), source: source_label(source),
repo_sync_source: repo_sync_source.map(ToString::to_string), repo_sync_source: repo_sync_source.map(ToString::to_string),
repo_sync_phase: repo_sync_phase.map(ToString::to_string),
repo_sync_duration_ms, repo_sync_duration_ms,
repo_sync_error: repo_sync_error.map(ToString::to_string), repo_sync_error: repo_sync_error.map(ToString::to_string),
repo_terminal_state: terminal_state_label(source).to_string(),
this_update_rfc3339_utc: pack.this_update.rfc3339_utc.clone(), this_update_rfc3339_utc: pack.this_update.rfc3339_utc.clone(),
next_update_rfc3339_utc: pack.next_update.rfc3339_utc.clone(), next_update_rfc3339_utc: pack.next_update.rfc3339_utc.clone(),
verified_at_rfc3339_utc: pack.verified_at.rfc3339_utc.clone(), verified_at_rfc3339_utc: pack.verified_at.rfc3339_utc.clone(),
@ -1289,6 +1424,7 @@ fn build_publication_point_audit_from_vcir(
ca: &CaInstanceHandle, ca: &CaInstanceHandle,
source: PublicationPointSource, source: PublicationPointSource,
repo_sync_source: Option<&str>, repo_sync_source: Option<&str>,
repo_sync_phase: Option<&str>,
repo_sync_duration_ms: Option<u64>, repo_sync_duration_ms: Option<u64>,
repo_sync_error: Option<&str>, repo_sync_error: Option<&str>,
vcir: Option<&ValidatedCaInstanceResult>, vcir: Option<&ValidatedCaInstanceResult>,
@ -1302,6 +1438,7 @@ fn build_publication_point_audit_from_vcir(
ca, ca,
source, source,
repo_sync_source, repo_sync_source,
repo_sync_phase,
repo_sync_duration_ms, repo_sync_duration_ms,
repo_sync_error, repo_sync_error,
pack, pack,
@ -1326,8 +1463,10 @@ fn build_publication_point_audit_from_vcir(
rrdp_notification_uri: ca.rrdp_notification_uri.clone(), rrdp_notification_uri: ca.rrdp_notification_uri.clone(),
source: source_label(source), source: source_label(source),
repo_sync_source: repo_sync_source.map(ToString::to_string), repo_sync_source: repo_sync_source.map(ToString::to_string),
repo_sync_phase: repo_sync_phase.map(ToString::to_string),
repo_sync_duration_ms, repo_sync_duration_ms,
repo_sync_error: repo_sync_error.map(ToString::to_string), repo_sync_error: repo_sync_error.map(ToString::to_string),
repo_terminal_state: terminal_state_label(source).to_string(),
this_update_rfc3339_utc: String::new(), this_update_rfc3339_utc: String::new(),
next_update_rfc3339_utc: String::new(), next_update_rfc3339_utc: String::new(),
verified_at_rfc3339_utc: String::new(), verified_at_rfc3339_utc: String::new(),
@ -1407,8 +1546,10 @@ fn build_publication_point_audit_from_vcir(
rrdp_notification_uri: ca.rrdp_notification_uri.clone(), rrdp_notification_uri: ca.rrdp_notification_uri.clone(),
source: source_label(source), source: source_label(source),
repo_sync_source: repo_sync_source.map(ToString::to_string), repo_sync_source: repo_sync_source.map(ToString::to_string),
repo_sync_phase: repo_sync_phase.map(ToString::to_string),
repo_sync_duration_ms, repo_sync_duration_ms,
repo_sync_error: repo_sync_error.map(ToString::to_string), repo_sync_error: repo_sync_error.map(ToString::to_string),
repo_terminal_state: terminal_state_label(source).to_string(),
this_update_rfc3339_utc: vcir this_update_rfc3339_utc: vcir
.validated_manifest_meta .validated_manifest_meta
.validated_manifest_this_update .validated_manifest_this_update
@ -1790,7 +1931,9 @@ fn build_objects_output_from_vcir(
sha256_hex: local.source_object_hash.clone(), sha256_hex: local.source_object_hash.clone(),
kind: AuditObjectKind::RouterCertificate, kind: AuditObjectKind::RouterCertificate,
result: AuditObjectResult::Error, result: AuditObjectResult::Error,
detail: Some(format!("cached Router Key local output parse failed: {e}")), detail: Some(format!(
"cached Router Key local output parse failed: {e}"
)),
}, },
); );
} }
@ -1830,9 +1973,10 @@ fn parse_vcir_aspa_output(local: &VcirLocalOutput) -> Result<AspaAttestation, St
fn parse_vcir_router_key_output(local: &VcirLocalOutput) -> Result<RouterKeyPayload, String> { fn parse_vcir_router_key_output(local: &VcirLocalOutput) -> Result<RouterKeyPayload, String> {
let payload: VcirRouterKeyPayload = serde_json::from_str(&local.payload_json) let payload: VcirRouterKeyPayload = serde_json::from_str(&local.payload_json)
.map_err(|e| format!("invalid Router Key payload JSON: {e}"))?; .map_err(|e| format!("invalid Router Key payload JSON: {e}"))?;
let ski = hex::decode(&payload.ski_hex) let ski =
.map_err(|e| format!("invalid Router Key SKI hex: {e}"))?; hex::decode(&payload.ski_hex).map_err(|e| format!("invalid Router Key SKI hex: {e}"))?;
let spki_der = base64::engine::general_purpose::STANDARD.decode(&payload.spki_der_base64) let spki_der = base64::engine::general_purpose::STANDARD
.decode(&payload.spki_der_base64)
.map_err(|e| format!("invalid Router Key SPKI base64: {e}"))?; .map_err(|e| format!("invalid Router Key SPKI base64: {e}"))?;
Ok(RouterKeyPayload { Ok(RouterKeyPayload {
as_id: payload.as_id, as_id: payload.as_id,
@ -2255,7 +2399,8 @@ fn build_router_key_local_outputs(
.iter() .iter()
.map(|router_key| { .map(|router_key| {
let ski_hex = hex::encode(&router_key.ski); let ski_hex = hex::encode(&router_key.ski);
let spki_der_base64 = base64::engine::general_purpose::STANDARD.encode(&router_key.spki_der); let spki_der_base64 =
base64::engine::general_purpose::STANDARD.encode(&router_key.spki_der);
let rule_hash = sha256_hex( let rule_hash = sha256_hex(
format!( format!(
"router-key-rule:{}:{}:{}:{}", "router-key-rule:{}:{}:{}:{}",
@ -2587,7 +2732,6 @@ fn audit_result_to_vcir_status(result: &AuditObjectResult) -> VcirArtifactValida
} }
} }
fn roa_to_vrps_for_vcir(roa: &RoaObject) -> Vec<Vrp> { fn roa_to_vrps_for_vcir(roa: &RoaObject) -> Vec<Vrp> {
let asn = roa.roa.as_id; let asn = roa.roa.as_id;
let mut out = Vec::new(); let mut out = Vec::new();
@ -2629,10 +2773,10 @@ mod tests {
use crate::fetch::rsync::LocalDirRsyncFetcher; use crate::fetch::rsync::LocalDirRsyncFetcher;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher}; use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use crate::storage::{ use crate::storage::{
PackFile, PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta, PackFile, PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, ValidatedManifestMeta, VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus,
VcirChildEntry, VcirInstanceGate, VcirLocalOutput, VcirOutputType, VcirRelatedArtifact, VcirAuditSummary, VcirChildEntry, VcirInstanceGate, VcirLocalOutput, VcirOutputType,
VcirSummary, VcirRelatedArtifact, VcirSummary,
}; };
use crate::sync::rrdp::Fetcher; use crate::sync::rrdp::Fetcher;
use crate::validation::publication_point::PublicationPointSnapshot; use crate::validation::publication_point::PublicationPointSnapshot;
@ -2845,8 +2989,6 @@ authorityKeyIdentifier = keyid:always
} }
} }
struct GeneratedRouter { struct GeneratedRouter {
issuer_ca_der: Vec<u8>, issuer_ca_der: Vec<u8>,
router_der: Vec<u8>, router_der: Vec<u8>,
@ -3045,15 +3187,16 @@ authorityKeyIdentifier = keyid:always
} }
} }
fn cernet_publication_point_snapshot_for_vcir_tests( fn cernet_publication_point_snapshot_for_vcir_tests()
) -> (PublicationPointSnapshot, Vec<u8>, time::OffsetDateTime) { -> (PublicationPointSnapshot, Vec<u8>, time::OffsetDateTime) {
let dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) let dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0"); .join("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0");
let rsync_base_uri = "rsync://rpki.cernet.net/repo/cernet/0/"; let rsync_base_uri = "rsync://rpki.cernet.net/repo/cernet/0/";
let manifest_file = "05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft"; let manifest_file = "05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft";
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}"); let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
let manifest_bytes = std::fs::read(dir.join(manifest_file)).expect("read manifest fixture"); let manifest_bytes = std::fs::read(dir.join(manifest_file)).expect("read manifest fixture");
let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture"); let manifest =
ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
let candidate = manifest.manifest.this_update + time::Duration::seconds(60); let candidate = manifest.manifest.this_update + time::Duration::seconds(60);
let validation_time = if candidate < manifest.manifest.next_update { let validation_time = if candidate < manifest.manifest.next_update {
candidate candidate
@ -3333,22 +3476,32 @@ authorityKeyIdentifier = keyid:always
#[test] #[test]
fn collect_and_persist_vcir_embedded_evidence_for_real_signed_objects() { fn collect_and_persist_vcir_embedded_evidence_for_real_signed_objects() {
let (pack, issuer_ca_der, validation_time) = cernet_publication_point_snapshot_for_vcir_tests(); let (pack, issuer_ca_der, validation_time) =
cernet_publication_point_snapshot_for_vcir_tests();
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca"); let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca");
let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer( let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer(
&pack, &pack,
&Policy::default(), &Policy::default(),
issuer_ca_der.as_slice(), issuer_ca_der.as_slice(),
Some("rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer"), Some(
"rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
),
issuer_ca.tbs.extensions.ip_resources.as_ref(), issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(), issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time, validation_time,
None, None,
); );
assert!(!objects.local_outputs_cache.is_empty(), "expected local outputs from signed objects"); assert!(
!objects.local_outputs_cache.is_empty(),
"expected local outputs from signed objects"
);
let evidence = collect_vcir_embedded_evidence(&pack, &objects).expect("collect embedded evidence"); let evidence =
assert!(evidence.len() >= 2, "expected manifest EE and signed-object EE evidence"); collect_vcir_embedded_evidence(&pack, &objects).expect("collect embedded evidence");
assert!(
evidence.len() >= 2,
"expected manifest EE and signed-object EE evidence"
);
let store_dir = tempfile::tempdir().expect("store dir"); let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb"); let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
@ -3375,15 +3528,19 @@ authorityKeyIdentifier = keyid:always
.get_raw_by_hash_entry(&issuer_hash) .get_raw_by_hash_entry(&issuer_hash)
.expect("load issuer raw entry") .expect("load issuer raw entry")
.expect("issuer raw entry present"); .expect("issuer raw entry present");
assert!(issuer_entry assert!(
issuer_entry
.origin_uris .origin_uris
.iter() .iter()
.any(|uri| uri.ends_with("BfycW4hQb3wNP4YsiJW-1n6fjro.cer"))); .any(|uri| uri.ends_with("BfycW4hQb3wNP4YsiJW-1n6fjro.cer"))
);
for entry in &evidence { for entry in &evidence {
assert!(store assert!(
store
.get_raw_by_hash_entry(&entry.raw_entry.sha256_hex) .get_raw_by_hash_entry(&entry.raw_entry.sha256_hex)
.expect("load evidence raw entry") .expect("load evidence raw entry")
.is_some()); .is_some()
);
} }
} }
@ -3411,7 +3568,9 @@ authorityKeyIdentifier = keyid:always
source_object_uri: "rsync://example.test/repo/issuer/router.cer".to_string(), source_object_uri: "rsync://example.test/repo/issuer/router.cer".to_string(),
source_object_hash: "11".repeat(32), source_object_hash: "11".repeat(32),
source_ee_cert_hash: "11".repeat(32), source_ee_cert_hash: "11".repeat(32),
item_effective_until: PackTime { rfc3339_utc: "2026-12-31T00:00:00Z".to_string() }, item_effective_until: PackTime {
rfc3339_utc: "2026-12-31T00:00:00Z".to_string(),
},
}], }],
); );
assert_eq!(outputs.len(), 1); assert_eq!(outputs.len(), 1);
@ -3422,13 +3581,16 @@ authorityKeyIdentifier = keyid:always
#[test] #[test]
fn build_vcir_local_outputs_falls_back_to_decoding_accepted_objects_when_cache_is_empty() { fn build_vcir_local_outputs_falls_back_to_decoding_accepted_objects_when_cache_is_empty() {
let (pack, issuer_ca_der, validation_time) = cernet_publication_point_snapshot_for_vcir_tests(); let (pack, issuer_ca_der, validation_time) =
cernet_publication_point_snapshot_for_vcir_tests();
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca"); let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca");
let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer( let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer(
&pack, &pack,
&Policy::default(), &Policy::default(),
issuer_ca_der.as_slice(), issuer_ca_der.as_slice(),
Some("rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer"), Some(
"rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
),
issuer_ca.tbs.extensions.ip_resources.as_ref(), issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(), issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time, validation_time,
@ -3455,20 +3617,25 @@ authorityKeyIdentifier = keyid:always
.expect("rebuild vcir local outputs"); .expect("rebuild vcir local outputs");
assert!(!local_outputs.is_empty()); assert!(!local_outputs.is_empty());
assert_eq!(local_outputs.len(), objects.vrps.len()); assert_eq!(local_outputs.len(), objects.vrps.len());
assert!(local_outputs assert!(
local_outputs
.iter() .iter()
.all(|output| output.output_type == VcirOutputType::Vrp)); .all(|output| output.output_type == VcirOutputType::Vrp)
);
} }
#[test] #[test]
fn persist_vcir_for_fresh_result_stores_vcir_and_audit_indexes_for_real_snapshot() { fn persist_vcir_for_fresh_result_stores_vcir_and_audit_indexes_for_real_snapshot() {
let (pack, issuer_ca_der, validation_time) = cernet_publication_point_snapshot_for_vcir_tests(); let (pack, issuer_ca_der, validation_time) =
cernet_publication_point_snapshot_for_vcir_tests();
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca"); let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca");
let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer( let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer(
&pack, &pack,
&Policy::default(), &Policy::default(),
issuer_ca_der.as_slice(), issuer_ca_der.as_slice(),
Some("rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer"), Some(
"rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
),
issuer_ca.tbs.extensions.ip_resources.as_ref(), issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(), issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time, validation_time,
@ -3492,16 +3659,7 @@ authorityKeyIdentifier = keyid:always
rrdp_notification_uri: None, rrdp_notification_uri: None,
}; };
persist_vcir_for_fresh_result( persist_vcir_for_fresh_result(&store, &ca, &pack, &objects, &[], &[], &[], validation_time)
&store,
&ca,
&pack,
&objects,
&[],
&[],
&[],
validation_time,
)
.expect("persist vcir for fresh result"); .expect("persist vcir for fresh result");
let vcir = store let vcir = store
@ -3511,10 +3669,15 @@ authorityKeyIdentifier = keyid:always
assert_eq!(vcir.manifest_rsync_uri, pack.manifest_rsync_uri); assert_eq!(vcir.manifest_rsync_uri, pack.manifest_rsync_uri);
assert_eq!(vcir.summary.local_vrp_count as usize, objects.vrps.len()); assert_eq!(vcir.summary.local_vrp_count as usize, objects.vrps.len());
let first_output = vcir.local_outputs.first().expect("local outputs stored"); let first_output = vcir.local_outputs.first().expect("local outputs stored");
assert!(store assert!(
.get_audit_rule_index_entry(crate::storage::AuditRuleKind::Roa, &first_output.rule_hash) store
.get_audit_rule_index_entry(
crate::storage::AuditRuleKind::Roa,
&first_output.rule_hash
)
.expect("get audit rule index entry") .expect("get audit rule index entry")
.is_some()); .is_some()
);
} }
#[test] #[test]
@ -3574,7 +3737,9 @@ authorityKeyIdentifier = keyid:always
tal_id: "test-tal".to_string(), tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None, parent_manifest_rsync_uri: None,
ca_certificate_der: vec![0x11, 0x22], ca_certificate_der: vec![0x11, 0x22],
ca_certificate_rsync_uri: Some("rsync://example.test/repo/issuer/issuer.cer".to_string()), ca_certificate_rsync_uri: Some(
"rsync://example.test/repo/issuer/issuer.cer".to_string(),
),
effective_ip_resources: None, effective_ip_resources: None,
effective_as_resources: None, effective_as_resources: None,
rsync_base_uri: pack.publication_point_rsync_uri.clone(), rsync_base_uri: pack.publication_point_rsync_uri.clone(),
@ -3625,21 +3790,37 @@ authorityKeyIdentifier = keyid:always
&[], &[],
&embedded, &embedded,
); );
assert!(artifacts.iter().any(|artifact| artifact.artifact_role == VcirArtifactRole::Manifest)); assert!(
assert!(artifacts.iter().any(|artifact| artifact.artifact_role == VcirArtifactRole::TrustAnchorCert)); artifacts
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/issuer.crl") .iter()
.any(|artifact| artifact.artifact_role == VcirArtifactRole::Manifest)
);
assert!(
artifacts
.iter()
.any(|artifact| artifact.artifact_role == VcirArtifactRole::TrustAnchorCert)
);
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/issuer.crl")
&& artifact.artifact_role == VcirArtifactRole::CurrentCrl)); && artifact.artifact_role == VcirArtifactRole::CurrentCrl));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/child.cer") assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/child.cer")
&& artifact.artifact_role == VcirArtifactRole::ChildCaCert)); && artifact.artifact_role == VcirArtifactRole::ChildCaCert));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/a.roa") assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/a.roa")
&& artifact.validation_status == VcirArtifactValidationStatus::Rejected)); && artifact.validation_status == VcirArtifactValidationStatus::Rejected));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/a.asa") assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/a.asa")
&& artifact.validation_status == VcirArtifactValidationStatus::WarningOnly)); && artifact.validation_status == VcirArtifactValidationStatus::WarningOnly));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/a.gbr") assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/a.gbr")
&& artifact.artifact_kind == VcirArtifactKind::Gbr)); && artifact.artifact_kind == VcirArtifactKind::Gbr));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/extra.bin") assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/extra.bin")
&& artifact.artifact_kind == VcirArtifactKind::Other)); && artifact.artifact_kind == VcirArtifactKind::Other));
assert!(artifacts.iter().any(|artifact| artifact.uri.is_none() && artifact.sha256 == sha256_hex(b"embedded-ee"))); assert!(artifacts.iter().any(
|artifact| artifact.uri.is_none() && artifact.sha256 == sha256_hex(b"embedded-ee")
));
} }
#[test] #[test]
@ -4130,7 +4311,11 @@ authorityKeyIdentifier = keyid:always
PublicationPointSource::Fresh | PublicationPointSource::VcirCurrentInstance PublicationPointSource::Fresh | PublicationPointSource::VcirCurrentInstance
)); ));
assert_eq!(calls.load(Ordering::SeqCst), 1, "module-scope dedup should skip second sync"); assert_eq!(
calls.load(Ordering::SeqCst),
1,
"module-scope dedup should skip second sync"
);
} }
#[test] #[test]
@ -4385,12 +4570,15 @@ authorityKeyIdentifier = keyid:always
None, None,
None, None,
None, None,
None,
&pp.snapshot, &pp.snapshot,
&[], &[],
&objects, &objects,
&[], &[],
); );
assert_eq!(audit.source, "vcir_current_instance"); assert_eq!(audit.source, "vcir_current_instance");
assert_eq!(audit.repo_sync_phase, None);
assert_eq!(audit.repo_terminal_state, "fallback_current_instance");
assert!( assert!(
audit audit
.objects .objects
@ -4453,15 +4641,21 @@ authorityKeyIdentifier = keyid:always
let audit = build_publication_point_audit_from_snapshot( let audit = build_publication_point_audit_from_snapshot(
&issuer, &issuer,
pp.source, pp.source,
None, Some("rsync"),
None, Some("rsync_only_ok"),
None, Some(123),
Some("none"),
&pp.snapshot, &pp.snapshot,
&[], &[],
&objects, &objects,
&[], &[],
); );
assert_eq!(audit.objects[0].kind, AuditObjectKind::Manifest); assert_eq!(audit.objects[0].kind, AuditObjectKind::Manifest);
assert_eq!(audit.repo_sync_source.as_deref(), Some("rsync"));
assert_eq!(audit.repo_sync_phase.as_deref(), Some("rsync_only_ok"));
assert_eq!(audit.repo_sync_duration_ms, Some(123));
assert_eq!(audit.repo_sync_error.as_deref(), Some("none"));
assert_eq!(audit.repo_terminal_state, "fresh");
let crl = audit let crl = audit
.objects .objects
@ -4484,7 +4678,6 @@ authorityKeyIdentifier = keyid:always
let _ = now; let _ = now;
} }
#[test] #[test]
fn discover_children_with_router_certificate_records_ok_audit_and_no_child() { fn discover_children_with_router_certificate_records_ok_audit_and_no_child() {
let g = generate_router_cert_with_variant("ec-p256", true); let g = generate_router_cert_with_variant("ec-p256", true);
@ -4505,7 +4698,9 @@ authorityKeyIdentifier = keyid:always
tal_id: "test-tal".to_string(), tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None, parent_manifest_rsync_uri: None,
ca_certificate_der: g.issuer_ca_der.clone(), ca_certificate_der: g.issuer_ca_der.clone(),
ca_certificate_rsync_uri: Some("rsync://example.test/repo/issuer/issuer.cer".to_string()), ca_certificate_rsync_uri: Some(
"rsync://example.test/repo/issuer/issuer.cer".to_string(),
),
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(), effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(), effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(), rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
@ -4524,11 +4719,13 @@ authorityKeyIdentifier = keyid:always
assert!(out.children.is_empty()); assert!(out.children.is_empty());
assert_eq!(out.audits.len(), 1); assert_eq!(out.audits.len(), 1);
assert!(matches!(out.audits[0].result, AuditObjectResult::Ok)); assert!(matches!(out.audits[0].result, AuditObjectResult::Ok));
assert!(out.audits[0] assert!(
out.audits[0]
.detail .detail
.as_deref() .as_deref()
.unwrap_or("") .unwrap_or("")
.contains("validated BGPsec router certificate")); .contains("validated BGPsec router certificate")
);
} }
#[test] #[test]
@ -4551,7 +4748,9 @@ authorityKeyIdentifier = keyid:always
tal_id: "test-tal".to_string(), tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None, parent_manifest_rsync_uri: None,
ca_certificate_der: g.issuer_ca_der.clone(), ca_certificate_der: g.issuer_ca_der.clone(),
ca_certificate_rsync_uri: Some("rsync://example.test/repo/issuer/issuer.cer".to_string()), ca_certificate_rsync_uri: Some(
"rsync://example.test/repo/issuer/issuer.cer".to_string(),
),
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(), effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(), effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(), rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
@ -4570,11 +4769,13 @@ authorityKeyIdentifier = keyid:always
assert!(out.children.is_empty()); assert!(out.children.is_empty());
assert_eq!(out.audits.len(), 1); assert_eq!(out.audits.len(), 1);
assert!(matches!(out.audits[0].result, AuditObjectResult::Skipped)); assert!(matches!(out.audits[0].result, AuditObjectResult::Skipped));
assert!(out.audits[0] assert!(
out.audits[0]
.detail .detail
.as_deref() .as_deref()
.unwrap_or("") .unwrap_or("")
.contains("not a CA resource certificate or BGPsec router certificate")); .contains("not a CA resource certificate or BGPsec router certificate")
);
} }
#[test] #[test]
@ -4597,7 +4798,9 @@ authorityKeyIdentifier = keyid:always
tal_id: "test-tal".to_string(), tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None, parent_manifest_rsync_uri: None,
ca_certificate_der: g.issuer_ca_der.clone(), ca_certificate_der: g.issuer_ca_der.clone(),
ca_certificate_rsync_uri: Some("rsync://example.test/repo/issuer/issuer.cer".to_string()), ca_certificate_rsync_uri: Some(
"rsync://example.test/repo/issuer/issuer.cer".to_string(),
),
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(), effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(), effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(), rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
@ -4616,11 +4819,13 @@ authorityKeyIdentifier = keyid:always
assert!(out.children.is_empty()); assert!(out.children.is_empty());
assert_eq!(out.audits.len(), 1); assert_eq!(out.audits.len(), 1);
assert!(matches!(out.audits[0].result, AuditObjectResult::Error)); assert!(matches!(out.audits[0].result, AuditObjectResult::Error));
assert!(out.audits[0] assert!(
out.audits[0]
.detail .detail
.as_deref() .as_deref()
.unwrap_or("") .unwrap_or("")
.contains("router certificate validation failed")); .contains("router certificate validation failed")
);
} }
#[test] #[test]
@ -5210,9 +5415,10 @@ authorityKeyIdentifier = keyid:always
let audit = build_publication_point_audit_from_vcir( let audit = build_publication_point_audit_from_vcir(
&ca, &ca,
PublicationPointSource::VcirCurrentInstance, PublicationPointSource::VcirCurrentInstance,
None, Some("rsync"),
None, Some("rrdp_failed_rsync_failed"),
None, Some(456),
Some("rsync failed"),
Some(&vcir), Some(&vcir),
None, None,
&runner_warnings, &runner_warnings,
@ -5221,6 +5427,14 @@ authorityKeyIdentifier = keyid:always
); );
assert_eq!(audit.source, "vcir_current_instance"); assert_eq!(audit.source, "vcir_current_instance");
assert_eq!(audit.repo_sync_source.as_deref(), Some("rsync"));
assert_eq!(
audit.repo_sync_phase.as_deref(),
Some("rrdp_failed_rsync_failed")
);
assert_eq!(audit.repo_sync_duration_ms, Some(456));
assert_eq!(audit.repo_sync_error.as_deref(), Some("rsync failed"));
assert_eq!(audit.repo_terminal_state, "fallback_current_instance");
assert_eq!(audit.objects[0].rsync_uri, vcir.current_manifest_rsync_uri); assert_eq!(audit.objects[0].rsync_uri, vcir.current_manifest_rsync_uri);
assert_eq!(audit.objects[0].kind, AuditObjectKind::Manifest); assert_eq!(audit.objects[0].kind, AuditObjectKind::Manifest);
assert_eq!( assert_eq!(
@ -5270,9 +5484,10 @@ authorityKeyIdentifier = keyid:always
let audit = build_publication_point_audit_from_vcir( let audit = build_publication_point_audit_from_vcir(
&ca, &ca,
PublicationPointSource::FailedFetchNoCache, PublicationPointSource::FailedFetchNoCache,
None, Some("rsync"),
None, Some("rsync_only_failed"),
None, Some(789),
Some("load from network failed, fallback to cache"),
None, None,
None, None,
&[Warning::new("runner warning")], &[Warning::new("runner warning")],
@ -5289,6 +5504,14 @@ authorityKeyIdentifier = keyid:always
); );
assert_eq!(audit.source, "failed_fetch_no_cache"); assert_eq!(audit.source, "failed_fetch_no_cache");
assert_eq!(audit.repo_sync_source.as_deref(), Some("rsync"));
assert_eq!(audit.repo_sync_phase.as_deref(), Some("rsync_only_failed"));
assert_eq!(audit.repo_sync_duration_ms, Some(789));
assert_eq!(
audit.repo_sync_error.as_deref(),
Some("load from network failed, fallback to cache")
);
assert_eq!(audit.repo_terminal_state, "failed_no_cache");
assert!(audit.this_update_rfc3339_utc.is_empty()); assert!(audit.this_update_rfc3339_utc.is_empty());
assert!(audit.next_update_rfc3339_utc.is_empty()); assert!(audit.next_update_rfc3339_utc.is_empty());
assert!(audit.verified_at_rfc3339_utc.is_empty()); assert!(audit.verified_at_rfc3339_utc.is_empty());

View File

@ -311,7 +311,9 @@ fn landing_packfile_cbor_put(store: &RocksStore, obj_type: ObjType, sample: &str
entry.origin_uris.push(key); entry.origin_uris.push(key);
entry.object_type = Some("cbor".to_string()); entry.object_type = Some("cbor".to_string());
entry.encoding = Some("cbor".to_string()); entry.encoding = Some("cbor".to_string());
store.put_raw_by_hash_entry(&entry).expect("store raw_by_hash"); store
.put_raw_by_hash_entry(&entry)
.expect("store raw_by_hash");
} }
#[derive(Clone, Debug, serde::Serialize)] #[derive(Clone, Debug, serde::Serialize)]
@ -624,7 +626,9 @@ fn stage2_decode_validate_and_landing_benchmark_selected_der_v2() {
println!(); println!();
} }
if mode.do_landing() { if mode.do_landing() {
println!("## landing (PackFile::from_bytes_compute_sha256 + CBOR + RocksDB current-state landing)"); println!(
"## landing (PackFile::from_bytes_compute_sha256 + CBOR + RocksDB current-state landing)"
);
println!(); println!();
println!("| type | sample | size_bytes | complexity | avg ns/op | ops/s |"); println!("| type | sample | size_bytes | complexity | avg ns/op | ops/s |");
println!("|---|---|---:|---:|---:|---:|"); println!("|---|---|---:|---:|---:|---:|");

View File

@ -30,6 +30,7 @@ fn live_http_fetcher() -> BlockingHttpFetcher {
timeout: Duration::from_secs(timeout_secs), timeout: Duration::from_secs(timeout_secs),
large_body_timeout: Duration::from_secs(timeout_secs), large_body_timeout: Duration::from_secs(timeout_secs),
user_agent: "rpki-dev/0.1 (stage2 live rrdp delta test)".to_string(), user_agent: "rpki-dev/0.1 (stage2 live rrdp delta test)".to_string(),
..HttpFetcherConfig::default()
}) })
.expect("http fetcher") .expect("http fetcher")
} }

View File

@ -1,8 +1,8 @@
use rpki::ccr::{ use rpki::ccr::{
AspaPayloadSet, AspaPayloadState, CcrContentInfo, CcrDigestAlgorithm, ManifestInstance, AspaPayloadSet, AspaPayloadState, CcrContentInfo, CcrDigestAlgorithm, ManifestInstance,
ManifestState, RoaPayloadSet, RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState, ManifestState, RoaPayloadSet, RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState,
RpkiCanonicalCacheRepresentation, TrustAnchorState, compute_state_hash, RpkiCanonicalCacheRepresentation, TrustAnchorState, compute_state_hash, decode_content_info,
decode_content_info, encode::{ encode::{
encode_manifest_state_payload_der, encode_router_key_state_payload_der, encode_manifest_state_payload_der, encode_router_key_state_payload_der,
encode_trust_anchor_state_payload_der, encode_trust_anchor_state_payload_der,
}, },
@ -22,7 +22,8 @@ fn sample_time() -> time::OffsetDateTime {
#[test] #[test]
fn minimal_trust_anchor_ccr_roundtrips() { fn minimal_trust_anchor_ccr_roundtrips() {
let skis = vec![vec![0x11; 20], vec![0x22; 20]]; let skis = vec![vec![0x11; 20], vec![0x22; 20]];
let skis_der = encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload"); let skis_der =
encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let state = TrustAnchorState { let state = TrustAnchorState {
skis, skis,
hash: compute_state_hash(&skis_der), hash: compute_state_hash(&skis_der),
@ -47,7 +48,8 @@ fn minimal_trust_anchor_ccr_roundtrips() {
#[test] #[test]
fn decode_rejects_wrong_content_type_oid() { fn decode_rejects_wrong_content_type_oid() {
let skis = vec![vec![0x11; 20]]; let skis = vec![vec![0x11; 20]];
let skis_der = encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload"); let skis_der =
encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let content_info = CcrContentInfo::new(RpkiCanonicalCacheRepresentation { let content_info = CcrContentInfo::new(RpkiCanonicalCacheRepresentation {
version: 0, version: 0,
hash_alg: CcrDigestAlgorithm::Sha256, hash_alg: CcrDigestAlgorithm::Sha256,
@ -69,7 +71,10 @@ fn decode_rejects_wrong_content_type_oid() {
.expect("oid present"); .expect("oid present");
der[pos + needle.len() - 1] ^= 0x01; der[pos + needle.len() - 1] ^= 0x01;
let err = decode_content_info(&der).expect_err("wrong content type must fail"); let err = decode_content_info(&der).expect_err("wrong content type must fail");
assert!(err.to_string().contains("unexpected contentType OID"), "{err}"); assert!(
err.to_string().contains("unexpected contentType OID"),
"{err}"
);
} }
#[test] #[test]
@ -85,13 +90,17 @@ fn ccr_requires_at_least_one_state_aspect() {
rks: None, rks: None,
}); });
let err = encode_content_info(&ccr).expect_err("empty state aspects must fail"); let err = encode_content_info(&ccr).expect_err("empty state aspects must fail");
assert!(err.to_string().contains("at least one of mfts/vrps/vaps/tas/rks")); assert!(
err.to_string()
.contains("at least one of mfts/vrps/vaps/tas/rks")
);
} }
#[test] #[test]
fn state_hash_helpers_accept_matching_and_reject_tampered_payload() { fn state_hash_helpers_accept_matching_and_reject_tampered_payload() {
let skis = vec![vec![0x11; 20]]; let skis = vec![vec![0x11; 20]];
let payload_der = encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload"); let payload_der =
encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let hash = compute_state_hash(&payload_der); let hash = compute_state_hash(&payload_der);
assert!(verify_state_hash(&hash, &payload_der)); assert!(verify_state_hash(&hash, &payload_der));
let mut tampered = payload_der.clone(); let mut tampered = payload_der.clone();
@ -105,12 +114,15 @@ fn manifest_and_router_key_skeletons_encode_payloads_and_validate_sorting() {
hash: vec![0x33; 32], hash: vec![0x33; 32],
size: 2048, size: 2048,
aki: vec![0x44; 20], aki: vec![0x44; 20],
manifest_number: BigUnsigned { bytes_be: vec![0x01] }, manifest_number: BigUnsigned {
bytes_be: vec![0x01],
},
this_update: sample_time(), this_update: sample_time(),
locations: vec![vec![0x30, 0x00]], locations: vec![vec![0x30, 0x00]],
subordinates: vec![vec![0x55; 20]], subordinates: vec![vec![0x55; 20]],
}]; }];
let mis_der = encode_manifest_state_payload_der(&manifest_instances).expect("encode manifest state payload"); let mis_der = encode_manifest_state_payload_der(&manifest_instances)
.expect("encode manifest state payload");
let manifest_state = ManifestState { let manifest_state = ManifestState {
mis: manifest_instances, mis: manifest_instances,
most_recent_update: sample_time(), most_recent_update: sample_time(),
@ -239,10 +251,16 @@ fn decode_rejects_wrong_digest_algorithm_oid() {
}); });
let mut der = encode_content_info(&ccr).expect("encode ccr"); let mut der = encode_content_info(&ccr).expect("encode ccr");
let oid = rpki::data_model::oid::OID_SHA256_RAW; let oid = rpki::data_model::oid::OID_SHA256_RAW;
let pos = der.windows(oid.len()).position(|w| w == oid).expect("sha256 oid present"); let pos = der
.windows(oid.len())
.position(|w| w == oid)
.expect("sha256 oid present");
der[pos + oid.len() - 1] ^= 0x01; der[pos + oid.len() - 1] ^= 0x01;
let err = decode_content_info(&der).expect_err("decode must reject wrong digest oid"); let err = decode_content_info(&der).expect_err("decode must reject wrong digest oid");
assert!(err.to_string().contains("unexpected digest algorithm OID"), "{err}"); assert!(
err.to_string().contains("unexpected digest algorithm OID"),
"{err}"
);
} }
#[test] #[test]
@ -258,7 +276,10 @@ fn decode_rejects_bad_generalized_time() {
rks: None, rks: None,
}); });
let mut der = encode_content_info(&ccr).expect("encode ccr"); let mut der = encode_content_info(&ccr).expect("encode ccr");
let pos = der.windows(15).position(|w| w == b"20260324000000Z").expect("time present"); let pos = der
.windows(15)
.position(|w| w == b"20260324000000Z")
.expect("time present");
der[pos + 14] = b'X'; der[pos + 14] = b'X';
let err = decode_content_info(&der).expect_err("bad time must fail"); let err = decode_content_info(&der).expect_err("bad time must fail");
assert!(err.to_string().contains("GeneralizedTime"), "{err}"); assert!(err.to_string().contains("GeneralizedTime"), "{err}");
@ -268,7 +289,9 @@ fn decode_rejects_bad_generalized_time() {
fn manifest_state_validate_rejects_unsorted_subordinates() { fn manifest_state_validate_rejects_unsorted_subordinates() {
let mut state = sample_manifest_state(); let mut state = sample_manifest_state();
state.mis[0].subordinates = vec![vec![0x40; 20], vec![0x30; 20]]; state.mis[0].subordinates = vec![vec![0x40; 20], vec![0x30; 20]];
let err = state.validate().expect_err("unsorted subordinates must fail"); let err = state
.validate()
.expect_err("unsorted subordinates must fail");
assert!(err.to_string().contains("subordinates"), "{err}"); assert!(err.to_string().contains("subordinates"), "{err}");
} }
@ -276,8 +299,14 @@ fn manifest_state_validate_rejects_unsorted_subordinates() {
fn roa_payload_state_validate_rejects_duplicate_asn_sets() { fn roa_payload_state_validate_rejects_duplicate_asn_sets() {
let state = RoaPayloadState { let state = RoaPayloadState {
rps: vec![ rps: vec![
RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![vec![0x30, 0x00]] }, RoaPayloadSet {
RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![vec![0x30, 0x00]] }, as_id: 64496,
ip_addr_blocks: vec![vec![0x30, 0x00]],
},
RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![vec![0x30, 0x00]],
},
], ],
hash: vec![0u8; 32], hash: vec![0u8; 32],
}; };
@ -288,7 +317,10 @@ fn roa_payload_state_validate_rejects_duplicate_asn_sets() {
#[test] #[test]
fn aspa_payload_state_validate_rejects_unsorted_providers() { fn aspa_payload_state_validate_rejects_unsorted_providers() {
let state = AspaPayloadState { let state = AspaPayloadState {
aps: vec![AspaPayloadSet { customer_as_id: 64496, providers: vec![64498, 64497] }], aps: vec![AspaPayloadSet {
customer_as_id: 64496,
providers: vec![64498, 64497],
}],
hash: vec![0u8; 32], hash: vec![0u8; 32],
}; };
let err = state.validate().expect_err("unsorted providers must fail"); let err = state.validate().expect_err("unsorted providers must fail");
@ -311,13 +343,21 @@ fn router_key_state_validate_rejects_unsorted_router_keys() {
rksets: vec![RouterKeySet { rksets: vec![RouterKeySet {
as_id: 64496, as_id: 64496,
router_keys: vec![ router_keys: vec![
RouterKey { ski: vec![0x42; 20], spki_der: vec![0x30, 0x00] }, RouterKey {
RouterKey { ski: vec![0x41; 20], spki_der: vec![0x30, 0x00] }, ski: vec![0x42; 20],
spki_der: vec![0x30, 0x00],
},
RouterKey {
ski: vec![0x41; 20],
spki_der: vec![0x30, 0x00],
},
], ],
}], }],
hash: vec![0u8; 32], hash: vec![0u8; 32],
}; };
let err = state.validate().expect_err("unsorted router keys must fail"); let err = state
.validate()
.expect_err("unsorted router keys must fail");
assert!(err.to_string().contains("router_keys"), "{err}"); assert!(err.to_string().contains("router_keys"), "{err}");
} }
@ -332,6 +372,8 @@ fn manifest_instance_validate_rejects_bad_location_tag() {
locations: vec![vec![0x04, 0x00]], locations: vec![vec![0x04, 0x00]],
subordinates: vec![], subordinates: vec![],
}; };
let err = instance.validate().expect_err("bad AccessDescription tag must fail"); let err = instance
.validate()
.expect_err("bad AccessDescription tag must fail");
assert!(err.to_string().contains("unexpected tag"), "{err}"); assert!(err.to_string().contains("unexpected tag"), "{err}");
} }

View File

@ -1,19 +1,21 @@
use rpki::ccr::{ use rpki::ccr::{
CcrContentInfo, CcrDigestAlgorithm, ManifestInstance, ManifestState, RoaPayloadSet, CcrContentInfo, CcrDigestAlgorithm, ManifestInstance, ManifestState, RoaPayloadSet,
RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState, TrustAnchorState, RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState, TrustAnchorState, compute_state_hash,
compute_state_hash, decode_content_info, dump_content_info_json_value, decode_content_info, dump_content_info_json_value,
encode::{ encode::{
encode_aspa_payload_state_payload_der, encode_content_info, encode_aspa_payload_state_payload_der, encode_content_info,
encode_manifest_state_payload_der, encode_roa_payload_state_payload_der, encode_manifest_state_payload_der, encode_roa_payload_state_payload_der,
encode_router_key_state_payload_der, encode_trust_anchor_state_payload_der, encode_router_key_state_payload_der, encode_trust_anchor_state_payload_der,
}, },
verify::{verify_against_report_json_path, verify_against_vcir_store, verify_content_info_bytes}, verify::{
verify_against_report_json_path, verify_against_vcir_store, verify_content_info_bytes,
},
}; };
use rpki::data_model::common::BigUnsigned; use rpki::data_model::common::BigUnsigned;
use rpki::storage::{ use rpki::storage::{
PackTime, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta, PackTime, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry,
VcirChildEntry, VcirInstanceGate, VcirRelatedArtifact, VcirSummary, VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
}; };
fn sample_time() -> time::OffsetDateTime { fn sample_time() -> time::OffsetDateTime {
@ -45,28 +47,51 @@ fn sample_manifest_state() -> ManifestState {
fn sample_roa_state() -> RoaPayloadState { fn sample_roa_state() -> RoaPayloadState {
let rps = vec![RoaPayloadSet { let rps = vec![RoaPayloadSet {
as_id: 64496, as_id: 64496,
ip_addr_blocks: vec![vec![0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00]], ip_addr_blocks: vec![vec![
0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00,
]],
}]; }];
let der = encode_roa_payload_state_payload_der(&rps).expect("encode rps"); let der = encode_roa_payload_state_payload_der(&rps).expect("encode rps");
RoaPayloadState { rps, hash: compute_state_hash(&der) } RoaPayloadState {
rps,
hash: compute_state_hash(&der),
}
} }
fn sample_aspa_state() -> rpki::ccr::AspaPayloadState { fn sample_aspa_state() -> rpki::ccr::AspaPayloadState {
let aps = vec![rpki::ccr::AspaPayloadSet { customer_as_id: 64496, providers: vec![64497] }]; let aps = vec![rpki::ccr::AspaPayloadSet {
customer_as_id: 64496,
providers: vec![64497],
}];
let der = encode_aspa_payload_state_payload_der(&aps).expect("encode aps"); let der = encode_aspa_payload_state_payload_der(&aps).expect("encode aps");
rpki::ccr::AspaPayloadState { aps, hash: compute_state_hash(&der) } rpki::ccr::AspaPayloadState {
aps,
hash: compute_state_hash(&der),
}
} }
fn sample_ta_state() -> TrustAnchorState { fn sample_ta_state() -> TrustAnchorState {
let skis = vec![vec![0x11; 20]]; let skis = vec![vec![0x11; 20]];
let der = encode_trust_anchor_state_payload_der(&skis).expect("encode skis"); let der = encode_trust_anchor_state_payload_der(&skis).expect("encode skis");
TrustAnchorState { skis, hash: compute_state_hash(&der) } TrustAnchorState {
skis,
hash: compute_state_hash(&der),
}
} }
fn sample_rks() -> RouterKeyState { fn sample_rks() -> RouterKeyState {
let rksets = vec![RouterKeySet { as_id: 64496, router_keys: vec![RouterKey { ski: vec![0x22; 20], spki_der: vec![0x30, 0x00] }] }]; let rksets = vec![RouterKeySet {
as_id: 64496,
router_keys: vec![RouterKey {
ski: vec![0x22; 20],
spki_der: vec![0x30, 0x00],
}],
}];
let der = encode_router_key_state_payload_der(&rksets).expect("encode rk"); let der = encode_router_key_state_payload_der(&rksets).expect("encode rk");
RouterKeyState { rksets, hash: compute_state_hash(&der) } RouterKeyState {
rksets,
hash: compute_state_hash(&der),
}
} }
fn sample_ccr() -> Vec<u8> { fn sample_ccr() -> Vec<u8> {
@ -102,7 +127,10 @@ fn verify_content_info_bytes_rejects_tampered_manifest_hash() {
content_info.content.mfts.as_mut().unwrap().hash[0] ^= 0x01; content_info.content.mfts.as_mut().unwrap().hash[0] ^= 0x01;
let der = encode_content_info(&content_info).expect("encode tampered ccr"); let der = encode_content_info(&content_info).expect("encode tampered ccr");
let err = verify_content_info_bytes(&der).expect_err("tampered hash must fail"); let err = verify_content_info_bytes(&der).expect_err("tampered hash must fail");
assert!(err.to_string().contains("ManifestState hash mismatch"), "{err}"); assert!(
err.to_string().contains("ManifestState hash mismatch"),
"{err}"
);
} }
#[test] #[test]
@ -128,8 +156,21 @@ fn verify_against_report_json_path_rejects_mismatching_report() {
let mut ci = decode_content_info(&sample_ccr()).expect("decode ccr"); let mut ci = decode_content_info(&sample_ccr()).expect("decode ccr");
ci.content.vrps = Some(RoaPayloadState { ci.content.vrps = Some(RoaPayloadState {
rps: vec![RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![vec![0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00]] }], rps: vec![RoaPayloadSet {
hash: compute_state_hash(&encode_roa_payload_state_payload_der(&[RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![vec![0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00]] }]).unwrap()), as_id: 64496,
ip_addr_blocks: vec![vec![
0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00,
]],
}],
hash: compute_state_hash(
&encode_roa_payload_state_payload_der(&[RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![vec![
0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00,
]],
}])
.unwrap(),
),
}); });
verify_against_report_json_path(&ci, &report_path).expect_err("report mismatch expected"); verify_against_report_json_path(&ci, &report_path).expect_err("report mismatch expected");
} }

View File

@ -1,4 +1,3 @@
use rpki::ccr::{ use rpki::ccr::{
CcrContentInfo, CcrDigestAlgorithm, TrustAnchorState, compute_state_hash, CcrContentInfo, CcrDigestAlgorithm, TrustAnchorState, compute_state_hash,
encode::{encode_content_info, encode_trust_anchor_state_payload_der}, encode::{encode_content_info, encode_trust_anchor_state_payload_der},
@ -20,7 +19,10 @@ fn sample_ccr_file() -> (tempfile::TempDir, std::path::PathBuf) {
mfts: None, mfts: None,
vrps: None, vrps: None,
vaps: None, vaps: None,
tas: Some(TrustAnchorState { skis, hash: compute_state_hash(&skis_der) }), tas: Some(TrustAnchorState {
skis,
hash: compute_state_hash(&skis_der),
}),
rks: None, rks: None,
}); });
let path = dir.path().join("sample.ccr"); let path = dir.path().join("sample.ccr");
@ -36,7 +38,11 @@ fn ccr_dump_binary_prints_json_summary() {
.args(["--ccr", ccr_path.to_string_lossy().as_ref()]) .args(["--ccr", ccr_path.to_string_lossy().as_ref()])
.output() .output()
.expect("run ccr_dump"); .expect("run ccr_dump");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let json: serde_json::Value = serde_json::from_slice(&out.stdout).expect("parse json"); let json: serde_json::Value = serde_json::from_slice(&out.stdout).expect("parse json");
assert_eq!(json["version"], 0); assert_eq!(json["version"], 0);
assert_eq!(json["state_aspects"]["tas"]["ski_count"], 1); assert_eq!(json["state_aspects"]["tas"]["ski_count"], 1);
@ -50,22 +56,25 @@ fn ccr_verify_binary_prints_summary() {
.args(["--ccr", ccr_path.to_string_lossy().as_ref()]) .args(["--ccr", ccr_path.to_string_lossy().as_ref()])
.output() .output()
.expect("run ccr_verify"); .expect("run ccr_verify");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let json: serde_json::Value = serde_json::from_slice(&out.stdout).expect("parse json"); let json: serde_json::Value = serde_json::from_slice(&out.stdout).expect("parse json");
assert_eq!(json["version"], 0); assert_eq!(json["version"], 0);
assert_eq!(json["trust_anchor_ski_count"], 1); assert_eq!(json["trust_anchor_ski_count"], 1);
assert_eq!(json["state_hashes_ok"], true); assert_eq!(json["state_hashes_ok"], true);
} }
#[test] #[test]
fn ccr_to_routinator_csv_binary_writes_vrp_csv() { fn ccr_to_routinator_csv_binary_writes_vrp_csv() {
use rpki::ccr::{ use rpki::ccr::{
CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation,
build_roa_payload_state, encode::encode_content_info, build_roa_payload_state, encode::encode_content_info,
}; };
use rpki::validation::objects::Vrp;
use rpki::data_model::roa::{IpPrefix, RoaAfi}; use rpki::data_model::roa::{IpPrefix, RoaAfi};
use rpki::validation::objects::Vrp;
let dir = tempfile::tempdir().expect("tempdir"); let dir = tempfile::tempdir().expect("tempdir");
let ccr_path = dir.path().join("vrp.ccr"); let ccr_path = dir.path().join("vrp.ccr");
let csv_path = dir.path().join("out.csv"); let csv_path = dir.path().join("out.csv");
@ -107,7 +116,11 @@ fn ccr_to_routinator_csv_binary_writes_vrp_csv() {
]) ])
.output() .output()
.expect("run ccr_to_routinator_csv"); .expect("run ccr_to_routinator_csv");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let csv = std::fs::read_to_string(csv_path).expect("read csv"); let csv = std::fs::read_to_string(csv_path).expect("read csv");
assert!(csv.contains("ASN,IP Prefix,Max Length,Trust Anchor")); assert!(csv.contains("ASN,IP Prefix,Max Length,Trust Anchor"));
assert!(csv.contains("AS64496,203.0.113.0/24,24,apnic")); assert!(csv.contains("AS64496,203.0.113.0/24,24,apnic"));
@ -171,7 +184,11 @@ fn ccr_to_compare_views_binary_writes_vrp_and_vap_csvs() {
]) ])
.output() .output()
.expect("run ccr_to_compare_views"); .expect("run ccr_to_compare_views");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let vrps_csv = std::fs::read_to_string(vrps_path).expect("read vrps csv"); let vrps_csv = std::fs::read_to_string(vrps_path).expect("read vrps csv");
let vaps_csv = std::fs::read_to_string(vaps_path).expect("read vaps csv"); let vaps_csv = std::fs::read_to_string(vaps_path).expect("read vaps csv");
@ -234,7 +251,11 @@ fn ccr_to_compare_views_binary_writes_header_only_vap_csv_when_absent() {
]) ])
.output() .output()
.expect("run ccr_to_compare_views"); .expect("run ccr_to_compare_views");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let vaps_csv = std::fs::read_to_string(vaps_path).expect("read vaps csv"); let vaps_csv = std::fs::read_to_string(vaps_path).expect("read vaps csv");
assert_eq!(vaps_csv, "Customer ASN,Providers,Trust Anchor\n"); assert_eq!(vaps_csv, "Customer ASN,Providers,Trust Anchor\n");

View File

@ -2,12 +2,18 @@ use std::collections::BTreeSet;
use std::path::PathBuf; use std::path::PathBuf;
use std::process::Command; use std::process::Command;
use rpki::ccr::{encode_content_info, CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState}; use rpki::ccr::{
use rpki::cir::{encode_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1}; CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState,
encode_content_info,
};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
};
#[test] #[test]
fn cir_full_and_delta_pair_reuses_shared_static_pool() { fn cir_full_and_delta_pair_reuses_shared_static_pool() {
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_record_full_delta.sh"); let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_record_full_delta.sh");
let out_dir = tempfile::tempdir().expect("tempdir"); let out_dir = tempfile::tempdir().expect("tempdir");
let out = out_dir.path().join("cir-pair"); let out = out_dir.path().join("cir-pair");
let fixture_root = out_dir.path().join("fixture"); let fixture_root = out_dir.path().join("fixture");
@ -16,11 +22,7 @@ fn cir_full_and_delta_pair_reuses_shared_static_pool() {
std::fs::create_dir_all(&static_payload_root).unwrap(); std::fs::create_dir_all(&static_payload_root).unwrap();
let base_locks = fixture_root.join("base-locks.json"); let base_locks = fixture_root.join("base-locks.json");
let delta_locks = fixture_root.join("locks-delta.json"); let delta_locks = fixture_root.join("locks-delta.json");
std::fs::write( std::fs::write(&base_locks, br#"{"validationTime":"2026-03-16T11:49:15Z"}"#).unwrap();
&base_locks,
br#"{"validationTime":"2026-03-16T11:49:15Z"}"#,
)
.unwrap();
std::fs::write( std::fs::write(
&delta_locks, &delta_locks,
br#"{"validationTime":"2026-03-16T11:50:15Z"}"#, br#"{"validationTime":"2026-03-16T11:50:15Z"}"#,
@ -84,7 +86,10 @@ fn cir_full_and_delta_pair_reuses_shared_static_pool() {
mfts: None, mfts: None,
vrps: None, vrps: None,
vaps: None, vaps: None,
tas: Some(TrustAnchorState { skis: vec![vec![0x11; 20]], hash: vec![0x22; 32] }), tas: Some(TrustAnchorState {
skis: vec![vec![0x11; 20]],
hash: vec![0x22; 32],
}),
rks: None, rks: None,
}); });
let full_cir_path = fixture_root.join("full.cir"); let full_cir_path = fixture_root.join("full.cir");
@ -97,8 +102,16 @@ fn cir_full_and_delta_pair_reuses_shared_static_pool() {
std::fs::write(&delta_cir_path, encode_cir(&delta_cir).unwrap()).unwrap(); std::fs::write(&delta_cir_path, encode_cir(&delta_cir).unwrap()).unwrap();
std::fs::write(&full_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap(); std::fs::write(&full_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap();
std::fs::write(&delta_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap(); std::fs::write(&delta_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap();
std::fs::write(&full_report_path, br#"{"format_version":2,"publication_points":[]}"#).unwrap(); std::fs::write(
std::fs::write(&delta_report_path, br#"{"format_version":2,"publication_points":[]}"#).unwrap(); &full_report_path,
br#"{"format_version":2,"publication_points":[]}"#,
)
.unwrap();
std::fs::write(
&delta_report_path,
br#"{"format_version":2,"publication_points":[]}"#,
)
.unwrap();
let stub = out_dir.path().join("stub-rpki.sh"); let stub = out_dir.path().join("stub-rpki.sh");
std::fs::write( std::fs::write(
@ -195,7 +208,8 @@ fi
String::from_utf8_lossy(&proc.stderr) String::from_utf8_lossy(&proc.stderr)
); );
let full_cir = rpki::cir::decode_cir(&std::fs::read(out.join("full").join("input.cir")).unwrap()) let full_cir =
rpki::cir::decode_cir(&std::fs::read(out.join("full").join("input.cir")).unwrap())
.expect("decode full cir"); .expect("decode full cir");
let delta_cir = let delta_cir =
rpki::cir::decode_cir(&std::fs::read(out.join("delta-001").join("input.cir")).unwrap()) rpki::cir::decode_cir(&std::fs::read(out.join("delta-001").join("input.cir")).unwrap())

View File

@ -2,10 +2,12 @@ use std::path::PathBuf;
use std::process::Command; use std::process::Command;
use rpki::ccr::{ use rpki::ccr::{
encode_content_info, CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState,
TrustAnchorState, encode_content_info,
};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
}; };
use rpki::cir::{encode_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1};
#[test] #[test]
fn cir_drop_report_counts_dropped_roa_objects_and_vrps() { fn cir_drop_report_counts_dropped_roa_objects_and_vrps() {
@ -24,7 +26,10 @@ fn cir_drop_report_counts_dropped_roa_objects_and_vrps() {
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
hex::encode(Sha256::digest(&roa_bytes)) hex::encode(Sha256::digest(&roa_bytes))
}; };
let dir = static_root.join("20260409").join(&hash[0..2]).join(&hash[2..4]); let dir = static_root
.join("20260409")
.join(&hash[0..2])
.join(&hash[2..4]);
std::fs::create_dir_all(&dir).unwrap(); std::fs::create_dir_all(&dir).unwrap();
std::fs::write(dir.join(&hash), &roa_bytes).unwrap(); std::fs::write(dir.join(&hash), &roa_bytes).unwrap();
@ -100,12 +105,20 @@ fn cir_drop_report_counts_dropped_roa_objects_and_vrps() {
]) ])
.output() .output()
.expect("run cir_drop_report"); .expect("run cir_drop_report");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let output: serde_json::Value = let output: serde_json::Value =
serde_json::from_slice(&std::fs::read(&json_out).unwrap()).unwrap(); serde_json::from_slice(&std::fs::read(&json_out).unwrap()).unwrap();
assert_eq!(output["summary"]["droppedObjectCount"], 1); assert_eq!(output["summary"]["droppedObjectCount"], 1);
assert!(output["summary"]["droppedVrpCount"].as_u64().unwrap_or(0) >= 1); assert!(output["summary"]["droppedVrpCount"].as_u64().unwrap_or(0) >= 1);
assert_eq!(output["summary"]["droppedByKind"]["roa"], 1); assert_eq!(output["summary"]["droppedByKind"]["roa"], 1);
assert!(std::fs::read_to_string(&md_out).unwrap().contains("Dropped By Reason")); assert!(
std::fs::read_to_string(&md_out)
.unwrap()
.contains("Dropped By Reason")
);
} }

View File

@ -2,8 +2,8 @@ use std::path::{Path, PathBuf};
use std::process::Command; use std::process::Command;
use rpki::cir::{ use rpki::cir::{
encode_cir, materialize_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
CIR_VERSION_V1, materialize_cir,
}; };
fn apnic_tal_path() -> PathBuf { fn apnic_tal_path() -> PathBuf {
@ -59,7 +59,11 @@ fn write_static(root: &Path, date: &str, bytes: &[u8]) {
std::fs::write(dir.join(hash), bytes).expect("write static object"); std::fs::write(dir.join(hash), bytes).expect("write static object");
} }
fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror_root: &Path) -> PathBuf { fn prepare_reference_ccr(
work: &Path,
cir: &CanonicalInputRepresentation,
mirror_root: &Path,
) -> PathBuf {
let reference_ccr = work.join("reference.ccr"); let reference_ccr = work.join("reference.ccr");
let rpki_bin = env!("CARGO_BIN_EXE_rpki"); let rpki_bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper"); let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
@ -91,7 +95,11 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
]) ])
.output() .output()
.expect("run reference rpki"); .expect("run reference rpki");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
reference_ccr reference_ccr
} }
@ -116,7 +124,8 @@ fn cir_replay_matrix_script_matches_reference_for_all_participants() {
materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize"); materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize");
let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root); let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root);
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_matrix.sh"); let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_matrix.sh");
let out = Command::new(script) let out = Command::new(script)
.args([ .args([
"--cir", "--cir",
@ -134,13 +143,19 @@ fn cir_replay_matrix_script_matches_reference_for_all_participants() {
]) ])
.output() .output()
.expect("run cir matrix script"); .expect("run cir matrix script");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let summary: serde_json::Value = let summary: serde_json::Value =
serde_json::from_slice(&std::fs::read(out_dir.join("summary.json")).expect("read summary")) serde_json::from_slice(&std::fs::read(out_dir.join("summary.json")).expect("read summary"))
.expect("parse summary"); .expect("parse summary");
assert_eq!(summary["allMatch"], true); assert_eq!(summary["allMatch"], true);
let participants = summary["participants"].as_array().expect("participants array"); let participants = summary["participants"]
.as_array()
.expect("participants array");
assert_eq!(participants.len(), 3); assert_eq!(participants.len(), 3);
for participant in participants { for participant in participants {
assert_eq!(participant["exitCode"], 0); assert_eq!(participant["exitCode"], 0);

View File

@ -59,7 +59,11 @@ fn write_static(root: &Path, date: &str, bytes: &[u8]) {
std::fs::write(dir.join(hash), bytes).expect("write static object"); std::fs::write(dir.join(hash), bytes).expect("write static object");
} }
fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror_root: &Path) -> PathBuf { fn prepare_reference_ccr(
work: &Path,
cir: &CanonicalInputRepresentation,
mirror_root: &Path,
) -> PathBuf {
let reference_ccr = work.join("reference.ccr"); let reference_ccr = work.join("reference.ccr");
let rpki_bin = env!("CARGO_BIN_EXE_rpki"); let rpki_bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper"); let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
@ -91,7 +95,11 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
]) ])
.output() .output()
.expect("run reference rpki"); .expect("run reference rpki");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
reference_ccr reference_ccr
} }
@ -114,7 +122,8 @@ fn cir_routinator_script_matches_reference_on_ta_only_cir() {
materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize"); materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize");
let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root); let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root);
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_routinator.sh"); let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_routinator.sh");
let out = Command::new(script) let out = Command::new(script)
.args([ .args([
"--cir", "--cir",
@ -128,7 +137,11 @@ fn cir_routinator_script_matches_reference_on_ta_only_cir() {
]) ])
.output() .output()
.expect("run routinator cir script"); .expect("run routinator cir script");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let summary: serde_json::Value = serde_json::from_slice( let summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(out_dir.join("compare-summary.json")).expect("read summary"), &std::fs::read(out_dir.join("compare-summary.json")).expect("read summary"),
) )
@ -156,7 +169,8 @@ fn cir_rpki_client_script_matches_reference_on_ta_only_cir() {
materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize"); materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize");
let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root); let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root);
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_rpki_client.sh"); let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_rpki_client.sh");
let out = Command::new(script) let out = Command::new(script)
.args([ .args([
"--cir", "--cir",
@ -172,7 +186,11 @@ fn cir_rpki_client_script_matches_reference_on_ta_only_cir() {
]) ])
.output() .output()
.expect("run rpki-client cir script"); .expect("run rpki-client cir script");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let summary: serde_json::Value = serde_json::from_slice( let summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(out_dir.join("compare-summary.json")).expect("read summary"), &std::fs::read(out_dir.join("compare-summary.json")).expect("read summary"),
) )

View File

@ -1,27 +1,40 @@
use std::path::PathBuf; use std::path::PathBuf;
use std::process::Command; use std::process::Command;
use rpki::ccr::{encode_content_info, CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState}; use rpki::ccr::{
use rpki::cir::{encode_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1}; CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState,
encode_content_info,
};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
};
#[test] #[test]
fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() { fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() {
let out_dir = tempfile::tempdir().expect("tempdir"); let out_dir = tempfile::tempdir().expect("tempdir");
let out = out_dir.path().join("cir-sequence"); let out = out_dir.path().join("cir-sequence");
let script = let script = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_record_sequence_offline.sh"); .join("scripts/cir/run_cir_record_sequence_offline.sh");
let fixture_root = out_dir.path().join("fixture"); let fixture_root = out_dir.path().join("fixture");
let static_payload_root = fixture_root.join("payloads"); let static_payload_root = fixture_root.join("payloads");
std::fs::create_dir_all(&static_payload_root).unwrap(); std::fs::create_dir_all(&static_payload_root).unwrap();
let base_locks = fixture_root.join("base-locks.json"); let base_locks = fixture_root.join("base-locks.json");
let delta_locks = fixture_root.join("locks-delta.json"); let delta_locks = fixture_root.join("locks-delta.json");
std::fs::write(&base_locks, br#"{"validationTime":"2026-03-16T11:49:15Z"}"#).unwrap(); std::fs::write(&base_locks, br#"{"validationTime":"2026-03-16T11:49:15Z"}"#).unwrap();
std::fs::write(&delta_locks, br#"{"validationTime":"2026-03-16T11:50:15Z"}"#).unwrap(); std::fs::write(
&delta_locks,
br#"{"validationTime":"2026-03-16T11:50:15Z"}"#,
)
.unwrap();
let mk_cir = |uri: &str, hash_hex: &str, vt: &str| CanonicalInputRepresentation { let mk_cir = |uri: &str, hash_hex: &str, vt: &str| CanonicalInputRepresentation {
version: CIR_VERSION_V1, version: CIR_VERSION_V1,
hash_alg: CirHashAlgorithm::Sha256, hash_alg: CirHashAlgorithm::Sha256,
validation_time: time::OffsetDateTime::parse(vt, &time::format_description::well_known::Rfc3339).unwrap(), validation_time: time::OffsetDateTime::parse(
vt,
&time::format_description::well_known::Rfc3339,
)
.unwrap(),
objects: vec![CirObject { objects: vec![CirObject {
rsync_uri: uri.to_string(), rsync_uri: uri.to_string(),
sha256: hex::decode(hash_hex).unwrap(), sha256: hex::decode(hash_hex).unwrap(),
@ -39,7 +52,11 @@ fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() {
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
hex::encode(Sha256::digest(b"delta-object")) hex::encode(Sha256::digest(b"delta-object"))
}; };
let full_cir = mk_cir("rsync://example.net/repo/full.roa", &full_hash, "2026-03-16T11:49:15Z"); let full_cir = mk_cir(
"rsync://example.net/repo/full.roa",
&full_hash,
"2026-03-16T11:49:15Z",
);
let delta_cir = CanonicalInputRepresentation { let delta_cir = CanonicalInputRepresentation {
version: CIR_VERSION_V1, version: CIR_VERSION_V1,
hash_alg: CirHashAlgorithm::Sha256, hash_alg: CirHashAlgorithm::Sha256,
@ -68,7 +85,10 @@ fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() {
mfts: None, mfts: None,
vrps: None, vrps: None,
vaps: None, vaps: None,
tas: Some(TrustAnchorState { skis: vec![vec![0x11; 20]], hash: vec![0x22; 32] }), tas: Some(TrustAnchorState {
skis: vec![vec![0x11; 20]],
hash: vec![0x22; 32],
}),
rks: None, rks: None,
}); });
let full_cir_path = fixture_root.join("full.cir"); let full_cir_path = fixture_root.join("full.cir");
@ -82,8 +102,16 @@ fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() {
std::fs::write(&delta_cir_path, encode_cir(&delta_cir).unwrap()).unwrap(); std::fs::write(&delta_cir_path, encode_cir(&delta_cir).unwrap()).unwrap();
std::fs::write(&full_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap(); std::fs::write(&full_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap();
std::fs::write(&delta_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap(); std::fs::write(&delta_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap();
std::fs::write(&full_report_path, br#"{"format_version":2,"publication_points":[]}"#).unwrap(); std::fs::write(
std::fs::write(&delta_report_path, br#"{"format_version":2,"publication_points":[]}"#).unwrap(); &full_report_path,
br#"{"format_version":2,"publication_points":[]}"#,
)
.unwrap();
std::fs::write(
&delta_report_path,
br#"{"format_version":2,"publication_points":[]}"#,
)
.unwrap();
std::fs::write(static_payload_root.join("full-object"), b"full-object").unwrap(); std::fs::write(static_payload_root.join("full-object"), b"full-object").unwrap();
std::fs::write(static_payload_root.join("delta-object"), b"delta-object").unwrap(); std::fs::write(static_payload_root.join("delta-object"), b"delta-object").unwrap();

View File

@ -1,7 +1,10 @@
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::Command; use std::process::Command;
use rpki::cir::{encode_cir, materialize_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1}; use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
materialize_cir,
};
fn apnic_tal_path() -> PathBuf { fn apnic_tal_path() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/tal/apnic-rfc7730-https.tal") PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/tal/apnic-rfc7730-https.tal")
@ -56,7 +59,11 @@ fn write_static(root: &Path, date: &str, bytes: &[u8]) {
std::fs::write(dir.join(hash), bytes).expect("write static object"); std::fs::write(dir.join(hash), bytes).expect("write static object");
} }
fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror_root: &Path) -> PathBuf { fn prepare_reference_ccr(
work: &Path,
cir: &CanonicalInputRepresentation,
mirror_root: &Path,
) -> PathBuf {
let reference_ccr = work.join("reference.ccr"); let reference_ccr = work.join("reference.ccr");
let rpki_bin = env!("CARGO_BIN_EXE_rpki"); let rpki_bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper"); let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
@ -77,7 +84,9 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
"--rsync-command", "--rsync-command",
wrapper.to_string_lossy().as_ref(), wrapper.to_string_lossy().as_ref(),
"--validation-time", "--validation-time",
&cir.validation_time.format(&time::format_description::well_known::Rfc3339).unwrap(), &cir.validation_time
.format(&time::format_description::well_known::Rfc3339)
.unwrap(),
"--max-depth", "--max-depth",
"0", "0",
"--max-instances", "--max-instances",
@ -87,7 +96,11 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
]) ])
.output() .output()
.expect("run reference rpki"); .expect("run reference rpki");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
reference_ccr reference_ccr
} }
@ -102,14 +115,30 @@ fn prepare_sequence_root(td: &Path) -> PathBuf {
let (cir, ta_bytes) = build_ta_only_cir(); let (cir, ta_bytes) = build_ta_only_cir();
let cir_bytes = encode_cir(&cir).expect("encode cir"); let cir_bytes = encode_cir(&cir).expect("encode cir");
std::fs::write(sequence_root.join("full").join("input.cir"), &cir_bytes).unwrap(); std::fs::write(sequence_root.join("full").join("input.cir"), &cir_bytes).unwrap();
std::fs::write(sequence_root.join("delta-001").join("input.cir"), &cir_bytes).unwrap(); std::fs::write(
std::fs::write(sequence_root.join("delta-002").join("input.cir"), &cir_bytes).unwrap(); sequence_root.join("delta-001").join("input.cir"),
&cir_bytes,
)
.unwrap();
std::fs::write(
sequence_root.join("delta-002").join("input.cir"),
&cir_bytes,
)
.unwrap();
write_static(&static_root, "20260407", &ta_bytes); write_static(&static_root, "20260407", &ta_bytes);
materialize_cir(&cir, &static_root, &mirror_root, true).unwrap(); materialize_cir(&cir, &static_root, &mirror_root, true).unwrap();
let reference = prepare_reference_ccr(td, &cir, &mirror_root); let reference = prepare_reference_ccr(td, &cir, &mirror_root);
std::fs::copy(&reference, sequence_root.join("full").join("result.ccr")).unwrap(); std::fs::copy(&reference, sequence_root.join("full").join("result.ccr")).unwrap();
std::fs::copy(&reference, sequence_root.join("delta-001").join("result.ccr")).unwrap(); std::fs::copy(
std::fs::copy(&reference, sequence_root.join("delta-002").join("result.ccr")).unwrap(); &reference,
sequence_root.join("delta-001").join("result.ccr"),
)
.unwrap();
std::fs::copy(
&reference,
sequence_root.join("delta-002").join("result.ccr"),
)
.unwrap();
std::fs::write(sequence_root.join("full").join("report.json"), b"{}").unwrap(); std::fs::write(sequence_root.join("full").join("report.json"), b"{}").unwrap();
std::fs::write(sequence_root.join("delta-001").join("report.json"), b"{}").unwrap(); std::fs::write(sequence_root.join("delta-001").join("report.json"), b"{}").unwrap();
std::fs::write(sequence_root.join("delta-002").join("report.json"), b"{}").unwrap(); std::fs::write(sequence_root.join("delta-002").join("report.json"), b"{}").unwrap();
@ -122,7 +151,11 @@ fn prepare_sequence_root(td: &Path) -> PathBuf {
{"stepId":"delta-002","kind":"delta","validationTime":"2026-04-07T00:00:00Z","cirPath":"delta-002/input.cir","ccrPath":"delta-002/result.ccr","reportPath":"delta-002/report.json","previousStepId":"delta-001"} {"stepId":"delta-002","kind":"delta","validationTime":"2026-04-07T00:00:00Z","cirPath":"delta-002/input.cir","ccrPath":"delta-002/result.ccr","reportPath":"delta-002/report.json","previousStepId":"delta-001"}
] ]
}); });
std::fs::write(sequence_root.join("sequence.json"), serde_json::to_vec_pretty(&sequence).unwrap()).unwrap(); std::fs::write(
sequence_root.join("sequence.json"),
serde_json::to_vec_pretty(&sequence).unwrap(),
)
.unwrap();
sequence_root sequence_root
} }
@ -141,13 +174,14 @@ fn peer_sequence_replay_scripts_replay_all_steps() {
let routinator_script = PathBuf::from(env!("CARGO_MANIFEST_DIR")) let routinator_script = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("scripts/cir/run_cir_replay_sequence_routinator.sh"); .join("scripts/cir/run_cir_replay_sequence_routinator.sh");
let out = Command::new(routinator_script) let out = Command::new(routinator_script)
.args([ .args(["--sequence-root", sequence_root.to_string_lossy().as_ref()])
"--sequence-root",
sequence_root.to_string_lossy().as_ref(),
])
.output() .output()
.expect("run routinator sequence replay"); .expect("run routinator sequence replay");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let r_summary: serde_json::Value = serde_json::from_slice( let r_summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(sequence_root.join("sequence-summary-routinator.json")).unwrap(), &std::fs::read(sequence_root.join("sequence-summary-routinator.json")).unwrap(),
) )
@ -166,7 +200,11 @@ fn peer_sequence_replay_scripts_replay_all_steps() {
]) ])
.output() .output()
.expect("run rpki-client sequence replay"); .expect("run rpki-client sequence replay");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let c_summary: serde_json::Value = serde_json::from_slice( let c_summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(sequence_root.join("sequence-summary-rpki-client.json")).unwrap(), &std::fs::read(sequence_root.join("sequence-summary-rpki-client.json")).unwrap(),
) )

View File

@ -1,7 +1,10 @@
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::Command; use std::process::Command;
use rpki::cir::{encode_cir, materialize_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1}; use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
materialize_cir,
};
fn apnic_tal_path() -> PathBuf { fn apnic_tal_path() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/tal/apnic-rfc7730-https.tal") PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/tal/apnic-rfc7730-https.tal")
@ -56,7 +59,11 @@ fn write_static(root: &Path, date: &str, bytes: &[u8]) {
std::fs::write(dir.join(hash), bytes).expect("write static object"); std::fs::write(dir.join(hash), bytes).expect("write static object");
} }
fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror_root: &Path) -> PathBuf { fn prepare_reference_ccr(
work: &Path,
cir: &CanonicalInputRepresentation,
mirror_root: &Path,
) -> PathBuf {
let reference_ccr = work.join("reference.ccr"); let reference_ccr = work.join("reference.ccr");
let rpki_bin = env!("CARGO_BIN_EXE_rpki"); let rpki_bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper"); let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
@ -77,7 +84,9 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
"--rsync-command", "--rsync-command",
wrapper.to_string_lossy().as_ref(), wrapper.to_string_lossy().as_ref(),
"--validation-time", "--validation-time",
&cir.validation_time.format(&time::format_description::well_known::Rfc3339).unwrap(), &cir.validation_time
.format(&time::format_description::well_known::Rfc3339)
.unwrap(),
"--max-depth", "--max-depth",
"0", "0",
"--max-instances", "--max-instances",
@ -87,7 +96,11 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
]) ])
.output() .output()
.expect("run reference rpki"); .expect("run reference rpki");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
reference_ccr reference_ccr
} }
@ -107,14 +120,30 @@ fn ours_sequence_replay_script_replays_all_steps() {
let (cir, ta_bytes) = build_ta_only_cir(); let (cir, ta_bytes) = build_ta_only_cir();
let cir_bytes = encode_cir(&cir).expect("encode cir"); let cir_bytes = encode_cir(&cir).expect("encode cir");
std::fs::write(sequence_root.join("full").join("input.cir"), &cir_bytes).unwrap(); std::fs::write(sequence_root.join("full").join("input.cir"), &cir_bytes).unwrap();
std::fs::write(sequence_root.join("delta-001").join("input.cir"), &cir_bytes).unwrap(); std::fs::write(
std::fs::write(sequence_root.join("delta-002").join("input.cir"), &cir_bytes).unwrap(); sequence_root.join("delta-001").join("input.cir"),
&cir_bytes,
)
.unwrap();
std::fs::write(
sequence_root.join("delta-002").join("input.cir"),
&cir_bytes,
)
.unwrap();
write_static(&static_root, "20260407", &ta_bytes); write_static(&static_root, "20260407", &ta_bytes);
materialize_cir(&cir, &static_root, &mirror_root, true).unwrap(); materialize_cir(&cir, &static_root, &mirror_root, true).unwrap();
let reference = prepare_reference_ccr(td.path(), &cir, &mirror_root); let reference = prepare_reference_ccr(td.path(), &cir, &mirror_root);
std::fs::copy(&reference, sequence_root.join("full").join("result.ccr")).unwrap(); std::fs::copy(&reference, sequence_root.join("full").join("result.ccr")).unwrap();
std::fs::copy(&reference, sequence_root.join("delta-001").join("result.ccr")).unwrap(); std::fs::copy(
std::fs::copy(&reference, sequence_root.join("delta-002").join("result.ccr")).unwrap(); &reference,
sequence_root.join("delta-001").join("result.ccr"),
)
.unwrap();
std::fs::copy(
&reference,
sequence_root.join("delta-002").join("result.ccr"),
)
.unwrap();
std::fs::write(sequence_root.join("full").join("report.json"), b"{}").unwrap(); std::fs::write(sequence_root.join("full").join("report.json"), b"{}").unwrap();
std::fs::write(sequence_root.join("delta-001").join("report.json"), b"{}").unwrap(); std::fs::write(sequence_root.join("delta-001").join("report.json"), b"{}").unwrap();
std::fs::write(sequence_root.join("delta-002").join("report.json"), b"{}").unwrap(); std::fs::write(sequence_root.join("delta-002").join("report.json"), b"{}").unwrap();
@ -134,8 +163,8 @@ fn ours_sequence_replay_script_replays_all_steps() {
) )
.unwrap(); .unwrap();
let script = let script = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_sequence_ours.sh"); .join("scripts/cir/run_cir_replay_sequence_ours.sh");
let out = Command::new(script) let out = Command::new(script)
.args([ .args([
"--sequence-root", "--sequence-root",
@ -145,10 +174,15 @@ fn ours_sequence_replay_script_replays_all_steps() {
]) ])
.output() .output()
.expect("run sequence replay"); .expect("run sequence replay");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let summary: serde_json::Value = let summary: serde_json::Value = serde_json::from_slice(
serde_json::from_slice(&std::fs::read(sequence_root.join("sequence-summary.json")).unwrap()) &std::fs::read(sequence_root.join("sequence-summary.json")).unwrap(),
)
.unwrap(); .unwrap();
assert_eq!(summary["stepCount"], 3); assert_eq!(summary["stepCount"], 3);
assert_eq!(summary["allMatch"], true); assert_eq!(summary["allMatch"], true);

View File

@ -1,6 +1,6 @@
use std::os::unix::fs::MetadataExt;
use std::path::PathBuf; use std::path::PathBuf;
use std::process::Command; use std::process::Command;
use std::os::unix::fs::MetadataExt;
fn wrapper_path() -> PathBuf { fn wrapper_path() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper") PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper")
@ -24,7 +24,11 @@ fn cir_rsync_wrapper_passes_through_help() {
.arg("-h") .arg("-h")
.output() .output()
.expect("run wrapper -h"); .expect("run wrapper -h");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let stdout = String::from_utf8_lossy(&out.stdout); let stdout = String::from_utf8_lossy(&out.stdout);
let stderr = String::from_utf8_lossy(&out.stderr); let stderr = String::from_utf8_lossy(&out.stderr);
assert!(stdout.contains("rsync") || stderr.contains("rsync")); assert!(stdout.contains("rsync") || stderr.contains("rsync"));
@ -60,8 +64,15 @@ fn cir_rsync_wrapper_rewrites_rsync_source_to_mirror_tree() {
]) ])
.output() .output()
.expect("run wrapper rewrite"); .expect("run wrapper rewrite");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
assert_eq!(std::fs::read(dest_root.join("a.roa")).expect("read copied roa"), b"roa"); out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
assert_eq!(
std::fs::read(dest_root.join("a.roa")).expect("read copied roa"),
b"roa"
);
assert!(!dest_root.join("nested").join("b.txt").exists()); assert!(!dest_root.join("nested").join("b.txt").exists());
} }
@ -93,13 +104,23 @@ fn cir_rsync_wrapper_rewrites_module_root_without_trailing_slash_as_contents() {
]) ])
.output() .output()
.expect("run wrapper rewrite"); .expect("run wrapper rewrite");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
assert_eq!(std::fs::read(dest_root.join("root.cer")).expect("read copied root cer"), b"cer"); out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
assert_eq!(
std::fs::read(dest_root.join("root.cer")).expect("read copied root cer"),
b"cer"
);
assert_eq!( assert_eq!(
std::fs::read(dest_root.join("sub").join("child.roa")).expect("read copied child roa"), std::fs::read(dest_root.join("sub").join("child.roa")).expect("read copied child roa"),
b"roa" b"roa"
); );
assert!(!dest_root.join("repo").exists(), "module root must not be nested under destination"); assert!(
!dest_root.join("repo").exists(),
"module root must not be nested under destination"
);
} }
#[test] #[test]
@ -113,7 +134,11 @@ fn cir_rsync_wrapper_requires_mirror_root_for_rsync_source() {
let out = Command::new(wrapper_path()) let out = Command::new(wrapper_path())
.env("REAL_RSYNC_BIN", real) .env("REAL_RSYNC_BIN", real)
.args(["-rt", "rsync://example.net/repo/", dest_root.to_string_lossy().as_ref()]) .args([
"-rt",
"rsync://example.net/repo/",
dest_root.to_string_lossy().as_ref(),
])
.output() .output()
.expect("run wrapper missing env"); .expect("run wrapper missing env");
assert!(!out.status.success()); assert!(!out.status.success());
@ -141,8 +166,15 @@ fn cir_rsync_wrapper_leaves_local_source_untouched() {
]) ])
.output() .output()
.expect("run wrapper local passthrough"); .expect("run wrapper local passthrough");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
assert_eq!(std::fs::read(dest_root.join("src").join("x.cer")).expect("read copied file"), b"x"); out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
assert_eq!(
std::fs::read(dest_root.join("src").join("x.cer")).expect("read copied file"),
b"x"
);
} }
#[test] #[test]
@ -177,7 +209,11 @@ fn cir_rsync_wrapper_local_link_mode_uses_hardlinks_for_rewritten_sources() {
]) ])
.output() .output()
.expect("run wrapper local-link mode"); .expect("run wrapper local-link mode");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let dst_file = dest_root.join("a.roa"); let dst_file = dest_root.join("a.roa");
let dst_nested = dest_root.join("nested").join("b.cer"); let dst_nested = dest_root.join("nested").join("b.cer");
@ -186,5 +222,9 @@ fn cir_rsync_wrapper_local_link_mode_uses_hardlinks_for_rewritten_sources() {
let src_meta = std::fs::metadata(&src_file).expect("src metadata"); let src_meta = std::fs::metadata(&src_file).expect("src metadata");
let dst_meta = std::fs::metadata(&dst_file).expect("dst metadata"); let dst_meta = std::fs::metadata(&dst_file).expect("dst metadata");
assert_eq!(src_meta.ino(), dst_meta.ino(), "expected hardlinked destination file"); assert_eq!(
src_meta.ino(),
dst_meta.ino(),
"expected hardlinked destination file"
);
} }

View File

@ -19,21 +19,15 @@ fn cli_payload_delta_replay_rejects_wrong_base_locks() {
let delta_archive = demo_root.join("payload-delta-archive"); let delta_archive = demo_root.join("payload-delta-archive");
let delta_locks = demo_root.join("locks-delta.json"); let delta_locks = demo_root.join("locks-delta.json");
assert!( if !base_archive.is_dir() || !delta_archive.is_dir() || !delta_locks.is_file() {
base_archive.is_dir(), eprintln!(
"base archive missing: {}", "skipping cli delta replay smoke; missing fixtures: base_archive={} delta_archive={} delta_locks={}",
base_archive.display() base_archive.display(),
); delta_archive.display(),
assert!(
delta_archive.is_dir(),
"delta archive missing: {}",
delta_archive.display()
);
assert!(
delta_locks.is_file(),
"delta locks missing: {}",
delta_locks.display() delta_locks.display()
); );
return;
}
let out = Command::new(bin) let out = Command::new(bin)
.args([ .args([

View File

@ -16,16 +16,14 @@ fn cli_payload_replay_root_only_smoke_writes_report_json() {
let locks_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) let locks_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("target/live/payload_replay/locks.json"); .join("target/live/payload_replay/locks.json");
assert!( if !archive_root.is_dir() || !locks_path.is_file() {
archive_root.is_dir(), eprintln!(
"payload replay archive missing: {}", "skipping cli payload replay smoke; missing fixtures: archive={} locks={}",
archive_root.display() archive_root.display(),
);
assert!(
locks_path.is_file(),
"payload replay locks missing: {}",
locks_path.display() locks_path.display()
); );
return;
}
let out = Command::new(bin) let out = Command::new(bin)
.args([ .args([

View File

@ -45,7 +45,6 @@ fn cli_run_offline_mode_executes_and_writes_json_and_ccr() {
assert_eq!(v["format_version"], 2); assert_eq!(v["format_version"], 2);
} }
#[test] #[test]
fn cli_run_offline_mode_writes_decodable_ccr() { fn cli_run_offline_mode_writes_decodable_ccr() {
let db_dir = tempfile::tempdir().expect("db tempdir"); let db_dir = tempfile::tempdir().expect("db tempdir");
@ -130,10 +129,11 @@ fn cli_run_offline_mode_writes_cir_and_static_pool() {
let cir = rpki::cir::decode_cir(&bytes).expect("decode cir"); let cir = rpki::cir::decode_cir(&bytes).expect("decode cir");
assert_eq!(cir.tals.len(), 1); assert_eq!(cir.tals.len(), 1);
assert_eq!(cir.tals[0].tal_uri, "https://example.test/root.tal"); assert_eq!(cir.tals[0].tal_uri, "https://example.test/root.tal");
assert!(cir assert!(
.objects cir.objects
.iter() .iter()
.any(|item| item.rsync_uri.contains("apnic-rpki-root-iana-origin.cer"))); .any(|item| item.rsync_uri.contains("apnic-rpki-root-iana-origin.cer"))
);
let mut file_count = 0usize; let mut file_count = 0usize;
let mut stack = vec![static_root.clone()]; let mut stack = vec![static_root.clone()];
@ -182,14 +182,18 @@ fn cli_run_blackbox_rsync_wrapper_mode_matches_reference_ccr_without_ta_path() {
.expect("write ta into mirror"); .expect("write ta into mirror");
let bin = env!("CARGO_BIN_EXE_rpki"); let bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")) let wrapper =
.join("scripts/cir/cir-rsync-wrapper"); std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
let reference = Command::new(bin) let reference = Command::new(bin)
.env("REAL_RSYNC_BIN", real_rsync) .env("REAL_RSYNC_BIN", real_rsync)
.env("CIR_MIRROR_ROOT", &mirror_root) .env("CIR_MIRROR_ROOT", &mirror_root)
.args([ .args([
"--db", "--db",
db_dir.path().join("reference-db").to_string_lossy().as_ref(), db_dir
.path()
.join("reference-db")
.to_string_lossy()
.as_ref(),
"--tal-path", "--tal-path",
tal_path.to_string_lossy().as_ref(), tal_path.to_string_lossy().as_ref(),
"--ta-path", "--ta-path",
@ -208,7 +212,11 @@ fn cli_run_blackbox_rsync_wrapper_mode_matches_reference_ccr_without_ta_path() {
]) ])
.output() .output()
.expect("run reference wrapper mode"); .expect("run reference wrapper mode");
assert!(reference.status.success(), "stderr={}", String::from_utf8_lossy(&reference.stderr)); assert!(
reference.status.success(),
"stderr={}",
String::from_utf8_lossy(&reference.stderr)
);
let out = Command::new(bin) let out = Command::new(bin)
.env("REAL_RSYNC_BIN", real_rsync) .env("REAL_RSYNC_BIN", real_rsync)
@ -232,7 +240,11 @@ fn cli_run_blackbox_rsync_wrapper_mode_matches_reference_ccr_without_ta_path() {
]) ])
.output() .output()
.expect("run blackbox wrapper mode"); .expect("run blackbox wrapper mode");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr)); assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let reference = rpki::ccr::decode_content_info(&std::fs::read(&ref_ccr_path).unwrap()) let reference = rpki::ccr::decode_content_info(&std::fs::read(&ref_ccr_path).unwrap())
.expect("decode reference ccr"); .expect("decode reference ccr");

View File

@ -6,9 +6,9 @@ use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy}; use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{ use rpki::storage::{
PackTime, RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore, PackTime, RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore,
ValidatedCaInstanceResult, ValidatedManifestMeta, ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind, VcirArtifactRole,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirArtifactValidationStatus, VcirAuditSummary, VcirInstanceGate, VcirRelatedArtifact,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary, VcirSummary,
}; };
use rpki::validation::manifest::{PublicationPointSource, process_manifest_publication_point}; use rpki::validation::manifest::{PublicationPointSource, process_manifest_publication_point};
@ -165,7 +165,12 @@ fn manifest_success_returns_validated_publication_point_data() {
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
} }
let policy = Policy::default(); let policy = Policy::default();
@ -218,7 +223,12 @@ fn manifest_hash_mismatch_reuses_current_instance_vcir_when_enabled() {
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
} }
let policy = Policy::default(); let policy = Policy::default();
@ -255,7 +265,12 @@ fn manifest_hash_mismatch_reuses_current_instance_vcir_when_enabled() {
.expect("load victim raw") .expect("load victim raw")
.expect("victim raw exists"); .expect("victim raw exists");
tampered[0] ^= 0xFF; tampered[0] ^= 0xFF;
put_current_object(&store, &victim_uri, tampered, victim_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&victim_uri,
tampered,
victim_uri.rsplit('.').next().unwrap_or("bin"),
);
let second = process_manifest_publication_point( let second = process_manifest_publication_point(
&store, &store,
@ -304,7 +319,12 @@ fn manifest_failed_fetch_stop_all_output() {
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
} }
let mut policy = Policy::default(); let mut policy = Policy::default();
@ -332,7 +352,12 @@ fn manifest_failed_fetch_stop_all_output() {
.expect("load victim raw") .expect("load victim raw")
.expect("victim raw exists"); .expect("victim raw exists");
tampered[0] ^= 0xFF; tampered[0] ^= 0xFF;
put_current_object(&store, &victim_uri, tampered, victim_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&victim_uri,
tampered,
victim_uri.rsplit('.').next().unwrap_or("bin"),
);
policy.ca_failed_fetch_policy = CaFailedFetchPolicy::StopAllOutput; policy.ca_failed_fetch_policy = CaFailedFetchPolicy::StopAllOutput;
let err = process_manifest_publication_point( let err = process_manifest_publication_point(
@ -378,7 +403,12 @@ fn manifest_failed_fetch_rejects_stale_current_instance_vcir() {
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
} }
let policy = Policy::default(); let policy = Policy::default();
@ -459,7 +489,12 @@ fn manifest_revalidation_with_unchanged_manifest_is_fresh() {
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
} }
let policy = Policy::default(); let policy = Policy::default();
@ -539,7 +574,12 @@ fn manifest_rollback_is_treated_as_failed_fetch_and_reuses_current_instance_vcir
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
} }
let policy = Policy::default(); let policy = Policy::default();

View File

@ -6,9 +6,9 @@ use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy}; use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{ use rpki::storage::{
PackTime, RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore, PackTime, RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore,
ValidatedCaInstanceResult, ValidatedManifestMeta, ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind, VcirArtifactRole,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirArtifactValidationStatus, VcirAuditSummary, VcirInstanceGate, VcirRelatedArtifact,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary, VcirSummary,
}; };
use rpki::validation::manifest::{ use rpki::validation::manifest::{
ManifestProcessError, PublicationPointSource, process_manifest_publication_point, ManifestProcessError, PublicationPointSource, process_manifest_publication_point,
@ -161,7 +161,12 @@ fn store_manifest_and_locked_files(
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin")); put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
} }
} }

View File

@ -19,7 +19,11 @@ fn wrapper_script() -> std::path::PathBuf {
#[test] #[test]
fn multi_rir_case_info_resolves_all_five_rirs_and_timings() { fn multi_rir_case_info_resolves_all_five_rirs_and_timings() {
let bundle_root = multi_rir_bundle_root(); let bundle_root = multi_rir_bundle_root();
assert!(bundle_root.is_dir(), "bundle root missing: {}", bundle_root.display()); assert!(
bundle_root.is_dir(),
"bundle root missing: {}",
bundle_root.display()
);
let expected = [ let expected = [
("afrinic", "afrinic", "afrinic.tal", "afrinic-ta.cer"), ("afrinic", "afrinic", "afrinic.tal", "afrinic-ta.cer"),
@ -53,22 +57,61 @@ fn multi_rir_case_info_resolves_all_five_rirs_and_timings() {
serde_json::from_slice(&out.stdout).expect("parse helper json"); serde_json::from_slice(&out.stdout).expect("parse helper json");
assert_eq!(json["rir"].as_str(), Some(rir)); assert_eq!(json["rir"].as_str(), Some(rir));
assert_eq!(json["trust_anchor"].as_str(), Some(trust_anchor)); assert_eq!(json["trust_anchor"].as_str(), Some(trust_anchor));
assert!(json["base_archive"].as_str().unwrap_or("").ends_with("base-payload-archive")); assert!(
assert!(json["delta_archive"].as_str().unwrap_or("").ends_with("payload-delta-archive")); json["base_archive"]
assert!(json["base_locks"].as_str().unwrap_or("").ends_with("base-locks.json")); .as_str()
assert!(json["delta_locks"].as_str().unwrap_or("").ends_with("locks-delta.json")); .unwrap_or("")
assert!(json["tal_path"].as_str().unwrap_or("").ends_with(tal_suffix)); .ends_with("base-payload-archive")
);
assert!(
json["delta_archive"]
.as_str()
.unwrap_or("")
.ends_with("payload-delta-archive")
);
assert!(
json["base_locks"]
.as_str()
.unwrap_or("")
.ends_with("base-locks.json")
);
assert!(
json["delta_locks"]
.as_str()
.unwrap_or("")
.ends_with("locks-delta.json")
);
assert!(
json["tal_path"]
.as_str()
.unwrap_or("")
.ends_with(tal_suffix)
);
assert!(json["ta_path"].as_str().unwrap_or("").ends_with(ta_suffix)); assert!(json["ta_path"].as_str().unwrap_or("").ends_with(ta_suffix));
assert!(json["validation_times"]["snapshot"].as_str().unwrap_or("").contains("T")); assert!(
assert!(json["validation_times"]["delta"].as_str().unwrap_or("").contains("T")); json["validation_times"]["snapshot"]
assert!(json["routinator_timings"]["base_replay_seconds"] .as_str()
.unwrap_or("")
.contains("T")
);
assert!(
json["validation_times"]["delta"]
.as_str()
.unwrap_or("")
.contains("T")
);
assert!(
json["routinator_timings"]["base_replay_seconds"]
.as_f64() .as_f64()
.unwrap_or(0.0) .unwrap_or(0.0)
> 0.0); > 0.0
assert!(json["routinator_timings"]["delta_replay_seconds"] );
assert!(
json["routinator_timings"]["delta_replay_seconds"]
.as_f64() .as_f64()
.unwrap_or(0.0) .unwrap_or(0.0)
> 0.0); > 0.0
);
} }
} }
@ -113,8 +156,12 @@ fn multi_rir_case_info_prefers_lock_validation_time_over_replay_started_at() {
"verification.json", "verification.json",
"README.md", "README.md",
] { ] {
fs::write(rir_root.join(rel), "placeholder fs::write(
").expect("write required file"); rir_root.join(rel),
"placeholder
",
)
.expect("write required file");
} }
let repo_root = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")); let repo_root = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
@ -163,7 +210,11 @@ stderr={}",
#[test] #[test]
fn multi_rir_wrapper_describe_mode_works_for_ripe() { fn multi_rir_wrapper_describe_mode_works_for_ripe() {
let bundle_root = multi_rir_bundle_root(); let bundle_root = multi_rir_bundle_root();
assert!(bundle_root.is_dir(), "bundle root missing: {}", bundle_root.display()); assert!(
bundle_root.is_dir(),
"bundle root missing: {}",
bundle_root.display()
);
let out = Command::new(wrapper_script()) let out = Command::new(wrapper_script())
.env("BUNDLE_ROOT", &bundle_root) .env("BUNDLE_ROOT", &bundle_root)
@ -183,8 +234,10 @@ fn multi_rir_wrapper_describe_mode_works_for_ripe() {
serde_json::from_slice(&out.stdout).expect("parse wrapper describe json"); serde_json::from_slice(&out.stdout).expect("parse wrapper describe json");
assert_eq!(json["rir"].as_str(), Some("ripe")); assert_eq!(json["rir"].as_str(), Some("ripe"));
assert_eq!(json["trust_anchor"].as_str(), Some("ripe")); assert_eq!(json["trust_anchor"].as_str(), Some("ripe"));
assert!(json["verification_json"] assert!(
json["verification_json"]
.as_str() .as_str()
.unwrap_or("") .unwrap_or("")
.ends_with("verification.json")); .ends_with("verification.json")
);
} }

View File

@ -403,7 +403,6 @@ fn process_snapshot_for_issuer_drop_publication_point_on_invalid_aspa_bytes() {
assert!(!out.warnings.is_empty()); assert!(!out.warnings.is_empty());
} }
#[test] #[test]
fn process_snapshot_for_issuer_populates_local_outputs_cache_from_real_cernet_fixture() { fn process_snapshot_for_issuer_populates_local_outputs_cache_from_real_cernet_fixture() {
let (dir, rsync_base_uri, manifest_file) = cernet_fixture(); let (dir, rsync_base_uri, manifest_file) = cernet_fixture();
@ -432,16 +431,21 @@ fn process_snapshot_for_issuer_populates_local_outputs_cache_from_real_cernet_fi
None, None,
); );
assert!(!out.local_outputs_cache.is_empty(), "expected cached VCIR local outputs"); assert!(
!out.local_outputs_cache.is_empty(),
"expected cached VCIR local outputs"
);
assert_eq!(out.local_outputs_cache.len(), out.vrps.len()); assert_eq!(out.local_outputs_cache.len(), out.vrps.len());
assert!(out assert!(
.local_outputs_cache out.local_outputs_cache
.iter() .iter()
.all(|entry| entry.output_type == VcirOutputType::Vrp)); .all(|entry| entry.output_type == VcirOutputType::Vrp)
assert!(out );
.local_outputs_cache assert!(
out.local_outputs_cache
.iter() .iter()
.all(|entry| entry.source_object_type == "roa")); .all(|entry| entry.source_object_type == "roa")
);
} }
// NOTE: DN-based issuer resolution and pack-local CA indexing have been removed for determinism. // NOTE: DN-based issuer resolution and pack-local CA indexing have been removed for determinism.

View File

@ -206,8 +206,14 @@ fn write_multi_rir_case_report_combines_compare_and_timing() {
let md = std::fs::read_to_string(&out_md).expect("read markdown"); let md = std::fs::read_to_string(&out_md).expect("read markdown");
assert!(md.contains("AFRINIC Replay Report"), "{md}"); assert!(md.contains("AFRINIC Replay Report"), "{md}");
assert!(md.contains("| snapshot | true | 12.000 | 6.000 | 2.000 | 0 | 0 |"), "{md}"); assert!(
assert!(md.contains("| delta | true | 8.000 | 4.000 | 2.000 | 0 | 0 |"), "{md}"); md.contains("| snapshot | true | 12.000 | 6.000 | 2.000 | 0 | 0 |"),
"{md}"
);
assert!(
md.contains("| delta | true | 8.000 | 4.000 | 2.000 | 0 | 0 |"),
"{md}"
);
} }
#[test] #[test]
@ -282,7 +288,10 @@ fn write_multi_rir_summary_aggregates_case_reports() {
let md = std::fs::read_to_string(&out_md).expect("read summary md"); let md = std::fs::read_to_string(&out_md).expect("read summary md");
assert!(md.contains("Multi-RIR Replay Summary"), "{md}"); assert!(md.contains("Multi-RIR Replay Summary"), "{md}");
assert!(md.contains("| afrinic | true | 10.000 | 5.000 | 2.000 | true | 12.000 | 6.000 | 3.000 |"), "{md}"); assert!(
md.contains("| afrinic | true | 10.000 | 5.000 | 2.000 | true | 12.000 | 6.000 | 3.000 |"),
"{md}"
);
} }
#[test] #[test]

View File

@ -4,7 +4,7 @@ use std::sync::{Arc, Mutex};
use rpki::fetch::rsync::{LocalDirRsyncFetcher, RsyncFetchError, RsyncFetcher}; use rpki::fetch::rsync::{LocalDirRsyncFetcher, RsyncFetchError, RsyncFetcher};
use rpki::policy::{Policy, SyncPreference}; use rpki::policy::{Policy, SyncPreference};
use rpki::storage::RocksStore; use rpki::storage::RocksStore;
use rpki::sync::repo::{RepoSyncSource, sync_publication_point}; use rpki::sync::repo::{RepoSyncPhase, RepoSyncSource, sync_publication_point};
use rpki::sync::rrdp::Fetcher; use rpki::sync::rrdp::Fetcher;
struct MapFetcher { struct MapFetcher {
@ -98,6 +98,7 @@ fn repo_sync_uses_rrdp_when_available() {
.expect("sync"); .expect("sync");
assert_eq!(out.source, RepoSyncSource::Rrdp); assert_eq!(out.source, RepoSyncSource::Rrdp);
assert_eq!(out.phase, RepoSyncPhase::RrdpOk);
assert_eq!(out.objects_written, 2); assert_eq!(out.objects_written, 2);
assert_eq!(*calls.lock().unwrap(), 0); assert_eq!(*calls.lock().unwrap(), 0);
@ -145,6 +146,7 @@ fn repo_sync_skips_snapshot_when_state_unchanged() {
) )
.expect("sync 1"); .expect("sync 1");
assert_eq!(out1.source, RepoSyncSource::Rrdp); assert_eq!(out1.source, RepoSyncSource::Rrdp);
assert_eq!(out1.phase, RepoSyncPhase::RrdpOk);
assert_eq!(out1.objects_written, 2); assert_eq!(out1.objects_written, 2);
let out2 = sync_publication_point( let out2 = sync_publication_point(
@ -159,6 +161,7 @@ fn repo_sync_skips_snapshot_when_state_unchanged() {
) )
.expect("sync 2"); .expect("sync 2");
assert_eq!(out2.source, RepoSyncSource::Rrdp); assert_eq!(out2.source, RepoSyncSource::Rrdp);
assert_eq!(out2.phase, RepoSyncPhase::RrdpOk);
assert_eq!( assert_eq!(
out2.objects_written, 0, out2.objects_written, 0,
"expected to skip snapshot apply when state unchanged" "expected to skip snapshot apply when state unchanged"
@ -208,6 +211,7 @@ fn repo_sync_falls_back_to_rsync_on_rrdp_failure() {
.expect("fallback sync"); .expect("fallback sync");
assert_eq!(out.source, RepoSyncSource::Rsync); assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.phase, RepoSyncPhase::RrdpFailedRsyncOk);
assert_eq!(out.objects_written, 1); assert_eq!(out.objects_written, 1);
assert_eq!(*calls.lock().unwrap(), 1); assert_eq!(*calls.lock().unwrap(), 1);
assert!(!out.warnings.is_empty()); assert!(!out.warnings.is_empty());
@ -252,6 +256,7 @@ fn repo_sync_rsync_populates_current_repository_view() {
.expect("rsync-only sync"); .expect("rsync-only sync");
assert_eq!(out.source, RepoSyncSource::Rsync); assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.phase, RepoSyncPhase::RsyncOnlyOk);
assert_eq!(out.objects_written, 2); assert_eq!(out.objects_written, 2);
assert_current_object(&store, "rsync://example.net/repo/a/one.cer", b"1"); assert_current_object(&store, "rsync://example.net/repo/a/one.cer", b"1");

View File

@ -1,9 +1,9 @@
use std::process::Command; use std::process::Command;
use rpki::data_model::rc::{ResourceCertificate, ResourceCertKind}; use rpki::data_model::rc::{ResourceCertKind, ResourceCertificate};
use rpki::data_model::router_cert::{ use rpki::data_model::router_cert::{
BgpsecRouterCertificate, BgpsecRouterCertificateDecodeError, BgpsecRouterCertificate, BgpsecRouterCertificateDecodeError, BgpsecRouterCertificatePathError,
BgpsecRouterCertificatePathError, BgpsecRouterCertificateProfileError, BgpsecRouterCertificateProfileError,
}; };
fn openssl_available() -> bool { fn openssl_available() -> bool {
@ -104,7 +104,11 @@ sbgp-autonomousSysNum = critical, AS:64496
authorityKeyIdentifier = keyid:always authorityKeyIdentifier = keyid:always
"#, "#,
dir = dir.display(), dir = dir.display(),
eku_line = if include_eku { "extendedKeyUsage = 1.3.6.1.5.5.7.3.30" } else { "" }, eku_line = if include_eku {
"extendedKeyUsage = 1.3.6.1.5.5.7.3.30"
} else {
""
},
extra_ext = extra_ext extra_ext = extra_ext
); );
std::fs::write(dir.join("openssl.cnf"), cnf.as_bytes()).expect("write cnf"); std::fs::write(dir.join("openssl.cnf"), cnf.as_bytes()).expect("write cnf");
@ -266,7 +270,16 @@ fn decode_bgpsec_router_certificate_fixture_smoke() {
fn router_certificate_profile_rejects_missing_eku() { fn router_certificate_profile_rejects_missing_eku() {
let g = generate_router_cert_with_variant("ec-p256", false, ""); let g = generate_router_cert_with_variant("ec-p256", false, "");
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err(); let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::MissingBgpsecRouterEku | BgpsecRouterCertificateProfileError::MissingExtendedKeyUsage)), "{err}"); assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::MissingBgpsecRouterEku
| BgpsecRouterCertificateProfileError::MissingExtendedKeyUsage
)
),
"{err}"
);
} }
#[test] #[test]
@ -277,7 +290,15 @@ fn router_certificate_profile_rejects_sia_and_ip_resources_and_ranges() {
"subjectInfoAccess = caRepository;URI:rsync://example.test/repo/router/\n", "subjectInfoAccess = caRepository;URI:rsync://example.test/repo/router/\n",
); );
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err(); let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::SubjectInfoAccessPresent)), "{err}"); assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::SubjectInfoAccessPresent
)
),
"{err}"
);
let g = generate_router_cert_with_variant( let g = generate_router_cert_with_variant(
"ec-p256", "ec-p256",
@ -285,7 +306,15 @@ fn router_certificate_profile_rejects_sia_and_ip_resources_and_ranges() {
"sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/8\n", "sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/8\n",
); );
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err(); let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::IpResourcesPresent)), "{err}"); assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::IpResourcesPresent
)
),
"{err}"
);
let g = generate_router_cert_with_variant( let g = generate_router_cert_with_variant(
"ec-p256", "ec-p256",
@ -293,18 +322,43 @@ fn router_certificate_profile_rejects_sia_and_ip_resources_and_ranges() {
"sbgp-autonomousSysNum = critical, AS:64496-64500\n", "sbgp-autonomousSysNum = critical, AS:64496-64500\n",
); );
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err(); let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::AsResourcesRangeNotAllowed)), "{err}"); assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::AsResourcesRangeNotAllowed
)
),
"{err}"
);
} }
#[test] #[test]
fn router_certificate_profile_rejects_wrong_spki_algorithm_or_curve() { fn router_certificate_profile_rejects_wrong_spki_algorithm_or_curve() {
let g = generate_router_cert_with_variant("rsa", true, ""); let g = generate_router_cert_with_variant("rsa", true, "");
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err(); let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::SpkiAlgorithmNotEcPublicKey)), "{err}"); assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::SpkiAlgorithmNotEcPublicKey
)
),
"{err}"
);
let g = generate_router_cert_with_variant("ec-p384", true, ""); let g = generate_router_cert_with_variant("ec-p384", true, "");
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err(); let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::SpkiWrongCurve | BgpsecRouterCertificateProfileError::SpkiEcPointNotUncompressedP256)), "{err}"); assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::SpkiWrongCurve
| BgpsecRouterCertificateProfileError::SpkiEcPointNotUncompressedP256
)
),
"{err}"
);
} }
#[test] #[test]
@ -317,11 +371,15 @@ fn router_certificate_path_validation_accepts_valid_and_rejects_wrong_issuer() {
let g = generate_router_cert_with_variant("ec-p256", true, ""); let g = generate_router_cert_with_variant("ec-p256", true, "");
let issuer = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer"); let issuer = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer");
let wrong_issuer = ResourceCertificate::decode_der(&g.wrong_issuer_der).expect("decode wrong issuer"); let wrong_issuer =
ResourceCertificate::decode_der(&g.wrong_issuer_der).expect("decode wrong issuer");
let issuer_crl = RpkixCrl::decode_der(&g.issuer_crl_der).expect("decode crl"); let issuer_crl = RpkixCrl::decode_der(&g.issuer_crl_der).expect("decode crl");
let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info).expect("issuer spki"); let (rem, issuer_spki) =
SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info).expect("issuer spki");
assert!(rem.is_empty()); assert!(rem.is_empty());
let (rem, wrong_spki) = SubjectPublicKeyInfo::from_der(&wrong_issuer.tbs.subject_public_key_info).expect("wrong issuer spki"); let (rem, wrong_spki) =
SubjectPublicKeyInfo::from_der(&wrong_issuer.tbs.subject_public_key_info)
.expect("wrong issuer spki");
assert!(rem.is_empty()); assert!(rem.is_empty());
let now = time::OffsetDateTime::now_utc(); let now = time::OffsetDateTime::now_utc();
@ -334,7 +392,8 @@ fn router_certificate_path_validation_accepts_valid_and_rejects_wrong_issuer() {
Some("rsync://example.test/repo/issuer/issuer.cer"), Some("rsync://example.test/repo/issuer/issuer.cer"),
Some("rsync://example.test/repo/issuer/issuer.crl"), Some("rsync://example.test/repo/issuer/issuer.crl"),
now, now,
).expect("router path valid"); )
.expect("router path valid");
assert_eq!(cert.asns, vec![64496]); assert_eq!(cert.asns, vec![64496]);
let err = BgpsecRouterCertificate::validate_path_with_prevalidated_issuer( let err = BgpsecRouterCertificate::validate_path_with_prevalidated_issuer(
@ -346,8 +405,12 @@ fn router_certificate_path_validation_accepts_valid_and_rejects_wrong_issuer() {
None, None,
None, None,
now, now,
).unwrap_err(); )
assert!(matches!(err, BgpsecRouterCertificatePathError::CertPath(_)), "{err}"); .unwrap_err();
assert!(
matches!(err, BgpsecRouterCertificatePathError::CertPath(_)),
"{err}"
);
let rc = ResourceCertificate::decode_der(&g.router_der).expect("decode router rc"); let rc = ResourceCertificate::decode_der(&g.router_der).expect("decode router rc");
let mut revoked = HashSet::new(); let mut revoked = HashSet::new();
@ -361,6 +424,10 @@ fn router_certificate_path_validation_accepts_valid_and_rejects_wrong_issuer() {
Some("rsync://example.test/repo/issuer/issuer.cer"), Some("rsync://example.test/repo/issuer/issuer.cer"),
Some("rsync://example.test/repo/issuer/issuer.crl"), Some("rsync://example.test/repo/issuer/issuer.crl"),
now, now,
).unwrap_err(); )
assert!(matches!(err, BgpsecRouterCertificatePathError::CertPath(_)), "{err}"); .unwrap_err();
assert!(
matches!(err, BgpsecRouterCertificatePathError::CertPath(_)),
"{err}"
);
} }

View File

@ -94,12 +94,7 @@ fn rsync_fallback_breakdown_luys_cloud() {
let sha256_hex = hex::encode(sha2::Sha256::digest(bytes)); let sha256_hex = hex::encode(sha2::Sha256::digest(bytes));
let mut raw = RawByHashEntry::from_bytes(sha256_hex.clone(), bytes.clone()); let mut raw = RawByHashEntry::from_bytes(sha256_hex.clone(), bytes.clone());
raw.origin_uris.push(uri.clone()); raw.origin_uris.push(uri.clone());
raw.object_type = Some( raw.object_type = Some(uri.rsplit('.').next().unwrap_or("bin").to_ascii_lowercase());
uri.rsplit('.')
.next()
.unwrap_or("bin")
.to_ascii_lowercase(),
);
raw.encoding = Some("der".to_string()); raw.encoding = Some("der".to_string());
store.put_raw_by_hash_entry(&raw).expect("put raw_by_hash"); store.put_raw_by_hash_entry(&raw).expect("put raw_by_hash");
store store
@ -107,12 +102,7 @@ fn rsync_fallback_breakdown_luys_cloud() {
rsync_uri: uri.clone(), rsync_uri: uri.clone(),
current_hash: Some(sha256_hex), current_hash: Some(sha256_hex),
repository_source: Some(rsync_base_uri.clone()), repository_source: Some(rsync_base_uri.clone()),
object_type: Some( object_type: Some(uri.rsplit('.').next().unwrap_or("bin").to_ascii_lowercase()),
uri.rsplit('.')
.next()
.unwrap_or("bin")
.to_ascii_lowercase(),
),
state: RepositoryViewState::Present, state: RepositoryViewState::Present,
}) })
.expect("put repository view"); .expect("put repository view");

View File

@ -17,7 +17,11 @@ fn signed_object_decode_accepts_real_arin_ber_indefinite_roa_from_multi_rir_bund
let xml_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join( let xml_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../rpki/target/live/20260316-112341-multi-final3/arin/base-payload-archive/v1/captures/arin-base-arin/rrdp/repos/8bd1405242614aed9f13321d266fe3193db0ca842e0cbffda2b3df05481c331b/4a394319-7460-4141-a416-1addb69284ff/snapshot-160090-880434ae2a6f0e5fff224391c65a22ab037e09ac1d3ebad0ceda18558b49b13e.xml", "../../rpki/target/live/20260316-112341-multi-final3/arin/base-payload-archive/v1/captures/arin-base-arin/rrdp/repos/8bd1405242614aed9f13321d266fe3193db0ca842e0cbffda2b3df05481c331b/4a394319-7460-4141-a416-1addb69284ff/snapshot-160090-880434ae2a6f0e5fff224391c65a22ab037e09ac1d3ebad0ceda18558b49b13e.xml",
); );
assert!(xml_path.is_file(), "xml path missing: {}", xml_path.display()); assert!(
xml_path.is_file(),
"xml path missing: {}",
xml_path.display()
);
let xml = std::fs::read_to_string(&xml_path).expect("read snapshot xml"); let xml = std::fs::read_to_string(&xml_path).expect("read snapshot xml");
let uri = "rsync://rpki.arin.net/repository/arin-rpki-ta/5e4a23ea-e80a-403e-b08c-2171da2157d3/2a246947-2d62-4a6c-ba05-87187f0099b2/9ed5ce80-224e-46ab-94f1-1afce8ccf13f/0b13beb5-6bbb-3994-a254-02c5b10175c5.roa"; let uri = "rsync://rpki.arin.net/repository/arin-rpki-ta/5e4a23ea-e80a-403e-b08c-2171da2157d3/2a246947-2d62-4a6c-ba05-87187f0099b2/9ed5ce80-224e-46ab-94f1-1afce8ccf13f/0b13beb5-6bbb-3994-a254-02c5b10175c5.roa";
let der = extract_publish_bytes(&xml, uri); let der = extract_publish_bytes(&xml, uri);
@ -25,8 +29,12 @@ fn signed_object_decode_accepts_real_arin_ber_indefinite_roa_from_multi_rir_bund
assert_eq!(der.first().copied(), Some(0x30)); assert_eq!(der.first().copied(), Some(0x30));
assert_eq!(der.get(1).copied(), Some(0x80)); assert_eq!(der.get(1).copied(), Some(0x80));
let signed_object = RpkiSignedObject::decode_der(&der).expect("decode BER-indefinite CMS signed object"); let signed_object =
assert_eq!(signed_object.signed_data.encap_content_info.econtent_type, "1.2.840.113549.1.9.16.1.24"); RpkiSignedObject::decode_der(&der).expect("decode BER-indefinite CMS signed object");
assert_eq!(
signed_object.signed_data.encap_content_info.econtent_type,
"1.2.840.113549.1.9.16.1.24"
);
let roa = RoaObject::decode_der(&der).expect("decode ROA object from BER-indefinite CMS"); let roa = RoaObject::decode_der(&der).expect("decode ROA object from BER-indefinite CMS");
assert!(!roa.roa.ip_addr_blocks.is_empty()); assert!(!roa.roa.ip_addr_blocks.is_empty());

View File

@ -79,7 +79,9 @@ fn rrdp_source_roundtrip_by_notification_uri() {
last_snapshot_hash: None, last_snapshot_hash: None,
last_error: None, last_error: None,
}; };
store.put_rrdp_source_record(&record).expect("put rrdp_source"); store
.put_rrdp_source_record(&record)
.expect("put rrdp_source");
let got = store let got = store
.get_rrdp_source_record(notif) .get_rrdp_source_record(notif)

View File

@ -395,7 +395,9 @@ fn tree_aggregates_router_keys_from_publication_point_results() {
source_object_uri: "rsync://example.test/repo/router1.cer".to_string(), source_object_uri: "rsync://example.test/repo/router1.cer".to_string(),
source_object_hash: "11".repeat(32), source_object_hash: "11".repeat(32),
source_ee_cert_hash: "11".repeat(32), source_ee_cert_hash: "11".repeat(32),
item_effective_until: PackTime { rfc3339_utc: "2026-12-31T00:00:00Z".to_string() }, item_effective_until: PackTime {
rfc3339_utc: "2026-12-31T00:00:00Z".to_string(),
},
}, },
RouterKeyPayload { RouterKeyPayload {
as_id: 64497, as_id: 64497,
@ -404,7 +406,9 @@ fn tree_aggregates_router_keys_from_publication_point_results() {
source_object_uri: "rsync://example.test/repo/router2.cer".to_string(), source_object_uri: "rsync://example.test/repo/router2.cer".to_string(),
source_object_hash: "22".repeat(32), source_object_hash: "22".repeat(32),
source_ee_cert_hash: "22".repeat(32), source_ee_cert_hash: "22".repeat(32),
item_effective_until: PackTime { rfc3339_utc: "2026-12-31T00:00:00Z".to_string() }, item_effective_until: PackTime {
rfc3339_utc: "2026-12-31T00:00:00Z".to_string(),
},
}, },
], ],
local_outputs_cache: Vec::new(), local_outputs_cache: Vec::new(),