20260410 完成五个rir 基于cir的三方回放,raw by hash 独立db,发现内存占用大,连续大rir 录制发生oom

This commit is contained in:
yuyr 2026-04-11 11:24:32 +08:00
parent e083fe4daa
commit 77fc2f1a41
96 changed files with 5159 additions and 1720 deletions

View File

@ -34,7 +34,11 @@ from pathlib import Path
sequence_root = Path(sys.argv[1]).resolve()
drop_bin = sys.argv[2]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"]
static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
summaries = []
for step in sequence["steps"]:
@ -49,13 +53,15 @@ for step in sequence["steps"]:
str(sequence_root / step["ccrPath"]),
"--report-json",
str(sequence_root / step["reportPath"]),
"--static-root",
str(static_root),
"--json-out",
str(out_dir / "drop.json"),
"--md-out",
str(out_dir / "drop.md"),
]
if static_root is not None:
cmd.extend(["--static-root", str(static_root)])
else:
cmd.extend(["--raw-store-db", str(raw_store_db)])
proc = subprocess.run(cmd, capture_output=True, text=True)
if proc.returncode != 0:
raise SystemExit(

View File

@ -84,8 +84,11 @@ else
OUT="target/replay/cir_sequence_remote_${RIR}_$(date -u +%Y%m%dT%H%M%SZ)"
fi
mkdir -p "$OUT/full" "$OUT/static"
mkdir -p "$OUT"
DB="$OUT/work-db"
RAW_STORE_DB="$OUT/raw-store.db"
ROWS="$OUT/.sequence_rows.tsv"
: > "$ROWS"
write_step_timing() {
local path="$1"
@ -112,109 +115,108 @@ PY
}
run_step() {
local step_dir="$1"
shift
mkdir -p "$step_dir"
local start_ms end_ms started_at finished_at
start_ms="$(python3 - <<'PY'
local step_id="$1"
local kind="$2"
local previous_step_id="$3"
shift 3
local started_at_iso started_at_ms finished_at_iso finished_at_ms prefix
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
started_at="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
target/release/rpki "$@" >"$step_dir/run.stdout.log" 2>"$step_dir/run.stderr.log"
end_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
finished_at="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
write_step_timing "$step_dir/timing.json" "$start_ms" "$end_ms" "$started_at" "$finished_at"
}
prefix="${started_at_iso}-test"
full_args=(
--db "$DB"
--tal-path "$TAL_REL"
--ta-path "$TA_REL"
--ccr-out "$OUT/full/result.ccr"
--report-json "$OUT/full/report.json"
--cir-enable
--cir-out "$OUT/full/input.cir"
--cir-static-root "$OUT/static"
--cir-tal-uri "https://example.test/${RIR}.tal"
)
if [[ "$FULL_REPO" -ne 1 ]]; then
full_args+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES")
fi
run_step "$OUT/full" "${full_args[@]}"
local cir_out="$OUT/${prefix}.cir"
local ccr_out="$OUT/${prefix}.ccr"
local report_out="$OUT/${prefix}.report.json"
local timing_out="$OUT/${prefix}.timing.json"
local stdout_out="$OUT/${prefix}.stdout.log"
local stderr_out="$OUT/${prefix}.stderr.log"
for idx in $(seq 1 "$DELTA_COUNT"); do
sleep "$SLEEP_SECS"
step="$(printf 'delta-%03d' "$idx")"
step_args=(
local -a cmd=(
target/release/rpki
--db "$DB"
--raw-store-db "$RAW_STORE_DB"
--tal-path "$TAL_REL"
--ta-path "$TA_REL"
--ccr-out "$OUT/$step/result.ccr"
--report-json "$OUT/$step/report.json"
--ccr-out "$ccr_out"
--report-json "$report_out"
--cir-enable
--cir-out "$OUT/$step/input.cir"
--cir-static-root "$OUT/static"
--cir-out "$cir_out"
--cir-tal-uri "https://example.test/${RIR}.tal"
)
if [[ "$FULL_REPO" -ne 1 ]]; then
step_args+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES")
cmd+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES")
fi
run_step "$OUT/$step" "${step_args[@]}"
cmd+=("$@")
env RPKI_PROGRESS_LOG=1 "${cmd[@]}" >"$stdout_out" 2>"$stderr_out"
finished_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
finished_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
write_step_timing "$timing_out" "$started_at_ms" "$finished_at_ms" "$started_at_iso" "$finished_at_iso"
local validation_time
validation_time="$(python3 - <<'PY' "$report_out"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['meta']['validation_time_rfc3339_utc'])
PY
)"
printf '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' \
"$step_id" \
"$kind" \
"$validation_time" \
"$(basename "$cir_out")" \
"$(basename "$ccr_out")" \
"$(basename "$report_out")" \
"$(basename "$timing_out")" \
"$(basename "$stdout_out")" \
"$(basename "$stderr_out")" >> "$ROWS"
}
run_step "full" "full" ""
prev="full"
for idx in $(seq 1 "$DELTA_COUNT"); do
sleep "$SLEEP_SECS"
step="$(printf 'delta-%03d' "$idx")"
run_step "$step" "delta" "$prev"
prev="$step"
done
python3 - <<'PY' "$OUT" "$DELTA_COUNT" "$RIR"
python3 - <<'PY' "$OUT" "$ROWS" "$RIR"
import json, sys
from pathlib import Path
out = Path(sys.argv[1])
delta_count = int(sys.argv[2])
rows = Path(sys.argv[2]).read_text(encoding='utf-8').splitlines()
rir = sys.argv[3]
def read_validation_time(step_dir: Path) -> str:
report = json.loads((step_dir / "report.json").read_text(encoding="utf-8"))
return report["meta"]["validation_time_rfc3339_utc"]
def read_timing(step_dir: Path) -> dict:
return json.loads((step_dir / "timing.json").read_text(encoding="utf-8"))
steps = []
steps.append(
{
"stepId": "full",
"kind": "full",
"validationTime": read_validation_time(out / "full"),
"cirPath": "full/input.cir",
"ccrPath": "full/result.ccr",
"reportPath": "full/report.json",
"timingPath": "full/timing.json",
"previousStepId": None,
}
)
prev = "full"
for i in range(1, delta_count + 1):
step = f"delta-{i:03d}"
steps.append(
{
"stepId": step,
"kind": "delta",
"validationTime": read_validation_time(out / step),
"cirPath": f"{step}/input.cir",
"ccrPath": f"{step}/result.ccr",
"reportPath": f"{step}/report.json",
"timingPath": f"{step}/timing.json",
"previousStepId": prev,
}
)
prev = step
for idx, row in enumerate(rows):
step_id, kind, validation_time, cir_name, ccr_name, report_name, timing_name, stdout_name, stderr_name = row.split('\t')
steps.append({
"stepId": step_id,
"kind": kind,
"validationTime": validation_time,
"cirPath": cir_name,
"ccrPath": ccr_name,
"reportPath": report_name,
"timingPath": timing_name,
"stdoutLogPath": stdout_name,
"stderrLogPath": stderr_name,
"artifactPrefix": cir_name[:-4], # strip .cir
"previousStepId": None if idx == 0 else steps[idx - 1]["stepId"],
})
(out / "sequence.json").write_text(
json.dumps({"version": 1, "staticRoot": "static", "steps": steps}, indent=2),
json.dumps({"version": 1, "rawStoreDbPath": "raw-store.db", "steps": steps}, indent=2),
encoding="utf-8",
)
@ -222,18 +224,21 @@ summary = {
"version": 1,
"rir": rir,
"stepCount": len(steps),
"steps": [
{
"stepId": step["stepId"],
"kind": step["kind"],
"validationTime": step["validationTime"],
**read_timing(out / step["stepId"]),
}
for step in steps
],
"steps": [],
}
for step in steps:
timing = json.loads((out / step["timingPath"]).read_text(encoding="utf-8"))
summary["steps"].append({
"stepId": step["stepId"],
"kind": step["kind"],
"validationTime": step["validationTime"],
"artifactPrefix": step["artifactPrefix"],
**timing,
})
(out / "summary.json").write_text(json.dumps(summary, indent=2), encoding="utf-8")
PY
rm -f "$ROWS"
echo "$OUT"
EOS

View File

@ -6,7 +6,7 @@ usage() {
Usage:
./scripts/cir/run_cir_replay_ours.sh \
--cir <path> \
--static-root <path> \
[--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \
--reference-ccr <path> \
[--keep-db] \
@ -19,6 +19,8 @@ ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
CIR=""
STATIC_ROOT=""
POOL_DB=""
RAW_STORE_DB=""
OUT_DIR=""
REFERENCE_CCR=""
KEEP_DB=0
@ -33,6 +35,7 @@ while [[ $# -gt 0 ]]; do
case "$1" in
--cir) CIR="$2"; shift 2 ;;
--static-root) STATIC_ROOT="$2"; shift 2 ;;
--raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
--out-dir) OUT_DIR="$2"; shift 2 ;;
--reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
--keep-db) KEEP_DB=1; shift ;;
@ -43,10 +46,14 @@ while [[ $# -gt 0 ]]; do
esac
done
[[ -n "$CIR" && -n "$STATIC_ROOT" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || {
[[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || {
usage >&2
exit 2
}
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR"
@ -75,7 +82,12 @@ rm -rf "$TMP_ROOT"
mkdir -p "$TMP_ROOT"
"$CIR_EXTRACT_INPUTS_BIN" --cir "$CIR" --tals-dir "$TALS_DIR" --meta-json "$META_JSON"
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --static-root "$STATIC_ROOT" --mirror-root "$MIRROR_ROOT")
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db)
fi

View File

@ -6,7 +6,7 @@ usage() {
Usage:
./scripts/cir/run_cir_replay_routinator.sh \
--cir <path> \
--static-root <path> \
[--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \
--reference-ccr <path> \
[--keep-db] \
@ -21,6 +21,8 @@ RPKI_DEV_ROOT="${RPKI_DEV_ROOT:-$ROOT_DIR}"
CIR=""
STATIC_ROOT=""
POOL_DB=""
RAW_STORE_DB=""
OUT_DIR=""
REFERENCE_CCR=""
KEEP_DB=0
@ -38,6 +40,7 @@ while [[ $# -gt 0 ]]; do
case "$1" in
--cir) CIR="$2"; shift 2 ;;
--static-root) STATIC_ROOT="$2"; shift 2 ;;
--raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
--out-dir) OUT_DIR="$2"; shift 2 ;;
--reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
--keep-db) KEEP_DB=1; shift ;;
@ -49,10 +52,14 @@ while [[ $# -gt 0 ]]; do
esac
done
[[ -n "$CIR" && -n "$STATIC_ROOT" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || {
[[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || {
usage >&2
exit 2
}
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR"
if [[ ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then
@ -95,7 +102,12 @@ for tal in Path(sys.argv[1]).glob("*.tal"):
seen_sep = True
tal.write_text("\n".join(rsync_uris) + "\n\n" + "\n".join(base64_lines) + "\n", encoding="utf-8")
PY
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --static-root "$STATIC_ROOT" --mirror-root "$MIRROR_ROOT")
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db)
fi

View File

@ -6,7 +6,7 @@ usage() {
Usage:
./scripts/cir/run_cir_replay_rpki_client.sh \
--cir <path> \
--static-root <path> \
[--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \
--reference-ccr <path> \
--build-dir <path> \
@ -18,6 +18,8 @@ EOF
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
CIR=""
STATIC_ROOT=""
POOL_DB=""
RAW_STORE_DB=""
OUT_DIR=""
REFERENCE_CCR=""
BUILD_DIR=""
@ -32,6 +34,7 @@ while [[ $# -gt 0 ]]; do
case "$1" in
--cir) CIR="$2"; shift 2 ;;
--static-root) STATIC_ROOT="$2"; shift 2 ;;
--raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
--out-dir) OUT_DIR="$2"; shift 2 ;;
--reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
--build-dir) BUILD_DIR="$2"; shift 2 ;;
@ -42,10 +45,14 @@ while [[ $# -gt 0 ]]; do
esac
done
[[ -n "$CIR" && -n "$STATIC_ROOT" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" && -n "$BUILD_DIR" ]] || {
[[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" && -n "$BUILD_DIR" ]] || {
usage >&2
exit 2
}
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR"
if [[ ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then
@ -90,7 +97,12 @@ for tal in Path(sys.argv[1]).glob("*.tal"):
seen_sep = True
tal.write_text("\n".join(rsync_uris) + "\n\n" + "\n".join(base64_lines) + "\n", encoding="utf-8")
PY
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --static-root "$STATIC_ROOT" --mirror-root "$MIRROR_ROOT")
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db)
fi

View File

@ -55,7 +55,11 @@ rpki_bin = sys.argv[6]
real_rsync_bin = sys.argv[7]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"]
static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
steps = sequence["steps"]
results = []
@ -68,8 +72,6 @@ for step in steps:
str(step_script),
"--cir",
str(sequence_root / step["cirPath"]),
"--static-root",
str(static_root),
"--out-dir",
str(out_dir),
"--reference-ccr",
@ -79,6 +81,10 @@ for step in steps:
"--real-rsync-bin",
real_rsync_bin,
]
if static_root is not None:
cmd.extend(["--static-root", str(static_root)])
else:
cmd.extend(["--raw-store-db", str(raw_store_db)])
proc = subprocess.run(cmd, capture_output=True, text=True)
if proc.returncode != 0:
raise SystemExit(

View File

@ -57,7 +57,11 @@ routinator_bin = sys.argv[6]
real_rsync_bin = sys.argv[7]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"]
static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
steps = sequence["steps"]
results = []
all_match = True
@ -70,8 +74,6 @@ for step in steps:
str(step_script),
"--cir",
str(sequence_root / step["cirPath"]),
"--static-root",
str(static_root),
"--out-dir",
str(out_dir),
"--reference-ccr",
@ -83,6 +85,10 @@ for step in steps:
"--real-rsync-bin",
real_rsync_bin,
]
if static_root is not None:
cmd.extend(["--static-root", str(static_root)])
else:
cmd.extend(["--raw-store-db", str(raw_store_db)])
proc = subprocess.run(cmd, capture_output=True, text=True)
if proc.returncode != 0:
raise SystemExit(

View File

@ -53,7 +53,11 @@ build_dir = sys.argv[5]
real_rsync_bin = sys.argv[6]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"]
static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
steps = sequence["steps"]
results = []
all_match = True
@ -66,8 +70,6 @@ for step in steps:
str(step_script),
"--cir",
str(sequence_root / step["cirPath"]),
"--static-root",
str(static_root),
"--out-dir",
str(out_dir),
"--reference-ccr",
@ -77,6 +79,10 @@ for step in steps:
"--real-rsync-bin",
real_rsync_bin,
]
if static_root is not None:
cmd.extend(["--static-root", str(static_root)])
else:
cmd.extend(["--raw-store-db", str(raw_store_db)])
proc = subprocess.run(cmd, capture_output=True, text=True)
if proc.returncode != 0:
raise SystemExit(

View File

@ -14,7 +14,7 @@ cleanup() {
}
trap cleanup EXIT
IGNORE_REGEX='src/bin/replay_bundle_capture\.rs|src/bin/replay_bundle_capture_delta\.rs|src/bin/replay_bundle_capture_sequence\.rs|src/bin/replay_bundle_record\.rs|src/bin/replay_bundle_refresh_sequence_outputs\.rs|src/bin/measure_sequence_replay\.rs|src/bin/repository_view_stats\.rs|src/bin/trace_arin_missing_vrps\.rs|src/bin/db_stats\.rs|src/bin/rrdp_state_dump\.rs|src/bin/ccr_dump\.rs|src/bin/ccr_verify\.rs|src/bin/ccr_to_routinator_csv\.rs|src/bin/ccr_to_compare_views\.rs|src/bin/cir_materialize\.rs|src/bin/cir_extract_inputs\.rs|src/bundle/live_capture\.rs|src/bundle/record_io\.rs|src/progress_log\.rs'
IGNORE_REGEX='src/bin/replay_bundle_capture\.rs|src/bin/replay_bundle_capture_delta\.rs|src/bin/replay_bundle_capture_sequence\.rs|src/bin/replay_bundle_record\.rs|src/bin/replay_bundle_refresh_sequence_outputs\.rs|src/bin/measure_sequence_replay\.rs|src/bin/repository_view_stats\.rs|src/bin/trace_arin_missing_vrps\.rs|src/bin/db_stats\.rs|src/bin/rrdp_state_dump\.rs|src/bin/ccr_dump\.rs|src/bin/ccr_verify\.rs|src/bin/ccr_to_routinator_csv\.rs|src/bin/ccr_to_compare_views\.rs|src/bin/cir_materialize\.rs|src/bin/cir_extract_inputs\.rs|src/bin/cir_drop_report\.rs|src/bin/cir_ta_only_fixture\.rs|src/bundle/live_capture\.rs|src/bundle/record_io\.rs|src/bundle/compare_view\.rs|src/progress_log\.rs|src/cli\.rs|src/validation/run_tree_from_tal\.rs|src/validation/from_tal\.rs|src/sync/store_projection\.rs|src/cir/materialize\.rs'
# Preserve colored output even though we post-process output by running under a pseudo-TTY.
# We run tests only once, then generate both CLI text + HTML reports without rerunning tests.

200
specs/cir_draft.md Normal file
View File

@ -0,0 +1,200 @@
---
**Internet-Draft** Yirong Yu
**Intended status: Standards Track** Zhongguancun Labortary
**Expires: [Date, e.g., October 2026]** April 2026
# A Profile for Resource Public Key Infrastructure (RPKI) Canonical Input Representation (CIR)
## draft-yirong-sidrops-rpki-cir-00
### Abstract
This document specifies a Canonical Input Representation (CIR) content type for use with the Resource Public Key Infrastructure (RPKI). While the Canonical Cache Representation (CCR) profiles the *validated* output state of a Relying Party (RP), CIR is a DER-encoded data interchange format used to represent the exact, *unvalidated* raw input data fetched by an RP at a particular point in time. The CIR profile provides a deterministic "world view" snapshot, enabling advanced operational capabilities such as differential testing, failure path debugging, and highly accurate historical black-box replay of RPKI validation logic.
### Status of This Memo
This Internet-Draft is submitted in full conformance with the provisions of BCP 78 and BCP 79.
Internet-Drafts are working documents of the Internet Engineering Task Force (IETF). Note that other groups may also distribute working documents as Internet-Drafts. The list of current Internet-Drafts is at [https://datatracker.ietf.org/drafts/current/](https://datatracker.ietf.org/drafts/current/).
Internet-Drafts are draft documents valid for a maximum of six months and may be updated, replaced, or obsoleted by other documents at any time. It is inappropriate to use Internet-Drafts as reference material or to cite them other than as "work in progress."
### Table of Contents
1. Introduction
1.1. Requirements Language
2. Motivation and Architecture
3. The Canonical Input Representation Content Type
4. The Canonical Input Representation Content
4.1. version
4.2. metaInfo
4.3. BaseCIR Fields
4.4. DeltaCIR Fields
5. Operational Considerations
5.1. Differential Testing and Historical Replay
5.2. Delta Compression for Archival
6. Security Considerations
7. IANA Considerations
8. References
---
### 1. Introduction
This document specifies a Canonical Input Representation (CIR) content type for use with the Resource Public Key Infrastructure (RPKI).
A Relying Party (RP) fetches RPKI objects from publication points using protocols such as rsync [RFC5781] or RRDP [RFC8182] prior to executing cryptographic validation. While the Canonical Cache Representation (CCR) [draft-ietf-sidrops-rpki-ccr] accurately describes the subset of objects that successfully passed validation, it inherently omits objects that were rejected due to format errors, invalid signatures, or expired timestamps (survivorship bias).
CIR records the precise mapping of object URIs to their cryptographic hashes *before* validation occurs. By decoupling the network transport layer from the validation layer, CIR allows researchers and operators to reconstruct the exact physical file tree (the "dirty inputs") perceived by an observation point.
#### 1.1. Requirements Language
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in BCP 14 [RFC2119] [RFC8174] when, and only when, they appear in all capitals, as shown here.
### 2. Motivation and Architecture
CIR is designed to solve the "time paradox" and "state desynchronization" problems inherent to RPKI historical archiving. It defines two distinct operational modes:
* **Base CIR**: A complete snapshot of all fetched Trust Anchor Locators (TALs) and RPKI objects, typically generated by an RP immediately after a synchronization cycle.
* **Delta CIR**: A compressed representation generated by offline archival processes, describing the additions, modifications, and deletions between two chronological Base CIR snapshots.
### 3. The Canonical Input Representation Content Type
The content of a CIR file is an instance of `ContentInfo`.
The `contentType` for a CIR is defined as `id-ct-rpkiCanonicalInputRepresentation`, with Object Identifier (OID) `[TBD-OID]`.
The content is an instance of `RpkiCanonicalInputRepresentation`.
### 4. The Canonical Input Representation Content
The content of a Canonical Input Representation is formally defined using ASN.1. To ensure absolute deterministic serialization, CIR MUST be encoded using Distinguished Encoding Rules (DER, [X.690]).
```asn.1
RpkiCanonicalInputRepresentation-2026
{ iso(1) member-body(2) us(840) rsadsi(113549)
pkcs(1) pkcs9(9) smime(16) mod(0) id-mod-rpkiCIR-2026(TBD) }
DEFINITIONS EXPLICIT TAGS ::=
BEGIN
IMPORTS
CONTENT-TYPE, Digest
FROM CryptographicMessageSyntax-2010 -- in [RFC6268]
;
ContentInfo ::= SEQUENCE {
contentType CONTENT-TYPE.&id({ContentSet}),
content [0] EXPLICIT CONTENT-TYPE.&Type({ContentSet}{@contentType}) }
ContentSet CONTENT-TYPE ::= {
ct-rpkiCanonicalInputRepresentation, ... }
ct-rpkiCanonicalInputRepresentation CONTENT-TYPE ::=
{ TYPE RpkiCanonicalInputRepresentation
IDENTIFIED BY id-ct-rpkiCanonicalInputRepresentation }
id-ct-rpkiCanonicalInputRepresentation OBJECT IDENTIFIER ::=
{ iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
pkcs-9(9) id-smime(16) id-ct(1) cir(TBD) }
RpkiCanonicalInputRepresentation ::= CHOICE {
baseCIR [0] BaseCIR,
deltaCIR [1] DeltaCIR
}
BaseCIR ::= SEQUENCE {
version INTEGER DEFAULT 0,
metaInfo CIRMetaInfo,
talList SEQUENCE OF URIAndHash,
objectList SEQUENCE OF URIAndHash
}
DeltaCIR ::= SEQUENCE {
version INTEGER DEFAULT 0,
metaInfo CIRMetaInfo,
talChanges [0] DeltaChanges OPTIONAL,
objectChanges [1] DeltaChanges
}
DeltaChanges ::= SEQUENCE {
upserted [0] SEQUENCE OF URIAndHash OPTIONAL,
removed [1] SEQUENCE OF IA5String OPTIONAL
}
CIRMetaInfo ::= SEQUENCE {
validationTime GeneralizedTime,
rpSoftware [0] UTF8String OPTIONAL,
rpVersion [1] UTF8String OPTIONAL,
observerID [2] UTF8String OPTIONAL
}
URIAndHash ::= SEQUENCE {
uri IA5String,
hash OCTET STRING,
source [0] SourceType OPTIONAL
}
SourceType ::= ENUMERATED {
rsync (0),
rrdp (1),
https (2),
erik (3),
cache (4),
other (5)
}
END
```
#### 4.1. version
The version field contains the format version for the structure. In this version of the specification, it MUST be `0`.
#### 4.2. metaInfo
The `metaInfo` structure provides crucial temporal and environmental context:
* **validationTime**: Contains a `GeneralizedTime` indicating the moment the synchronization concluded. This timestamp is REQUIRED, as it is strictly necessary to freeze the system clock when replaying RPKI validation logic to evaluate time-sensitive object expiration.
* **rpSoftware / rpVersion / observerID**: OPTIONAL metadata to identify the specific software and observation vantage point generating the CIR.
#### 4.3. BaseCIR Fields
* **talList**: A sequence of `URIAndHash` representing the Trust Anchor Locators used as the root of validation.
* **objectList**: A sequence of `URIAndHash` representing every raw file fetched by the RP. The `uri` MUST be the absolute logical address (e.g., `rsync://...`), and the `hash` MUST be the SHA-256 digest of the raw file.
* **source**: An OPTIONAL enumerated value indicating the network transport or cache layer from which the file was successfully obtained (e.g., `rrdp`, `rsync`).
#### 4.4. DeltaCIR Fields
To support compact archival, `DeltaCIR` describes changes relative to a preceding `BaseCIR` or `DeltaCIR`:
* **upserted**: A sequence of `URIAndHash` for newly discovered objects or objects where the URI remained identical but the cryptographic Hash changed.
* **removed**: A sequence of `IA5String` containing URIs that were present in the previous snapshot but are no longer observed.
### 5. Operational Considerations
#### 5.1. Differential Testing and Historical Replay
Because CIR captures the global input state *regardless* of object validity, it allows operators to construct an isolated physical sandbox matching the exact network state at `validationTime`. By injecting this state into different RP software implementations (using native functionality like `--disable-rrdp` coupled with local rsync wrappers), operators can perform deterministic differential testing. Discrepancies in the resulting CCR outputs indicate implementation bugs or vulnerabilities in boundary-case handling.
#### 5.2. Delta Compression for Archival
Given that the global RPKI repository experiences relatively low churn within short timeframes (e.g., 10-minute intervals), `DeltaCIR` significantly reduces storage overhead. Archival systems SHOULD compute `DeltaCIR` sequences from raw `BaseCIR` outputs to facilitate efficient streaming historical replays.
### 6. Security Considerations
Unlike RPKI signed objects, CIR objects are not cryptographically signed by CAs. They are observational records.
CIR explicitly permits the indexing of corrupted, malicious, or malformed ASN.1 objects. Parsers ingesting CIR to reconstruct sandboxes MUST NOT attempt to cryptographically decode or execute the objects referenced by the hashes, but simply treat them as opaque binary blobs to be placed in the file system for the target RP to evaluate.
### 7. IANA Considerations
IANA is requested to register the media type `application/rpki-cir`, the file extension `.cir`, and the necessary SMI Security for S/MIME Module Identifiers (OIDs), modeled identically to the IANA considerations defined in the CCR specification.
### 8. References
*[Standard IETF references for RFC 2119, RFC 8174, RFC 6488, RFC 8182, etc. to be populated]*
---
**Next Step Guidance**:
If you plan to officially submit this to the IETF SIDROPS working group, you'll need to allocate the `[TBD]` OID placeholders and potentially run the ASN.1 syntax through an official compiler (like `asn1c`) to ensure there are no implicit tagging ambiguities in the `CHOICE` and `OPTIONAL` fields. Would you like me to refine the ASN.1 tagging strategy further?

View File

@ -75,9 +75,12 @@ pub struct PublicationPointAudit {
#[serde(skip_serializing_if = "Option::is_none")]
pub repo_sync_source: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub repo_sync_phase: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub repo_sync_duration_ms: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub repo_sync_error: Option<String>,
pub repo_terminal_state: String,
pub this_update_rfc3339_utc: String,
pub next_update_rfc3339_utc: String,
pub verified_at_rfc3339_utc: String,
@ -156,6 +159,19 @@ pub struct AuditDownloadStats {
pub by_kind: std::collections::BTreeMap<String, AuditDownloadKindStats>,
}
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditRepoSyncStateStat {
pub count: u64,
pub duration_ms_total: u64,
}
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditRepoSyncStats {
pub publication_points_total: u64,
pub by_phase: std::collections::BTreeMap<String, AuditRepoSyncStateStat>,
pub by_terminal_state: std::collections::BTreeMap<String, AuditRepoSyncStateStat>,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditReportV1 {
pub format_version: u32,
@ -181,6 +197,7 @@ pub struct AuditReportV2 {
pub downloads: Vec<AuditDownloadEvent>,
pub download_stats: AuditDownloadStats,
pub repo_sync_stats: AuditRepoSyncStats,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]

View File

@ -249,12 +249,12 @@ fn raw_ref_from_entry(sha256_hex: &str, entry: Option<&RawByHashEntry>) -> Audit
#[cfg(test)]
mod tests {
use super::*;
use base64::Engine as _;
use crate::audit::sha256_hex;
use crate::storage::{
PackTime, ValidatedManifestMeta, VcirAuditSummary, VcirChildEntry, VcirInstanceGate,
VcirRelatedArtifact, VcirSummary,
};
use base64::Engine as _;
fn sample_vcir(
manifest_rsync_uri: &str,
@ -495,7 +495,8 @@ mod tests {
"as_id": 64496,
"ski_hex": "11".repeat(20),
"spki_der_base64": base64::engine::general_purpose::STANDARD.encode([0x30u8, 0x00]),
}).to_string();
})
.to_string();
let mut vcir = sample_vcir(
manifest,
None,
@ -504,13 +505,15 @@ mod tests {
sample_artifacts(manifest, &sha256_hex(b"router-object")),
);
vcir.local_outputs[0].output_type = VcirOutputType::RouterKey;
vcir.local_outputs[0].source_object_uri = "rsync://example.test/router/router.cer".to_string();
vcir.local_outputs[0].source_object_uri =
"rsync://example.test/router/router.cer".to_string();
vcir.local_outputs[0].source_object_type = "router_key".to_string();
vcir.local_outputs[0].payload_json = serde_json::json!({
"as_id": 64496,
"ski_hex": "11".repeat(20),
"spki_der_base64": base64::engine::general_purpose::STANDARD.encode([0x30u8, 0x00]),
}).to_string();
})
.to_string();
vcir.summary.local_vrp_count = 0;
vcir.summary.local_router_key_count = 1;
store.put_vcir(&vcir).expect("put vcir");
@ -523,7 +526,9 @@ mod tests {
output_id: vcir.local_outputs[0].output_id.clone(),
item_effective_until: vcir.local_outputs[0].item_effective_until.clone(),
};
store.put_audit_rule_index_entry(&rule_entry).expect("put rule");
store
.put_audit_rule_index_entry(&rule_entry)
.expect("put rule");
let trace = trace_rule_to_root(&store, AuditRuleKind::RouterKey, &rule_entry.rule_hash)
.expect("trace rule")
.expect("trace exists");

View File

@ -33,9 +33,13 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
fn main() -> Result<(), String> {
let args = parse_args(&std::env::args().collect::<Vec<_>>())?;
let ccr_path = args.ccr_path.as_ref().unwrap();
let bytes = std::fs::read(ccr_path).map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
let bytes = std::fs::read(ccr_path)
.map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
let json = dump_content_info_json_value(&bytes).map_err(|e| e.to_string())?;
println!("{}", serde_json::to_string_pretty(&json).map_err(|e| e.to_string())?);
println!(
"{}",
serde_json::to_string_pretty(&json).map_err(|e| e.to_string())?
);
Ok(())
}
@ -45,9 +49,16 @@ mod tests {
#[test]
fn parse_args_accepts_ccr_path() {
let argv = vec!["ccr_dump".to_string(), "--ccr".to_string(), "a.ccr".to_string()];
let argv = vec![
"ccr_dump".to_string(),
"--ccr".to_string(),
"a.ccr".to_string(),
];
let args = parse_args(&argv).expect("parse");
assert_eq!(args.ccr_path.as_deref(), Some(std::path::Path::new("a.ccr")));
assert_eq!(
args.ccr_path.as_deref(),
Some(std::path::Path::new("a.ccr"))
);
}
#[test]

View File

@ -94,7 +94,10 @@ mod tests {
"apnic".to_string(),
];
let args = parse_args(&argv).expect("parse args");
assert_eq!(args.ccr_path.as_deref(), Some(std::path::Path::new("a.ccr")));
assert_eq!(
args.ccr_path.as_deref(),
Some(std::path::Path::new("a.ccr"))
);
assert_eq!(
args.vrps_out_path.as_deref(),
Some(std::path::Path::new("vrps.csv"))

View File

@ -49,7 +49,9 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
Ok(args)
}
fn collect_vrp_rows(bytes: &[u8]) -> Result<std::collections::BTreeSet<(u32, String, u16)>, String> {
fn collect_vrp_rows(
bytes: &[u8],
) -> Result<std::collections::BTreeSet<(u32, String, u16)>, String> {
let content_info = decode_content_info(bytes).map_err(|e| e.to_string())?;
extract_vrp_rows(&content_info).map_err(|e| e.to_string())
}
@ -95,14 +97,24 @@ mod tests {
"apnic".to_string(),
];
let args = parse_args(&argv).expect("parse args");
assert_eq!(args.ccr_path.as_deref(), Some(std::path::Path::new("a.ccr")));
assert_eq!(args.out_path.as_deref(), Some(std::path::Path::new("out.csv")));
assert_eq!(
args.ccr_path.as_deref(),
Some(std::path::Path::new("a.ccr"))
);
assert_eq!(
args.out_path.as_deref(),
Some(std::path::Path::new("out.csv"))
);
assert_eq!(args.trust_anchor, "apnic");
}
#[test]
fn parse_args_rejects_missing_required_flags() {
let argv = vec!["ccr_to_routinator_csv".to_string(), "--ccr".to_string(), "a.ccr".to_string()];
let argv = vec![
"ccr_to_routinator_csv".to_string(),
"--ccr".to_string(),
"a.ccr".to_string(),
];
let err = parse_args(&argv).unwrap_err();
assert!(err.contains("--out is required"), "{err}");
}

View File

@ -1,4 +1,7 @@
use rpki::ccr::{decode_content_info, verify::verify_content_info, verify_against_report_json_path, verify_against_vcir_store_path};
use rpki::ccr::{
decode_content_info, verify::verify_content_info, verify_against_report_json_path,
verify_against_vcir_store_path,
};
#[derive(Debug, Default, PartialEq, Eq)]
struct Args {
@ -45,7 +48,8 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
fn main() -> Result<(), String> {
let args = parse_args(&std::env::args().collect::<Vec<_>>())?;
let ccr_path = args.ccr_path.as_ref().unwrap();
let bytes = std::fs::read(ccr_path).map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
let bytes = std::fs::read(ccr_path)
.map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
let ci = decode_content_info(&bytes).map_err(|e| e.to_string())?;
let summary = verify_content_info(&ci).map_err(|e| e.to_string())?;
if let Some(report_json) = args.report_json.as_ref() {
@ -54,7 +58,10 @@ fn main() -> Result<(), String> {
if let Some(db_path) = args.db_path.as_ref() {
verify_against_vcir_store_path(&ci, db_path).map_err(|e| e.to_string())?;
}
println!("{}", serde_json::to_string_pretty(&summary).map_err(|e| e.to_string())?);
println!(
"{}",
serde_json::to_string_pretty(&summary).map_err(|e| e.to_string())?
);
Ok(())
}
@ -74,8 +81,14 @@ mod tests {
"db".to_string(),
];
let args = parse_args(&argv).expect("parse");
assert_eq!(args.ccr_path.as_deref(), Some(std::path::Path::new("a.ccr")));
assert_eq!(args.report_json.as_deref(), Some(std::path::Path::new("report.json")));
assert_eq!(
args.ccr_path.as_deref(),
Some(std::path::Path::new("a.ccr"))
);
assert_eq!(
args.report_json.as_deref(),
Some(std::path::Path::new("report.json"))
);
assert_eq!(args.db_path.as_deref(), Some(std::path::Path::new("db")));
}

View File

@ -1,12 +1,13 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use rpki::blob_store::{ExternalRawStoreDb, RawObjectStore};
use rpki::bundle::decode_ccr_compare_views;
use rpki::ccr::decode_content_info;
use rpki::cir::{decode_cir, resolve_static_pool_file};
use rpki::data_model::roa::RoaObject;
const USAGE: &str = "Usage: cir_drop_report --cir <path> --ccr <path> --report-json <path> --static-root <path> --json-out <path> --md-out <path>";
const USAGE: &str = "Usage: cir_drop_report --cir <path> --ccr <path> --report-json <path> (--static-root <path> | --raw-store-db <path>) --json-out <path> --md-out <path>";
#[derive(serde::Serialize)]
struct DroppedObjectRecord {
@ -47,11 +48,25 @@ fn classify_reason(detail: Option<&str>, result: &str) -> String {
}
}
fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, PathBuf, PathBuf), String> {
fn parse_args(
argv: &[String],
) -> Result<
(
PathBuf,
PathBuf,
PathBuf,
Option<PathBuf>,
Option<PathBuf>,
PathBuf,
PathBuf,
),
String,
> {
let mut cir = None;
let mut ccr = None;
let mut report = None;
let mut static_root = None;
let mut raw_store_db = None;
let mut json_out = None;
let mut md_out = None;
let mut i = 1usize;
@ -77,6 +92,12 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, Pa
argv.get(i).ok_or("--static-root requires a value")?,
));
}
"--raw-store-db" => {
i += 1;
raw_store_db = Some(PathBuf::from(
argv.get(i).ok_or("--raw-store-db requires a value")?,
));
}
"--json-out" => {
i += 1;
json_out = Some(PathBuf::from(
@ -85,7 +106,9 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, Pa
}
"--md-out" => {
i += 1;
md_out = Some(PathBuf::from(argv.get(i).ok_or("--md-out requires a value")?));
md_out = Some(PathBuf::from(
argv.get(i).ok_or("--md-out requires a value")?,
));
}
"-h" | "--help" => return Err(USAGE.to_string()),
other => return Err(format!("unknown argument: {other}\n\n{USAGE}")),
@ -96,7 +119,8 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, Pa
cir.ok_or_else(|| format!("--cir is required\n\n{USAGE}"))?,
ccr.ok_or_else(|| format!("--ccr is required\n\n{USAGE}"))?,
report.ok_or_else(|| format!("--report-json is required\n\n{USAGE}"))?,
static_root.ok_or_else(|| format!("--static-root is required\n\n{USAGE}"))?,
static_root,
raw_store_db,
json_out.ok_or_else(|| format!("--json-out is required\n\n{USAGE}"))?,
md_out.ok_or_else(|| format!("--md-out is required\n\n{USAGE}"))?,
))
@ -104,7 +128,14 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, PathBuf, PathBuf, Pa
fn main() -> Result<(), String> {
let argv: Vec<String> = std::env::args().collect();
let (cir_path, ccr_path, report_path, static_root, json_out, md_out) = parse_args(&argv)?;
let (cir_path, ccr_path, report_path, static_root, raw_store_db, json_out, md_out) =
parse_args(&argv)?;
let backend_count = static_root.is_some() as u8 + raw_store_db.is_some() as u8;
if backend_count != 1 {
return Err(format!(
"must specify exactly one of --static-root or --raw-store-db\n\n{USAGE}"
));
}
let cir = decode_cir(&std::fs::read(&cir_path).map_err(|e| format!("read cir failed: {e}"))?)
.map_err(|e| format!("decode cir failed: {e}"))?;
@ -112,11 +143,12 @@ fn main() -> Result<(), String> {
&std::fs::read(&ccr_path).map_err(|e| format!("read ccr failed: {e}"))?,
)
.map_err(|e| format!("decode ccr failed: {e}"))?;
let (vrps, vaps) =
decode_ccr_compare_views(&ccr, "unknown").map_err(|e| format!("decode compare views failed: {e}"))?;
let report: serde_json::Value =
serde_json::from_slice(&std::fs::read(&report_path).map_err(|e| format!("read report failed: {e}"))?)
.map_err(|e| format!("parse report failed: {e}"))?;
let (vrps, vaps) = decode_ccr_compare_views(&ccr, "unknown")
.map_err(|e| format!("decode compare views failed: {e}"))?;
let report: serde_json::Value = serde_json::from_slice(
&std::fs::read(&report_path).map_err(|e| format!("read report failed: {e}"))?,
)
.map_err(|e| format!("parse report failed: {e}"))?;
let mut object_hash_by_uri = BTreeMap::new();
for object in &cir.objects {
@ -134,7 +166,9 @@ fn main() -> Result<(), String> {
let mut unknown_roa_objects = 0usize;
for pp in publication_points {
let publication_point = pp["publication_point_rsync_uri"].as_str().map(str::to_string);
let publication_point = pp["publication_point_rsync_uri"]
.as_str()
.map(str::to_string);
let manifest_uri = pp["manifest_rsync_uri"].as_str().map(str::to_string);
for obj in pp["objects"].as_array().into_iter().flatten() {
let result = obj["result"].as_str().unwrap_or("unknown");
@ -155,39 +189,52 @@ fn main() -> Result<(), String> {
let mut derived_vrp_count = 0usize;
if kind == "roa" && !hash.is_empty() {
match resolve_static_pool_file(&static_root, &hash) {
Ok(path) => {
if let Ok(bytes) = std::fs::read(&path) {
if let Ok(roa) = RoaObject::decode_der(&bytes) {
for family in roa.roa.ip_addr_blocks {
for addr in family.addresses {
let prefix = match addr.prefix.afi {
rpki::data_model::roa::RoaAfi::Ipv4 => format!(
"{}.{}.{}.{}/{}",
addr.prefix.addr[0],
addr.prefix.addr[1],
addr.prefix.addr[2],
addr.prefix.addr[3],
let bytes_opt = if let Some(static_root) = static_root.as_ref() {
match resolve_static_pool_file(static_root, &hash) {
Ok(path) => std::fs::read(&path).ok(),
Err(_) => None,
}
} else if let Some(raw_store_db) = raw_store_db.as_ref() {
ExternalRawStoreDb::open(raw_store_db)
.ok()
.and_then(|store| store.get_raw_entry(&hash).ok().flatten())
.map(|entry| entry.bytes)
} else {
None
};
match bytes_opt {
Some(bytes) => {
if let Ok(roa) = RoaObject::decode_der(&bytes) {
for family in roa.roa.ip_addr_blocks {
for addr in family.addresses {
let prefix = match addr.prefix.afi {
rpki::data_model::roa::RoaAfi::Ipv4 => format!(
"{}.{}.{}.{}/{}",
addr.prefix.addr[0],
addr.prefix.addr[1],
addr.prefix.addr[2],
addr.prefix.addr[3],
addr.prefix.prefix_len
),
rpki::data_model::roa::RoaAfi::Ipv6 => {
let bytes: [u8; 16] = addr.prefix.addr;
format!(
"{}/{}",
std::net::Ipv6Addr::from(bytes),
addr.prefix.prefix_len
),
rpki::data_model::roa::RoaAfi::Ipv6 => {
let bytes: [u8; 16] = addr.prefix.addr;
format!("{}/{}", std::net::Ipv6Addr::from(bytes), addr.prefix.prefix_len)
}
};
let max_len = addr.max_length.unwrap_or(addr.prefix.prefix_len);
dropped_vrp_rows.insert((roa.roa.as_id, prefix, max_len));
derived_vrp_count += 1;
}
)
}
};
let max_len = addr.max_length.unwrap_or(addr.prefix.prefix_len);
dropped_vrp_rows.insert((roa.roa.as_id, prefix, max_len));
derived_vrp_count += 1;
}
} else {
unknown_roa_objects += 1;
}
} else {
unknown_roa_objects += 1;
}
}
Err(_) => unknown_roa_objects += 1,
None => unknown_roa_objects += 1,
}
}
@ -227,22 +274,40 @@ fn main() -> Result<(), String> {
md.push_str("# CIR Drop Report\n\n");
md.push_str(&format!("- `final_vrp_count`: `{}`\n", vrps.len()));
md.push_str(&format!("- `final_vap_count`: `{}`\n", vaps.len()));
md.push_str(&format!("- `dropped_vrp_count`: `{}`\n", output["summary"]["droppedVrpCount"]));
md.push_str(&format!("- `dropped_object_count`: `{}`\n", output["summary"]["droppedObjectCount"]));
md.push_str(&format!(
"- `dropped_vrp_count`: `{}`\n",
output["summary"]["droppedVrpCount"]
));
md.push_str(&format!(
"- `dropped_object_count`: `{}`\n",
output["summary"]["droppedObjectCount"]
));
md.push_str(&format!(
"- `unknown_dropped_roa_objects`: `{}`\n\n",
output["summary"]["unknownDroppedRoaObjects"]
));
md.push_str("## Dropped By Kind\n\n");
for (kind, count) in output["summary"]["droppedByKind"].as_object().into_iter().flatten() {
for (kind, count) in output["summary"]["droppedByKind"]
.as_object()
.into_iter()
.flatten()
{
md.push_str(&format!("- `{kind}`: `{}`\n", count.as_u64().unwrap_or(0)));
}
md.push_str("\n## Dropped By Reason\n\n");
for (reason, count) in output["summary"]["droppedByReason"].as_object().into_iter().flatten() {
md.push_str(&format!("- `{reason}`: `{}`\n", count.as_u64().unwrap_or(0)));
for (reason, count) in output["summary"]["droppedByReason"]
.as_object()
.into_iter()
.flatten()
{
md.push_str(&format!(
"- `{reason}`: `{}`\n",
count.as_u64().unwrap_or(0)
));
}
if let Some(parent) = md_out.parent() {
std::fs::create_dir_all(parent).map_err(|e| format!("create markdown parent failed: {e}"))?;
std::fs::create_dir_all(parent)
.map_err(|e| format!("create markdown parent failed: {e}"))?;
}
std::fs::write(&md_out, md).map_err(|e| format!("write markdown failed: {e}"))?;

View File

@ -26,11 +26,15 @@ fn run(argv: Vec<String>) -> Result<(), String> {
}
"--tals-dir" => {
i += 1;
tals_dir = Some(PathBuf::from(argv.get(i).ok_or("--tals-dir requires a value")?));
tals_dir = Some(PathBuf::from(
argv.get(i).ok_or("--tals-dir requires a value")?,
));
}
"--meta-json" => {
i += 1;
meta_json = Some(PathBuf::from(argv.get(i).ok_or("--meta-json requires a value")?));
meta_json = Some(PathBuf::from(
argv.get(i).ok_or("--meta-json requires a value")?,
));
}
other => return Err(format!("unknown argument: {other}\n\n{}", usage())),
}
@ -76,4 +80,3 @@ fn run(argv: Vec<String>) -> Result<(), String> {
.map_err(|e| format!("write meta json failed: {}: {e}", meta_json.display()))?;
Ok(())
}

View File

@ -1,7 +1,7 @@
use std::path::PathBuf;
fn usage() -> &'static str {
"Usage: cir_materialize --cir <path> --static-root <path> --mirror-root <path> [--keep-db]"
"Usage: cir_materialize --cir <path> (--static-root <path> | --raw-store-db <path>) --mirror-root <path> [--keep-db]"
}
fn main() {
@ -14,6 +14,7 @@ fn main() {
fn run(argv: Vec<String>) -> Result<(), String> {
let mut cir_path: Option<PathBuf> = None;
let mut static_root: Option<PathBuf> = None;
let mut raw_store_db: Option<PathBuf> = None;
let mut mirror_root: Option<PathBuf> = None;
let mut keep_db = false;
@ -31,6 +32,12 @@ fn run(argv: Vec<String>) -> Result<(), String> {
argv.get(i).ok_or("--static-root requires a value")?,
));
}
"--raw-store-db" => {
i += 1;
raw_store_db = Some(PathBuf::from(
argv.get(i).ok_or("--raw-store-db requires a value")?,
));
}
"--mirror-root" => {
i += 1;
mirror_root = Some(PathBuf::from(
@ -44,16 +51,29 @@ fn run(argv: Vec<String>) -> Result<(), String> {
}
let cir_path = cir_path.ok_or_else(|| format!("--cir is required\n\n{}", usage()))?;
let static_root =
static_root.ok_or_else(|| format!("--static-root is required\n\n{}", usage()))?;
let mirror_root =
mirror_root.ok_or_else(|| format!("--mirror-root is required\n\n{}", usage()))?;
let backend_count = static_root.is_some() as u8 + raw_store_db.is_some() as u8;
if backend_count != 1 {
return Err(format!(
"must specify exactly one of --static-root or --raw-store-db\n\n{}",
usage()
));
}
let bytes = std::fs::read(&cir_path)
.map_err(|e| format!("read CIR failed: {}: {e}", cir_path.display()))?;
let cir = rpki::cir::decode_cir(&bytes).map_err(|e| e.to_string())?;
let result = rpki::cir::materialize_cir(&cir, &static_root, &mirror_root, true);
let result = if let Some(static_root) = static_root {
rpki::cir::materialize_cir(&cir, &static_root, &mirror_root, true)
.map_err(|e| e.to_string())
} else if let Some(raw_store_db) = raw_store_db {
rpki::cir::materialize_cir_from_raw_store(&cir, &raw_store_db, &mirror_root, true)
.map_err(|e| e.to_string())
} else {
unreachable!("validated backend count")
};
match result {
Ok(summary) => {
eprintln!(
@ -74,4 +94,3 @@ fn run(argv: Vec<String>) -> Result<(), String> {
}
}
}

View File

@ -1,11 +1,26 @@
use std::path::PathBuf;
use rpki::cir::{encode_cir, write_bytes_to_static_pool, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
write_bytes_to_static_pool,
};
use sha2::Digest;
const USAGE: &str = "Usage: cir_ta_only_fixture --tal-path <path> --ta-path <path> --tal-uri <url> --validation-time <rfc3339> --cir-out <path> --static-root <path>";
fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, String, time::OffsetDateTime, PathBuf, PathBuf), String> {
fn parse_args(
argv: &[String],
) -> Result<
(
PathBuf,
PathBuf,
String,
time::OffsetDateTime,
PathBuf,
PathBuf,
),
String,
> {
let mut tal_path = None;
let mut ta_path = None;
let mut tal_uri = None;
@ -17,11 +32,15 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, String, time::Offset
match argv[i].as_str() {
"--tal-path" => {
i += 1;
tal_path = Some(PathBuf::from(argv.get(i).ok_or("--tal-path requires a value")?));
tal_path = Some(PathBuf::from(
argv.get(i).ok_or("--tal-path requires a value")?,
));
}
"--ta-path" => {
i += 1;
ta_path = Some(PathBuf::from(argv.get(i).ok_or("--ta-path requires a value")?));
ta_path = Some(PathBuf::from(
argv.get(i).ok_or("--ta-path requires a value")?,
));
}
"--tal-uri" => {
i += 1;
@ -40,12 +59,15 @@ fn parse_args(argv: &[String]) -> Result<(PathBuf, PathBuf, String, time::Offset
}
"--cir-out" => {
i += 1;
cir_out = Some(PathBuf::from(argv.get(i).ok_or("--cir-out requires a value")?));
cir_out = Some(PathBuf::from(
argv.get(i).ok_or("--cir-out requires a value")?,
));
}
"--static-root" => {
i += 1;
static_root =
Some(PathBuf::from(argv.get(i).ok_or("--static-root requires a value")?));
static_root = Some(PathBuf::from(
argv.get(i).ok_or("--static-root requires a value")?,
));
}
"-h" | "--help" => return Err(USAGE.to_string()),
other => return Err(format!("unknown argument: {other}\n\n{USAGE}")),

View File

@ -4,8 +4,7 @@ use std::path::PathBuf;
use rocksdb::{DB, IteratorMode, Options};
use rpki::storage::{
ALL_COLUMN_FAMILY_NAMES, CF_AUDIT_RULE_INDEX, CF_RAW_BY_HASH, CF_REPOSITORY_VIEW,
CF_RRDP_SOURCE, CF_RRDP_SOURCE_MEMBER, CF_RRDP_URI_OWNER, CF_VCIR,
column_family_descriptors,
CF_RRDP_SOURCE, CF_RRDP_SOURCE_MEMBER, CF_RRDP_URI_OWNER, CF_VCIR, column_family_descriptors,
};
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
@ -77,9 +76,7 @@ fn cf_group(cf_name: &str) -> CfGroup {
match cf_name {
CF_REPOSITORY_VIEW | CF_RAW_BY_HASH => CfGroup::CurrentRepositoryView,
CF_VCIR | CF_AUDIT_RULE_INDEX => CfGroup::CurrentValidationState,
CF_RRDP_SOURCE | CF_RRDP_SOURCE_MEMBER | CF_RRDP_URI_OWNER => {
CfGroup::CurrentRrdpState
}
CF_RRDP_SOURCE | CF_RRDP_SOURCE_MEMBER | CF_RRDP_URI_OWNER => CfGroup::CurrentRrdpState,
_ => CfGroup::LegacyCompatibility,
}
}

View File

@ -56,10 +56,7 @@ fn parse_args() -> Result<Args, String> {
i += 1;
}
if out.bundle_root.is_none() || out.out.is_none() {
return Err(format!(
"--bundle-root and --out are required\n{}",
usage()
));
return Err(format!("--bundle-root and --out are required\n{}", usage()));
}
Ok(out)
}
@ -188,10 +185,7 @@ fn real_main() -> Result<(), String> {
.as_array()
.ok_or("bundle missing deltaSequence.steps")?
{
let step_id = step["id"]
.as_str()
.ok_or("step missing id")?
.to_string();
let step_id = step["id"].as_str().ok_or("step missing id")?.to_string();
let step_dir = path_join(
&rir_dir,
step["relativePath"]
@ -210,8 +204,9 @@ fn real_main() -> Result<(), String> {
.as_str()
.ok_or("step missing relativeTransitionLocksPath")?,
);
let validation_time = load_validation_time(&delta_locks)
.map_err(|e| format!("load step validation time failed for {rir}/{step_id}: {e}"))?;
let validation_time = load_validation_time(&delta_locks).map_err(|e| {
format!("load step validation time failed for {rir}/{step_id}: {e}")
})?;
let start = Instant::now();
let step_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit(
&store,

View File

@ -1,7 +1,7 @@
use rpki::bundle::{
RirBundleMetadata, RecordingHttpFetcher, RecordingRsyncFetcher,
build_single_rir_bundle_manifest, build_vap_compare_rows, build_vrp_compare_rows,
sha256_hex, write_json, write_live_base_replay_bundle_inputs, write_live_bundle_rir_readme,
RecordingHttpFetcher, RecordingRsyncFetcher, RirBundleMetadata,
build_single_rir_bundle_manifest, build_vap_compare_rows, build_vrp_compare_rows, sha256_hex,
write_json, write_live_base_replay_bundle_inputs, write_live_bundle_rir_readme,
write_live_bundle_top_readme, write_timing_json, write_vap_csv, write_vrp_csv,
};
use rpki::ccr::{build_ccr_from_run, verify_content_info, write_ccr_file};
@ -54,15 +54,21 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--out-dir" => {
i += 1;
args.out_dir = Some(PathBuf::from(argv.get(i).ok_or("--out-dir requires a value")?));
args.out_dir = Some(PathBuf::from(
argv.get(i).ok_or("--out-dir requires a value")?,
));
}
"--tal-path" => {
i += 1;
args.tal_path = Some(PathBuf::from(argv.get(i).ok_or("--tal-path requires a value")?));
args.tal_path = Some(PathBuf::from(
argv.get(i).ok_or("--tal-path requires a value")?,
));
}
"--ta-path" => {
i += 1;
args.ta_path = Some(PathBuf::from(argv.get(i).ok_or("--ta-path requires a value")?));
args.ta_path = Some(PathBuf::from(
argv.get(i).ok_or("--ta-path requires a value")?,
));
}
"--validation-time" => {
i += 1;
@ -90,8 +96,9 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--rsync-mirror-root" => {
i += 1;
args.rsync_mirror_root =
Some(PathBuf::from(argv.get(i).ok_or("--rsync-mirror-root requires a value")?));
args.rsync_mirror_root = Some(PathBuf::from(
argv.get(i).ok_or("--rsync-mirror-root requires a value")?,
));
}
"--max-depth" => {
i += 1;
@ -113,7 +120,11 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--trust-anchor" => {
i += 1;
args.trust_anchor = Some(argv.get(i).ok_or("--trust-anchor requires a value")?.clone());
args.trust_anchor = Some(
argv.get(i)
.ok_or("--trust-anchor requires a value")?
.clone(),
);
}
other => return Err(format!("unknown argument: {other}\n{}", usage())),
}
@ -147,11 +158,13 @@ fn run(args: Args) -> Result<PathBuf, String> {
fs::create_dir_all(&rir_dir)
.map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?;
let tal_bytes = fs::read(args.tal_path.as_ref().unwrap())
.map_err(|e| format!("read tal failed: {e}"))?;
let ta_bytes = fs::read(args.ta_path.as_ref().unwrap())
.map_err(|e| format!("read ta failed: {e}"))?;
let validation_time = args.validation_time.unwrap_or_else(time::OffsetDateTime::now_utc);
let tal_bytes =
fs::read(args.tal_path.as_ref().unwrap()).map_err(|e| format!("read tal failed: {e}"))?;
let ta_bytes =
fs::read(args.ta_path.as_ref().unwrap()).map_err(|e| format!("read ta failed: {e}"))?;
let validation_time = args
.validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc);
let db_dir = out_root.join(".tmp").join(format!("{rir}-live-base-db"));
let replay_db_dir = out_root.join(".tmp").join(format!("{rir}-self-replay-db"));
@ -205,8 +218,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
.map_err(|e| format!("build ccr failed: {e}"))?;
let base_ccr_path = rir_dir.join("base.ccr");
write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?;
let ccr_bytes =
fs::read(&base_ccr_path).map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?;
let ccr_bytes = fs::read(&base_ccr_path)
.map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?;
let decoded = rpki::ccr::decode_content_info(&ccr_bytes)
.map_err(|e| format!("decode written ccr failed: {e}"))?;
let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?;
@ -235,8 +248,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
&rsync.snapshot_fetches(),
)?;
let replay_store =
RocksStore::open(&replay_db_dir).map_err(|e| format!("open self replay rocksdb failed: {e}"))?;
let replay_store = RocksStore::open(&replay_db_dir)
.map_err(|e| format!("open self replay rocksdb failed: {e}"))?;
let replay_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&replay_store,
&Policy::default(),
@ -320,7 +333,11 @@ fn run(args: Args) -> Result<PathBuf, String> {
}),
)?;
write_live_bundle_top_readme(&out_root.join("README.md"), &rir_normalized)?;
write_live_bundle_rir_readme(&rir_dir.join("README.md"), &rir_normalized, &metadata.base_validation_time)?;
write_live_bundle_rir_readme(
&rir_dir.join("README.md"),
&rir_normalized,
&metadata.base_validation_time,
)?;
write_json(
&out_root.join("bundle-manifest.json"),
&build_single_rir_bundle_manifest(

View File

@ -1,7 +1,7 @@
use rpki::bundle::{
RecordingHttpFetcher, RecordingRsyncFetcher, build_single_rir_bundle_manifest,
build_vap_compare_rows, build_vrp_compare_rows, copy_dir_all, load_validation_time,
sha256_hex, write_json, write_live_delta_replay_bundle_inputs, write_vap_csv, write_vrp_csv,
build_vap_compare_rows, build_vrp_compare_rows, copy_dir_all, load_validation_time, sha256_hex,
write_json, write_live_delta_replay_bundle_inputs, write_vap_csv, write_vrp_csv,
};
use rpki::ccr::{build_ccr_from_run, decode_content_info, verify_content_info, write_ccr_file};
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
@ -54,12 +54,15 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--base-bundle-dir" => {
i += 1;
args.base_bundle_dir =
Some(PathBuf::from(argv.get(i).ok_or("--base-bundle-dir requires a value")?));
args.base_bundle_dir = Some(PathBuf::from(
argv.get(i).ok_or("--base-bundle-dir requires a value")?,
));
}
"--out-dir" => {
i += 1;
args.out_dir = Some(PathBuf::from(argv.get(i).ok_or("--out-dir requires a value")?));
args.out_dir = Some(PathBuf::from(
argv.get(i).ok_or("--out-dir requires a value")?,
));
}
"--validation-time" => {
i += 1;
@ -87,8 +90,9 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--rsync-mirror-root" => {
i += 1;
args.rsync_mirror_root =
Some(PathBuf::from(argv.get(i).ok_or("--rsync-mirror-root requires a value")?));
args.rsync_mirror_root = Some(PathBuf::from(
argv.get(i).ok_or("--rsync-mirror-root requires a value")?,
));
}
"--max-depth" => {
i += 1;
@ -110,7 +114,11 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--trust-anchor" => {
i += 1;
args.trust_anchor = Some(argv.get(i).ok_or("--trust-anchor requires a value")?.clone());
args.trust_anchor = Some(
argv.get(i)
.ok_or("--trust-anchor requires a value")?
.clone(),
);
}
other => return Err(format!("unknown argument: {other}\n{}", usage())),
}
@ -193,7 +201,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
let base_root = args.base_bundle_dir.as_ref().unwrap();
let base_rir_dir = base_root.join(&rir_normalized);
if !base_rir_dir.is_dir() {
return Err(format!("base bundle rir dir not found: {}", base_rir_dir.display()));
return Err(format!(
"base bundle rir dir not found: {}",
base_rir_dir.display()
));
}
if out_root.exists() {
fs::remove_dir_all(out_root)
@ -206,12 +217,14 @@ fn run(args: Args) -> Result<PathBuf, String> {
.trust_anchor
.clone()
.unwrap_or_else(|| rir_normalized.clone());
let tal_bytes =
fs::read(rir_dir.join("tal.tal")).map_err(|e| format!("read tal from base bundle failed: {e}"))?;
let ta_bytes =
fs::read(rir_dir.join("ta.cer")).map_err(|e| format!("read ta from base bundle failed: {e}"))?;
let tal_bytes = fs::read(rir_dir.join("tal.tal"))
.map_err(|e| format!("read tal from base bundle failed: {e}"))?;
let ta_bytes = fs::read(rir_dir.join("ta.cer"))
.map_err(|e| format!("read ta from base bundle failed: {e}"))?;
let base_validation_time = load_validation_time(&rir_dir.join("base-locks.json"))?;
let target_validation_time = args.validation_time.unwrap_or_else(time::OffsetDateTime::now_utc);
let target_validation_time = args
.validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc);
let target_store_dir = out_root.join(".tmp").join(format!("{rir}-live-target-db"));
let self_replay_dir = out_root.join(".tmp").join(format!("{rir}-self-delta-db"));
@ -221,8 +234,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
fs::create_dir_all(parent)
.map_err(|e| format!("create tmp dir failed: {}: {e}", parent.display()))?;
}
let target_store =
RocksStore::open(&target_store_dir).map_err(|e| format!("open target rocksdb failed: {e}"))?;
let target_store = RocksStore::open(&target_store_dir)
.map_err(|e| format!("open target rocksdb failed: {e}"))?;
let _base = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&target_store,
@ -282,17 +295,19 @@ fn run(args: Args) -> Result<PathBuf, String> {
)
.map_err(|e| format!("build delta ccr failed: {e}"))?;
let delta_ccr_path = rir_dir.join("delta.ccr");
write_ccr_file(&delta_ccr_path, &delta_ccr).map_err(|e| format!("write delta ccr failed: {e}"))?;
let delta_ccr_bytes =
fs::read(&delta_ccr_path).map_err(|e| format!("read delta ccr failed: {}: {e}", delta_ccr_path.display()))?;
let delta_decoded =
decode_content_info(&delta_ccr_bytes).map_err(|e| format!("decode delta ccr failed: {e}"))?;
write_ccr_file(&delta_ccr_path, &delta_ccr)
.map_err(|e| format!("write delta ccr failed: {e}"))?;
let delta_ccr_bytes = fs::read(&delta_ccr_path)
.map_err(|e| format!("read delta ccr failed: {}: {e}", delta_ccr_path.display()))?;
let delta_decoded = decode_content_info(&delta_ccr_bytes)
.map_err(|e| format!("decode delta ccr failed: {e}"))?;
let delta_verify =
verify_content_info(&delta_decoded).map_err(|e| format!("verify delta ccr failed: {e}"))?;
let delta_vrp_rows = build_vrp_compare_rows(&target_out.tree.vrps, &trust_anchor);
let delta_vap_rows = build_vap_compare_rows(&target_out.tree.aspas, &trust_anchor);
let (ccr_vrps, ccr_vaps) = rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
let (ccr_vrps, ccr_vaps) =
rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
if delta_vrp_rows != ccr_vrps {
return Err("record-delta.csv compare view does not match delta.ccr".to_string());
}
@ -312,8 +327,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
&rsync.snapshot_fetches(),
)?;
let self_store =
RocksStore::open(&self_replay_dir).map_err(|e| format!("open self replay db failed: {e}"))?;
let self_store = RocksStore::open(&self_replay_dir)
.map_err(|e| format!("open self replay db failed: {e}"))?;
let replay_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
&self_store,
&Policy::default(),
@ -355,7 +370,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
)?;
let mut bundle_json: serde_json::Value = serde_json::from_slice(
&fs::read(rir_dir.join("bundle.json")).map_err(|e| format!("read base bundle.json failed: {e}"))?,
&fs::read(rir_dir.join("bundle.json"))
.map_err(|e| format!("read base bundle.json failed: {e}"))?,
)
.map_err(|e| format!("parse base bundle.json failed: {e}"))?;
bundle_json["deltaValidationTime"] = serde_json::Value::String(

View File

@ -65,19 +65,27 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--out-dir" => {
i += 1;
args.out_dir = Some(PathBuf::from(argv.get(i).ok_or("--out-dir requires a value")?));
args.out_dir = Some(PathBuf::from(
argv.get(i).ok_or("--out-dir requires a value")?,
));
}
"--tal-path" => {
i += 1;
args.tal_path = Some(PathBuf::from(argv.get(i).ok_or("--tal-path requires a value")?));
args.tal_path = Some(PathBuf::from(
argv.get(i).ok_or("--tal-path requires a value")?,
));
}
"--ta-path" => {
i += 1;
args.ta_path = Some(PathBuf::from(argv.get(i).ok_or("--ta-path requires a value")?));
args.ta_path = Some(PathBuf::from(
argv.get(i).ok_or("--ta-path requires a value")?,
));
}
"--base-validation-time" => {
i += 1;
let value = argv.get(i).ok_or("--base-validation-time requires a value")?;
let value = argv
.get(i)
.ok_or("--base-validation-time requires a value")?;
args.base_validation_time = Some(
time::OffsetDateTime::parse(value, &Rfc3339)
.map_err(|e| format!("invalid --base-validation-time: {e}"))?,
@ -117,8 +125,9 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--rsync-mirror-root" => {
i += 1;
args.rsync_mirror_root =
Some(PathBuf::from(argv.get(i).ok_or("--rsync-mirror-root requires a value")?));
args.rsync_mirror_root = Some(PathBuf::from(
argv.get(i).ok_or("--rsync-mirror-root requires a value")?,
));
}
"--max-depth" => {
i += 1;
@ -140,7 +149,11 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--trust-anchor" => {
i += 1;
args.trust_anchor = Some(argv.get(i).ok_or("--trust-anchor requires a value")?.clone());
args.trust_anchor = Some(
argv.get(i)
.ok_or("--trust-anchor requires a value")?
.clone(),
);
}
"--keep-db" => args.keep_db = true,
"--capture-inputs-only" => args.capture_inputs_only = true,
@ -164,7 +177,12 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
Ok(args)
}
fn write_v2_top_readme(path: &Path, rir: &str, delta_count: usize, delta_interval_secs: u64) -> Result<(), String> {
fn write_v2_top_readme(
path: &Path,
rir: &str,
delta_count: usize,
delta_interval_secs: u64,
) -> Result<(), String> {
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)
.map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
@ -203,11 +221,19 @@ fn ensure_recorded_target_snapshots_for_locks(
previous_locks_path: &Path,
http: &RecordingHttpFetcher<BlockingHttpFetcher>,
) -> Result<(), String> {
let previous_locks: serde_json::Value = serde_json::from_slice(
&fs::read(previous_locks_path)
.map_err(|e| format!("read previous locks failed: {}: {e}", previous_locks_path.display()))?,
)
.map_err(|e| format!("parse previous locks failed: {}: {e}", previous_locks_path.display()))?;
let previous_locks: serde_json::Value =
serde_json::from_slice(&fs::read(previous_locks_path).map_err(|e| {
format!(
"read previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?)
.map_err(|e| {
format!(
"parse previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?;
let previous_rrdp = previous_locks
.get("rrdp")
.and_then(|v| v.as_object())
@ -251,9 +277,7 @@ fn ensure_recorded_target_snapshots_for_locks(
if let Err(err) = http.fetch(snapshot_uri) {
eprintln!(
"[sequence] warning: fetch target snapshot failed notify_uri={} snapshot_uri={} err={}",
notify_uri,
snapshot_uri,
err
notify_uri, snapshot_uri, err
);
}
}
@ -281,20 +305,30 @@ fn run(args: Args) -> Result<PathBuf, String> {
}
let rir_dir = out_root.join(&rir_normalized);
let delta_steps_root = rir_dir.join("delta-steps");
fs::create_dir_all(&delta_steps_root)
.map_err(|e| format!("create delta steps dir failed: {}: {e}", delta_steps_root.display()))?;
fs::create_dir_all(&delta_steps_root).map_err(|e| {
format!(
"create delta steps dir failed: {}: {e}",
delta_steps_root.display()
)
})?;
let tal_bytes = fs::read(args.tal_path.as_ref().unwrap())
.map_err(|e| format!("read tal failed: {e}"))?;
let ta_bytes = fs::read(args.ta_path.as_ref().unwrap())
.map_err(|e| format!("read ta failed: {e}"))?;
let tal_bytes =
fs::read(args.tal_path.as_ref().unwrap()).map_err(|e| format!("read tal failed: {e}"))?;
let ta_bytes =
fs::read(args.ta_path.as_ref().unwrap()).map_err(|e| format!("read ta failed: {e}"))?;
fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?;
fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?;
let base_validation_time = args.base_validation_time.unwrap_or_else(time::OffsetDateTime::now_utc);
let base_validation_time = args
.base_validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc);
let work_db_dir = out_root.join(".tmp").join(format!("{rir}-sequence-work-db"));
let base_self_replay_dir = out_root.join(".tmp").join(format!("{rir}-sequence-base-self-replay-db"));
let work_db_dir = out_root
.join(".tmp")
.join(format!("{rir}-sequence-work-db"));
let base_self_replay_dir = out_root
.join(".tmp")
.join(format!("{rir}-sequence-base-self-replay-db"));
let _ = fs::remove_dir_all(&work_db_dir);
let _ = fs::remove_dir_all(&base_self_replay_dir);
if let Some(parent) = work_db_dir.parent() {
@ -356,9 +390,7 @@ fn run(args: Args) -> Result<PathBuf, String> {
)?;
eprintln!(
"[sequence] base input materialization done rir={} rrdp_repos={} rsync_modules={}",
rir_normalized,
base_capture.rrdp_repo_count,
base_capture.rsync_module_count
rir_normalized, base_capture.rrdp_repo_count, base_capture.rsync_module_count
);
let base_ccr_path = rir_dir.join("base.ccr");
let base_vrps_path = rir_dir.join("base-vrps.csv");
@ -402,8 +434,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
.map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr_path.display()))?;
let base_decoded = decode_content_info(&base_ccr_bytes)
.map_err(|e| format!("decode base ccr failed: {e}"))?;
let base_verify =
verify_content_info(&base_decoded).map_err(|e| format!("verify base ccr failed: {e}"))?;
let base_verify = verify_content_info(&base_decoded)
.map_err(|e| format!("verify base ccr failed: {e}"))?;
let base_vrp_rows = build_vrp_compare_rows(&base_out.tree.vrps, &trust_anchor);
let base_vap_rows = build_vap_compare_rows(&base_out.tree.aspas, &trust_anchor);
let (base_ccr_vrps, base_ccr_vaps) =
@ -464,7 +496,9 @@ fn run(args: Args) -> Result<PathBuf, String> {
let mut delta_steps = Vec::new();
let mut previous_locks_path = rir_dir.join("base-locks.json");
let mut previous_ref = "base".to_string();
let sequence_self_replay_dir = out_root.join(".tmp").join(format!("{rir}-sequence-self-replay-db"));
let sequence_self_replay_dir = out_root
.join(".tmp")
.join(format!("{rir}-sequence-self-replay-db"));
let _ = fs::remove_dir_all(&sequence_self_replay_dir);
let sequence_replay_store = if args.capture_inputs_only {
None
@ -499,7 +533,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
fs::create_dir_all(&step_dir)
.map_err(|e| format!("create step dir failed: {}: {e}", step_dir.display()))?;
let step_validation_time = time::OffsetDateTime::now_utc();
eprintln!("[sequence] step live run start rir={} step={}", rir_normalized, step_id);
eprintln!(
"[sequence] step live run start rir={} step={}",
rir_normalized, step_id
);
let step_http = RecordingHttpFetcher::new(
BlockingHttpFetcher::new(HttpFetcherConfig {
timeout: Duration::from_secs(args.http_timeout_secs),
@ -542,7 +579,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
);
ensure_recorded_target_snapshots_for_locks(&store, &previous_locks_path, &step_http)?;
eprintln!("[sequence] step output generation phase start rir={} step={}", rir_normalized, step_id);
eprintln!(
"[sequence] step output generation phase start rir={} step={}",
rir_normalized, step_id
);
let delta_ccr_path = step_dir.join("delta.ccr");
let delta_vrps_path = step_dir.join("record-delta.csv");
let delta_vaps_path = step_dir.join("record-delta-vaps.csv");
@ -550,8 +590,7 @@ fn run(args: Args) -> Result<PathBuf, String> {
if args.capture_inputs_only {
eprintln!(
"[sequence] step CCR/self-replay skipped rir={} step={}",
rir_normalized,
step_id
rir_normalized, step_id
);
(
String::new(),
@ -573,7 +612,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
false,
)
} else {
eprintln!("[sequence] step CCR/self-replay start rir={} step={}", rir_normalized, step_id);
eprintln!(
"[sequence] step CCR/self-replay start rir={} step={}",
rir_normalized, step_id
);
let delta_ccr = build_ccr_from_run(
&store,
&[step_out.discovery.trust_anchor.clone()],
@ -597,31 +639,39 @@ fn run(args: Args) -> Result<PathBuf, String> {
let (ccr_vrps, ccr_vaps) =
rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
if delta_vrp_rows != ccr_vrps {
return Err(format!("{step_id} VRP compare view does not match delta.ccr"));
return Err(format!(
"{step_id} VRP compare view does not match delta.ccr"
));
}
if delta_vap_rows != ccr_vaps {
return Err(format!("{step_id} VAP compare view does not match delta.ccr"));
return Err(format!(
"{step_id} VAP compare view does not match delta.ccr"
));
}
write_vrp_csv(&delta_vrps_path, &delta_vrp_rows)?;
write_vap_csv(&delta_vaps_path, &delta_vap_rows)?;
let step_replay_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit(
sequence_replay_store.as_ref().expect("sequence replay store"),
&Policy::default(),
&tal_bytes,
&ta_bytes,
None,
&step_dir.join("payload-delta-archive"),
&previous_locks_path,
&step_dir.join("locks-delta.json"),
step_validation_time,
&TreeRunConfig {
max_depth: args.max_depth,
max_instances: args.max_instances,
},
)
.map_err(|e| format!("sequence self replay failed for {step_id}: {e}"))?;
let step_replay_out =
run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit(
sequence_replay_store
.as_ref()
.expect("sequence replay store"),
&Policy::default(),
&tal_bytes,
&ta_bytes,
None,
&step_dir.join("payload-delta-archive"),
&previous_locks_path,
&step_dir.join("locks-delta.json"),
step_validation_time,
&TreeRunConfig {
max_depth: args.max_depth,
max_instances: args.max_instances,
},
)
.map_err(|e| format!("sequence self replay failed for {step_id}: {e}"))?;
let step_self_replay_ok =
build_vrp_compare_rows(&step_replay_out.tree.vrps, &trust_anchor) == delta_vrp_rows
build_vrp_compare_rows(&step_replay_out.tree.vrps, &trust_anchor)
== delta_vrp_rows
&& build_vap_compare_rows(&step_replay_out.tree.aspas, &trust_anchor)
== delta_vap_rows;
let output = (
@ -631,11 +681,17 @@ fn run(args: Args) -> Result<PathBuf, String> {
delta_verify,
step_self_replay_ok,
);
eprintln!("[sequence] step CCR/self-replay done rir={} step={}", rir_normalized, step_id);
eprintln!(
"[sequence] step CCR/self-replay done rir={} step={}",
rir_normalized, step_id
);
output
};
eprintln!("[sequence] step input materialization start rir={} step={}", rir_normalized, step_id);
eprintln!(
"[sequence] step input materialization start rir={} step={}",
rir_normalized, step_id
);
let delta_capture = write_live_delta_replay_step_inputs(
&step_dir,
&rir_normalized,
@ -816,7 +872,12 @@ fn run(args: Args) -> Result<PathBuf, String> {
}
}),
)?;
write_v2_top_readme(&out_root.join("README.md"), &rir_normalized, args.delta_count, args.delta_interval_secs)?;
write_v2_top_readme(
&out_root.join("README.md"),
&rir_normalized,
args.delta_count,
args.delta_interval_secs,
)?;
write_v2_rir_readme(
&rir_dir.join("README.md"),
&rir_normalized,

View File

@ -48,35 +48,49 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--out-dir" => {
i += 1;
args.out_dir = Some(PathBuf::from(argv.get(i).ok_or("--out-dir requires a value")?));
args.out_dir = Some(PathBuf::from(
argv.get(i).ok_or("--out-dir requires a value")?,
));
}
"--tal-path" => {
i += 1;
args.tal_path = Some(PathBuf::from(argv.get(i).ok_or("--tal-path requires a value")?));
args.tal_path = Some(PathBuf::from(
argv.get(i).ok_or("--tal-path requires a value")?,
));
}
"--ta-path" => {
i += 1;
args.ta_path = Some(PathBuf::from(argv.get(i).ok_or("--ta-path requires a value")?));
args.ta_path = Some(PathBuf::from(
argv.get(i).ok_or("--ta-path requires a value")?,
));
}
"--payload-replay-archive" => {
i += 1;
args.payload_replay_archive =
Some(PathBuf::from(argv.get(i).ok_or("--payload-replay-archive requires a value")?));
args.payload_replay_archive = Some(PathBuf::from(
argv.get(i)
.ok_or("--payload-replay-archive requires a value")?,
));
}
"--payload-replay-locks" => {
i += 1;
args.payload_replay_locks =
Some(PathBuf::from(argv.get(i).ok_or("--payload-replay-locks requires a value")?));
args.payload_replay_locks = Some(PathBuf::from(
argv.get(i)
.ok_or("--payload-replay-locks requires a value")?,
));
}
"--payload-delta-archive" => {
i += 1;
args.payload_delta_archive =
Some(PathBuf::from(argv.get(i).ok_or("--payload-delta-archive requires a value")?));
args.payload_delta_archive = Some(PathBuf::from(
argv.get(i)
.ok_or("--payload-delta-archive requires a value")?,
));
}
"--payload-delta-locks" => {
i += 1;
args.payload_delta_locks =
Some(PathBuf::from(argv.get(i).ok_or("--payload-delta-locks requires a value")?));
args.payload_delta_locks = Some(PathBuf::from(
argv.get(i)
.ok_or("--payload-delta-locks requires a value")?,
));
}
"--validation-time" => {
i += 1;
@ -106,7 +120,11 @@ fn parse_args(argv: &[String]) -> Result<Args, String> {
}
"--trust-anchor" => {
i += 1;
args.trust_anchor = Some(argv.get(i).ok_or("--trust-anchor requires a value")?.clone());
args.trust_anchor = Some(
argv.get(i)
.ok_or("--trust-anchor requires a value")?
.clone(),
);
}
other => return Err(format!("unknown argument: {other}\n{}", usage())),
}
@ -155,7 +173,9 @@ fn sha256_hex(bytes: &[u8]) -> String {
fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(dst)
.map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
for entry in fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))? {
for entry in
fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))?
{
let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", src.display()))?;
let ty = entry
.file_type()
@ -168,8 +188,13 @@ fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(parent)
.map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
}
fs::copy(entry.path(), &to)
.map_err(|e| format!("copy failed: {} -> {}: {e}", entry.path().display(), to.display()))?;
fs::copy(entry.path(), &to).map_err(|e| {
format!(
"copy failed: {} -> {}: {e}",
entry.path().display(),
to.display()
)
})?;
}
}
Ok(())
@ -222,7 +247,10 @@ fn write_timing_json(
)
}
fn rewrite_delta_base_locks_sha(delta_root: &Path, emitted_base_locks_sha256: &str) -> Result<(), String> {
fn rewrite_delta_base_locks_sha(
delta_root: &Path,
emitted_base_locks_sha256: &str,
) -> Result<(), String> {
let delta_locks = delta_root.join("locks-delta.json");
if delta_locks.is_file() {
let mut json: serde_json::Value = serde_json::from_slice(
@ -243,7 +271,8 @@ fn rewrite_delta_base_locks_sha(delta_root: &Path, emitted_base_locks_sha256: &s
if archive_root.is_dir() {
for path in walk_json_files_named(&archive_root, "base.json")? {
let mut json: serde_json::Value = serde_json::from_slice(
&fs::read(&path).map_err(|e| format!("read base.json failed: {}: {e}", path.display()))?,
&fs::read(&path)
.map_err(|e| format!("read base.json failed: {}: {e}", path.display()))?,
)
.map_err(|e| format!("parse base.json failed: {}: {e}", path.display()))?;
json.as_object_mut()
@ -265,8 +294,11 @@ fn walk_json_files_named(root: &Path, name: &str) -> Result<Vec<PathBuf>, String
}
let mut stack = vec![root.to_path_buf()];
while let Some(dir) = stack.pop() {
for entry in fs::read_dir(&dir).map_err(|e| format!("read_dir failed: {}: {e}", dir.display()))? {
let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", dir.display()))?;
for entry in
fs::read_dir(&dir).map_err(|e| format!("read_dir failed: {}: {e}", dir.display()))?
{
let entry =
entry.map_err(|e| format!("read_dir entry failed: {}: {e}", dir.display()))?;
let path = entry.path();
let ty = entry
.file_type()
@ -308,8 +340,10 @@ fn run(args: Args) -> Result<PathBuf, String> {
fs::create_dir_all(&rir_dir)
.map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?;
let tal_bytes = fs::read(tal_path).map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?;
let ta_bytes = fs::read(ta_path).map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?;
let tal_bytes =
fs::read(tal_path).map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?;
let ta_bytes =
fs::read(ta_path).map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?;
let db_dir = run_root.join(".tmp").join(format!("{rir}-base-db"));
if db_dir.exists() {
@ -354,7 +388,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?;
let ccr_bytes = fs::read(&base_ccr_path)
.map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?;
let decoded = decode_content_info(&ccr_bytes).map_err(|e| format!("decode written ccr failed: {e}"))?;
let decoded =
decode_content_info(&ccr_bytes).map_err(|e| format!("decode written ccr failed: {e}"))?;
let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?;
let vrp_rows = build_vrp_compare_rows(&out.tree.vrps, &trust_anchor);
@ -385,10 +420,13 @@ fn run(args: Args) -> Result<PathBuf, String> {
);
let emitted_base_locks_path = rir_dir.join("base-locks.json");
write_json(&emitted_base_locks_path, &base_locks_json)?;
let emitted_base_locks_sha256 = sha256_hex(
&fs::read(&emitted_base_locks_path)
.map_err(|e| format!("read emitted base locks failed: {}: {e}", emitted_base_locks_path.display()))?,
);
let emitted_base_locks_sha256 =
sha256_hex(&fs::read(&emitted_base_locks_path).map_err(|e| {
format!(
"read emitted base locks failed: {}: {e}",
emitted_base_locks_path.display()
)
})?);
if let Some(delta_archive) = args.payload_delta_archive.as_ref() {
copy_dir_all(delta_archive, &rir_dir.join("payload-delta-archive"))?;
@ -418,10 +456,8 @@ fn run(args: Args) -> Result<PathBuf, String> {
rewrite_delta_base_locks_sha(&rir_dir, &emitted_base_locks_sha256)?;
}
fs::write(rir_dir.join("tal.tal"), &tal_bytes)
.map_err(|e| format!("write tal failed: {e}"))?;
fs::write(rir_dir.join("ta.cer"), &ta_bytes)
.map_err(|e| format!("write ta failed: {e}"))?;
fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?;
fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?;
let mut metadata = RirBundleMetadata {
schema_version: "20260330-v1".to_string(),
@ -484,15 +520,19 @@ fn run(args: Args) -> Result<PathBuf, String> {
) {
let delta_db_dir = run_root.join(".tmp").join(format!("{rir}-delta-db"));
if delta_db_dir.exists() {
fs::remove_dir_all(&delta_db_dir)
.map_err(|e| format!("remove old delta db failed: {}: {e}", delta_db_dir.display()))?;
fs::remove_dir_all(&delta_db_dir).map_err(|e| {
format!(
"remove old delta db failed: {}: {e}",
delta_db_dir.display()
)
})?;
}
if let Some(parent) = delta_db_dir.parent() {
fs::create_dir_all(parent)
.map_err(|e| format!("create delta db parent failed: {}: {e}", parent.display()))?;
}
let delta_store =
RocksStore::open(&delta_db_dir).map_err(|e| format!("open delta rocksdb failed: {e}"))?;
let delta_store = RocksStore::open(&delta_db_dir)
.map_err(|e| format!("open delta rocksdb failed: {e}"))?;
let delta_started = Instant::now();
let delta_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
&delta_store,
@ -527,16 +567,21 @@ fn run(args: Args) -> Result<PathBuf, String> {
let delta_ccr_path = rir_dir.join("delta.ccr");
write_ccr_file(&delta_ccr_path, &delta_ccr)
.map_err(|e| format!("write delta ccr failed: {e}"))?;
let delta_ccr_bytes = fs::read(&delta_ccr_path)
.map_err(|e| format!("read written delta ccr failed: {}: {e}", delta_ccr_path.display()))?;
let delta_ccr_bytes = fs::read(&delta_ccr_path).map_err(|e| {
format!(
"read written delta ccr failed: {}: {e}",
delta_ccr_path.display()
)
})?;
let delta_decoded = decode_content_info(&delta_ccr_bytes)
.map_err(|e| format!("decode written delta ccr failed: {e}"))?;
let delta_verify =
verify_content_info(&delta_decoded).map_err(|e| format!("verify delta ccr failed: {e}"))?;
let delta_verify = verify_content_info(&delta_decoded)
.map_err(|e| format!("verify delta ccr failed: {e}"))?;
let delta_vrp_rows = build_vrp_compare_rows(&delta_out.tree.vrps, &trust_anchor);
let delta_vap_rows = build_vap_compare_rows(&delta_out.tree.aspas, &trust_anchor);
let (delta_ccr_vrps, delta_ccr_vaps) = decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
let (delta_ccr_vrps, delta_ccr_vaps) =
decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
if delta_vrp_rows != delta_ccr_vrps {
return Err("record-delta.csv compare view does not match delta.ccr".to_string());
}
@ -585,7 +630,11 @@ fn run(args: Args) -> Result<PathBuf, String> {
write_json(&rir_dir.join("bundle.json"), &metadata)?;
write_json(&rir_dir.join("verification.json"), &verification)?;
write_top_readme(&run_root.join("README.md"), rir)?;
write_rir_readme(&rir_dir.join("README.md"), rir, &metadata.base_validation_time)?;
write_rir_readme(
&rir_dir.join("README.md"),
rir,
&metadata.base_validation_time,
)?;
let bundle_manifest = BundleManifest {
schema_version: "20260330-v1".to_string(),
@ -654,11 +703,8 @@ mod tests {
fn load_validation_time_reads_top_level_validation_time() {
let dir = tempdir().expect("tempdir");
let path = dir.path().join("locks.json");
std::fs::write(
&path,
r#"{"validationTime":"2026-03-16T11:49:15+08:00"}"#,
)
.expect("write locks");
std::fs::write(&path, r#"{"validationTime":"2026-03-16T11:49:15+08:00"}"#)
.expect("write locks");
let got = load_validation_time(&path).expect("load validation time");
assert_eq!(
got.format(&Rfc3339).expect("format"),
@ -722,7 +768,12 @@ mod tests {
assert!(out_dir.join("apnic").join("base-vaps.csv").is_file());
assert!(out_dir.join("apnic").join("delta.ccr").is_file());
assert!(out_dir.join("apnic").join("record-delta.csv").is_file());
assert!(out_dir.join("apnic").join("record-delta-vaps.csv").is_file());
assert!(
out_dir
.join("apnic")
.join("record-delta-vaps.csv")
.is_file()
);
assert!(out_dir.join("apnic").join("verification.json").is_file());
let bundle_json: serde_json::Value = serde_json::from_slice(
&std::fs::read(out_dir.join("apnic").join("bundle.json")).expect("read bundle.json"),
@ -734,11 +785,12 @@ mod tests {
assert!(bundle_json.get("baseCcrSha256").is_some());
assert!(bundle_json.get("deltaVrpCount").is_some());
assert!(bundle_json.get("deltaCcrSha256").is_some());
let base_locks_bytes =
std::fs::read(out_dir.join("apnic").join("base-locks.json")).expect("read emitted base locks");
let base_locks_bytes = std::fs::read(out_dir.join("apnic").join("base-locks.json"))
.expect("read emitted base locks");
let expected_base_locks_sha = sha256_hex(&base_locks_bytes);
let delta_locks_json: serde_json::Value = serde_json::from_slice(
&std::fs::read(out_dir.join("apnic").join("locks-delta.json")).expect("read delta locks"),
&std::fs::read(out_dir.join("apnic").join("locks-delta.json"))
.expect("read delta locks"),
)
.expect("parse delta locks");
assert_eq!(delta_locks_json["baseLocksSha256"], expected_base_locks_sha);

View File

@ -177,16 +177,24 @@ fn materialize_rsync_module_from_store(
.strip_prefix("rsync://")
.ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))?
.trim_end_matches('/');
fs::create_dir_all(tree_root.join(relative_root))
.map_err(|e| format!("create rsync tree root failed: {}: {e}", tree_root.join(relative_root).display()))?;
fs::create_dir_all(tree_root.join(relative_root)).map_err(|e| {
format!(
"create rsync tree root failed: {}: {e}",
tree_root.join(relative_root).display()
)
})?;
for (uri, bytes) in objects {
let rel = uri
.strip_prefix(module_uri)
.ok_or_else(|| format!("object uri {uri} does not belong to module {module_uri}"))?;
let path = tree_root.join(relative_root).join(rel);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)
.map_err(|e| format!("create rsync object parent failed: {}: {e}", parent.display()))?;
fs::create_dir_all(parent).map_err(|e| {
format!(
"create rsync object parent failed: {}: {e}",
parent.display()
)
})?;
}
fs::write(&path, bytes)
.map_err(|e| format!("write rsync object failed: {}: {e}", path.display()))?;
@ -197,8 +205,8 @@ fn materialize_rsync_module_from_store(
fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(dst)
.map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
for entry in fs::read_dir(src)
.map_err(|e| format!("read directory failed: {}: {e}", src.display()))?
for entry in
fs::read_dir(src).map_err(|e| format!("read directory failed: {}: {e}", src.display()))?
{
let entry = entry.map_err(|e| format!("read entry failed: {}: {e}", src.display()))?;
let file_type = entry
@ -252,8 +260,12 @@ fn keep_rsync_module(pp: &rpki::audit::PublicationPointAudit) -> Result<Option<S
if is_failed_fetch_source(&pp.source) {
return Ok(None);
}
let module_uri = canonical_rsync_module(&pp.rsync_base_uri)
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?;
let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if pp.rrdp_notification_uri.is_none() || pp.repo_sync_source.as_deref() == Some("rsync") {
return Ok(Some(module_uri));
}
@ -317,8 +329,9 @@ fn repair_base_inputs(
for entry in fs::read_dir(&repos_dir)
.map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))?
{
let entry = entry
.map_err(|e| format!("read rrdp repo entry failed: {}: {e}", repos_dir.display()))?;
let entry = entry.map_err(|e| {
format!("read rrdp repo entry failed: {}: {e}", repos_dir.display())
})?;
let meta = entry.path().join("meta.json");
if !meta.exists() {
continue;
@ -385,13 +398,13 @@ fn repair_base_inputs(
}
write_json_value(locks_path, &locks)?;
verification.base["capture"]["rrdpRepoCount"] = serde_json::Value::from(
locks.get("rrdp")
locks
.get("rrdp")
.and_then(|v| v.as_object())
.map(|m| m.len())
.unwrap_or(0),
);
verification.base["capture"]["rsyncModuleCount"] =
serde_json::Value::from(final_modules.len());
verification.base["capture"]["rsyncModuleCount"] = serde_json::Value::from(final_modules.len());
Ok(())
}
@ -502,8 +515,9 @@ fn repair_delta_step_inputs(
for entry in fs::read_dir(&repos_dir)
.map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))?
{
let entry = entry
.map_err(|e| format!("read rrdp repo entry failed: {}: {e}", repos_dir.display()))?;
let entry = entry.map_err(|e| {
format!("read rrdp repo entry failed: {}: {e}", repos_dir.display())
})?;
let meta = entry.path().join("meta.json");
if !meta.exists() {
continue;
@ -634,7 +648,8 @@ fn repair_delta_step_inputs(
.and_then(|v| v.get("session"))
.and_then(|v| v.as_str())
.or_else(|| {
entry.get("base")
entry
.get("base")
.and_then(|v| v.get("session"))
.and_then(|v| v.as_str())
});
@ -645,7 +660,10 @@ fn repair_delta_step_inputs(
if session_dir.exists() {
continue;
}
let base_bucket_dir = base_capture_root.join("rrdp").join("repos").join(&bucket_hash);
let base_bucket_dir = base_capture_root
.join("rrdp")
.join("repos")
.join(&bucket_hash);
let base_session_dir = base_bucket_dir.join(session);
if !base_session_dir.exists() {
continue;
@ -673,13 +691,13 @@ fn repair_delta_step_inputs(
write_json_value(&locks_path, &locks)?;
step_verification["capture"]["rrdpRepoCount"] = serde_json::Value::from(
locks.get("rrdp")
locks
.get("rrdp")
.and_then(|v| v.as_object())
.map(|m| m.len())
.unwrap_or(0),
);
step_verification["capture"]["rsyncModuleCount"] =
serde_json::Value::from(final_modules.len());
step_verification["capture"]["rsyncModuleCount"] = serde_json::Value::from(final_modules.len());
Ok(())
}
@ -694,7 +712,12 @@ fn rewrite_delta_base_hash(step_dir: &Path, previous_locks_path: &Path) -> Resul
let locks_path = step_dir.join("locks-delta.json");
let mut locks = load_json(&locks_path)?;
let previous_locks = serde_json::from_slice::<serde_json::Value>(&previous_locks_bytes)
.map_err(|e| format!("parse previous locks failed: {}: {e}", previous_locks_path.display()))?;
.map_err(|e| {
format!(
"parse previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?;
locks["baseLocksSha256"] = serde_json::Value::String(previous_locks_sha256.clone());
let capture = locks
.get("capture")
@ -760,17 +783,33 @@ fn real_main() -> Result<(), String> {
let bundle_json_path = rir_dir.join("bundle.json");
let verification_path = rir_dir.join("verification.json");
let mut bundle: RirBundleMetadataV2Serde = serde_json::from_slice(
&fs::read(&bundle_json_path)
.map_err(|e| format!("read bundle.json failed: {}: {e}", bundle_json_path.display()))?,
)
.map_err(|e| format!("parse bundle.json failed: {}: {e}", bundle_json_path.display()))?;
let mut bundle: RirBundleMetadataV2Serde =
serde_json::from_slice(&fs::read(&bundle_json_path).map_err(|e| {
format!(
"read bundle.json failed: {}: {e}",
bundle_json_path.display()
)
})?)
.map_err(|e| {
format!(
"parse bundle.json failed: {}: {e}",
bundle_json_path.display()
)
})?;
let mut verification: VerificationV2 = serde_json::from_slice(
&fs::read(&verification_path)
.map_err(|e| format!("read verification.json failed: {}: {e}", verification_path.display()))?,
)
.map_err(|e| format!("parse verification.json failed: {}: {e}", verification_path.display()))?;
let mut verification: VerificationV2 =
serde_json::from_slice(&fs::read(&verification_path).map_err(|e| {
format!(
"read verification.json failed: {}: {e}",
verification_path.display()
)
})?)
.map_err(|e| {
format!(
"parse verification.json failed: {}: {e}",
verification_path.display()
)
})?;
let tal_bytes = fs::read(rir_dir.join("tal.tal"))
.map_err(|e| format!("read tal.tal failed: {}: {e}", rir_dir.display()))?;
@ -787,7 +826,8 @@ fn real_main() -> Result<(), String> {
fs::create_dir_all(parent)
.map_err(|e| format!("create refresh db parent failed: {}: {e}", parent.display()))?;
}
let store = RocksStore::open(&work_db).map_err(|e| format!("open refresh rocksdb failed: {e}"))?;
let store =
RocksStore::open(&work_db).map_err(|e| format!("open refresh rocksdb failed: {e}"))?;
let base_archive = path_join(&rir_dir, &bundle.base.relative_archive_path);
let base_locks = path_join(&rir_dir, &bundle.base.relative_locks_path);
@ -823,8 +863,8 @@ fn real_main() -> Result<(), String> {
.map_err(|e| format!("build base ccr failed: {e}"))?;
write_ccr_file(&base_ccr, &base_ccr_content)
.map_err(|e| format!("write base ccr failed: {}: {e}", base_ccr.display()))?;
let base_ccr_bytes =
fs::read(&base_ccr).map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr.display()))?;
let base_ccr_bytes = fs::read(&base_ccr)
.map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr.display()))?;
let base_decoded =
decode_content_info(&base_ccr_bytes).map_err(|e| format!("decode base ccr failed: {e}"))?;
let base_verify =
@ -838,15 +878,18 @@ fn real_main() -> Result<(), String> {
bundle.base.vap_count = base_vap_rows.len();
verification.base["ccr"]["sha256"] = serde_json::Value::String(bundle.base.ccr_sha256.clone());
verification.base["ccr"]["stateHashesOk"] = serde_json::Value::Bool(base_verify.state_hashes_ok);
verification.base["ccr"]["stateHashesOk"] =
serde_json::Value::Bool(base_verify.state_hashes_ok);
verification.base["ccr"]["manifestInstances"] =
serde_json::Value::from(base_verify.manifest_instances);
verification.base["ccr"]["roaVrpCount"] = serde_json::Value::from(base_vrp_rows.len());
verification.base["ccr"]["aspaPayloadSets"] = serde_json::Value::from(base_vap_rows.len());
verification.base["ccr"]["routerKeyCount"] =
serde_json::Value::from(base_verify.router_key_count);
verification.base["compareViews"]["baseVrpCount"] = serde_json::Value::from(base_vrp_rows.len());
verification.base["compareViews"]["baseVapCount"] = serde_json::Value::from(base_vap_rows.len());
verification.base["compareViews"]["baseVrpCount"] =
serde_json::Value::from(base_vrp_rows.len());
verification.base["compareViews"]["baseVapCount"] =
serde_json::Value::from(base_vap_rows.len());
verification.base["capture"]["selfReplayOk"] = serde_json::Value::Bool(true);
repair_base_inputs(
&base_archive,
@ -922,7 +965,8 @@ fn real_main() -> Result<(), String> {
step_verification["ccr"]["manifestInstances"] =
serde_json::Value::from(delta_verify.manifest_instances);
step_verification["ccr"]["roaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len());
step_verification["ccr"]["aspaPayloadSets"] = serde_json::Value::from(delta_vap_rows.len());
step_verification["ccr"]["aspaPayloadSets"] =
serde_json::Value::from(delta_vap_rows.len());
step_verification["ccr"]["routerKeyCount"] =
serde_json::Value::from(delta_verify.router_key_count);
step_verification["compareViews"]["vrpCount"] =
@ -931,28 +975,29 @@ fn real_main() -> Result<(), String> {
serde_json::Value::from(delta_vap_rows.len());
step_verification["selfReplayOk"] = serde_json::Value::Bool(true);
}
let step_verification_path = path_join(&rir_dir, &step.relative_path).join("verification.json");
let mut step_verification_json: serde_json::Value = serde_json::from_slice(
&fs::read(&step_verification_path).map_err(|e| {
let step_verification_path =
path_join(&rir_dir, &step.relative_path).join("verification.json");
let mut step_verification_json: serde_json::Value =
serde_json::from_slice(&fs::read(&step_verification_path).map_err(|e| {
format!(
"read step verification failed: {}: {e}",
step_verification_path.display()
)
})?,
)
.map_err(|e| {
format!(
"parse step verification failed: {}: {e}",
step_verification_path.display()
)
})?;
})?)
.map_err(|e| {
format!(
"parse step verification failed: {}: {e}",
step_verification_path.display()
)
})?;
step_verification_json["ccr"]["sha256"] =
serde_json::Value::String(step.delta_ccr_sha256.clone());
step_verification_json["ccr"]["stateHashesOk"] =
serde_json::Value::Bool(delta_verify.state_hashes_ok);
step_verification_json["ccr"]["manifestInstances"] =
serde_json::Value::from(delta_verify.manifest_instances);
step_verification_json["ccr"]["roaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len());
step_verification_json["ccr"]["roaVrpCount"] =
serde_json::Value::from(delta_vrp_rows.len());
step_verification_json["ccr"]["aspaPayloadSets"] =
serde_json::Value::from(delta_vap_rows.len());
step_verification_json["ccr"]["routerKeyCount"] =

View File

@ -1,5 +1,5 @@
use rocksdb::{DB, IteratorMode, Options};
use rpki::storage::{column_family_descriptors, CF_REPOSITORY_VIEW};
use rpki::storage::{CF_REPOSITORY_VIEW, column_family_descriptors};
use std::fs;
use std::path::{Path, PathBuf};

View File

@ -64,12 +64,9 @@ fn main() {
println!("rule_hash={}", output.rule_hash);
println!("validation_path_hint={:?}", output.validation_path_hint);
if let Some(trace) = trace_rule_to_root(
&store,
AuditRuleKind::Roa,
&output.rule_hash,
)
.expect("trace rule")
if let Some(trace) =
trace_rule_to_root(&store, AuditRuleKind::Roa, &output.rule_hash)
.expect("trace rule")
{
println!(
"trace_leaf_manifest={}",
@ -79,11 +76,17 @@ fn main() {
.map(|node| node.manifest_rsync_uri.as_str())
.unwrap_or("")
);
println!("trace_source_object_uri={}", trace.resolved_output.source_object_uri);
println!(
"trace_source_object_uri={}",
trace.resolved_output.source_object_uri
);
println!("trace_chain_len={}", trace.chain_leaf_to_root.len());
for (idx, node) in trace.chain_leaf_to_root.iter().enumerate() {
println!("chain[{idx}].manifest={}", node.manifest_rsync_uri);
println!("chain[{idx}].current_manifest={}", node.current_manifest_rsync_uri);
println!(
"chain[{idx}].current_manifest={}",
node.current_manifest_rsync_uri
);
println!("chain[{idx}].current_crl={}", node.current_crl_rsync_uri);
}
}

323
src/blob_store.rs Normal file
View File

@ -0,0 +1,323 @@
use std::path::PathBuf;
use std::sync::Arc;
use rocksdb::{DB, Options, WriteBatch};
use crate::storage::{RawByHashEntry, RocksStore, StorageError, StorageResult};
const RAW_BY_HASH_KEY_PREFIX: &str = "rawbyhash:";
fn raw_by_hash_key(sha256_hex: &str) -> String {
format!("{RAW_BY_HASH_KEY_PREFIX}{sha256_hex}")
}
pub trait RawObjectStore {
fn get_raw_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>>;
fn get_raw_entries_batch(
&self,
sha256_hexes: &[String],
) -> StorageResult<Vec<Option<RawByHashEntry>>>;
fn get_blob_bytes(&self, sha256_hex: &str) -> StorageResult<Option<Vec<u8>>> {
self.get_raw_entry(sha256_hex)
.map(|entry| entry.map(|entry| entry.bytes))
}
fn get_blob_bytes_batch(
&self,
sha256_hexes: &[String],
) -> StorageResult<Vec<Option<Vec<u8>>>> {
self.get_raw_entries_batch(sha256_hexes).map(|entries| {
entries
.into_iter()
.map(|entry| entry.map(|entry| entry.bytes))
.collect()
})
}
}
#[derive(Clone, Debug)]
pub struct ExternalRawStoreDb {
path: PathBuf,
db: Arc<DB>,
}
impl ExternalRawStoreDb {
pub fn open(path: impl Into<PathBuf>) -> StorageResult<Self> {
let path = path.into();
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent).map_err(|e| StorageError::RocksDb(e.to_string()))?;
}
let mut opts = Options::default();
opts.create_if_missing(true);
opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
let db = DB::open(&opts, &path).map_err(|e| StorageError::RocksDb(e.to_string()))?;
Ok(Self {
path,
db: Arc::new(db),
})
}
pub fn put_raw_entry(&self, entry: &RawByHashEntry) -> StorageResult<()> {
entry.validate_internal()?;
let key = raw_by_hash_key(&entry.sha256_hex);
let value =
serde_cbor::to_vec(entry).map_err(|e| StorageError::Codec { entity: "raw_by_hash", detail: e.to_string() })?;
self.db
.put(key.as_bytes(), value)
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
Ok(())
}
pub fn put_raw_entries_batch(&self, entries: &[RawByHashEntry]) -> StorageResult<()> {
if entries.is_empty() {
return Ok(());
}
let mut batch = WriteBatch::default();
for entry in entries {
entry.validate_internal()?;
let key = raw_by_hash_key(&entry.sha256_hex);
let value = serde_cbor::to_vec(entry).map_err(|e| StorageError::Codec {
entity: "raw_by_hash",
detail: e.to_string(),
})?;
batch.put(key.as_bytes(), value);
}
self.db
.write(batch)
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
Ok(())
}
pub fn delete_raw_entry(&self, sha256_hex: &str) -> StorageResult<()> {
let key = raw_by_hash_key(sha256_hex);
self.db
.delete(key.as_bytes())
.map_err(|e| StorageError::RocksDb(e.to_string()))
}
pub fn path(&self) -> &PathBuf {
&self.path
}
}
impl RawObjectStore for RocksStore {
fn get_raw_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>> {
self.get_raw_by_hash_entry(sha256_hex)
}
fn get_raw_entries_batch(
&self,
sha256_hexes: &[String],
) -> StorageResult<Vec<Option<RawByHashEntry>>> {
self.get_raw_by_hash_entries_batch(sha256_hexes)
}
}
impl RawObjectStore for ExternalRawStoreDb {
fn get_raw_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>> {
let key = raw_by_hash_key(sha256_hex);
let Some(bytes) = self
.db
.get(key.as_bytes())
.map_err(|e| StorageError::RocksDb(e.to_string()))?
else {
return Ok(None);
};
let entry = serde_cbor::from_slice::<RawByHashEntry>(&bytes).map_err(|e| StorageError::Codec {
entity: "raw_by_hash",
detail: e.to_string(),
})?;
entry.validate_internal()?;
Ok(Some(entry))
}
fn get_raw_entries_batch(
&self,
sha256_hexes: &[String],
) -> StorageResult<Vec<Option<RawByHashEntry>>> {
if sha256_hexes.is_empty() {
return Ok(Vec::new());
}
let keys: Vec<String> = sha256_hexes.iter().map(|hash| raw_by_hash_key(hash)).collect();
self.db
.multi_get(keys.iter().map(|key| key.as_bytes()))
.into_iter()
.map(|res| {
let maybe = res.map_err(|e| StorageError::RocksDb(e.to_string()))?;
match maybe {
Some(bytes) => {
let entry = serde_cbor::from_slice::<RawByHashEntry>(&bytes).map_err(|e| {
StorageError::Codec {
entity: "raw_by_hash",
detail: e.to_string(),
}
})?;
entry.validate_internal()?;
Ok(Some(entry))
}
None => Ok(None),
}
})
.collect()
}
}
#[cfg(test)]
mod tests {
use super::{ExternalRawStoreDb, RawObjectStore};
use crate::storage::{RawByHashEntry, RocksStore, StorageError};
fn sha256_hex(bytes: &[u8]) -> String {
use sha2::{Digest, Sha256};
hex::encode(Sha256::digest(bytes))
}
#[test]
fn rocks_store_raw_object_store_reads_single_and_batch_entries() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let a = b"object-a".to_vec();
let b = b"object-b".to_vec();
let a_hash = sha256_hex(&a);
let b_hash = sha256_hex(&b);
store
.put_raw_by_hash_entry(&RawByHashEntry::from_bytes(a_hash.clone(), a.clone()))
.expect("put a");
store
.put_raw_by_hash_entry(&RawByHashEntry::from_bytes(b_hash.clone(), b.clone()))
.expect("put b");
let single = store
.get_raw_entry(&a_hash)
.expect("get single")
.expect("present");
assert_eq!(single.bytes, a);
let batch = store
.get_raw_entries_batch(&[a_hash.clone(), "00".repeat(32), b_hash.clone()])
.expect("get batch");
assert_eq!(batch.len(), 3);
assert_eq!(batch[0].as_ref().map(|entry| entry.bytes.as_slice()), Some(a.as_slice()));
assert!(batch[1].is_none());
assert_eq!(batch[2].as_ref().map(|entry| entry.bytes.as_slice()), Some(b.as_slice()));
}
#[test]
fn external_raw_store_db_roundtrips_entries() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
let mut entry = RawByHashEntry::from_bytes(sha256_hex(b"blob"), b"blob".to_vec());
entry.origin_uris.push("rsync://example.test/repo/a.cer".to_string());
entry.object_type = Some("cer".to_string());
raw_store.put_raw_entry(&entry).expect("put raw entry");
let got = raw_store
.get_raw_entry(&entry.sha256_hex)
.expect("read raw entry")
.expect("entry exists");
assert_eq!(got, entry);
}
#[test]
fn external_raw_store_db_batch_writes_and_reads() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
let a = RawByHashEntry::from_bytes(sha256_hex(b"a"), b"a".to_vec());
let b = RawByHashEntry::from_bytes(sha256_hex(b"b"), b"b".to_vec());
raw_store
.put_raw_entries_batch(&[a.clone(), b.clone()])
.expect("batch put");
let batch = raw_store
.get_raw_entries_batch(&[a.sha256_hex.clone(), b.sha256_hex.clone()])
.expect("batch get");
assert_eq!(batch.len(), 2);
assert_eq!(batch[0], Some(a));
assert_eq!(batch[1], Some(b));
}
#[test]
fn raw_object_store_default_blob_helpers_return_bytes_only() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store = ExternalRawStoreDb::open(td.path().join("nested/raw-store.db"))
.expect("open raw store");
let mut entry = RawByHashEntry::from_bytes(sha256_hex(b"blob"), b"blob".to_vec());
entry.origin_uris.push("rsync://example.test/repo/blob.roa".to_string());
raw_store.put_raw_entry(&entry).expect("put raw entry");
let single = raw_store
.get_blob_bytes(&entry.sha256_hex)
.expect("get blob bytes")
.expect("entry exists");
assert_eq!(single, b"blob".to_vec());
let batch = raw_store
.get_blob_bytes_batch(&[entry.sha256_hex.clone(), "00".repeat(32)])
.expect("get blob bytes batch");
assert_eq!(batch, vec![Some(b"blob".to_vec()), None]);
}
#[test]
fn external_raw_store_db_delete_removes_entry() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
let entry = RawByHashEntry::from_bytes(sha256_hex(b"gone"), b"gone".to_vec());
raw_store.put_raw_entry(&entry).expect("put");
assert!(raw_store.get_raw_entry(&entry.sha256_hex).unwrap().is_some());
raw_store
.delete_raw_entry(&entry.sha256_hex)
.expect("delete entry");
assert!(raw_store.get_raw_entry(&entry.sha256_hex).unwrap().is_none());
}
#[test]
fn external_raw_store_db_rejects_invalid_entry_on_put() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
let bad = RawByHashEntry {
sha256_hex: "11".repeat(32),
bytes: b"blob".to_vec(),
origin_uris: Vec::new(),
object_type: None,
encoding: None,
};
let err = raw_store.put_raw_entry(&bad).expect_err("invalid hash should fail");
assert!(matches!(err, StorageError::InvalidData { .. }));
}
#[test]
fn external_raw_store_db_reports_codec_error_for_corrupt_value() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
raw_store
.db
.put(b"rawbyhash:deadbeef", b"not-cbor")
.expect("inject corrupt bytes");
let err = raw_store
.get_raw_entry("deadbeef")
.expect_err("corrupt value should fail");
assert!(matches!(err, StorageError::Codec { entity: "raw_by_hash", .. }));
}
#[test]
fn external_raw_store_db_batch_returns_empty_for_empty_request() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store = ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
let entries = raw_store
.get_raw_entries_batch(&[])
.expect("empty batch succeeds");
assert!(entries.is_empty());
raw_store.put_raw_entries_batch(&[]).expect("empty put succeeds");
}
}

View File

@ -65,13 +65,10 @@ pub fn build_vap_compare_rows(
aspas: &[AspaAttestation],
trust_anchor: &str,
) -> BTreeSet<VapCompareRow> {
aspas.iter()
aspas
.iter()
.map(|aspa| {
let mut providers = aspa
.provider_as_ids
.iter()
.copied()
.collect::<Vec<_>>();
let mut providers = aspa.provider_as_ids.iter().copied().collect::<Vec<_>>();
providers.sort_unstable();
providers.dedup();
VapCompareRow {
@ -173,7 +170,10 @@ pub fn write_vap_csv(path: &Path, rows: &BTreeSet<VapCompareRow>) -> Result<(),
#[cfg(test)]
mod tests {
use super::*;
use crate::ccr::{CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, build_aspa_payload_state, build_roa_payload_state};
use crate::ccr::{
CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation,
build_aspa_payload_state, build_roa_payload_state,
};
use crate::data_model::roa::{IpPrefix, RoaAfi};
#[test]
@ -218,7 +218,8 @@ mod tests {
tas: None,
rks: None,
});
let (vrp_rows, vap_rows) = decode_ccr_compare_views(&content, "apnic").expect("decode compare views");
let (vrp_rows, vap_rows) =
decode_ccr_compare_views(&content, "apnic").expect("decode compare views");
assert_eq!(vrp_rows.len(), 1);
assert_eq!(vap_rows.len(), 1);
assert_eq!(vap_rows.iter().next().unwrap().providers, "AS64497");

View File

@ -8,10 +8,12 @@ use time::format_description::well_known::Rfc3339;
use crate::audit::PublicationPointAudit;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use crate::replay::archive::{ReplayArchiveIndex, ReplayRrdpLock, ReplayTransport, canonical_rsync_module, sha256_hex};
use crate::sync::rrdp::{NotificationDeltaRef, parse_notification};
use crate::replay::archive::{
ReplayArchiveIndex, ReplayRrdpLock, ReplayTransport, canonical_rsync_module, sha256_hex,
};
use crate::storage::{RocksStore, RrdpSourceRecord};
use crate::sync::rrdp::Fetcher;
use crate::sync::rrdp::{NotificationDeltaRef, parse_notification};
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RecordedHttpResponse {
@ -124,10 +126,12 @@ impl<F: RsyncFetcher> RsyncFetcher for RecordingRsyncFetcher<F> {
if self.capture_objects {
let mut recorded = Vec::new();
let result = self.inner.visit_objects(rsync_base_uri, &mut |uri, bytes| {
recorded.push((uri.clone(), bytes.clone()));
visitor(uri, bytes)
})?;
let result = self
.inner
.visit_objects(rsync_base_uri, &mut |uri, bytes| {
recorded.push((uri.clone(), bytes.clone()));
visitor(uri, bytes)
})?;
self.fetches.lock().expect("rsync recorder lock").insert(
rsync_base_uri.to_string(),
RecordedRsyncFetch {
@ -347,7 +351,13 @@ fn rrdp_repo_is_replayable(record: &RrdpSourceRecord) -> bool {
fn collect_current_state_locks(
publication_points: &[PublicationPointAudit],
store: &RocksStore,
) -> Result<(BTreeMap<String, RrdpLockJson>, BTreeMap<String, RsyncLockJson>), String> {
) -> Result<
(
BTreeMap<String, RrdpLockJson>,
BTreeMap<String, RsyncLockJson>,
),
String,
> {
let mut rrdp_locks = BTreeMap::new();
let mut rsync_locks = BTreeMap::new();
let mut seen_modules = BTreeSet::new();
@ -357,8 +367,12 @@ fn collect_current_state_locks(
if pp.source == "failed_fetch_no_cache" {
continue;
}
let module_uri = canonical_rsync_module(&pp.rsync_base_uri)
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?;
let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() {
if !seen_rrdp.insert(notify_uri.to_string()) {
continue;
@ -476,7 +490,10 @@ fn materialize_rsync_module(
last_seen_at: &str,
) -> Result<(), String> {
let bucket_hash = sha256_hex(module_uri.as_bytes());
let bucket_dir = capture_root.join("rsync").join("modules").join(&bucket_hash);
let bucket_dir = capture_root
.join("rsync")
.join("modules")
.join(&bucket_hash);
write_json(
&bucket_dir.join("meta.json"),
&ModuleMetaJson {
@ -490,8 +507,12 @@ fn materialize_rsync_module(
.strip_prefix("rsync://")
.ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))?;
let relative_root = without_scheme.trim_end_matches('/');
fs::create_dir_all(bucket_dir.join("tree").join(relative_root))
.map_err(|e| format!("create rsync tree root failed: {}: {e}", bucket_dir.join("tree").join(relative_root).display()))?;
fs::create_dir_all(bucket_dir.join("tree").join(relative_root)).map_err(|e| {
format!(
"create rsync tree root failed: {}: {e}",
bucket_dir.join("tree").join(relative_root).display()
)
})?;
for (uri, bytes) in objects {
let rel = uri
.strip_prefix(module_uri)
@ -557,8 +578,12 @@ pub fn write_live_base_replay_bundle_inputs(
if pp.source == "failed_fetch_no_cache" {
continue;
}
let module_uri = canonical_rsync_module(&pp.rsync_base_uri)
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?;
let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() {
if !seen_rrdp.insert(notify_uri.to_string()) {
continue;
@ -567,7 +592,9 @@ pub fn write_live_base_replay_bundle_inputs(
if rrdp_repo_is_replayable(&source_record) {
let notification_bytes = &http_records
.get(notify_uri)
.ok_or_else(|| format!("missing recorded notification body for {notify_uri}"))?
.ok_or_else(|| {
format!("missing recorded notification body for {notify_uri}")
})?
.bytes;
let snapshot_uri = source_record
.last_snapshot_uri
@ -575,9 +602,16 @@ pub fn write_live_base_replay_bundle_inputs(
.ok_or_else(|| format!("missing last_snapshot_uri for {notify_uri}"))?;
let snapshot_bytes = &http_records
.get(snapshot_uri)
.ok_or_else(|| format!("missing recorded snapshot body for {snapshot_uri}"))?
.ok_or_else(|| {
format!("missing recorded snapshot body for {snapshot_uri}")
})?
.bytes;
materialize_rrdp_repo(&capture_root, &source_record, notification_bytes, snapshot_bytes)?;
materialize_rrdp_repo(
&capture_root,
&source_record,
notification_bytes,
snapshot_bytes,
)?;
rrdp_locks.insert(
notify_uri.to_string(),
RrdpLockJson {
@ -621,10 +655,15 @@ pub fn write_live_base_replay_bundle_inputs(
for fetch in rsync_records.values() {
let module_uri = canonical_rsync_module(&fetch.requested_base_uri)
.map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?;
let objects = rsync_objects_by_module.entry(module_uri.clone()).or_default();
let times = rsync_times_by_module
.entry(module_uri)
.or_insert_with(|| (fetch.fetched_at_rfc3339_utc.clone(), fetch.fetched_at_rfc3339_utc.clone()));
let objects = rsync_objects_by_module
.entry(module_uri.clone())
.or_default();
let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| {
(
fetch.fetched_at_rfc3339_utc.clone(),
fetch.fetched_at_rfc3339_utc.clone(),
)
});
if fetch.fetched_at_rfc3339_utc < times.0 {
times.0 = fetch.fetched_at_rfc3339_utc.clone();
}
@ -653,7 +692,13 @@ pub fn write_live_base_replay_bundle_inputs(
.unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string());
(now.clone(), now)
});
materialize_rsync_module(&capture_root, module_uri, objects, &created_at, &last_seen_at)?;
materialize_rsync_module(
&capture_root,
module_uri,
objects,
&created_at,
&last_seen_at,
)?;
}
let locks = ReplayLocksJson {
@ -761,13 +806,25 @@ pub fn write_live_delta_replay_step_inputs(
http_records: &BTreeMap<String, RecordedHttpResponse>,
rsync_records: &BTreeMap<String, RecordedRsyncFetch>,
) -> Result<LiveDeltaCaptureSummary, String> {
let previous_locks: crate::replay::archive::ReplayLocks = serde_json::from_slice(
&fs::read(previous_locks_path)
.map_err(|e| format!("read previous locks failed: {}: {e}", previous_locks_path.display()))?,
)
.map_err(|e| format!("parse previous locks failed: {}: {e}", previous_locks_path.display()))?;
let previous_locks_bytes = fs::read(previous_locks_path)
.map_err(|e| format!("read previous locks bytes failed: {}: {e}", previous_locks_path.display()))?;
let previous_locks: crate::replay::archive::ReplayLocks =
serde_json::from_slice(&fs::read(previous_locks_path).map_err(|e| {
format!(
"read previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?)
.map_err(|e| {
format!(
"parse previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?;
let previous_locks_bytes = fs::read(previous_locks_path).map_err(|e| {
format!(
"read previous locks bytes failed: {}: {e}",
previous_locks_path.display()
)
})?;
let previous_locks_sha256 = sha256_hex(&previous_locks_bytes);
let recorded_at = time::OffsetDateTime::now_utc();
@ -810,10 +867,15 @@ pub fn write_live_delta_replay_step_inputs(
for fetch in rsync_records.values() {
let module_uri = canonical_rsync_module(&fetch.requested_base_uri)
.map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?;
let objects = rsync_objects_by_module.entry(module_uri.clone()).or_default();
let times = rsync_times_by_module
.entry(module_uri)
.or_insert_with(|| (fetch.fetched_at_rfc3339_utc.clone(), fetch.fetched_at_rfc3339_utc.clone()));
let objects = rsync_objects_by_module
.entry(module_uri.clone())
.or_default();
let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| {
(
fetch.fetched_at_rfc3339_utc.clone(),
fetch.fetched_at_rfc3339_utc.clone(),
)
});
if fetch.fetched_at_rfc3339_utc < times.0 {
times.0 = fetch.fetched_at_rfc3339_utc.clone();
}
@ -834,22 +896,31 @@ pub fn write_live_delta_replay_step_inputs(
if pp.source == "failed_fetch_no_cache" {
continue;
}
let module_uri = canonical_rsync_module(&pp.rsync_base_uri)
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?;
let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() {
if !seen_notifications.insert(notify_uri.to_string()) {
continue;
}
let base_lock = previous_locks.rrdp.get(notify_uri);
let target_record = store
.get_rrdp_source_record(notify_uri)
.map_err(|e| format!("read target rrdp source record failed for {notify_uri}: {e}"))?;
let target_record = store.get_rrdp_source_record(notify_uri).map_err(|e| {
format!("read target rrdp source record failed for {notify_uri}: {e}")
})?;
let bucket_hash = sha256_hex(notify_uri.as_bytes());
let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash);
let (created_at, last_seen_at) = target_record
.as_ref()
.map(|record| (record.first_seen_at.rfc3339_utc.clone(), record.last_seen_at.rfc3339_utc.clone()))
.map(|record| {
(
record.first_seen_at.rfc3339_utc.clone(),
record.last_seen_at.rfc3339_utc.clone(),
)
})
.unwrap_or_else(|| {
let now = recorded_at
.format(&Rfc3339)
@ -869,7 +940,9 @@ pub fn write_live_delta_replay_step_inputs(
let entry = if let (Some(base_lock), Some(target_record), Some(target_state)) = (
base_lock,
target_record.as_ref(),
target_record.as_ref().and_then(target_rrdp_state_from_record),
target_record
.as_ref()
.and_then(target_rrdp_state_from_record),
) {
if base_lock.transport == ReplayTransport::Rrdp
&& base_lock.session.as_deref() == target_record.last_session_id.as_deref()
@ -900,10 +973,16 @@ pub fn write_live_delta_replay_step_inputs(
let notification_bytes = http_records
.get(notify_uri)
.map(|record| record.bytes.as_slice())
.ok_or_else(|| format!("missing recorded target notification body for {notify_uri}"))?;
.ok_or_else(|| {
format!("missing recorded target notification body for {notify_uri}")
})?;
let base_serial = base_lock.serial.expect("checked above");
let target_serial = target_record.last_serial.expect("checked above");
let deltas = notification_deltas_after_serial(notification_bytes, base_serial, target_serial)?;
let deltas = notification_deltas_after_serial(
notification_bytes,
base_serial,
target_serial,
)?;
let mut all_present = true;
let session = target_record
.last_session_id
@ -913,8 +992,10 @@ pub fn write_live_delta_replay_step_inputs(
let notification_path =
session_dir.join(format!("notification-target-{target_serial}.xml"));
write_bytes(&notification_path, notification_bytes)?;
let target_notification = parse_notification(notification_bytes)
.map_err(|e| format!("parse target notification failed for {notify_uri}: {e}"))?;
let target_notification =
parse_notification(notification_bytes).map_err(|e| {
format!("parse target notification failed for {notify_uri}: {e}")
})?;
let snapshot_hash_hex = hex::encode(target_notification.snapshot_hash_sha256);
if let Some(snapshot_bytes) = http_records
.get(&target_notification.snapshot_uri)
@ -928,9 +1009,13 @@ pub fn write_live_delta_replay_step_inputs(
let deltas_dir = session_dir.join("deltas");
let mut delta_serials = Vec::new();
for dref in &deltas {
if let Some(delta_bytes) = http_records.get(&dref.uri).map(|record| record.bytes.as_slice()) {
if let Some(delta_bytes) = http_records
.get(&dref.uri)
.map(|record| record.bytes.as_slice())
{
let hash = hex::encode(dref.hash_sha256);
let path = deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash));
let path =
deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash));
write_bytes(&path, delta_bytes)?;
delta_serials.push(dref.serial);
} else {
@ -1014,8 +1099,17 @@ pub fn write_live_delta_replay_step_inputs(
(now.clone(), now)
});
let bucket_hash = sha256_hex(module_uri.as_bytes());
let bucket_dir = capture_root.join("rsync").join("modules").join(&bucket_hash);
materialize_rsync_module(&capture_root, &module_uri, objects, &created_at, &last_seen_at)?;
let bucket_dir = capture_root
.join("rsync")
.join("modules")
.join(&bucket_hash);
materialize_rsync_module(
&capture_root,
&module_uri,
objects,
&created_at,
&last_seen_at,
)?;
let files = objects.keys().cloned().collect::<Vec<_>>();
write_json(
&bucket_dir.join("files.json"),
@ -1072,8 +1166,9 @@ pub fn write_live_delta_replay_bundle_inputs(
) -> Result<LiveDeltaCaptureSummary, String> {
let base_archive_root = rir_dir.join("base-payload-archive");
let base_locks_path = rir_dir.join("base-locks.json");
let base_index = ReplayArchiveIndex::load_allow_missing_rsync_modules(&base_archive_root, &base_locks_path)
.map_err(|e| format!("load base replay index failed: {e}"))?;
let base_index =
ReplayArchiveIndex::load_allow_missing_rsync_modules(&base_archive_root, &base_locks_path)
.map_err(|e| format!("load base replay index failed: {e}"))?;
let base_locks_bytes = fs::read(&base_locks_path)
.map_err(|e| format!("read base locks failed: {}: {e}", base_locks_path.display()))?;
let base_locks_sha256 = sha256_hex(&base_locks_bytes);
@ -1118,10 +1213,15 @@ pub fn write_live_delta_replay_bundle_inputs(
for fetch in rsync_records.values() {
let module_uri = canonical_rsync_module(&fetch.requested_base_uri)
.map_err(|e| format!("canonicalize requested rsync module failed: {e}"))?;
let objects = rsync_objects_by_module.entry(module_uri.clone()).or_default();
let times = rsync_times_by_module
.entry(module_uri)
.or_insert_with(|| (fetch.fetched_at_rfc3339_utc.clone(), fetch.fetched_at_rfc3339_utc.clone()));
let objects = rsync_objects_by_module
.entry(module_uri.clone())
.or_default();
let times = rsync_times_by_module.entry(module_uri).or_insert_with(|| {
(
fetch.fetched_at_rfc3339_utc.clone(),
fetch.fetched_at_rfc3339_utc.clone(),
)
});
if fetch.fetched_at_rfc3339_utc < times.0 {
times.0 = fetch.fetched_at_rfc3339_utc.clone();
}
@ -1139,22 +1239,31 @@ pub fn write_live_delta_replay_bundle_inputs(
let mut needed_modules = BTreeSet::new();
for pp in publication_points {
let module_uri = canonical_rsync_module(&pp.rsync_base_uri)
.map_err(|e| format!("canonicalize rsync module failed for {}: {e}", pp.rsync_base_uri))?;
let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() {
if !seen_notifications.insert(notify_uri.to_string()) {
continue;
}
let base_lock = base_index.rrdp_lock(notify_uri);
let target_record = store
.get_rrdp_source_record(notify_uri)
.map_err(|e| format!("read target rrdp source record failed for {notify_uri}: {e}"))?;
let target_record = store.get_rrdp_source_record(notify_uri).map_err(|e| {
format!("read target rrdp source record failed for {notify_uri}: {e}")
})?;
let bucket_hash = sha256_hex(notify_uri.as_bytes());
let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash);
let (created_at, last_seen_at) = target_record
.as_ref()
.map(|record| (record.first_seen_at.rfc3339_utc.clone(), record.last_seen_at.rfc3339_utc.clone()))
.map(|record| {
(
record.first_seen_at.rfc3339_utc.clone(),
record.last_seen_at.rfc3339_utc.clone(),
)
})
.unwrap_or_else(|| {
let now = recorded_at
.format(&Rfc3339)
@ -1207,20 +1316,29 @@ pub fn write_live_delta_replay_bundle_inputs(
let notification_bytes = http_records
.get(notify_uri)
.map(|record| record.bytes.as_slice())
.ok_or_else(|| format!("missing recorded target notification body for {notify_uri}"))?;
.ok_or_else(|| {
format!("missing recorded target notification body for {notify_uri}")
})?;
let base_serial = base_lock.serial.expect("checked above");
let target_serial = target_record.last_serial.expect("checked above");
let deltas = notification_deltas_after_serial(notification_bytes, base_serial, target_serial)?;
let deltas = notification_deltas_after_serial(
notification_bytes,
base_serial,
target_serial,
)?;
let mut all_present = true;
let session = target_record
.last_session_id
.as_deref()
.ok_or_else(|| format!("missing target session for {notify_uri}"))?;
let session_dir = bucket_dir.join(session);
let notification_path = session_dir.join(format!("notification-target-{target_serial}.xml"));
let notification_path =
session_dir.join(format!("notification-target-{target_serial}.xml"));
write_bytes(&notification_path, notification_bytes)?;
let target_notification = parse_notification(notification_bytes)
.map_err(|e| format!("parse target notification failed for {notify_uri}: {e}"))?;
let target_notification =
parse_notification(notification_bytes).map_err(|e| {
format!("parse target notification failed for {notify_uri}: {e}")
})?;
let snapshot_hash_hex = hex::encode(target_notification.snapshot_hash_sha256);
if let Some(snapshot_bytes) = http_records
.get(&target_notification.snapshot_uri)
@ -1234,9 +1352,13 @@ pub fn write_live_delta_replay_bundle_inputs(
let deltas_dir = session_dir.join("deltas");
let mut delta_serials = Vec::new();
for dref in &deltas {
if let Some(delta_bytes) = http_records.get(&dref.uri).map(|record| record.bytes.as_slice()) {
if let Some(delta_bytes) = http_records
.get(&dref.uri)
.map(|record| record.bytes.as_slice())
{
let hash = hex::encode(dref.hash_sha256);
let path = deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash));
let path =
deltas_dir.join(format!("delta-{}-{}.xml", dref.serial, hash));
write_bytes(&path, delta_bytes)?;
delta_serials.push(dref.serial);
} else {
@ -1320,8 +1442,17 @@ pub fn write_live_delta_replay_bundle_inputs(
(now.clone(), now)
});
let bucket_hash = sha256_hex(module_uri.as_bytes());
let bucket_dir = capture_root.join("rsync").join("modules").join(&bucket_hash);
materialize_rsync_module(&capture_root, &module_uri, objects, &created_at, &last_seen_at)?;
let bucket_dir = capture_root
.join("rsync")
.join("modules")
.join(&bucket_hash);
materialize_rsync_module(
&capture_root,
&module_uri,
objects,
&created_at,
&last_seen_at,
)?;
let files = objects.keys().cloned().collect::<Vec<_>>();
write_json(
&bucket_dir.join("files.json"),
@ -1402,7 +1533,12 @@ mod tests {
}
}
fn minimal_notification(notify_uri: &str, snapshot_uri: &str, session: &str, serial: u64) -> Vec<u8> {
fn minimal_notification(
notify_uri: &str,
snapshot_uri: &str,
session: &str,
serial: u64,
) -> Vec<u8> {
format!(
r#"<notification xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session}" serial="{serial}"><snapshot uri="{snapshot_uri}" hash="{}"/></notification>"#,
sha256_hex(b"<snapshot/>")
@ -1426,7 +1562,10 @@ mod tests {
#[test]
fn recording_rsync_fetcher_records_object_sets() {
let fetcher = RecordingRsyncFetcher::new(DummyRsyncFetcher {
objects: vec![("rsync://example.test/repo/a.roa".to_string(), b"roa".to_vec())],
objects: vec![(
"rsync://example.test/repo/a.roa".to_string(),
b"roa".to_vec(),
)],
});
let got = fetcher
.fetch_objects("rsync://example.test/repo/")
@ -1480,8 +1619,10 @@ mod tests {
rrdp_notification_uri: Some(notify_uri.to_string()),
source: "fresh".to_string(),
repo_sync_source: None,
repo_sync_phase: None,
repo_sync_duration_ms: None,
repo_sync_error: None,
repo_terminal_state: "fresh".to_string(),
this_update_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(),
next_update_rfc3339_utc: "2026-03-30T01:00:00Z".to_string(),
verified_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(),
@ -1498,8 +1639,10 @@ mod tests {
rrdp_notification_uri: None,
source: "fresh".to_string(),
repo_sync_source: None,
repo_sync_phase: None,
repo_sync_duration_ms: None,
repo_sync_error: None,
repo_terminal_state: "fresh".to_string(),
this_update_rfc3339_utc: "2026-03-30T00:00:00Z".to_string(),
next_update_rfc3339_utc: "2026-03-30T01:00:00Z".to_string(),
verified_at_rfc3339_utc: "2026-03-30T00:00:02Z".to_string(),

View File

@ -9,17 +9,16 @@ pub use compare_view::{
};
pub use live_capture::{
LiveBaseCaptureSummary, LiveDeltaCaptureSummary, RecordedHttpResponse, RecordedRsyncFetch,
RecordingHttpFetcher, RecordingRsyncFetcher, write_live_base_replay_bundle_inputs,
write_live_delta_replay_bundle_inputs, write_live_delta_replay_step_inputs,
write_current_replay_state_locks,
RecordingHttpFetcher, RecordingRsyncFetcher, write_current_replay_state_locks,
write_live_base_replay_bundle_inputs, write_live_delta_replay_bundle_inputs,
write_live_delta_replay_step_inputs,
};
pub use record_io::{
build_single_rir_bundle_manifest, copy_dir_all, load_validation_time, sha256_hex,
write_bytes, write_json, write_live_bundle_rir_readme, write_live_bundle_top_readme,
write_timing_json,
build_single_rir_bundle_manifest, copy_dir_all, load_validation_time, sha256_hex, write_bytes,
write_json, write_live_bundle_rir_readme, write_live_bundle_top_readme, write_timing_json,
};
pub use spec::{BundleManifest, BundleManifestEntry, RirBundleMetadata};
pub use spec::{
BaseBundleStateMetadataV2, BundleManifestEntryV2, BundleManifestV2, DeltaSequenceMetadataV2,
DeltaStepMetadataV2, RirBundleMetadataV2,
};
pub use spec::{BundleManifest, BundleManifestEntry, RirBundleMetadata};

View File

@ -81,7 +81,9 @@ pub fn write_bytes(path: &Path, bytes: &[u8]) -> Result<(), String> {
pub fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(dst)
.map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
for entry in fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))? {
for entry in
fs::read_dir(src).map_err(|e| format!("read_dir failed: {}: {e}", src.display()))?
{
let entry = entry.map_err(|e| format!("read_dir entry failed: {}: {e}", src.display()))?;
let ty = entry
.file_type()
@ -94,8 +96,13 @@ pub fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(parent)
.map_err(|e| format!("create parent failed: {}: {e}", parent.display()))?;
}
fs::copy(entry.path(), &to)
.map_err(|e| format!("copy failed: {} -> {}: {e}", entry.path().display(), to.display()))?;
fs::copy(entry.path(), &to).map_err(|e| {
format!(
"copy failed: {} -> {}: {e}",
entry.path().display(),
to.display()
)
})?;
}
}
Ok(())
@ -197,7 +204,10 @@ mod tests {
.expect("manifest");
assert_eq!(manifest.schema_version, "20260330-v1");
assert_eq!(manifest.rirs, vec!["apnic".to_string()]);
assert_eq!(manifest.per_rir_bundles[0].base_validation_time, "2026-04-01T00:00:00Z");
assert_eq!(
manifest.per_rir_bundles[0].base_validation_time,
"2026-04-01T00:00:00Z"
);
assert_eq!(
manifest.per_rir_bundles[0].delta_validation_time.as_deref(),
Some("2026-04-01T00:10:00Z")
@ -255,15 +265,9 @@ mod tests {
#[test]
fn build_single_rir_bundle_manifest_supports_none_delta_time() {
let base = time::OffsetDateTime::parse("2026-04-01T00:00:00Z", &Rfc3339).expect("base");
let manifest = build_single_rir_bundle_manifest(
"20260330-v1",
"ours",
"afrinic",
&base,
None,
false,
)
.expect("manifest");
let manifest =
build_single_rir_bundle_manifest("20260330-v1", "ours", "afrinic", &base, None, false)
.expect("manifest");
assert_eq!(manifest.per_rir_bundles[0].delta_validation_time, None);
assert!(!manifest.per_rir_bundles[0].has_aspa);
}

View File

@ -19,7 +19,10 @@ pub struct BundleManifestEntry {
pub relative_path: String,
#[serde(rename = "baseValidationTime")]
pub base_validation_time: String,
#[serde(rename = "deltaValidationTime", skip_serializing_if = "Option::is_none")]
#[serde(
rename = "deltaValidationTime",
skip_serializing_if = "Option::is_none"
)]
pub delta_validation_time: Option<String>,
#[serde(rename = "hasAspa")]
pub has_aspa: bool,
@ -34,7 +37,10 @@ pub struct RirBundleMetadata {
pub rir: String,
#[serde(rename = "baseValidationTime")]
pub base_validation_time: String,
#[serde(rename = "deltaValidationTime", skip_serializing_if = "Option::is_none")]
#[serde(
rename = "deltaValidationTime",
skip_serializing_if = "Option::is_none"
)]
pub delta_validation_time: Option<String>,
#[serde(rename = "talSha256")]
pub tal_sha256: String,
@ -79,9 +85,15 @@ pub struct BundleManifestEntryV2 {
pub base_validation_time: String,
#[serde(rename = "stepCount")]
pub step_count: usize,
#[serde(rename = "firstDeltaValidationTime", skip_serializing_if = "Option::is_none")]
#[serde(
rename = "firstDeltaValidationTime",
skip_serializing_if = "Option::is_none"
)]
pub first_delta_validation_time: Option<String>,
#[serde(rename = "lastDeltaValidationTime", skip_serializing_if = "Option::is_none")]
#[serde(
rename = "lastDeltaValidationTime",
skip_serializing_if = "Option::is_none"
)]
pub last_delta_validation_time: Option<String>,
#[serde(rename = "hasAspa")]
pub has_aspa: bool,

View File

@ -12,12 +12,13 @@ use crate::ccr::model::{
AspaPayloadSet, AspaPayloadState, ManifestInstance, ManifestState, RoaPayloadSet,
RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState, TrustAnchorState,
};
use crate::blob_store::RawObjectStore;
use crate::data_model::manifest::ManifestObject;
use crate::data_model::rc::{AccessDescription, SubjectInfoAccess};
use crate::data_model::roa::RoaAfi;
use crate::data_model::router_cert::BgpsecRouterCertificate;
use crate::data_model::ta::TrustAnchor;
use crate::storage::{RocksStore, VcirArtifactRole, ValidatedCaInstanceResult};
use crate::storage::{RocksStore, ValidatedCaInstanceResult, VcirArtifactRole};
use crate::validation::objects::{AspaAttestation, Vrp};
#[derive(Debug, thiserror::Error)]
@ -41,13 +42,22 @@ pub enum CcrBuildError {
MissingManifestArtifact(String),
#[error("manifest raw bytes missing in store for {manifest_rsync_uri}: {sha256_hex}")]
MissingManifestRawBytes { manifest_rsync_uri: String, sha256_hex: String },
MissingManifestRawBytes {
manifest_rsync_uri: String,
sha256_hex: String,
},
#[error("manifest raw bytes load failed for {manifest_rsync_uri}: {detail}")]
LoadManifestRawBytes { manifest_rsync_uri: String, detail: String },
LoadManifestRawBytes {
manifest_rsync_uri: String,
detail: String,
},
#[error("manifest decode failed for {manifest_rsync_uri}: {detail}")]
ManifestDecode { manifest_rsync_uri: String, detail: String },
ManifestDecode {
manifest_rsync_uri: String,
detail: String,
},
#[error("manifest EE certificate missing AuthorityKeyIdentifier: {0}")]
ManifestEeMissingAki(String),
@ -99,8 +109,8 @@ pub fn build_roa_payload_state(vrps: &[Vrp]) -> Result<RoaPayloadState, CcrBuild
});
}
let payload_der =
encode_roa_payload_state_payload_der(&rps).map_err(|e| CcrBuildError::RoaEncode(e.to_string()))?;
let payload_der = encode_roa_payload_state_payload_der(&rps)
.map_err(|e| CcrBuildError::RoaEncode(e.to_string()))?;
Ok(RoaPayloadState {
rps,
hash: compute_state_hash(&payload_der),
@ -176,10 +186,12 @@ pub fn build_manifest_state_from_vcirs(
artifact.artifact_role == VcirArtifactRole::Manifest
&& artifact.uri.as_deref() == Some(vcir.current_manifest_rsync_uri.as_str())
})
.ok_or_else(|| CcrBuildError::MissingManifestArtifact(vcir.current_manifest_rsync_uri.clone()))?;
.ok_or_else(|| {
CcrBuildError::MissingManifestArtifact(vcir.current_manifest_rsync_uri.clone())
})?;
let raw_entry = store
.get_raw_by_hash_entry(&manifest_artifact.sha256)
let raw_bytes = store
.get_blob_bytes(&manifest_artifact.sha256)
.map_err(|e| CcrBuildError::LoadManifestRawBytes {
manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(),
detail: e.to_string(),
@ -189,9 +201,11 @@ pub fn build_manifest_state_from_vcirs(
sha256_hex: manifest_artifact.sha256.clone(),
})?;
let manifest = ManifestObject::decode_der(&raw_entry.bytes).map_err(|e| CcrBuildError::ManifestDecode {
manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(),
detail: e.to_string(),
let manifest = ManifestObject::decode_der(&raw_bytes).map_err(|e| {
CcrBuildError::ManifestDecode {
manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(),
detail: e.to_string(),
}
})?;
let ee = &manifest.signed_object.signed_data.certificates[0].resource_cert;
@ -200,13 +214,17 @@ pub fn build_manifest_state_from_vcirs(
.extensions
.authority_key_identifier
.clone()
.ok_or_else(|| CcrBuildError::ManifestEeMissingAki(vcir.current_manifest_rsync_uri.clone()))?;
.ok_or_else(|| {
CcrBuildError::ManifestEeMissingAki(vcir.current_manifest_rsync_uri.clone())
})?;
let sia = ee
.tbs
.extensions
.subject_info_access
.as_ref()
.ok_or_else(|| CcrBuildError::ManifestEeMissingSia(vcir.current_manifest_rsync_uri.clone()))?;
.ok_or_else(|| {
CcrBuildError::ManifestEeMissingSia(vcir.current_manifest_rsync_uri.clone())
})?;
let locations = match sia {
SubjectInfoAccess::Ee(ee_sia) => ee_sia
.access_descriptions
@ -216,7 +234,7 @@ pub fn build_manifest_state_from_vcirs(
SubjectInfoAccess::Ca(_) => {
return Err(CcrBuildError::ManifestEeSiaWrongVariant(
vcir.current_manifest_rsync_uri.clone(),
))
));
}
};
@ -234,11 +252,14 @@ pub fn build_manifest_state_from_vcirs(
}
let instance = ManifestInstance {
hash: sha2::Sha256::digest(&raw_entry.bytes).to_vec(),
size: raw_entry.bytes.len() as u64,
hash: sha2::Sha256::digest(&raw_bytes).to_vec(),
size: raw_bytes.len() as u64,
aki,
manifest_number: crate::data_model::common::BigUnsigned {
bytes_be: vcir.validated_manifest_meta.validated_manifest_number.clone(),
bytes_be: vcir
.validated_manifest_meta
.validated_manifest_number
.clone(),
},
this_update,
locations,
@ -292,7 +313,10 @@ fn encode_access_description_der(ad: &AccessDescription) -> Result<Vec<u8>, CcrB
fn encode_oid_from_string(oid: &str) -> Result<Vec<u8>, CcrBuildError> {
let arcs = oid
.split('.')
.map(|part| part.parse::<u64>().map_err(|_| CcrBuildError::UnsupportedAccessMethodOid(oid.to_string())))
.map(|part| {
part.parse::<u64>()
.map_err(|_| CcrBuildError::UnsupportedAccessMethodOid(oid.to_string()))
})
.collect::<Result<Vec<_>, _>>()?;
if arcs.len() < 2 {
return Err(CcrBuildError::UnsupportedAccessMethodOid(oid.to_string()));
@ -471,7 +495,9 @@ fn encode_length(len: usize, out: &mut Vec<u8>) {
mod tests {
use super::*;
use crate::ccr::decode::decode_content_info;
use crate::ccr::encode::{encode_aspa_payload_state, encode_content_info, encode_trust_anchor_state};
use crate::ccr::encode::{
encode_aspa_payload_state, encode_content_info, encode_trust_anchor_state,
};
use crate::ccr::model::{CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation};
use crate::data_model::roa::{IpPrefix, RoaAfi};
use crate::data_model::ta::TrustAnchor;
@ -561,13 +587,19 @@ mod tests {
ca_subject_name: "CN=test".to_string(),
ca_ski: "11".repeat(20),
issuer_ski: "22".repeat(20),
last_successful_validation_time: crate::storage::PackTime::from_utc_offset_datetime(now),
last_successful_validation_time: crate::storage::PackTime::from_utc_offset_datetime(
now,
),
current_manifest_rsync_uri: manifest_uri.to_string(),
current_crl_rsync_uri: format!("{manifest_uri}.crl"),
validated_manifest_meta: crate::storage::ValidatedManifestMeta {
validated_manifest_number: manifest.manifest.manifest_number.bytes_be.clone(),
validated_manifest_this_update: crate::storage::PackTime::from_utc_offset_datetime(now),
validated_manifest_next_update: crate::storage::PackTime::from_utc_offset_datetime(next),
validated_manifest_this_update: crate::storage::PackTime::from_utc_offset_datetime(
now,
),
validated_manifest_next_update: crate::storage::PackTime::from_utc_offset_datetime(
next,
),
},
instance_gate: crate::storage::VcirInstanceGate {
manifest_next_update: crate::storage::PackTime::from_utc_offset_datetime(next),
@ -585,7 +617,9 @@ mod tests {
child_rrdp_notification_uri: None,
child_effective_ip_resources: None,
child_effective_as_resources: None,
accepted_at_validation_time: crate::storage::PackTime::from_utc_offset_datetime(now),
accepted_at_validation_time: crate::storage::PackTime::from_utc_offset_datetime(
now,
),
}],
local_outputs: Vec::new(),
related_artifacts: vec![crate::storage::VcirRelatedArtifact {
@ -617,21 +651,29 @@ mod tests {
fn build_manifest_state_from_vcirs_collects_current_manifests_and_hashes_payload() {
let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let manifest_a = std::fs::read(base.join("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft")).expect("read manifest a");
let manifest_b = std::fs::read(base.join("tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/bW-_qXU9uNhGQz21NR2ansB8lr0.mft")).expect("read manifest b");
let vcir_a = sample_manifest_vcir("rsync://example.test/a.mft", &manifest_a, &"33".repeat(20));
let vcir_b = sample_manifest_vcir("rsync://example.test/b.mft", &manifest_b, &"44".repeat(20));
let manifest_b = std::fs::read(base.join(
"tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/bW-_qXU9uNhGQz21NR2ansB8lr0.mft",
))
.expect("read manifest b");
let vcir_a =
sample_manifest_vcir("rsync://example.test/a.mft", &manifest_a, &"33".repeat(20));
let vcir_b =
sample_manifest_vcir("rsync://example.test/b.mft", &manifest_b, &"44".repeat(20));
let store_dir = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
for (vcir, bytes) in [(&vcir_a, &manifest_a), (&vcir_b, &manifest_b)] {
let artifact = &vcir.related_artifacts[0];
let mut raw = crate::storage::RawByHashEntry::from_bytes(artifact.sha256.clone(), bytes.to_vec());
raw.origin_uris.push(vcir.current_manifest_rsync_uri.clone());
let mut raw =
crate::storage::RawByHashEntry::from_bytes(artifact.sha256.clone(), bytes.to_vec());
raw.origin_uris
.push(vcir.current_manifest_rsync_uri.clone());
raw.object_type = Some("mft".to_string());
raw.encoding = Some("der".to_string());
store.put_raw_by_hash_entry(&raw).expect("put raw manifest");
}
let state = build_manifest_state_from_vcirs(&store, &[vcir_a.clone(), vcir_b.clone()]).expect("build manifest state");
let state = build_manifest_state_from_vcirs(&store, &[vcir_a.clone(), vcir_b.clone()])
.expect("build manifest state");
assert_eq!(state.mis.len(), 2);
assert!(state.mis[0].hash < state.mis[1].hash);
let expected_subordinates = [
@ -647,13 +689,24 @@ mod tests {
mi.subordinates[0].clone()
})
.collect::<std::collections::BTreeSet<_>>();
let expected_subordinates = expected_subordinates.into_iter().collect::<std::collections::BTreeSet<_>>();
let expected_subordinates = expected_subordinates
.into_iter()
.collect::<std::collections::BTreeSet<_>>();
assert_eq!(actual_subordinates, expected_subordinates);
let payload_der = encode_manifest_state_payload_der(&state.mis).expect("encode mis payload");
let payload_der =
encode_manifest_state_payload_der(&state.mis).expect("encode mis payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
let max_time = [
vcir_a.validated_manifest_meta.validated_manifest_this_update.parse().unwrap(),
vcir_b.validated_manifest_meta.validated_manifest_this_update.parse().unwrap(),
vcir_a
.validated_manifest_meta
.validated_manifest_this_update
.parse()
.unwrap(),
vcir_b
.validated_manifest_meta
.validated_manifest_this_update
.parse()
.unwrap(),
]
.into_iter()
.max()
@ -668,7 +721,8 @@ mod tests {
let state = build_manifest_state_from_vcirs(&store, &[]).expect("empty manifest state");
assert!(state.mis.is_empty());
assert_eq!(state.most_recent_update, time::OffsetDateTime::UNIX_EPOCH);
let payload_der = encode_manifest_state_payload_der(&state.mis).expect("encode mis payload");
let payload_der =
encode_manifest_state_payload_der(&state.mis).expect("encode mis payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
}
@ -687,7 +741,8 @@ mod tests {
assert_eq!(state.rksets[0].router_keys.len(), 2);
assert_eq!(state.rksets[1].router_keys.len(), 2);
assert!(state.rksets[0].router_keys[0].ski <= state.rksets[0].router_keys[1].ski);
let payload_der = encode_router_key_state_payload_der(&state.rksets).expect("encode rk payload");
let payload_der =
encode_router_key_state_payload_der(&state.rksets).expect("encode rk payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
}
@ -695,7 +750,8 @@ mod tests {
fn build_router_key_state_empty_is_valid_and_hashes_empty_sequence() {
let state = build_router_key_state(&[]).expect("empty router key state");
assert!(state.rksets.is_empty());
let payload_der = encode_router_key_state_payload_der(&state.rksets).expect("encode rk payload");
let payload_der =
encode_router_key_state_payload_der(&state.rksets).expect("encode rk payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
}
@ -706,7 +762,12 @@ mod tests {
sample_vrp_v4(64496, [10, 0, 0, 0], 8, 8),
sample_vrp_v4(64496, [10, 0, 0, 0], 8, 8),
sample_vrp_v4(64496, [10, 1, 0, 0], 16, 24),
sample_vrp_v6(64496, [0x20,0x01,0x0d,0xb8,0,0,0,0,0,0,0,0,0,0,0,0], 32, 48),
sample_vrp_v6(
64496,
[0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
32,
48,
),
];
let state = build_roa_payload_state(&vrps).expect("build roa state");
assert_eq!(state.rps.len(), 2);
@ -721,7 +782,7 @@ mod tests {
assert_eq!(entries4[0], (8, vec![10], None));
assert_eq!(entries4[1], (16, vec![10, 1], Some(24)));
assert_eq!(entries6.len(), 1);
assert_eq!(entries6[0], (32, vec![0x20,0x01,0x0d,0xb8], Some(48)));
assert_eq!(entries6[0], (32, vec![0x20, 0x01, 0x0d, 0xb8], Some(48)));
let payload_der = encode_roa_payload_state_payload_der(&state.rps).expect("encode payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
}
@ -737,9 +798,18 @@ mod tests {
#[test]
fn build_aspa_payload_state_merges_and_sorts() {
let aspas = vec![
AspaAttestation { customer_as_id: 64497, provider_as_ids: vec![65002] },
AspaAttestation { customer_as_id: 64496, provider_as_ids: vec![65003, 65001] },
AspaAttestation { customer_as_id: 64496, provider_as_ids: vec![65002, 65001] },
AspaAttestation {
customer_as_id: 64497,
provider_as_ids: vec![65002],
},
AspaAttestation {
customer_as_id: 64496,
provider_as_ids: vec![65003, 65001],
},
AspaAttestation {
customer_as_id: 64496,
provider_as_ids: vec![65002, 65001],
},
];
let state = build_aspa_payload_state(&aspas).expect("build aspa state");
assert_eq!(state.aps.len(), 2);
@ -747,8 +817,8 @@ mod tests {
assert_eq!(state.aps[0].providers, vec![65001, 65002, 65003]);
assert_eq!(state.aps[1].customer_as_id, 64497);
let encoded = encode_aspa_payload_state(&state).expect("encode aspa state");
let decoded = decode_content_info(&encode_content_info(&CcrContentInfo::new(
RpkiCanonicalCacheRepresentation {
let decoded = decode_content_info(
&encode_content_info(&CcrContentInfo::new(RpkiCanonicalCacheRepresentation {
version: 0,
hash_alg: CcrDigestAlgorithm::Sha256,
produced_at: time::OffsetDateTime::now_utc(),
@ -757,9 +827,10 @@ mod tests {
vaps: Some(state.clone()),
tas: None,
rks: None,
},
))
.expect("encode ccr")).expect("decode ccr");
}))
.expect("encode ccr"),
)
.expect("decode ccr");
assert_eq!(decoded.content.vaps, Some(state));
assert!(!encoded.is_empty());
}
@ -778,7 +849,8 @@ mod tests {
.expect("build ta state");
assert_eq!(state.skis.len(), 2);
assert!(state.skis[0] < state.skis[1]);
let payload_der = encode_trust_anchor_state_payload_der(&state.skis).expect("encode ta payload");
let payload_der =
encode_trust_anchor_state_payload_der(&state.skis).expect("encode ta payload");
assert!(crate::ccr::verify_state_hash(&state.hash, &payload_der));
let encoded = encode_trust_anchor_state(&state).expect("encode ta state");
assert!(!encoded.is_empty());
@ -793,7 +865,11 @@ mod tests {
"tests/fixtures/tal/apnic-rfc7730-https.tal",
"tests/fixtures/ta/apnic-ta.cer",
);
ta.ta_certificate.rc_ca.tbs.extensions.subject_key_identifier = None;
ta.ta_certificate
.rc_ca
.tbs
.extensions
.subject_key_identifier = None;
let err = build_trust_anchor_state(&[ta]).expect_err("missing ski must fail");
assert!(err.to_string().contains("SubjectKeyIdentifier"), "{err}");
}

View File

@ -13,10 +13,16 @@ pub enum CcrDecodeError {
Parse(String),
#[error("unexpected contentType OID: expected {expected}, got {actual}")]
UnexpectedContentType { expected: &'static str, actual: String },
UnexpectedContentType {
expected: &'static str,
actual: String,
},
#[error("unexpected digest algorithm OID: expected {expected}, got {actual}")]
UnexpectedDigestAlgorithm { expected: &'static str, actual: String },
UnexpectedDigestAlgorithm {
expected: &'static str,
actual: String,
},
#[error("CCR model validation failed after decode: {0}")]
Validate(String),
@ -26,7 +32,9 @@ pub fn decode_content_info(der: &[u8]) -> Result<CcrContentInfo, CcrDecodeError>
let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ContentInfo".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after ContentInfo".into(),
));
}
let content_type_raw = seq.take_tag(0x06).map_err(CcrDecodeError::Parse)?;
if content_type_raw != OID_CT_RPKI_CCR_RAW {
@ -37,7 +45,9 @@ pub fn decode_content_info(der: &[u8]) -> Result<CcrContentInfo, CcrDecodeError>
}
let inner = seq.take_tag(0xA0).map_err(CcrDecodeError::Parse)?;
if !seq.is_empty() {
return Err(CcrDecodeError::Parse("trailing fields in ContentInfo".into()));
return Err(CcrDecodeError::Parse(
"trailing fields in ContentInfo".into(),
));
}
let content = decode_ccr(inner)?;
let ci = CcrContentInfo::new(content);
@ -87,7 +97,7 @@ pub fn decode_ccr(der: &[u8]) -> Result<RpkiCanonicalCacheRepresentation, CcrDec
_ => {
return Err(CcrDecodeError::Parse(format!(
"unexpected CCR field tag 0x{tag:02X}"
)))
)));
}
}
}
@ -110,7 +120,9 @@ fn decode_manifest_state(explicit_der: &[u8]) -> Result<ManifestState, CcrDecode
let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ManifestState".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after ManifestState".into(),
));
}
let mis_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut mis_reader = DerReader::new(mis_der);
@ -119,30 +131,50 @@ fn decode_manifest_state(explicit_der: &[u8]) -> Result<ManifestState, CcrDecode
let (_tag, full, _value) = mis_reader.take_any_full().map_err(CcrDecodeError::Parse)?;
mis.push(decode_manifest_instance(full)?);
}
let most_recent_update = parse_generalized_time(seq.take_tag(0x18).map_err(CcrDecodeError::Parse)?)?;
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec();
let most_recent_update =
parse_generalized_time(seq.take_tag(0x18).map_err(CcrDecodeError::Parse)?)?;
let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
if !seq.is_empty() {
return Err(CcrDecodeError::Parse("trailing fields in ManifestState".into()));
return Err(CcrDecodeError::Parse(
"trailing fields in ManifestState".into(),
));
}
Ok(ManifestState { mis, most_recent_update, hash })
Ok(ManifestState {
mis,
most_recent_update,
hash,
})
}
fn decode_manifest_instance(der: &[u8]) -> Result<ManifestInstance, CcrDecodeError> {
let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ManifestInstance".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after ManifestInstance".into(),
));
}
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec();
let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
let size = seq.take_uint_u64().map_err(CcrDecodeError::Parse)?;
let aki = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec();
let aki = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
let manifest_number = decode_big_unsigned(seq.take_tag(0x02).map_err(CcrDecodeError::Parse)?)?;
let this_update = parse_generalized_time(seq.take_tag(0x18).map_err(CcrDecodeError::Parse)?)?;
let locations_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut locations_reader = DerReader::new(locations_der);
let mut locations = Vec::new();
while !locations_reader.is_empty() {
let (_tag, full, _value) = locations_reader.take_any_full().map_err(CcrDecodeError::Parse)?;
let (_tag, full, _value) = locations_reader
.take_any_full()
.map_err(CcrDecodeError::Parse)?;
locations.push(full.to_vec());
}
let subordinates = if !seq.is_empty() {
@ -150,20 +182,35 @@ fn decode_manifest_instance(der: &[u8]) -> Result<ManifestInstance, CcrDecodeErr
let mut reader = DerReader::new(subordinate_der);
let mut out = Vec::new();
while !reader.is_empty() {
out.push(reader.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec());
out.push(
reader
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec(),
);
}
out
} else {
Vec::new()
};
Ok(ManifestInstance { hash, size, aki, manifest_number, this_update, locations, subordinates })
Ok(ManifestInstance {
hash,
size,
aki,
manifest_number,
this_update,
locations,
subordinates,
})
}
fn decode_roa_payload_state(explicit_der: &[u8]) -> Result<RoaPayloadState, CcrDecodeError> {
let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ROAPayloadState".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after ROAPayloadState".into(),
));
}
let payload_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut reader = DerReader::new(payload_der);
@ -172,7 +219,10 @@ fn decode_roa_payload_state(explicit_der: &[u8]) -> Result<RoaPayloadState, CcrD
let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?;
rps.push(decode_roa_payload_set(full)?);
}
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec();
let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
Ok(RoaPayloadState { rps, hash })
}
@ -180,7 +230,9 @@ fn decode_roa_payload_set(der: &[u8]) -> Result<RoaPayloadSet, CcrDecodeError> {
let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ROAPayloadSet".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after ROAPayloadSet".into(),
));
}
let as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32;
let blocks_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
@ -190,14 +242,19 @@ fn decode_roa_payload_set(der: &[u8]) -> Result<RoaPayloadSet, CcrDecodeError> {
let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?;
ip_addr_blocks.push(full.to_vec());
}
Ok(RoaPayloadSet { as_id, ip_addr_blocks })
Ok(RoaPayloadSet {
as_id,
ip_addr_blocks,
})
}
fn decode_aspa_payload_state(explicit_der: &[u8]) -> Result<AspaPayloadState, CcrDecodeError> {
let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ASPAPayloadState".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after ASPAPayloadState".into(),
));
}
let payload_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut reader = DerReader::new(payload_der);
@ -206,7 +263,10 @@ fn decode_aspa_payload_state(explicit_der: &[u8]) -> Result<AspaPayloadState, Cc
let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?;
aps.push(decode_aspa_payload_set(full)?);
}
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec();
let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
Ok(AspaPayloadState { aps, hash })
}
@ -214,7 +274,9 @@ fn decode_aspa_payload_set(der: &[u8]) -> Result<AspaPayloadSet, CcrDecodeError>
let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after ASPAPayloadSet".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after ASPAPayloadSet".into(),
));
}
let customer_as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32;
let providers_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
@ -223,22 +285,35 @@ fn decode_aspa_payload_set(der: &[u8]) -> Result<AspaPayloadSet, CcrDecodeError>
while !reader.is_empty() {
providers.push(reader.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32);
}
Ok(AspaPayloadSet { customer_as_id, providers })
Ok(AspaPayloadSet {
customer_as_id,
providers,
})
}
fn decode_trust_anchor_state(explicit_der: &[u8]) -> Result<TrustAnchorState, CcrDecodeError> {
let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after TrustAnchorState".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after TrustAnchorState".into(),
));
}
let skis_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut reader = DerReader::new(skis_der);
let mut skis = Vec::new();
while !reader.is_empty() {
skis.push(reader.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec());
skis.push(
reader
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec(),
);
}
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec();
let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
Ok(TrustAnchorState { skis, hash })
}
@ -246,7 +321,9 @@ fn decode_router_key_state(explicit_der: &[u8]) -> Result<RouterKeyState, CcrDec
let mut outer = DerReader::new(explicit_der);
let mut seq = outer.take_sequence().map_err(CcrDecodeError::Parse)?;
if !outer.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after RouterKeyState".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after RouterKeyState".into(),
));
}
let sets_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
let mut reader = DerReader::new(sets_der);
@ -255,7 +332,10 @@ fn decode_router_key_state(explicit_der: &[u8]) -> Result<RouterKeyState, CcrDec
let (_tag, full, _value) = reader.take_any_full().map_err(CcrDecodeError::Parse)?;
rksets.push(decode_router_key_set(full)?);
}
let hash = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec();
let hash = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
Ok(RouterKeyState { rksets, hash })
}
@ -263,7 +343,9 @@ fn decode_router_key_set(der: &[u8]) -> Result<RouterKeySet, CcrDecodeError> {
let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after RouterKeySet".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after RouterKeySet".into(),
));
}
let as_id = seq.take_uint_u64().map_err(CcrDecodeError::Parse)? as u32;
let keys_der = seq.take_tag(0x30).map_err(CcrDecodeError::Parse)?;
@ -280,14 +362,22 @@ fn decode_router_key(der: &[u8]) -> Result<RouterKey, CcrDecodeError> {
let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CcrDecodeError::Parse)?;
if !top.is_empty() {
return Err(CcrDecodeError::Parse("trailing bytes after RouterKey".into()));
return Err(CcrDecodeError::Parse(
"trailing bytes after RouterKey".into(),
));
}
let ski = seq.take_octet_string().map_err(CcrDecodeError::Parse)?.to_vec();
let ski = seq
.take_octet_string()
.map_err(CcrDecodeError::Parse)?
.to_vec();
let (_tag, full, _value) = seq.take_any_full().map_err(CcrDecodeError::Parse)?;
if !seq.is_empty() {
return Err(CcrDecodeError::Parse("trailing fields in RouterKey".into()));
}
Ok(RouterKey { ski, spki_der: full.to_vec() })
Ok(RouterKey {
ski,
spki_der: full.to_vec(),
})
}
fn decode_digest_algorithm(mut seq: DerReader<'_>) -> Result<CcrDigestAlgorithm, CcrDecodeError> {
@ -354,8 +444,7 @@ fn parse_generalized_time(bytes: &[u8]) -> Result<time::OffsetDateTime, CcrDecod
let hour = parse(8..10)? as u8;
let minute = parse(10..12)? as u8;
let second = parse(12..14)? as u8;
let month = time::Month::try_from(month)
.map_err(|e| CcrDecodeError::Parse(e.to_string()))?;
let month = time::Month::try_from(month).map_err(|e| CcrDecodeError::Parse(e.to_string()))?;
let date = time::Date::from_calendar_date(year, month, day)
.map_err(|e| CcrDecodeError::Parse(e.to_string()))?;
let timev = time::Time::from_hms(hour, minute, second)
@ -371,7 +460,9 @@ fn decode_big_unsigned(bytes: &[u8]) -> Result<BigUnsigned, CcrDecodeError> {
return Err(CcrDecodeError::Parse("INTEGER must be non-negative".into()));
}
if bytes.len() > 1 && bytes[0] == 0x00 && (bytes[1] & 0x80) == 0 {
return Err(CcrDecodeError::Parse("INTEGER not minimally encoded".into()));
return Err(CcrDecodeError::Parse(
"INTEGER not minimally encoded".into(),
));
}
let bytes_be = if bytes.len() > 1 && bytes[0] == 0x00 {
bytes[1..].to_vec()

View File

@ -34,34 +34,58 @@ pub fn dump_content_info_json(
})
}).unwrap_or_else(|| json!({"present": false}));
let vrps_total = content_info.content.vrps.as_ref().map(|state| {
state.rps.iter().map(|set| set.ip_addr_blocks.len()).sum::<usize>()
}).unwrap_or(0);
let vrps = content_info.content.vrps.as_ref().map(|state| {
json!({
"present": true,
"payload_sets": state.rps.len(),
"hash_hex": hex::encode(&state.hash),
"ip_addr_block_count": vrps_total,
let vrps_total = content_info
.content
.vrps
.as_ref()
.map(|state| {
state
.rps
.iter()
.map(|set| set.ip_addr_blocks.len())
.sum::<usize>()
})
}).unwrap_or_else(|| json!({"present": false}));
.unwrap_or(0);
let vrps = content_info
.content
.vrps
.as_ref()
.map(|state| {
json!({
"present": true,
"payload_sets": state.rps.len(),
"hash_hex": hex::encode(&state.hash),
"ip_addr_block_count": vrps_total,
})
})
.unwrap_or_else(|| json!({"present": false}));
let vaps = content_info.content.vaps.as_ref().map(|state| {
json!({
"present": true,
"payload_sets": state.aps.len(),
"hash_hex": hex::encode(&state.hash),
"provider_count": state.aps.iter().map(|set| set.providers.len()).sum::<usize>(),
let vaps = content_info
.content
.vaps
.as_ref()
.map(|state| {
json!({
"present": true,
"payload_sets": state.aps.len(),
"hash_hex": hex::encode(&state.hash),
"provider_count": state.aps.iter().map(|set| set.providers.len()).sum::<usize>(),
})
})
}).unwrap_or_else(|| json!({"present": false}));
.unwrap_or_else(|| json!({"present": false}));
let tas = content_info.content.tas.as_ref().map(|state| {
json!({
"present": true,
"ski_count": state.skis.len(),
"hash_hex": hex::encode(&state.hash),
let tas = content_info
.content
.tas
.as_ref()
.map(|state| {
json!({
"present": true,
"ski_count": state.skis.len(),
"hash_hex": hex::encode(&state.hash),
})
})
}).unwrap_or_else(|| json!({"present": false}));
.unwrap_or_else(|| json!({"present": false}));
let rks = content_info.content.rks.as_ref().map(|state| {
json!({

View File

@ -24,9 +24,7 @@ pub fn encode_content_info(content_info: &CcrContentInfo) -> Result<Vec<u8>, Ccr
]))
}
pub fn encode_ccr(
ccr: &RpkiCanonicalCacheRepresentation,
) -> Result<Vec<u8>, CcrEncodeError> {
pub fn encode_ccr(ccr: &RpkiCanonicalCacheRepresentation) -> Result<Vec<u8>, CcrEncodeError> {
ccr.validate().map_err(CcrEncodeError::Validate)?;
let mut fields = Vec::new();
if ccr.version != CCR_VERSION_V0 {
@ -156,11 +154,12 @@ pub fn encode_trust_anchor_state(state: &TrustAnchorState) -> Result<Vec<u8>, Cc
Ok(encode_sequence(&[skis, encode_octet_string(&state.hash)]))
}
pub fn encode_trust_anchor_state_payload_der(
skis: &[Vec<u8>],
) -> Result<Vec<u8>, CcrEncodeError> {
pub fn encode_trust_anchor_state_payload_der(skis: &[Vec<u8>]) -> Result<Vec<u8>, CcrEncodeError> {
Ok(encode_sequence(
&skis.iter().map(|ski| encode_octet_string(ski)).collect::<Vec<_>>(),
&skis
.iter()
.map(|ski| encode_octet_string(ski))
.collect::<Vec<_>>(),
))
}

View File

@ -54,7 +54,6 @@ pub fn build_ccr_from_run(
})
}
fn build_router_key_state_from_runtime(
router_keys: &[RouterKeyPayload],
) -> Result<crate::ccr::model::RouterKeyState, CcrBuildError> {
@ -109,17 +108,22 @@ pub fn write_ccr_file(
mod tests {
use super::*;
use crate::ccr::decode::decode_content_info;
use crate::data_model::ta::TrustAnchor;
use crate::data_model::tal::Tal;
use crate::storage::{RawByHashEntry, RocksStore, VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry, VcirInstanceGate, VcirRelatedArtifact, VcirSummary, ValidatedCaInstanceResult, ValidatedManifestMeta, PackTime};
use crate::validation::objects::{AspaAttestation, RouterKeyPayload, Vrp};
use crate::data_model::manifest::ManifestObject;
use crate::data_model::roa::{IpPrefix, RoaAfi};
use crate::data_model::ta::TrustAnchor;
use crate::data_model::tal::Tal;
use crate::storage::{
PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary,
VcirChildEntry, VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
};
use crate::validation::objects::{AspaAttestation, RouterKeyPayload, Vrp};
use sha2::Digest;
fn sample_trust_anchor() -> TrustAnchor {
let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let tal_bytes = std::fs::read(base.join("tests/fixtures/tal/apnic-rfc7730-https.tal")).expect("read tal");
let tal_bytes = std::fs::read(base.join("tests/fixtures/tal/apnic-rfc7730-https.tal"))
.expect("read tal");
let ta_der = std::fs::read(base.join("tests/fixtures/ta/apnic-ta.cer")).expect("read ta");
let tal = Tal::decode_bytes(&tal_bytes).expect("decode tal");
TrustAnchor::bind_der(tal, &ta_der, None).expect("bind ta")
@ -131,7 +135,8 @@ mod tests {
let manifest = ManifestObject::decode_der(&manifest_der).expect("decode manifest");
let hash = hex::encode(sha2::Sha256::digest(&manifest_der));
let mut raw = RawByHashEntry::from_bytes(hash.clone(), manifest_der.clone());
raw.origin_uris.push("rsync://example.test/repo/current.mft".to_string());
raw.origin_uris
.push("rsync://example.test/repo/current.mft".to_string());
raw.object_type = Some("mft".to_string());
raw.encoding = Some("der".to_string());
store.put_raw_by_hash_entry(&raw).expect("put raw");
@ -142,19 +147,33 @@ mod tests {
ca_subject_name: "CN=test".to_string(),
ca_ski: "11".repeat(20),
issuer_ski: "22".repeat(20),
last_successful_validation_time: PackTime::from_utc_offset_datetime(manifest.manifest.this_update),
last_successful_validation_time: PackTime::from_utc_offset_datetime(
manifest.manifest.this_update,
),
current_manifest_rsync_uri: "rsync://example.test/repo/current.mft".to_string(),
current_crl_rsync_uri: "rsync://example.test/repo/current.crl".to_string(),
validated_manifest_meta: ValidatedManifestMeta {
validated_manifest_number: manifest.manifest.manifest_number.bytes_be.clone(),
validated_manifest_this_update: PackTime::from_utc_offset_datetime(manifest.manifest.this_update),
validated_manifest_next_update: PackTime::from_utc_offset_datetime(manifest.manifest.next_update),
validated_manifest_this_update: PackTime::from_utc_offset_datetime(
manifest.manifest.this_update,
),
validated_manifest_next_update: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
},
instance_gate: VcirInstanceGate {
manifest_next_update: PackTime::from_utc_offset_datetime(manifest.manifest.next_update),
current_crl_next_update: PackTime::from_utc_offset_datetime(manifest.manifest.next_update),
self_ca_not_after: PackTime::from_utc_offset_datetime(manifest.manifest.next_update),
instance_effective_until: PackTime::from_utc_offset_datetime(manifest.manifest.next_update),
manifest_next_update: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
current_crl_next_update: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
self_ca_not_after: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
instance_effective_until: PackTime::from_utc_offset_datetime(
manifest.manifest.next_update,
),
},
child_entries: vec![VcirChildEntry {
child_manifest_rsync_uri: "rsync://example.test/repo/child.mft".to_string(),
@ -166,7 +185,9 @@ mod tests {
child_rrdp_notification_uri: None,
child_effective_ip_resources: None,
child_effective_as_resources: None,
accepted_at_validation_time: PackTime::from_utc_offset_datetime(manifest.manifest.this_update),
accepted_at_validation_time: PackTime::from_utc_offset_datetime(
manifest.manifest.this_update,
),
}],
local_outputs: Vec::new(),
related_artifacts: vec![VcirRelatedArtifact {
@ -204,10 +225,17 @@ mod tests {
let trust_anchor = sample_trust_anchor();
let vrps = vec![Vrp {
asn: 64496,
prefix: IpPrefix { afi: RoaAfi::Ipv4, prefix_len: 8, addr: [10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] },
prefix: IpPrefix {
afi: RoaAfi::Ipv4,
prefix_len: 8,
addr: [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
},
max_length: 8,
}];
let aspas = vec![AspaAttestation { customer_as_id: 64496, provider_as_ids: vec![64497] }];
let aspas = vec![AspaAttestation {
customer_as_id: 64496,
provider_as_ids: vec![64497],
}];
let router_keys = vec![RouterKeyPayload {
as_id: 64496,
ski: vec![0x11; 20],
@ -215,9 +243,19 @@ mod tests {
source_object_uri: "rsync://example.test/repo/router.cer".to_string(),
source_object_hash: hex::encode([0x11; 32]),
source_ee_cert_hash: hex::encode([0x11; 32]),
item_effective_until: PackTime::from_utc_offset_datetime(time::OffsetDateTime::now_utc() + time::Duration::hours(1)),
item_effective_until: PackTime::from_utc_offset_datetime(
time::OffsetDateTime::now_utc() + time::Duration::hours(1),
),
}];
let ccr = build_ccr_from_run(&store, &[trust_anchor], &vrps, &aspas, &router_keys, time::OffsetDateTime::now_utc()).expect("build ccr");
let ccr = build_ccr_from_run(
&store,
&[trust_anchor],
&vrps,
&aspas,
&router_keys,
time::OffsetDateTime::now_utc(),
)
.expect("build ccr");
assert!(ccr.mfts.is_some());
assert!(ccr.vrps.is_some());
assert!(ccr.vaps.is_some());

View File

@ -1,12 +1,12 @@
pub mod decode;
pub mod encode;
pub mod dump;
pub mod hash;
pub mod model;
#[cfg(feature = "full")]
pub mod build;
pub mod decode;
pub mod dump;
pub mod encode;
#[cfg(feature = "full")]
pub mod export;
pub mod hash;
pub mod model;
#[cfg(feature = "full")]
pub mod verify;
@ -16,16 +16,19 @@ pub use build::{
build_roa_payload_state, build_trust_anchor_state,
};
pub use decode::{CcrDecodeError, decode_content_info};
pub use dump::{CcrDumpError, dump_content_info_json, dump_content_info_json_value};
pub use encode::{CcrEncodeError, encode_content_info};
#[cfg(feature = "full")]
pub use export::{CcrExportError, build_ccr_from_run, write_ccr_file};
pub use dump::{CcrDumpError, dump_content_info_json, dump_content_info_json_value};
#[cfg(feature = "full")]
pub use verify::{CcrVerifyError, CcrVerifySummary, extract_vrp_rows, verify_against_report_json_path, verify_against_vcir_store, verify_against_vcir_store_path, verify_content_info, verify_content_info_bytes};
pub use hash::{compute_state_hash, verify_state_hash};
pub use model::{
AspaPayloadSet, AspaPayloadState, CcrContentInfo, CcrDigestAlgorithm,
ManifestInstance, ManifestState, RoaPayloadSet, RoaPayloadState,
RouterKey, RouterKeySet, RouterKeyState, RpkiCanonicalCacheRepresentation,
TrustAnchorState,
AspaPayloadSet, AspaPayloadState, CcrContentInfo, CcrDigestAlgorithm, ManifestInstance,
ManifestState, RoaPayloadSet, RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState,
RpkiCanonicalCacheRepresentation, TrustAnchorState,
};
#[cfg(feature = "full")]
pub use verify::{
CcrVerifyError, CcrVerifySummary, extract_vrp_rows, verify_against_report_json_path,
verify_against_vcir_store, verify_against_vcir_store_path, verify_content_info,
verify_content_info_bytes,
};

View File

@ -133,16 +133,17 @@ impl ManifestInstance {
));
}
validate_key_identifier("ManifestInstance.aki", &self.aki)?;
validate_big_unsigned_bytes("ManifestInstance.manifest_number", &self.manifest_number.bytes_be)?;
validate_big_unsigned_bytes(
"ManifestInstance.manifest_number",
&self.manifest_number.bytes_be,
)?;
if self.locations.is_empty() {
return Err("ManifestInstance.locations must contain at least one AccessDescription".into());
return Err(
"ManifestInstance.locations must contain at least one AccessDescription".into(),
);
}
for location in &self.locations {
validate_full_der_with_tag(
"ManifestInstance.locations[]",
location,
Some(0x30),
)?;
validate_full_der_with_tag("ManifestInstance.locations[]", location, Some(0x30))?;
}
if !self.subordinates.is_empty() {
validate_sorted_unique_bytes(
@ -339,7 +340,9 @@ fn validate_big_unsigned_bytes(field: &str, bytes: &[u8]) -> Result<(), String>
return Err(format!("{field} must not be empty"));
}
if bytes.len() > 1 && bytes[0] == 0x00 {
return Err(format!("{field} must be minimally encoded as an unsigned integer"));
return Err(format!(
"{field} must be minimally encoded as an unsigned integer"
));
}
Ok(())
}

View File

@ -82,7 +82,9 @@ pub fn verify_content_info_bytes(der: &[u8]) -> Result<CcrVerifySummary, CcrVeri
verify_content_info(&content_info)
}
pub fn verify_content_info(content_info: &CcrContentInfo) -> Result<CcrVerifySummary, CcrVerifyError> {
pub fn verify_content_info(
content_info: &CcrContentInfo,
) -> Result<CcrVerifySummary, CcrVerifyError> {
content_info.validate().map_err(CcrDecodeError::Validate)?;
let state_hashes_ok = true;
let mut manifest_instances = 0usize;
@ -108,7 +110,11 @@ pub fn verify_content_info(content_info: &CcrContentInfo) -> Result<CcrVerifySum
return Err(CcrVerifyError::RoaHashMismatch);
}
roa_payload_sets = vrps.rps.len();
roa_vrp_count = vrps.rps.iter().map(|set| count_roa_block_entries(&set.ip_addr_blocks)).sum();
roa_vrp_count = vrps
.rps
.iter()
.map(|set| count_roa_block_entries(&set.ip_addr_blocks))
.sum();
}
if let Some(vaps) = &content_info.content.vaps {
let payload_der = encode_aspa_payload_state_payload_der(&vaps.aps)
@ -154,10 +160,11 @@ pub fn verify_against_report_json_path(
content_info: &CcrContentInfo,
report_json_path: &Path,
) -> Result<(), CcrVerifyError> {
let bytes = std::fs::read(report_json_path)
.map_err(|e| CcrVerifyError::ReportRead(report_json_path.display().to_string(), e.to_string()))?;
let json: serde_json::Value = serde_json::from_slice(&bytes)
.map_err(|e| CcrVerifyError::ReportParse(e.to_string()))?;
let bytes = std::fs::read(report_json_path).map_err(|e| {
CcrVerifyError::ReportRead(report_json_path.display().to_string(), e.to_string())
})?;
let json: serde_json::Value =
serde_json::from_slice(&bytes).map_err(|e| CcrVerifyError::ReportParse(e.to_string()))?;
let report_vrps = report_vrp_keys(&json)?;
let ccr_vrps = extract_vrp_rows(content_info)?;
@ -198,7 +205,9 @@ pub fn verify_against_vcir_store(
let Some(mfts) = &content_info.content.mfts else {
return Ok(());
};
let vcirs = store.list_vcirs().map_err(|e| CcrVerifyError::ListVcirs(e.to_string()))?;
let vcirs = store
.list_vcirs()
.map_err(|e| CcrVerifyError::ListVcirs(e.to_string()))?;
let mut vcir_hashes = BTreeSet::new();
for vcir in vcirs {
if let Some(artifact) = vcir.related_artifacts.iter().find(|artifact| {
@ -244,7 +253,9 @@ fn verify_router_key_state_hash(state: &RouterKeyState) -> Result<(), CcrVerifyE
Ok(())
}
fn report_vrp_keys(json: &serde_json::Value) -> Result<BTreeSet<(u32, String, u16)>, CcrVerifyError> {
fn report_vrp_keys(
json: &serde_json::Value,
) -> Result<BTreeSet<(u32, String, u16)>, CcrVerifyError> {
let mut out = BTreeSet::new();
let Some(items) = json.get("vrps").and_then(|v| v.as_array()) else {
return Ok(out);
@ -253,7 +264,8 @@ fn report_vrp_keys(json: &serde_json::Value) -> Result<BTreeSet<(u32, String, u1
let asn = item
.get("asn")
.and_then(|v| v.as_u64())
.ok_or_else(|| CcrVerifyError::ReportParse("vrps[].asn missing".into()))? as u32;
.ok_or_else(|| CcrVerifyError::ReportParse("vrps[].asn missing".into()))?
as u32;
let prefix = item
.get("prefix")
.and_then(|v| v.as_str())
@ -285,7 +297,11 @@ fn report_aspa_keys(json: &serde_json::Value) -> Result<BTreeSet<(u32, Vec<u32>)
.and_then(|v| v.as_array())
.ok_or_else(|| CcrVerifyError::ReportParse("aspas[].provider_as_ids missing".into()))?
.iter()
.map(|v| v.as_u64().ok_or_else(|| CcrVerifyError::ReportParse("provider_as_ids[] invalid".into())).map(|v| v as u32))
.map(|v| {
v.as_u64()
.ok_or_else(|| CcrVerifyError::ReportParse("provider_as_ids[] invalid".into()))
.map(|v| v as u32)
})
.collect::<Result<Vec<_>, _>>()?;
providers.sort_unstable();
providers.dedup();
@ -294,7 +310,9 @@ fn report_aspa_keys(json: &serde_json::Value) -> Result<BTreeSet<(u32, Vec<u32>)
Ok(out)
}
pub fn extract_vrp_rows(content_info: &CcrContentInfo) -> Result<BTreeSet<(u32, String, u16)>, CcrVerifyError> {
pub fn extract_vrp_rows(
content_info: &CcrContentInfo,
) -> Result<BTreeSet<(u32, String, u16)>, CcrVerifyError> {
let mut out = BTreeSet::new();
let Some(vrps) = &content_info.content.vrps else {
return Ok(out);
@ -311,7 +329,9 @@ pub fn extract_vrp_rows(content_info: &CcrContentInfo) -> Result<BTreeSet<(u32,
Ok(out)
}
fn ccr_aspa_keys(content_info: &CcrContentInfo) -> Result<BTreeSet<(u32, Vec<u32>)>, CcrVerifyError> {
fn ccr_aspa_keys(
content_info: &CcrContentInfo,
) -> Result<BTreeSet<(u32, Vec<u32>)>, CcrVerifyError> {
let mut out = BTreeSet::new();
let Some(vaps) = &content_info.content.vaps else {
return Ok(out);
@ -322,24 +342,43 @@ fn ccr_aspa_keys(content_info: &CcrContentInfo) -> Result<BTreeSet<(u32, Vec<u32
Ok(out)
}
fn decode_roa_family_block(block: &[u8]) -> Result<(u16, Vec<(u8, Vec<u8>, Option<u16>)>), CcrVerifyError> {
fn decode_roa_family_block(
block: &[u8],
) -> Result<(u16, Vec<(u8, Vec<u8>, Option<u16>)>), CcrVerifyError> {
let mut top = crate::data_model::common::DerReader::new(block);
let mut seq = top.take_sequence().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let mut seq = top
.take_sequence()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
if !top.is_empty() {
return Err(CcrVerifyError::Decode(CcrDecodeError::Parse("trailing bytes after ROAIPAddressFamily".into())));
return Err(CcrVerifyError::Decode(CcrDecodeError::Parse(
"trailing bytes after ROAIPAddressFamily".into(),
)));
}
let afi_bytes = seq.take_octet_string().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let afi_bytes = seq
.take_octet_string()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let afi = u16::from_be_bytes([afi_bytes[0], afi_bytes[1]]);
let mut addrs = seq.take_sequence().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let mut addrs = seq
.take_sequence()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let mut entries = Vec::new();
while !addrs.is_empty() {
let mut addr_seq = addrs.take_sequence().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let (unused_bits, content) = addr_seq.take_bit_string().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let mut addr_seq = addrs
.take_sequence()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let (unused_bits, content) = addr_seq
.take_bit_string()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?;
let prefix_len = (content.len() * 8) as u8 - unused_bits;
let max_len = if addr_seq.is_empty() {
None
} else {
Some(addr_seq.take_uint_u64().map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))? as u16)
Some(
addr_seq
.take_uint_u64()
.map_err(|e| CcrVerifyError::Decode(CcrDecodeError::Parse(e)))?
as u16,
)
};
entries.push((prefix_len, content.to_vec(), max_len));
}
@ -358,37 +397,43 @@ fn format_prefix(afi: u16, addr_bytes: &[u8], prefix_len: u8) -> Result<String,
full[..addr_bytes.len()].copy_from_slice(addr_bytes);
Ok(format!("{}/{prefix_len}", std::net::Ipv6Addr::from(full)))
}
other => Err(CcrVerifyError::Decode(CcrDecodeError::Parse(format!("unsupported AFI {other}")))),
other => Err(CcrVerifyError::Decode(CcrDecodeError::Parse(format!(
"unsupported AFI {other}"
)))),
}
}
fn count_roa_block_entries(blocks: &[Vec<u8>]) -> usize {
blocks
.iter()
.map(|block| decode_roa_family_block(block).map(|(_, entries)| entries.len()).unwrap_or(0))
.map(|block| {
decode_roa_family_block(block)
.map(|(_, entries)| entries.len())
.unwrap_or(0)
})
.sum()
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ccr::build::{build_aspa_payload_state, build_roa_payload_state};
use crate::ccr::encode::{
encode_manifest_state_payload_der, encode_roa_payload_state_payload_der,
encode_router_key_state_payload_der, encode_trust_anchor_state_payload_der,
};
use crate::ccr::build::{build_aspa_payload_state, build_roa_payload_state};
use crate::ccr::model::{
CcrDigestAlgorithm, ManifestInstance, ManifestState,
RouterKey, RouterKeySet, RpkiCanonicalCacheRepresentation,
CcrDigestAlgorithm, ManifestInstance, ManifestState, RouterKey, RouterKeySet,
RpkiCanonicalCacheRepresentation,
};
use crate::data_model::roa::{IpPrefix, RoaAfi};
use crate::validation::objects::{AspaAttestation, Vrp};
use crate::data_model::common::BigUnsigned;
use crate::data_model::roa::{IpPrefix, RoaAfi};
use crate::storage::{
PackTime, ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind,
VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
};
use crate::validation::objects::{AspaAttestation, Vrp};
fn sample_time() -> time::OffsetDateTime {
time::OffsetDateTime::parse(
@ -415,21 +460,37 @@ mod tests {
};
let vrps = build_roa_payload_state(&[Vrp {
asn: 64496,
prefix: IpPrefix { afi: RoaAfi::Ipv4, prefix_len: 0, addr: [0; 16] },
prefix: IpPrefix {
afi: RoaAfi::Ipv4,
prefix_len: 0,
addr: [0; 16],
},
max_length: 0,
}]).expect("build roa state");
}])
.expect("build roa state");
let vaps = build_aspa_payload_state(&[AspaAttestation {
customer_as_id: 64496,
provider_as_ids: vec![64497],
}]).expect("build aspa state");
}])
.expect("build aspa state");
let skis = vec![vec![0x11; 20]];
let tas = TrustAnchorState {
hash: crate::ccr::compute_state_hash(&encode_trust_anchor_state_payload_der(&skis).unwrap()),
hash: crate::ccr::compute_state_hash(
&encode_trust_anchor_state_payload_der(&skis).unwrap(),
),
skis,
};
let rksets = vec![RouterKeySet { as_id: 64496, router_keys: vec![RouterKey { ski: vec![0x22;20], spki_der: vec![0x30,0x00] }] }];
let rksets = vec![RouterKeySet {
as_id: 64496,
router_keys: vec![RouterKey {
ski: vec![0x22; 20],
spki_der: vec![0x30, 0x00],
}],
}];
let rks = RouterKeyState {
hash: crate::ccr::compute_state_hash(&encode_router_key_state_payload_der(&rksets).unwrap()),
hash: crate::ccr::compute_state_hash(
&encode_router_key_state_payload_der(&rksets).unwrap(),
),
rksets,
};
CcrContentInfo::new(RpkiCanonicalCacheRepresentation {
@ -448,19 +509,31 @@ mod tests {
fn verify_detects_each_state_hash_mismatch() {
let mut ci = sample_content_info();
ci.content.vrps.as_mut().unwrap().hash[0] ^= 0x01;
assert!(matches!(verify_content_info(&ci), Err(CcrVerifyError::RoaHashMismatch)));
assert!(matches!(
verify_content_info(&ci),
Err(CcrVerifyError::RoaHashMismatch)
));
let mut ci = sample_content_info();
ci.content.vaps.as_mut().unwrap().hash[0] ^= 0x01;
assert!(matches!(verify_content_info(&ci), Err(CcrVerifyError::AspaHashMismatch)));
assert!(matches!(
verify_content_info(&ci),
Err(CcrVerifyError::AspaHashMismatch)
));
let mut ci = sample_content_info();
ci.content.tas.as_mut().unwrap().hash[0] ^= 0x01;
assert!(matches!(verify_content_info(&ci), Err(CcrVerifyError::TrustAnchorHashMismatch)));
assert!(matches!(
verify_content_info(&ci),
Err(CcrVerifyError::TrustAnchorHashMismatch)
));
let mut ci = sample_content_info();
ci.content.rks.as_mut().unwrap().hash[0] ^= 0x01;
assert!(matches!(verify_content_info(&ci), Err(CcrVerifyError::RouterKeyHashMismatch)));
assert!(matches!(
verify_content_info(&ci),
Err(CcrVerifyError::RouterKeyHashMismatch)
));
}
#[test]
@ -472,20 +545,28 @@ mod tests {
});
let report_path = td.path().join("report.json");
std::fs::write(&report_path, serde_json::to_vec(&report).unwrap()).unwrap();
verify_against_report_json_path(&sample_content_info(), &report_path).expect("matching report");
verify_against_report_json_path(&sample_content_info(), &report_path)
.expect("matching report");
let bad_path = td.path().join("bad.json");
std::fs::write(&bad_path, b"not-json").unwrap();
assert!(matches!(verify_against_report_json_path(&sample_content_info(), &bad_path), Err(CcrVerifyError::ReportParse(_))));
assert!(matches!(
verify_against_report_json_path(&sample_content_info(), &bad_path),
Err(CcrVerifyError::ReportParse(_))
));
}
#[test]
fn verify_against_report_json_rejects_missing_fields_and_aspa_mismatch() {
let td = tempfile::tempdir().expect("tempdir");
let missing = serde_json::json!({"vrps":[{"prefix":"0.0.0.0/0","max_length":0}],"aspas":[]});
let missing =
serde_json::json!({"vrps":[{"prefix":"0.0.0.0/0","max_length":0}],"aspas":[]});
let missing_path = td.path().join("missing.json");
std::fs::write(&missing_path, serde_json::to_vec(&missing).unwrap()).unwrap();
assert!(matches!(verify_against_report_json_path(&sample_content_info(), &missing_path), Err(CcrVerifyError::ReportParse(_))));
assert!(matches!(
verify_against_report_json_path(&sample_content_info(), &missing_path),
Err(CcrVerifyError::ReportParse(_))
));
let mismatch = serde_json::json!({
"vrps": [{"asn": 64496, "prefix": "0.0.0.0/0", "max_length": 0}],
@ -493,7 +574,10 @@ mod tests {
});
let mismatch_path = td.path().join("mismatch.json");
std::fs::write(&mismatch_path, serde_json::to_vec(&mismatch).unwrap()).unwrap();
assert!(matches!(verify_against_report_json_path(&sample_content_info(), &mismatch_path), Err(CcrVerifyError::ReportAspaMismatch { .. })));
assert!(matches!(
verify_against_report_json_path(&sample_content_info(), &mismatch_path),
Err(CcrVerifyError::ReportAspaMismatch { .. })
));
}
#[test]
@ -542,11 +626,26 @@ mod tests {
object_type: Some("mft".to_string()),
validation_status: VcirArtifactValidationStatus::Accepted,
}],
summary: VcirSummary { local_vrp_count: 0, local_aspa_count: 0, local_router_key_count: 0, child_count: 1, accepted_object_count: 1, rejected_object_count: 0 },
audit_summary: VcirAuditSummary { failed_fetch_eligible: true, last_failed_fetch_reason: None, warning_count: 0, audit_flags: Vec::new() },
summary: VcirSummary {
local_vrp_count: 0,
local_aspa_count: 0,
local_router_key_count: 0,
child_count: 1,
accepted_object_count: 1,
rejected_object_count: 0,
},
audit_summary: VcirAuditSummary {
failed_fetch_eligible: true,
last_failed_fetch_reason: None,
warning_count: 0,
audit_flags: Vec::new(),
},
};
store.put_vcir(&vcir).unwrap();
assert!(matches!(verify_against_vcir_store(&sample_content_info(), &store), Err(CcrVerifyError::VcirManifestMismatch { .. })));
assert!(matches!(
verify_against_vcir_store(&sample_content_info(), &store),
Err(CcrVerifyError::VcirManifestMismatch { .. })
));
}
#[test]
@ -557,12 +656,27 @@ mod tests {
hash_alg: CcrDigestAlgorithm::Sha256,
produced_at: sample_time(),
mfts: None,
vrps: Some(crate::ccr::model::RoaPayloadState { rps: vec![crate::ccr::model::RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![block.clone()] }], hash: crate::ccr::compute_state_hash(&encode_roa_payload_state_payload_der(&[crate::ccr::model::RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![block] }]).unwrap()) }),
vrps: Some(crate::ccr::model::RoaPayloadState {
rps: vec![crate::ccr::model::RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![block.clone()],
}],
hash: crate::ccr::compute_state_hash(
&encode_roa_payload_state_payload_der(&[crate::ccr::model::RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![block],
}])
.unwrap(),
),
}),
vaps: None,
tas: None,
rks: None,
});
assert!(matches!(extract_vrp_rows(&ci), Err(CcrVerifyError::Decode(_))));
assert!(matches!(
extract_vrp_rows(&ci),
Err(CcrVerifyError::Decode(_))
));
let bad_count = count_roa_block_entries(&[vec![0x04, 0x00]]);
assert_eq!(bad_count, 0);
}

View File

@ -14,7 +14,10 @@ pub enum CirDecodeError {
UnexpectedVersion { expected: u32, actual: u32 },
#[error("unexpected digest algorithm OID: expected {expected}, got {actual}")]
UnexpectedDigestAlgorithm { expected: &'static str, actual: String },
UnexpectedDigestAlgorithm {
expected: &'static str,
actual: String,
},
#[error("CIR model validation failed after decode: {0}")]
Validate(String),
@ -85,12 +88,17 @@ fn decode_object(der: &[u8]) -> Result<CirObject, CirDecodeError> {
let mut top = DerReader::new(der);
let mut seq = top.take_sequence().map_err(CirDecodeError::Parse)?;
if !top.is_empty() {
return Err(CirDecodeError::Parse("trailing bytes after CirObject".into()));
return Err(CirDecodeError::Parse(
"trailing bytes after CirObject".into(),
));
}
let rsync_uri = std::str::from_utf8(seq.take_tag(0x16).map_err(CirDecodeError::Parse)?)
.map_err(|e| CirDecodeError::Parse(e.to_string()))?
.to_string();
let sha256 = seq.take_octet_string().map_err(CirDecodeError::Parse)?.to_vec();
let sha256 = seq
.take_octet_string()
.map_err(CirDecodeError::Parse)?
.to_vec();
if !seq.is_empty() {
return Err(CirDecodeError::Parse("trailing fields in CirObject".into()));
}
@ -106,7 +114,10 @@ fn decode_tal(der: &[u8]) -> Result<CirTal, CirDecodeError> {
let tal_uri = std::str::from_utf8(seq.take_tag(0x16).map_err(CirDecodeError::Parse)?)
.map_err(|e| CirDecodeError::Parse(e.to_string()))?
.to_string();
let tal_bytes = seq.take_octet_string().map_err(CirDecodeError::Parse)?.to_vec();
let tal_bytes = seq
.take_octet_string()
.map_err(CirDecodeError::Parse)?
.to_vec();
if !seq.is_empty() {
return Err(CirDecodeError::Parse("trailing fields in CirTal".into()));
}
@ -150,12 +161,10 @@ fn parse_generalized_time(bytes: &[u8]) -> Result<time::OffsetDateTime, CirDecod
let hour = parse(8..10)? as u8;
let minute = parse(10..12)? as u8;
let second = parse(12..14)? as u8;
let month = time::Month::try_from(month)
.map_err(|e| CirDecodeError::Parse(e.to_string()))?;
let month = time::Month::try_from(month).map_err(|e| CirDecodeError::Parse(e.to_string()))?;
let date = time::Date::from_calendar_date(year, month, day)
.map_err(|e| CirDecodeError::Parse(e.to_string()))?;
let timev = time::Time::from_hms(hour, minute, second)
.map_err(|e| CirDecodeError::Parse(e.to_string()))?;
Ok(time::PrimitiveDateTime::new(date, timev).assume_utc())
}

View File

@ -9,9 +9,7 @@ pub enum CirEncodeError {
Validate(String),
}
pub fn encode_cir(
cir: &CanonicalInputRepresentation,
) -> Result<Vec<u8>, CirEncodeError> {
pub fn encode_cir(cir: &CanonicalInputRepresentation) -> Result<Vec<u8>, CirEncodeError> {
cir.validate().map_err(CirEncodeError::Validate)?;
Ok(encode_sequence(&[
encode_integer_u32(cir.version),
@ -134,7 +132,10 @@ fn encode_len_into(len: usize, out: &mut Vec<u8>) {
return;
}
let bytes = len.to_be_bytes();
let first_non_zero = bytes.iter().position(|&b| b != 0).unwrap_or(bytes.len() - 1);
let first_non_zero = bytes
.iter()
.position(|&b| b != 0)
.unwrap_or(bytes.len() - 1);
let len_bytes = &bytes[first_non_zero..];
out.push(0x80 | (len_bytes.len() as u8));
out.extend_from_slice(len_bytes);
@ -144,4 +145,3 @@ fn encode_len_into(len: usize, out: &mut Vec<u8>) {
const _: () = {
let _ = CIR_VERSION_V1;
};

View File

@ -3,13 +3,14 @@ use std::collections::BTreeSet;
use std::path::Path;
use crate::audit::{AuditObjectResult, PublicationPointAudit};
use crate::blob_store::RawObjectStore;
use crate::cir::encode::{CirEncodeError, encode_cir};
use crate::cir::model::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,
};
use crate::cir::static_pool::{
CirStaticPoolError, CirStaticPoolExportSummary, write_bytes_to_static_pool,
export_hashes_from_store,
CirStaticPoolError, CirStaticPoolExportSummary, export_hashes_from_store,
write_bytes_to_static_pool,
};
use crate::data_model::ta::TrustAnchor;
use crate::storage::{RepositoryViewState, RocksStore};
@ -46,11 +47,24 @@ pub enum CirExportError {
Write(String, String),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum CirObjectPoolExportSummary {
Static(CirStaticPoolExportSummary),
RawStore(CirRawStoreExportSummary),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CirRawStoreExportSummary {
pub unique_hashes: usize,
pub written_entries: usize,
pub reused_entries: usize,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CirExportSummary {
pub object_count: usize,
pub tal_count: usize,
pub static_pool: CirStaticPoolExportSummary,
pub object_pool: CirObjectPoolExportSummary,
pub timing: CirExportTiming,
}
@ -132,7 +146,10 @@ pub fn build_cir_from_run(
Ok(cir)
}
pub fn write_cir_file(path: &Path, cir: &CanonicalInputRepresentation) -> Result<(), CirExportError> {
pub fn write_cir_file(
path: &Path,
cir: &CanonicalInputRepresentation,
) -> Result<(), CirExportError> {
let der = encode_cir(cir)?;
if let Some(parent) = path.parent() {
std::fs::create_dir_all(parent)
@ -175,6 +192,58 @@ pub fn export_cir_static_pool(
Ok(summary)
}
pub fn export_cir_raw_store(
store: &RocksStore,
raw_store_path: &Path,
cir: &CanonicalInputRepresentation,
trust_anchor: &TrustAnchor,
) -> Result<CirRawStoreExportSummary, CirExportError> {
let ta_hash = ta_sha256_hex(&trust_anchor.ta_certificate.raw_der);
let unique: BTreeSet<String> = cir
.objects
.iter()
.map(|item| hex::encode(&item.sha256))
.collect();
let mut written_entries = 0usize;
let mut reused_entries = 0usize;
for sha256_hex in &unique {
if store
.get_raw_entry(sha256_hex)
.map_err(|e| CirExportError::Write(raw_store_path.display().to_string(), e.to_string()))?
.is_some()
{
reused_entries += 1;
continue;
}
if sha256_hex == &ta_hash {
let mut entry =
crate::storage::RawByHashEntry::from_bytes(ta_hash.clone(), trust_anchor.ta_certificate.raw_der.clone());
entry.object_type = Some("cer".to_string());
for object in &cir.objects {
if hex::encode(&object.sha256) == ta_hash {
entry.origin_uris.push(object.rsync_uri.clone());
}
}
store
.put_raw_by_hash_entry(&entry)
.map_err(|e| CirExportError::Write(raw_store_path.display().to_string(), e.to_string()))?;
written_entries += 1;
continue;
}
return Err(CirExportError::Write(
raw_store_path.display().to_string(),
format!("raw store missing object for sha256={sha256_hex}"),
));
}
Ok(CirRawStoreExportSummary {
unique_hashes: unique.len(),
written_entries,
reused_entries,
})
}
pub fn export_cir_from_run(
store: &RocksStore,
trust_anchor: &TrustAnchor,
@ -182,9 +251,19 @@ pub fn export_cir_from_run(
validation_time: time::OffsetDateTime,
publication_points: &[PublicationPointAudit],
cir_out: &Path,
static_root: &Path,
static_root: Option<&Path>,
raw_store_path: Option<&Path>,
capture_date_utc: time::Date,
) -> Result<CirExportSummary, CirExportError> {
let backend_count = static_root.is_some() as u8 + raw_store_path.is_some() as u8;
match backend_count {
1 => {}
_ => {
return Err(CirExportError::Validate(
"must specify exactly one CIR object pool backend".to_string(),
));
}
}
let total_started = std::time::Instant::now();
let started = std::time::Instant::now();
@ -198,7 +277,22 @@ pub fn export_cir_from_run(
let build_cir_ms = started.elapsed().as_millis() as u64;
let started = std::time::Instant::now();
let static_pool = export_cir_static_pool(store, static_root, capture_date_utc, &cir, trust_anchor)?;
let object_pool = if let Some(static_root) = static_root {
CirObjectPoolExportSummary::Static(export_cir_static_pool(
store,
static_root,
capture_date_utc,
&cir,
trust_anchor,
)?)
} else {
CirObjectPoolExportSummary::RawStore(export_cir_raw_store(
store,
raw_store_path.expect("validated"),
&cir,
trust_anchor,
)?)
};
let static_pool_ms = started.elapsed().as_millis() as u64;
let started = std::time::Instant::now();
@ -208,7 +302,7 @@ pub fn export_cir_from_run(
Ok(CirExportSummary {
object_count: cir.objects.len(),
tal_count: cir.tals.len(),
static_pool,
object_pool,
timing: CirExportTiming {
build_cir_ms,
static_pool_ms,
@ -246,7 +340,8 @@ mod tests {
fn sample_trust_anchor() -> TrustAnchor {
let base = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let tal_bytes = std::fs::read(base.join("tests/fixtures/tal/apnic-rfc7730-https.tal")).unwrap();
let tal_bytes =
std::fs::read(base.join("tests/fixtures/tal/apnic-rfc7730-https.tal")).unwrap();
let ta_der = std::fs::read(base.join("tests/fixtures/ta/apnic-ta.cer")).unwrap();
let tal = Tal::decode_bytes(&tal_bytes).unwrap();
TrustAnchor::bind_der(tal, &ta_der, None).unwrap()
@ -264,7 +359,8 @@ mod tests {
let bytes = b"object-a".to_vec();
let hash = sha256_hex(&bytes);
let mut raw = RawByHashEntry::from_bytes(hash.clone(), bytes.clone());
raw.origin_uris.push("rsync://example.test/repo/a.cer".into());
raw.origin_uris
.push("rsync://example.test/repo/a.cer".into());
store.put_raw_by_hash_entry(&raw).unwrap();
store
.put_repository_view_entry(&RepositoryViewEntry {
@ -284,18 +380,20 @@ mod tests {
sample_time(),
&[],
)
.expect("build cir");
.expect("build cir");
assert_eq!(cir.version, CIR_VERSION_V1);
assert_eq!(cir.tals.len(), 1);
assert_eq!(cir.tals[0].tal_uri, "https://example.test/root.tal");
assert!(cir
.objects
.iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/a.cer"));
assert!(cir
.objects
.iter()
.any(|item| item.rsync_uri.contains("apnic-rpki-root-iana-origin.cer")));
assert!(
cir.objects
.iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/a.cer")
);
assert!(
cir.objects
.iter()
.any(|item| item.rsync_uri.contains("apnic-rpki-root-iana-origin.cer"))
);
}
#[test]
@ -309,7 +407,8 @@ mod tests {
let bytes = b"object-b".to_vec();
let hash = sha256_hex(&bytes);
let mut raw = RawByHashEntry::from_bytes(hash.clone(), bytes.clone());
raw.origin_uris.push("rsync://example.test/repo/b.roa".into());
raw.origin_uris
.push("rsync://example.test/repo/b.roa".into());
store.put_raw_by_hash_entry(&raw).unwrap();
store
.put_repository_view_entry(&RepositoryViewEntry {
@ -330,12 +429,17 @@ mod tests {
sample_time(),
&[],
&cir_path,
&static_root,
Some(&static_root),
None,
sample_date(),
)
.expect("export cir");
assert_eq!(summary.tal_count, 1);
assert!(summary.object_count >= 2);
match summary.object_pool {
CirObjectPoolExportSummary::Static(_) => {}
other => panic!("unexpected backend: {other:?}"),
}
let der = std::fs::read(&cir_path).unwrap();
let cir = decode_cir(&der).unwrap();
@ -345,6 +449,51 @@ mod tests {
assert_eq!(std::fs::read(object_path).unwrap(), bytes);
}
#[test]
fn export_cir_from_run_uses_raw_store_backend_without_pool_export() {
let td = tempfile::tempdir().unwrap();
let store_dir = td.path().join("db");
let raw_store = td.path().join("raw-store.db");
let out_dir = td.path().join("out");
let store = RocksStore::open_with_external_raw_store(&store_dir, &raw_store).unwrap();
let bytes = b"object-d".to_vec();
let hash = sha256_hex(&bytes);
let mut raw = RawByHashEntry::from_bytes(hash.clone(), bytes.clone());
raw.origin_uris
.push("rsync://example.test/repo/d.roa".into());
store.put_raw_by_hash_entry(&raw).unwrap();
store
.put_repository_view_entry(&RepositoryViewEntry {
rsync_uri: "rsync://example.test/repo/d.roa".to_string(),
current_hash: Some(hash.clone()),
repository_source: Some("https://rrdp.example.test/notification.xml".to_string()),
object_type: Some("roa".to_string()),
state: RepositoryViewState::Present,
})
.unwrap();
let ta = sample_trust_anchor();
let cir_path = out_dir.join("example.cir");
let summary = export_cir_from_run(
&store,
&ta,
"https://example.test/root.tal",
sample_time(),
&[],
&cir_path,
None,
Some(&raw_store),
sample_date(),
)
.expect("export cir to raw store");
match summary.object_pool {
CirObjectPoolExportSummary::RawStore(ref s) => assert!(s.unique_hashes >= 2),
other => panic!("unexpected backend: {other:?}"),
}
assert!(raw_store.exists());
}
#[test]
fn build_cir_from_run_includes_vcir_current_instance_objects_from_audit() {
let td = tempfile::tempdir().unwrap();
@ -357,14 +506,16 @@ mod tests {
};
pp.objects.push(crate::audit::ObjectAuditEntry {
rsync_uri: "rsync://example.test/repo/fallback.mft".to_string(),
sha256_hex: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".to_string(),
sha256_hex: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
.to_string(),
kind: crate::audit::AuditObjectKind::Manifest,
result: crate::audit::AuditObjectResult::Ok,
detail: None,
});
pp.objects.push(crate::audit::ObjectAuditEntry {
rsync_uri: "rsync://example.test/repo/fallback.roa".to_string(),
sha256_hex: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb".to_string(),
sha256_hex: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
.to_string(),
kind: crate::audit::AuditObjectKind::Roa,
result: crate::audit::AuditObjectResult::Ok,
detail: None,
@ -379,13 +530,15 @@ mod tests {
)
.expect("build cir");
assert!(cir
.objects
.iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/fallback.mft"));
assert!(cir
.objects
.iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/fallback.roa"));
assert!(
cir.objects
.iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/fallback.mft")
);
assert!(
cir.objects
.iter()
.any(|item| item.rsync_uri == "rsync://example.test/repo/fallback.roa")
);
}
}

View File

@ -1,6 +1,7 @@
use std::fs;
use std::path::{Path, PathBuf};
use crate::blob_store::{ExternalRawStoreDb, RawObjectStore};
use crate::cir::model::CanonicalInputRepresentation;
#[derive(Debug, thiserror::Error)]
@ -27,13 +28,30 @@ pub enum CirMaterializeError {
MissingStaticObject { sha256_hex: String },
#[error("link target failed: {src} -> {dst}: {detail}")]
Link { src: String, dst: String, detail: String },
Link {
src: String,
dst: String,
detail: String,
},
#[error("copy target failed: {src} -> {dst}: {detail}")]
Copy { src: String, dst: String, detail: String },
Copy {
src: String,
dst: String,
detail: String,
},
#[error("mirror tree mismatch after materialize: {0}")]
TreeMismatch(String),
#[error("open raw store failed: {path}: {detail}")]
OpenRawStore { path: String, detail: String },
#[error("raw object not found for sha256={sha256_hex}")]
MissingRawStoreObject { sha256_hex: String },
#[error("read raw store failed for sha256={sha256_hex}: {detail}")]
ReadRawStore { sha256_hex: String, detail: String },
}
#[derive(Clone, Debug, PartialEq, Eq)]
@ -49,8 +67,7 @@ pub fn materialize_cir(
mirror_root: &Path,
clean_rebuild: bool,
) -> Result<CirMaterializeSummary, CirMaterializeError> {
cir.validate()
.map_err(CirMaterializeError::TreeMismatch)?;
cir.validate().map_err(CirMaterializeError::TreeMismatch)?;
if clean_rebuild && mirror_root.exists() {
fs::remove_dir_all(mirror_root).map_err(|e| CirMaterializeError::RemoveMirrorRoot {
@ -120,6 +137,89 @@ pub fn materialize_cir(
})
}
pub fn materialize_cir_from_raw_store(
cir: &CanonicalInputRepresentation,
raw_store_db: &Path,
mirror_root: &Path,
clean_rebuild: bool,
) -> Result<CirMaterializeSummary, CirMaterializeError> {
cir.validate().map_err(CirMaterializeError::TreeMismatch)?;
if clean_rebuild && mirror_root.exists() {
fs::remove_dir_all(mirror_root).map_err(|e| CirMaterializeError::RemoveMirrorRoot {
path: mirror_root.display().to_string(),
detail: e.to_string(),
})?;
}
fs::create_dir_all(mirror_root).map_err(|e| CirMaterializeError::CreateMirrorRoot {
path: mirror_root.display().to_string(),
detail: e.to_string(),
})?;
let raw_store =
ExternalRawStoreDb::open(raw_store_db).map_err(|e| CirMaterializeError::OpenRawStore {
path: raw_store_db.display().to_string(),
detail: e.to_string(),
})?;
let mut copied_files = 0usize;
for object in &cir.objects {
let sha256_hex = hex::encode(&object.sha256);
let entry = raw_store
.get_raw_entry(&sha256_hex)
.map_err(|e| CirMaterializeError::ReadRawStore {
sha256_hex: sha256_hex.clone(),
detail: e.to_string(),
})?
.ok_or_else(|| CirMaterializeError::MissingRawStoreObject {
sha256_hex: sha256_hex.clone(),
})?;
let relative = mirror_relative_path_for_rsync_uri(&object.rsync_uri)?;
let target = mirror_root.join(&relative);
if let Some(parent) = target.parent() {
fs::create_dir_all(parent).map_err(|e| CirMaterializeError::CreateParent {
path: parent.display().to_string(),
detail: e.to_string(),
})?;
}
if target.exists() {
fs::remove_file(&target).map_err(|e| CirMaterializeError::RemoveExistingTarget {
path: target.display().to_string(),
detail: e.to_string(),
})?;
}
fs::write(&target, &entry.bytes).map_err(|e| CirMaterializeError::Copy {
src: raw_store_db.display().to_string(),
dst: target.display().to_string(),
detail: e.to_string(),
})?;
copied_files += 1;
}
let actual = collect_materialized_uris(mirror_root)?;
let expected = cir
.objects
.iter()
.map(|item| item.rsync_uri.clone())
.collect::<std::collections::BTreeSet<_>>();
if actual != expected {
return Err(CirMaterializeError::TreeMismatch(format!(
"expected {} files, got {} files",
expected.len(),
actual.len()
)));
}
Ok(CirMaterializeSummary {
object_count: cir.objects.len(),
linked_files: 0,
copied_files,
})
}
pub fn mirror_relative_path_for_rsync_uri(rsync_uri: &str) -> Result<PathBuf, CirMaterializeError> {
let url = url::Url::parse(rsync_uri)
.map_err(|_| CirMaterializeError::InvalidRsyncUri(rsync_uri.to_string()))?;
@ -160,8 +260,8 @@ pub fn resolve_static_pool_file(
let prefix1 = &sha256_hex[0..2];
let prefix2 = &sha256_hex[2..4];
let entries = fs::read_dir(static_root)
.map_err(|_| CirMaterializeError::MissingStaticObject {
let entries =
fs::read_dir(static_root).map_err(|_| CirMaterializeError::MissingStaticObject {
sha256_hex: sha256_hex.to_string(),
})?;
let mut dates = entries
@ -215,10 +315,15 @@ fn collect_materialized_uris(
#[cfg(test)]
mod tests {
use super::{CirMaterializeError, materialize_cir, mirror_relative_path_for_rsync_uri, resolve_static_pool_file};
use super::{
CirMaterializeError, materialize_cir, materialize_cir_from_raw_store,
mirror_relative_path_for_rsync_uri, resolve_static_pool_file,
};
use crate::blob_store::ExternalRawStoreDb;
use crate::cir::model::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,
};
use sha2::Digest;
use std::path::{Path, PathBuf};
fn sample_time() -> time::OffsetDateTime {
@ -257,21 +362,45 @@ mod tests {
}
}
fn cir_with_real_hashes(a: &[u8], b: &[u8]) -> CanonicalInputRepresentation {
CanonicalInputRepresentation {
version: CIR_VERSION_V1,
hash_alg: CirHashAlgorithm::Sha256,
validation_time: sample_time(),
objects: vec![
CirObject {
rsync_uri: "rsync://example.net/repo/a.cer".to_string(),
sha256: sha2::Sha256::digest(a).to_vec(),
},
CirObject {
rsync_uri: "rsync://example.net/repo/nested/b.roa".to_string(),
sha256: sha2::Sha256::digest(b).to_vec(),
},
],
tals: vec![CirTal {
tal_uri: "https://tal.example.net/root.tal".to_string(),
tal_bytes: b"x".to_vec(),
}],
}
}
#[test]
fn mirror_relative_path_for_rsync_uri_maps_host_and_path() {
let path =
mirror_relative_path_for_rsync_uri("rsync://example.net/repo/nested/b.roa").unwrap();
assert_eq!(path, PathBuf::from("example.net").join("repo").join("nested").join("b.roa"));
assert_eq!(
path,
PathBuf::from("example.net")
.join("repo")
.join("nested")
.join("b.roa")
);
}
#[test]
fn resolve_static_pool_file_finds_hash_across_dates() {
let td = tempfile::tempdir().unwrap();
let path = td
.path()
.join("20260407")
.join("11")
.join("11");
let path = td.path().join("20260407").join("11").join("11");
std::fs::create_dir_all(&path).unwrap();
let file = path.join("1111111111111111111111111111111111111111111111111111111111111111");
std::fs::write(&file, b"x").unwrap();
@ -289,14 +418,20 @@ mod tests {
let td = tempfile::tempdir().unwrap();
let err = resolve_static_pool_file(td.path(), "not-a-hash")
.expect_err("invalid hash should fail");
assert!(matches!(err, CirMaterializeError::MissingStaticObject { .. }));
assert!(matches!(
err,
CirMaterializeError::MissingStaticObject { .. }
));
let err = resolve_static_pool_file(
td.path(),
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
)
.expect_err("missing hash should fail");
assert!(matches!(err, CirMaterializeError::MissingStaticObject { .. }));
assert!(matches!(
err,
CirMaterializeError::MissingStaticObject { .. }
));
}
#[test]
@ -333,7 +468,10 @@ mod tests {
let summary = materialize_cir(&sample_cir(), &static_root, &mirror_root, true).unwrap();
assert_eq!(summary.object_count, 2);
assert_eq!(std::fs::read(mirror_root.join("example.net/repo/a.cer")).unwrap(), b"a");
assert_eq!(
std::fs::read(mirror_root.join("example.net/repo/a.cer")).unwrap(),
b"a"
);
assert_eq!(
std::fs::read(mirror_root.join("example.net/repo/nested/b.roa")).unwrap(),
b"b"
@ -344,14 +482,12 @@ mod tests {
#[test]
fn materialize_fails_when_static_object_missing() {
let td = tempfile::tempdir().unwrap();
let err = materialize_cir(
&sample_cir(),
td.path(),
&td.path().join("mirror"),
true,
)
.expect_err("missing static object must fail");
assert!(matches!(err, CirMaterializeError::MissingStaticObject { .. }));
let err = materialize_cir(&sample_cir(), td.path(), &td.path().join("mirror"), true)
.expect_err("missing static object must fail");
assert!(matches!(
err,
CirMaterializeError::MissingStaticObject { .. }
));
}
#[test]
@ -380,6 +516,192 @@ mod tests {
assert!(matches!(err, CirMaterializeError::TreeMismatch(_)));
}
#[test]
fn materialize_from_raw_store_creates_expected_tree() {
let td = tempfile::tempdir().unwrap();
let raw_store_path = td.path().join("raw-store.db");
let mirror_root = td.path().join("mirror");
let a = b"a".to_vec();
let b = b"b".to_vec();
let cir = CanonicalInputRepresentation {
version: CIR_VERSION_V1,
hash_alg: CirHashAlgorithm::Sha256,
validation_time: sample_time(),
objects: vec![
CirObject {
rsync_uri: "rsync://example.net/repo/a.cer".to_string(),
sha256: sha2::Sha256::digest(&a).to_vec(),
},
CirObject {
rsync_uri: "rsync://example.net/repo/nested/b.roa".to_string(),
sha256: sha2::Sha256::digest(&b).to_vec(),
},
],
tals: vec![CirTal {
tal_uri: "https://tal.example.net/root.tal".to_string(),
tal_bytes: b"x".to_vec(),
}],
};
{
let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
let mut entry_a =
crate::storage::RawByHashEntry::from_bytes(hex::encode(&cir.objects[0].sha256), a);
entry_a.origin_uris.push(cir.objects[0].rsync_uri.clone());
raw_store.put_raw_entry(&entry_a).unwrap();
let mut entry_b =
crate::storage::RawByHashEntry::from_bytes(hex::encode(&cir.objects[1].sha256), b);
entry_b.origin_uris.push(cir.objects[1].rsync_uri.clone());
raw_store.put_raw_entry(&entry_b).unwrap();
}
let summary =
materialize_cir_from_raw_store(&cir, &raw_store_path, &mirror_root, true).unwrap();
assert_eq!(summary.object_count, 2);
assert_eq!(summary.linked_files, 0);
assert_eq!(summary.copied_files, 2);
assert_eq!(
std::fs::read(mirror_root.join("example.net/repo/a.cer")).unwrap(),
b"a"
);
assert_eq!(
std::fs::read(mirror_root.join("example.net/repo/nested/b.roa")).unwrap(),
b"b"
);
}
#[test]
fn materialize_from_raw_store_fails_when_object_missing() {
let td = tempfile::tempdir().unwrap();
let raw_store_path = td.path().join("raw-store.db");
let mirror_root = td.path().join("mirror");
let cir = cir_with_real_hashes(b"a", b"b");
{
let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
let only = crate::storage::RawByHashEntry::from_bytes(
hex::encode(&cir.objects[0].sha256),
b"a".to_vec(),
);
raw_store.put_raw_entry(&only).unwrap();
}
let err = materialize_cir_from_raw_store(&cir, &raw_store_path, &mirror_root, true)
.expect_err("missing second object should fail");
assert!(matches!(
err,
CirMaterializeError::MissingRawStoreObject { .. }
));
}
#[test]
fn materialize_from_raw_store_detects_stale_tree_when_not_clean_rebuild() {
let td = tempfile::tempdir().unwrap();
let raw_store_path = td.path().join("raw-store.db");
let mirror_root = td.path().join("mirror");
let cir = cir_with_real_hashes(b"a", b"b");
{
let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
let mut entry_a = crate::storage::RawByHashEntry::from_bytes(
hex::encode(&cir.objects[0].sha256),
b"a".to_vec(),
);
entry_a.origin_uris.push(cir.objects[0].rsync_uri.clone());
raw_store.put_raw_entry(&entry_a).unwrap();
let mut entry_b = crate::storage::RawByHashEntry::from_bytes(
hex::encode(&cir.objects[1].sha256),
b"b".to_vec(),
);
entry_b.origin_uris.push(cir.objects[1].rsync_uri.clone());
raw_store.put_raw_entry(&entry_b).unwrap();
}
std::fs::create_dir_all(mirror_root.join("extra")).unwrap();
std::fs::write(mirror_root.join("extra/stale.txt"), b"stale").unwrap();
let err =
materialize_cir_from_raw_store(&cir, &raw_store_path, &mirror_root, false)
.expect_err("stale file should fail exact tree check");
assert!(matches!(err, CirMaterializeError::TreeMismatch(_)));
}
#[test]
fn materialize_from_raw_store_overwrites_existing_targets() {
let td = tempfile::tempdir().unwrap();
let raw_store_path = td.path().join("raw-store.db");
let mirror_root = td.path().join("mirror");
let a = b"new-a".to_vec();
let b = b"new-b".to_vec();
let cir = cir_with_real_hashes(&a, &b);
{
let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
let mut entry_a = crate::storage::RawByHashEntry::from_bytes(
hex::encode(&cir.objects[0].sha256),
a.clone(),
);
entry_a.origin_uris.push(cir.objects[0].rsync_uri.clone());
raw_store.put_raw_entry(&entry_a).unwrap();
let mut entry_b = crate::storage::RawByHashEntry::from_bytes(
hex::encode(&cir.objects[1].sha256),
b.clone(),
);
entry_b.origin_uris.push(cir.objects[1].rsync_uri.clone());
raw_store.put_raw_entry(&entry_b).unwrap();
}
let target = mirror_root.join("example.net/repo/a.cer");
std::fs::create_dir_all(target.parent().unwrap()).unwrap();
std::fs::write(&target, b"old").unwrap();
let summary =
materialize_cir_from_raw_store(&cir, &raw_store_path, &mirror_root, false).unwrap();
assert_eq!(summary.copied_files, 2);
assert_eq!(std::fs::read(&target).unwrap(), a);
}
#[test]
fn materialize_from_raw_store_reports_codec_errors() {
let td = tempfile::tempdir().unwrap();
let raw_store_path = td.path().join("raw-store.db");
let mirror_root = td.path().join("mirror");
{
let _raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
}
{
let db = rocksdb::DB::open_default(&raw_store_path).unwrap();
db.put(
b"rawbyhash:1111111111111111111111111111111111111111111111111111111111111111",
b"bad-cbor",
)
.unwrap();
}
let err = materialize_cir_from_raw_store(
&CanonicalInputRepresentation {
version: CIR_VERSION_V1,
hash_alg: CirHashAlgorithm::Sha256,
validation_time: sample_time(),
objects: vec![CirObject {
rsync_uri: "rsync://example.net/repo/a.cer".to_string(),
sha256: hex::decode(
"1111111111111111111111111111111111111111111111111111111111111111",
)
.unwrap(),
}],
tals: vec![CirTal {
tal_uri: "https://tal.example.net/root.tal".to_string(),
tal_bytes: b"x".to_vec(),
}],
},
&raw_store_path,
&mirror_root,
true,
)
.expect_err("corrupt raw-store object should fail");
assert!(matches!(
err,
CirMaterializeError::ReadRawStore { .. } | CirMaterializeError::MissingRawStoreObject { .. }
));
}
fn write_static(root: &Path, date: &str, hash: &str, bytes: &[u8]) {
let path = root.join(date).join(&hash[0..2]).join(&hash[2..4]);
std::fs::create_dir_all(&path).unwrap();

View File

@ -1,26 +1,29 @@
pub mod decode;
pub mod encode;
#[cfg(feature = "full")]
pub mod export;
pub mod materialize;
pub mod model;
pub mod sequence;
#[cfg(feature = "full")]
pub mod export;
#[cfg(feature = "full")]
pub mod static_pool;
pub use decode::{CirDecodeError, decode_cir};
pub use encode::{CirEncodeError, encode_cir};
#[cfg(feature = "full")]
pub use export::{
CirExportError, CirExportSummary, build_cir_from_run, export_cir_from_run, write_cir_file,
};
pub use materialize::{
CirMaterializeError, CirMaterializeSummary, materialize_cir, mirror_relative_path_for_rsync_uri,
resolve_static_pool_file,
CirMaterializeError, CirMaterializeSummary, materialize_cir,
materialize_cir_from_raw_store,
mirror_relative_path_for_rsync_uri, resolve_static_pool_file,
};
pub use model::{
CIR_VERSION_V1, CirHashAlgorithm, CirObject, CirTal, CanonicalInputRepresentation,
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,
};
pub use sequence::{CirSequenceManifest, CirSequenceStep, CirSequenceStepKind};
#[cfg(feature = "full")]
pub use export::{CirExportError, CirExportSummary, build_cir_from_run, export_cir_from_run, write_cir_file};
#[cfg(feature = "full")]
pub use static_pool::{
CirStaticPoolError, CirStaticPoolExportSummary, CirStaticPoolWriteResult,
export_hashes_from_store, static_pool_path, static_pool_relative_path,
@ -59,9 +62,8 @@ mod tests {
],
tals: vec![CirTal {
tal_uri: "https://tal.example.net/root.tal".to_string(),
tal_bytes:
b"https://tal.example.net/ta.cer\nrsync://example.net/repo/ta.cer\nMIIB"
.to_vec(),
tal_bytes: b"https://tal.example.net/ta.cer\nrsync://example.net/repo/ta.cer\nMIIB"
.to_vec(),
}],
}
}
@ -74,7 +76,10 @@ mod tests {
} else {
let len = value.len();
let bytes = len.to_be_bytes();
let first_non_zero = bytes.iter().position(|&b| b != 0).unwrap_or(bytes.len() - 1);
let first_non_zero = bytes
.iter()
.position(|&b| b != 0)
.unwrap_or(bytes.len() - 1);
let len_bytes = &bytes[first_non_zero..];
out.push(0x80 | len_bytes.len() as u8);
out.extend_from_slice(len_bytes);
@ -178,8 +183,7 @@ mod tests {
der[idx + sha256_bytes.len() - 1] ^= 0x01;
let err = decode_cir(&der).expect_err("wrong oid must fail");
assert!(
err.to_string()
.contains(crate::data_model::oid::OID_SHA256),
err.to_string().contains(crate::data_model::oid::OID_SHA256),
"{err}"
);
}
@ -222,7 +226,10 @@ mod tests {
tals: Vec::new(),
};
let err = encode_cir(&no_tals).expect_err("empty tals must fail");
assert!(err.to_string().contains("CIR.tals must be non-empty"), "{err}");
assert!(
err.to_string().contains("CIR.tals must be non-empty"),
"{err}"
);
}
#[test]
@ -276,7 +283,10 @@ mod tests {
let mut der = encode_cir(&cir).expect("encode cir");
der.push(0);
let err = decode_cir(&der).expect_err("trailing bytes after cir must fail");
assert!(err.to_string().contains("trailing bytes after CIR"), "{err}");
assert!(
err.to_string().contains("trailing bytes after CIR"),
"{err}"
);
let object = test_encode_tlv(
0x30,
@ -307,7 +317,10 @@ mod tests {
.concat(),
);
let err = decode_cir(&bad).expect_err("trailing field in object must fail");
assert!(err.to_string().contains("trailing fields in CirObject"), "{err}");
assert!(
err.to_string().contains("trailing fields in CirObject"),
"{err}"
);
}
#[test]

View File

@ -111,7 +111,9 @@ fn validate_sorted_unique_strings<'a>(
) -> Result<(), String> {
let mut prev: Option<&'a str> = None;
for key in items {
if let Some(prev_key) = prev && key <= prev_key {
if let Some(prev_key) = prev
&& key <= prev_key
{
return Err(message.into());
}
prev = Some(key);

View File

@ -15,13 +15,24 @@ pub struct CirSequenceStep {
pub cir_path: String,
pub ccr_path: String,
pub report_path: String,
#[serde(default)]
pub timing_path: Option<String>,
#[serde(default)]
pub stdout_log_path: Option<String>,
#[serde(default)]
pub stderr_log_path: Option<String>,
#[serde(default)]
pub artifact_prefix: Option<String>,
pub previous_step_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct CirSequenceManifest {
pub version: u32,
pub static_root: String,
#[serde(default)]
pub static_root: Option<String>,
#[serde(default)]
pub raw_store_db_path: Option<String>,
pub steps: Vec<CirSequenceStep>,
}
@ -30,8 +41,16 @@ impl CirSequenceManifest {
if self.version == 0 {
return Err("sequence.version must be positive".to_string());
}
if self.static_root.trim().is_empty() {
return Err("sequence.static_root must not be empty".to_string());
let backend_count = self.static_root.is_some() as u8 + self.raw_store_db_path.is_some() as u8;
if backend_count != 1 {
return Err("sequence must set exactly one of static_root or raw_store_db_path".to_string());
}
match (self.static_root.as_ref(), self.raw_store_db_path.as_ref()) {
(Some(static_root), None) if !static_root.trim().is_empty() => {}
(None, Some(raw_store_db_path)) if !raw_store_db_path.trim().is_empty() => {}
_ => {
return Err("sequence backend path must not be empty".to_string());
}
}
if self.steps.is_empty() {
return Err("sequence.steps must not be empty".to_string());
@ -49,6 +68,34 @@ impl CirSequenceManifest {
"sequence.steps[{idx}].validation_time must not be empty"
));
}
if let Some(timing_path) = &step.timing_path
&& timing_path.trim().is_empty()
{
return Err(format!(
"sequence.steps[{idx}].timing_path must not be empty when set"
));
}
if let Some(stdout_log_path) = &step.stdout_log_path
&& stdout_log_path.trim().is_empty()
{
return Err(format!(
"sequence.steps[{idx}].stdout_log_path must not be empty when set"
));
}
if let Some(stderr_log_path) = &step.stderr_log_path
&& stderr_log_path.trim().is_empty()
{
return Err(format!(
"sequence.steps[{idx}].stderr_log_path must not be empty when set"
));
}
if let Some(artifact_prefix) = &step.artifact_prefix
&& artifact_prefix.trim().is_empty()
{
return Err(format!(
"sequence.steps[{idx}].artifact_prefix must not be empty when set"
));
}
if step.cir_path.trim().is_empty()
|| step.ccr_path.trim().is_empty()
|| step.report_path.trim().is_empty()
@ -92,7 +139,8 @@ mod tests {
fn sample_manifest() -> CirSequenceManifest {
CirSequenceManifest {
version: 1,
static_root: "static".to_string(),
static_root: Some("static".to_string()),
raw_store_db_path: None,
steps: vec![
CirSequenceStep {
step_id: "full".to_string(),
@ -101,6 +149,10 @@ mod tests {
cir_path: "full/input.cir".to_string(),
ccr_path: "full/result.ccr".to_string(),
report_path: "full/report.json".to_string(),
timing_path: Some("full/timing.json".to_string()),
stdout_log_path: Some("full/stdout.log".to_string()),
stderr_log_path: Some("full/stderr.log".to_string()),
artifact_prefix: Some("2026-04-09T00:00:00Z-test".to_string()),
previous_step_id: None,
},
CirSequenceStep {
@ -110,6 +162,10 @@ mod tests {
cir_path: "delta-001/input.cir".to_string(),
ccr_path: "delta-001/result.ccr".to_string(),
report_path: "delta-001/report.json".to_string(),
timing_path: Some("delta-001/timing.json".to_string()),
stdout_log_path: Some("delta-001/stdout.log".to_string()),
stderr_log_path: Some("delta-001/stderr.log".to_string()),
artifact_prefix: Some("2026-04-09T00:10:00Z-test".to_string()),
previous_step_id: Some("full".to_string()),
},
],
@ -144,4 +200,12 @@ mod tests {
let err = bad.validate().expect_err("missing previous must fail");
assert!(err.contains("previous_step_id"));
}
#[test]
fn sequence_manifest_validate_accepts_raw_store_backend() {
let mut manifest = sample_manifest();
manifest.static_root = None;
manifest.raw_store_db_path = Some("raw-store.db".to_string());
manifest.validate().expect("raw store sequence");
}
}

View File

@ -3,6 +3,7 @@ use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::{Path, PathBuf};
use crate::blob_store::RawObjectStore;
use crate::storage::{RawByHashEntry, RocksStore};
#[derive(Debug, thiserror::Error)]
@ -75,10 +76,7 @@ pub fn static_pool_path(
capture_date_utc: time::Date,
sha256_hex: &str,
) -> Result<PathBuf, CirStaticPoolError> {
Ok(static_root.join(static_pool_relative_path(
capture_date_utc,
sha256_hex,
)?))
Ok(static_root.join(static_pool_relative_path(capture_date_utc, sha256_hex)?))
}
pub fn write_bytes_to_static_pool(
@ -191,12 +189,13 @@ pub fn export_hashes_from_store(
let mut written_files = 0usize;
let mut reused_files = 0usize;
for sha256_hex in &unique {
let entry = store
.get_raw_by_hash_entry(sha256_hex)
let bytes = store
.get_blob_bytes(sha256_hex)
.map_err(|e| CirStaticPoolError::Storage(e.to_string()))?
.ok_or_else(|| CirStaticPoolError::MissingRawByHash {
sha256_hex: sha256_hex.clone(),
})?;
let entry = RawByHashEntry::from_bytes(sha256_hex.clone(), bytes);
let result = write_raw_entry_to_static_pool(static_root, capture_date_utc, &entry)?;
if result.written {
written_files += 1;
@ -223,9 +222,7 @@ fn format_utc_date(date: time::Date) -> String {
fn validate_sha256_hex(sha256_hex: &str) -> Result<(), CirStaticPoolError> {
if sha256_hex.len() != 64 || !sha256_hex.as_bytes().iter().all(u8::is_ascii_hexdigit) {
return Err(CirStaticPoolError::InvalidSha256Hex(
sha256_hex.to_string(),
));
return Err(CirStaticPoolError::InvalidSha256Hex(sha256_hex.to_string()));
}
Ok(())
}
@ -238,8 +235,8 @@ fn compute_sha256_hex(bytes: &[u8]) -> String {
#[cfg(test)]
mod tests {
use super::{
CirStaticPoolError, compute_sha256_hex, export_hashes_from_store, static_pool_relative_path,
write_bytes_to_static_pool,
CirStaticPoolError, compute_sha256_hex, export_hashes_from_store,
static_pool_relative_path, write_bytes_to_static_pool,
};
use crate::storage::{RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore};
use std::fs;
@ -270,8 +267,8 @@ mod tests {
let bytes = b"static-pool-object";
let sha = compute_sha256_hex(bytes);
let first = write_bytes_to_static_pool(td.path(), sample_date(), &sha, bytes)
.expect("first write");
let first =
write_bytes_to_static_pool(td.path(), sample_date(), &sha, bytes).expect("first write");
let second = write_bytes_to_static_pool(td.path(), sample_date(), &sha, bytes)
.expect("second write");
@ -281,11 +278,13 @@ mod tests {
let all_files: Vec<_> = walk_files(td.path());
assert_eq!(all_files.len(), 1);
assert!(!all_files[0]
.file_name()
.and_then(|name| name.to_str())
.unwrap_or_default()
.contains(".tmp."));
assert!(
!all_files[0]
.file_name()
.and_then(|name| name.to_str())
.unwrap_or_default()
.contains(".tmp.")
);
}
#[test]
@ -315,7 +314,9 @@ mod tests {
let bytes = b"store-object".to_vec();
let sha = compute_sha256_hex(&bytes);
let mut entry = RawByHashEntry::from_bytes(sha.clone(), bytes.clone());
entry.origin_uris.push("rsync://example.test/repo/object.cer".to_string());
entry
.origin_uris
.push("rsync://example.test/repo/object.cer".to_string());
store.put_raw_by_hash_entry(&entry).expect("put raw entry");
store
.put_repository_view_entry(&RepositoryViewEntry {

View File

@ -4,8 +4,8 @@ use std::path::{Path, PathBuf};
use crate::analysis::timing::{TimingHandle, TimingMeta, TimingMetaUpdate};
use crate::audit::{
AspaOutput, AuditReportV2, AuditRunMeta, AuditWarning, TreeSummary, VrpOutput,
format_roa_ip_prefix,
AspaOutput, AuditReportV2, AuditRepoSyncStats, AuditRunMeta, AuditWarning, TreeSummary,
VrpOutput, format_roa_ip_prefix,
};
use crate::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use crate::fetch::rsync::LocalDirRsyncFetcher;
@ -51,6 +51,7 @@ pub struct CliArgs {
pub ta_path: Option<PathBuf>,
pub db_path: PathBuf,
pub raw_store_db: Option<PathBuf>,
pub policy_path: Option<PathBuf>,
pub report_json_path: Option<PathBuf>,
pub ccr_out_path: Option<PathBuf>,
@ -92,12 +93,13 @@ Usage:
Options:
--db <path> RocksDB directory path (required)
--raw-store-db <path> External raw-by-hash store DB path (optional)
--policy <path> Policy TOML path (optional)
--report-json <path> Write full audit report as JSON (optional)
--ccr-out <path> Write CCR DER ContentInfo to this path (optional)
--cir-enable Export CIR after the run completes
--cir-out <path> Write CIR DER to this path (requires --cir-enable)
--cir-static-root <path> Shared static pool root for CIR export (requires --cir-enable)
--cir-static-root <path> Shared static pool root for CIR export (requires --cir-enable unless --raw-store-db is used)
--cir-tal-uri <url> Override TAL URI for CIR export when using --tal-path (optional)
--payload-replay-archive <path> Use local payload replay archive root (offline replay mode)
--payload-replay-locks <path> Use local payload replay locks.json (offline replay mode)
@ -134,6 +136,7 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let mut ta_path: Option<PathBuf> = None;
let mut db_path: Option<PathBuf> = None;
let mut raw_store_db: Option<PathBuf> = None;
let mut policy_path: Option<PathBuf> = None;
let mut report_json_path: Option<PathBuf> = None;
let mut ccr_out_path: Option<PathBuf> = None;
@ -152,8 +155,8 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let mut rsync_local_dir: Option<PathBuf> = None;
let mut disable_rrdp: bool = false;
let mut rsync_command: Option<PathBuf> = None;
let mut http_timeout_secs: u64 = 20;
let mut rsync_timeout_secs: u64 = 60;
let mut http_timeout_secs: u64 = 30;
let mut rsync_timeout_secs: u64 = 30;
let mut rsync_mirror_root: Option<PathBuf> = None;
let mut max_depth: Option<usize> = None;
let mut max_instances: Option<usize> = None;
@ -186,6 +189,11 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let v = argv.get(i).ok_or("--db requires a value")?;
db_path = Some(PathBuf::from(v));
}
"--raw-store-db" => {
i += 1;
let v = argv.get(i).ok_or("--raw-store-db requires a value")?;
raw_store_db = Some(PathBuf::from(v));
}
"--policy" => {
i += 1;
let v = argv.get(i).ok_or("--policy requires a value")?;
@ -247,7 +255,9 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
}
"--payload-base-validation-time" => {
i += 1;
let v = argv.get(i).ok_or("--payload-base-validation-time requires a value")?;
let v = argv
.get(i)
.ok_or("--payload-base-validation-time requires a value")?;
use time::format_description::well_known::Rfc3339;
let t = time::OffsetDateTime::parse(v, &Rfc3339).map_err(|e| {
format!("invalid --payload-base-validation-time (RFC3339 expected): {e}")
@ -350,13 +360,17 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
usage()
));
}
if cir_enabled && (cir_out_path.is_none() || cir_static_root.is_none()) {
let cir_backend_count = cir_static_root.is_some() as u8 + raw_store_db.is_some() as u8;
if cir_enabled && (cir_out_path.is_none() || cir_backend_count != 1) {
return Err(format!(
"--cir-enable requires both --cir-out and --cir-static-root\n\n{}",
"--cir-enable requires --cir-out and exactly one of --cir-static-root or --raw-store-db\n\n{}",
usage()
));
}
if !cir_enabled && (cir_out_path.is_some() || cir_static_root.is_some() || cir_tal_uri.is_some())
if !cir_enabled
&& (cir_out_path.is_some()
|| cir_static_root.is_some()
|| cir_tal_uri.is_some())
{
return Err(format!(
"--cir-out/--cir-static-root/--cir-tal-uri require --cir-enable\n\n{}",
@ -459,6 +473,7 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
tal_path,
ta_path,
db_path,
raw_store_db,
policy_path,
report_json_path,
ccr_out_path,
@ -578,6 +593,8 @@ fn build_report(
})
.collect::<Vec<_>>();
let repo_sync_stats = build_repo_sync_stats(&out.publication_points);
AuditReportV2 {
format_version: 2,
meta: AuditRunMeta {
@ -594,9 +611,36 @@ fn build_report(
aspas,
downloads: out.downloads,
download_stats: out.download_stats,
repo_sync_stats,
}
}
fn build_repo_sync_stats(
publication_points: &[crate::audit::PublicationPointAudit],
) -> AuditRepoSyncStats {
let mut stats = AuditRepoSyncStats {
publication_points_total: publication_points.len() as u64,
..AuditRepoSyncStats::default()
};
for pp in publication_points {
let duration = pp.repo_sync_duration_ms.unwrap_or(0);
if let Some(phase) = pp.repo_sync_phase.as_ref() {
let entry = stats.by_phase.entry(phase.clone()).or_default();
entry.count += 1;
entry.duration_ms_total += duration;
}
let entry = stats
.by_terminal_state
.entry(pp.repo_terminal_state.clone())
.or_default();
entry.count += 1;
entry.duration_ms_total += duration;
}
stats
}
pub fn run(argv: &[String]) -> Result<(), String> {
let args = parse_args(argv)?;
@ -608,7 +652,12 @@ pub fn run(argv: &[String]) -> Result<(), String> {
.validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc);
let store = RocksStore::open(&args.db_path).map_err(|e| e.to_string())?;
let store = if let Some(raw_store_db) = args.raw_store_db.as_ref() {
RocksStore::open_with_external_raw_store(&args.db_path, raw_store_db)
.map_err(|e| e.to_string())?
} else {
RocksStore::open(&args.db_path).map_err(|e| e.to_string())?
};
let config = TreeRunConfig {
max_depth: args.max_depth,
max_instances: args.max_instances,
@ -1094,10 +1143,6 @@ pub fn run(argv: &[String]) -> Result<(), String> {
.cir_out_path
.as_deref()
.expect("validated by parse_args for cir");
let cir_static_root = args
.cir_static_root
.as_deref()
.expect("validated by parse_args for cir");
let summary = export_cir_from_run(
&store,
&out.discovery.trust_anchor,
@ -1105,7 +1150,8 @@ pub fn run(argv: &[String]) -> Result<(), String> {
validation_time,
&out.publication_points,
cir_out_path,
cir_static_root,
args.cir_static_root.as_deref(),
args.raw_store_db.as_deref(),
time::OffsetDateTime::now_utc().date(),
)
.map_err(|e| e.to_string())?;
@ -1113,13 +1159,22 @@ pub fn run(argv: &[String]) -> Result<(), String> {
cir_static_pool_ms = Some(summary.timing.static_pool_ms);
cir_write_cir_ms = Some(summary.timing.write_cir_ms);
cir_total_ms = Some(summary.timing.total_ms);
let (backend_name, written_entries, reused_entries) = match &summary.object_pool {
crate::cir::export::CirObjectPoolExportSummary::Static(s) => {
("static", s.written_files, s.reused_files)
}
crate::cir::export::CirObjectPoolExportSummary::RawStore(s) => {
("raw-store", s.written_entries, s.reused_entries)
}
};
eprintln!(
"wrote CIR: {} (objects={}, tals={}, static_written={}, static_reused={}, build_cir_ms={}, static_pool_ms={}, write_cir_ms={}, total_ms={})",
"wrote CIR: {} (objects={}, tals={}, backend={}, written={}, reused={}, build_cir_ms={}, static_pool_ms={}, write_cir_ms={}, total_ms={})",
cir_out_path.display(),
summary.object_count,
summary.tal_count,
summary.static_pool.written_files,
summary.static_pool.reused_files,
backend_name,
written_entries,
reused_entries,
summary.timing.build_cir_ms,
summary.timing.static_pool_ms,
summary.timing.write_cir_ms,
@ -1160,7 +1215,12 @@ pub fn run(argv: &[String]) -> Result<(), String> {
&stage_timing_path,
serde_json::to_vec_pretty(&stage_timing).map_err(|e| e.to_string())?,
)
.map_err(|e| format!("write stage timing failed: {}: {e}", stage_timing_path.display()))?;
.map_err(|e| {
format!(
"write stage timing failed: {}: {e}",
stage_timing_path.display()
)
})?;
eprintln!("analysis: wrote {}", stage_timing_path.display());
}
}
@ -1290,7 +1350,57 @@ mod tests {
"out/example.ccr".to_string(),
];
let args = parse_args(&argv).expect("parse args");
assert_eq!(args.ccr_out_path.as_deref(), Some(std::path::Path::new("out/example.ccr")));
assert_eq!(
args.ccr_out_path.as_deref(),
Some(std::path::Path::new("out/example.ccr"))
);
}
#[test]
fn parse_accepts_external_raw_store_db() {
let argv = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--raw-store-db".to_string(),
"raw-store.db".to_string(),
"--tal-url".to_string(),
"https://example.test/x.tal".to_string(),
];
let args = parse_args(&argv).expect("parse args");
assert_eq!(
args.raw_store_db.as_deref(),
Some(std::path::Path::new("raw-store.db"))
);
}
#[test]
fn parse_accepts_cir_enable_with_raw_store_backend() {
let argv = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--raw-store-db".to_string(),
"raw-store.db".to_string(),
"--tal-path".to_string(),
"x.tal".to_string(),
"--ta-path".to_string(),
"x.cer".to_string(),
"--rsync-local-dir".to_string(),
"repo".to_string(),
"--cir-enable".to_string(),
"--cir-out".to_string(),
"out/example.cir".to_string(),
"--cir-tal-uri".to_string(),
"https://example.test/root.tal".to_string(),
];
let args = parse_args(&argv).expect("parse args");
assert!(args.cir_enabled);
assert_eq!(
args.raw_store_db.as_deref(),
Some(std::path::Path::new("raw-store.db"))
);
assert_eq!(args.cir_static_root, None);
}
#[test]
@ -1315,9 +1425,18 @@ mod tests {
];
let args = parse_args(&argv).expect("parse args");
assert!(args.cir_enabled);
assert_eq!(args.cir_out_path.as_deref(), Some(std::path::Path::new("out/example.cir")));
assert_eq!(args.cir_static_root.as_deref(), Some(std::path::Path::new("out/static")));
assert_eq!(args.cir_tal_uri.as_deref(), Some("https://example.test/root.tal"));
assert_eq!(
args.cir_out_path.as_deref(),
Some(std::path::Path::new("out/example.cir"))
);
assert_eq!(
args.cir_static_root.as_deref(),
Some(std::path::Path::new("out/static"))
);
assert_eq!(
args.cir_tal_uri.as_deref(),
Some("https://example.test/root.tal")
);
}
#[test]
@ -1333,7 +1452,10 @@ mod tests {
"out/example.cir".to_string(),
];
let err = parse_args(&argv_missing).unwrap_err();
assert!(err.contains("--cir-enable requires both --cir-out and --cir-static-root"), "{err}");
assert!(
err.contains("--cir-enable requires --cir-out and exactly one of --cir-static-root or --raw-store-db"),
"{err}"
);
let argv_needs_enable = vec![
"rpki".to_string(),
@ -1835,6 +1957,7 @@ mod tests {
aspas: Vec::new(),
downloads: Vec::new(),
download_stats: crate::audit::AuditDownloadStats::default(),
repo_sync_stats: crate::audit::AuditRepoSyncStats::default(),
};
let dir = tempfile::tempdir().expect("tmpdir");
@ -1844,4 +1967,35 @@ mod tests {
assert!(s.contains("\"format_version\""));
assert!(s.contains("\"policy\""));
}
#[test]
fn build_repo_sync_stats_aggregates_phase_and_terminal_state() {
let mut pp1 = crate::audit::PublicationPointAudit::default();
pp1.repo_sync_phase = Some("rrdp_ok".to_string());
pp1.repo_sync_duration_ms = Some(10);
pp1.repo_terminal_state = "fresh".to_string();
let mut pp2 = crate::audit::PublicationPointAudit::default();
pp2.repo_sync_phase = Some("rrdp_failed_rsync_failed".to_string());
pp2.repo_sync_duration_ms = Some(20);
pp2.repo_terminal_state = "failed_no_cache".to_string();
let mut pp3 = crate::audit::PublicationPointAudit::default();
pp3.repo_sync_phase = Some("rrdp_failed_rsync_failed".to_string());
pp3.repo_sync_duration_ms = Some(30);
pp3.repo_terminal_state = "failed_no_cache".to_string();
let stats = build_repo_sync_stats(&[pp1, pp2, pp3]);
assert_eq!(stats.publication_points_total, 3);
assert_eq!(stats.by_phase["rrdp_ok"].count, 1);
assert_eq!(stats.by_phase["rrdp_ok"].duration_ms_total, 10);
assert_eq!(stats.by_phase["rrdp_failed_rsync_failed"].count, 2);
assert_eq!(stats.by_phase["rrdp_failed_rsync_failed"].duration_ms_total, 50);
assert_eq!(stats.by_terminal_state["fresh"].count, 1);
assert_eq!(stats.by_terminal_state["failed_no_cache"].count, 2);
assert_eq!(
stats.by_terminal_state["failed_no_cache"].duration_ms_total,
50
);
}
}

View File

@ -3,12 +3,9 @@ use crate::data_model::oid::{
};
use crate::data_model::rc::{
AsIdOrRange, AsIdentifierChoice, ResourceCertKind, ResourceCertificate,
ResourceCertificateParseError, ResourceCertificateParsed,
ResourceCertificateProfileError,
};
use crate::validation::cert_path::{
CertPathError, validate_ee_cert_path_with_predecoded_ee,
ResourceCertificateParseError, ResourceCertificateParsed, ResourceCertificateProfileError,
};
use crate::validation::cert_path::{CertPathError, validate_ee_cert_path_with_predecoded_ee};
use x509_parser::extensions::ParsedExtension;
use x509_parser::prelude::{FromDer, X509Certificate};
use x509_parser::public_key::PublicKey;
@ -33,7 +30,7 @@ pub enum BgpsecRouterCertificateParseError {
#[error("resource certificate parse error: {0} (RFC 5280 §4.1; RFC 6487 §4; RFC 8209 §3.1)")]
ResourceCertificate(#[from] ResourceCertificateParseError),
#[error("X.509 parse error: {0} (RFC 5280 §4.1; RFC 8209 §3.1)" )]
#[error("X.509 parse error: {0} (RFC 5280 §4.1; RFC 8209 §3.1)")]
X509(String),
#[error("trailing bytes after router certificate DER: {0} bytes (DER; RFC 5280 §4.1)")]
@ -54,46 +51,72 @@ pub enum BgpsecRouterCertificateProfileError {
#[error("BGPsec router certificate must be an EE certificate (RFC 8209 §3.1)")]
NotEe,
#[error("BGPsec router certificate must contain SubjectKeyIdentifier (RFC 6487 §4.8.2; RFC 8209 §3.3)")]
#[error(
"BGPsec router certificate must contain SubjectKeyIdentifier (RFC 6487 §4.8.2; RFC 8209 §3.3)"
)]
MissingSki,
#[error("BGPsec router certificate must include ExtendedKeyUsage (RFC 8209 §3.1.3.2; RFC 8209 §3.3)")]
#[error(
"BGPsec router certificate must include ExtendedKeyUsage (RFC 8209 §3.1.3.2; RFC 8209 §3.3)"
)]
MissingExtendedKeyUsage,
#[error("BGPsec router certificate ExtendedKeyUsage must be non-critical (RFC 6487 §4.8.4; RFC 8209 §3.1.3.2)")]
#[error(
"BGPsec router certificate ExtendedKeyUsage must be non-critical (RFC 6487 §4.8.4; RFC 8209 §3.1.3.2)"
)]
ExtendedKeyUsageCriticality,
#[error("BGPsec router certificate ExtendedKeyUsage must contain id-kp-bgpsec-router ({OID_KP_BGPSEC_ROUTER}) (RFC 8209 §3.1.3.2; RFC 8209 §3.3)")]
#[error(
"BGPsec router certificate ExtendedKeyUsage must contain id-kp-bgpsec-router ({OID_KP_BGPSEC_ROUTER}) (RFC 8209 §3.1.3.2; RFC 8209 §3.3)"
)]
MissingBgpsecRouterEku,
#[error("BGPsec router certificate MUST NOT include Subject Information Access (RFC 8209 §3.1.3.3; RFC 8209 §3.3)")]
#[error(
"BGPsec router certificate MUST NOT include Subject Information Access (RFC 8209 §3.1.3.3; RFC 8209 §3.3)"
)]
SubjectInfoAccessPresent,
#[error("BGPsec router certificate MUST NOT include IP resources extension (RFC 8209 §3.1.3.4; RFC 8209 §3.3)")]
#[error(
"BGPsec router certificate MUST NOT include IP resources extension (RFC 8209 §3.1.3.4; RFC 8209 §3.3)"
)]
IpResourcesPresent,
#[error("BGPsec router certificate MUST include AS resources extension (RFC 8209 §3.1.3.5; RFC 8209 §3.3)")]
#[error(
"BGPsec router certificate MUST include AS resources extension (RFC 8209 §3.1.3.5; RFC 8209 §3.3)"
)]
AsResourcesMissing,
#[error("BGPsec router certificate AS resources MUST include one or more ASNs (RFC 8209 §3.1.3.5)")]
#[error(
"BGPsec router certificate AS resources MUST include one or more ASNs (RFC 8209 §3.1.3.5)"
)]
AsResourcesAsnumMissing,
#[error("BGPsec router certificate AS resources MUST NOT use inherit (RFC 8209 §3.1.3.5)")]
AsResourcesInherit,
#[error("BGPsec router certificate AS resources MUST contain explicit ASNs, not ranges (RFC 8209 §3.1.3.5)")]
#[error(
"BGPsec router certificate AS resources MUST contain explicit ASNs, not ranges (RFC 8209 §3.1.3.5)"
)]
AsResourcesRangeNotAllowed,
#[error("BGPsec router certificate subjectPublicKeyInfo.algorithm must be id-ecPublicKey ({OID_EC_PUBLIC_KEY}) (RFC 8208 §3.1)")]
#[error(
"BGPsec router certificate subjectPublicKeyInfo.algorithm must be id-ecPublicKey ({OID_EC_PUBLIC_KEY}) (RFC 8208 §3.1)"
)]
SpkiAlgorithmNotEcPublicKey,
#[error("BGPsec router certificate subjectPublicKeyInfo.parameters must be secp256r1 ({OID_SECP256R1}) (RFC 8208 §3.1)")]
#[error(
"BGPsec router certificate subjectPublicKeyInfo.parameters must be secp256r1 ({OID_SECP256R1}) (RFC 8208 §3.1)"
)]
SpkiWrongCurve,
#[error("BGPsec router certificate subjectPublicKeyInfo.parameters missing or invalid (RFC 8208 §3.1)")]
#[error(
"BGPsec router certificate subjectPublicKeyInfo.parameters missing or invalid (RFC 8208 §3.1)"
)]
SpkiParametersMissingOrInvalid,
#[error("BGPsec router certificate subjectPublicKey MUST be uncompressed P-256 ECPoint (RFC 8208 §3.1)")]
#[error(
"BGPsec router certificate subjectPublicKey MUST be uncompressed P-256 ECPoint (RFC 8208 §3.1)"
)]
SpkiEcPointNotUncompressedP256,
}
@ -116,16 +139,21 @@ pub enum BgpsecRouterCertificatePathError {
}
impl BgpsecRouterCertificate {
pub fn parse_der(der: &[u8]) -> Result<BgpsecRouterCertificateParsed, BgpsecRouterCertificateParseError> {
pub fn parse_der(
der: &[u8],
) -> Result<BgpsecRouterCertificateParsed, BgpsecRouterCertificateParseError> {
let (rem, cert) = X509Certificate::from_der(der)
.map_err(|e| BgpsecRouterCertificateParseError::X509(e.to_string()))?;
if !rem.is_empty() {
return Err(BgpsecRouterCertificateParseError::TrailingBytes(rem.len()));
}
let (spki_rem, _spki) = SubjectPublicKeyInfo::from_der(cert.tbs_certificate.subject_pki.raw)
.map_err(|e| BgpsecRouterCertificateParseError::SpkiParse(e.to_string()))?;
let (spki_rem, _spki) =
SubjectPublicKeyInfo::from_der(cert.tbs_certificate.subject_pki.raw)
.map_err(|e| BgpsecRouterCertificateParseError::SpkiParse(e.to_string()))?;
if !spki_rem.is_empty() {
return Err(BgpsecRouterCertificateParseError::SpkiTrailingBytes(spki_rem.len()));
return Err(BgpsecRouterCertificateParseError::SpkiTrailingBytes(
spki_rem.len(),
));
}
let rc_parsed = ResourceCertificate::parse_der(der)?;
Ok(BgpsecRouterCertificateParsed { rc_parsed })
@ -170,7 +198,9 @@ impl BgpsecRouterCertificate {
}
impl BgpsecRouterCertificateParsed {
pub fn validate_profile(self) -> Result<BgpsecRouterCertificate, BgpsecRouterCertificateProfileError> {
pub fn validate_profile(
self,
) -> Result<BgpsecRouterCertificate, BgpsecRouterCertificateProfileError> {
let rc = self.rc_parsed.validate_profile()?;
if rc.kind != ResourceCertKind::Ee {
return Err(BgpsecRouterCertificateProfileError::NotEe);
@ -196,15 +226,17 @@ impl BgpsecRouterCertificateParsed {
.ok_or(BgpsecRouterCertificateProfileError::AsResourcesMissing)?;
let asns = extract_router_asns(as_resources)?;
let (rem, cert) = X509Certificate::from_der(&rc.raw_der)
.map_err(|e| BgpsecRouterCertificateProfileError::ResourceCertificate(
ResourceCertificateProfileError::InvalidCertificatePolicy(e.to_string())
))?;
let (rem, cert) = X509Certificate::from_der(&rc.raw_der).map_err(|e| {
BgpsecRouterCertificateProfileError::ResourceCertificate(
ResourceCertificateProfileError::InvalidCertificatePolicy(e.to_string()),
)
})?;
if !rem.is_empty() {
return Err(BgpsecRouterCertificateProfileError::ResourceCertificate(
ResourceCertificateProfileError::InvalidCertificatePolicy(
format!("trailing bytes after router certificate DER: {}", rem.len()),
),
ResourceCertificateProfileError::InvalidCertificatePolicy(format!(
"trailing bytes after router certificate DER: {}",
rem.len()
)),
));
}
validate_router_eku(&cert)?;
@ -243,7 +275,7 @@ fn extract_router_asns(
match item {
AsIdOrRange::Id(v) => asns.push(*v),
AsIdOrRange::Range { .. } => {
return Err(BgpsecRouterCertificateProfileError::AsResourcesRangeNotAllowed)
return Err(BgpsecRouterCertificateProfileError::AsResourcesRangeNotAllowed);
}
}
}
@ -252,7 +284,9 @@ fn extract_router_asns(
Ok(asns)
}
fn validate_router_eku(cert: &X509Certificate<'_>) -> Result<(), BgpsecRouterCertificateProfileError> {
fn validate_router_eku(
cert: &X509Certificate<'_>,
) -> Result<(), BgpsecRouterCertificateProfileError> {
let mut matches = cert
.tbs_certificate
.extensions()

View File

@ -326,10 +326,7 @@ impl RpkiSignedObject {
/// Verify the CMS signature using the embedded EE certificate public key.
pub fn verify_signature(&self) -> Result<(), SignedObjectVerifyError> {
let ee = &self.signed_data.certificates[0];
self.verify_signature_with_rsa_components(
&ee.rsa_public_modulus,
&ee.rsa_public_exponent,
)
self.verify_signature_with_rsa_components(&ee.rsa_public_modulus, &ee.rsa_public_exponent)
}
/// Verify the CMS signature using a DER-encoded SubjectPublicKeyInfo.
@ -451,9 +448,7 @@ impl<'a> CmsReader<'a> {
let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
let tag = header_to_single_byte_tag(&any.header)?;
if tag != 0x04 && tag != 0x24 {
return Err(format!(
"unexpected tag: got 0x{tag:02X}, expected 0x04"
));
return Err(format!("unexpected tag: got 0x{tag:02X}, expected 0x04"));
}
let octets = flatten_octet_string(any)?;
self.buf = rem;
@ -516,7 +511,9 @@ fn header_to_single_byte_tag(header: &Header<'_>) -> Result<u8, String> {
if tag_no > 30 {
return Err(format!("high-tag-number form not supported: {tag_no}"));
}
Ok(((header.class() as u8) << 6) | if header.constructed() { 0x20 } else { 0x00 } | tag_no as u8)
Ok(((header.class() as u8) << 6)
| if header.constructed() { 0x20 } else { 0x00 }
| tag_no as u8)
}
fn cms_take_tlv(input: &[u8]) -> Result<(u8, &[u8], &[u8]), String> {
@ -759,20 +756,18 @@ fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedOb
"trailing bytes after EE SubjectPublicKeyInfo DER".to_string(),
));
}
let parsed_pk = spki
.parsed()
.map_err(|_e| SignedObjectValidateError::EeCertificateParse(
let parsed_pk = spki.parsed().map_err(|_e| {
SignedObjectValidateError::EeCertificateParse(
"unsupported EE public key algorithm".to_string(),
))?;
)
})?;
let (rsa_public_modulus, rsa_public_exponent) = match parsed_pk {
PublicKey::RSA(rsa) => {
let modulus = strip_leading_zeros(rsa.modulus).to_vec();
let exponent = strip_leading_zeros(rsa.exponent).to_vec();
let _ = rsa
.try_exponent()
.map_err(|_e| SignedObjectValidateError::EeCertificateParse(
"invalid EE RSA exponent".to_string(),
))?;
let _ = rsa.try_exponent().map_err(|_e| {
SignedObjectValidateError::EeCertificateParse("invalid EE RSA exponent".to_string())
})?;
(modulus, exponent)
}
_ => {

View File

@ -7,6 +7,8 @@ use crate::sync::rrdp::Fetcher;
#[derive(Clone, Debug)]
pub struct HttpFetcherConfig {
/// Connection-establishment timeout for HTTP requests.
pub connect_timeout: Duration,
/// Short timeout used for connection establishment and small metadata objects.
pub timeout: Duration,
/// Larger timeout used for RRDP snapshot / delta bodies.
@ -17,7 +19,8 @@ pub struct HttpFetcherConfig {
impl Default for HttpFetcherConfig {
fn default() -> Self {
Self {
timeout: Duration::from_secs(20),
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(30),
large_body_timeout: Duration::from_secs(180),
user_agent: "rpki-dev/0.1 (stage2)".to_string(),
}
@ -41,14 +44,15 @@ impl BlockingHttpFetcher {
pub fn new(config: HttpFetcherConfig) -> Result<Self, String> {
let short_timeout = config.timeout;
let large_body_timeout = std::cmp::max(config.large_body_timeout, config.timeout);
let connect_timeout = std::cmp::min(config.connect_timeout, config.timeout);
let short_client = Client::builder()
.connect_timeout(config.timeout)
.connect_timeout(connect_timeout)
.timeout(config.timeout)
.user_agent(config.user_agent.clone())
.build()
.map_err(|e| e.to_string())?;
let large_body_client = Client::builder()
.connect_timeout(config.timeout)
.connect_timeout(connect_timeout)
.timeout(large_body_timeout)
.user_agent(config.user_agent)
.build()
@ -64,24 +68,21 @@ impl BlockingHttpFetcher {
pub fn fetch_bytes(&self, uri: &str) -> Result<Vec<u8>, String> {
let started = std::time::Instant::now();
let (client, timeout_profile, timeout_value) = self.client_for_uri(uri);
let resp = client
.get(uri)
.send()
.map_err(|e| {
let msg = format!("http request failed: {e}");
crate::progress_log::emit(
"http_fetch_failed",
serde_json::json!({
"uri": uri,
"stage": "request",
"timeout_profile": timeout_profile,
"request_timeout_ms": timeout_value.as_millis() as u64,
"duration_ms": started.elapsed().as_millis() as u64,
"error": msg,
}),
);
msg
})?;
let resp = client.get(uri).send().map_err(|e| {
let msg = format!("http request failed: {e}");
crate::progress_log::emit(
"http_fetch_failed",
serde_json::json!({
"uri": uri,
"stage": "request",
"timeout_profile": timeout_profile,
"request_timeout_ms": timeout_value.as_millis() as u64,
"duration_ms": started.elapsed().as_millis() as u64,
"error": msg,
}),
);
msg
})?;
let status = resp.status();
let headers = resp.headers().clone();
@ -175,7 +176,11 @@ impl BlockingHttpFetcher {
fn client_for_uri(&self, uri: &str) -> (&Client, &'static str, Duration) {
if uses_large_body_timeout(uri) {
(&self.large_body_client, "large_body", self.large_body_timeout)
(
&self.large_body_client,
"large_body",
self.large_body_timeout,
)
} else {
(&self.short_client, "short", self.short_timeout)
}
@ -200,9 +205,7 @@ fn header_value_opt(headers: &HeaderMap, name: &str) -> Option<String> {
}
fn uses_large_body_timeout(uri: &str) -> bool {
uri.starts_with("https://")
&& uri.ends_with(".xml")
&& !uri.ends_with("notification.xml")
uri.starts_with("https://") && uri.ends_with(".xml") && !uri.ends_with("notification.xml")
}
#[cfg(test)]
@ -280,13 +283,17 @@ mod tests {
#[test]
fn uses_large_body_timeout_selects_rrdp_snapshot_and_delta_not_notification() {
assert!(!uses_large_body_timeout("https://rrdp.example.test/notification.xml"));
assert!(!uses_large_body_timeout(
"https://rrdp.example.test/notification.xml"
));
assert!(uses_large_body_timeout(
"https://rrdp.example.test/session/123/snapshot.xml"
));
assert!(uses_large_body_timeout(
"https://rrdp.example.test/session/123/delta-42.xml"
));
assert!(!uses_large_body_timeout("https://tal.example.test/example.tal"));
assert!(!uses_large_body_timeout(
"https://tal.example.test/example.tal"
));
}
}

View File

@ -1,6 +1,10 @@
use std::cell::RefCell;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::process::Stdio;
use std::thread;
use std::time::Duration;
use std::time::Instant;
use sha2::Digest;
use uuid::Uuid;
@ -12,6 +16,7 @@ use crate::fetch::rsync::{
#[derive(Clone, Debug)]
pub struct SystemRsyncConfig {
pub rsync_bin: PathBuf,
pub connect_timeout: Duration,
pub timeout: Duration,
pub extra_args: Vec<String>,
/// Optional root directory for persistent rsync mirrors.
@ -28,7 +33,8 @@ impl Default for SystemRsyncConfig {
fn default() -> Self {
Self {
rsync_bin: PathBuf::from("rsync"),
timeout: Duration::from_secs(60),
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(30),
extra_args: Vec::new(),
mirror_root: None,
}
@ -44,6 +50,39 @@ pub struct SystemRsyncFetcher {
config: SystemRsyncConfig,
}
thread_local! {
static RSYNC_TIMEOUT_OVERRIDE: RefCell<Option<Duration>> = const { RefCell::new(None) };
static RSYNC_FAIL_FAST_PROFILE: RefCell<Option<RsyncFailFastProfile>> = const { RefCell::new(None) };
}
pub fn with_scoped_rsync_timeout_override<R>(timeout: Duration, f: impl FnOnce() -> R) -> R {
RSYNC_TIMEOUT_OVERRIDE.with(|cell| {
let previous = cell.replace(Some(timeout));
let result = f();
let _ = cell.replace(previous);
result
})
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RsyncFailFastProfile {
pub initial_wall_clock_timeout: Duration,
pub max_wall_clock_timeout: Duration,
pub max_attempts: usize,
}
pub fn with_scoped_rsync_fail_fast_profile<R>(
profile: RsyncFailFastProfile,
f: impl FnOnce() -> R,
) -> R {
RSYNC_FAIL_FAST_PROFILE.with(|cell| {
let previous = cell.replace(Some(profile));
let result = f();
let _ = cell.replace(previous);
result
})
}
impl SystemRsyncFetcher {
pub fn new(config: SystemRsyncConfig) -> Self {
Self { config }
@ -69,32 +108,164 @@ impl SystemRsyncFetcher {
}
fn run_rsync(&self, src: &str, dst: &Path) -> Result<(), String> {
let fail_fast = RSYNC_FAIL_FAST_PROFILE.with(|cell| *cell.borrow());
if let Some(profile) = fail_fast {
return self.run_rsync_fail_fast(src, dst, profile);
}
self.run_rsync_once(src, dst, None, false)
}
fn run_rsync_once(
&self,
src: &str,
dst: &Path,
wall_clock_timeout: Option<Duration>,
keep_partial: bool,
) -> Result<(), String> {
// `--timeout` is I/O timeout in seconds (applies to network reads/writes).
let timeout_secs = self.config.timeout.as_secs().max(1).to_string();
let timeout =
RSYNC_TIMEOUT_OVERRIDE.with(|cell| cell.borrow().unwrap_or(self.config.timeout));
let connect_timeout_secs = self.config.connect_timeout.as_secs().max(1).to_string();
let timeout_secs = timeout.as_secs().max(1).to_string();
let is_remote_rsync = src.starts_with("rsync://");
let mut cmd = Command::new(&self.config.rsync_bin);
cmd.arg("-rt")
.arg("--delete")
.arg("--timeout")
.arg(timeout_secs)
.args(&self.config.extra_args)
.arg(src)
.arg(dst);
let out = cmd
.output()
.map_err(|e| format!("rsync spawn failed: {e}"))?;
if !out.status.success() {
let stderr = String::from_utf8_lossy(&out.stderr);
let stdout = String::from_utf8_lossy(&out.stdout);
return Err(format!(
"rsync failed: status={} stdout={} stderr={}",
out.status,
stdout.trim(),
stderr.trim()
));
.args(&self.config.extra_args);
if is_remote_rsync {
cmd.arg("--contimeout").arg(connect_timeout_secs);
}
if keep_partial {
cmd.arg("--partial");
}
cmd.arg(src)
.arg(dst)
.stdout(Stdio::piped())
.stderr(Stdio::piped());
let mut child = cmd
.spawn()
.map_err(|e| format!("rsync spawn failed: {e}"))?;
if let Some(limit) = wall_clock_timeout {
let started = Instant::now();
loop {
match child
.try_wait()
.map_err(|e| format!("rsync wait failed: {e}"))?
{
Some(_status) => {
let out = child
.wait_with_output()
.map_err(|e| format!("rsync wait_with_output failed: {e}"))?;
if out.status.success() {
return Ok(());
}
let stderr = String::from_utf8_lossy(&out.stderr);
let stdout = String::from_utf8_lossy(&out.stdout);
return Err(format!(
"rsync failed: status={} stdout={} stderr={}",
out.status,
stdout.trim(),
stderr.trim()
));
}
None => {
if started.elapsed() >= limit {
let _ = child.kill();
let out = child
.wait_with_output()
.map_err(|e| format!("rsync wait_with_output failed: {e}"))?;
let stderr = String::from_utf8_lossy(&out.stderr);
let stdout = String::from_utf8_lossy(&out.stdout);
return Err(format!(
"rsync wall-clock timeout after {}s: stdout={} stderr={}",
limit.as_secs(),
stdout.trim(),
stderr.trim()
));
}
thread::sleep(Duration::from_millis(100));
}
}
}
}
let out = child
.wait_with_output()
.map_err(|e| format!("rsync wait_with_output failed: {e}"))?;
if out.status.success() {
return Ok(());
}
let stderr = String::from_utf8_lossy(&out.stderr);
let stdout = String::from_utf8_lossy(&out.stdout);
Err(format!(
"rsync failed: status={} stdout={} stderr={}",
out.status,
stdout.trim(),
stderr.trim()
))
}
fn run_rsync_fail_fast(
&self,
src: &str,
dst: &Path,
profile: RsyncFailFastProfile,
) -> Result<(), String> {
let mut attempt = 0usize;
let mut timeout = profile.initial_wall_clock_timeout;
let mut previous_progress = (0usize, 0u64);
let mut zero_progress_attempts = 0usize;
let max_timeout = std::cmp::max(
profile.max_wall_clock_timeout,
profile.initial_wall_clock_timeout,
);
loop {
attempt += 1;
match self.run_rsync_once(src, dst, Some(timeout), true) {
Ok(()) => return Ok(()),
Err(err) => {
if is_hard_fail_rsync_error(&err) {
return Err(format!(
"rsync fail-fast hard-fail on attempt {}: {}",
attempt, err
));
}
if !err.contains("wall-clock timeout") {
return Err(err);
}
let progress = dir_progress(dst)
.map_err(|e| format!("rsync fail-fast progress stat failed: {e}"))?;
if progress == (0, 0) {
zero_progress_attempts += 1;
if zero_progress_attempts >= 2 || attempt >= profile.max_attempts {
return Err(format!(
"rsync fail-fast gave up after {} attempts with no progress: {}",
attempt, err
));
}
} else if progress == previous_progress {
return Err(format!(
"rsync fail-fast gave up after {} attempts with no additional progress: {}",
attempt, err
));
} else {
previous_progress = progress;
}
if attempt >= profile.max_attempts {
return Err(format!(
"rsync fail-fast exhausted {} attempts: {}",
profile.max_attempts, err
));
}
timeout = std::cmp::min(timeout.saturating_mul(2), max_timeout);
}
}
}
Ok(())
}
fn module_fetch_uri(&self, rsync_base_uri: &str) -> String {
@ -140,7 +311,8 @@ impl RsyncFetcher for SystemRsyncFetcher {
let tmp = TempDir::new().map_err(|e| RsyncFetchError::Fetch(e.to_string()))?;
self.run_rsync(&base, tmp.path())
.map_err(RsyncFetchError::Fetch)?;
walk_dir_visit(tmp.path(), tmp.path(), &base, &mut wrapped).map_err(RsyncFetchError::Fetch)?;
walk_dir_visit(tmp.path(), tmp.path(), &base, &mut wrapped)
.map_err(RsyncFetchError::Fetch)?;
Ok((count, bytes_total))
}
@ -178,7 +350,10 @@ fn rsync_module_root_uri(s: &str) -> Option<String> {
let mut host_and_path = rest.splitn(2, '/');
let authority = host_and_path.next()?;
let path = host_and_path.next()?;
let mut segments: Vec<&str> = path.split('/').filter(|segment| !segment.is_empty()).collect();
let mut segments: Vec<&str> = path
.split('/')
.filter(|segment| !segment.is_empty())
.collect();
if segments.is_empty() {
return None;
}
@ -216,6 +391,38 @@ fn walk_dir_collect(
Ok(())
}
fn dir_progress(root: &Path) -> Result<(usize, u64), String> {
if !root.exists() {
return Ok((0, 0));
}
let mut files = 0usize;
let mut bytes = 0u64;
let mut stack = vec![root.to_path_buf()];
while let Some(path) = stack.pop() {
let rd = std::fs::read_dir(&path).map_err(|e| e.to_string())?;
for entry in rd {
let entry = entry.map_err(|e| e.to_string())?;
let path = entry.path();
let meta = entry.metadata().map_err(|e| e.to_string())?;
if meta.is_dir() {
stack.push(path);
} else if meta.is_file() {
files += 1;
bytes += meta.len();
}
}
}
Ok((files, bytes))
}
fn is_hard_fail_rsync_error(msg: &str) -> bool {
let lower = msg.to_ascii_lowercase();
lower.contains("no route to host")
|| lower.contains("network is unreachable")
|| lower.contains("connection refused")
|| lower.contains("name or service not known")
}
fn walk_dir_visit(
root: &Path,
current: &Path,
@ -314,6 +521,7 @@ mod tests {
// 1) Spawn error.
let f = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("/this/does/not/exist/rsync"),
connect_timeout: Duration::from_secs(1),
timeout: Duration::from_secs(1),
extra_args: Vec::new(),
mirror_root: None,
@ -326,6 +534,7 @@ mod tests {
// 2) Non-zero exit status.
let f = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("false"),
connect_timeout: Duration::from_secs(1),
timeout: Duration::from_secs(1),
extra_args: Vec::new(),
mirror_root: None,
@ -345,6 +554,7 @@ mod tests {
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("rsync"),
connect_timeout: Duration::from_secs(1),
timeout: Duration::from_secs(1),
extra_args: Vec::new(),
mirror_root: Some(root_file.clone()),
@ -371,6 +581,7 @@ mod tests {
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("rsync"),
connect_timeout: Duration::from_secs(1),
timeout: Duration::from_secs(1),
extra_args: Vec::new(),
mirror_root: Some(root.clone()),
@ -406,4 +617,168 @@ mod tests {
assert_eq!(out.len(), 1);
assert_eq!(out[0].0, "rsync://example.net/repo/a.cer");
}
#[cfg(unix)]
#[test]
fn rsync_fail_fast_retries_when_progress_is_made() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let script = temp.path().join("fake-rsync.sh");
let state = temp.path().join("state.txt");
std::fs::write(
&script,
format!(
"#!/usr/bin/env bash\nset -euo pipefail\nSTATE=\"{}\"\nDST=\"${{@: -1}}\"\nCOUNT=0\nif [[ -f \"$STATE\" ]]; then COUNT=$(cat \"$STATE\"); fi\nCOUNT=$((COUNT+1))\necho \"$COUNT\" > \"$STATE\"\nmkdir -p \"$DST\"\nif [[ \"$COUNT\" -eq 1 ]]; then\n echo first > \"$DST/part1\"\n sleep 2\nelse\n echo second > \"$DST/part2\"\nfi\n",
state.display()
),
)
.expect("write script");
let mut perms = std::fs::metadata(&script).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&script, perms).unwrap();
let dst = temp.path().join("dst");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: script,
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(60),
extra_args: Vec::new(),
mirror_root: None,
});
fetcher
.run_rsync_fail_fast(
"rsync://example.net/repo/",
&dst,
RsyncFailFastProfile {
initial_wall_clock_timeout: Duration::from_secs(1),
max_wall_clock_timeout: Duration::from_secs(4),
max_attempts: 3,
},
)
.expect("eventual success");
assert!(dst.join("part1").exists());
assert!(dst.join("part2").exists());
}
#[cfg(unix)]
#[test]
fn rsync_fail_fast_gives_up_after_two_zero_progress_timeouts() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let script = temp.path().join("fake-rsync.sh");
std::fs::write(&script, "#!/usr/bin/env bash\nset -euo pipefail\nsleep 5\n")
.expect("write script");
let mut perms = std::fs::metadata(&script).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&script, perms).unwrap();
let dst = temp.path().join("dst");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: script,
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(60),
extra_args: Vec::new(),
mirror_root: None,
});
let err = fetcher
.run_rsync_fail_fast(
"rsync://example.net/repo/",
&dst,
RsyncFailFastProfile {
initial_wall_clock_timeout: Duration::from_secs(1),
max_wall_clock_timeout: Duration::from_secs(2),
max_attempts: 4,
},
)
.expect_err("must fail");
assert!(err.contains("no progress"), "{err}");
}
#[cfg(unix)]
#[test]
fn rsync_fail_fast_hard_fail_stops_after_first_attempt() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let script = temp.path().join("fake-rsync.sh");
let state = temp.path().join("state.txt");
std::fs::write(
&script,
format!(
"#!/usr/bin/env bash\nset -euo pipefail\nSTATE=\"{}\"\nCOUNT=0\nif [[ -f \"$STATE\" ]]; then COUNT=$(cat \"$STATE\"); fi\nCOUNT=$((COUNT+1))\necho \"$COUNT\" > \"$STATE\"\necho 'rsync: [Receiver] failed to connect to host (1.2.3.4): Connection refused (111)' >&2\nexit 10\n",
state.display()
),
)
.expect("write script");
let mut perms = std::fs::metadata(&script).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&script, perms).unwrap();
let dst = temp.path().join("dst");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: script,
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(60),
extra_args: Vec::new(),
mirror_root: None,
});
let err = fetcher
.run_rsync_fail_fast(
"rsync://example.net/repo/",
&dst,
RsyncFailFastProfile {
initial_wall_clock_timeout: Duration::from_secs(10),
max_wall_clock_timeout: Duration::from_secs(80),
max_attempts: 4,
},
)
.expect_err("must hard fail");
assert!(err.contains("hard-fail"), "{err}");
let count = std::fs::read_to_string(&state).unwrap();
assert_eq!(count.trim(), "1");
}
#[cfg(unix)]
#[test]
fn run_rsync_once_passes_contimeout_and_timeout_args() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let script = temp.path().join("capture-rsync.sh");
let args_file = temp.path().join("args.txt");
std::fs::write(
&script,
format!(
"#!/usr/bin/env bash\nset -euo pipefail\nprintf '%s\\n' \"$@\" > \"{}\"\nDST=\"${{@: -1}}\"\nmkdir -p \"$DST\"\n",
args_file.display()
),
)
.expect("write script");
let mut perms = std::fs::metadata(&script).unwrap().permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&script, perms).unwrap();
let dst = temp.path().join("dst");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: script,
connect_timeout: Duration::from_secs(15),
timeout: Duration::from_secs(30),
extra_args: Vec::new(),
mirror_root: None,
});
fetcher
.run_rsync_once("rsync://example.net/repo/", &dst, None, false)
.expect("rsync");
let args = std::fs::read_to_string(&args_file).expect("read args");
assert!(args.contains("--contimeout\n15\n"), "{args}");
assert!(args.contains("--timeout\n30\n"), "{args}");
}
}

View File

@ -3,9 +3,9 @@ pub mod cir;
pub mod data_model;
#[cfg(feature = "full")]
pub mod analysis;
pub mod blob_store;
#[cfg(feature = "full")]
pub mod bundle;
pub mod analysis;
#[cfg(feature = "full")]
pub mod audit;
#[cfg(feature = "full")]
@ -13,6 +13,8 @@ pub mod audit_downloads;
#[cfg(feature = "full")]
pub mod audit_trace;
#[cfg(feature = "full")]
pub mod bundle;
#[cfg(feature = "full")]
pub mod cli;
#[cfg(feature = "full")]
pub mod fetch;

View File

@ -874,9 +874,13 @@ mod tests {
.join("meta.json");
std::fs::remove_file(&meta_path).expect("remove rsync module meta");
let index = ReplayArchiveIndex::load_allow_missing_rsync_modules(&archive_root, &locks_path)
.expect("load replay index without rsync meta");
let module = index.rsync_modules.get(&module_uri).expect("module present");
let index =
ReplayArchiveIndex::load_allow_missing_rsync_modules(&archive_root, &locks_path)
.expect("load replay index without rsync meta");
let module = index
.rsync_modules
.get(&module_uri)
.expect("module present");
assert_eq!(module.meta.module, module_uri);
assert_eq!(module.meta.version, 1);
}

View File

@ -916,7 +916,10 @@ mod tests {
let index = ReplayDeltaArchiveIndex::load(&archive_root, &locks_path)
.expect("load delta replay index without rsync meta");
let module = index.rsync_modules.get(&module_uri).expect("module present");
let module = index
.rsync_modules
.get(&module_uri)
.expect("module present");
assert_eq!(module.meta.module, module_uri);
assert_eq!(module.meta.version, 1);
}

View File

@ -8,6 +8,7 @@ use rocksdb::{
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use sha2::Digest;
use crate::blob_store::{ExternalRawStoreDb, RawObjectStore};
use crate::data_model::rc::{AsResourceSet, IpResourceSet};
pub const CF_REPOSITORY_VIEW: &str = "repository_view";
@ -77,6 +78,7 @@ pub type StorageResult<T> = Result<T, StorageError>;
pub struct RocksStore {
db: DB,
external_raw_store: Option<ExternalRawStoreDb>,
}
pub mod pack {
@ -769,7 +771,19 @@ impl RocksStore {
let db = DB::open_cf_descriptors(&base_opts, path, column_family_descriptors())
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
Ok(Self { db })
Ok(Self {
db,
external_raw_store: None,
})
}
pub fn open_with_external_raw_store(
path: &Path,
raw_store_path: &Path,
) -> StorageResult<Self> {
let mut store = Self::open(path)?;
store.external_raw_store = Some(ExternalRawStoreDb::open(raw_store_path)?);
Ok(store)
}
fn cf(&self, name: &'static str) -> StorageResult<&ColumnFamily> {
@ -822,7 +836,10 @@ impl RocksStore {
member_records: &[RrdpSourceMemberRecord],
owner_records: &[RrdpUriOwnerRecord],
) -> StorageResult<()> {
if repository_view_entries.is_empty() && member_records.is_empty() && owner_records.is_empty() {
if repository_view_entries.is_empty()
&& member_records.is_empty()
&& owner_records.is_empty()
{
return Ok(());
}
@ -877,6 +894,9 @@ impl RocksStore {
pub fn put_raw_by_hash_entry(&self, entry: &RawByHashEntry) -> StorageResult<()> {
entry.validate_internal()?;
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.put_raw_entry(entry);
}
let cf = self.cf(CF_RAW_BY_HASH)?;
let key = raw_by_hash_key(&entry.sha256_hex);
let value = encode_cbor(entry, "raw_by_hash")?;
@ -890,6 +910,9 @@ impl RocksStore {
if entries.is_empty() {
return Ok(());
}
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.put_raw_entries_batch(entries);
}
let cf = self.cf(CF_RAW_BY_HASH)?;
let mut batch = WriteBatch::default();
@ -909,6 +932,9 @@ impl RocksStore {
if entries.is_empty() {
return Ok(());
}
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.put_raw_entries_batch(entries);
}
let cf = self.cf(CF_RAW_BY_HASH)?;
let mut batch = WriteBatch::default();
@ -922,6 +948,9 @@ impl RocksStore {
pub fn delete_raw_by_hash_entry(&self, sha256_hex: &str) -> StorageResult<()> {
validate_sha256_hex("raw_by_hash.sha256_hex", sha256_hex)?;
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.delete_raw_entry(sha256_hex);
}
let cf = self.cf(CF_RAW_BY_HASH)?;
let key = raw_by_hash_key(sha256_hex);
self.db
@ -931,6 +960,9 @@ impl RocksStore {
}
pub fn get_raw_by_hash_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>> {
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.get_raw_entry(sha256_hex);
}
let cf = self.cf(CF_RAW_BY_HASH)?;
let key = raw_by_hash_key(sha256_hex);
let Some(bytes) = self
@ -952,9 +984,15 @@ impl RocksStore {
if sha256_hexes.is_empty() {
return Ok(Vec::new());
}
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.get_raw_entries_batch(sha256_hexes);
}
let cf = self.cf(CF_RAW_BY_HASH)?;
let keys: Vec<String> = sha256_hexes.iter().map(|hash| raw_by_hash_key(hash)).collect();
let keys: Vec<String> = sha256_hexes
.iter()
.map(|hash| raw_by_hash_key(hash))
.collect();
self.db
.multi_get_cf(keys.iter().map(|key| (cf, key.as_bytes())))
.into_iter()
@ -1217,7 +1255,10 @@ impl RocksStore {
))
}
pub fn load_current_object_bytes_by_uri(&self, rsync_uri: &str) -> StorageResult<Option<Vec<u8>>> {
pub fn load_current_object_bytes_by_uri(
&self,
rsync_uri: &str,
) -> StorageResult<Option<Vec<u8>>> {
let Some(view) = self.get_repository_view_entry(rsync_uri)? else {
return Ok(None);
};
@ -1230,17 +1271,15 @@ impl RocksStore {
.as_deref()
.ok_or(StorageError::InvalidData {
entity: "repository_view",
detail: format!(
"current_hash missing for current object URI: {rsync_uri}"
),
detail: format!("current_hash missing for current object URI: {rsync_uri}"),
})?;
let raw = self.get_raw_by_hash_entry(hash)?.ok_or(StorageError::InvalidData {
let bytes = self.get_blob_bytes(hash)?.ok_or(StorageError::InvalidData {
entity: "repository_view",
detail: format!(
"raw_by_hash entry missing for current object URI: {rsync_uri} (hash={hash})"
),
})?;
Ok(Some(raw.bytes))
Ok(Some(bytes))
}
}
}
@ -1283,7 +1322,6 @@ impl RocksStore {
Ok(())
}
#[allow(dead_code)]
pub fn write_batch(&self, batch: WriteBatch) -> StorageResult<()> {
@ -1721,6 +1759,37 @@ mod tests {
assert_eq!(got_raw, raw);
}
#[test]
fn raw_by_hash_routes_to_external_raw_store_when_configured() {
let td = tempfile::tempdir().expect("tempdir");
let main_db = td.path().join("main-db");
let raw_db = td.path().join("raw-store.db");
let raw = sample_raw_by_hash_entry(b"external-raw".to_vec());
{
let store =
RocksStore::open_with_external_raw_store(&main_db, &raw_db).expect("open store");
store
.put_raw_by_hash_entry(&raw)
.expect("put external raw");
let got = store
.get_raw_by_hash_entry(&raw.sha256_hex)
.expect("get external raw")
.expect("raw exists");
assert_eq!(got, raw);
}
let main_store = RocksStore::open(&main_db).expect("open main only");
assert!(
main_store
.get_raw_by_hash_entry(&raw.sha256_hex)
.expect("read main store")
.is_none(),
"main db should not contain raw entry when external raw store is configured"
);
}
#[test]
fn repository_view_and_raw_by_hash_validation_errors_are_reported() {
let td = tempfile::tempdir().expect("tempdir");
@ -1880,10 +1949,15 @@ mod tests {
store
.replace_vcir_and_audit_rule_indexes(None, &previous)
.expect("store previous vcir");
assert!(store
.get_audit_rule_index_entry(AuditRuleKind::Roa, &previous.local_outputs[0].rule_hash)
.expect("get old audit entry")
.is_some());
assert!(
store
.get_audit_rule_index_entry(
AuditRuleKind::Roa,
&previous.local_outputs[0].rule_hash
)
.expect("get old audit entry")
.is_some()
);
let mut current = sample_vcir("rsync://example.test/repo/current.mft");
current.local_outputs = vec![VcirLocalOutput {
@ -1909,14 +1983,24 @@ mod tests {
.expect("get replaced vcir")
.expect("vcir exists");
assert_eq!(got, current);
assert!(store
.get_audit_rule_index_entry(AuditRuleKind::Roa, &previous.local_outputs[0].rule_hash)
.expect("get deleted old audit entry")
.is_none());
assert!(store
.get_audit_rule_index_entry(AuditRuleKind::Aspa, &current.local_outputs[0].rule_hash)
.expect("get new audit entry")
.is_some());
assert!(
store
.get_audit_rule_index_entry(
AuditRuleKind::Roa,
&previous.local_outputs[0].rule_hash
)
.expect("get deleted old audit entry")
.is_none()
);
assert!(
store
.get_audit_rule_index_entry(
AuditRuleKind::Aspa,
&current.local_outputs[0].rule_hash
)
.expect("get new audit entry")
.is_some()
);
}
#[test]
@ -2218,8 +2302,11 @@ mod tests {
let present_bytes = b"present-object".to_vec();
let present_hash = sha256_hex(&present_bytes);
let mut present_raw = RawByHashEntry::from_bytes(present_hash.clone(), present_bytes.clone());
present_raw.origin_uris.push("rsync://example.test/repo/present.roa".to_string());
let mut present_raw =
RawByHashEntry::from_bytes(present_hash.clone(), present_bytes.clone());
present_raw
.origin_uris
.push("rsync://example.test/repo/present.roa".to_string());
present_raw.object_type = Some("roa".to_string());
store
.put_raw_by_hash_entry(&present_raw)
@ -2238,7 +2325,9 @@ mod tests {
let replaced_hash = sha256_hex(&replaced_bytes);
let mut replaced_raw =
RawByHashEntry::from_bytes(replaced_hash.clone(), replaced_bytes.clone());
replaced_raw.origin_uris.push("rsync://example.test/repo/replaced.cer".to_string());
replaced_raw
.origin_uris
.push("rsync://example.test/repo/replaced.cer".to_string());
replaced_raw.object_type = Some("cer".to_string());
store
.put_raw_by_hash_entry(&replaced_raw)
@ -2309,5 +2398,4 @@ mod tests {
.expect_err("missing raw_by_hash should error");
assert!(matches!(err, StorageError::InvalidData { .. }));
}
}

View File

@ -10,36 +10,37 @@ use crate::storage::{RawByHashEntry, RocksStore};
use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log;
use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError, load_rrdp_local_state};
use crate::sync::store_projection::{
build_repository_view_present_entry,
build_repository_view_withdrawn_entry,
compute_sha256_hex, infer_object_type_from_uri,
build_repository_view_present_entry, build_repository_view_withdrawn_entry, compute_sha256_hex,
infer_object_type_from_uri,
};
use std::collections::{BTreeMap, HashSet};
use std::thread;
use std::time::Duration;
#[cfg(test)]
use crate::storage::RrdpSourceSyncState;
#[cfg(test)]
use crate::sync::rrdp::persist_rrdp_local_state;
const RRDP_RETRY_BACKOFFS_PROD: [Duration; 3] = [
Duration::from_millis(200),
Duration::from_millis(500),
Duration::from_secs(1),
];
const RRDP_RETRY_BACKOFFS_TEST: [Duration; 2] =
[Duration::from_millis(0), Duration::from_millis(0)];
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RepoSyncSource {
Rrdp,
Rsync,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RepoSyncPhase {
RrdpOk,
RrdpFailedRsyncOk,
RsyncOnlyOk,
ReplayRrdpOk,
ReplayRsyncOk,
ReplayNoopRrdp,
ReplayNoopRsync,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepoSyncResult {
pub source: RepoSyncSource,
pub phase: RepoSyncPhase,
pub objects_written: usize,
pub warnings: Vec<Warning>,
}
@ -99,6 +100,7 @@ pub fn sync_publication_point(
);
Ok(RepoSyncResult {
source: RepoSyncSource::Rrdp,
phase: RepoSyncPhase::RrdpOk,
objects_written: written,
warnings: Vec::new(),
})
@ -115,6 +117,14 @@ pub fn sync_publication_point(
"rrdp_error": err.to_string(),
}),
);
crate::progress_log::emit(
"rrdp_failed_fallback_rsync",
serde_json::json!({
"notify_uri": notification_uri,
"rsync_base_uri": rsync_base_uri,
"rrdp_error": err.to_string(),
}),
);
let warnings = vec![
Warning::new(format!("RRDP failed; falling back to rsync: {err}"))
.with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5")])
@ -127,12 +137,16 @@ pub fn sync_publication_point(
timing,
download_log,
)?;
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rsync_peer_aligned_profile_total", 1);
}
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rsync_fallback_ok_total", 1);
t.record_count("repo_sync_rsync_objects_written_total", written as u64);
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rsync,
phase: RepoSyncPhase::RrdpFailedRsyncOk,
objects_written: written,
warnings,
})
@ -147,6 +161,9 @@ pub fn sync_publication_point(
timing,
download_log,
)?;
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rsync_peer_aligned_profile_total", 1);
}
crate::progress_log::emit(
"repo_sync_rsync_direct",
serde_json::json!({
@ -160,6 +177,7 @@ pub fn sync_publication_point(
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rsync,
phase: RepoSyncPhase::RsyncOnlyOk,
objects_written: written,
warnings: Vec::new(),
})
@ -192,6 +210,7 @@ pub fn sync_publication_point_replay(
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rrdp,
phase: RepoSyncPhase::ReplayRrdpOk,
objects_written: written,
warnings: Vec::new(),
})
@ -210,6 +229,7 @@ pub fn sync_publication_point_replay(
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rsync,
phase: RepoSyncPhase::ReplayRsyncOk,
objects_written: written,
warnings: Vec::new(),
})
@ -243,6 +263,7 @@ pub fn sync_publication_point_replay_delta(
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rrdp,
phase: RepoSyncPhase::ReplayRrdpOk,
objects_written: written,
warnings: Vec::new(),
})
@ -261,12 +282,17 @@ pub fn sync_publication_point_replay_delta(
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rsync,
phase: RepoSyncPhase::ReplayRsyncOk,
objects_written: written,
warnings: Vec::new(),
})
}
ReplayDeltaResolvedTransport::Noop(source) => Ok(RepoSyncResult {
source,
phase: match source {
RepoSyncSource::Rrdp => RepoSyncPhase::ReplayNoopRrdp,
RepoSyncSource::Rsync => RepoSyncPhase::ReplayNoopRsync,
},
objects_written: 0,
warnings: Vec::new(),
}),
@ -448,14 +474,6 @@ fn is_retryable_http_fetch_error(msg: &str) -> bool {
code == 408 || code == 429 || (500..600).contains(&code)
}
fn rrdp_retry_backoffs() -> &'static [Duration] {
if cfg!(test) {
&RRDP_RETRY_BACKOFFS_TEST
} else {
&RRDP_RETRY_BACKOFFS_PROD
}
}
fn try_rrdp_sync_with_retry(
store: &RocksStore,
notification_uri: &str,
@ -463,89 +481,52 @@ fn try_rrdp_sync_with_retry(
timing: Option<&TimingHandle>,
download_log: Option<&DownloadLogHandle>,
) -> Result<usize, RrdpSyncError> {
let backoffs = rrdp_retry_backoffs();
let max_attempts = backoffs.len().saturating_add(1).max(1);
let mut attempt: usize = 0;
let attempt = 1usize;
crate::progress_log::emit(
"rrdp_sync_attempt",
serde_json::json!({
"notify_uri": notification_uri,
"attempt": attempt,
}),
);
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_retry_attempt_total", 1);
}
loop {
attempt += 1;
crate::progress_log::emit(
"rrdp_sync_attempt",
serde_json::json!({
"notify_uri": notification_uri,
"attempt": attempt,
}),
);
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_retry_attempt_total", 1);
match try_rrdp_sync(store, notification_uri, http_fetcher, timing, download_log) {
Ok(written) => {
crate::progress_log::emit(
"rrdp_sync_success",
serde_json::json!({
"notify_uri": notification_uri,
"attempt": attempt,
"objects_written": written,
}),
);
Ok(written)
}
match try_rrdp_sync(store, notification_uri, http_fetcher, timing, download_log) {
Ok(written) => {
crate::progress_log::emit(
"rrdp_sync_success",
serde_json::json!({
"notify_uri": notification_uri,
"attempt": attempt,
"objects_written": written,
}),
);
if attempt > 1 {
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_retry_success_total", 1);
}
Err(err) => {
let retryable = match &err {
RrdpSyncError::Fetch(msg) => is_retryable_http_fetch_error(msg),
_ => false,
};
crate::progress_log::emit(
"rrdp_sync_failed",
serde_json::json!({
"notify_uri": notification_uri,
"attempt": attempt,
"retryable": retryable,
"error": err.to_string(),
}),
);
if let Some(t) = timing.as_ref() {
match &err {
RrdpSyncError::Fetch(_) => t.record_count("rrdp_failed_fetch_total", 1),
RrdpSyncError::Rrdp(_) => t.record_count("rrdp_failed_protocol_total", 1),
RrdpSyncError::Storage(_) => t.record_count("rrdp_failed_storage_total", 1),
}
return Ok(written);
}
Err(err) => {
let retryable = match &err {
RrdpSyncError::Fetch(msg) => is_retryable_http_fetch_error(msg),
_ => false,
};
if retryable && attempt < max_attempts {
crate::progress_log::emit(
"rrdp_sync_retry",
serde_json::json!({
"notify_uri": notification_uri,
"attempt": attempt,
"error": err.to_string(),
}),
);
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_retry_sleep_total", 1);
}
let backoff = backoffs
.get(attempt.saturating_sub(1))
.copied()
.unwrap_or_else(|| Duration::from_secs(0));
if !backoff.is_zero() {
thread::sleep(backoff);
}
continue;
}
crate::progress_log::emit(
"rrdp_sync_failed",
serde_json::json!({
"notify_uri": notification_uri,
"attempt": attempt,
"retryable": retryable,
"error": err.to_string(),
}),
);
if let Some(t) = timing.as_ref() {
match &err {
RrdpSyncError::Fetch(_) => t.record_count("rrdp_failed_fetch_total", 1),
RrdpSyncError::Rrdp(_) => t.record_count("rrdp_failed_protocol_total", 1),
RrdpSyncError::Storage(_) => t.record_count("rrdp_failed_storage_total", 1),
}
if retryable && attempt >= max_attempts && attempt > 1 {
t.record_count("rrdp_retry_exhausted_total", 1);
}
}
return Err(err);
}
Err(err)
}
}
}
@ -575,41 +556,42 @@ fn rsync_sync_into_current_store(
let mut new_set: HashSet<String> = HashSet::new();
let mut uri_to_hash: BTreeMap<String, String> = BTreeMap::new();
let mut pending_raw: BTreeMap<String, RawByHashEntry> = BTreeMap::new();
let (object_count, bytes_total) = match rsync_fetcher.visit_objects(rsync_base_uri, &mut |uri, bytes| {
let sha256_hex = compute_sha256_hex(&bytes);
new_set.insert(uri.clone());
uri_to_hash.insert(uri.clone(), sha256_hex.clone());
let entry = pending_raw
.entry(sha256_hex.clone())
.or_insert_with(|| RawByHashEntry::from_bytes(sha256_hex.clone(), bytes.clone()));
if entry.bytes != bytes {
return Err(format!(
"raw_by_hash collision for {uri}: same sha256 maps to different bytes"
));
}
if !entry.origin_uris.iter().any(|existing| existing == &uri) {
entry.origin_uris.push(uri.clone());
}
if entry.object_type.is_none() {
entry.object_type = infer_object_type_from_uri(&uri);
}
Ok(())
}) {
Ok(v) => {
if let Some(s) = dl_span.as_mut() {
s.set_objects(v.0 as u64, v.1);
s.set_bytes(v.1);
s.set_ok();
let (object_count, bytes_total) =
match rsync_fetcher.visit_objects(rsync_base_uri, &mut |uri, bytes| {
let sha256_hex = compute_sha256_hex(&bytes);
new_set.insert(uri.clone());
uri_to_hash.insert(uri.clone(), sha256_hex.clone());
let entry = pending_raw
.entry(sha256_hex.clone())
.or_insert_with(|| RawByHashEntry::from_bytes(sha256_hex.clone(), bytes.clone()));
if entry.bytes != bytes {
return Err(format!(
"raw_by_hash collision for {uri}: same sha256 maps to different bytes"
));
}
v
}
Err(e) => {
if let Some(s) = dl_span.as_mut() {
s.set_err(e.to_string());
if !entry.origin_uris.iter().any(|existing| existing == &uri) {
entry.origin_uris.push(uri.clone());
}
return Err(e.into());
}
};
if entry.object_type.is_none() {
entry.object_type = infer_object_type_from_uri(&uri);
}
Ok(())
}) {
Ok(v) => {
if let Some(s) = dl_span.as_mut() {
s.set_objects(v.0 as u64, v.1);
s.set_bytes(v.1);
s.set_ok();
}
v
}
Err(e) => {
if let Some(s) = dl_span.as_mut() {
s.set_err(e.to_string());
}
return Err(e.into());
}
};
crate::progress_log::emit(
"rsync_sync_fetch_done",
serde_json::json!({
@ -639,9 +621,9 @@ fn rsync_sync_into_current_store(
.map_err(|e| RepoSyncError::Storage(e.to_string()))?;
let mut entries_to_write = Vec::new();
for (hash, existing_opt) in hashes.into_iter().zip(existing_entries.into_iter()) {
let mut pending_entry = pending_raw
.remove(&hash)
.ok_or_else(|| RepoSyncError::Storage(format!("missing pending raw entry for {hash}")))?;
let mut pending_entry = pending_raw.remove(&hash).ok_or_else(|| {
RepoSyncError::Storage(format!("missing pending raw entry for {hash}"))
})?;
match existing_opt {
Some(mut existing) => {
if existing.bytes != pending_entry.bytes {
@ -651,7 +633,11 @@ fn rsync_sync_into_current_store(
}
let mut changed = false;
for uri in pending_entry.origin_uris.drain(..) {
if !existing.origin_uris.iter().any(|existing_uri| existing_uri == &uri) {
if !existing
.origin_uris
.iter()
.any(|existing_uri| existing_uri == &uri)
{
existing.origin_uris.push(uri);
changed = true;
}
@ -679,10 +665,9 @@ fn rsync_sync_into_current_store(
}
for uri in &new_set {
let current_hash = uri_to_hash
.get(uri)
.cloned()
.ok_or_else(|| RepoSyncError::Storage(format!("missing raw_by_hash mapping for {uri}")))?;
let current_hash = uri_to_hash.get(uri).cloned().ok_or_else(|| {
RepoSyncError::Storage(format!("missing raw_by_hash mapping for {uri}"))
})?;
repository_view_entries.push(build_repository_view_present_entry(
&sync_scope_uri,
uri,
@ -736,9 +721,9 @@ mod tests {
use crate::replay::fetch_http::PayloadReplayHttpFetcher;
use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher;
use crate::storage::RepositoryViewState;
use crate::sync::store_projection::build_repository_view_present_entry;
use crate::sync::rrdp::Fetcher as HttpFetcher;
use crate::sync::rrdp::RrdpState;
use crate::sync::store_projection::build_repository_view_present_entry;
use base64::Engine;
use sha2::Digest;
use std::collections::HashMap;
@ -1322,7 +1307,7 @@ mod tests {
}
#[test]
fn rrdp_retry_succeeds_without_rsync_when_notification_fetch_is_transient() {
fn rrdp_fetch_error_falls_back_to_rsync_without_retry() {
let temp = tempfile::tempdir().expect("tempdir");
let store_dir = temp.path().join("db");
let store = RocksStore::open(&store_dir).expect("open rocksdb");
@ -1335,50 +1320,33 @@ mod tests {
});
let notification_uri = "https://example.test/notification.xml";
let snapshot_uri = "https://example.test/snapshot.xml";
let published_uri = "rsync://example.test/repo/a.mft";
let published_bytes = b"x";
let snapshot = snapshot_xml(
"9df4b597-af9e-4dca-bdda-719cce2c4e28",
1,
&[(published_uri, published_bytes)],
);
let snapshot_hash = hex::encode(sha2::Sha256::digest(&snapshot));
let notif = notification_xml(
"9df4b597-af9e-4dca-bdda-719cce2c4e28",
1,
snapshot_uri,
&snapshot_hash,
);
let mut map = HashMap::new();
map.insert(notification_uri.to_string(), notif);
map.insert(snapshot_uri.to_string(), snapshot);
struct RetryThenMap {
inner: MapFetcher,
notification_uri: String,
fail_times: usize,
struct AlwaysFailHttp {
notification_calls: AtomicUsize,
}
impl HttpFetcher for RetryThenMap {
fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
if uri == self.notification_uri {
let n = self.notification_calls.fetch_add(1, Ordering::SeqCst);
if n < self.fail_times {
return Err("http request failed: simulated transient".to_string());
}
}
self.inner.fetch(uri)
impl HttpFetcher for AlwaysFailHttp {
fn fetch(&self, _uri: &str) -> Result<Vec<u8>, String> {
self.notification_calls.fetch_add(1, Ordering::SeqCst);
Err("http request failed: simulated transient".to_string())
}
}
let http = RetryThenMap {
inner: MapFetcher { map },
notification_uri: notification_uri.to_string(),
fail_times: 2,
struct SingleObjectRsync {
uri: String,
bytes: Vec<u8>,
}
impl RsyncFetcher for SingleObjectRsync {
fn fetch_objects(
&self,
_rsync_base_uri: &str,
) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
Ok(vec![(self.uri.clone(), self.bytes.clone())])
}
}
let http = AlwaysFailHttp {
notification_calls: AtomicUsize::new(0),
};
@ -1394,29 +1362,26 @@ mod tests {
Some(notification_uri),
"rsync://example.test/repo/",
&http,
&PanicRsyncFetcher,
&SingleObjectRsync {
uri: published_uri.to_string(),
bytes: published_bytes.to_vec(),
},
Some(&timing),
Some(&download_log),
)
.expect("sync ok");
assert_eq!(out.source, RepoSyncSource::Rrdp);
assert_eq!(out.source, RepoSyncSource::Rsync);
assert_current_object(&store, published_uri, published_bytes);
assert_eq!(http.notification_calls.load(Ordering::SeqCst), 1);
let events = download_log.snapshot_events();
assert_eq!(events.len(), 4, "expected 3x notification + 1x snapshot");
assert_eq!(events.len(), 2, "expected 1x notification + 1x rsync");
assert_eq!(
events
.iter()
.filter(|e| e.kind == AuditDownloadKind::RrdpNotification)
.count(),
3
);
assert_eq!(
events
.iter()
.filter(|e| e.kind == AuditDownloadKind::RrdpSnapshot)
.count(),
1
);
assert_eq!(
@ -1424,7 +1389,14 @@ mod tests {
.iter()
.filter(|e| e.kind == AuditDownloadKind::RrdpNotification && !e.success)
.count(),
2
1
);
assert_eq!(
events
.iter()
.filter(|e| e.kind == AuditDownloadKind::Rsync)
.count(),
1
);
let v = timing_to_json(temp.path(), &timing);
@ -1433,17 +1405,17 @@ mod tests {
counts
.get("rrdp_retry_attempt_total")
.and_then(|v| v.as_u64()),
Some(3)
Some(1)
);
assert_eq!(
counts
.get("rrdp_retry_success_total")
.get("repo_sync_rrdp_failed_total")
.and_then(|v| v.as_u64()),
Some(1)
);
assert_eq!(
counts
.get("repo_sync_rrdp_ok_total")
.get("repo_sync_rsync_fallback_ok_total")
.and_then(|v| v.as_u64()),
Some(1)
);
@ -1749,7 +1721,11 @@ mod tests {
assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.objects_written, 1);
assert_eq!(out.warnings.len(), 0);
assert_current_object(&store, "rsync://rsync.example.test/repo/sub/fallback.cer", b"cer");
assert_current_object(
&store,
"rsync://rsync.example.test/repo/sub/fallback.cer",
b"cer",
);
}
#[test]
@ -1995,7 +1971,11 @@ mod tests {
assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.objects_written, 2);
assert_current_object(&store, "rsync://rsync.example.test/repo/a.mft", b"base");
assert_current_object(&store, "rsync://rsync.example.test/repo/sub/x.cer", b"overlay-cer");
assert_current_object(
&store,
"rsync://rsync.example.test/repo/sub/x.cer",
b"overlay-cer",
);
}
#[test]

View File

@ -5,12 +5,10 @@ use crate::storage::{RocksStore, RrdpDeltaOp, RrdpSourceSyncState};
use crate::sync::store_projection::{
build_repository_view_present_entry, build_repository_view_withdrawn_entry,
build_rrdp_source_member_present_record, build_rrdp_source_member_withdrawn_record,
build_rrdp_uri_owner_active_record, build_rrdp_uri_owner_withdrawn_record,
compute_sha256_hex, current_rrdp_owner_is, ensure_rrdp_uri_can_be_owned_by,
prepare_raw_by_hash_evidence_batch,
put_repository_view_present, put_repository_view_withdrawn,
put_rrdp_source_member_present, put_rrdp_source_member_withdrawn,
put_rrdp_uri_owner_active, put_rrdp_uri_owner_withdrawn,
build_rrdp_uri_owner_active_record, build_rrdp_uri_owner_withdrawn_record, compute_sha256_hex,
current_rrdp_owner_is, ensure_rrdp_uri_can_be_owned_by, prepare_raw_by_hash_evidence_batch,
put_repository_view_present, put_repository_view_withdrawn, put_rrdp_source_member_present,
put_rrdp_source_member_withdrawn, put_rrdp_uri_owner_active, put_rrdp_uri_owner_withdrawn,
update_rrdp_source_record_on_success, upsert_raw_by_hash_evidence,
};
use base64::Engine;
@ -1228,20 +1226,16 @@ fn apply_snapshot(
}
let session_id = expected_session_id.to_string();
let prepared_raw = prepare_raw_by_hash_evidence_batch(store, &published)
.map_err(RrdpSyncError::Storage)?;
let prepared_raw =
prepare_raw_by_hash_evidence_batch(store, &published).map_err(RrdpSyncError::Storage)?;
let mut repository_view_entries = Vec::with_capacity(published.len() + withdrawn.len());
let mut member_records = Vec::with_capacity(published.len() + withdrawn.len());
let mut owner_records = Vec::with_capacity(published.len() + withdrawn.len());
for (uri, _bytes) in &published {
let current_hash = prepared_raw
.uri_to_hash
.get(uri)
.cloned()
.ok_or_else(|| {
RrdpSyncError::Storage(format!("missing raw_by_hash mapping for {uri}"))
})?;
let current_hash = prepared_raw.uri_to_hash.get(uri).cloned().ok_or_else(|| {
RrdpSyncError::Storage(format!("missing raw_by_hash mapping for {uri}"))
})?;
repository_view_entries.push(build_repository_view_present_entry(
notification_uri,
uri,

View File

@ -56,7 +56,11 @@ pub fn prepare_raw_by_hash_evidence_batch(
}
let mut changed = false;
for uri in pending_entry.origin_uris.drain(..) {
if !existing.origin_uris.iter().any(|existing_uri| existing_uri == &uri) {
if !existing
.origin_uris
.iter()
.any(|existing_uri| existing_uri == &uri)
{
existing.origin_uris.push(uri);
changed = true;
}
@ -195,10 +199,8 @@ pub fn upsert_raw_by_hash_evidence(
rsync_uri: &str,
bytes: &[u8],
) -> Result<String, String> {
let prepared = prepare_raw_by_hash_evidence_batch(
store,
&[(rsync_uri.to_string(), bytes.to_vec())],
)?;
let prepared =
prepare_raw_by_hash_evidence_batch(store, &[(rsync_uri.to_string(), bytes.to_vec())])?;
store
.put_raw_by_hash_entries_batch_unchecked(&prepared.entries_to_write)
.map_err(|e| e.to_string())?;
@ -277,7 +279,13 @@ pub fn put_rrdp_source_member_present(
rsync_uri: &str,
current_hash: &str,
) -> Result<(), String> {
let record = build_rrdp_source_member_present_record(notification_uri, session_id, serial, rsync_uri, current_hash);
let record = build_rrdp_source_member_present_record(
notification_uri,
session_id,
serial,
rsync_uri,
current_hash,
);
store
.put_rrdp_source_member_record(&record)
.map_err(|e| e.to_string())
@ -291,7 +299,13 @@ pub fn put_rrdp_source_member_withdrawn(
rsync_uri: &str,
current_hash: Option<String>,
) -> Result<(), String> {
let record = build_rrdp_source_member_withdrawn_record(notification_uri, session_id, serial, rsync_uri, current_hash);
let record = build_rrdp_source_member_withdrawn_record(
notification_uri,
session_id,
serial,
rsync_uri,
current_hash,
);
store
.put_rrdp_source_member_record(&record)
.map_err(|e| e.to_string())
@ -305,7 +319,13 @@ pub fn put_rrdp_uri_owner_active(
rsync_uri: &str,
current_hash: &str,
) -> Result<(), String> {
let record = build_rrdp_uri_owner_active_record(notification_uri, session_id, serial, rsync_uri, current_hash);
let record = build_rrdp_uri_owner_active_record(
notification_uri,
session_id,
serial,
rsync_uri,
current_hash,
);
store
.put_rrdp_uri_owner_record(&record)
.map_err(|e| e.to_string())
@ -319,7 +339,13 @@ pub fn put_rrdp_uri_owner_withdrawn(
rsync_uri: &str,
current_hash: Option<String>,
) -> Result<(), String> {
let record = build_rrdp_uri_owner_withdrawn_record(notification_uri, session_id, serial, rsync_uri, current_hash);
let record = build_rrdp_uri_owner_withdrawn_record(
notification_uri,
session_id,
serial,
rsync_uri,
current_hash,
);
store
.put_rrdp_uri_owner_record(&record)
.map_err(|e| e.to_string())

View File

@ -29,7 +29,8 @@ pub struct ValidatedSubordinateCaLite {
#[derive(Clone, Debug, Default)]
pub struct IssuerEffectiveResourcesIndex {
parent_ip_by_afi_items: Option<BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>>,
parent_ip_by_afi_items:
Option<BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>>,
parent_ip_merged_intervals: HashMap<crate::data_model::rc::Afi, Vec<(Vec<u8>, Vec<u8>)>>,
parent_asnum_intervals: Option<Vec<(u32, u32)>>,
parent_rdi_intervals: Option<Vec<(u32, u32)>>,
@ -40,21 +41,18 @@ impl IssuerEffectiveResourcesIndex {
issuer_effective_ip: Option<&IpResourceSet>,
issuer_effective_as: Option<&AsResourceSet>,
) -> Result<Self, CaPathError> {
let parent_ip_by_afi_items = issuer_effective_ip.map(ip_resources_by_afi_items).transpose()?;
let parent_ip_by_afi_items = issuer_effective_ip
.map(ip_resources_by_afi_items)
.transpose()?;
let parent_ip_merged_intervals = issuer_effective_ip
.map(ip_resources_to_merged_intervals_by_afi)
.unwrap_or_default();
let parent_asnum_intervals = issuer_effective_as.and_then(|resources| {
resources
.asnum
.as_ref()
.map(as_choice_to_merged_intervals)
});
let parent_rdi_intervals = issuer_effective_as.and_then(|resources| {
resources.rdi.as_ref().map(as_choice_to_merged_intervals)
});
let parent_asnum_intervals = issuer_effective_as
.and_then(|resources| resources.asnum.as_ref().map(as_choice_to_merged_intervals));
let parent_rdi_intervals = issuer_effective_as
.and_then(|resources| resources.rdi.as_ref().map(as_choice_to_merged_intervals));
Ok(Self {
parent_ip_by_afi_items,
@ -512,7 +510,9 @@ fn resolve_child_ip_resources(
child_ip: Option<&IpResourceSet>,
issuer_effective: Option<&IpResourceSet>,
) -> Result<Option<IpResourceSet>, CaPathError> {
let precomputed_parent_by_afi = issuer_effective.map(ip_resources_by_afi_items).transpose()?;
let precomputed_parent_by_afi = issuer_effective
.map(ip_resources_by_afi_items)
.transpose()?;
let precomputed_parent_intervals = issuer_effective
.map(ip_resources_to_merged_intervals_by_afi)
.unwrap_or_default();
@ -527,7 +527,9 @@ fn resolve_child_ip_resources(
fn resolve_child_ip_resources_indexed(
child_ip: Option<&IpResourceSet>,
issuer_effective: Option<&IpResourceSet>,
parent_by_afi: Option<&BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>>,
parent_by_afi: Option<
&BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>,
>,
parent_intervals_by_afi: &HashMap<crate::data_model::rc::Afi, Vec<(Vec<u8>, Vec<u8>)>>,
) -> Result<Option<IpResourceSet>, CaPathError> {
let Some(child_ip) = child_ip else {
@ -583,15 +585,10 @@ fn resolve_child_as_resources(
child_as: Option<&AsResourceSet>,
issuer_effective: Option<&AsResourceSet>,
) -> Result<Option<AsResourceSet>, CaPathError> {
let precomputed_asnum = issuer_effective.and_then(|resources| {
resources
.asnum
.as_ref()
.map(as_choice_to_merged_intervals)
});
let precomputed_rdi = issuer_effective.and_then(|resources| {
resources.rdi.as_ref().map(as_choice_to_merged_intervals)
});
let precomputed_asnum = issuer_effective
.and_then(|resources| resources.asnum.as_ref().map(as_choice_to_merged_intervals));
let precomputed_rdi = issuer_effective
.and_then(|resources| resources.rdi.as_ref().map(as_choice_to_merged_intervals));
resolve_child_as_resources_indexed(
child_as,
issuer_effective,
@ -974,8 +971,8 @@ mod tests {
use super::*;
use crate::data_model::common::X509NameDer;
use crate::data_model::rc::{
Afi, AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpAddressFamily,
IpAddressOrRange, IpResourceSet,
Afi, AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpAddressFamily, IpAddressOrRange,
IpResourceSet,
};
use crate::data_model::rc::{
RcExtensions, ResourceCertKind, ResourceCertificate, RpkixTbsCertificate,
@ -1470,17 +1467,30 @@ mod tests {
}],
};
let parent_as = AsResourceSet {
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Range {
min: 64500,
max: 64599,
}])),
rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(65000)])),
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![
AsIdOrRange::Range {
min: 64500,
max: 64599,
},
])),
rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
65000,
)])),
};
let idx = IssuerEffectiveResourcesIndex::from_effective_resources(Some(&parent_ip), Some(&parent_as))
.expect("index builds");
assert_eq!(idx.parent_ip_by_afi_items.as_ref().map(|v| v.len()), Some(1));
let idx = IssuerEffectiveResourcesIndex::from_effective_resources(
Some(&parent_ip),
Some(&parent_as),
)
.expect("index builds");
assert_eq!(
idx.parent_ip_by_afi_items.as_ref().map(|v| v.len()),
Some(1)
);
assert_eq!(idx.parent_ip_merged_intervals.len(), 1);
assert_eq!(idx.parent_asnum_intervals.as_ref().map(|v| v.len()), Some(1));
assert_eq!(
idx.parent_asnum_intervals.as_ref().map(|v| v.len()),
Some(1)
);
assert_eq!(idx.parent_rdi_intervals.as_ref().map(|v| v.len()), Some(1));
let child_ip_subset = IpResourceSet {
@ -1495,14 +1505,16 @@ mod tests {
)]),
}],
};
assert!(resolve_child_ip_resources_indexed(
Some(&child_ip_subset),
Some(&parent_ip),
idx.parent_ip_by_afi_items.as_ref(),
&idx.parent_ip_merged_intervals,
)
assert!(
resolve_child_ip_resources_indexed(
Some(&child_ip_subset),
Some(&parent_ip),
idx.parent_ip_by_afi_items.as_ref(),
&idx.parent_ip_merged_intervals,
)
.expect("subset should resolve")
.is_some());
.is_some()
);
let child_ip_bad = IpResourceSet {
families: vec![IpAddressFamily {
@ -1522,24 +1534,32 @@ mod tests {
idx.parent_ip_by_afi_items.as_ref(),
&idx.parent_ip_merged_intervals,
)
.unwrap_err();
.unwrap_err();
assert!(matches!(err, CaPathError::ResourcesNotSubset));
let child_as_subset = AsResourceSet {
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(64542)])),
rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(65000)])),
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
64542,
)])),
rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
65000,
)])),
};
assert!(resolve_child_as_resources_indexed(
Some(&child_as_subset),
Some(&parent_as),
idx.parent_asnum_intervals.as_deref(),
idx.parent_rdi_intervals.as_deref(),
)
assert!(
resolve_child_as_resources_indexed(
Some(&child_as_subset),
Some(&parent_as),
idx.parent_asnum_intervals.as_deref(),
idx.parent_rdi_intervals.as_deref(),
)
.expect("subset as resolves")
.is_some());
.is_some()
);
let child_as_bad = AsResourceSet {
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(65123)])),
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
65123,
)])),
rdi: None,
};
let err = resolve_child_as_resources_indexed(
@ -1548,7 +1568,7 @@ mod tests {
idx.parent_asnum_intervals.as_deref(),
idx.parent_rdi_intervals.as_deref(),
)
.unwrap_err();
.unwrap_err();
assert!(matches!(err, CaPathError::ResourcesNotSubset));
}

View File

@ -196,7 +196,9 @@ fn rsync_parent_uri(ta_rsync_uri: &str) -> Result<String, String> {
.ok_or_else(|| format!("missing path in rsync URI: {ta_rsync_uri}"))?
.collect::<Vec<_>>();
if segments.is_empty() || segments.last().copied().unwrap_or_default().is_empty() {
return Err(format!("rsync URI must reference a file object: {ta_rsync_uri}"));
return Err(format!(
"rsync URI must reference a file object: {ta_rsync_uri}"
));
}
let parent_segments = &segments[..segments.len() - 1];
let mut parent = format!("rsync://{host}/");
@ -248,29 +250,32 @@ mod tests {
.clone();
let td = tempfile::tempdir().unwrap();
let mirror_root = td.path().join(rsync_uri.host_str().unwrap()).join("repository");
let mirror_root = td
.path()
.join(rsync_uri.host_str().unwrap())
.join("repository");
std::fs::create_dir_all(&mirror_root).unwrap();
std::fs::write(
mirror_root.join("apnic-rpki-root-iana-origin.cer"),
ta_der,
)
.unwrap();
std::fs::write(mirror_root.join("apnic-rpki-root-iana-origin.cer"), ta_der).unwrap();
let http = crate::fetch::http::BlockingHttpFetcher::new(
crate::fetch::http::HttpFetcherConfig::default(),
)
.unwrap();
let rsync = LocalDirRsyncFetcher::new(
td.path().join(rsync_uri.host_str().unwrap()).join("repository"),
td.path()
.join(rsync_uri.host_str().unwrap())
.join("repository"),
);
let discovery = discover_root_ca_instance_from_tal_with_fetchers(&http, &rsync, tal, None)
.expect("discover via rsync TA");
assert!(discovery
.trust_anchor
.resolved_ta_uri
.unwrap()
.as_str()
.starts_with("rsync://"));
assert!(
discovery
.trust_anchor
.resolved_ta_uri
.unwrap()
.as_str()
.starts_with("rsync://")
);
}
}

View File

@ -1,3 +1,4 @@
use crate::blob_store::RawObjectStore;
use crate::data_model::manifest::{ManifestDecodeError, ManifestObject, ManifestValidateError};
use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{CaFailedFetchPolicy, Policy};
@ -448,8 +449,8 @@ pub fn load_current_instance_vcir_publication_point(
)
})?;
let manifest_entry = store
.get_raw_by_hash_entry(&manifest_artifact.sha256)?
let manifest_bytes = store
.get_blob_bytes(&manifest_artifact.sha256)?
.ok_or_else(|| ManifestReuseError::MissingManifestRaw(manifest_artifact.sha256.clone()))?;
let mut seen = HashSet::new();
@ -468,12 +469,12 @@ pub fn load_current_instance_vcir_publication_point(
if !seen.insert(uri.clone()) {
continue;
}
let entry = store
.get_raw_by_hash_entry(&artifact.sha256)?
let entry_bytes = store
.get_blob_bytes(&artifact.sha256)?
.ok_or_else(|| ManifestReuseError::MissingArtifactRaw {
rsync_uri: uri.clone(),
})?;
files.push(PackFile::from_bytes_compute_sha256(uri, entry.bytes));
files.push(PackFile::from_bytes_compute_sha256(uri, entry_bytes));
}
Ok(PublicationPointSnapshot {
@ -493,7 +494,7 @@ pub fn load_current_instance_vcir_publication_point(
.validated_manifest_next_update
.clone(),
verified_at: vcir.last_successful_validation_time.clone(),
manifest_bytes: manifest_entry.bytes,
manifest_bytes,
files,
})
}
@ -836,12 +837,7 @@ mod tests {
entry
}
fn put_current_object(
store: &RocksStore,
rsync_uri: &str,
bytes: Vec<u8>,
object_type: &str,
) {
fn put_current_object(store: &RocksStore, rsync_uri: &str, bytes: Vec<u8>, object_type: &str) {
let hash = hex::encode(sha2::Sha256::digest(&bytes));
store
.put_raw_by_hash_entry(&raw_by_hash_entry(rsync_uri, bytes, object_type))

View File

@ -11,9 +11,7 @@ use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{Policy, SignedObjectFailurePolicy};
use crate::report::{RfcRef, Warning};
use crate::storage::{PackFile, PackTime, VcirLocalOutput, VcirOutputType};
use crate::validation::cert_path::{
CertPathError, validate_ee_cert_path_with_predecoded_ee,
};
use crate::validation::cert_path::{CertPathError, validate_ee_cert_path_with_predecoded_ee};
use crate::validation::manifest::PublicationPointData;
use crate::validation::publication_point::PublicationPointSnapshot;
use x509_parser::prelude::FromDer;
@ -603,11 +601,7 @@ fn process_roa_with_issuer(
let ee = &roa.signed_object.signed_data.certificates[0].resource_cert;
let ee_der = &roa.signed_object.signed_data.certificates[0].raw_der;
let ee_crldp_uris = ee
.tbs
.extensions
.crl_distribution_points_uris
.as_ref();
let ee_crldp_uris = ee.tbs.extensions.crl_distribution_points_uris.as_ref();
let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?;
let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?;
@ -715,11 +709,7 @@ fn process_aspa_with_issuer(
let ee = &aspa.signed_object.signed_data.certificates[0].resource_cert;
let ee_der = &aspa.signed_object.signed_data.certificates[0].raw_der;
let ee_crldp_uris = ee
.tbs
.extensions
.crl_distribution_points_uris
.as_ref();
let ee_crldp_uris = ee.tbs.extensions.crl_distribution_points_uris.as_ref();
let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?;
let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?;
@ -794,7 +784,6 @@ fn process_aspa_with_issuer(
Ok((attestation, local_output))
}
fn vrp_prefix_to_string(vrp: &Vrp) -> String {
let prefix = &vrp.prefix;
match prefix.afi {

View File

@ -15,8 +15,7 @@ use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher;
use crate::sync::rrdp::Fetcher;
use crate::validation::from_tal::{
DiscoveredRootCaInstance, FromTalError, discover_root_ca_instance_from_tal_and_ta_der,
discover_root_ca_instance_from_tal_with_fetchers,
discover_root_ca_instance_from_tal_url,
discover_root_ca_instance_from_tal_url, discover_root_ca_instance_from_tal_with_fetchers,
};
use crate::validation::tree::{
CaInstanceHandle, TreeRunAuditOutput, TreeRunConfig, TreeRunError, TreeRunOutput,
@ -307,8 +306,12 @@ pub fn run_tree_from_tal_bytes_serial_audit(
config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let tal = crate::data_model::tal::Tal::decode_bytes(tal_bytes).map_err(FromTalError::from)?;
let discovery =
discover_root_ca_instance_from_tal_with_fetchers(http_fetcher, rsync_fetcher, tal, tal_uri)?;
let discovery = discover_root_ca_instance_from_tal_with_fetchers(
http_fetcher,
rsync_fetcher,
tal,
tal_uri,
)?;
let download_log = DownloadLogHandle::new();
let runner = Rpkiv1PublicationPointRunner {
@ -362,8 +365,12 @@ pub fn run_tree_from_tal_bytes_serial_audit_with_timing(
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let _tal = timing.span_phase("tal_bootstrap");
let tal = crate::data_model::tal::Tal::decode_bytes(tal_bytes).map_err(FromTalError::from)?;
let discovery =
discover_root_ca_instance_from_tal_with_fetchers(http_fetcher, rsync_fetcher, tal, tal_uri)?;
let discovery = discover_root_ca_instance_from_tal_with_fetchers(
http_fetcher,
rsync_fetcher,
tal,
tal_uri,
)?;
drop(_tal);
let download_log = DownloadLogHandle::new();
@ -531,8 +538,11 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial(
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
ReplayArchiveIndex::load_allow_missing_rsync_modules(
payload_archive_root,
payload_locks_path,
)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
);
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?;
@ -579,8 +589,11 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
ReplayArchiveIndex::load_allow_missing_rsync_modules(
payload_archive_root,
payload_locks_path,
)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
);
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?;
@ -642,8 +655,11 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing(
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
drop(_tal);
let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(payload_archive_root, payload_locks_path)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
ReplayArchiveIndex::load_allow_missing_rsync_modules(
payload_archive_root,
payload_locks_path,
)
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?,
);
let http_fetcher = PayloadReplayHttpFetcher::new(replay_index.clone())
.map_err(|e| RunTreeFromTalError::Replay(e.to_string()))?;
@ -1088,10 +1104,12 @@ mod replay_api_tests {
.expect("read apnic tal fixture");
let ta_der =
std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
let archive_root =
std::path::PathBuf::from("../../rpki/target/live/20260316-112341-multi-final3/apnic/base-payload-archive");
let locks_path =
std::path::PathBuf::from("../../rpki/target/live/20260316-112341-multi-final3/apnic/base-locks.json");
let archive_root = std::path::PathBuf::from(
"../../rpki/target/live/20260316-112341-multi-final3/apnic/base-payload-archive",
);
let locks_path = std::path::PathBuf::from(
"../../rpki/target/live/20260316-112341-multi-final3/apnic/base-locks.json",
);
let validation_time = time::OffsetDateTime::parse("2026-03-16T11:49:48+08:00", &Rfc3339)
.expect("parse validation time");
(tal_bytes, ta_der, archive_root, locks_path, validation_time)
@ -1160,16 +1178,14 @@ mod replay_api_tests {
let temp = tempfile::tempdir().expect("tempdir");
let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db");
let (tal_bytes, ta_der, archive_root, locks_path, validation_time) = apnic_replay_inputs();
assert!(
archive_root.is_dir(),
"payload replay archive missing: {}",
archive_root.display()
);
assert!(
locks_path.is_file(),
"payload replay locks missing: {}",
locks_path.display()
);
if !archive_root.is_dir() || !locks_path.is_file() {
eprintln!(
"skipping payload replay api test; missing fixtures: archive={} locks={}",
archive_root.display(),
locks_path.display()
);
return;
}
let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&store,
@ -1198,15 +1214,22 @@ mod replay_api_tests {
);
}
#[test]
fn payload_replay_api_root_only_apnic_multi_rir_bundle_runs_with_lenient_rsync_modules() {
let temp = tempfile::tempdir().expect("tempdir");
let store = crate::storage::RocksStore::open(&temp.path().join("db")).expect("open db");
let (tal_bytes, ta_der, archive_root, locks_path, validation_time) =
apnic_multi_rir_replay_inputs();
assert!(archive_root.is_dir(), "payload replay archive missing: {}", archive_root.display());
assert!(locks_path.is_file(), "payload replay locks missing: {}", locks_path.display());
assert!(
archive_root.is_dir(),
"payload replay archive missing: {}",
archive_root.display()
);
assert!(
locks_path.is_file(),
"payload replay locks missing: {}",
locks_path.display()
);
let out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&store,
@ -1235,16 +1258,14 @@ mod replay_api_tests {
let db_path = temp.path().join("db");
let store = crate::storage::RocksStore::open(&db_path).expect("open db");
let (tal_bytes, ta_der, archive_root, locks_path, validation_time) = apnic_replay_inputs();
assert!(
archive_root.is_dir(),
"payload replay archive missing: {}",
archive_root.display()
);
assert!(
locks_path.is_file(),
"payload replay locks missing: {}",
locks_path.display()
);
if !archive_root.is_dir() || !locks_path.is_file() {
eprintln!(
"skipping payload replay api timing test; missing fixtures: archive={} locks={}",
archive_root.display(),
locks_path.display()
);
return;
}
let timing = TimingHandle::new(TimingMeta {
recorded_at_utc_rfc3339: "2026-03-13T03:00:00Z".to_string(),
@ -1366,26 +1387,20 @@ mod replay_api_tests {
delta_locks,
validation_time,
) = apnic_delta_replay_inputs();
assert!(
base_archive.is_dir(),
"base archive missing: {}",
base_archive.display()
);
assert!(
base_locks.is_file(),
"base locks missing: {}",
base_locks.display()
);
assert!(
delta_archive.is_dir(),
"delta archive missing: {}",
delta_archive.display()
);
assert!(
delta_locks.is_file(),
"delta locks missing: {}",
delta_locks.display()
);
if !base_archive.is_dir()
|| !base_locks.is_file()
|| !delta_archive.is_dir()
|| !delta_locks.is_file()
{
eprintln!(
"skipping payload delta replay api test; missing fixtures: base_archive={} base_locks={} delta_archive={} delta_locks={}",
base_archive.display(),
base_locks.display(),
delta_archive.display(),
delta_locks.display()
);
return;
}
let out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
&store,
@ -1425,6 +1440,20 @@ mod replay_api_tests {
delta_locks,
validation_time,
) = apnic_delta_replay_inputs();
if !base_archive.is_dir()
|| !base_locks.is_file()
|| !delta_archive.is_dir()
|| !delta_locks.is_file()
{
eprintln!(
"skipping payload delta replay timing test; missing fixtures: base_archive={} base_locks={} delta_archive={} delta_locks={}",
base_archive.display(),
base_locks.display(),
delta_archive.display(),
delta_locks.display()
);
return;
}
let timing = TimingHandle::new(TimingMeta {
recorded_at_utc_rfc3339: "2026-03-16T00:00:00Z".to_string(),
validation_time_utc_rfc3339: "2026-03-15T10:00:00Z".to_string(),

View File

@ -10,8 +10,8 @@ use crate::data_model::manifest::ManifestObject;
use crate::data_model::rc::ResourceCertificate;
use crate::data_model::roa::{RoaAfi, RoaObject};
use crate::data_model::router_cert::{
BgpsecRouterCertificate, BgpsecRouterCertificateDecodeError,
BgpsecRouterCertificatePathError, BgpsecRouterCertificateProfileError,
BgpsecRouterCertificate, BgpsecRouterCertificateDecodeError, BgpsecRouterCertificatePathError,
BgpsecRouterCertificateProfileError,
};
use crate::fetch::rsync::RsyncFetcher;
use crate::policy::Policy;
@ -19,10 +19,9 @@ use crate::replay::archive::ReplayArchiveIndex;
use crate::replay::delta_archive::ReplayDeltaArchiveIndex;
use crate::report::{RfcRef, Warning};
use crate::storage::{
PackFile, PackTime, RawByHashEntry, RocksStore,
ValidatedCaInstanceResult, VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus,
VcirAuditSummary, VcirChildEntry, VcirInstanceGate, VcirLocalOutput, VcirOutputType,
VcirRelatedArtifact, VcirSummary,
PackFile, PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult, VcirArtifactKind,
VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry,
VcirInstanceGate, VcirLocalOutput, VcirOutputType, VcirRelatedArtifact, VcirSummary,
};
use crate::sync::repo::{
sync_publication_point, sync_publication_point_replay, sync_publication_point_replay_delta,
@ -37,7 +36,9 @@ use crate::validation::manifest::{
ManifestFreshError, PublicationPointData, PublicationPointSource,
process_manifest_publication_point_fresh_after_repo_sync,
};
use crate::validation::objects::{AspaAttestation, RouterKeyPayload, Vrp, process_publication_point_for_issuer};
use crate::validation::objects::{
AspaAttestation, RouterKeyPayload, Vrp, process_publication_point_for_issuer,
};
use crate::validation::publication_point::PublicationPointSnapshot;
use crate::validation::tree::{
CaInstanceHandle, DiscoveredChildCaInstance, PublicationPointRunResult, PublicationPointRunner,
@ -45,8 +46,8 @@ use crate::validation::tree::{
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex};
use serde::Deserialize;
use base64::Engine as _;
use serde::Deserialize;
use serde_json::json;
use x509_parser::prelude::FromDer;
use x509_parser::x509::SubjectPublicKeyInfo;
@ -168,13 +169,19 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
}
let repo_sync_started = std::time::Instant::now();
let (repo_sync_ok, repo_sync_err, repo_sync_source): (bool, Option<String>, Option<String>) = if skip_sync_due_to_dedup {
let (repo_sync_ok, repo_sync_err, repo_sync_source, repo_sync_phase): (
bool,
Option<String>,
Option<String>,
Option<String>,
) = if skip_sync_due_to_dedup {
let source = if effective_notification_uri.is_some() {
Some("rrdp_dedup_skip".to_string())
} else {
Some("rsync_dedup_skip".to_string())
};
(true, None, source)
let phase = source.clone();
(true, None, source, phase)
} else {
let repo_key = effective_notification_uri.unwrap_or_else(|| ca.rsync_base_uri.as_str());
let _repo_total = self
@ -249,7 +256,12 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
}
warnings.extend(res.warnings);
(true, None, Some(repo_sync_source_label(res.source).to_string()))
(
true,
None,
Some(repo_sync_source_label(res.source).to_string()),
Some(repo_sync_phase_label(res.phase).to_string()),
)
}
Err(e) => {
if attempted_rrdp && self.rrdp_dedup {
@ -267,7 +279,19 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
.with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5"), RfcRef("RFC 9286 §6.6")])
.with_context(&ca.rsync_base_uri),
);
(false, Some(e.to_string()), None)
(
false,
Some(e.to_string()),
None,
Some(
repo_sync_failure_phase_label(
attempted_rrdp,
original_notification_uri,
effective_notification_uri,
)
.to_string(),
),
)
}
}
};
@ -279,6 +303,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_ok": repo_sync_ok,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
}),
@ -367,6 +392,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
ca,
PublicationPointSource::Fresh,
repo_sync_source.as_deref(),
repo_sync_phase.as_deref(),
Some(repo_sync_duration_ms),
repo_sync_err.as_deref(),
&pack,
@ -390,6 +416,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": "fresh",
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms,
"warning_count": result.warnings.len(),
@ -399,7 +426,8 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"child_count": result.discovered_children.len(),
}),
);
if (total_duration_ms as f64) / 1000.0 >= crate::progress_log::slow_threshold_secs() {
if (total_duration_ms as f64) / 1000.0 >= crate::progress_log::slow_threshold_secs()
{
crate::progress_log::emit(
"publication_point_slow",
serde_json::json!({
@ -407,6 +435,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": "fresh",
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms,
}),
@ -424,11 +453,25 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": "error",
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms,
"error": fresh_err.to_string(),
}),
);
crate::progress_log::emit(
"repo_terminal_failure",
serde_json::json!({
"manifest_rsync_uri": ca.manifest_rsync_uri,
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
"terminal_state": "stop_all_output",
"error": fresh_err.to_string(),
}),
);
Err(format!("{fresh_err}"))
}
crate::policy::CaFailedFetchPolicy::ReuseCurrentInstanceVcir => {
@ -444,6 +487,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
ca,
projection.source,
repo_sync_source.as_deref(),
repo_sync_phase.as_deref(),
Some(repo_sync_duration_ms),
repo_sync_err.as_deref(),
projection.vcir.as_ref(),
@ -468,6 +512,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": source_label(result.source),
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms,
"warning_count": result.warnings.len(),
@ -477,7 +522,55 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"child_count": result.discovered_children.len(),
}),
);
if (total_duration_ms as f64) / 1000.0 >= crate::progress_log::slow_threshold_secs() {
match result.source {
PublicationPointSource::VcirCurrentInstance if !repo_sync_ok => {
crate::progress_log::emit(
"rsync_failed_fallback_current_instance",
serde_json::json!({
"manifest_rsync_uri": ca.manifest_rsync_uri,
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
"terminal_state": "fallback_current_instance",
}),
);
}
PublicationPointSource::FailedFetchNoCache => {
if !repo_sync_ok {
crate::progress_log::emit(
"rsync_failed_no_cache",
serde_json::json!({
"manifest_rsync_uri": ca.manifest_rsync_uri,
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
"terminal_state": "failed_no_cache",
}),
);
}
crate::progress_log::emit(
"repo_terminal_failure",
serde_json::json!({
"manifest_rsync_uri": ca.manifest_rsync_uri,
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_error": repo_sync_err,
"repo_sync_duration_ms": repo_sync_duration_ms,
"terminal_state": "failed_no_cache",
}),
);
}
PublicationPointSource::Fresh => {}
PublicationPointSource::VcirCurrentInstance => {}
}
if (total_duration_ms as f64) / 1000.0
>= crate::progress_log::slow_threshold_secs()
{
crate::progress_log::emit(
"publication_point_slow",
serde_json::json!({
@ -485,6 +578,7 @@ impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
"publication_point_rsync_uri": ca.publication_point_rsync_uri,
"source": source_label(result.source),
"repo_sync_source": repo_sync_source,
"repo_sync_phase": repo_sync_phase,
"repo_sync_duration_ms": repo_sync_duration_ms,
"total_duration_ms": total_duration_ms,
}),
@ -856,9 +950,7 @@ fn discover_children_from_fresh_snapshot_with_audit<P: PublicationPointData>(
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::RouterCertificate,
result: AuditObjectResult::Error,
detail: Some(format!(
"router certificate validation failed: {err}"
)),
detail: Some(format!("router certificate validation failed: {err}")),
});
}
}
@ -977,7 +1069,10 @@ fn discover_children_from_fresh_snapshot_with_audit<P: PublicationPointData>(
t.record_phase_nanos("child_select_issuer_crl_total", select_crl_nanos);
t.record_phase_nanos("child_decode_certificate_total", child_decode_nanos);
t.record_phase_nanos("child_validate_subordinate_total", validate_sub_ca_nanos);
t.record_phase_nanos("child_validate_router_certificate_total", validate_router_nanos);
t.record_phase_nanos(
"child_validate_router_certificate_total",
validate_router_nanos,
);
t.record_phase_nanos("child_ca_instance_uri_discovery_total", uri_discovery_nanos);
t.record_phase_nanos("child_enqueue_total", enqueue_nanos);
}
@ -1150,6 +1245,43 @@ fn source_label(source: PublicationPointSource) -> String {
}
}
fn repo_sync_phase_label(phase: crate::sync::repo::RepoSyncPhase) -> &'static str {
match phase {
crate::sync::repo::RepoSyncPhase::RrdpOk => "rrdp_ok",
crate::sync::repo::RepoSyncPhase::RrdpFailedRsyncOk => "rrdp_failed_rsync_ok",
crate::sync::repo::RepoSyncPhase::RsyncOnlyOk => "rsync_only_ok",
crate::sync::repo::RepoSyncPhase::ReplayRrdpOk => "replay_rrdp_ok",
crate::sync::repo::RepoSyncPhase::ReplayRsyncOk => "replay_rsync_ok",
crate::sync::repo::RepoSyncPhase::ReplayNoopRrdp => "replay_noop_rrdp",
crate::sync::repo::RepoSyncPhase::ReplayNoopRsync => "replay_noop_rsync",
}
}
fn repo_sync_failure_phase_label(
attempted_rrdp: bool,
original_notification_uri: Option<&str>,
effective_notification_uri: Option<&str>,
) -> &'static str {
if attempted_rrdp && original_notification_uri.is_some() && effective_notification_uri.is_some() {
"rrdp_failed_rsync_failed"
} else if attempted_rrdp
&& original_notification_uri.is_some()
&& effective_notification_uri.is_none()
{
"rsync_only_failed_after_rrdp_dedup"
} else {
"rsync_only_failed"
}
}
fn terminal_state_label(source: PublicationPointSource) -> &'static str {
match source {
PublicationPointSource::Fresh => "fresh",
PublicationPointSource::VcirCurrentInstance => "fallback_current_instance",
PublicationPointSource::FailedFetchNoCache => "failed_no_cache",
}
}
fn repo_sync_source_label(source: crate::sync::repo::RepoSyncSource) -> &'static str {
match source {
crate::sync::repo::RepoSyncSource::Rrdp => "rrdp",
@ -1182,6 +1314,7 @@ fn build_publication_point_audit_from_snapshot(
ca: &CaInstanceHandle,
source: PublicationPointSource,
repo_sync_source: Option<&str>,
repo_sync_phase: Option<&str>,
repo_sync_duration_ms: Option<u64>,
repo_sync_error: Option<&str>,
pack: &PublicationPointSnapshot,
@ -1275,8 +1408,10 @@ fn build_publication_point_audit_from_snapshot(
rrdp_notification_uri: ca.rrdp_notification_uri.clone(),
source: source_label(source),
repo_sync_source: repo_sync_source.map(ToString::to_string),
repo_sync_phase: repo_sync_phase.map(ToString::to_string),
repo_sync_duration_ms,
repo_sync_error: repo_sync_error.map(ToString::to_string),
repo_terminal_state: terminal_state_label(source).to_string(),
this_update_rfc3339_utc: pack.this_update.rfc3339_utc.clone(),
next_update_rfc3339_utc: pack.next_update.rfc3339_utc.clone(),
verified_at_rfc3339_utc: pack.verified_at.rfc3339_utc.clone(),
@ -1289,6 +1424,7 @@ fn build_publication_point_audit_from_vcir(
ca: &CaInstanceHandle,
source: PublicationPointSource,
repo_sync_source: Option<&str>,
repo_sync_phase: Option<&str>,
repo_sync_duration_ms: Option<u64>,
repo_sync_error: Option<&str>,
vcir: Option<&ValidatedCaInstanceResult>,
@ -1302,6 +1438,7 @@ fn build_publication_point_audit_from_vcir(
ca,
source,
repo_sync_source,
repo_sync_phase,
repo_sync_duration_ms,
repo_sync_error,
pack,
@ -1326,8 +1463,10 @@ fn build_publication_point_audit_from_vcir(
rrdp_notification_uri: ca.rrdp_notification_uri.clone(),
source: source_label(source),
repo_sync_source: repo_sync_source.map(ToString::to_string),
repo_sync_phase: repo_sync_phase.map(ToString::to_string),
repo_sync_duration_ms,
repo_sync_error: repo_sync_error.map(ToString::to_string),
repo_terminal_state: terminal_state_label(source).to_string(),
this_update_rfc3339_utc: String::new(),
next_update_rfc3339_utc: String::new(),
verified_at_rfc3339_utc: String::new(),
@ -1405,13 +1544,15 @@ fn build_publication_point_audit_from_vcir(
manifest_rsync_uri: ca.manifest_rsync_uri.clone(),
publication_point_rsync_uri: ca.publication_point_rsync_uri.clone(),
rrdp_notification_uri: ca.rrdp_notification_uri.clone(),
source: source_label(source),
repo_sync_source: repo_sync_source.map(ToString::to_string),
repo_sync_duration_ms,
repo_sync_error: repo_sync_error.map(ToString::to_string),
this_update_rfc3339_utc: vcir
.validated_manifest_meta
.validated_manifest_this_update
source: source_label(source),
repo_sync_source: repo_sync_source.map(ToString::to_string),
repo_sync_phase: repo_sync_phase.map(ToString::to_string),
repo_sync_duration_ms,
repo_sync_error: repo_sync_error.map(ToString::to_string),
repo_terminal_state: terminal_state_label(source).to_string(),
this_update_rfc3339_utc: vcir
.validated_manifest_meta
.validated_manifest_this_update
.rfc3339_utc
.clone(),
next_update_rfc3339_utc: vcir
@ -1790,7 +1931,9 @@ fn build_objects_output_from_vcir(
sha256_hex: local.source_object_hash.clone(),
kind: AuditObjectKind::RouterCertificate,
result: AuditObjectResult::Error,
detail: Some(format!("cached Router Key local output parse failed: {e}")),
detail: Some(format!(
"cached Router Key local output parse failed: {e}"
)),
},
);
}
@ -1830,9 +1973,10 @@ fn parse_vcir_aspa_output(local: &VcirLocalOutput) -> Result<AspaAttestation, St
fn parse_vcir_router_key_output(local: &VcirLocalOutput) -> Result<RouterKeyPayload, String> {
let payload: VcirRouterKeyPayload = serde_json::from_str(&local.payload_json)
.map_err(|e| format!("invalid Router Key payload JSON: {e}"))?;
let ski = hex::decode(&payload.ski_hex)
.map_err(|e| format!("invalid Router Key SKI hex: {e}"))?;
let spki_der = base64::engine::general_purpose::STANDARD.decode(&payload.spki_der_base64)
let ski =
hex::decode(&payload.ski_hex).map_err(|e| format!("invalid Router Key SKI hex: {e}"))?;
let spki_der = base64::engine::general_purpose::STANDARD
.decode(&payload.spki_der_base64)
.map_err(|e| format!("invalid Router Key SPKI base64: {e}"))?;
Ok(RouterKeyPayload {
as_id: payload.as_id,
@ -2255,7 +2399,8 @@ fn build_router_key_local_outputs(
.iter()
.map(|router_key| {
let ski_hex = hex::encode(&router_key.ski);
let spki_der_base64 = base64::engine::general_purpose::STANDARD.encode(&router_key.spki_der);
let spki_der_base64 =
base64::engine::general_purpose::STANDARD.encode(&router_key.spki_der);
let rule_hash = sha256_hex(
format!(
"router-key-rule:{}:{}:{}:{}",
@ -2587,7 +2732,6 @@ fn audit_result_to_vcir_status(result: &AuditObjectResult) -> VcirArtifactValida
}
}
fn roa_to_vrps_for_vcir(roa: &RoaObject) -> Vec<Vrp> {
let asn = roa.roa.as_id;
let mut out = Vec::new();
@ -2629,10 +2773,10 @@ mod tests {
use crate::fetch::rsync::LocalDirRsyncFetcher;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use crate::storage::{
PackFile, PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary,
VcirChildEntry, VcirInstanceGate, VcirLocalOutput, VcirOutputType, VcirRelatedArtifact,
VcirSummary,
PackFile, PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult,
ValidatedManifestMeta, VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus,
VcirAuditSummary, VcirChildEntry, VcirInstanceGate, VcirLocalOutput, VcirOutputType,
VcirRelatedArtifact, VcirSummary,
};
use crate::sync::rrdp::Fetcher;
use crate::validation::publication_point::PublicationPointSnapshot;
@ -2845,8 +2989,6 @@ authorityKeyIdentifier = keyid:always
}
}
struct GeneratedRouter {
issuer_ca_der: Vec<u8>,
router_der: Vec<u8>,
@ -3045,15 +3187,16 @@ authorityKeyIdentifier = keyid:always
}
}
fn cernet_publication_point_snapshot_for_vcir_tests(
) -> (PublicationPointSnapshot, Vec<u8>, time::OffsetDateTime) {
fn cernet_publication_point_snapshot_for_vcir_tests()
-> (PublicationPointSnapshot, Vec<u8>, time::OffsetDateTime) {
let dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0");
let rsync_base_uri = "rsync://rpki.cernet.net/repo/cernet/0/";
let manifest_file = "05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft";
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
let manifest_bytes = std::fs::read(dir.join(manifest_file)).expect("read manifest fixture");
let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
let manifest =
ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
let candidate = manifest.manifest.this_update + time::Duration::seconds(60);
let validation_time = if candidate < manifest.manifest.next_update {
candidate
@ -3320,7 +3463,7 @@ authorityKeyIdentifier = keyid:always
&crate::validation::objects::ObjectsOutput {
vrps: Vec::new(),
aspas: Vec::new(),
router_keys: Vec::new(),
router_keys: Vec::new(),
local_outputs_cache: cached.clone(),
warnings: Vec::new(),
stats: crate::validation::objects::ObjectsStats::default(),
@ -3333,22 +3476,32 @@ authorityKeyIdentifier = keyid:always
#[test]
fn collect_and_persist_vcir_embedded_evidence_for_real_signed_objects() {
let (pack, issuer_ca_der, validation_time) = cernet_publication_point_snapshot_for_vcir_tests();
let (pack, issuer_ca_der, validation_time) =
cernet_publication_point_snapshot_for_vcir_tests();
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca");
let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer(
&pack,
&Policy::default(),
issuer_ca_der.as_slice(),
Some("rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer"),
Some(
"rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
),
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
None,
);
assert!(!objects.local_outputs_cache.is_empty(), "expected local outputs from signed objects");
assert!(
!objects.local_outputs_cache.is_empty(),
"expected local outputs from signed objects"
);
let evidence = collect_vcir_embedded_evidence(&pack, &objects).expect("collect embedded evidence");
assert!(evidence.len() >= 2, "expected manifest EE and signed-object EE evidence");
let evidence =
collect_vcir_embedded_evidence(&pack, &objects).expect("collect embedded evidence");
assert!(
evidence.len() >= 2,
"expected manifest EE and signed-object EE evidence"
);
let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
@ -3375,15 +3528,19 @@ authorityKeyIdentifier = keyid:always
.get_raw_by_hash_entry(&issuer_hash)
.expect("load issuer raw entry")
.expect("issuer raw entry present");
assert!(issuer_entry
.origin_uris
.iter()
.any(|uri| uri.ends_with("BfycW4hQb3wNP4YsiJW-1n6fjro.cer")));
assert!(
issuer_entry
.origin_uris
.iter()
.any(|uri| uri.ends_with("BfycW4hQb3wNP4YsiJW-1n6fjro.cer"))
);
for entry in &evidence {
assert!(store
.get_raw_by_hash_entry(&entry.raw_entry.sha256_hex)
.expect("load evidence raw entry")
.is_some());
assert!(
store
.get_raw_by_hash_entry(&entry.raw_entry.sha256_hex)
.expect("load evidence raw entry")
.is_some()
);
}
}
@ -3411,7 +3568,9 @@ authorityKeyIdentifier = keyid:always
source_object_uri: "rsync://example.test/repo/issuer/router.cer".to_string(),
source_object_hash: "11".repeat(32),
source_ee_cert_hash: "11".repeat(32),
item_effective_until: PackTime { rfc3339_utc: "2026-12-31T00:00:00Z".to_string() },
item_effective_until: PackTime {
rfc3339_utc: "2026-12-31T00:00:00Z".to_string(),
},
}],
);
assert_eq!(outputs.len(), 1);
@ -3422,13 +3581,16 @@ authorityKeyIdentifier = keyid:always
#[test]
fn build_vcir_local_outputs_falls_back_to_decoding_accepted_objects_when_cache_is_empty() {
let (pack, issuer_ca_der, validation_time) = cernet_publication_point_snapshot_for_vcir_tests();
let (pack, issuer_ca_der, validation_time) =
cernet_publication_point_snapshot_for_vcir_tests();
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca");
let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer(
&pack,
&Policy::default(),
issuer_ca_der.as_slice(),
Some("rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer"),
Some(
"rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
),
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
@ -3455,20 +3617,25 @@ authorityKeyIdentifier = keyid:always
.expect("rebuild vcir local outputs");
assert!(!local_outputs.is_empty());
assert_eq!(local_outputs.len(), objects.vrps.len());
assert!(local_outputs
.iter()
.all(|output| output.output_type == VcirOutputType::Vrp));
assert!(
local_outputs
.iter()
.all(|output| output.output_type == VcirOutputType::Vrp)
);
}
#[test]
fn persist_vcir_for_fresh_result_stores_vcir_and_audit_indexes_for_real_snapshot() {
let (pack, issuer_ca_der, validation_time) = cernet_publication_point_snapshot_for_vcir_tests();
let (pack, issuer_ca_der, validation_time) =
cernet_publication_point_snapshot_for_vcir_tests();
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca");
let objects = crate::validation::objects::process_publication_point_snapshot_for_issuer(
&pack,
&Policy::default(),
issuer_ca_der.as_slice(),
Some("rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer"),
Some(
"rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
),
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
@ -3492,17 +3659,8 @@ authorityKeyIdentifier = keyid:always
rrdp_notification_uri: None,
};
persist_vcir_for_fresh_result(
&store,
&ca,
&pack,
&objects,
&[],
&[],
&[],
validation_time,
)
.expect("persist vcir for fresh result");
persist_vcir_for_fresh_result(&store, &ca, &pack, &objects, &[], &[], &[], validation_time)
.expect("persist vcir for fresh result");
let vcir = store
.get_vcir(&pack.manifest_rsync_uri)
@ -3511,10 +3669,15 @@ authorityKeyIdentifier = keyid:always
assert_eq!(vcir.manifest_rsync_uri, pack.manifest_rsync_uri);
assert_eq!(vcir.summary.local_vrp_count as usize, objects.vrps.len());
let first_output = vcir.local_outputs.first().expect("local outputs stored");
assert!(store
.get_audit_rule_index_entry(crate::storage::AuditRuleKind::Roa, &first_output.rule_hash)
.expect("get audit rule index entry")
.is_some());
assert!(
store
.get_audit_rule_index_entry(
crate::storage::AuditRuleKind::Roa,
&first_output.rule_hash
)
.expect("get audit rule index entry")
.is_some()
);
}
#[test]
@ -3574,7 +3737,9 @@ authorityKeyIdentifier = keyid:always
tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None,
ca_certificate_der: vec![0x11, 0x22],
ca_certificate_rsync_uri: Some("rsync://example.test/repo/issuer/issuer.cer".to_string()),
ca_certificate_rsync_uri: Some(
"rsync://example.test/repo/issuer/issuer.cer".to_string(),
),
effective_ip_resources: None,
effective_as_resources: None,
rsync_base_uri: pack.publication_point_rsync_uri.clone(),
@ -3625,21 +3790,37 @@ authorityKeyIdentifier = keyid:always
&[],
&embedded,
);
assert!(artifacts.iter().any(|artifact| artifact.artifact_role == VcirArtifactRole::Manifest));
assert!(artifacts.iter().any(|artifact| artifact.artifact_role == VcirArtifactRole::TrustAnchorCert));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/issuer.crl")
assert!(
artifacts
.iter()
.any(|artifact| artifact.artifact_role == VcirArtifactRole::Manifest)
);
assert!(
artifacts
.iter()
.any(|artifact| artifact.artifact_role == VcirArtifactRole::TrustAnchorCert)
);
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/issuer.crl")
&& artifact.artifact_role == VcirArtifactRole::CurrentCrl));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/child.cer")
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/child.cer")
&& artifact.artifact_role == VcirArtifactRole::ChildCaCert));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/a.roa")
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/a.roa")
&& artifact.validation_status == VcirArtifactValidationStatus::Rejected));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/a.asa")
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/a.asa")
&& artifact.validation_status == VcirArtifactValidationStatus::WarningOnly));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/a.gbr")
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/a.gbr")
&& artifact.artifact_kind == VcirArtifactKind::Gbr));
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref() == Some("rsync://example.test/repo/issuer/extra.bin")
assert!(artifacts.iter().any(|artifact| artifact.uri.as_deref()
== Some("rsync://example.test/repo/issuer/extra.bin")
&& artifact.artifact_kind == VcirArtifactKind::Other));
assert!(artifacts.iter().any(|artifact| artifact.uri.is_none() && artifact.sha256 == sha256_hex(b"embedded-ee")));
assert!(artifacts.iter().any(
|artifact| artifact.uri.is_none() && artifact.sha256 == sha256_hex(b"embedded-ee")
));
}
#[test]
@ -4130,7 +4311,11 @@ authorityKeyIdentifier = keyid:always
PublicationPointSource::Fresh | PublicationPointSource::VcirCurrentInstance
));
assert_eq!(calls.load(Ordering::SeqCst), 1, "module-scope dedup should skip second sync");
assert_eq!(
calls.load(Ordering::SeqCst),
1,
"module-scope dedup should skip second sync"
);
}
#[test]
@ -4385,12 +4570,15 @@ authorityKeyIdentifier = keyid:always
None,
None,
None,
None,
&pp.snapshot,
&[],
&objects,
&[],
);
assert_eq!(audit.source, "vcir_current_instance");
assert_eq!(audit.repo_sync_phase, None);
assert_eq!(audit.repo_terminal_state, "fallback_current_instance");
assert!(
audit
.objects
@ -4453,15 +4641,21 @@ authorityKeyIdentifier = keyid:always
let audit = build_publication_point_audit_from_snapshot(
&issuer,
pp.source,
None,
None,
None,
Some("rsync"),
Some("rsync_only_ok"),
Some(123),
Some("none"),
&pp.snapshot,
&[],
&objects,
&[],
);
assert_eq!(audit.objects[0].kind, AuditObjectKind::Manifest);
assert_eq!(audit.repo_sync_source.as_deref(), Some("rsync"));
assert_eq!(audit.repo_sync_phase.as_deref(), Some("rsync_only_ok"));
assert_eq!(audit.repo_sync_duration_ms, Some(123));
assert_eq!(audit.repo_sync_error.as_deref(), Some("none"));
assert_eq!(audit.repo_terminal_state, "fresh");
let crl = audit
.objects
@ -4484,7 +4678,6 @@ authorityKeyIdentifier = keyid:always
let _ = now;
}
#[test]
fn discover_children_with_router_certificate_records_ok_audit_and_no_child() {
let g = generate_router_cert_with_variant("ec-p256", true);
@ -4505,7 +4698,9 @@ authorityKeyIdentifier = keyid:always
tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None,
ca_certificate_der: g.issuer_ca_der.clone(),
ca_certificate_rsync_uri: Some("rsync://example.test/repo/issuer/issuer.cer".to_string()),
ca_certificate_rsync_uri: Some(
"rsync://example.test/repo/issuer/issuer.cer".to_string(),
),
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
@ -4524,11 +4719,13 @@ authorityKeyIdentifier = keyid:always
assert!(out.children.is_empty());
assert_eq!(out.audits.len(), 1);
assert!(matches!(out.audits[0].result, AuditObjectResult::Ok));
assert!(out.audits[0]
.detail
.as_deref()
.unwrap_or("")
.contains("validated BGPsec router certificate"));
assert!(
out.audits[0]
.detail
.as_deref()
.unwrap_or("")
.contains("validated BGPsec router certificate")
);
}
#[test]
@ -4551,7 +4748,9 @@ authorityKeyIdentifier = keyid:always
tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None,
ca_certificate_der: g.issuer_ca_der.clone(),
ca_certificate_rsync_uri: Some("rsync://example.test/repo/issuer/issuer.cer".to_string()),
ca_certificate_rsync_uri: Some(
"rsync://example.test/repo/issuer/issuer.cer".to_string(),
),
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
@ -4570,11 +4769,13 @@ authorityKeyIdentifier = keyid:always
assert!(out.children.is_empty());
assert_eq!(out.audits.len(), 1);
assert!(matches!(out.audits[0].result, AuditObjectResult::Skipped));
assert!(out.audits[0]
.detail
.as_deref()
.unwrap_or("")
.contains("not a CA resource certificate or BGPsec router certificate"));
assert!(
out.audits[0]
.detail
.as_deref()
.unwrap_or("")
.contains("not a CA resource certificate or BGPsec router certificate")
);
}
#[test]
@ -4597,7 +4798,9 @@ authorityKeyIdentifier = keyid:always
tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None,
ca_certificate_der: g.issuer_ca_der.clone(),
ca_certificate_rsync_uri: Some("rsync://example.test/repo/issuer/issuer.cer".to_string()),
ca_certificate_rsync_uri: Some(
"rsync://example.test/repo/issuer/issuer.cer".to_string(),
),
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
@ -4616,11 +4819,13 @@ authorityKeyIdentifier = keyid:always
assert!(out.children.is_empty());
assert_eq!(out.audits.len(), 1);
assert!(matches!(out.audits[0].result, AuditObjectResult::Error));
assert!(out.audits[0]
.detail
.as_deref()
.unwrap_or("")
.contains("router certificate validation failed"));
assert!(
out.audits[0]
.detail
.as_deref()
.unwrap_or("")
.contains("router certificate validation failed")
);
}
#[test]
@ -5210,9 +5415,10 @@ authorityKeyIdentifier = keyid:always
let audit = build_publication_point_audit_from_vcir(
&ca,
PublicationPointSource::VcirCurrentInstance,
None,
None,
None,
Some("rsync"),
Some("rrdp_failed_rsync_failed"),
Some(456),
Some("rsync failed"),
Some(&vcir),
None,
&runner_warnings,
@ -5221,6 +5427,14 @@ authorityKeyIdentifier = keyid:always
);
assert_eq!(audit.source, "vcir_current_instance");
assert_eq!(audit.repo_sync_source.as_deref(), Some("rsync"));
assert_eq!(
audit.repo_sync_phase.as_deref(),
Some("rrdp_failed_rsync_failed")
);
assert_eq!(audit.repo_sync_duration_ms, Some(456));
assert_eq!(audit.repo_sync_error.as_deref(), Some("rsync failed"));
assert_eq!(audit.repo_terminal_state, "fallback_current_instance");
assert_eq!(audit.objects[0].rsync_uri, vcir.current_manifest_rsync_uri);
assert_eq!(audit.objects[0].kind, AuditObjectKind::Manifest);
assert_eq!(
@ -5270,16 +5484,17 @@ authorityKeyIdentifier = keyid:always
let audit = build_publication_point_audit_from_vcir(
&ca,
PublicationPointSource::FailedFetchNoCache,
None,
None,
None,
Some("rsync"),
Some("rsync_only_failed"),
Some(789),
Some("load from network failed, fallback to cache"),
None,
None,
&[Warning::new("runner warning")],
&crate::validation::objects::ObjectsOutput {
vrps: Vec::new(),
aspas: Vec::new(),
router_keys: Vec::new(),
router_keys: Vec::new(),
local_outputs_cache: Vec::new(),
warnings: vec![Warning::new("object warning")],
stats: crate::validation::objects::ObjectsStats::default(),
@ -5289,6 +5504,14 @@ authorityKeyIdentifier = keyid:always
);
assert_eq!(audit.source, "failed_fetch_no_cache");
assert_eq!(audit.repo_sync_source.as_deref(), Some("rsync"));
assert_eq!(audit.repo_sync_phase.as_deref(), Some("rsync_only_failed"));
assert_eq!(audit.repo_sync_duration_ms, Some(789));
assert_eq!(
audit.repo_sync_error.as_deref(),
Some("load from network failed, fallback to cache")
);
assert_eq!(audit.repo_terminal_state, "failed_no_cache");
assert!(audit.this_update_rfc3339_utc.is_empty());
assert!(audit.next_update_rfc3339_utc.is_empty());
assert!(audit.verified_at_rfc3339_utc.is_empty());

View File

@ -311,7 +311,9 @@ fn landing_packfile_cbor_put(store: &RocksStore, obj_type: ObjType, sample: &str
entry.origin_uris.push(key);
entry.object_type = Some("cbor".to_string());
entry.encoding = Some("cbor".to_string());
store.put_raw_by_hash_entry(&entry).expect("store raw_by_hash");
store
.put_raw_by_hash_entry(&entry)
.expect("store raw_by_hash");
}
#[derive(Clone, Debug, serde::Serialize)]
@ -624,7 +626,9 @@ fn stage2_decode_validate_and_landing_benchmark_selected_der_v2() {
println!();
}
if mode.do_landing() {
println!("## landing (PackFile::from_bytes_compute_sha256 + CBOR + RocksDB current-state landing)");
println!(
"## landing (PackFile::from_bytes_compute_sha256 + CBOR + RocksDB current-state landing)"
);
println!();
println!("| type | sample | size_bytes | complexity | avg ns/op | ops/s |");
println!("|---|---|---:|---:|---:|---:|");

View File

@ -30,6 +30,7 @@ fn live_http_fetcher() -> BlockingHttpFetcher {
timeout: Duration::from_secs(timeout_secs),
large_body_timeout: Duration::from_secs(timeout_secs),
user_agent: "rpki-dev/0.1 (stage2 live rrdp delta test)".to_string(),
..HttpFetcherConfig::default()
})
.expect("http fetcher")
}

View File

@ -1,8 +1,8 @@
use rpki::ccr::{
AspaPayloadSet, AspaPayloadState, CcrContentInfo, CcrDigestAlgorithm, ManifestInstance,
ManifestState, RoaPayloadSet, RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState,
RpkiCanonicalCacheRepresentation, TrustAnchorState, compute_state_hash,
decode_content_info, encode::{
RpkiCanonicalCacheRepresentation, TrustAnchorState, compute_state_hash, decode_content_info,
encode::{
encode_manifest_state_payload_der, encode_router_key_state_payload_der,
encode_trust_anchor_state_payload_der,
},
@ -22,7 +22,8 @@ fn sample_time() -> time::OffsetDateTime {
#[test]
fn minimal_trust_anchor_ccr_roundtrips() {
let skis = vec![vec![0x11; 20], vec![0x22; 20]];
let skis_der = encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let skis_der =
encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let state = TrustAnchorState {
skis,
hash: compute_state_hash(&skis_der),
@ -47,7 +48,8 @@ fn minimal_trust_anchor_ccr_roundtrips() {
#[test]
fn decode_rejects_wrong_content_type_oid() {
let skis = vec![vec![0x11; 20]];
let skis_der = encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let skis_der =
encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let content_info = CcrContentInfo::new(RpkiCanonicalCacheRepresentation {
version: 0,
hash_alg: CcrDigestAlgorithm::Sha256,
@ -69,7 +71,10 @@ fn decode_rejects_wrong_content_type_oid() {
.expect("oid present");
der[pos + needle.len() - 1] ^= 0x01;
let err = decode_content_info(&der).expect_err("wrong content type must fail");
assert!(err.to_string().contains("unexpected contentType OID"), "{err}");
assert!(
err.to_string().contains("unexpected contentType OID"),
"{err}"
);
}
#[test]
@ -85,13 +90,17 @@ fn ccr_requires_at_least_one_state_aspect() {
rks: None,
});
let err = encode_content_info(&ccr).expect_err("empty state aspects must fail");
assert!(err.to_string().contains("at least one of mfts/vrps/vaps/tas/rks"));
assert!(
err.to_string()
.contains("at least one of mfts/vrps/vaps/tas/rks")
);
}
#[test]
fn state_hash_helpers_accept_matching_and_reject_tampered_payload() {
let skis = vec![vec![0x11; 20]];
let payload_der = encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let payload_der =
encode_trust_anchor_state_payload_der(&skis).expect("encode trust anchor payload");
let hash = compute_state_hash(&payload_der);
assert!(verify_state_hash(&hash, &payload_der));
let mut tampered = payload_der.clone();
@ -105,12 +114,15 @@ fn manifest_and_router_key_skeletons_encode_payloads_and_validate_sorting() {
hash: vec![0x33; 32],
size: 2048,
aki: vec![0x44; 20],
manifest_number: BigUnsigned { bytes_be: vec![0x01] },
manifest_number: BigUnsigned {
bytes_be: vec![0x01],
},
this_update: sample_time(),
locations: vec![vec![0x30, 0x00]],
subordinates: vec![vec![0x55; 20]],
}];
let mis_der = encode_manifest_state_payload_der(&manifest_instances).expect("encode manifest state payload");
let mis_der = encode_manifest_state_payload_der(&manifest_instances)
.expect("encode manifest state payload");
let manifest_state = ManifestState {
mis: manifest_instances,
most_recent_update: sample_time(),
@ -239,10 +251,16 @@ fn decode_rejects_wrong_digest_algorithm_oid() {
});
let mut der = encode_content_info(&ccr).expect("encode ccr");
let oid = rpki::data_model::oid::OID_SHA256_RAW;
let pos = der.windows(oid.len()).position(|w| w == oid).expect("sha256 oid present");
let pos = der
.windows(oid.len())
.position(|w| w == oid)
.expect("sha256 oid present");
der[pos + oid.len() - 1] ^= 0x01;
let err = decode_content_info(&der).expect_err("decode must reject wrong digest oid");
assert!(err.to_string().contains("unexpected digest algorithm OID"), "{err}");
assert!(
err.to_string().contains("unexpected digest algorithm OID"),
"{err}"
);
}
#[test]
@ -258,7 +276,10 @@ fn decode_rejects_bad_generalized_time() {
rks: None,
});
let mut der = encode_content_info(&ccr).expect("encode ccr");
let pos = der.windows(15).position(|w| w == b"20260324000000Z").expect("time present");
let pos = der
.windows(15)
.position(|w| w == b"20260324000000Z")
.expect("time present");
der[pos + 14] = b'X';
let err = decode_content_info(&der).expect_err("bad time must fail");
assert!(err.to_string().contains("GeneralizedTime"), "{err}");
@ -268,7 +289,9 @@ fn decode_rejects_bad_generalized_time() {
fn manifest_state_validate_rejects_unsorted_subordinates() {
let mut state = sample_manifest_state();
state.mis[0].subordinates = vec![vec![0x40; 20], vec![0x30; 20]];
let err = state.validate().expect_err("unsorted subordinates must fail");
let err = state
.validate()
.expect_err("unsorted subordinates must fail");
assert!(err.to_string().contains("subordinates"), "{err}");
}
@ -276,8 +299,14 @@ fn manifest_state_validate_rejects_unsorted_subordinates() {
fn roa_payload_state_validate_rejects_duplicate_asn_sets() {
let state = RoaPayloadState {
rps: vec![
RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![vec![0x30, 0x00]] },
RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![vec![0x30, 0x00]] },
RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![vec![0x30, 0x00]],
},
RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![vec![0x30, 0x00]],
},
],
hash: vec![0u8; 32],
};
@ -288,7 +317,10 @@ fn roa_payload_state_validate_rejects_duplicate_asn_sets() {
#[test]
fn aspa_payload_state_validate_rejects_unsorted_providers() {
let state = AspaPayloadState {
aps: vec![AspaPayloadSet { customer_as_id: 64496, providers: vec![64498, 64497] }],
aps: vec![AspaPayloadSet {
customer_as_id: 64496,
providers: vec![64498, 64497],
}],
hash: vec![0u8; 32],
};
let err = state.validate().expect_err("unsorted providers must fail");
@ -311,13 +343,21 @@ fn router_key_state_validate_rejects_unsorted_router_keys() {
rksets: vec![RouterKeySet {
as_id: 64496,
router_keys: vec![
RouterKey { ski: vec![0x42; 20], spki_der: vec![0x30, 0x00] },
RouterKey { ski: vec![0x41; 20], spki_der: vec![0x30, 0x00] },
RouterKey {
ski: vec![0x42; 20],
spki_der: vec![0x30, 0x00],
},
RouterKey {
ski: vec![0x41; 20],
spki_der: vec![0x30, 0x00],
},
],
}],
hash: vec![0u8; 32],
};
let err = state.validate().expect_err("unsorted router keys must fail");
let err = state
.validate()
.expect_err("unsorted router keys must fail");
assert!(err.to_string().contains("router_keys"), "{err}");
}
@ -332,6 +372,8 @@ fn manifest_instance_validate_rejects_bad_location_tag() {
locations: vec![vec![0x04, 0x00]],
subordinates: vec![],
};
let err = instance.validate().expect_err("bad AccessDescription tag must fail");
let err = instance
.validate()
.expect_err("bad AccessDescription tag must fail");
assert!(err.to_string().contains("unexpected tag"), "{err}");
}

View File

@ -1,19 +1,21 @@
use rpki::ccr::{
CcrContentInfo, CcrDigestAlgorithm, ManifestInstance, ManifestState, RoaPayloadSet,
RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState, TrustAnchorState,
compute_state_hash, decode_content_info, dump_content_info_json_value,
RoaPayloadState, RouterKey, RouterKeySet, RouterKeyState, TrustAnchorState, compute_state_hash,
decode_content_info, dump_content_info_json_value,
encode::{
encode_aspa_payload_state_payload_der, encode_content_info,
encode_manifest_state_payload_der, encode_roa_payload_state_payload_der,
encode_router_key_state_payload_der, encode_trust_anchor_state_payload_der,
},
verify::{verify_against_report_json_path, verify_against_vcir_store, verify_content_info_bytes},
verify::{
verify_against_report_json_path, verify_against_vcir_store, verify_content_info_bytes,
},
};
use rpki::data_model::common::BigUnsigned;
use rpki::storage::{
PackTime, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary,
VcirChildEntry, VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
PackTime, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind,
VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary, VcirChildEntry,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
};
fn sample_time() -> time::OffsetDateTime {
@ -45,28 +47,51 @@ fn sample_manifest_state() -> ManifestState {
fn sample_roa_state() -> RoaPayloadState {
let rps = vec![RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![vec![0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00]],
ip_addr_blocks: vec![vec![
0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00,
]],
}];
let der = encode_roa_payload_state_payload_der(&rps).expect("encode rps");
RoaPayloadState { rps, hash: compute_state_hash(&der) }
RoaPayloadState {
rps,
hash: compute_state_hash(&der),
}
}
fn sample_aspa_state() -> rpki::ccr::AspaPayloadState {
let aps = vec![rpki::ccr::AspaPayloadSet { customer_as_id: 64496, providers: vec![64497] }];
let aps = vec![rpki::ccr::AspaPayloadSet {
customer_as_id: 64496,
providers: vec![64497],
}];
let der = encode_aspa_payload_state_payload_der(&aps).expect("encode aps");
rpki::ccr::AspaPayloadState { aps, hash: compute_state_hash(&der) }
rpki::ccr::AspaPayloadState {
aps,
hash: compute_state_hash(&der),
}
}
fn sample_ta_state() -> TrustAnchorState {
let skis = vec![vec![0x11; 20]];
let der = encode_trust_anchor_state_payload_der(&skis).expect("encode skis");
TrustAnchorState { skis, hash: compute_state_hash(&der) }
TrustAnchorState {
skis,
hash: compute_state_hash(&der),
}
}
fn sample_rks() -> RouterKeyState {
let rksets = vec![RouterKeySet { as_id: 64496, router_keys: vec![RouterKey { ski: vec![0x22; 20], spki_der: vec![0x30, 0x00] }] }];
let rksets = vec![RouterKeySet {
as_id: 64496,
router_keys: vec![RouterKey {
ski: vec![0x22; 20],
spki_der: vec![0x30, 0x00],
}],
}];
let der = encode_router_key_state_payload_der(&rksets).expect("encode rk");
RouterKeyState { rksets, hash: compute_state_hash(&der) }
RouterKeyState {
rksets,
hash: compute_state_hash(&der),
}
}
fn sample_ccr() -> Vec<u8> {
@ -102,7 +127,10 @@ fn verify_content_info_bytes_rejects_tampered_manifest_hash() {
content_info.content.mfts.as_mut().unwrap().hash[0] ^= 0x01;
let der = encode_content_info(&content_info).expect("encode tampered ccr");
let err = verify_content_info_bytes(&der).expect_err("tampered hash must fail");
assert!(err.to_string().contains("ManifestState hash mismatch"), "{err}");
assert!(
err.to_string().contains("ManifestState hash mismatch"),
"{err}"
);
}
#[test]
@ -128,8 +156,21 @@ fn verify_against_report_json_path_rejects_mismatching_report() {
let mut ci = decode_content_info(&sample_ccr()).expect("decode ccr");
ci.content.vrps = Some(RoaPayloadState {
rps: vec![RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![vec![0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00]] }],
hash: compute_state_hash(&encode_roa_payload_state_payload_der(&[RoaPayloadSet { as_id: 64496, ip_addr_blocks: vec![vec![0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00]] }]).unwrap()),
rps: vec![RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![vec![
0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00,
]],
}],
hash: compute_state_hash(
&encode_roa_payload_state_payload_der(&[RoaPayloadSet {
as_id: 64496,
ip_addr_blocks: vec![vec![
0x30, 0x08, 0x04, 0x02, 0x00, 0x01, 0x30, 0x02, 0x03, 0x00,
]],
}])
.unwrap(),
),
});
verify_against_report_json_path(&ci, &report_path).expect_err("report mismatch expected");
}
@ -184,7 +225,7 @@ fn verify_against_vcir_store_matches_manifest_hashes() {
summary: VcirSummary {
local_vrp_count: 0,
local_aspa_count: 0,
local_router_key_count: 0,
local_router_key_count: 0,
child_count: 1,
accepted_object_count: 1,
rejected_object_count: 0,

View File

@ -1,4 +1,3 @@
use rpki::ccr::{
CcrContentInfo, CcrDigestAlgorithm, TrustAnchorState, compute_state_hash,
encode::{encode_content_info, encode_trust_anchor_state_payload_der},
@ -20,7 +19,10 @@ fn sample_ccr_file() -> (tempfile::TempDir, std::path::PathBuf) {
mfts: None,
vrps: None,
vaps: None,
tas: Some(TrustAnchorState { skis, hash: compute_state_hash(&skis_der) }),
tas: Some(TrustAnchorState {
skis,
hash: compute_state_hash(&skis_der),
}),
rks: None,
});
let path = dir.path().join("sample.ccr");
@ -36,7 +38,11 @@ fn ccr_dump_binary_prints_json_summary() {
.args(["--ccr", ccr_path.to_string_lossy().as_ref()])
.output()
.expect("run ccr_dump");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let json: serde_json::Value = serde_json::from_slice(&out.stdout).expect("parse json");
assert_eq!(json["version"], 0);
assert_eq!(json["state_aspects"]["tas"]["ski_count"], 1);
@ -50,22 +56,25 @@ fn ccr_verify_binary_prints_summary() {
.args(["--ccr", ccr_path.to_string_lossy().as_ref()])
.output()
.expect("run ccr_verify");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let json: serde_json::Value = serde_json::from_slice(&out.stdout).expect("parse json");
assert_eq!(json["version"], 0);
assert_eq!(json["trust_anchor_ski_count"], 1);
assert_eq!(json["state_hashes_ok"], true);
}
#[test]
fn ccr_to_routinator_csv_binary_writes_vrp_csv() {
use rpki::ccr::{
CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation,
build_roa_payload_state, encode::encode_content_info,
};
use rpki::validation::objects::Vrp;
use rpki::data_model::roa::{IpPrefix, RoaAfi};
use rpki::validation::objects::Vrp;
let dir = tempfile::tempdir().expect("tempdir");
let ccr_path = dir.path().join("vrp.ccr");
let csv_path = dir.path().join("out.csv");
@ -107,7 +116,11 @@ fn ccr_to_routinator_csv_binary_writes_vrp_csv() {
])
.output()
.expect("run ccr_to_routinator_csv");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let csv = std::fs::read_to_string(csv_path).expect("read csv");
assert!(csv.contains("ASN,IP Prefix,Max Length,Trust Anchor"));
assert!(csv.contains("AS64496,203.0.113.0/24,24,apnic"));
@ -171,7 +184,11 @@ fn ccr_to_compare_views_binary_writes_vrp_and_vap_csvs() {
])
.output()
.expect("run ccr_to_compare_views");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let vrps_csv = std::fs::read_to_string(vrps_path).expect("read vrps csv");
let vaps_csv = std::fs::read_to_string(vaps_path).expect("read vaps csv");
@ -234,7 +251,11 @@ fn ccr_to_compare_views_binary_writes_header_only_vap_csv_when_absent() {
])
.output()
.expect("run ccr_to_compare_views");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let vaps_csv = std::fs::read_to_string(vaps_path).expect("read vaps csv");
assert_eq!(vaps_csv, "Customer ASN,Providers,Trust Anchor\n");

View File

@ -2,12 +2,18 @@ use std::collections::BTreeSet;
use std::path::PathBuf;
use std::process::Command;
use rpki::ccr::{encode_content_info, CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState};
use rpki::cir::{encode_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1};
use rpki::ccr::{
CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState,
encode_content_info,
};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
};
#[test]
fn cir_full_and_delta_pair_reuses_shared_static_pool() {
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_record_full_delta.sh");
let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_record_full_delta.sh");
let out_dir = tempfile::tempdir().expect("tempdir");
let out = out_dir.path().join("cir-pair");
let fixture_root = out_dir.path().join("fixture");
@ -16,11 +22,7 @@ fn cir_full_and_delta_pair_reuses_shared_static_pool() {
std::fs::create_dir_all(&static_payload_root).unwrap();
let base_locks = fixture_root.join("base-locks.json");
let delta_locks = fixture_root.join("locks-delta.json");
std::fs::write(
&base_locks,
br#"{"validationTime":"2026-03-16T11:49:15Z"}"#,
)
.unwrap();
std::fs::write(&base_locks, br#"{"validationTime":"2026-03-16T11:49:15Z"}"#).unwrap();
std::fs::write(
&delta_locks,
br#"{"validationTime":"2026-03-16T11:50:15Z"}"#,
@ -84,7 +86,10 @@ fn cir_full_and_delta_pair_reuses_shared_static_pool() {
mfts: None,
vrps: None,
vaps: None,
tas: Some(TrustAnchorState { skis: vec![vec![0x11; 20]], hash: vec![0x22; 32] }),
tas: Some(TrustAnchorState {
skis: vec![vec![0x11; 20]],
hash: vec![0x22; 32],
}),
rks: None,
});
let full_cir_path = fixture_root.join("full.cir");
@ -97,8 +102,16 @@ fn cir_full_and_delta_pair_reuses_shared_static_pool() {
std::fs::write(&delta_cir_path, encode_cir(&delta_cir).unwrap()).unwrap();
std::fs::write(&full_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap();
std::fs::write(&delta_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap();
std::fs::write(&full_report_path, br#"{"format_version":2,"publication_points":[]}"#).unwrap();
std::fs::write(&delta_report_path, br#"{"format_version":2,"publication_points":[]}"#).unwrap();
std::fs::write(
&full_report_path,
br#"{"format_version":2,"publication_points":[]}"#,
)
.unwrap();
std::fs::write(
&delta_report_path,
br#"{"format_version":2,"publication_points":[]}"#,
)
.unwrap();
let stub = out_dir.path().join("stub-rpki.sh");
std::fs::write(
@ -195,8 +208,9 @@ fi
String::from_utf8_lossy(&proc.stderr)
);
let full_cir = rpki::cir::decode_cir(&std::fs::read(out.join("full").join("input.cir")).unwrap())
.expect("decode full cir");
let full_cir =
rpki::cir::decode_cir(&std::fs::read(out.join("full").join("input.cir")).unwrap())
.expect("decode full cir");
let delta_cir =
rpki::cir::decode_cir(&std::fs::read(out.join("delta-001").join("input.cir")).unwrap())
.expect("decode delta cir");

View File

@ -2,10 +2,12 @@ use std::path::PathBuf;
use std::process::Command;
use rpki::ccr::{
encode_content_info, CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation,
TrustAnchorState,
CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState,
encode_content_info,
};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
};
use rpki::cir::{encode_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1};
#[test]
fn cir_drop_report_counts_dropped_roa_objects_and_vrps() {
@ -24,7 +26,10 @@ fn cir_drop_report_counts_dropped_roa_objects_and_vrps() {
use sha2::{Digest, Sha256};
hex::encode(Sha256::digest(&roa_bytes))
};
let dir = static_root.join("20260409").join(&hash[0..2]).join(&hash[2..4]);
let dir = static_root
.join("20260409")
.join(&hash[0..2])
.join(&hash[2..4]);
std::fs::create_dir_all(&dir).unwrap();
std::fs::write(dir.join(&hash), &roa_bytes).unwrap();
@ -100,12 +105,20 @@ fn cir_drop_report_counts_dropped_roa_objects_and_vrps() {
])
.output()
.expect("run cir_drop_report");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let output: serde_json::Value =
serde_json::from_slice(&std::fs::read(&json_out).unwrap()).unwrap();
assert_eq!(output["summary"]["droppedObjectCount"], 1);
assert!(output["summary"]["droppedVrpCount"].as_u64().unwrap_or(0) >= 1);
assert_eq!(output["summary"]["droppedByKind"]["roa"], 1);
assert!(std::fs::read_to_string(&md_out).unwrap().contains("Dropped By Reason"));
assert!(
std::fs::read_to_string(&md_out)
.unwrap()
.contains("Dropped By Reason")
);
}

View File

@ -2,8 +2,8 @@ use std::path::{Path, PathBuf};
use std::process::Command;
use rpki::cir::{
encode_cir, materialize_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,
CIR_VERSION_V1,
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
materialize_cir,
};
fn apnic_tal_path() -> PathBuf {
@ -59,7 +59,11 @@ fn write_static(root: &Path, date: &str, bytes: &[u8]) {
std::fs::write(dir.join(hash), bytes).expect("write static object");
}
fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror_root: &Path) -> PathBuf {
fn prepare_reference_ccr(
work: &Path,
cir: &CanonicalInputRepresentation,
mirror_root: &Path,
) -> PathBuf {
let reference_ccr = work.join("reference.ccr");
let rpki_bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
@ -91,7 +95,11 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
])
.output()
.expect("run reference rpki");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
reference_ccr
}
@ -116,7 +124,8 @@ fn cir_replay_matrix_script_matches_reference_for_all_participants() {
materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize");
let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root);
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_matrix.sh");
let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_matrix.sh");
let out = Command::new(script)
.args([
"--cir",
@ -134,13 +143,19 @@ fn cir_replay_matrix_script_matches_reference_for_all_participants() {
])
.output()
.expect("run cir matrix script");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let summary: serde_json::Value =
serde_json::from_slice(&std::fs::read(out_dir.join("summary.json")).expect("read summary"))
.expect("parse summary");
assert_eq!(summary["allMatch"], true);
let participants = summary["participants"].as_array().expect("participants array");
let participants = summary["participants"]
.as_array()
.expect("participants array");
assert_eq!(participants.len(), 3);
for participant in participants {
assert_eq!(participant["exitCode"], 0);

View File

@ -59,7 +59,11 @@ fn write_static(root: &Path, date: &str, bytes: &[u8]) {
std::fs::write(dir.join(hash), bytes).expect("write static object");
}
fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror_root: &Path) -> PathBuf {
fn prepare_reference_ccr(
work: &Path,
cir: &CanonicalInputRepresentation,
mirror_root: &Path,
) -> PathBuf {
let reference_ccr = work.join("reference.ccr");
let rpki_bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
@ -91,7 +95,11 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
])
.output()
.expect("run reference rpki");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
reference_ccr
}
@ -114,7 +122,8 @@ fn cir_routinator_script_matches_reference_on_ta_only_cir() {
materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize");
let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root);
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_routinator.sh");
let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_routinator.sh");
let out = Command::new(script)
.args([
"--cir",
@ -128,7 +137,11 @@ fn cir_routinator_script_matches_reference_on_ta_only_cir() {
])
.output()
.expect("run routinator cir script");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(out_dir.join("compare-summary.json")).expect("read summary"),
)
@ -156,7 +169,8 @@ fn cir_rpki_client_script_matches_reference_on_ta_only_cir() {
materialize_cir(&cir, &static_root, &mirror_root, true).expect("materialize");
let reference_ccr = prepare_reference_ccr(td.path(), &cir, &mirror_root);
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_rpki_client.sh");
let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_rpki_client.sh");
let out = Command::new(script)
.args([
"--cir",
@ -172,7 +186,11 @@ fn cir_rpki_client_script_matches_reference_on_ta_only_cir() {
])
.output()
.expect("run rpki-client cir script");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(out_dir.join("compare-summary.json")).expect("read summary"),
)

View File

@ -1,27 +1,40 @@
use std::path::PathBuf;
use std::process::Command;
use rpki::ccr::{encode_content_info, CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState};
use rpki::cir::{encode_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1};
use rpki::ccr::{
CcrContentInfo, CcrDigestAlgorithm, RpkiCanonicalCacheRepresentation, TrustAnchorState,
encode_content_info,
};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
};
#[test]
fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() {
let out_dir = tempfile::tempdir().expect("tempdir");
let out = out_dir.path().join("cir-sequence");
let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_record_sequence_offline.sh");
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("scripts/cir/run_cir_record_sequence_offline.sh");
let fixture_root = out_dir.path().join("fixture");
let static_payload_root = fixture_root.join("payloads");
std::fs::create_dir_all(&static_payload_root).unwrap();
let base_locks = fixture_root.join("base-locks.json");
let delta_locks = fixture_root.join("locks-delta.json");
std::fs::write(&base_locks, br#"{"validationTime":"2026-03-16T11:49:15Z"}"#).unwrap();
std::fs::write(&delta_locks, br#"{"validationTime":"2026-03-16T11:50:15Z"}"#).unwrap();
std::fs::write(
&delta_locks,
br#"{"validationTime":"2026-03-16T11:50:15Z"}"#,
)
.unwrap();
let mk_cir = |uri: &str, hash_hex: &str, vt: &str| CanonicalInputRepresentation {
version: CIR_VERSION_V1,
hash_alg: CirHashAlgorithm::Sha256,
validation_time: time::OffsetDateTime::parse(vt, &time::format_description::well_known::Rfc3339).unwrap(),
validation_time: time::OffsetDateTime::parse(
vt,
&time::format_description::well_known::Rfc3339,
)
.unwrap(),
objects: vec![CirObject {
rsync_uri: uri.to_string(),
sha256: hex::decode(hash_hex).unwrap(),
@ -39,7 +52,11 @@ fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() {
use sha2::{Digest, Sha256};
hex::encode(Sha256::digest(b"delta-object"))
};
let full_cir = mk_cir("rsync://example.net/repo/full.roa", &full_hash, "2026-03-16T11:49:15Z");
let full_cir = mk_cir(
"rsync://example.net/repo/full.roa",
&full_hash,
"2026-03-16T11:49:15Z",
);
let delta_cir = CanonicalInputRepresentation {
version: CIR_VERSION_V1,
hash_alg: CirHashAlgorithm::Sha256,
@ -68,7 +85,10 @@ fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() {
mfts: None,
vrps: None,
vaps: None,
tas: Some(TrustAnchorState { skis: vec![vec![0x11; 20]], hash: vec![0x22; 32] }),
tas: Some(TrustAnchorState {
skis: vec![vec![0x11; 20]],
hash: vec![0x22; 32],
}),
rks: None,
});
let full_cir_path = fixture_root.join("full.cir");
@ -82,8 +102,16 @@ fn cir_offline_sequence_writes_parseable_sequence_json_and_steps() {
std::fs::write(&delta_cir_path, encode_cir(&delta_cir).unwrap()).unwrap();
std::fs::write(&full_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap();
std::fs::write(&delta_ccr_path, encode_content_info(&empty_ccr).unwrap()).unwrap();
std::fs::write(&full_report_path, br#"{"format_version":2,"publication_points":[]}"#).unwrap();
std::fs::write(&delta_report_path, br#"{"format_version":2,"publication_points":[]}"#).unwrap();
std::fs::write(
&full_report_path,
br#"{"format_version":2,"publication_points":[]}"#,
)
.unwrap();
std::fs::write(
&delta_report_path,
br#"{"format_version":2,"publication_points":[]}"#,
)
.unwrap();
std::fs::write(static_payload_root.join("full-object"), b"full-object").unwrap();
std::fs::write(static_payload_root.join("delta-object"), b"delta-object").unwrap();

View File

@ -1,7 +1,10 @@
use std::path::{Path, PathBuf};
use std::process::Command;
use rpki::cir::{encode_cir, materialize_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
materialize_cir,
};
fn apnic_tal_path() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/tal/apnic-rfc7730-https.tal")
@ -56,7 +59,11 @@ fn write_static(root: &Path, date: &str, bytes: &[u8]) {
std::fs::write(dir.join(hash), bytes).expect("write static object");
}
fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror_root: &Path) -> PathBuf {
fn prepare_reference_ccr(
work: &Path,
cir: &CanonicalInputRepresentation,
mirror_root: &Path,
) -> PathBuf {
let reference_ccr = work.join("reference.ccr");
let rpki_bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
@ -77,7 +84,9 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
"--rsync-command",
wrapper.to_string_lossy().as_ref(),
"--validation-time",
&cir.validation_time.format(&time::format_description::well_known::Rfc3339).unwrap(),
&cir.validation_time
.format(&time::format_description::well_known::Rfc3339)
.unwrap(),
"--max-depth",
"0",
"--max-instances",
@ -87,7 +96,11 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
])
.output()
.expect("run reference rpki");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
reference_ccr
}
@ -102,14 +115,30 @@ fn prepare_sequence_root(td: &Path) -> PathBuf {
let (cir, ta_bytes) = build_ta_only_cir();
let cir_bytes = encode_cir(&cir).expect("encode cir");
std::fs::write(sequence_root.join("full").join("input.cir"), &cir_bytes).unwrap();
std::fs::write(sequence_root.join("delta-001").join("input.cir"), &cir_bytes).unwrap();
std::fs::write(sequence_root.join("delta-002").join("input.cir"), &cir_bytes).unwrap();
std::fs::write(
sequence_root.join("delta-001").join("input.cir"),
&cir_bytes,
)
.unwrap();
std::fs::write(
sequence_root.join("delta-002").join("input.cir"),
&cir_bytes,
)
.unwrap();
write_static(&static_root, "20260407", &ta_bytes);
materialize_cir(&cir, &static_root, &mirror_root, true).unwrap();
let reference = prepare_reference_ccr(td, &cir, &mirror_root);
std::fs::copy(&reference, sequence_root.join("full").join("result.ccr")).unwrap();
std::fs::copy(&reference, sequence_root.join("delta-001").join("result.ccr")).unwrap();
std::fs::copy(&reference, sequence_root.join("delta-002").join("result.ccr")).unwrap();
std::fs::copy(
&reference,
sequence_root.join("delta-001").join("result.ccr"),
)
.unwrap();
std::fs::copy(
&reference,
sequence_root.join("delta-002").join("result.ccr"),
)
.unwrap();
std::fs::write(sequence_root.join("full").join("report.json"), b"{}").unwrap();
std::fs::write(sequence_root.join("delta-001").join("report.json"), b"{}").unwrap();
std::fs::write(sequence_root.join("delta-002").join("report.json"), b"{}").unwrap();
@ -122,7 +151,11 @@ fn prepare_sequence_root(td: &Path) -> PathBuf {
{"stepId":"delta-002","kind":"delta","validationTime":"2026-04-07T00:00:00Z","cirPath":"delta-002/input.cir","ccrPath":"delta-002/result.ccr","reportPath":"delta-002/report.json","previousStepId":"delta-001"}
]
});
std::fs::write(sequence_root.join("sequence.json"), serde_json::to_vec_pretty(&sequence).unwrap()).unwrap();
std::fs::write(
sequence_root.join("sequence.json"),
serde_json::to_vec_pretty(&sequence).unwrap(),
)
.unwrap();
sequence_root
}
@ -141,13 +174,14 @@ fn peer_sequence_replay_scripts_replay_all_steps() {
let routinator_script = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("scripts/cir/run_cir_replay_sequence_routinator.sh");
let out = Command::new(routinator_script)
.args([
"--sequence-root",
sequence_root.to_string_lossy().as_ref(),
])
.args(["--sequence-root", sequence_root.to_string_lossy().as_ref()])
.output()
.expect("run routinator sequence replay");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let r_summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(sequence_root.join("sequence-summary-routinator.json")).unwrap(),
)
@ -166,7 +200,11 @@ fn peer_sequence_replay_scripts_replay_all_steps() {
])
.output()
.expect("run rpki-client sequence replay");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let c_summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(sequence_root.join("sequence-summary-rpki-client.json")).unwrap(),
)

View File

@ -1,7 +1,10 @@
use std::path::{Path, PathBuf};
use std::process::Command;
use rpki::cir::{encode_cir, materialize_cir, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1};
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
materialize_cir,
};
fn apnic_tal_path() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/tal/apnic-rfc7730-https.tal")
@ -56,7 +59,11 @@ fn write_static(root: &Path, date: &str, bytes: &[u8]) {
std::fs::write(dir.join(hash), bytes).expect("write static object");
}
fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror_root: &Path) -> PathBuf {
fn prepare_reference_ccr(
work: &Path,
cir: &CanonicalInputRepresentation,
mirror_root: &Path,
) -> PathBuf {
let reference_ccr = work.join("reference.ccr");
let rpki_bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
@ -77,7 +84,9 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
"--rsync-command",
wrapper.to_string_lossy().as_ref(),
"--validation-time",
&cir.validation_time.format(&time::format_description::well_known::Rfc3339).unwrap(),
&cir.validation_time
.format(&time::format_description::well_known::Rfc3339)
.unwrap(),
"--max-depth",
"0",
"--max-instances",
@ -87,7 +96,11 @@ fn prepare_reference_ccr(work: &Path, cir: &CanonicalInputRepresentation, mirror
])
.output()
.expect("run reference rpki");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
reference_ccr
}
@ -107,14 +120,30 @@ fn ours_sequence_replay_script_replays_all_steps() {
let (cir, ta_bytes) = build_ta_only_cir();
let cir_bytes = encode_cir(&cir).expect("encode cir");
std::fs::write(sequence_root.join("full").join("input.cir"), &cir_bytes).unwrap();
std::fs::write(sequence_root.join("delta-001").join("input.cir"), &cir_bytes).unwrap();
std::fs::write(sequence_root.join("delta-002").join("input.cir"), &cir_bytes).unwrap();
std::fs::write(
sequence_root.join("delta-001").join("input.cir"),
&cir_bytes,
)
.unwrap();
std::fs::write(
sequence_root.join("delta-002").join("input.cir"),
&cir_bytes,
)
.unwrap();
write_static(&static_root, "20260407", &ta_bytes);
materialize_cir(&cir, &static_root, &mirror_root, true).unwrap();
let reference = prepare_reference_ccr(td.path(), &cir, &mirror_root);
std::fs::copy(&reference, sequence_root.join("full").join("result.ccr")).unwrap();
std::fs::copy(&reference, sequence_root.join("delta-001").join("result.ccr")).unwrap();
std::fs::copy(&reference, sequence_root.join("delta-002").join("result.ccr")).unwrap();
std::fs::copy(
&reference,
sequence_root.join("delta-001").join("result.ccr"),
)
.unwrap();
std::fs::copy(
&reference,
sequence_root.join("delta-002").join("result.ccr"),
)
.unwrap();
std::fs::write(sequence_root.join("full").join("report.json"), b"{}").unwrap();
std::fs::write(sequence_root.join("delta-001").join("report.json"), b"{}").unwrap();
std::fs::write(sequence_root.join("delta-002").join("report.json"), b"{}").unwrap();
@ -134,8 +163,8 @@ fn ours_sequence_replay_script_replays_all_steps() {
)
.unwrap();
let script =
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_sequence_ours.sh");
let script = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("scripts/cir/run_cir_replay_sequence_ours.sh");
let out = Command::new(script)
.args([
"--sequence-root",
@ -145,11 +174,16 @@ fn ours_sequence_replay_script_replays_all_steps() {
])
.output()
.expect("run sequence replay");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let summary: serde_json::Value =
serde_json::from_slice(&std::fs::read(sequence_root.join("sequence-summary.json")).unwrap())
.unwrap();
let summary: serde_json::Value = serde_json::from_slice(
&std::fs::read(sequence_root.join("sequence-summary.json")).unwrap(),
)
.unwrap();
assert_eq!(summary["stepCount"], 3);
assert_eq!(summary["allMatch"], true);
}

View File

@ -1,6 +1,6 @@
use std::os::unix::fs::MetadataExt;
use std::path::PathBuf;
use std::process::Command;
use std::os::unix::fs::MetadataExt;
fn wrapper_path() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper")
@ -24,7 +24,11 @@ fn cir_rsync_wrapper_passes_through_help() {
.arg("-h")
.output()
.expect("run wrapper -h");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let stdout = String::from_utf8_lossy(&out.stdout);
let stderr = String::from_utf8_lossy(&out.stderr);
assert!(stdout.contains("rsync") || stderr.contains("rsync"));
@ -60,8 +64,15 @@ fn cir_rsync_wrapper_rewrites_rsync_source_to_mirror_tree() {
])
.output()
.expect("run wrapper rewrite");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert_eq!(std::fs::read(dest_root.join("a.roa")).expect("read copied roa"), b"roa");
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
assert_eq!(
std::fs::read(dest_root.join("a.roa")).expect("read copied roa"),
b"roa"
);
assert!(!dest_root.join("nested").join("b.txt").exists());
}
@ -93,13 +104,23 @@ fn cir_rsync_wrapper_rewrites_module_root_without_trailing_slash_as_contents() {
])
.output()
.expect("run wrapper rewrite");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert_eq!(std::fs::read(dest_root.join("root.cer")).expect("read copied root cer"), b"cer");
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
assert_eq!(
std::fs::read(dest_root.join("root.cer")).expect("read copied root cer"),
b"cer"
);
assert_eq!(
std::fs::read(dest_root.join("sub").join("child.roa")).expect("read copied child roa"),
b"roa"
);
assert!(!dest_root.join("repo").exists(), "module root must not be nested under destination");
assert!(
!dest_root.join("repo").exists(),
"module root must not be nested under destination"
);
}
#[test]
@ -113,7 +134,11 @@ fn cir_rsync_wrapper_requires_mirror_root_for_rsync_source() {
let out = Command::new(wrapper_path())
.env("REAL_RSYNC_BIN", real)
.args(["-rt", "rsync://example.net/repo/", dest_root.to_string_lossy().as_ref()])
.args([
"-rt",
"rsync://example.net/repo/",
dest_root.to_string_lossy().as_ref(),
])
.output()
.expect("run wrapper missing env");
assert!(!out.status.success());
@ -141,8 +166,15 @@ fn cir_rsync_wrapper_leaves_local_source_untouched() {
])
.output()
.expect("run wrapper local passthrough");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert_eq!(std::fs::read(dest_root.join("src").join("x.cer")).expect("read copied file"), b"x");
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
assert_eq!(
std::fs::read(dest_root.join("src").join("x.cer")).expect("read copied file"),
b"x"
);
}
#[test]
@ -177,7 +209,11 @@ fn cir_rsync_wrapper_local_link_mode_uses_hardlinks_for_rewritten_sources() {
])
.output()
.expect("run wrapper local-link mode");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let dst_file = dest_root.join("a.roa");
let dst_nested = dest_root.join("nested").join("b.cer");
@ -186,5 +222,9 @@ fn cir_rsync_wrapper_local_link_mode_uses_hardlinks_for_rewritten_sources() {
let src_meta = std::fs::metadata(&src_file).expect("src metadata");
let dst_meta = std::fs::metadata(&dst_file).expect("dst metadata");
assert_eq!(src_meta.ino(), dst_meta.ino(), "expected hardlinked destination file");
assert_eq!(
src_meta.ino(),
dst_meta.ino(),
"expected hardlinked destination file"
);
}

View File

@ -19,21 +19,15 @@ fn cli_payload_delta_replay_rejects_wrong_base_locks() {
let delta_archive = demo_root.join("payload-delta-archive");
let delta_locks = demo_root.join("locks-delta.json");
assert!(
base_archive.is_dir(),
"base archive missing: {}",
base_archive.display()
);
assert!(
delta_archive.is_dir(),
"delta archive missing: {}",
delta_archive.display()
);
assert!(
delta_locks.is_file(),
"delta locks missing: {}",
delta_locks.display()
);
if !base_archive.is_dir() || !delta_archive.is_dir() || !delta_locks.is_file() {
eprintln!(
"skipping cli delta replay smoke; missing fixtures: base_archive={} delta_archive={} delta_locks={}",
base_archive.display(),
delta_archive.display(),
delta_locks.display()
);
return;
}
let out = Command::new(bin)
.args([

View File

@ -16,16 +16,14 @@ fn cli_payload_replay_root_only_smoke_writes_report_json() {
let locks_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("target/live/payload_replay/locks.json");
assert!(
archive_root.is_dir(),
"payload replay archive missing: {}",
archive_root.display()
);
assert!(
locks_path.is_file(),
"payload replay locks missing: {}",
locks_path.display()
);
if !archive_root.is_dir() || !locks_path.is_file() {
eprintln!(
"skipping cli payload replay smoke; missing fixtures: archive={} locks={}",
archive_root.display(),
locks_path.display()
);
return;
}
let out = Command::new(bin)
.args([

View File

@ -45,7 +45,6 @@ fn cli_run_offline_mode_executes_and_writes_json_and_ccr() {
assert_eq!(v["format_version"], 2);
}
#[test]
fn cli_run_offline_mode_writes_decodable_ccr() {
let db_dir = tempfile::tempdir().expect("db tempdir");
@ -130,10 +129,11 @@ fn cli_run_offline_mode_writes_cir_and_static_pool() {
let cir = rpki::cir::decode_cir(&bytes).expect("decode cir");
assert_eq!(cir.tals.len(), 1);
assert_eq!(cir.tals[0].tal_uri, "https://example.test/root.tal");
assert!(cir
.objects
.iter()
.any(|item| item.rsync_uri.contains("apnic-rpki-root-iana-origin.cer")));
assert!(
cir.objects
.iter()
.any(|item| item.rsync_uri.contains("apnic-rpki-root-iana-origin.cer"))
);
let mut file_count = 0usize;
let mut stack = vec![static_root.clone()];
@ -182,14 +182,18 @@ fn cli_run_blackbox_rsync_wrapper_mode_matches_reference_ccr_without_ta_path() {
.expect("write ta into mirror");
let bin = env!("CARGO_BIN_EXE_rpki");
let wrapper = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("scripts/cir/cir-rsync-wrapper");
let wrapper =
std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/cir-rsync-wrapper");
let reference = Command::new(bin)
.env("REAL_RSYNC_BIN", real_rsync)
.env("CIR_MIRROR_ROOT", &mirror_root)
.args([
"--db",
db_dir.path().join("reference-db").to_string_lossy().as_ref(),
db_dir
.path()
.join("reference-db")
.to_string_lossy()
.as_ref(),
"--tal-path",
tal_path.to_string_lossy().as_ref(),
"--ta-path",
@ -208,7 +212,11 @@ fn cli_run_blackbox_rsync_wrapper_mode_matches_reference_ccr_without_ta_path() {
])
.output()
.expect("run reference wrapper mode");
assert!(reference.status.success(), "stderr={}", String::from_utf8_lossy(&reference.stderr));
assert!(
reference.status.success(),
"stderr={}",
String::from_utf8_lossy(&reference.stderr)
);
let out = Command::new(bin)
.env("REAL_RSYNC_BIN", real_rsync)
@ -232,7 +240,11 @@ fn cli_run_blackbox_rsync_wrapper_mode_matches_reference_ccr_without_ta_path() {
])
.output()
.expect("run blackbox wrapper mode");
assert!(out.status.success(), "stderr={}", String::from_utf8_lossy(&out.stderr));
assert!(
out.status.success(),
"stderr={}",
String::from_utf8_lossy(&out.stderr)
);
let reference = rpki::ccr::decode_content_info(&std::fs::read(&ref_ccr_path).unwrap())
.expect("decode reference ccr");

View File

@ -6,9 +6,9 @@ use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{
PackTime, RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore,
ValidatedCaInstanceResult, ValidatedManifestMeta,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind, VcirArtifactRole,
VcirArtifactValidationStatus, VcirAuditSummary, VcirInstanceGate, VcirRelatedArtifact,
VcirSummary,
};
use rpki::validation::manifest::{PublicationPointSource, process_manifest_publication_point};
@ -99,7 +99,7 @@ fn store_validated_manifest_baseline(
summary: VcirSummary {
local_vrp_count: 0,
local_aspa_count: 0,
local_router_key_count: 0,
local_router_key_count: 0,
child_count: 0,
accepted_object_count: 1,
rejected_object_count: 0,
@ -165,7 +165,12 @@ fn manifest_success_returns_validated_publication_point_data() {
let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
}
let policy = Policy::default();
@ -218,7 +223,12 @@ fn manifest_hash_mismatch_reuses_current_instance_vcir_when_enabled() {
let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
}
let policy = Policy::default();
@ -255,7 +265,12 @@ fn manifest_hash_mismatch_reuses_current_instance_vcir_when_enabled() {
.expect("load victim raw")
.expect("victim raw exists");
tampered[0] ^= 0xFF;
put_current_object(&store, &victim_uri, tampered, victim_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&victim_uri,
tampered,
victim_uri.rsplit('.').next().unwrap_or("bin"),
);
let second = process_manifest_publication_point(
&store,
@ -304,7 +319,12 @@ fn manifest_failed_fetch_stop_all_output() {
let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
}
let mut policy = Policy::default();
@ -332,7 +352,12 @@ fn manifest_failed_fetch_stop_all_output() {
.expect("load victim raw")
.expect("victim raw exists");
tampered[0] ^= 0xFF;
put_current_object(&store, &victim_uri, tampered, victim_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&victim_uri,
tampered,
victim_uri.rsplit('.').next().unwrap_or("bin"),
);
policy.ca_failed_fetch_policy = CaFailedFetchPolicy::StopAllOutput;
let err = process_manifest_publication_point(
@ -378,7 +403,12 @@ fn manifest_failed_fetch_rejects_stale_current_instance_vcir() {
let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
}
let policy = Policy::default();
@ -459,7 +489,12 @@ fn manifest_revalidation_with_unchanged_manifest_is_fresh() {
let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
}
let policy = Policy::default();
@ -539,7 +574,12 @@ fn manifest_rollback_is_treated_as_failed_fetch_and_reuses_current_instance_vcir
let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
}
let policy = Policy::default();

View File

@ -6,9 +6,9 @@ use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{
PackTime, RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore,
ValidatedCaInstanceResult, ValidatedManifestMeta,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
ValidatedCaInstanceResult, ValidatedManifestMeta, VcirArtifactKind, VcirArtifactRole,
VcirArtifactValidationStatus, VcirAuditSummary, VcirInstanceGate, VcirRelatedArtifact,
VcirSummary,
};
use rpki::validation::manifest::{
ManifestProcessError, PublicationPointSource, process_manifest_publication_point,
@ -102,7 +102,7 @@ fn store_validated_manifest_baseline(
summary: VcirSummary {
local_vrp_count: 0,
local_aspa_count: 0,
local_router_key_count: 0,
local_router_key_count: 0,
child_count: 0,
accepted_object_count: 1,
rejected_object_count: 0,
@ -161,7 +161,12 @@ fn store_manifest_and_locked_files(
let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
put_current_object(&store, &rsync_uri, bytes, rsync_uri.rsplit('.').next().unwrap_or("bin"));
put_current_object(
&store,
&rsync_uri,
bytes,
rsync_uri.rsplit('.').next().unwrap_or("bin"),
);
}
}

View File

@ -19,7 +19,11 @@ fn wrapper_script() -> std::path::PathBuf {
#[test]
fn multi_rir_case_info_resolves_all_five_rirs_and_timings() {
let bundle_root = multi_rir_bundle_root();
assert!(bundle_root.is_dir(), "bundle root missing: {}", bundle_root.display());
assert!(
bundle_root.is_dir(),
"bundle root missing: {}",
bundle_root.display()
);
let expected = [
("afrinic", "afrinic", "afrinic.tal", "afrinic-ta.cer"),
@ -53,22 +57,61 @@ fn multi_rir_case_info_resolves_all_five_rirs_and_timings() {
serde_json::from_slice(&out.stdout).expect("parse helper json");
assert_eq!(json["rir"].as_str(), Some(rir));
assert_eq!(json["trust_anchor"].as_str(), Some(trust_anchor));
assert!(json["base_archive"].as_str().unwrap_or("").ends_with("base-payload-archive"));
assert!(json["delta_archive"].as_str().unwrap_or("").ends_with("payload-delta-archive"));
assert!(json["base_locks"].as_str().unwrap_or("").ends_with("base-locks.json"));
assert!(json["delta_locks"].as_str().unwrap_or("").ends_with("locks-delta.json"));
assert!(json["tal_path"].as_str().unwrap_or("").ends_with(tal_suffix));
assert!(
json["base_archive"]
.as_str()
.unwrap_or("")
.ends_with("base-payload-archive")
);
assert!(
json["delta_archive"]
.as_str()
.unwrap_or("")
.ends_with("payload-delta-archive")
);
assert!(
json["base_locks"]
.as_str()
.unwrap_or("")
.ends_with("base-locks.json")
);
assert!(
json["delta_locks"]
.as_str()
.unwrap_or("")
.ends_with("locks-delta.json")
);
assert!(
json["tal_path"]
.as_str()
.unwrap_or("")
.ends_with(tal_suffix)
);
assert!(json["ta_path"].as_str().unwrap_or("").ends_with(ta_suffix));
assert!(json["validation_times"]["snapshot"].as_str().unwrap_or("").contains("T"));
assert!(json["validation_times"]["delta"].as_str().unwrap_or("").contains("T"));
assert!(json["routinator_timings"]["base_replay_seconds"]
.as_f64()
.unwrap_or(0.0)
> 0.0);
assert!(json["routinator_timings"]["delta_replay_seconds"]
.as_f64()
.unwrap_or(0.0)
> 0.0);
assert!(
json["validation_times"]["snapshot"]
.as_str()
.unwrap_or("")
.contains("T")
);
assert!(
json["validation_times"]["delta"]
.as_str()
.unwrap_or("")
.contains("T")
);
assert!(
json["routinator_timings"]["base_replay_seconds"]
.as_f64()
.unwrap_or(0.0)
> 0.0
);
assert!(
json["routinator_timings"]["delta_replay_seconds"]
.as_f64()
.unwrap_or(0.0)
> 0.0
);
}
}
@ -113,8 +156,12 @@ fn multi_rir_case_info_prefers_lock_validation_time_over_replay_started_at() {
"verification.json",
"README.md",
] {
fs::write(rir_root.join(rel), "placeholder
").expect("write required file");
fs::write(
rir_root.join(rel),
"placeholder
",
)
.expect("write required file");
}
let repo_root = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"));
@ -163,7 +210,11 @@ stderr={}",
#[test]
fn multi_rir_wrapper_describe_mode_works_for_ripe() {
let bundle_root = multi_rir_bundle_root();
assert!(bundle_root.is_dir(), "bundle root missing: {}", bundle_root.display());
assert!(
bundle_root.is_dir(),
"bundle root missing: {}",
bundle_root.display()
);
let out = Command::new(wrapper_script())
.env("BUNDLE_ROOT", &bundle_root)
@ -183,8 +234,10 @@ fn multi_rir_wrapper_describe_mode_works_for_ripe() {
serde_json::from_slice(&out.stdout).expect("parse wrapper describe json");
assert_eq!(json["rir"].as_str(), Some("ripe"));
assert_eq!(json["trust_anchor"].as_str(), Some("ripe"));
assert!(json["verification_json"]
.as_str()
.unwrap_or("")
.ends_with("verification.json"));
assert!(
json["verification_json"]
.as_str()
.unwrap_or("")
.ends_with("verification.json")
);
}

View File

@ -403,7 +403,6 @@ fn process_snapshot_for_issuer_drop_publication_point_on_invalid_aspa_bytes() {
assert!(!out.warnings.is_empty());
}
#[test]
fn process_snapshot_for_issuer_populates_local_outputs_cache_from_real_cernet_fixture() {
let (dir, rsync_base_uri, manifest_file) = cernet_fixture();
@ -432,16 +431,21 @@ fn process_snapshot_for_issuer_populates_local_outputs_cache_from_real_cernet_fi
None,
);
assert!(!out.local_outputs_cache.is_empty(), "expected cached VCIR local outputs");
assert!(
!out.local_outputs_cache.is_empty(),
"expected cached VCIR local outputs"
);
assert_eq!(out.local_outputs_cache.len(), out.vrps.len());
assert!(out
.local_outputs_cache
.iter()
.all(|entry| entry.output_type == VcirOutputType::Vrp));
assert!(out
.local_outputs_cache
.iter()
.all(|entry| entry.source_object_type == "roa"));
assert!(
out.local_outputs_cache
.iter()
.all(|entry| entry.output_type == VcirOutputType::Vrp)
);
assert!(
out.local_outputs_cache
.iter()
.all(|entry| entry.source_object_type == "roa")
);
}
// NOTE: DN-based issuer resolution and pack-local CA indexing have been removed for determinism.

View File

@ -206,8 +206,14 @@ fn write_multi_rir_case_report_combines_compare_and_timing() {
let md = std::fs::read_to_string(&out_md).expect("read markdown");
assert!(md.contains("AFRINIC Replay Report"), "{md}");
assert!(md.contains("| snapshot | true | 12.000 | 6.000 | 2.000 | 0 | 0 |"), "{md}");
assert!(md.contains("| delta | true | 8.000 | 4.000 | 2.000 | 0 | 0 |"), "{md}");
assert!(
md.contains("| snapshot | true | 12.000 | 6.000 | 2.000 | 0 | 0 |"),
"{md}"
);
assert!(
md.contains("| delta | true | 8.000 | 4.000 | 2.000 | 0 | 0 |"),
"{md}"
);
}
#[test]
@ -282,7 +288,10 @@ fn write_multi_rir_summary_aggregates_case_reports() {
let md = std::fs::read_to_string(&out_md).expect("read summary md");
assert!(md.contains("Multi-RIR Replay Summary"), "{md}");
assert!(md.contains("| afrinic | true | 10.000 | 5.000 | 2.000 | true | 12.000 | 6.000 | 3.000 |"), "{md}");
assert!(
md.contains("| afrinic | true | 10.000 | 5.000 | 2.000 | true | 12.000 | 6.000 | 3.000 |"),
"{md}"
);
}
#[test]

View File

@ -4,7 +4,7 @@ use std::sync::{Arc, Mutex};
use rpki::fetch::rsync::{LocalDirRsyncFetcher, RsyncFetchError, RsyncFetcher};
use rpki::policy::{Policy, SyncPreference};
use rpki::storage::RocksStore;
use rpki::sync::repo::{RepoSyncSource, sync_publication_point};
use rpki::sync::repo::{RepoSyncPhase, RepoSyncSource, sync_publication_point};
use rpki::sync::rrdp::Fetcher;
struct MapFetcher {
@ -98,6 +98,7 @@ fn repo_sync_uses_rrdp_when_available() {
.expect("sync");
assert_eq!(out.source, RepoSyncSource::Rrdp);
assert_eq!(out.phase, RepoSyncPhase::RrdpOk);
assert_eq!(out.objects_written, 2);
assert_eq!(*calls.lock().unwrap(), 0);
@ -145,6 +146,7 @@ fn repo_sync_skips_snapshot_when_state_unchanged() {
)
.expect("sync 1");
assert_eq!(out1.source, RepoSyncSource::Rrdp);
assert_eq!(out1.phase, RepoSyncPhase::RrdpOk);
assert_eq!(out1.objects_written, 2);
let out2 = sync_publication_point(
@ -159,6 +161,7 @@ fn repo_sync_skips_snapshot_when_state_unchanged() {
)
.expect("sync 2");
assert_eq!(out2.source, RepoSyncSource::Rrdp);
assert_eq!(out2.phase, RepoSyncPhase::RrdpOk);
assert_eq!(
out2.objects_written, 0,
"expected to skip snapshot apply when state unchanged"
@ -208,6 +211,7 @@ fn repo_sync_falls_back_to_rsync_on_rrdp_failure() {
.expect("fallback sync");
assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.phase, RepoSyncPhase::RrdpFailedRsyncOk);
assert_eq!(out.objects_written, 1);
assert_eq!(*calls.lock().unwrap(), 1);
assert!(!out.warnings.is_empty());
@ -252,6 +256,7 @@ fn repo_sync_rsync_populates_current_repository_view() {
.expect("rsync-only sync");
assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.phase, RepoSyncPhase::RsyncOnlyOk);
assert_eq!(out.objects_written, 2);
assert_current_object(&store, "rsync://example.net/repo/a/one.cer", b"1");

View File

@ -1,9 +1,9 @@
use std::process::Command;
use rpki::data_model::rc::{ResourceCertificate, ResourceCertKind};
use rpki::data_model::rc::{ResourceCertKind, ResourceCertificate};
use rpki::data_model::router_cert::{
BgpsecRouterCertificate, BgpsecRouterCertificateDecodeError,
BgpsecRouterCertificatePathError, BgpsecRouterCertificateProfileError,
BgpsecRouterCertificate, BgpsecRouterCertificateDecodeError, BgpsecRouterCertificatePathError,
BgpsecRouterCertificateProfileError,
};
fn openssl_available() -> bool {
@ -104,7 +104,11 @@ sbgp-autonomousSysNum = critical, AS:64496
authorityKeyIdentifier = keyid:always
"#,
dir = dir.display(),
eku_line = if include_eku { "extendedKeyUsage = 1.3.6.1.5.5.7.3.30" } else { "" },
eku_line = if include_eku {
"extendedKeyUsage = 1.3.6.1.5.5.7.3.30"
} else {
""
},
extra_ext = extra_ext
);
std::fs::write(dir.join("openssl.cnf"), cnf.as_bytes()).expect("write cnf");
@ -266,7 +270,16 @@ fn decode_bgpsec_router_certificate_fixture_smoke() {
fn router_certificate_profile_rejects_missing_eku() {
let g = generate_router_cert_with_variant("ec-p256", false, "");
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::MissingBgpsecRouterEku | BgpsecRouterCertificateProfileError::MissingExtendedKeyUsage)), "{err}");
assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::MissingBgpsecRouterEku
| BgpsecRouterCertificateProfileError::MissingExtendedKeyUsage
)
),
"{err}"
);
}
#[test]
@ -277,7 +290,15 @@ fn router_certificate_profile_rejects_sia_and_ip_resources_and_ranges() {
"subjectInfoAccess = caRepository;URI:rsync://example.test/repo/router/\n",
);
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::SubjectInfoAccessPresent)), "{err}");
assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::SubjectInfoAccessPresent
)
),
"{err}"
);
let g = generate_router_cert_with_variant(
"ec-p256",
@ -285,7 +306,15 @@ fn router_certificate_profile_rejects_sia_and_ip_resources_and_ranges() {
"sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/8\n",
);
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::IpResourcesPresent)), "{err}");
assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::IpResourcesPresent
)
),
"{err}"
);
let g = generate_router_cert_with_variant(
"ec-p256",
@ -293,18 +322,43 @@ fn router_certificate_profile_rejects_sia_and_ip_resources_and_ranges() {
"sbgp-autonomousSysNum = critical, AS:64496-64500\n",
);
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::AsResourcesRangeNotAllowed)), "{err}");
assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::AsResourcesRangeNotAllowed
)
),
"{err}"
);
}
#[test]
fn router_certificate_profile_rejects_wrong_spki_algorithm_or_curve() {
let g = generate_router_cert_with_variant("rsa", true, "");
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::SpkiAlgorithmNotEcPublicKey)), "{err}");
assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::SpkiAlgorithmNotEcPublicKey
)
),
"{err}"
);
let g = generate_router_cert_with_variant("ec-p384", true, "");
let err = BgpsecRouterCertificate::decode_der(&g.router_der).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificateDecodeError::Validate(BgpsecRouterCertificateProfileError::SpkiWrongCurve | BgpsecRouterCertificateProfileError::SpkiEcPointNotUncompressedP256)), "{err}");
assert!(
matches!(
err,
BgpsecRouterCertificateDecodeError::Validate(
BgpsecRouterCertificateProfileError::SpkiWrongCurve
| BgpsecRouterCertificateProfileError::SpkiEcPointNotUncompressedP256
)
),
"{err}"
);
}
#[test]
@ -317,11 +371,15 @@ fn router_certificate_path_validation_accepts_valid_and_rejects_wrong_issuer() {
let g = generate_router_cert_with_variant("ec-p256", true, "");
let issuer = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer");
let wrong_issuer = ResourceCertificate::decode_der(&g.wrong_issuer_der).expect("decode wrong issuer");
let wrong_issuer =
ResourceCertificate::decode_der(&g.wrong_issuer_der).expect("decode wrong issuer");
let issuer_crl = RpkixCrl::decode_der(&g.issuer_crl_der).expect("decode crl");
let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info).expect("issuer spki");
let (rem, issuer_spki) =
SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info).expect("issuer spki");
assert!(rem.is_empty());
let (rem, wrong_spki) = SubjectPublicKeyInfo::from_der(&wrong_issuer.tbs.subject_public_key_info).expect("wrong issuer spki");
let (rem, wrong_spki) =
SubjectPublicKeyInfo::from_der(&wrong_issuer.tbs.subject_public_key_info)
.expect("wrong issuer spki");
assert!(rem.is_empty());
let now = time::OffsetDateTime::now_utc();
@ -334,7 +392,8 @@ fn router_certificate_path_validation_accepts_valid_and_rejects_wrong_issuer() {
Some("rsync://example.test/repo/issuer/issuer.cer"),
Some("rsync://example.test/repo/issuer/issuer.crl"),
now,
).expect("router path valid");
)
.expect("router path valid");
assert_eq!(cert.asns, vec![64496]);
let err = BgpsecRouterCertificate::validate_path_with_prevalidated_issuer(
@ -346,8 +405,12 @@ fn router_certificate_path_validation_accepts_valid_and_rejects_wrong_issuer() {
None,
None,
now,
).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificatePathError::CertPath(_)), "{err}");
)
.unwrap_err();
assert!(
matches!(err, BgpsecRouterCertificatePathError::CertPath(_)),
"{err}"
);
let rc = ResourceCertificate::decode_der(&g.router_der).expect("decode router rc");
let mut revoked = HashSet::new();
@ -361,6 +424,10 @@ fn router_certificate_path_validation_accepts_valid_and_rejects_wrong_issuer() {
Some("rsync://example.test/repo/issuer/issuer.cer"),
Some("rsync://example.test/repo/issuer/issuer.crl"),
now,
).unwrap_err();
assert!(matches!(err, BgpsecRouterCertificatePathError::CertPath(_)), "{err}");
)
.unwrap_err();
assert!(
matches!(err, BgpsecRouterCertificatePathError::CertPath(_)),
"{err}"
);
}

View File

@ -94,12 +94,7 @@ fn rsync_fallback_breakdown_luys_cloud() {
let sha256_hex = hex::encode(sha2::Sha256::digest(bytes));
let mut raw = RawByHashEntry::from_bytes(sha256_hex.clone(), bytes.clone());
raw.origin_uris.push(uri.clone());
raw.object_type = Some(
uri.rsplit('.')
.next()
.unwrap_or("bin")
.to_ascii_lowercase(),
);
raw.object_type = Some(uri.rsplit('.').next().unwrap_or("bin").to_ascii_lowercase());
raw.encoding = Some("der".to_string());
store.put_raw_by_hash_entry(&raw).expect("put raw_by_hash");
store
@ -107,12 +102,7 @@ fn rsync_fallback_breakdown_luys_cloud() {
rsync_uri: uri.clone(),
current_hash: Some(sha256_hex),
repository_source: Some(rsync_base_uri.clone()),
object_type: Some(
uri.rsplit('.')
.next()
.unwrap_or("bin")
.to_ascii_lowercase(),
),
object_type: Some(uri.rsplit('.').next().unwrap_or("bin").to_ascii_lowercase()),
state: RepositoryViewState::Present,
})
.expect("put repository view");

View File

@ -17,7 +17,11 @@ fn signed_object_decode_accepts_real_arin_ber_indefinite_roa_from_multi_rir_bund
let xml_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"../../rpki/target/live/20260316-112341-multi-final3/arin/base-payload-archive/v1/captures/arin-base-arin/rrdp/repos/8bd1405242614aed9f13321d266fe3193db0ca842e0cbffda2b3df05481c331b/4a394319-7460-4141-a416-1addb69284ff/snapshot-160090-880434ae2a6f0e5fff224391c65a22ab037e09ac1d3ebad0ceda18558b49b13e.xml",
);
assert!(xml_path.is_file(), "xml path missing: {}", xml_path.display());
assert!(
xml_path.is_file(),
"xml path missing: {}",
xml_path.display()
);
let xml = std::fs::read_to_string(&xml_path).expect("read snapshot xml");
let uri = "rsync://rpki.arin.net/repository/arin-rpki-ta/5e4a23ea-e80a-403e-b08c-2171da2157d3/2a246947-2d62-4a6c-ba05-87187f0099b2/9ed5ce80-224e-46ab-94f1-1afce8ccf13f/0b13beb5-6bbb-3994-a254-02c5b10175c5.roa";
let der = extract_publish_bytes(&xml, uri);
@ -25,8 +29,12 @@ fn signed_object_decode_accepts_real_arin_ber_indefinite_roa_from_multi_rir_bund
assert_eq!(der.first().copied(), Some(0x30));
assert_eq!(der.get(1).copied(), Some(0x80));
let signed_object = RpkiSignedObject::decode_der(&der).expect("decode BER-indefinite CMS signed object");
assert_eq!(signed_object.signed_data.encap_content_info.econtent_type, "1.2.840.113549.1.9.16.1.24");
let signed_object =
RpkiSignedObject::decode_der(&der).expect("decode BER-indefinite CMS signed object");
assert_eq!(
signed_object.signed_data.encap_content_info.econtent_type,
"1.2.840.113549.1.9.16.1.24"
);
let roa = RoaObject::decode_der(&der).expect("decode ROA object from BER-indefinite CMS");
assert!(!roa.roa.ip_addr_blocks.is_empty());

View File

@ -79,7 +79,9 @@ fn rrdp_source_roundtrip_by_notification_uri() {
last_snapshot_hash: None,
last_error: None,
};
store.put_rrdp_source_record(&record).expect("put rrdp_source");
store
.put_rrdp_source_record(&record)
.expect("put rrdp_source");
let got = store
.get_rrdp_source_record(notif)

View File

@ -395,7 +395,9 @@ fn tree_aggregates_router_keys_from_publication_point_results() {
source_object_uri: "rsync://example.test/repo/router1.cer".to_string(),
source_object_hash: "11".repeat(32),
source_ee_cert_hash: "11".repeat(32),
item_effective_until: PackTime { rfc3339_utc: "2026-12-31T00:00:00Z".to_string() },
item_effective_until: PackTime {
rfc3339_utc: "2026-12-31T00:00:00Z".to_string(),
},
},
RouterKeyPayload {
as_id: 64497,
@ -404,7 +406,9 @@ fn tree_aggregates_router_keys_from_publication_point_results() {
source_object_uri: "rsync://example.test/repo/router2.cer".to_string(),
source_object_hash: "22".repeat(32),
source_ee_cert_hash: "22".repeat(32),
item_effective_until: PackTime { rfc3339_utc: "2026-12-31T00:00:00Z".to_string() },
item_effective_until: PackTime {
rfc3339_utc: "2026-12-31T00:00:00Z".to_string(),
},
},
],
local_outputs_cache: Vec::new(),