20260418 优化去掉冗余存储repo object,mix quick最快170s

This commit is contained in:
yuyr 2026-04-18 14:10:47 +08:00
parent f485786470
commit 417c82bef6
34 changed files with 1482 additions and 328 deletions

View File

@ -28,7 +28,7 @@ serde_cbor = "0.11.2"
roxmltree = "0.20.0" roxmltree = "0.20.0"
quick-xml = "0.37.2" quick-xml = "0.37.2"
uuid = { version = "1.7.0", features = ["v4"] } uuid = { version = "1.7.0", features = ["v4"] }
reqwest = { version = "0.12.12", default-features = false, features = ["blocking", "rustls-tls"] } reqwest = { version = "0.12.12", default-features = false, features = ["blocking", "rustls-tls", "gzip", "brotli", "deflate"] }
pprof = { version = "0.14.1", optional = true, features = ["flamegraph", "prost-codec"] } pprof = { version = "0.14.1", optional = true, features = ["flamegraph", "prost-codec"] }
flate2 = { version = "1.0.35", optional = true } flate2 = { version = "1.0.35", optional = true }
tempfile = "3.16.0" tempfile = "3.16.0"

View File

@ -0,0 +1,263 @@
#!/usr/bin/env bash
# Quick two-step performance comparison driver: runs our `rpki` binary and the
# reference rpki-client side by side on a remote host (APNIC + ARIN TALs),
# pulls the artifacts back, compares the CCR outputs, and writes summary JSON.
# Fail fast: abort on any error, unset variable, or failed pipeline stage.
set -euo pipefail
# Print CLI usage to stdout. The heredoc is user-facing help text; keep verbatim.
usage() {
cat <<'EOF'
Usage:
./scripts/compare/run_perf_compare_quick_remote.sh \
--run-root <path> \
--remote-root <path> \
[--ssh-target <user@host>] \
[--rpki-client-bin <path>] \
[--libtls-path <path>] \
[--dry-run]
EOF
}
# Repository root, resolved relative to this script's own location.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"

# Defaults. SSH_TARGET / RPKI_CLIENT_BIN / LIBTLS_PATH honor values already
# present in the environment; CLI flags below override both.
RUN_ROOT=""
REMOTE_ROOT=""
SSH_TARGET="${SSH_TARGET:-root@47.251.56.108}"
RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}"
LIBTLS_PATH="${LIBTLS_PATH:-/home/yuyr/dev/rpki-client-9.7/.deps/libtls/root/usr/lib/x86_64-linux-gnu/libtls.so.28.0.0}"
DRY_RUN=0

# Parse command-line flags.
while (( $# > 0 )); do
  case "$1" in
    --run-root)
      RUN_ROOT="$2"
      shift 2
      ;;
    --remote-root)
      REMOTE_ROOT="$2"
      shift 2
      ;;
    --ssh-target)
      SSH_TARGET="$2"
      shift 2
      ;;
    --rpki-client-bin)
      RPKI_CLIENT_BIN="$2"
      shift 2
      ;;
    --libtls-path)
      LIBTLS_PATH="$2"
      shift 2
      ;;
    --dry-run)
      DRY_RUN=1
      shift
      ;;
    -h|--help)
      usage
      exit 0
      ;;
    *)
      echo "unknown argument: $1" >&2
      usage
      exit 2
      ;;
  esac
done

# Validate required inputs; binary/library checks are skipped under --dry-run.
if [[ -z "$RUN_ROOT" || -z "$REMOTE_ROOT" ]]; then
  usage >&2
  exit 2
fi
if [[ "$DRY_RUN" -ne 1 && ! -x "$RPKI_CLIENT_BIN" ]]; then
  echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2
  exit 2
fi
if [[ "$DRY_RUN" -ne 1 && ! -f "$LIBTLS_PATH" ]]; then
  echo "libtls not found: $LIBTLS_PATH" >&2
  exit 2
fi

# Normalize RUN_ROOT to an absolute path (Path.resolve also collapses ../).
RUN_ROOT="$(python3 -c 'import sys, pathlib; print(pathlib.Path(sys.argv[1]).resolve())' "$RUN_ROOT")"

# Local output layout: one directory triple per comparison step.
for step in step-001 step-002; do
  mkdir -p "$RUN_ROOT/steps/$step/ours" "$RUN_ROOT/steps/$step/rpki-client" "$RUN_ROOT/steps/$step/compare"
done

# TAL/TA fixtures shipped to the remote host for both trust anchors.
APNIC_TAL="$ROOT_DIR/tests/fixtures/tal/apnic-rfc7730-https.tal"
APNIC_TA="$ROOT_DIR/tests/fixtures/ta/apnic-ta.cer"
ARIN_TAL="$ROOT_DIR/tests/fixtures/tal/arin.tal"
ARIN_TA="$ROOT_DIR/tests/fixtures/ta/arin-ta.cer"

# Dry run: report the effective configuration and stop before touching remote.
if [[ "$DRY_RUN" -eq 1 ]]; then
  cat <<EOF2
workflow_name=性能对比测试快速版
scope=APNIC+ARIN mixed release two-step synchronized compare
run_root=$RUN_ROOT
remote_root=$REMOTE_ROOT
ssh_target=$SSH_TARGET
EOF2
  exit 0
fi

# Build release binaries only when either required artifact is missing.
if [[ ! -x "$ROOT_DIR/target/release/rpki" || ! -x "$ROOT_DIR/target/release/ccr_to_compare_views" ]]; then
  ( cd "$ROOT_DIR" && cargo build --release --bin rpki --bin ccr_to_compare_views )
fi

# Provision the remote host: ensure the _rpki-client user exists, wipe any
# previous run root, and recreate the directory layout used below.
ssh "$SSH_TARGET" "set -e; id -u _rpki-client >/dev/null 2>&1 || useradd -r -M -s /usr/sbin/nologin _rpki-client || true; rm -rf '$REMOTE_ROOT'; mkdir -p '$REMOTE_ROOT/bin' '$REMOTE_ROOT/lib' '$REMOTE_ROOT/state/ours' '$REMOTE_ROOT/state/rpki-client' '$REMOTE_ROOT/steps/step-001/ours' '$REMOTE_ROOT/steps/step-001/rpki-client' '$REMOTE_ROOT/steps/step-002/ours' '$REMOTE_ROOT/steps/step-002/rpki-client'"
# Ship our binary plus TAL/TA fixtures, the reference client, and its libtls.
scp "$ROOT_DIR/target/release/rpki" "$APNIC_TAL" "$APNIC_TA" "$ARIN_TAL" "$ARIN_TA" "$SSH_TARGET:$REMOTE_ROOT/"
scp "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_ROOT/bin/rpki-client"
scp "$LIBTLS_PATH" "$SSH_TARGET:$REMOTE_ROOT/lib/libtls.so.28"
# run_step STEP_ID KIND
#
# Executes one synchronized comparison round on the remote host, copies the
# resulting artifacts back under $RUN_ROOT/steps/STEP_ID/, runs the CCR
# comparison, and writes step-summary.json for the step.
#   STEP_ID: step directory name, e.g. step-001
#   KIND:    "snapshot" (remote state is wiped first) or anything else
#            (e.g. "delta", which reuses the previous step's caches).
run_step() {
local step_id="$1"
local kind="$2"
local local_step="$RUN_ROOT/steps/$step_id"
# Remote driver. The heredoc delimiter is quoted ('EOS'), so nothing expands
# locally; REMOTE_ROOT/STEP_ID/KIND are passed as positional args instead.
# Inside: both tools are aligned on a shared start epoch ~3s in the future,
# then run in parallel subshells; each subshell records wall-clock duration
# and exit code into its own round-result.json.
ssh "$SSH_TARGET" bash -s -- "$REMOTE_ROOT" "$step_id" "$kind" <<'EOS'
set -euo pipefail
REMOTE_ROOT="$1"
STEP_ID="$2"
KIND="$3"
cd "$REMOTE_ROOT"
mkdir -p "steps/$STEP_ID/ours" "steps/$STEP_ID/rpki-client"
if [[ "$KIND" == "snapshot" ]]; then
rm -rf state/ours/work-db state/ours/raw-store.db state/rpki-client/cache state/rpki-client/out state/rpki-client/ta state/rpki-client/.ta
fi
mkdir -p state/ours/work-db state/ours/raw-store.db state/rpki-client/cache state/rpki-client/out state/rpki-client/ta state/rpki-client/.ta
chmod 0777 state/ours/work-db state/ours/raw-store.db
chmod -R 0777 state/rpki-client
START_EPOCH="$(python3 - <<'PY'
import time
print(time.time() + 3.0)
PY
)"
(
python3 - <<'PY' "$START_EPOCH"
import sys, time
x = float(sys.argv[1])
d = x - time.time()
if d > 0:
time.sleep(d)
PY
started_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
set +e
env RPKI_PROGRESS_LOG=1 RPKI_PROGRESS_SLOW_SECS=0 ./rpki \
--db state/ours/work-db \
--raw-store-db state/ours/raw-store.db \
--tal-path apnic-rfc7730-https.tal --ta-path apnic-ta.cer \
--tal-path arin.tal --ta-path arin-ta.cer \
--parallel-phase1 \
--ccr-out "steps/$STEP_ID/ours/result.ccr" \
--report-json "steps/$STEP_ID/ours/report.json" \
> "steps/$STEP_ID/ours/run.log" 2>&1
exit_code=$?
set -e
finished_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
python3 - <<'PY' "steps/$STEP_ID/ours/round-result.json" "$STEP_ID" "$KIND" "$started_ms" "$finished_ms" "$exit_code"
import json, sys
path, step_id, kind, started_ms, finished_ms, exit_code = sys.argv[1:]
json.dump(
{
"stepId": step_id,
"kind": kind,
"durationMs": int(finished_ms) - int(started_ms),
"exitCode": int(exit_code),
},
open(path, "w"),
indent=2,
)
PY
) &
OURS_PID=$!
(
cd state/rpki-client
python3 - <<'PY' "$START_EPOCH"
import sys, time
x = float(sys.argv[1])
d = x - time.time()
if d > 0:
time.sleep(d)
PY
started_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
set +e
LD_LIBRARY_PATH="$REMOTE_ROOT/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" "$REMOTE_ROOT/bin/rpki-client" \
-vv \
-t ../../apnic-rfc7730-https.tal \
-t ../../arin.tal \
-d cache out \
> "$REMOTE_ROOT/steps/$STEP_ID/rpki-client/run.log" 2>&1
exit_code=$?
set -e
cp out/rpki.ccr "$REMOTE_ROOT/steps/$STEP_ID/rpki-client/result.ccr" 2>/dev/null || true
cp out/openbgpd "$REMOTE_ROOT/steps/$STEP_ID/rpki-client/openbgpd" 2>/dev/null || true
finished_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
python3 - <<'PY' "$REMOTE_ROOT/steps/$STEP_ID/rpki-client/round-result.json" "$STEP_ID" "$KIND" "$started_ms" "$finished_ms" "$exit_code"
import json, sys
path, step_id, kind, started_ms, finished_ms, exit_code = sys.argv[1:]
json.dump(
{
"stepId": step_id,
"kind": kind,
"durationMs": int(finished_ms) - int(started_ms),
"exitCode": int(exit_code),
},
open(path, "w"),
indent=2,
)
PY
) &
CLIENT_PID=$!
wait "$OURS_PID"
wait "$CLIENT_PID"
EOS
# Pull back our tool's artifacts.
# NOTE(review): unlike the rpki-client loop below, there is no `|| true` here,
# so under `set -e` a single missing file (e.g. stage-timing.json after a
# failed run) aborts the whole workflow before summary.json is written —
# confirm this strictness is intentional.
for rel in result.ccr round-result.json run.log stage-timing.json; do
scp -C "$SSH_TARGET:$REMOTE_ROOT/steps/$step_id/ours/$rel" "$local_step/ours/"
done
# Pull back rpki-client artifacts best-effort (result.ccr/openbgpd may be
# absent when the reference run failed; the copies above were `|| true` too).
for rel in result.ccr round-result.json run.log openbgpd; do
scp -C "$SSH_TARGET:$REMOTE_ROOT/steps/$step_id/rpki-client/$rel" "$local_step/rpki-client/" || true
done
# Compare the two CCR outputs; results land in $local_step/compare.
"$ROOT_DIR/scripts/periodic/compare_ccr_round.sh" \
--ours-ccr "$local_step/ours/result.ccr" \
--rpki-client-ccr "$local_step/rpki-client/result.ccr" \
--out-dir "$local_step/compare" \
--trust-anchor unknown >/dev/null
# Merge timing (round-result.json, stage-timing.json) and comparison results
# into a single per-step summary consumed by the final summary.json merge.
python3 - <<'PY' "$local_step/ours/round-result.json" "$local_step/rpki-client/round-result.json" "$local_step/ours/stage-timing.json" "$local_step/compare/compare-summary.json" "$local_step/step-summary.json"
import json, sys
ours = json.load(open(sys.argv[1]))
client = json.load(open(sys.argv[2]))
stage = json.load(open(sys.argv[3]))
compare = json.load(open(sys.argv[4]))
json.dump(
{
"stepId": ours["stepId"],
"kind": ours["kind"],
"oursDurationMs": ours["durationMs"],
"rpkiClientDurationMs": client["durationMs"],
"oursExitCode": ours["exitCode"],
"rpkiClientExitCode": client["exitCode"],
"oursTotalMs": stage["total_ms"],
"oursRepoSyncMsTotal": stage["repo_sync_ms_total"],
"oursVrps": compare["vrps"]["ours"],
"rpkiClientVrps": compare["vrps"]["rpkiClient"],
"oursVaps": compare["vaps"]["ours"],
"rpkiClientVaps": compare["vaps"]["rpkiClient"],
"vrpMatch": compare["vrps"]["match"],
"vapMatch": compare["vaps"]["match"],
"allMatch": compare["allMatch"],
"onlyInOurs": len(compare["vrps"]["onlyInOurs"]),
"onlyInRpkiClient": len(compare["vrps"]["onlyInRpkiClient"]),
},
open(sys.argv[5], "w"),
indent=2,
)
PY
}
# Run the two synchronized rounds: a cold "snapshot" pass, then a warm "delta"
# pass that reuses the caches left behind by the first.
run_step step-001 snapshot
run_step step-002 delta
# Merge the two per-step summaries into the top-level summary.json and echo
# the same document to stdout for the operator.
python3 - <<'PY' "$RUN_ROOT/steps/step-001/step-summary.json" "$RUN_ROOT/steps/step-002/step-summary.json" "$RUN_ROOT/summary.json"
import json
import sys

step_paths = sys.argv[1:3]
out_path = sys.argv[3]
merged = {
    "workflowName": "性能对比测试快速版",
    "scope": "APNIC+ARIN mixed release two-step synchronized compare",
    "steps": [json.load(open(path)) for path in step_paths],
}
with open(out_path, "w") as fh:
    json.dump(merged, fh, indent=2, ensure_ascii=False)
print(json.dumps(merged, indent=2, ensure_ascii=False))
PY

View File

@ -21,6 +21,7 @@ RUN_ROOT=""
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}" SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT="" REMOTE_ROOT=""
RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}" RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}"
RPKI_CLIENT_LIBTLS_PATH="${RPKI_CLIENT_LIBTLS_PATH:-/home/yuyr/dev/rpki-client-9.7/.deps/libtls/root/usr/lib/x86_64-linux-gnu/libtls.so.28}"
ROUND_COUNT=10 ROUND_COUNT=10
INTERVAL_SECS=600 INTERVAL_SECS=600
START_AT="" START_AT=""
@ -32,6 +33,7 @@ while [[ $# -gt 0 ]]; do
--ssh-target) SSH_TARGET="$2"; shift 2 ;; --ssh-target) SSH_TARGET="$2"; shift 2 ;;
--remote-root) REMOTE_ROOT="$2"; shift 2 ;; --remote-root) REMOTE_ROOT="$2"; shift 2 ;;
--rpki-client-bin) RPKI_CLIENT_BIN="$2"; shift 2 ;; --rpki-client-bin) RPKI_CLIENT_BIN="$2"; shift 2 ;;
--rpki-client-libtls) RPKI_CLIENT_LIBTLS_PATH="$2"; shift 2 ;;
--round-count) ROUND_COUNT="$2"; shift 2 ;; --round-count) ROUND_COUNT="$2"; shift 2 ;;
--interval-secs) INTERVAL_SECS="$2"; shift 2 ;; --interval-secs) INTERVAL_SECS="$2"; shift 2 ;;
--start-at) START_AT="$2"; shift 2 ;; --start-at) START_AT="$2"; shift 2 ;;
@ -47,6 +49,7 @@ done
if [[ "$DRY_RUN" -ne 1 ]]; then if [[ "$DRY_RUN" -ne 1 ]]; then
[[ -n "$REMOTE_ROOT" ]] || { echo "--remote-root is required unless --dry-run" >&2; exit 2; } [[ -n "$REMOTE_ROOT" ]] || { echo "--remote-root is required unless --dry-run" >&2; exit 2; }
[[ -x "$RPKI_CLIENT_BIN" ]] || { echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2; exit 2; } [[ -x "$RPKI_CLIENT_BIN" ]] || { echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2; exit 2; }
[[ -f "$RPKI_CLIENT_LIBTLS_PATH" ]] || { echo "rpki-client libtls not found: $RPKI_CLIENT_LIBTLS_PATH" >&2; exit 2; }
fi fi
mkdir -p "$RUN_ROOT" mkdir -p "$RUN_ROOT"
@ -143,9 +146,10 @@ rsync -a --delete \
--exclude target \ --exclude target \
--exclude .git \ --exclude .git \
"$ROOT_DIR/" "$SSH_TARGET:$REMOTE_ROOT/repo/" "$ROOT_DIR/" "$SSH_TARGET:$REMOTE_ROOT/repo/"
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT/repo/target/release' '$REMOTE_ROOT/bin' '$REMOTE_ROOT/rounds' '$REMOTE_ROOT/state/ours' '$REMOTE_ROOT/state/rpki-client'" ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT/repo/target/release' '$REMOTE_ROOT/bin' '$REMOTE_ROOT/lib' '$REMOTE_ROOT/rounds' '$REMOTE_ROOT/state/ours' '$REMOTE_ROOT/state/rpki-client'"
rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_ROOT/repo/target/release/" rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_ROOT/repo/target/release/"
rsync -a "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_ROOT/bin/rpki-client" rsync -a "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_ROOT/bin/rpki-client"
rsync -aL "$RPKI_CLIENT_LIBTLS_PATH" "$SSH_TARGET:$REMOTE_ROOT/lib/libtls.so.28"
for idx in $(seq 1 "$ROUND_COUNT"); do for idx in $(seq 1 "$ROUND_COUNT"); do
ROUND_ID="$(printf 'round-%03d' "$idx")" ROUND_ID="$(printf 'round-%03d' "$idx")"

View File

@ -87,8 +87,8 @@ mkdir -p "$REMOTE_OUT"
if [[ "$KIND" == "snapshot" ]]; then if [[ "$KIND" == "snapshot" ]]; then
rm -rf "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$REMOTE_STATE_ROOT/ta" "$REMOTE_STATE_ROOT/.ta" rm -rf "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$REMOTE_STATE_ROOT/ta" "$REMOTE_STATE_ROOT/.ta"
fi fi
mkdir -p "$REMOTE_CACHE" "$REMOTE_STATE_OUT" mkdir -p "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$REMOTE_STATE_ROOT/ta" "$REMOTE_STATE_ROOT/.ta"
chmod 0777 "$REMOTE_STATE_ROOT" "$REMOTE_CACHE" "$REMOTE_STATE_OUT" chmod -R 0777 "$REMOTE_STATE_ROOT"
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)" started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY' started_at_ms="$(python3 - <<'PY'
@ -104,7 +104,8 @@ meta_out="$REMOTE_OUT/round-result.json"
set +e set +e
( (
cd "$REMOTE_STATE_ROOT" cd "$REMOTE_STATE_ROOT"
"$REMOTE_BIN" -vv -t "../../repo/tests/fixtures/tal/apnic-rfc7730-https.tal" -d "cache" "out" LD_LIBRARY_PATH="$REMOTE_ROOT/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" \
"$REMOTE_BIN" -vv -t "../../repo/tests/fixtures/tal/apnic-rfc7730-https.tal" -d "cache" "out"
) >"$run_log" 2>&1 ) >"$run_log" 2>&1
exit_code=$? exit_code=$?
set -e set -e

View File

@ -26,6 +26,7 @@ RUN_ROOT=""
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}" SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT="" REMOTE_ROOT=""
RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}" RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}"
RPKI_CLIENT_LIBTLS_PATH="${RPKI_CLIENT_LIBTLS_PATH:-/home/yuyr/dev/rpki-client-9.7/.deps/libtls/root/usr/lib/x86_64-linux-gnu/libtls.so.28}"
ROUND_COUNT=10 ROUND_COUNT=10
INTERVAL_SECS=600 INTERVAL_SECS=600
START_AT="" START_AT=""
@ -37,6 +38,7 @@ while [[ $# -gt 0 ]]; do
--ssh-target) SSH_TARGET="$2"; shift 2 ;; --ssh-target) SSH_TARGET="$2"; shift 2 ;;
--remote-root) REMOTE_ROOT="$2"; shift 2 ;; --remote-root) REMOTE_ROOT="$2"; shift 2 ;;
--rpki-client-bin) RPKI_CLIENT_BIN="$2"; shift 2 ;; --rpki-client-bin) RPKI_CLIENT_BIN="$2"; shift 2 ;;
--rpki-client-libtls) RPKI_CLIENT_LIBTLS_PATH="$2"; shift 2 ;;
--round-count) ROUND_COUNT="$2"; shift 2 ;; --round-count) ROUND_COUNT="$2"; shift 2 ;;
--interval-secs) INTERVAL_SECS="$2"; shift 2 ;; --interval-secs) INTERVAL_SECS="$2"; shift 2 ;;
--start-at) START_AT="$2"; shift 2 ;; --start-at) START_AT="$2"; shift 2 ;;
@ -52,6 +54,7 @@ done
if [[ "$DRY_RUN" -ne 1 ]]; then if [[ "$DRY_RUN" -ne 1 ]]; then
[[ -n "$REMOTE_ROOT" ]] || { echo "--remote-root is required unless --dry-run" >&2; exit 2; } [[ -n "$REMOTE_ROOT" ]] || { echo "--remote-root is required unless --dry-run" >&2; exit 2; }
[[ -x "$RPKI_CLIENT_BIN" ]] || { echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2; exit 2; } [[ -x "$RPKI_CLIENT_BIN" ]] || { echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2; exit 2; }
[[ -f "$RPKI_CLIENT_LIBTLS_PATH" ]] || { echo "rpki-client libtls not found: $RPKI_CLIENT_LIBTLS_PATH" >&2; exit 2; }
fi fi
mkdir -p "$RUN_ROOT" mkdir -p "$RUN_ROOT"
@ -180,9 +183,10 @@ rsync -a --delete \
--exclude target \ --exclude target \
--exclude .git \ --exclude .git \
"$ROOT_DIR/" "$SSH_TARGET:$REMOTE_ROOT/repo/" "$ROOT_DIR/" "$SSH_TARGET:$REMOTE_ROOT/repo/"
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT/repo/target/release' '$REMOTE_ROOT/bin' '$REMOTE_ROOT/rounds' '$REMOTE_ROOT/state/ours' '$REMOTE_ROOT/state/rpki-client'" ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT/repo/target/release' '$REMOTE_ROOT/bin' '$REMOTE_ROOT/lib' '$REMOTE_ROOT/rounds' '$REMOTE_ROOT/state/ours' '$REMOTE_ROOT/state/rpki-client'"
rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_ROOT/repo/target/release/" rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_ROOT/repo/target/release/"
rsync -a "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_ROOT/bin/rpki-client" rsync -a "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_ROOT/bin/rpki-client"
rsync -aL "$RPKI_CLIENT_LIBTLS_PATH" "$SSH_TARGET:$REMOTE_ROOT/lib/libtls.so.28"
for idx in $(seq 1 "$ROUND_COUNT"); do for idx in $(seq 1 "$ROUND_COUNT"); do
ROUND_ID="$(printf 'round-%03d' "$idx")" ROUND_ID="$(printf 'round-%03d' "$idx")"

View File

@ -88,8 +88,8 @@ mkdir -p "$REMOTE_OUT"
if [[ "$KIND" == "snapshot" ]]; then if [[ "$KIND" == "snapshot" ]]; then
rm -rf "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$REMOTE_STATE_ROOT/ta" "$REMOTE_STATE_ROOT/.ta" rm -rf "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$REMOTE_STATE_ROOT/ta" "$REMOTE_STATE_ROOT/.ta"
fi fi
mkdir -p "$REMOTE_CACHE" "$REMOTE_STATE_OUT" mkdir -p "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$REMOTE_STATE_ROOT/ta" "$REMOTE_STATE_ROOT/.ta"
chmod 0777 "$REMOTE_STATE_ROOT" "$REMOTE_CACHE" "$REMOTE_STATE_OUT" chmod -R 0777 "$REMOTE_STATE_ROOT"
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)" started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY' started_at_ms="$(python3 - <<'PY'
@ -106,7 +106,8 @@ meta_out="$REMOTE_OUT/round-result.json"
set +e set +e
( (
cd "$REMOTE_STATE_ROOT" cd "$REMOTE_STATE_ROOT"
"$REMOTE_BIN" -vv -t "../../repo/tests/fixtures/tal/arin.tal" -d "cache" "out" LD_LIBRARY_PATH="$REMOTE_ROOT/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}" \
"$REMOTE_BIN" -vv -t "../../repo/tests/fixtures/tal/arin.tal" -d "cache" "out"
) >"$run_log" 2>&1 ) >"$run_log" 2>&1
exit_code=$? exit_code=$?
set -e set -e

View File

@ -134,7 +134,12 @@ pub fn trace_rule_to_root(
Ok(Some(AuditRuleTrace { Ok(Some(AuditRuleTrace {
rule, rule,
resolved_output: resolved_output_from_local(&local_output), resolved_output: resolved_output_from_local(&local_output),
source_object_raw: resolve_raw_ref(store, &local_output.source_object_hash)?, source_object_raw: resolve_raw_ref(
store,
&local_output.source_object_hash,
Some(&local_output.source_object_uri),
Some(local_output.source_object_type.as_str()),
)?,
source_ee_cert_raw: resolve_source_ee_cert_raw_ref(store, &local_output)?, source_ee_cert_raw: resolve_source_ee_cert_raw_ref(store, &local_output)?,
chain_leaf_to_root: chain, chain_leaf_to_root: chain,
})) }))
@ -184,7 +189,12 @@ fn trace_chain_node(
uri: artifact.uri.clone(), uri: artifact.uri.clone(),
object_type: artifact.object_type.clone(), object_type: artifact.object_type.clone(),
validation_status: artifact.validation_status, validation_status: artifact.validation_status,
raw: resolve_raw_ref(store, &artifact.sha256)?, raw: resolve_raw_ref(
store,
&artifact.sha256,
artifact.uri.as_deref(),
artifact.object_type.as_deref(),
)?,
}); });
} }
@ -225,9 +235,27 @@ fn resolved_output_from_local(local: &VcirLocalOutput) -> AuditTraceResolvedOutp
fn resolve_raw_ref( fn resolve_raw_ref(
store: &RocksStore, store: &RocksStore,
sha256_hex: &str, sha256_hex: &str,
fallback_uri: Option<&str>,
fallback_object_type: Option<&str>,
) -> Result<AuditTraceRawRef, AuditTraceError> { ) -> Result<AuditTraceRawRef, AuditTraceError> {
let raw = store.get_raw_by_hash_entry(sha256_hex)?; let raw = store.get_raw_by_hash_entry(sha256_hex)?;
Ok(raw_ref_from_entry(sha256_hex, raw.as_ref())) if raw.is_some() {
return Ok(raw_ref_from_entry(sha256_hex, raw.as_ref()));
}
let blob = store.get_blob_bytes(sha256_hex)?;
match blob {
Some(bytes) => Ok(AuditTraceRawRef {
sha256_hex: sha256_hex.to_string(),
raw_present: true,
origin_uris: fallback_uri
.map(|uri| vec![uri.to_string()])
.unwrap_or_default(),
object_type: fallback_object_type.map(str::to_string),
byte_len: Some(bytes.len()),
}),
None => Ok(raw_ref_from_entry(sha256_hex, None)),
}
} }
fn resolve_source_ee_cert_raw_ref( fn resolve_source_ee_cert_raw_ref(
@ -239,31 +267,27 @@ fn resolve_source_ee_cert_raw_ref(
return Ok(raw_ref_from_entry(&local.source_ee_cert_hash, raw.as_ref())); return Ok(raw_ref_from_entry(&local.source_ee_cert_hash, raw.as_ref()));
} }
let source_raw = store.get_raw_by_hash_entry(&local.source_object_hash)?; let source_bytes = store.get_blob_bytes(&local.source_object_hash)?;
let Some(source_raw) = source_raw else { let Some(source_bytes) = source_bytes else {
return Ok(raw_ref_from_entry(&local.source_ee_cert_hash, None)); return Ok(raw_ref_from_entry(&local.source_ee_cert_hash, None));
}; };
let derived = match local.source_object_type.as_str() { let derived = match local.source_object_type.as_str() {
"roa" => RoaObject::decode_der(&source_raw.bytes) "roa" => RoaObject::decode_der(&source_bytes).ok().and_then(|roa| {
.ok() roa.signed_object
.and_then(|roa| { .signed_data
roa.signed_object .certificates
.signed_data .first()
.certificates .map(|cert| cert.raw_der.to_vec())
.first() }),
.map(|cert| cert.raw_der.to_vec()) "aspa" => AspaObject::decode_der(&source_bytes).ok().and_then(|aspa| {
}), aspa.signed_object
"aspa" => AspaObject::decode_der(&source_raw.bytes) .signed_data
.ok() .certificates
.and_then(|aspa| { .first()
aspa.signed_object .map(|cert| cert.raw_der.to_vec())
.signed_data }),
.certificates "mft" => ManifestObject::decode_der(&source_bytes)
.first()
.map(|cert| cert.raw_der.to_vec())
}),
"mft" => ManifestObject::decode_der(&source_raw.bytes)
.ok() .ok()
.and_then(|manifest| { .and_then(|manifest| {
manifest manifest
@ -273,7 +297,7 @@ fn resolve_source_ee_cert_raw_ref(
.first() .first()
.map(|cert| cert.raw_der.to_vec()) .map(|cert| cert.raw_der.to_vec())
}), }),
"router_key" => Some(source_raw.bytes.clone()), "router_key" => Some(source_bytes),
_ => None, _ => None,
}; };
@ -461,6 +485,12 @@ mod tests {
.expect("put raw evidence"); .expect("put raw evidence");
} }
fn put_blob_only(store: &RocksStore, bytes: &[u8]) {
store
.put_blob_bytes_batch(&[(sha256_hex(bytes), bytes.to_vec())])
.expect("put blob bytes");
}
#[test] #[test]
fn trace_rule_to_root_returns_leaf_to_root_chain_and_evidence_refs() { fn trace_rule_to_root_returns_leaf_to_root_chain_and_evidence_refs() {
let store_dir = tempfile::tempdir().expect("store dir"); let store_dir = tempfile::tempdir().expect("store dir");
@ -621,7 +651,11 @@ mod tests {
source_object_uri: "rsync://example.test/leaf/a.roa".to_string(), source_object_uri: "rsync://example.test/leaf/a.roa".to_string(),
source_object_type: "roa".to_string(), source_object_type: "roa".to_string(),
source_object_hash: sha256_hex(&roa_bytes), source_object_hash: sha256_hex(&roa_bytes),
source_ee_cert_hash: sha256_hex(roa.signed_object.signed_data.certificates[0].raw_der.as_slice()), source_ee_cert_hash: sha256_hex(
roa.signed_object.signed_data.certificates[0]
.raw_der
.as_slice(),
),
payload_json: payload_json:
serde_json::json!({"asn": 64496, "prefix": "203.0.113.0/24", "max_length": 24}) serde_json::json!({"asn": 64496, "prefix": "203.0.113.0/24", "max_length": 24})
.to_string(), .to_string(),
@ -666,6 +700,81 @@ mod tests {
assert_eq!(trace.source_ee_cert_raw.object_type.as_deref(), Some("cer")); assert_eq!(trace.source_ee_cert_raw.object_type.as_deref(), Some("cer"));
} }
#[test]
fn trace_rule_to_root_uses_blob_only_fallback_for_source_object_raw() {
let store_dir = tempfile::tempdir().expect("store dir");
let main_db = store_dir.path().join("main-db");
let raw_db = store_dir.path().join("raw-store.db");
let store =
RocksStore::open_with_external_raw_store(&main_db, &raw_db).expect("open rocksdb");
let manifest = "rsync://example.test/leaf/leaf.mft";
let roa_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
.join("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS142071.roa");
let roa_bytes = std::fs::read(&roa_path).expect("read ROA fixture");
let roa = RoaObject::decode_der(&roa_bytes).expect("decode ROA fixture");
let local = VcirLocalOutput {
output_id: sha256_hex(b"blob-only-vrp-output"),
output_type: VcirOutputType::Vrp,
item_effective_until: PackTime::from_utc_offset_datetime(
time::OffsetDateTime::now_utc() + time::Duration::minutes(30),
),
source_object_uri: "rsync://example.test/leaf/blob-only.roa".to_string(),
source_object_type: "roa".to_string(),
source_object_hash: sha256_hex(&roa_bytes),
source_ee_cert_hash: sha256_hex(
roa.signed_object.signed_data.certificates[0]
.raw_der
.as_slice(),
),
payload_json:
serde_json::json!({"asn": 64496, "prefix": "203.0.113.0/24", "max_length": 24})
.to_string(),
rule_hash: sha256_hex(b"blob-only-roa-rule"),
validation_path_hint: vec![manifest.to_string()],
};
let vcir = sample_vcir(
manifest,
None,
"test-tal",
Some(local.clone()),
sample_artifacts(manifest, &local.source_object_hash),
);
store.put_vcir(&vcir).expect("put vcir");
let rule_entry = AuditRuleIndexEntry {
kind: AuditRuleKind::Roa,
rule_hash: local.rule_hash.clone(),
manifest_rsync_uri: manifest.to_string(),
source_object_uri: local.source_object_uri.clone(),
source_object_hash: local.source_object_hash.clone(),
output_id: local.output_id.clone(),
item_effective_until: local.item_effective_until.clone(),
};
store
.put_audit_rule_index_entry(&rule_entry)
.expect("put rule index");
put_raw_evidence(&store, manifest.as_bytes(), manifest, "mft");
put_raw_evidence(
&store,
format!("{}-crl", manifest).as_bytes(),
&manifest.replace(".mft", ".crl"),
"crl",
);
put_blob_only(&store, &roa_bytes);
let trace = trace_rule_to_root(&store, AuditRuleKind::Roa, &local.rule_hash)
.expect("trace rule")
.expect("trace exists");
assert!(trace.source_object_raw.raw_present);
assert_eq!(
trace.source_object_raw.origin_uris,
vec![local.source_object_uri.clone()]
);
assert_eq!(trace.source_object_raw.object_type.as_deref(), Some("roa"));
assert_eq!(trace.source_object_raw.byte_len, Some(roa_bytes.len()));
assert!(trace.source_ee_cert_raw.raw_present);
}
#[test] #[test]
fn trace_rule_to_root_returns_none_for_missing_rule_index() { fn trace_rule_to_root_returns_none_for_missing_rule_index() {
let store_dir = tempfile::tempdir().expect("store dir"); let store_dir = tempfile::tempdir().expect("store dir");

View File

@ -197,8 +197,7 @@ fn main() -> Result<(), String> {
} else if let Some(raw_store_db) = raw_store_db.as_ref() { } else if let Some(raw_store_db) = raw_store_db.as_ref() {
ExternalRawStoreDb::open(raw_store_db) ExternalRawStoreDb::open(raw_store_db)
.ok() .ok()
.and_then(|store| store.get_raw_entry(&hash).ok().flatten()) .and_then(|store| store.get_blob_bytes(&hash).ok().flatten())
.map(|entry| entry.bytes)
} else { } else {
None None
}; };

View File

@ -16,6 +16,26 @@ fn raw_blob_key(sha256_hex: &str) -> String {
format!("{RAW_BLOB_KEY_PREFIX}{sha256_hex}") format!("{RAW_BLOB_KEY_PREFIX}{sha256_hex}")
} }
fn validate_blob_sha256_hex(sha256_hex: &str) -> StorageResult<()> {
if sha256_hex.len() != 64 || !sha256_hex.as_bytes().iter().all(u8::is_ascii_hexdigit) {
return Err(StorageError::InvalidData {
entity: "raw_blob",
detail: format!("invalid sha256 hex: {sha256_hex}"),
});
}
Ok(())
}
fn validate_blob_bytes(bytes: &[u8]) -> StorageResult<()> {
if bytes.is_empty() {
return Err(StorageError::InvalidData {
entity: "raw_blob",
detail: "bytes must not be empty".to_string(),
});
}
Ok(())
}
pub trait RawObjectStore { pub trait RawObjectStore {
fn get_raw_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>>; fn get_raw_entry(&self, sha256_hex: &str) -> StorageResult<Option<RawByHashEntry>>;
@ -103,6 +123,23 @@ impl ExternalRawStoreDb {
Ok(()) Ok(())
} }
pub fn put_blob_bytes_batch(&self, blobs: &[(String, Vec<u8>)]) -> StorageResult<()> {
if blobs.is_empty() {
return Ok(());
}
let mut batch = WriteBatch::default();
for (sha256_hex, bytes) in blobs {
validate_blob_sha256_hex(sha256_hex)?;
validate_blob_bytes(bytes)?;
let blob_key = raw_blob_key(sha256_hex);
batch.put(blob_key.as_bytes(), bytes.as_slice());
}
self.db
.write(batch)
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
Ok(())
}
pub fn delete_raw_entry(&self, sha256_hex: &str) -> StorageResult<()> { pub fn delete_raw_entry(&self, sha256_hex: &str) -> StorageResult<()> {
let key = raw_by_hash_key(sha256_hex); let key = raw_by_hash_key(sha256_hex);
let blob_key = raw_blob_key(sha256_hex); let blob_key = raw_blob_key(sha256_hex);
@ -134,23 +171,11 @@ impl RawObjectStore for RocksStore {
} }
fn get_blob_bytes(&self, sha256_hex: &str) -> StorageResult<Option<Vec<u8>>> { fn get_blob_bytes(&self, sha256_hex: &str) -> StorageResult<Option<Vec<u8>>> {
if let Some(raw_store) = self.external_raw_store_ref() { RocksStore::get_blob_bytes(self, sha256_hex)
return raw_store.get_blob_bytes(sha256_hex);
}
self.get_raw_entry(sha256_hex)
.map(|entry| entry.map(|entry| entry.bytes))
} }
fn get_blob_bytes_batch(&self, sha256_hexes: &[String]) -> StorageResult<Vec<Option<Vec<u8>>>> { fn get_blob_bytes_batch(&self, sha256_hexes: &[String]) -> StorageResult<Vec<Option<Vec<u8>>>> {
if let Some(raw_store) = self.external_raw_store_ref() { RocksStore::get_blob_bytes_batch(self, sha256_hexes)
return raw_store.get_blob_bytes_batch(sha256_hexes);
}
self.get_raw_entries_batch(sha256_hexes).map(|entries| {
entries
.into_iter()
.map(|entry| entry.map(|entry| entry.bytes))
.collect()
})
} }
} }
@ -431,6 +456,47 @@ mod tests {
); );
} }
#[test]
fn put_blob_bytes_batch_round_trips_without_raw_entry() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store =
ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
let a = (sha256_hex(b"blob-a"), b"blob-a".to_vec());
let b = (sha256_hex(b"blob-b"), b"blob-b".to_vec());
raw_store
.put_blob_bytes_batch(&[a.clone(), b.clone()])
.expect("put blobs");
assert_eq!(
raw_store.get_blob_bytes(&a.0).expect("get blob a"),
Some(a.1.clone())
);
assert_eq!(
raw_store.get_blob_bytes(&b.0).expect("get blob b"),
Some(b.1.clone())
);
assert!(raw_store.get_raw_entry(&a.0).expect("get raw a").is_none());
assert!(raw_store.get_raw_entry(&b.0).expect("get raw b").is_none());
}
#[test]
fn put_blob_bytes_batch_rejects_invalid_inputs() {
let td = tempfile::tempdir().expect("tempdir");
let raw_store =
ExternalRawStoreDb::open(td.path().join("raw-store.db")).expect("open raw store");
let err = raw_store
.put_blob_bytes_batch(&[("zz".repeat(32), b"blob".to_vec())])
.expect_err("invalid hash should fail");
assert!(matches!(err, StorageError::InvalidData { .. }));
let err = raw_store
.put_blob_bytes_batch(&[(sha256_hex(b"blob"), Vec::new())])
.expect_err("empty bytes should fail");
assert!(matches!(err, StorageError::InvalidData { .. }));
}
#[test] #[test]
fn external_raw_store_db_rejects_invalid_entry_on_put() { fn external_raw_store_db_rejects_invalid_entry_on_put() {
let td = tempfile::tempdir().expect("tempdir"); let td = tempfile::tempdir().expect("tempdir");

View File

@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
use sha2::Digest; use sha2::Digest;
use crate::blob_store::RawObjectStore;
use crate::ccr::encode::{ use crate::ccr::encode::{
encode_aspa_payload_state_payload_der, encode_manifest_state_payload_der, encode_aspa_payload_state_payload_der, encode_manifest_state_payload_der,
encode_roa_payload_state_payload_der, encode_router_key_state_payload_der, encode_roa_payload_state_payload_der, encode_router_key_state_payload_der,

View File

@ -3,7 +3,6 @@ use std::collections::BTreeSet;
use std::path::Path; use std::path::Path;
use crate::audit::{AuditObjectResult, PublicationPointAudit}; use crate::audit::{AuditObjectResult, PublicationPointAudit};
use crate::blob_store::RawObjectStore;
use crate::cir::encode::{CirEncodeError, encode_cir}; use crate::cir::encode::{CirEncodeError, encode_cir};
use crate::cir::model::{ use crate::cir::model::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,
@ -266,7 +265,7 @@ pub fn export_cir_raw_store(
let mut reused_entries = 0usize; let mut reused_entries = 0usize;
for sha256_hex in &unique { for sha256_hex in &unique {
if store if store
.get_raw_entry(sha256_hex) .get_blob_bytes(sha256_hex)
.map_err(|e| { .map_err(|e| {
CirExportError::Write(raw_store_path.display().to_string(), e.to_string()) CirExportError::Write(raw_store_path.display().to_string(), e.to_string())
})? })?
@ -373,8 +372,8 @@ fn ta_sha256_hex(bytes: &[u8]) -> String {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::current_repo_index::CurrentRepoObject;
use crate::cir::decode::decode_cir; use crate::cir::decode::decode_cir;
use crate::current_repo_index::CurrentRepoObject;
use crate::data_model::ta::TrustAnchor; use crate::data_model::ta::TrustAnchor;
use crate::data_model::tal::Tal; use crate::data_model::tal::Tal;
use crate::storage::{RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore}; use crate::storage::{RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore};
@ -730,14 +729,9 @@ mod tests {
) )
.expect("build cir"); .expect("build cir");
let summary = export_cir_static_pool( let summary =
&store, export_cir_static_pool(&store, &static_root, sample_date(), &cir, &[&ta1, &ta2])
&static_root, .expect("export static pool");
sample_date(),
&cir,
&[&ta1, &ta2],
)
.expect("export static pool");
assert!(summary.unique_hashes >= 3); assert!(summary.unique_hashes >= 3);
assert!(summary.written_files >= 3); assert!(summary.written_files >= 3);
} }
@ -746,8 +740,9 @@ mod tests {
fn export_cir_raw_store_reports_missing_non_ta_object_and_writes_ta_entries() { fn export_cir_raw_store_reports_missing_non_ta_object_and_writes_ta_entries() {
let td = tempfile::tempdir().unwrap(); let td = tempfile::tempdir().unwrap();
let raw_store_path = td.path().join("raw-store.db"); let raw_store_path = td.path().join("raw-store.db");
let store = RocksStore::open_with_external_raw_store(&td.path().join("db"), &raw_store_path) let store =
.unwrap(); RocksStore::open_with_external_raw_store(&td.path().join("db"), &raw_store_path)
.unwrap();
let ta1 = sample_trust_anchor(); let ta1 = sample_trust_anchor();
let ta2 = sample_arin_trust_anchor(); let ta2 = sample_arin_trust_anchor();
@ -769,13 +764,8 @@ mod tests {
) )
.expect("build cir with tas only"); .expect("build cir with tas only");
let summary = export_cir_raw_store( let summary = export_cir_raw_store(&store, &raw_store_path, &cir_only_tas, &[&ta1, &ta2])
&store, .expect("export raw store");
&raw_store_path,
&cir_only_tas,
&[&ta1, &ta2],
)
.expect("export raw store");
assert!(summary.unique_hashes >= 2); assert!(summary.unique_hashes >= 2);
assert!(summary.written_entries >= 2 || summary.reused_entries >= 2); assert!(summary.written_entries >= 2 || summary.reused_entries >= 2);
@ -784,13 +774,8 @@ mod tests {
rsync_uri: "rsync://example.test/repo/missing.roa".to_string(), rsync_uri: "rsync://example.test/repo/missing.roa".to_string(),
sha256: vec![0x44; 32], sha256: vec![0x44; 32],
}); });
let err = export_cir_raw_store( let err = export_cir_raw_store(&store, &raw_store_path, &cir_missing_object, &[&ta1, &ta2])
&store, .expect_err("missing non-ta object must fail");
&raw_store_path,
&cir_missing_object,
&[&ta1, &ta2],
)
.expect_err("missing non-ta object must fail");
assert!(matches!(err, CirExportError::Write(_, _)), "{err}"); assert!(matches!(err, CirExportError::Write(_, _)), "{err}");
} }
} }

View File

@ -165,8 +165,8 @@ pub fn materialize_cir_from_raw_store(
let mut copied_files = 0usize; let mut copied_files = 0usize;
for object in &cir.objects { for object in &cir.objects {
let sha256_hex = hex::encode(&object.sha256); let sha256_hex = hex::encode(&object.sha256);
let entry = raw_store let bytes = raw_store
.get_raw_entry(&sha256_hex) .get_blob_bytes(&sha256_hex)
.map_err(|e| CirMaterializeError::ReadRawStore { .map_err(|e| CirMaterializeError::ReadRawStore {
sha256_hex: sha256_hex.clone(), sha256_hex: sha256_hex.clone(),
detail: e.to_string(), detail: e.to_string(),
@ -191,7 +191,7 @@ pub fn materialize_cir_from_raw_store(
})?; })?;
} }
fs::write(&target, &entry.bytes).map_err(|e| CirMaterializeError::Copy { fs::write(&target, &bytes).map_err(|e| CirMaterializeError::Copy {
src: raw_store_db.display().to_string(), src: raw_store_db.display().to_string(),
dst: target.display().to_string(), dst: target.display().to_string(),
detail: e.to_string(), detail: e.to_string(),
@ -629,15 +629,12 @@ mod tests {
{ {
let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap(); let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
let mut entry_a = raw_store
crate::storage::RawByHashEntry::from_bytes(hex::encode(&cir.objects[0].sha256), a); .put_blob_bytes_batch(&[
entry_a.origin_uris.push(cir.objects[0].rsync_uri.clone()); (hex::encode(&cir.objects[0].sha256), a),
raw_store.put_raw_entry(&entry_a).unwrap(); (hex::encode(&cir.objects[1].sha256), b),
])
let mut entry_b = .unwrap();
crate::storage::RawByHashEntry::from_bytes(hex::encode(&cir.objects[1].sha256), b);
entry_b.origin_uris.push(cir.objects[1].rsync_uri.clone());
raw_store.put_raw_entry(&entry_b).unwrap();
} }
let summary = let summary =
@ -663,11 +660,9 @@ mod tests {
let cir = cir_with_real_hashes(b"a", b"b"); let cir = cir_with_real_hashes(b"a", b"b");
{ {
let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap(); let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
let only = crate::storage::RawByHashEntry::from_bytes( raw_store
hex::encode(&cir.objects[0].sha256), .put_blob_bytes_batch(&[(hex::encode(&cir.objects[0].sha256), b"a".to_vec())])
b"a".to_vec(), .unwrap();
);
raw_store.put_raw_entry(&only).unwrap();
} }
let err = materialize_cir_from_raw_store(&cir, &raw_store_path, &mirror_root, true) let err = materialize_cir_from_raw_store(&cir, &raw_store_path, &mirror_root, true)
@ -686,18 +681,12 @@ mod tests {
let cir = cir_with_real_hashes(b"a", b"b"); let cir = cir_with_real_hashes(b"a", b"b");
{ {
let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap(); let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
let mut entry_a = crate::storage::RawByHashEntry::from_bytes( raw_store
hex::encode(&cir.objects[0].sha256), .put_blob_bytes_batch(&[
b"a".to_vec(), (hex::encode(&cir.objects[0].sha256), b"a".to_vec()),
); (hex::encode(&cir.objects[1].sha256), b"b".to_vec()),
entry_a.origin_uris.push(cir.objects[0].rsync_uri.clone()); ])
raw_store.put_raw_entry(&entry_a).unwrap(); .unwrap();
let mut entry_b = crate::storage::RawByHashEntry::from_bytes(
hex::encode(&cir.objects[1].sha256),
b"b".to_vec(),
);
entry_b.origin_uris.push(cir.objects[1].rsync_uri.clone());
raw_store.put_raw_entry(&entry_b).unwrap();
} }
std::fs::create_dir_all(mirror_root.join("extra")).unwrap(); std::fs::create_dir_all(mirror_root.join("extra")).unwrap();
std::fs::write(mirror_root.join("extra/stale.txt"), b"stale").unwrap(); std::fs::write(mirror_root.join("extra/stale.txt"), b"stale").unwrap();
@ -717,18 +706,12 @@ mod tests {
let cir = cir_with_real_hashes(&a, &b); let cir = cir_with_real_hashes(&a, &b);
{ {
let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap(); let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
let mut entry_a = crate::storage::RawByHashEntry::from_bytes( raw_store
hex::encode(&cir.objects[0].sha256), .put_blob_bytes_batch(&[
a.clone(), (hex::encode(&cir.objects[0].sha256), a.clone()),
); (hex::encode(&cir.objects[1].sha256), b.clone()),
entry_a.origin_uris.push(cir.objects[0].rsync_uri.clone()); ])
raw_store.put_raw_entry(&entry_a).unwrap(); .unwrap();
let mut entry_b = crate::storage::RawByHashEntry::from_bytes(
hex::encode(&cir.objects[1].sha256),
b.clone(),
);
entry_b.origin_uris.push(cir.objects[1].rsync_uri.clone());
raw_store.put_raw_entry(&entry_b).unwrap();
} }
let target = mirror_root.join("example.net/repo/a.cer"); let target = mirror_root.join("example.net/repo/a.cer");
std::fs::create_dir_all(target.parent().unwrap()).unwrap(); std::fs::create_dir_all(target.parent().unwrap()).unwrap();
@ -741,12 +724,34 @@ mod tests {
} }
#[test] #[test]
fn materialize_from_raw_store_reports_codec_errors() { fn materialize_from_raw_store_ignores_corrupt_raw_entry_when_blob_exists() {
let td = tempfile::tempdir().unwrap(); let td = tempfile::tempdir().unwrap();
let raw_store_path = td.path().join("raw-store.db"); let raw_store_path = td.path().join("raw-store.db");
let mirror_root = td.path().join("mirror"); let mirror_root = td.path().join("mirror");
let cir = CanonicalInputRepresentation {
version: CIR_VERSION_V1,
hash_alg: CirHashAlgorithm::Sha256,
validation_time: sample_time(),
objects: vec![CirObject {
rsync_uri: "rsync://example.net/repo/a.cer".to_string(),
sha256: hex::decode(
"1111111111111111111111111111111111111111111111111111111111111111",
)
.unwrap(),
}],
tals: vec![CirTal {
tal_uri: "https://tal.example.net/root.tal".to_string(),
tal_bytes: b"x".to_vec(),
}],
};
{ {
let _raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap(); let raw_store = ExternalRawStoreDb::open(&raw_store_path).unwrap();
raw_store
.put_blob_bytes_batch(&[(
"1111111111111111111111111111111111111111111111111111111111111111".to_string(),
b"blob-a".to_vec(),
)])
.unwrap();
} }
{ {
let db = rocksdb::DB::open_default(&raw_store_path).unwrap(); let db = rocksdb::DB::open_default(&raw_store_path).unwrap();
@ -757,37 +762,15 @@ mod tests {
.unwrap(); .unwrap();
} }
let err = materialize_cir_from_raw_store( let summary =
&CanonicalInputRepresentation { materialize_cir_from_raw_store(&cir, &raw_store_path, &mirror_root, true).unwrap();
version: CIR_VERSION_V1, assert_eq!(summary.object_count, 1);
hash_alg: CirHashAlgorithm::Sha256, assert_eq!(
validation_time: sample_time(), std::fs::read(mirror_root.join("example.net/repo/a.cer")).unwrap(),
objects: vec![CirObject { b"blob-a"
rsync_uri: "rsync://example.net/repo/a.cer".to_string(), );
sha256: hex::decode(
"1111111111111111111111111111111111111111111111111111111111111111",
)
.unwrap(),
}],
tals: vec![CirTal {
tal_uri: "https://tal.example.net/root.tal".to_string(),
tal_bytes: b"x".to_vec(),
}],
},
&raw_store_path,
&mirror_root,
true,
)
.expect_err("corrupt raw-store object should fail");
assert!(matches!(
err,
CirMaterializeError::ReadRawStore { .. }
| CirMaterializeError::MissingRawStoreObject { .. }
));
} }
#[test] #[test]
fn materialize_from_repo_bytes_creates_expected_tree() { fn materialize_from_repo_bytes_creates_expected_tree() {
let td = tempfile::tempdir().unwrap(); let td = tempfile::tempdir().unwrap();

View File

@ -17,8 +17,7 @@ pub use export::{
}; };
pub use materialize::{ pub use materialize::{
CirMaterializeError, CirMaterializeSummary, materialize_cir, materialize_cir_from_raw_store, CirMaterializeError, CirMaterializeSummary, materialize_cir, materialize_cir_from_raw_store,
materialize_cir_from_repo_bytes, mirror_relative_path_for_rsync_uri, materialize_cir_from_repo_bytes, mirror_relative_path_for_rsync_uri, resolve_static_pool_file,
resolve_static_pool_file,
}; };
pub use model::{ pub use model::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal,

View File

@ -3,7 +3,6 @@ use std::fs::{self, OpenOptions};
use std::io::Write; use std::io::Write;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use crate::blob_store::RawObjectStore;
use crate::storage::{RawByHashEntry, RocksStore}; use crate::storage::{RawByHashEntry, RocksStore};
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]

View File

@ -452,10 +452,7 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let tal_path = tal_paths.first().cloned(); let tal_path = tal_paths.first().cloned();
let ta_path = ta_paths.first().cloned(); let ta_path = ta_paths.first().cloned();
if cir_enabled && cir_out_path.is_none() { if cir_enabled && cir_out_path.is_none() {
return Err(format!( return Err(format!("--cir-enable requires --cir-out\n\n{}", usage()));
"--cir-enable requires --cir-out\n\n{}",
usage()
));
} }
if cir_static_root.is_some() { if cir_static_root.is_some() {
return Err(format!( return Err(format!(
@ -463,9 +460,7 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
usage() usage()
)); ));
} }
if !cir_enabled if !cir_enabled && (cir_out_path.is_some() || !cir_tal_uris.is_empty()) {
&& (cir_out_path.is_some() || !cir_tal_uris.is_empty())
{
return Err(format!( return Err(format!(
"--cir-out/--cir-tal-uri require --cir-enable\n\n{}", "--cir-out/--cir-tal-uri require --cir-enable\n\n{}",
usage() usage()
@ -1741,10 +1736,7 @@ mod tests {
"--cir-enable".to_string(), "--cir-enable".to_string(),
]; ];
let err = parse_args(&argv_missing).unwrap_err(); let err = parse_args(&argv_missing).unwrap_err();
assert!( assert!(err.contains("--cir-enable requires --cir-out"), "{err}");
err.contains("--cir-enable requires --cir-out"),
"{err}"
);
let argv_needs_enable = vec![ let argv_needs_enable = vec![
"rpki".to_string(), "rpki".to_string(),

View File

@ -230,8 +230,16 @@ mod tests {
let mut index = handle.lock().expect("lock index"); let mut index = handle.lock().expect("lock index");
index index
.apply_repository_view_entries(&[ .apply_repository_view_entries(&[
present("rsync://example.test/repo-b/", "rsync://example.test/repo-b/b.roa", &"22".repeat(32)), present(
present("rsync://example.test/repo-a/", "rsync://example.test/repo-a/a.roa", &"11".repeat(32)), "rsync://example.test/repo-b/",
"rsync://example.test/repo-b/b.roa",
&"22".repeat(32),
),
present(
"rsync://example.test/repo-a/",
"rsync://example.test/repo-a/a.roa",
&"11".repeat(32),
),
]) ])
.expect("apply present entries"); .expect("apply present entries");
assert_eq!(index.active_uri_count(), 2); assert_eq!(index.active_uri_count(), 2);

View File

@ -4,17 +4,27 @@ use crate::data_model::oid::{
OID_CMS_ATTR_MESSAGE_DIGEST, OID_CMS_ATTR_MESSAGE_DIGEST_RAW, OID_CMS_ATTR_SIGNING_TIME, OID_CMS_ATTR_MESSAGE_DIGEST, OID_CMS_ATTR_MESSAGE_DIGEST_RAW, OID_CMS_ATTR_SIGNING_TIME,
OID_CMS_ATTR_SIGNING_TIME_RAW, OID_CT_ASPA, OID_CT_ASPA_RAW, OID_CT_ROUTE_ORIGIN_AUTHZ, OID_CMS_ATTR_SIGNING_TIME_RAW, OID_CT_ASPA, OID_CT_ASPA_RAW, OID_CT_ROUTE_ORIGIN_AUTHZ,
OID_CT_ROUTE_ORIGIN_AUTHZ_RAW, OID_CT_RPKI_MANIFEST, OID_CT_RPKI_MANIFEST_RAW, OID_CT_ROUTE_ORIGIN_AUTHZ_RAW, OID_CT_RPKI_MANIFEST, OID_CT_RPKI_MANIFEST_RAW,
OID_RSA_ENCRYPTION, OID_RSA_ENCRYPTION_RAW, OID_SHA256, OID_SHA256_RAW, OID_KEY_USAGE_RAW, OID_RSA_ENCRYPTION, OID_RSA_ENCRYPTION_RAW, OID_SHA256, OID_SHA256_RAW,
OID_SHA256_WITH_RSA_ENCRYPTION, OID_SHA256_WITH_RSA_ENCRYPTION_RAW, OID_SIGNED_DATA, OID_SHA256_WITH_RSA_ENCRYPTION, OID_SHA256_WITH_RSA_ENCRYPTION_RAW, OID_SIGNED_DATA,
OID_SIGNED_DATA_RAW, OID_SUBJECT_INFO_ACCESS, OID_SIGNED_DATA_RAW, OID_SUBJECT_INFO_ACCESS,
}; };
use crate::data_model::rc::{ResourceCertificate, SubjectInfoAccess}; use crate::data_model::rc::{ResourceCertificate, SubjectInfoAccess};
use asn1_rs::{Any, Class, FromBer, Header, Tag}; use asn1_rs::{Any, Class, FromBer, Header, Tag};
use ring::digest; use ring::digest;
use x509_parser::prelude::FromDer; use x509_parser::extensions::ParsedExtension;
use x509_parser::prelude::{FromDer, X509Certificate};
use x509_parser::public_key::PublicKey; use x509_parser::public_key::PublicKey;
use x509_parser::x509::SubjectPublicKeyInfo; use x509_parser::x509::SubjectPublicKeyInfo;
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum EeKeyUsageSummary {
DigitalSignatureOnly,
Missing,
NotCritical,
InvalidBits,
ParseError(String),
}
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct ResourceEeCertificate { pub struct ResourceEeCertificate {
pub raw_der: Vec<u8>, pub raw_der: Vec<u8>,
@ -22,6 +32,9 @@ pub struct ResourceEeCertificate {
pub spki_der: Vec<u8>, pub spki_der: Vec<u8>,
pub rsa_public_modulus: Vec<u8>, pub rsa_public_modulus: Vec<u8>,
pub rsa_public_exponent: Vec<u8>, pub rsa_public_exponent: Vec<u8>,
pub tbs_certificate_der: Vec<u8>,
pub signature_bytes: Vec<u8>,
pub key_usage_summary: EeKeyUsageSummary,
pub sia_signed_object_uris: Vec<String>, pub sia_signed_object_uris: Vec<String>,
pub resource_cert: ResourceCertificate, pub resource_cert: ResourceCertificate,
} }
@ -726,6 +739,15 @@ fn parse_signer_infos_set_cursor(
} }
fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedObjectValidateError> { fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedObjectValidateError> {
let (rem, cert) = X509Certificate::from_der(der)
.map_err(|e| SignedObjectValidateError::EeCertificateParse(e.to_string()))?;
if !rem.is_empty() {
return Err(SignedObjectValidateError::EeCertificateParse(format!(
"trailing bytes after EE certificate DER: {}",
rem.len()
)));
}
let rc = match ResourceCertificate::from_der(der) { let rc = match ResourceCertificate::from_der(der) {
Ok(v) => v, Ok(v) => v,
Err(e) => { Err(e) => {
@ -800,11 +822,49 @@ fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedOb
spki_der, spki_der,
rsa_public_modulus, rsa_public_modulus,
rsa_public_exponent, rsa_public_exponent,
tbs_certificate_der: cert.tbs_certificate.as_ref().to_vec(),
signature_bytes: cert.signature_value.data.to_vec(),
key_usage_summary: summarize_ee_key_usage(&cert),
sia_signed_object_uris: signed_object_uris, sia_signed_object_uris: signed_object_uris,
resource_cert: rc, resource_cert: rc,
}) })
} }
fn summarize_ee_key_usage(cert: &X509Certificate<'_>) -> EeKeyUsageSummary {
for ext in cert.extensions() {
if ext.oid.as_bytes() == OID_KEY_USAGE_RAW {
match ext.parsed_extension() {
ParsedExtension::KeyUsage(ku) => {
if !ext.critical {
return EeKeyUsageSummary::NotCritical;
}
let ok = ku.digital_signature()
&& !ku.key_cert_sign()
&& !ku.crl_sign()
&& !ku.non_repudiation()
&& !ku.key_encipherment()
&& !ku.data_encipherment()
&& !ku.key_agreement()
&& !ku.encipher_only()
&& !ku.decipher_only();
return if ok {
EeKeyUsageSummary::DigitalSignatureOnly
} else {
EeKeyUsageSummary::InvalidBits
};
}
other => {
return EeKeyUsageSummary::ParseError(format!(
"unexpected parsed keyUsage extension: {other:?}"
));
}
}
}
}
EeKeyUsageSummary::Missing
}
fn parse_signer_info_cursor( fn parse_signer_info_cursor(
mut seq: CmsReader<'_>, mut seq: CmsReader<'_>,
) -> Result<SignerInfoParsed, SignedObjectParseError> { ) -> Result<SignerInfoParsed, SignedObjectParseError> {

View File

@ -470,7 +470,8 @@ mod tests {
}) })
.expect("http"); .expect("http");
let (_, profile_short, timeout_short) = http.client_for_uri("https://example.test/root.tal"); let (_, profile_short, timeout_short) =
http.client_for_uri("https://example.test/root.tal");
assert_eq!(profile_short, "short"); assert_eq!(profile_short, "short");
assert_eq!(timeout_short, Duration::from_secs(3)); assert_eq!(timeout_short, Duration::from_secs(3));

View File

@ -13,6 +13,7 @@ use crate::data_model::rc::{AsResourceSet, IpResourceSet};
pub const CF_REPOSITORY_VIEW: &str = "repository_view"; pub const CF_REPOSITORY_VIEW: &str = "repository_view";
pub const CF_RAW_BY_HASH: &str = "raw_by_hash"; pub const CF_RAW_BY_HASH: &str = "raw_by_hash";
pub const CF_RAW_BLOB: &str = "raw_blob";
pub const CF_VCIR: &str = "vcir"; pub const CF_VCIR: &str = "vcir";
pub const CF_AUDIT_RULE_INDEX: &str = "audit_rule_index"; pub const CF_AUDIT_RULE_INDEX: &str = "audit_rule_index";
pub const CF_RRDP_SOURCE: &str = "rrdp_source"; pub const CF_RRDP_SOURCE: &str = "rrdp_source";
@ -22,6 +23,7 @@ pub const CF_RRDP_URI_OWNER: &str = "rrdp_uri_owner";
pub const ALL_COLUMN_FAMILY_NAMES: &[&str] = &[ pub const ALL_COLUMN_FAMILY_NAMES: &[&str] = &[
CF_REPOSITORY_VIEW, CF_REPOSITORY_VIEW,
CF_RAW_BY_HASH, CF_RAW_BY_HASH,
CF_RAW_BLOB,
CF_VCIR, CF_VCIR,
CF_AUDIT_RULE_INDEX, CF_AUDIT_RULE_INDEX,
CF_RRDP_SOURCE, CF_RRDP_SOURCE,
@ -31,6 +33,7 @@ pub const ALL_COLUMN_FAMILY_NAMES: &[&str] = &[
const REPOSITORY_VIEW_KEY_PREFIX: &str = "repo_view:"; const REPOSITORY_VIEW_KEY_PREFIX: &str = "repo_view:";
const RAW_BY_HASH_KEY_PREFIX: &str = "rawbyhash:"; const RAW_BY_HASH_KEY_PREFIX: &str = "rawbyhash:";
const RAW_BLOB_KEY_PREFIX: &str = "rawblob:";
const VCIR_KEY_PREFIX: &str = "vcir:"; const VCIR_KEY_PREFIX: &str = "vcir:";
const AUDIT_ROA_RULE_KEY_PREFIX: &str = "audit:roa_rule:"; const AUDIT_ROA_RULE_KEY_PREFIX: &str = "audit:roa_rule:";
const AUDIT_ASPA_RULE_KEY_PREFIX: &str = "audit:aspa_rule:"; const AUDIT_ASPA_RULE_KEY_PREFIX: &str = "audit:aspa_rule:";
@ -46,6 +49,10 @@ fn cf_opts() -> Options {
opts opts
} }
fn raw_blob_key(sha256_hex: &str) -> String {
format!("{RAW_BLOB_KEY_PREFIX}{sha256_hex}")
}
pub fn column_family_descriptors() -> Vec<ColumnFamilyDescriptor> { pub fn column_family_descriptors() -> Vec<ColumnFamilyDescriptor> {
ALL_COLUMN_FAMILY_NAMES ALL_COLUMN_FAMILY_NAMES
.iter() .iter()
@ -954,6 +961,29 @@ impl RocksStore {
self.write_batch(batch) self.write_batch(batch)
} }
pub fn put_blob_bytes_batch(&self, blobs: &[(String, Vec<u8>)]) -> StorageResult<()> {
if blobs.is_empty() {
return Ok(());
}
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.put_blob_bytes_batch(blobs);
}
let cf = self.cf(CF_RAW_BLOB)?;
let mut batch = WriteBatch::default();
for (sha256_hex, bytes) in blobs {
validate_sha256_hex("raw_blob.sha256_hex", sha256_hex)?;
if bytes.is_empty() {
return Err(StorageError::InvalidData {
entity: "raw_blob",
detail: "bytes must not be empty".to_string(),
});
}
let key = raw_blob_key(sha256_hex);
batch.put_cf(cf, key.as_bytes(), bytes.as_slice());
}
self.write_batch(batch)
}
pub fn delete_raw_by_hash_entry(&self, sha256_hex: &str) -> StorageResult<()> { pub fn delete_raw_by_hash_entry(&self, sha256_hex: &str) -> StorageResult<()> {
validate_sha256_hex("raw_by_hash.sha256_hex", sha256_hex)?; validate_sha256_hex("raw_by_hash.sha256_hex", sha256_hex)?;
if let Some(raw_store) = self.external_raw_store.as_ref() { if let Some(raw_store) = self.external_raw_store.as_ref() {
@ -1018,6 +1048,64 @@ impl RocksStore {
.collect() .collect()
} }
pub fn get_blob_bytes(&self, sha256_hex: &str) -> StorageResult<Option<Vec<u8>>> {
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.get_blob_bytes(sha256_hex);
}
validate_sha256_hex("raw_blob.sha256_hex", sha256_hex)?;
let cf = self.cf(CF_RAW_BLOB)?;
let key = raw_blob_key(sha256_hex);
if let Some(bytes) = self
.db
.get_cf(cf, key.as_bytes())
.map_err(|e| StorageError::RocksDb(e.to_string()))?
{
return Ok(Some(bytes));
}
self.get_raw_by_hash_entry(sha256_hex)
.map(|entry| entry.map(|entry| entry.bytes))
}
pub fn get_blob_bytes_batch(
&self,
sha256_hexes: &[String],
) -> StorageResult<Vec<Option<Vec<u8>>>> {
if sha256_hexes.is_empty() {
return Ok(Vec::new());
}
if let Some(raw_store) = self.external_raw_store.as_ref() {
return raw_store.get_blob_bytes_batch(sha256_hexes);
}
let cf = self.cf(CF_RAW_BLOB)?;
let keys: Vec<String> = sha256_hexes
.iter()
.map(|hash| {
validate_sha256_hex("raw_blob.sha256_hex", hash)?;
Ok::<String, StorageError>(raw_blob_key(hash))
})
.collect::<Result<_, _>>()?;
let blob_results: Vec<Option<Vec<u8>>> = self
.db
.multi_get_cf(keys.iter().map(|key| (cf, key.as_bytes())))
.into_iter()
.map(|res| res.map_err(|e| StorageError::RocksDb(e.to_string())))
.collect::<Result<_, _>>()?;
let mut out = Vec::with_capacity(sha256_hexes.len());
for (sha256_hex, maybe_blob) in sha256_hexes.iter().zip(blob_results.into_iter()) {
if maybe_blob.is_some() {
out.push(maybe_blob);
} else {
out.push(
self.get_raw_by_hash_entry(sha256_hex)?
.map(|entry| entry.bytes),
);
}
}
Ok(out)
}
pub fn put_vcir(&self, vcir: &ValidatedCaInstanceResult) -> StorageResult<()> { pub fn put_vcir(&self, vcir: &ValidatedCaInstanceResult) -> StorageResult<()> {
vcir.validate_internal()?; vcir.validate_internal()?;
let cf = self.cf(CF_VCIR)?; let cf = self.cf(CF_VCIR)?;
@ -1290,12 +1378,14 @@ impl RocksStore {
entity: "repository_view", entity: "repository_view",
detail: format!("current_hash missing for current object URI: {rsync_uri}"), detail: format!("current_hash missing for current object URI: {rsync_uri}"),
})?; })?;
let bytes = self.get_blob_bytes(hash)?.ok_or(StorageError::InvalidData { let bytes = self
entity: "repository_view", .get_blob_bytes(hash)?
detail: format!( .ok_or(StorageError::InvalidData {
"raw_by_hash entry missing for current object URI: {rsync_uri} (hash={hash})" entity: "repository_view",
), detail: format!(
})?; "blob bytes missing for current object URI: {rsync_uri} (hash={hash})"
),
})?;
let current_hash = decode_sha256_hex_32("repository_view.current_hash", hash)?; let current_hash = decode_sha256_hex_32("repository_view.current_hash", hash)?;
Ok(Some(CurrentObjectWithHash { Ok(Some(CurrentObjectWithHash {
current_hash_hex: hash.to_ascii_lowercase(), current_hash_hex: hash.to_ascii_lowercase(),
@ -1499,10 +1589,7 @@ impl PackBytes {
Self::Eager(std::sync::Arc::from(bytes)) Self::Eager(std::sync::Arc::from(bytes))
} }
pub fn lazy_external( pub fn lazy_external(sha256_hex: String, store: std::sync::Arc<ExternalRawStoreDb>) -> Self {
sha256_hex: String,
store: std::sync::Arc<ExternalRawStoreDb>,
) -> Self {
Self::LazyExternal { Self::LazyExternal {
sha256_hex, sha256_hex,
store, store,
@ -1579,7 +1666,11 @@ impl PackFile {
sha256: [u8; 32], sha256: [u8; 32],
store: std::sync::Arc<ExternalRawStoreDb>, store: std::sync::Arc<ExternalRawStoreDb>,
) -> Self { ) -> Self {
Self::new(rsync_uri, PackBytes::lazy_external(sha256_hex, store), sha256) Self::new(
rsync_uri,
PackBytes::lazy_external(sha256_hex, store),
sha256,
)
} }
pub fn from_bytes_compute_sha256(rsync_uri: impl Into<String>, bytes: Vec<u8>) -> Self { pub fn from_bytes_compute_sha256(rsync_uri: impl Into<String>, bytes: Vec<u8>) -> Self {
@ -1602,7 +1693,9 @@ impl PackFile {
impl PartialEq for PackFile { impl PartialEq for PackFile {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.rsync_uri == other.rsync_uri && self.sha256 == other.sha256 && self.bytes == other.bytes self.rsync_uri == other.rsync_uri
&& self.sha256 == other.sha256
&& self.bytes == other.bytes
} }
} }
@ -1918,6 +2011,231 @@ mod tests {
); );
} }
#[test]
fn put_blob_bytes_batch_uses_internal_blob_cf_without_raw_entry() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let bytes = b"internal-blob-only".to_vec();
let hash = sha256_hex(&bytes);
store
.put_blob_bytes_batch(&[(hash.clone(), bytes.clone())])
.expect("put blob bytes");
assert_eq!(
store.get_blob_bytes(&hash).expect("get blob bytes"),
Some(bytes.clone())
);
assert!(
store
.get_raw_by_hash_entry(&hash)
.expect("get raw entry")
.is_none()
);
}
#[test]
fn put_blob_bytes_batch_routes_to_external_raw_store_without_raw_entry() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open_with_external_raw_store(
&td.path().join("main-db"),
&td.path().join("raw-store.db"),
)
.expect("open store");
let bytes = b"external-blob-only".to_vec();
let hash = sha256_hex(&bytes);
store
.put_blob_bytes_batch(&[(hash.clone(), bytes.clone())])
.expect("put external blob bytes");
assert_eq!(store.get_blob_bytes(&hash).unwrap(), Some(bytes));
assert!(store.get_raw_by_hash_entry(&hash).unwrap().is_none());
}
#[test]
fn put_blob_bytes_batch_accepts_empty_batch_with_external_raw_store() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open_with_external_raw_store(
&td.path().join("main-db"),
&td.path().join("raw-store.db"),
)
.expect("open store");
store
.put_blob_bytes_batch(&[])
.expect("empty external blob batch should be a no-op");
}
#[test]
fn get_blob_bytes_internal_falls_back_to_raw_entry_when_blob_missing() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let raw = sample_raw_by_hash_entry(b"raw-fallback".to_vec());
store.put_raw_by_hash_entry(&raw).expect("put raw entry");
assert_eq!(
store
.get_blob_bytes(&raw.sha256_hex)
.expect("get blob bytes via raw fallback"),
Some(raw.bytes.clone())
);
}
#[test]
fn get_blob_bytes_batch_internal_prefers_blob_cf_and_falls_back_to_raw_entry() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let blob_bytes = b"blob-cf-object".to_vec();
let blob_hash = sha256_hex(&blob_bytes);
store
.put_blob_bytes_batch(&[(blob_hash.clone(), blob_bytes.clone())])
.expect("put blob bytes");
let raw = sample_raw_by_hash_entry(b"raw-fallback-batch".to_vec());
store.put_raw_by_hash_entry(&raw).expect("put raw fallback");
let batch = store
.get_blob_bytes_batch(&[blob_hash.clone(), raw.sha256_hex.clone(), "00".repeat(32)])
.expect("get blob bytes batch");
assert_eq!(
batch,
vec![Some(blob_bytes), Some(raw.bytes.clone()), None]
);
}
#[test]
fn get_blob_bytes_batch_routes_to_external_raw_store_without_raw_entry() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open_with_external_raw_store(
&td.path().join("main-db"),
&td.path().join("raw-store.db"),
)
.expect("open store");
let bytes = b"external-batch-blob".to_vec();
let hash = sha256_hex(&bytes);
store
.put_blob_bytes_batch(&[(hash.clone(), bytes.clone())])
.expect("put external blob bytes");
assert_eq!(
store
.get_blob_bytes_batch(&[hash, "00".repeat(32)])
.expect("get external blob batch"),
vec![Some(bytes), None]
);
}
#[test]
fn get_blob_bytes_rejects_invalid_hash_for_internal_store() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let err = store
.get_blob_bytes("not-a-valid-hash")
.expect_err("invalid hash must fail");
assert!(matches!(err, StorageError::InvalidData { .. }));
}
#[test]
fn get_blob_bytes_batch_rejects_invalid_hash_for_internal_store() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let err = store
.get_blob_bytes_batch(&["not-a-valid-hash".to_string()])
.expect_err("invalid hash must fail");
assert!(matches!(err, StorageError::InvalidData { .. }));
}
#[test]
fn get_blob_bytes_batch_returns_empty_for_empty_request_internal() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
assert!(
store
.get_blob_bytes_batch(&[])
.expect("empty blob batch request")
.is_empty()
);
}
#[test]
fn put_blob_bytes_batch_accepts_empty_batch() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
store
.put_blob_bytes_batch(&[])
.expect("empty blob batch should be a no-op");
}
#[test]
fn put_blob_bytes_batch_rejects_empty_bytes() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let err = store
.put_blob_bytes_batch(&[(sha256_hex(b"valid"), Vec::new())])
.expect_err("empty bytes must fail");
assert!(matches!(err, StorageError::InvalidData { .. }));
}
#[test]
fn delete_raw_by_hash_entry_internal_preserves_blob_bytes() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let bytes = b"blob-persists-after-raw-delete".to_vec();
let hash = sha256_hex(&bytes);
let raw = RawByHashEntry::from_bytes(hash.clone(), bytes.clone());
store
.put_blob_bytes_batch(&[(hash.clone(), bytes.clone())])
.expect("put blob bytes");
store.put_raw_by_hash_entry(&raw).expect("put raw entry");
store
.delete_raw_by_hash_entry(&hash)
.expect("delete raw entry only");
assert!(store.get_raw_by_hash_entry(&hash).unwrap().is_none());
assert_eq!(store.get_blob_bytes(&hash).unwrap(), Some(bytes));
}
#[test]
fn delete_raw_by_hash_entry_rejects_invalid_hash() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let err = store
.delete_raw_by_hash_entry("not-a-valid-hash")
.expect_err("invalid hash must fail");
assert!(matches!(err, StorageError::InvalidData { .. }));
}
#[test]
fn delete_raw_by_hash_entry_routes_to_external_raw_store() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open_with_external_raw_store(
&td.path().join("main-db"),
&td.path().join("raw-store.db"),
)
.expect("open store");
let raw = sample_raw_by_hash_entry(b"external-delete".to_vec());
store.put_raw_by_hash_entry(&raw).expect("put raw entry");
store
.delete_raw_by_hash_entry(&raw.sha256_hex)
.expect("delete external raw entry");
assert!(store.get_raw_by_hash_entry(&raw.sha256_hex).unwrap().is_none());
assert!(store.get_blob_bytes(&raw.sha256_hex).unwrap().is_none());
}
#[test] #[test]
fn repository_view_and_raw_by_hash_validation_errors_are_reported() { fn repository_view_and_raw_by_hash_validation_errors_are_reported() {
let td = tempfile::tempdir().expect("tempdir"); let td = tempfile::tempdir().expect("tempdir");
@ -2558,6 +2876,71 @@ mod tests {
assert_eq!(got.bytes, bytes); assert_eq!(got.bytes, bytes);
} }
#[test]
fn load_current_object_with_hash_by_uri_uses_internal_blob_cf_without_raw_entry() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let rsync_uri = "rsync://example.test/repo/blob-only.roa";
let bytes = b"blob-only-current-object".to_vec();
let hash = sha256_hex(&bytes);
store
.put_blob_bytes_batch(&[(hash.clone(), bytes.clone())])
.expect("put blob bytes");
store
.put_repository_view_entry(&RepositoryViewEntry {
rsync_uri: rsync_uri.to_string(),
current_hash: Some(hash.clone()),
repository_source: Some("https://rrdp.example.test/notification.xml".to_string()),
object_type: Some("roa".to_string()),
state: RepositoryViewState::Present,
})
.expect("put view");
let got = store
.load_current_object_with_hash_by_uri(rsync_uri)
.expect("load current object")
.expect("current object exists");
assert_eq!(got.current_hash_hex, hash);
assert_eq!(got.current_hash, compute_sha256_32(&bytes));
assert_eq!(got.bytes, bytes);
assert!(
store
.get_raw_by_hash_entry(&got.current_hash_hex)
.expect("get raw entry")
.is_none()
);
}
#[test]
fn load_current_object_bytes_by_uri_uses_internal_blob_cf_without_raw_entry() {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let rsync_uri = "rsync://example.test/repo/blob-only-bytes.roa";
let bytes = b"blob-only-current-object-bytes".to_vec();
let hash = sha256_hex(&bytes);
store
.put_blob_bytes_batch(&[(hash, bytes.clone())])
.expect("put blob bytes");
store
.put_repository_view_entry(&RepositoryViewEntry {
rsync_uri: rsync_uri.to_string(),
current_hash: Some(sha256_hex(&bytes)),
repository_source: Some("https://rrdp.example.test/notification.xml".to_string()),
object_type: Some("roa".to_string()),
state: RepositoryViewState::Present,
})
.expect("put view");
assert_eq!(
store
.load_current_object_bytes_by_uri(rsync_uri)
.expect("load current object bytes"),
Some(bytes)
);
}
#[test] #[test]
fn pack_file_can_lazy_load_bytes_from_external_raw_store() { fn pack_file_can_lazy_load_bytes_from_external_raw_store() {
let td = tempfile::tempdir().expect("tempdir"); let td = tempfile::tempdir().expect("tempdir");
@ -2567,7 +2950,10 @@ mod tests {
let bytes = b"lazy-pack-file".to_vec(); let bytes = b"lazy-pack-file".to_vec();
let sha256_hex = sha256_hex(&bytes); let sha256_hex = sha256_hex(&bytes);
raw_store raw_store
.put_raw_entry(&RawByHashEntry::from_bytes(sha256_hex.clone(), bytes.clone())) .put_raw_entry(&RawByHashEntry::from_bytes(
sha256_hex.clone(),
bytes.clone(),
))
.expect("put raw entry"); .expect("put raw entry");
let file = PackFile::from_lazy_external_raw_store( let file = PackFile::from_lazy_external_raw_store(

View File

@ -7,14 +7,14 @@ use crate::policy::{Policy, SyncPreference};
use crate::replay::archive::{ReplayArchiveIndex, ReplayTransport}; use crate::replay::archive::{ReplayArchiveIndex, ReplayTransport};
use crate::replay::delta_archive::{ReplayDeltaArchiveIndex, ReplayDeltaRrdpKind}; use crate::replay::delta_archive::{ReplayDeltaArchiveIndex, ReplayDeltaRrdpKind};
use crate::report::{RfcRef, Warning}; use crate::report::{RfcRef, Warning};
use crate::storage::{RawByHashEntry, RocksStore}; use crate::storage::RocksStore;
use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log; use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log;
use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError, load_rrdp_local_state}; use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError, load_rrdp_local_state};
use crate::sync::store_projection::{ use crate::sync::store_projection::{
build_repository_view_present_entry, build_repository_view_withdrawn_entry, compute_sha256_hex, build_repository_view_present_entry, build_repository_view_withdrawn_entry,
infer_object_type_from_uri, prepare_repo_bytes_batch,
}; };
use std::collections::{BTreeMap, HashSet}; use std::collections::HashSet;
#[cfg(test)] #[cfg(test)]
use crate::storage::RrdpSourceSyncState; use crate::storage::RrdpSourceSyncState;
@ -591,27 +591,11 @@ fn rsync_sync_into_current_store(
let mut dl_span = let mut dl_span =
download_log.map(|dl| dl.span_download(AuditDownloadKind::Rsync, rsync_base_uri)); download_log.map(|dl| dl.span_download(AuditDownloadKind::Rsync, rsync_base_uri));
let mut new_set: HashSet<String> = HashSet::new(); let mut new_set: HashSet<String> = HashSet::new();
let mut uri_to_hash: BTreeMap<String, String> = BTreeMap::new(); let mut fetched_objects: Vec<(String, Vec<u8>)> = Vec::new();
let mut pending_raw: BTreeMap<String, RawByHashEntry> = BTreeMap::new();
let (object_count, bytes_total) = let (object_count, bytes_total) =
match rsync_fetcher.visit_objects(rsync_base_uri, &mut |uri, bytes| { match rsync_fetcher.visit_objects(rsync_base_uri, &mut |uri, bytes| {
let sha256_hex = compute_sha256_hex(&bytes);
new_set.insert(uri.clone()); new_set.insert(uri.clone());
uri_to_hash.insert(uri.clone(), sha256_hex.clone()); fetched_objects.push((uri, bytes));
let entry = pending_raw
.entry(sha256_hex.clone())
.or_insert_with(|| RawByHashEntry::from_bytes(sha256_hex.clone(), bytes.clone()));
if entry.bytes != bytes {
return Err(format!(
"raw_by_hash collision for {uri}: same sha256 maps to different bytes"
));
}
if !entry.origin_uris.iter().any(|existing| existing == &uri) {
entry.origin_uris.push(uri.clone());
}
if entry.object_type.is_none() {
entry.object_type = infer_object_type_from_uri(&uri);
}
Ok(()) Ok(())
}) { }) {
Ok(v) => { Ok(v) => {
@ -652,44 +636,8 @@ fn rsync_sync_into_current_store(
let _proj = timing let _proj = timing
.as_ref() .as_ref()
.map(|t| t.span_phase("rsync_write_current_store_total")); .map(|t| t.span_phase("rsync_write_current_store_total"));
let hashes: Vec<String> = pending_raw.keys().cloned().collect(); let prepared_bytes =
let existing_entries = store prepare_repo_bytes_batch(&fetched_objects).map_err(RepoSyncError::Storage)?;
.get_raw_by_hash_entries_batch(&hashes)
.map_err(|e| RepoSyncError::Storage(e.to_string()))?;
let mut entries_to_write = Vec::new();
for (hash, existing_opt) in hashes.into_iter().zip(existing_entries.into_iter()) {
let mut pending_entry = pending_raw.remove(&hash).ok_or_else(|| {
RepoSyncError::Storage(format!("missing pending raw entry for {hash}"))
})?;
match existing_opt {
Some(mut existing) => {
if existing.bytes != pending_entry.bytes {
return Err(RepoSyncError::Storage(format!(
"raw_by_hash collision for hash {hash}: same sha256 maps to different bytes"
)));
}
let mut changed = false;
for uri in pending_entry.origin_uris.drain(..) {
if !existing
.origin_uris
.iter()
.any(|existing_uri| existing_uri == &uri)
{
existing.origin_uris.push(uri);
changed = true;
}
}
if existing.object_type.is_none() && pending_entry.object_type.is_some() {
existing.object_type = pending_entry.object_type;
changed = true;
}
if changed {
entries_to_write.push(existing);
}
}
None => entries_to_write.push(pending_entry),
}
}
let mut repository_view_entries = Vec::new(); let mut repository_view_entries = Vec::new();
for entry in existing_view { for entry in existing_view {
if !new_set.contains(&entry.rsync_uri) { if !new_set.contains(&entry.rsync_uri) {
@ -702,9 +650,13 @@ fn rsync_sync_into_current_store(
} }
for uri in &new_set { for uri in &new_set {
let current_hash = uri_to_hash.get(uri).cloned().ok_or_else(|| { let current_hash = prepared_bytes
RepoSyncError::Storage(format!("missing raw_by_hash mapping for {uri}")) .uri_to_hash
})?; .get(uri)
.cloned()
.ok_or_else(|| {
RepoSyncError::Storage(format!("missing raw_by_hash mapping for {uri}"))
})?;
repository_view_entries.push(build_repository_view_present_entry( repository_view_entries.push(build_repository_view_present_entry(
&sync_scope_uri, &sync_scope_uri,
uri, uri,
@ -713,7 +665,7 @@ fn rsync_sync_into_current_store(
} }
store store
.put_raw_by_hash_entries_batch_unchecked(&entries_to_write) .put_blob_bytes_batch(&prepared_bytes.blobs_to_write)
.map_err(|e| RepoSyncError::Storage(e.to_string()))?; .map_err(|e| RepoSyncError::Storage(e.to_string()))?;
store store
.put_projection_batch(&repository_view_entries, &[], &[]) .put_projection_batch(&repository_view_entries, &[], &[])
@ -785,7 +737,7 @@ mod tests {
use crate::storage::RepositoryViewState; use crate::storage::RepositoryViewState;
use crate::sync::rrdp::Fetcher as HttpFetcher; use crate::sync::rrdp::Fetcher as HttpFetcher;
use crate::sync::rrdp::RrdpState; use crate::sync::rrdp::RrdpState;
use crate::sync::store_projection::build_repository_view_present_entry; use crate::sync::store_projection::{build_repository_view_present_entry, compute_sha256_hex};
use base64::Engine; use base64::Engine;
use sha2::Digest; use sha2::Digest;
use std::collections::HashMap; use std::collections::HashMap;
@ -1274,14 +1226,16 @@ mod tests {
Some("rsync://example.test/repo/") Some("rsync://example.test/repo/")
); );
let raw = store let current_bytes = store
.get_raw_by_hash_entry(hex::encode(sha2::Sha256::digest(b"roa")).as_str()) .load_current_object_bytes_by_uri("rsync://example.test/repo/sub/b.roa")
.expect("get raw_by_hash") .expect("load current bytes")
.expect("raw_by_hash entry present"); .expect("current object bytes exist");
assert_eq!(current_bytes, b"roa".to_vec());
assert!( assert!(
raw.origin_uris store
.iter() .get_raw_by_hash_entry(hex::encode(sha2::Sha256::digest(b"roa")).as_str())
.any(|uri| uri == "rsync://example.test/repo/sub/b.roa") .expect("get raw_by_hash")
.is_none()
); );
let timing_path = temp.path().join("timing.json"); let timing_path = temp.path().join("timing.json");

View File

@ -7,10 +7,10 @@ use crate::sync::store_projection::{
build_repository_view_present_entry, build_repository_view_withdrawn_entry, build_repository_view_present_entry, build_repository_view_withdrawn_entry,
build_rrdp_source_member_present_record, build_rrdp_source_member_withdrawn_record, build_rrdp_source_member_present_record, build_rrdp_source_member_withdrawn_record,
build_rrdp_uri_owner_active_record, build_rrdp_uri_owner_withdrawn_record, compute_sha256_hex, build_rrdp_uri_owner_active_record, build_rrdp_uri_owner_withdrawn_record, compute_sha256_hex,
current_rrdp_owner_is, ensure_rrdp_uri_can_be_owned_by, prepare_raw_by_hash_evidence_batch, current_rrdp_owner_is, ensure_rrdp_uri_can_be_owned_by, prepare_repo_bytes_batch,
put_repository_view_present, put_repository_view_withdrawn, put_rrdp_source_member_present, put_repository_view_present, put_repository_view_withdrawn, put_rrdp_source_member_present,
put_rrdp_source_member_withdrawn, put_rrdp_uri_owner_active, put_rrdp_uri_owner_withdrawn, put_rrdp_source_member_withdrawn, put_rrdp_uri_owner_active, put_rrdp_uri_owner_withdrawn,
update_rrdp_source_record_on_success, upsert_raw_by_hash_evidence, update_rrdp_source_record_on_success, upsert_repo_blob_bytes,
}; };
use base64::Engine; use base64::Engine;
use quick_xml::Reader; use quick_xml::Reader;
@ -1097,8 +1097,8 @@ fn apply_delta(
for effect in projection { for effect in projection {
match effect { match effect {
DeltaProjectionEffect::Upsert { rsync_uri, bytes } => { DeltaProjectionEffect::Upsert { rsync_uri, bytes } => {
let current_hash = upsert_raw_by_hash_evidence(store, &rsync_uri, &bytes) let current_hash =
.map_err(RrdpSyncError::Storage)?; upsert_repo_blob_bytes(store, &bytes).map_err(RrdpSyncError::Storage)?;
put_repository_view_present(store, notification_uri, &rsync_uri, &current_hash) put_repository_view_present(store, notification_uri, &rsync_uri, &current_hash)
.map_err(RrdpSyncError::Storage)?; .map_err(RrdpSyncError::Storage)?;
if let Some(index) = current_repo_index { if let Some(index) = current_repo_index {
@ -1493,16 +1493,19 @@ fn flush_snapshot_publish_batch(
serial: u64, serial: u64,
published: &[(String, Vec<u8>)], published: &[(String, Vec<u8>)],
) -> Result<(), RrdpSyncError> { ) -> Result<(), RrdpSyncError> {
let prepared_raw = let prepared_bytes = prepare_repo_bytes_batch(published).map_err(RrdpSyncError::Storage)?;
prepare_raw_by_hash_evidence_batch(store, published).map_err(RrdpSyncError::Storage)?;
let mut repository_view_entries = Vec::with_capacity(published.len()); let mut repository_view_entries = Vec::with_capacity(published.len());
let mut member_records = Vec::with_capacity(published.len()); let mut member_records = Vec::with_capacity(published.len());
let mut owner_records = Vec::with_capacity(published.len()); let mut owner_records = Vec::with_capacity(published.len());
for (uri, _bytes) in published { for (uri, _bytes) in published {
let current_hash = prepared_raw.uri_to_hash.get(uri).cloned().ok_or_else(|| { let current_hash = prepared_bytes
RrdpSyncError::Storage(format!("missing raw_by_hash mapping for {uri}")) .uri_to_hash
})?; .get(uri)
.cloned()
.ok_or_else(|| {
RrdpSyncError::Storage(format!("missing raw_by_hash mapping for {uri}"))
})?;
repository_view_entries.push(build_repository_view_present_entry( repository_view_entries.push(build_repository_view_present_entry(
notification_uri, notification_uri,
uri, uri,
@ -1525,7 +1528,7 @@ fn flush_snapshot_publish_batch(
} }
store store
.put_raw_by_hash_entries_batch_unchecked(&prepared_raw.entries_to_write) .put_blob_bytes_batch(&prepared_bytes.blobs_to_write)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?; .map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
store store
.put_projection_batch(&repository_view_entries, &member_records, &owner_records) .put_projection_batch(&repository_view_entries, &member_records, &owner_records)
@ -2445,14 +2448,16 @@ mod tests {
assert_eq!(view.state, crate::storage::RepositoryViewState::Present); assert_eq!(view.state, crate::storage::RepositoryViewState::Present);
assert_eq!(view.repository_source.as_deref(), Some(notif_uri)); assert_eq!(view.repository_source.as_deref(), Some(notif_uri));
let raw = store let current_bytes = store
.get_raw_by_hash_entry(hex::encode(sha2::Sha256::digest(b"mft-bytes")).as_str()) .load_current_object_bytes_by_uri("rsync://example.net/repo/a.mft")
.expect("get raw_by_hash") .expect("load current bytes")
.expect("raw_by_hash exists"); .expect("current object bytes exist");
assert_eq!(current_bytes, b"mft-bytes".to_vec());
assert!( assert!(
raw.origin_uris store
.iter() .get_raw_by_hash_entry(hex::encode(sha2::Sha256::digest(b"mft-bytes")).as_str())
.any(|uri| uri == "rsync://example.net/repo/a.mft") .expect("get raw_by_hash")
.is_none()
); );
let member = store let member = store

View File

@ -7,11 +7,50 @@ use crate::storage::{
}; };
use sha2::Digest; use sha2::Digest;
#[allow(dead_code)]
pub struct PreparedRawByHashBatch { pub struct PreparedRawByHashBatch {
pub uri_to_hash: BTreeMap<String, String>, pub uri_to_hash: BTreeMap<String, String>,
pub entries_to_write: Vec<RawByHashEntry>, pub entries_to_write: Vec<RawByHashEntry>,
} }
/// Result of `prepare_repo_bytes_batch`: per-URI content hashes plus the
/// deduplicated `(sha256_hex, bytes)` pairs still to be written.
pub struct PreparedRepoBytesBatch {
    // Maps each input rsync URI to the sha256 hex digest of its bytes.
    pub uri_to_hash: BTreeMap<String, String>,
    // Unique blobs keyed by hash, suitable for `put_blob_bytes_batch`.
    pub blobs_to_write: Vec<(String, Vec<u8>)>,
}
/// Hash a batch of fetched repository objects, map every URI to its sha256
/// hex digest, and deduplicate identical payloads by hash.
///
/// # Errors
/// Returns an error string if any payload is empty, or if two objects produce
/// the same sha256 for different bytes (a hash collision).
pub fn prepare_repo_bytes_batch(
    objects: &[(String, Vec<u8>)],
) -> Result<PreparedRepoBytesBatch, String> {
    use std::collections::btree_map::Entry;

    let mut uri_to_hash = BTreeMap::new();
    let mut unique_blobs: BTreeMap<String, Vec<u8>> = BTreeMap::new();
    for (uri, bytes) in objects {
        if bytes.is_empty() {
            return Err(format!("repo bytes for {uri} must not be empty"));
        }
        let digest = compute_sha256_hex(bytes);
        uri_to_hash.insert(uri.clone(), digest.clone());
        match unique_blobs.entry(digest) {
            Entry::Vacant(slot) => {
                slot.insert(bytes.clone());
            }
            Entry::Occupied(seen) => {
                // Same digest must mean same content; anything else is a
                // sha256 collision and is treated as fatal.
                if seen.get() != bytes {
                    return Err(format!(
                        "repo bytes collision for {uri}: same sha256 maps to different bytes"
                    ));
                }
            }
        }
    }
    Ok(PreparedRepoBytesBatch {
        uri_to_hash,
        blobs_to_write: unique_blobs.into_iter().collect(),
    })
}
#[allow(dead_code)]
pub fn prepare_raw_by_hash_evidence_batch( pub fn prepare_raw_by_hash_evidence_batch(
store: &RocksStore, store: &RocksStore,
objects: &[(String, Vec<u8>)], objects: &[(String, Vec<u8>)],
@ -83,6 +122,17 @@ pub fn prepare_raw_by_hash_evidence_batch(
}) })
} }
/// Write a single repository blob into the content-addressed blob store and
/// return its sha256 hex digest.
///
/// # Errors
/// Returns an error string for an empty payload or a failed store write.
pub fn upsert_repo_blob_bytes(store: &RocksStore, bytes: &[u8]) -> Result<String, String> {
    if bytes.is_empty() {
        return Err("repo bytes must not be empty".to_string());
    }
    let digest = compute_sha256_hex(bytes);
    let batch = [(digest.clone(), bytes.to_vec())];
    store
        .put_blob_bytes_batch(&batch)
        .map_err(|err| err.to_string())?;
    Ok(digest)
}
pub fn infer_object_type_from_uri(uri: &str) -> Option<String> { pub fn infer_object_type_from_uri(uri: &str) -> Option<String> {
let ext = uri.rsplit('.').next()?; let ext = uri.rsplit('.').next()?;
let ext = ext.to_ascii_lowercase(); let ext = ext.to_ascii_lowercase();
@ -194,6 +244,7 @@ pub fn build_rrdp_uri_owner_withdrawn_record(
} }
} }
#[allow(dead_code)]
pub fn upsert_raw_by_hash_evidence( pub fn upsert_raw_by_hash_evidence(
store: &RocksStore, store: &RocksStore,
rsync_uri: &str, rsync_uri: &str,
@ -401,3 +452,36 @@ pub fn compute_sha256_hex(bytes: &[u8]) -> String {
pub fn now_pack_time() -> PackTime { pub fn now_pack_time() -> PackTime {
PackTime::from_utc_offset_datetime(time::OffsetDateTime::now_utc()) PackTime::from_utc_offset_datetime(time::OffsetDateTime::now_utc())
} }
#[cfg(test)]
mod tests {
    use super::prepare_repo_bytes_batch;
    use std::collections::BTreeSet;

    #[test]
    fn prepare_repo_bytes_batch_deduplicates_by_hash() {
        // Two URIs carrying identical bytes collapse into a single blob,
        // while every URI still receives its own hash mapping.
        let objects: Vec<(String, Vec<u8>)> = [
            ("rsync://example.test/repo/a.roa", &b"same"[..]),
            ("rsync://example.test/repo/b.roa", &b"same"[..]),
            ("rsync://example.test/repo/c.roa", &b"other"[..]),
        ]
        .iter()
        .map(|(uri, bytes)| (uri.to_string(), bytes.to_vec()))
        .collect();
        let prepared = prepare_repo_bytes_batch(&objects).expect("prepare repo bytes");
        assert_eq!(prepared.uri_to_hash.len(), 3);
        assert_eq!(prepared.blobs_to_write.len(), 2);
        let unique_hashes: BTreeSet<_> = prepared
            .blobs_to_write
            .iter()
            .map(|(hash, _)| hash.as_str())
            .collect();
        assert_eq!(unique_hashes.len(), 2);
    }
}

View File

@ -3,6 +3,8 @@ use crate::data_model::crl::{CrlDecodeError, CrlVerifyError, RpkixCrl};
use crate::data_model::rc::{ use crate::data_model::rc::{
ResourceCertKind, ResourceCertificate, ResourceCertificateDecodeError, ResourceCertKind, ResourceCertificate, ResourceCertificateDecodeError,
}; };
use crate::data_model::signed_object::{EeKeyUsageSummary, ResourceEeCertificate};
use ring::signature;
use x509_parser::prelude::{FromDer, X509Certificate}; use x509_parser::prelude::{FromDer, X509Certificate};
use crate::validation::x509_name::x509_names_equivalent; use crate::validation::x509_name::x509_names_equivalent;
@ -63,6 +65,9 @@ pub enum CertPathError {
#[error("EE KeyUsage must have only digitalSignature set (RFC 6487 §4.8.4)")] #[error("EE KeyUsage must have only digitalSignature set (RFC 6487 §4.8.4)")]
KeyUsageInvalidBits, KeyUsageInvalidBits,
#[error("EE KeyUsage parse failed: {0} (RFC 6487 §4.8.4; RFC 5280 §4.2.1.3)")]
KeyUsageParse(String),
#[error("issuer CA subjectKeyIdentifier missing (RFC 6487 §4.8.2)")] #[error("issuer CA subjectKeyIdentifier missing (RFC 6487 §4.8.2)")]
IssuerSkiMissing, IssuerSkiMissing,
@ -235,6 +240,34 @@ pub fn validate_ee_cert_path_with_predecoded_ee(
) )
} }
/// Validate a signed-object embedded EE certificate path using signed-object cached fields.
///
/// This avoids reparsing the EE certificate as `X509Certificate` and bypasses the
/// x509-parser signature wrapper by verifying the cached TBS/signature bytes directly.
///
/// Checks run in order, first failure wins:
/// 1. path bindings and status against the issuer CA/CRL,
/// 2. the cached EE KeyUsage summary,
/// 3. the EE signature over the cached TBS bytes with the issuer's SPKI.
///
/// # Errors
/// Returns the `CertPathError` of the first failing check.
pub fn validate_signed_object_ee_cert_path_fast(
    ee: &ResourceEeCertificate,
    issuer_ca: &ResourceCertificate,
    issuer_spki: &SubjectPublicKeyInfo<'_>,
    issuer_crl: &RpkixCrl,
    issuer_crl_revoked_serials: &HashSet<Vec<u8>>,
    issuer_ca_rsync_uri: Option<&str>,
    issuer_crl_rsync_uri: Option<&str>,
    validation_time: time::OffsetDateTime,
) -> Result<(), CertPathError> {
    validate_ee_cert_path_bindings_and_status(
        &ee.resource_cert,
        issuer_ca,
        issuer_crl,
        issuer_crl_revoked_serials,
        issuer_ca_rsync_uri,
        issuer_crl_rsync_uri,
        validation_time,
    )?;
    // Cheap enum check on the cached KeyUsage summary (no X.509 reparse).
    validate_ee_key_usage_summary(&ee.key_usage_summary)?;
    // Verify the EE signature directly over the cached TBS DER bytes.
    verify_ee_cert_signature_fast(&ee.tbs_certificate_der, &ee.signature_bytes, issuer_spki)?;
    Ok(())
}
fn validate_ee_cert_path_components( fn validate_ee_cert_path_components(
ee: &ResourceCertificate, ee: &ResourceCertificate,
ee_cert_der: &[u8], ee_cert_der: &[u8],
@ -245,6 +278,32 @@ fn validate_ee_cert_path_components(
issuer_ca_rsync_uri: Option<&str>, issuer_ca_rsync_uri: Option<&str>,
issuer_crl_rsync_uri: Option<&str>, issuer_crl_rsync_uri: Option<&str>,
validation_time: time::OffsetDateTime, validation_time: time::OffsetDateTime,
) -> Result<(), CertPathError> {
validate_ee_cert_path_bindings_and_status(
ee,
issuer_ca,
issuer_crl,
issuer_crl_revoked_serials,
issuer_ca_rsync_uri,
issuer_crl_rsync_uri,
validation_time,
)?;
let ee_x509 = parse_x509_cert(ee_cert_der)?;
verify_ee_cert_signature(&ee_x509, issuer_spki)?;
validate_ee_key_usage(&ee_x509)?;
Ok(())
}
fn validate_ee_cert_path_bindings_and_status(
ee: &ResourceCertificate,
issuer_ca: &ResourceCertificate,
issuer_crl: &RpkixCrl,
issuer_crl_revoked_serials: &HashSet<Vec<u8>>,
issuer_ca_rsync_uri: Option<&str>,
issuer_crl_rsync_uri: Option<&str>,
validation_time: time::OffsetDateTime,
) -> Result<(), CertPathError> { ) -> Result<(), CertPathError> {
if ee.kind != ResourceCertKind::Ee { if ee.kind != ResourceCertKind::Ee {
return Err(CertPathError::EeNotEe); return Err(CertPathError::EeNotEe);
@ -280,10 +339,6 @@ fn validate_ee_cert_path_components(
return Err(CertPathError::CertificateNotValidAtTime); return Err(CertPathError::CertificateNotValidAtTime);
} }
let ee_x509 = parse_x509_cert(ee_cert_der)?;
verify_ee_cert_signature(&ee_x509, issuer_spki)?;
validate_ee_key_usage(&ee_x509)?;
if !crl_valid_at_time(issuer_crl, validation_time) { if !crl_valid_at_time(issuer_crl, validation_time) {
return Err(CertPathError::CrlNotValidAtTime); return Err(CertPathError::CrlNotValidAtTime);
} }
@ -324,6 +379,19 @@ fn verify_ee_cert_signature(
.map_err(|e| CertPathError::EeSignatureInvalid(e.to_string())) .map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))
} }
/// Verify the EE certificate signature over the cached TBS DER bytes using
/// the issuer's subject public key, without reparsing the certificate.
///
/// NOTE(review): this assumes the signature is RSA PKCS#1 v1.5 with SHA-256;
/// confirm no other algorithms can reach this path.
fn verify_ee_cert_signature_fast(
    ee_tbs_der: &[u8],
    ee_signature_bytes: &[u8],
    issuer_spki: &SubjectPublicKeyInfo<'_>,
) -> Result<(), CertPathError> {
    let issuer_key = signature::UnparsedPublicKey::new(
        &signature::RSA_PKCS1_2048_8192_SHA256,
        &issuer_spki.subject_public_key.data,
    );
    issuer_key
        .verify(ee_tbs_der, ee_signature_bytes)
        .map_err(|err| CertPathError::EeSignatureInvalid(err.to_string()))
}
fn validate_ee_aki_matches_issuer_ski( fn validate_ee_aki_matches_issuer_ski(
ee: &ResourceCertificate, ee: &ResourceCertificate,
issuer_ca: &ResourceCertificate, issuer_ca: &ResourceCertificate,
@ -408,6 +476,16 @@ fn validate_ee_key_usage(cert: &X509Certificate<'_>) -> Result<(), CertPathError
Ok(()) Ok(())
} }
/// Map a cached EE KeyUsage summary onto the corresponding cert-path result:
/// only the digitalSignature-only case is acceptable (RFC 6487 §4.8.4).
fn validate_ee_key_usage_summary(summary: &EeKeyUsageSummary) -> Result<(), CertPathError> {
    let failure = match summary {
        EeKeyUsageSummary::DigitalSignatureOnly => return Ok(()),
        EeKeyUsageSummary::Missing => CertPathError::KeyUsageMissing,
        EeKeyUsageSummary::NotCritical => CertPathError::KeyUsageNotCritical,
        EeKeyUsageSummary::InvalidBits => CertPathError::KeyUsageInvalidBits,
        EeKeyUsageSummary::ParseError(msg) => CertPathError::KeyUsageParse(msg.clone()),
    };
    Err(failure)
}
fn time_within_validity( fn time_within_validity(
t: time::OffsetDateTime, t: time::OffsetDateTime,
not_before: time::OffsetDateTime, not_before: time::OffsetDateTime,

View File

@ -1,15 +1,15 @@
use crate::blob_store::RawObjectStore;
use crate::current_repo_index::CurrentRepoIndexHandle; use crate::current_repo_index::CurrentRepoIndexHandle;
use crate::data_model::manifest::{ManifestDecodeError, ManifestObject, ManifestValidateError}; use crate::data_model::manifest::{ManifestDecodeError, ManifestObject, ManifestValidateError};
use crate::data_model::signed_object::SignedObjectVerifyError; use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{CaFailedFetchPolicy, Policy}; use crate::policy::{CaFailedFetchPolicy, Policy};
use crate::report::{RfcRef, Warning}; use crate::report::{RfcRef, Warning};
use crate::storage::{PackFile, PackTime, RocksStore, StorageError, VcirArtifactRole}; use crate::storage::{PackFile, PackTime, RocksStore, StorageError, VcirArtifactRole};
use crate::validation::cert_path::{CertPathError, validate_ee_cert_path}; use crate::validation::cert_path::{CertPathError, validate_signed_object_ee_cert_path_fast};
use crate::validation::publication_point::PublicationPointSnapshot; use crate::validation::publication_point::PublicationPointSnapshot;
use sha2::Digest; use sha2::Digest;
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::HashSet; use std::collections::HashSet;
use x509_parser::prelude::FromDer;
#[derive(Clone, Copy, Debug, PartialEq, Eq)] #[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PublicationPointSource { pub enum PublicationPointSource {
@ -737,7 +737,10 @@ pub(crate) fn try_build_fresh_publication_point_with_timing(
timing.manifest_entries_ms = manifest_entries_started.elapsed().as_millis() as u64; timing.manifest_entries_ms = manifest_entries_started.elapsed().as_millis() as u64;
let mut files = Vec::with_capacity(manifest.manifest.file_count()); let mut files = Vec::with_capacity(manifest.manifest.file_count());
let pack_files_started = std::time::Instant::now(); let pack_files_started = std::time::Instant::now();
let external_raw_store = store.external_raw_store_ref().cloned().map(std::sync::Arc::new); let external_raw_store = store
.external_raw_store_ref()
.cloned()
.map(std::sync::Arc::new);
for entry in &entries { for entry in &entries {
let rsync_uri = let rsync_uri =
join_rsync_dir_and_file(publication_point_rsync_uri, entry.file_name.as_str()); join_rsync_dir_and_file(publication_point_rsync_uri, entry.file_name.as_str());
@ -768,7 +771,9 @@ pub(crate) fn try_build_fresh_publication_point_with_timing(
return Err(ManifestFreshError::HashMismatch { rsync_uri }); return Err(ManifestFreshError::HashMismatch { rsync_uri });
} }
if let (Some(_), Some(raw_store)) = (current_index_guard.as_ref(), external_raw_store.as_ref()) { if let (Some(_), Some(raw_store)) =
(current_index_guard.as_ref(), external_raw_store.as_ref())
{
files.push(PackFile::from_lazy_external_raw_store( files.push(PackFile::from_lazy_external_raw_store(
rsync_uri, rsync_uri,
current_object.current_hash_hex, current_object.current_hash_hex,
@ -855,7 +860,6 @@ fn validate_manifest_embedded_ee_cert_path(
validation_time: time::OffsetDateTime, validation_time: time::OffsetDateTime,
) -> Result<(), ManifestFreshError> { ) -> Result<(), ManifestFreshError> {
let ee = &manifest.signed_object.signed_data.certificates[0]; let ee = &manifest.signed_object.signed_data.certificates[0];
let ee_der = ee.raw_der.as_slice();
let crl_files = files let crl_files = files
.iter() .iter()
@ -878,13 +882,31 @@ fn validate_manifest_embedded_ee_cert_path(
for u in crldp_uris { for u in crldp_uris {
let s = u.as_str(); let s = u.as_str();
if let Some(f) = crl_files.iter().find(|f| f.rsync_uri == s) { if let Some(f) = crl_files.iter().find(|f| f.rsync_uri == s) {
let crl_bytes = f let crl_bytes = f.bytes().map_err(|e| ManifestFreshError::MissingFile {
.bytes() rsync_uri: format!("{s} ({e})"),
.map_err(|e| ManifestFreshError::MissingFile { rsync_uri: format!("{s} ({e})") })?; })?;
let _validated = validate_ee_cert_path( let issuer_ca = crate::data_model::rc::ResourceCertificate::decode_der(issuer_ca_der)
ee_der, .map_err(CertPathError::IssuerDecode)?;
issuer_ca_der, let (rem, issuer_spki) = x509_parser::x509::SubjectPublicKeyInfo::from_der(
crl_bytes, &issuer_ca.tbs.subject_public_key_info,
)
.map_err(|e| CertPathError::IssuerSpkiParse(e.to_string()))?;
if !rem.is_empty() {
return Err(CertPathError::IssuerSpkiTrailingBytes(rem.len()).into());
}
let issuer_crl = crate::data_model::crl::RpkixCrl::decode_der(crl_bytes)
.map_err(CertPathError::from)?;
let revoked_serials = issuer_crl
.revoked_certs
.iter()
.map(|rc| rc.serial_number.bytes_be.clone())
.collect::<std::collections::HashSet<_>>();
validate_signed_object_ee_cert_path_fast(
ee,
&issuer_ca,
&issuer_spki,
&issuer_crl,
&revoked_serials,
issuer_ca_rsync_uri, issuer_ca_rsync_uri,
Some(f.rsync_uri.as_str()), Some(f.rsync_uri.as_str()),
validation_time, validation_time,
@ -1418,7 +1440,10 @@ mod tests {
assert_eq!(point.manifest_bytes, manifest_bytes); assert_eq!(point.manifest_bytes, manifest_bytes);
assert_eq!(point.files.len(), 1); assert_eq!(point.files.len(), 1);
assert_eq!(point.files[0].rsync_uri, locked_uri); assert_eq!(point.files[0].rsync_uri, locked_uri);
assert_eq!(point.files[0].bytes_cloned().expect("locked bytes"), locked_bytes); assert_eq!(
point.files[0].bytes_cloned().expect("locked bytes"),
locked_bytes
);
} }
#[test] #[test]

View File

@ -11,7 +11,7 @@ use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{Policy, SignedObjectFailurePolicy}; use crate::policy::{Policy, SignedObjectFailurePolicy};
use crate::report::{RfcRef, Warning}; use crate::report::{RfcRef, Warning};
use crate::storage::{PackFile, PackTime, VcirLocalOutput, VcirOutputType}; use crate::storage::{PackFile, PackTime, VcirLocalOutput, VcirOutputType};
use crate::validation::cert_path::{CertPathError, validate_ee_cert_path_with_predecoded_ee}; use crate::validation::cert_path::{CertPathError, validate_signed_object_ee_cert_path_fast};
use crate::validation::manifest::PublicationPointData; use crate::validation::manifest::PublicationPointData;
use crate::validation::publication_point::PublicationPointSnapshot; use crate::validation::publication_point::PublicationPointSnapshot;
use x509_parser::prelude::FromDer; use x509_parser::prelude::FromDer;
@ -221,10 +221,7 @@ pub fn process_publication_point_for_issuer<P: PublicationPointData>(
let bytes = f let bytes = f
.bytes_cloned() .bytes_cloned()
.expect("snapshot CRL bytes must be loadable"); .expect("snapshot CRL bytes must be loadable");
( (f.rsync_uri.clone(), CachedIssuerCrl::Pending(bytes))
f.rsync_uri.clone(),
CachedIssuerCrl::Pending(bytes),
)
}) })
.collect(); .collect();
@ -605,18 +602,21 @@ fn process_roa_with_issuer(
roa.signed_object.verify()?; roa.signed_object.verify()?;
drop(_verify); drop(_verify);
let ee = &roa.signed_object.signed_data.certificates[0].resource_cert; let ee = &roa.signed_object.signed_data.certificates[0];
let ee_der = &roa.signed_object.signed_data.certificates[0].raw_der; let ee_crldp_uris = ee
let ee_crldp_uris = ee.tbs.extensions.crl_distribution_points_uris.as_ref(); .resource_cert
.tbs
.extensions
.crl_distribution_points_uris
.as_ref();
let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?; let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?;
let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?; let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?;
let _cert_path = timing let _cert_path = timing
.as_ref() .as_ref()
.map(|t| t.span_phase("objects_roa_validate_ee_cert_path_total")); .map(|t| t.span_phase("objects_roa_validate_ee_cert_path_total"));
validate_ee_cert_path_with_predecoded_ee( validate_signed_object_ee_cert_path_fast(
ee, ee,
ee_der,
issuer_ca, issuer_ca,
issuer_spki, issuer_spki,
&verified_crl.crl, &verified_crl.crl,
@ -631,7 +631,7 @@ fn process_roa_with_issuer(
.as_ref() .as_ref()
.map(|t| t.span_phase("objects_roa_validate_ee_resources_subset_total")); .map(|t| t.span_phase("objects_roa_validate_ee_resources_subset_total"));
validate_ee_resources_subset( validate_ee_resources_subset(
&ee, &ee.resource_cert,
issuer_effective_ip, issuer_effective_ip,
issuer_effective_as, issuer_effective_as,
issuer_resources_index, issuer_resources_index,
@ -641,7 +641,8 @@ fn process_roa_with_issuer(
let vrps = roa_to_vrps(&roa); let vrps = roa_to_vrps(&roa);
let source_object_hash = sha256_hex_from_32(&file.sha256); let source_object_hash = sha256_hex_from_32(&file.sha256);
let source_ee_cert_hash = crate::audit::sha256_hex(ee.raw_der.as_slice()); let source_ee_cert_hash = crate::audit::sha256_hex(ee.raw_der.as_slice());
let item_effective_until = PackTime::from_utc_offset_datetime(ee.tbs.validity_not_after); let item_effective_until =
PackTime::from_utc_offset_datetime(ee.resource_cert.tbs.validity_not_after);
let local_outputs = vrps let local_outputs = vrps
.iter() .iter()
.map(|vrp| { .map(|vrp| {
@ -713,18 +714,21 @@ fn process_aspa_with_issuer(
aspa.signed_object.verify()?; aspa.signed_object.verify()?;
drop(_verify); drop(_verify);
let ee = &aspa.signed_object.signed_data.certificates[0].resource_cert; let ee = &aspa.signed_object.signed_data.certificates[0];
let ee_der = &aspa.signed_object.signed_data.certificates[0].raw_der; let ee_crldp_uris = ee
let ee_crldp_uris = ee.tbs.extensions.crl_distribution_points_uris.as_ref(); .resource_cert
.tbs
.extensions
.crl_distribution_points_uris
.as_ref();
let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?; let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?;
let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?; let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?;
let _cert_path = timing let _cert_path = timing
.as_ref() .as_ref()
.map(|t| t.span_phase("objects_aspa_validate_ee_cert_path_total")); .map(|t| t.span_phase("objects_aspa_validate_ee_cert_path_total"));
validate_ee_cert_path_with_predecoded_ee( validate_signed_object_ee_cert_path_fast(
ee, ee,
ee_der,
issuer_ca, issuer_ca,
issuer_spki, issuer_spki,
&verified_crl.crl, &verified_crl.crl,
@ -739,7 +743,7 @@ fn process_aspa_with_issuer(
.as_ref() .as_ref()
.map(|t| t.span_phase("objects_aspa_validate_ee_resources_subset_total")); .map(|t| t.span_phase("objects_aspa_validate_ee_resources_subset_total"));
validate_ee_resources_subset( validate_ee_resources_subset(
&ee, &ee.resource_cert,
issuer_effective_ip, issuer_effective_ip,
issuer_effective_as, issuer_effective_as,
issuer_resources_index, issuer_resources_index,
@ -752,7 +756,8 @@ fn process_aspa_with_issuer(
}; };
let source_object_hash = sha256_hex_from_32(&file.sha256); let source_object_hash = sha256_hex_from_32(&file.sha256);
let source_ee_cert_hash = crate::audit::sha256_hex(ee.raw_der.as_slice()); let source_ee_cert_hash = crate::audit::sha256_hex(ee.raw_der.as_slice());
let item_effective_until = PackTime::from_utc_offset_datetime(ee.tbs.validity_not_after); let item_effective_until =
PackTime::from_utc_offset_datetime(ee.resource_cert.tbs.validity_not_after);
let providers = attestation let providers = attestation
.provider_as_ids .provider_as_ids
.iter() .iter()

View File

@ -823,10 +823,7 @@ fn discover_children_from_fresh_snapshot_with_audit<P: PublicationPointData>(
let bytes = f let bytes = f
.bytes_cloned() .bytes_cloned()
.map_err(|e| format!("snapshot CRL bytes load failed: {e}"))?; .map_err(|e| format!("snapshot CRL bytes load failed: {e}"))?;
Ok(( Ok((f.rsync_uri.clone(), CachedIssuerCrl::Pending(bytes)))
f.rsync_uri.clone(),
CachedIssuerCrl::Pending(bytes),
))
}) })
.collect::<Result<_, String>>()?; .collect::<Result<_, String>>()?;
@ -1480,7 +1477,11 @@ fn build_publication_point_audit_from_snapshot(
if !f.rsync_uri.ends_with(".crl") { if !f.rsync_uri.ends_with(".crl") {
continue; continue;
} }
let ok = f.bytes().ok().and_then(|bytes| RpkixCrl::decode_der(bytes).ok()).is_some(); let ok = f
.bytes()
.ok()
.and_then(|bytes| RpkixCrl::decode_der(bytes).ok())
.is_some();
audit_by_uri.insert( audit_by_uri.insert(
f.rsync_uri.clone(), f.rsync_uri.clone(),
ObjectAuditEntry { ObjectAuditEntry {
@ -2477,7 +2478,7 @@ fn select_manifest_current_crl_from_snapshot(
file.bytes() file.bytes()
.map_err(|e| format!("load current CRL bytes for VCIR failed: {e}"))?, .map_err(|e| format!("load current CRL bytes for VCIR failed: {e}"))?,
) )
.map_err(|e| format!("decode current CRL for VCIR failed: {e}"))?; .map_err(|e| format!("decode current CRL for VCIR failed: {e}"))?;
return Ok(CurrentCrlRef { file, crl }); return Ok(CurrentCrlRef { file, crl });
} }
} }
@ -2519,7 +2520,7 @@ fn build_vcir_local_outputs(
file.bytes() file.bytes()
.map_err(|e| format!("load accepted ROA bytes for VCIR failed: {e}"))?, .map_err(|e| format!("load accepted ROA bytes for VCIR failed: {e}"))?,
) )
.map_err(|e| format!("decode accepted ROA for VCIR failed: {e}"))?; .map_err(|e| format!("decode accepted ROA for VCIR failed: {e}"))?;
let ee = &roa.signed_object.signed_data.certificates[0]; let ee = &roa.signed_object.signed_data.certificates[0];
let source_ee_cert_hash = sha256_hex(ee.raw_der.as_slice()); let source_ee_cert_hash = sha256_hex(ee.raw_der.as_slice());
let item_effective_until = let item_effective_until =
@ -2561,7 +2562,7 @@ fn build_vcir_local_outputs(
file.bytes() file.bytes()
.map_err(|e| format!("load accepted ASPA bytes for VCIR failed: {e}"))?, .map_err(|e| format!("load accepted ASPA bytes for VCIR failed: {e}"))?,
) )
.map_err(|e| format!("decode accepted ASPA for VCIR failed: {e}"))?; .map_err(|e| format!("decode accepted ASPA for VCIR failed: {e}"))?;
let ee = &aspa.signed_object.signed_data.certificates[0]; let ee = &aspa.signed_object.signed_data.certificates[0];
let source_ee_cert_hash = sha256_hex(ee.raw_der.as_slice()); let source_ee_cert_hash = sha256_hex(ee.raw_der.as_slice());
let item_effective_until = let item_effective_until =
@ -3610,8 +3611,7 @@ authorityKeyIdentifier = keyid:always
publication_point_rsync_uri: pack.publication_point_rsync_uri.clone(), publication_point_rsync_uri: pack.publication_point_rsync_uri.clone(),
rrdp_notification_uri: None, rrdp_notification_uri: None,
}; };
persist_vcir_non_repository_evidence(&store, &ca) persist_vcir_non_repository_evidence(&store, &ca).expect("persist embedded evidence");
.expect("persist embedded evidence");
let issuer_hash = sha256_hex(&issuer_ca_der); let issuer_hash = sha256_hex(&issuer_ca_der);
let issuer_entry = store let issuer_entry = store
@ -3901,7 +3901,8 @@ authorityKeyIdentifier = keyid:always
assert!( assert!(
!artifacts !artifacts
.iter() .iter()
.any(|artifact| artifact.uri.is_none() && artifact.sha256 == sha256_hex(b"embedded-ee")), .any(|artifact| artifact.uri.is_none()
&& artifact.sha256 == sha256_hex(b"embedded-ee")),
"embedded EE cert artifacts should no longer be persisted separately" "embedded EE cert artifacts should no longer be persisted separately"
); );
} }

View File

@ -88,7 +88,11 @@ impl LiveStats {
if f.rsync_uri.ends_with(".crl") { if f.rsync_uri.ends_with(".crl") {
self.crl_total += 1; self.crl_total += 1;
if f.bytes().ok().and_then(|b| RpkixCrl::decode_der(b).ok()).is_some() { if f.bytes()
.ok()
.and_then(|b| RpkixCrl::decode_der(b).ok())
.is_some()
{
self.crl_decode_ok += 1; self.crl_decode_ok += 1;
} }
} }

View File

@ -2,6 +2,7 @@ use std::process::Command;
use rpki::validation::cert_path::{ use rpki::validation::cert_path::{
CertPathError, validate_ee_cert_path, validate_ee_cert_path_with_predecoded_ee, CertPathError, validate_ee_cert_path, validate_ee_cert_path_with_predecoded_ee,
validate_signed_object_ee_cert_path_fast,
}; };
fn openssl_available() -> bool { fn openssl_available() -> bool {
@ -367,6 +368,109 @@ fn validate_ee_cert_path_with_predecoded_ee_matches_prevalidated_path_rules() {
assert!(matches!(err, CertPathError::EeRevoked), "{err}"); assert!(matches!(err, CertPathError::EeRevoked), "{err}");
} }
#[test]
// Regression test: the new `validate_signed_object_ee_cert_path_fast` helper must
// agree with the existing `validate_ee_cert_path_with_predecoded_ee` on a real
// manifest fixture — both accept the valid EE cert path, and the fast helper
// still reports `EeRevoked` when the EE serial is in the revoked set.
fn validate_signed_object_ee_cert_path_fast_matches_predecoded_helper_on_real_manifest_fixture() {
    use rpki::data_model::common::BigUnsigned;
    use rpki::data_model::crl::RpkixCrl;
    use rpki::data_model::manifest::ManifestObject;
    use rpki::data_model::rc::ResourceCertificate;
    use std::collections::HashSet;
    use x509_parser::prelude::FromDer;
    use x509_parser::x509::SubjectPublicKeyInfo;
    // Load the DER fixtures: a CERNET manifest, its issuer's CRL, and the
    // APNIC-hosted issuer CA certificate that signed the manifest's EE cert.
    let manifest_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    )
    .expect("read manifest fixture");
    let crl_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl",
    )
    .expect("read crl fixture");
    let issuer_der = std::fs::read(
        "tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
    )
    .expect("read issuer fixture");
    let manifest = ManifestObject::decode_der(&manifest_der).expect("decode manifest");
    // The EE certificate is embedded as the first (and, for RPKI signed
    // objects, only) certificate in the CMS SignedData.
    let ee = &manifest.signed_object.signed_data.certificates[0];
    let issuer = ResourceCertificate::decode_der(&issuer_der).expect("decode issuer");
    let issuer_crl = RpkixCrl::decode_der(&crl_der).expect("decode crl");
    // Pre-parse the issuer SPKI once, as both helpers take it pre-decoded;
    // DER must be fully consumed.
    let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info)
        .expect("parse issuer spki");
    assert!(rem.is_empty());
    // Pick a validation instant strictly after all notBefore/thisUpdate
    // moments in the fixture, then sanity-check it is also before every
    // expiry so the fixture path is currently valid at `now`.
    let now = ee
        .resource_cert
        .tbs
        .validity_not_before
        .max(issuer.tbs.validity_not_before)
        .max(issuer_crl.this_update.utc)
        + time::Duration::seconds(1);
    assert!(now < ee.resource_cert.tbs.validity_not_after);
    assert!(now < issuer.tbs.validity_not_after);
    assert!(now < issuer_crl.next_update.utc);
    // Extract the rsync URIs the path checks expect: AIA (caIssuers) on the
    // EE must point at the issuer, and the CRLDP must point at the CRL.
    let issuer_uri = ee
        .resource_cert
        .tbs
        .extensions
        .ca_issuers_uris
        .as_ref()
        .and_then(|uris| uris.iter().find(|u| u.starts_with("rsync://")))
        .expect("issuer uri")
        .to_string();
    let crl_uri = ee
        .resource_cert
        .tbs
        .extensions
        .crl_distribution_points_uris
        .as_ref()
        .and_then(|uris| uris.iter().find(|u| u.starts_with("rsync://")))
        .expect("crl uri")
        .to_string();
    // Baseline: the existing predecoded-EE helper accepts the fixture path.
    validate_ee_cert_path_with_predecoded_ee(
        &ee.resource_cert,
        &ee.raw_der,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some(&issuer_uri),
        Some(&crl_uri),
        now,
    )
    .expect("predecoded helper accepts fixture");
    // The fast helper (taking the whole signed-object certificate entry,
    // not separate cert + raw DER) must accept the same inputs.
    validate_signed_object_ee_cert_path_fast(
        ee,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some(&issuer_uri),
        Some(&crl_uri),
        now,
    )
    .expect("fast helper accepts fixture");
    // Negative case: once the EE's serial number is in the revoked set, the
    // fast helper must reject the path with exactly `EeRevoked`.
    let mut revoked = HashSet::new();
    revoked.insert(BigUnsigned::from_biguint(&ee.resource_cert.tbs.serial_number).bytes_be);
    let err = validate_signed_object_ee_cert_path_fast(
        ee,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &revoked,
        Some(&issuer_uri),
        Some(&crl_uri),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::EeRevoked), "{err}");
}
#[test] #[test]
fn validate_ee_cert_path_with_prevalidated_issuer_rejects_non_ee_and_non_ca_issuer() { fn validate_ee_cert_path_with_prevalidated_issuer_rejects_non_ee_and_non_ca_issuer() {
use rpki::data_model::crl::RpkixCrl; use rpki::data_model::crl::RpkixCrl;

View File

@ -135,7 +135,10 @@ fn cir_replay_matrix_script_matches_reference_for_all_participants() {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_matrix.sh"); PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_matrix.sh");
let out = Command::new(script) let out = Command::new(script)
.env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize")) .env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize"))
.env("CIR_EXTRACT_INPUTS_BIN", env!("CARGO_BIN_EXE_cir_extract_inputs")) .env(
"CIR_EXTRACT_INPUTS_BIN",
env!("CARGO_BIN_EXE_cir_extract_inputs"),
)
.env( .env(
"CCR_TO_COMPARE_VIEWS_BIN", "CCR_TO_COMPARE_VIEWS_BIN",
env!("CARGO_BIN_EXE_ccr_to_compare_views"), env!("CARGO_BIN_EXE_ccr_to_compare_views"),

View File

@ -133,7 +133,10 @@ fn cir_routinator_script_matches_reference_on_ta_only_cir() {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_routinator.sh"); PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_routinator.sh");
let out = Command::new(script) let out = Command::new(script)
.env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize")) .env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize"))
.env("CIR_EXTRACT_INPUTS_BIN", env!("CARGO_BIN_EXE_cir_extract_inputs")) .env(
"CIR_EXTRACT_INPUTS_BIN",
env!("CARGO_BIN_EXE_cir_extract_inputs"),
)
.env( .env(
"CCR_TO_COMPARE_VIEWS_BIN", "CCR_TO_COMPARE_VIEWS_BIN",
env!("CARGO_BIN_EXE_ccr_to_compare_views"), env!("CARGO_BIN_EXE_ccr_to_compare_views"),
@ -189,7 +192,10 @@ fn cir_rpki_client_script_matches_reference_on_ta_only_cir() {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_rpki_client.sh"); PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("scripts/cir/run_cir_replay_rpki_client.sh");
let out = Command::new(script) let out = Command::new(script)
.env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize")) .env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize"))
.env("CIR_EXTRACT_INPUTS_BIN", env!("CARGO_BIN_EXE_cir_extract_inputs")) .env(
"CIR_EXTRACT_INPUTS_BIN",
env!("CARGO_BIN_EXE_cir_extract_inputs"),
)
.env( .env(
"CCR_TO_COMPARE_VIEWS_BIN", "CCR_TO_COMPARE_VIEWS_BIN",
env!("CARGO_BIN_EXE_ccr_to_compare_views"), env!("CARGO_BIN_EXE_ccr_to_compare_views"),

View File

@ -182,7 +182,10 @@ fn peer_sequence_replay_scripts_replay_all_steps() {
.join("scripts/cir/run_cir_replay_sequence_routinator.sh"); .join("scripts/cir/run_cir_replay_sequence_routinator.sh");
let out = Command::new(routinator_script) let out = Command::new(routinator_script)
.env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize")) .env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize"))
.env("CIR_EXTRACT_INPUTS_BIN", env!("CARGO_BIN_EXE_cir_extract_inputs")) .env(
"CIR_EXTRACT_INPUTS_BIN",
env!("CARGO_BIN_EXE_cir_extract_inputs"),
)
.env( .env(
"CCR_TO_COMPARE_VIEWS_BIN", "CCR_TO_COMPARE_VIEWS_BIN",
env!("CARGO_BIN_EXE_ccr_to_compare_views"), env!("CARGO_BIN_EXE_ccr_to_compare_views"),
@ -206,7 +209,10 @@ fn peer_sequence_replay_scripts_replay_all_steps() {
.join("scripts/cir/run_cir_replay_sequence_rpki_client.sh"); .join("scripts/cir/run_cir_replay_sequence_rpki_client.sh");
let out = Command::new(rpki_client_script) let out = Command::new(rpki_client_script)
.env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize")) .env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize"))
.env("CIR_EXTRACT_INPUTS_BIN", env!("CARGO_BIN_EXE_cir_extract_inputs")) .env(
"CIR_EXTRACT_INPUTS_BIN",
env!("CARGO_BIN_EXE_cir_extract_inputs"),
)
.env( .env(
"CCR_TO_COMPARE_VIEWS_BIN", "CCR_TO_COMPARE_VIEWS_BIN",
env!("CARGO_BIN_EXE_ccr_to_compare_views"), env!("CARGO_BIN_EXE_ccr_to_compare_views"),

View File

@ -174,7 +174,10 @@ fn ours_sequence_replay_script_replays_all_steps() {
.join("scripts/cir/run_cir_replay_sequence_ours.sh"); .join("scripts/cir/run_cir_replay_sequence_ours.sh");
let out = Command::new(script) let out = Command::new(script)
.env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize")) .env("CIR_MATERIALIZE_BIN", env!("CARGO_BIN_EXE_cir_materialize"))
.env("CIR_EXTRACT_INPUTS_BIN", env!("CARGO_BIN_EXE_cir_extract_inputs")) .env(
"CIR_EXTRACT_INPUTS_BIN",
env!("CARGO_BIN_EXE_cir_extract_inputs"),
)
.env( .env(
"CCR_TO_COMPARE_VIEWS_BIN", "CCR_TO_COMPARE_VIEWS_BIN",
env!("CARGO_BIN_EXE_ccr_to_compare_views"), env!("CARGO_BIN_EXE_ccr_to_compare_views"),

View File

@ -20,7 +20,10 @@ fn wrapper_script() -> std::path::PathBuf {
fn multi_rir_case_info_resolves_all_five_rirs_and_timings() { fn multi_rir_case_info_resolves_all_five_rirs_and_timings() {
let bundle_root = multi_rir_bundle_root(); let bundle_root = multi_rir_bundle_root();
if !bundle_root.is_dir() { if !bundle_root.is_dir() {
eprintln!("skipping multi-rir case info test; bundle root missing: {}", bundle_root.display()); eprintln!(
"skipping multi-rir case info test; bundle root missing: {}",
bundle_root.display()
);
return; return;
} }

View File

@ -1,5 +1,5 @@
use rpki::data_model::oid::{OID_CT_RPKI_MANIFEST, OID_SHA256, OID_SIGNED_DATA}; use rpki::data_model::oid::{OID_CT_RPKI_MANIFEST, OID_SHA256, OID_SIGNED_DATA};
use rpki::data_model::signed_object::RpkiSignedObject; use rpki::data_model::signed_object::{EeKeyUsageSummary, RpkiSignedObject};
#[test] #[test]
fn decode_manifest_signed_object_smoke() { fn decode_manifest_signed_object_smoke() {
@ -32,6 +32,20 @@ fn decode_manifest_signed_object_smoke() {
.iter() .iter()
.any(|u| u.starts_with("rsync://")) .any(|u| u.starts_with("rsync://"))
); );
assert!(
!so.signed_data.certificates[0]
.tbs_certificate_der
.is_empty(),
"expected cached tbsCertificate DER"
);
assert!(
!so.signed_data.certificates[0].signature_bytes.is_empty(),
"expected cached EE signature bytes"
);
assert_eq!(
so.signed_data.certificates[0].key_usage_summary,
EeKeyUsageSummary::DigitalSignatureOnly
);
assert_eq!(so.signed_data.signer_infos.len(), 1); assert_eq!(so.signed_data.signer_infos.len(), 1);
println!("{so:#?}") println!("{so:#?}")
} }