manifest decode & profile validate optimization

This commit is contained in:
yuyr 2026-02-25 11:16:02 +08:00
parent 2a6a963ecd
commit 1cc3351bef
35 changed files with 4418 additions and 225 deletions

1
.gitignore vendored
View File

@ -1,2 +1,3 @@
target/ target/
Cargo.lock Cargo.lock
perf.*

View File

@ -3,6 +3,11 @@ name = "rpki"
version = "0.1.0" version = "0.1.0"
edition = "2024" edition = "2024"
[features]
default = ["full"]
# Full build used by the main RP implementation (includes RocksDB-backed storage).
full = ["dep:rocksdb"]
[dependencies] [dependencies]
der-parser = { version = "10.0.0", features = ["serialize"] } der-parser = { version = "10.0.0", features = ["serialize"] }
hex = "0.4.3" hex = "0.4.3"
@ -16,7 +21,7 @@ url = "2.5.8"
serde = { version = "1.0.218", features = ["derive"] } serde = { version = "1.0.218", features = ["derive"] }
serde_json = "1.0.140" serde_json = "1.0.140"
toml = "0.8.20" toml = "0.8.20"
rocksdb = { version = "0.22.0", default-features = false, features = ["lz4"] } rocksdb = { version = "0.22.0", optional = true, default-features = false, features = ["lz4"] }
serde_cbor = "0.11.2" serde_cbor = "0.11.2"
roxmltree = "0.20.0" roxmltree = "0.20.0"
uuid = { version = "1.7.0", features = ["v4"] } uuid = { version = "1.7.0", features = ["v4"] }

View File

@ -0,0 +1,8 @@
# Standalone micro-benchmark crate for ManifestObject::decode_der.
[package]
name = "ours-manifest-bench"
version = "0.1.0"
edition = "2024"
# Depend on the parent rpki crate with default features off so the heavy
# RocksDB-backed storage feature is not pulled into the benchmark build.
[dependencies]
rpki = { path = "../..", default-features = false }

View File

@ -0,0 +1,145 @@
use rpki::data_model::manifest::ManifestObject;
use std::hint::black_box;
use std::path::PathBuf;
use std::time::Instant;
/// Parsed command-line configuration for the benchmark run.
///
/// Exactly one of `sample` / `manifest_path` is expected to be set
/// (enforced by `parse_args`).
#[derive(Debug, Clone)]
struct Config {
    /// Fixture name resolved under `tests/benchmark/selected_der/` (mutually
    /// exclusive with `manifest_path`).
    sample: Option<String>,
    /// Explicit path to a `.mft` fixture (mutually exclusive with `sample`).
    manifest_path: Option<PathBuf>,
    /// Timed decode iterations per measurement round.
    iterations: u64,
    /// Untimed warmup decodes executed before each timed round.
    warmup_iterations: u64,
    /// Number of measurement rounds averaged into the final report.
    repeats: u32,
}
/// Print CLI usage to stderr and terminate the process with exit code 2.
fn usage_and_exit() -> ! {
    // Single literal so the full help text stays greppable in one place.
    let usage = "Usage:\n ours-manifest-bench (--sample <name> | --manifest <path>) [--iterations N] [--warmup-iterations N] [--repeats N]\n\nExamples:\n cargo run --release -- --sample small-01 --iterations 20000 --warmup-iterations 2000 --repeats 3\n cargo run --release -- --manifest ../../tests/benchmark/selected_der/small-01.mft";
    eprintln!("{usage}");
    std::process::exit(2);
}
/// Parse command-line flags into a `Config`, terminating with a usage
/// message on any malformed, unknown, or missing argument.
fn parse_args() -> Config {
    // Start from the defaults and overwrite fields as flags are seen.
    let mut cfg = Config {
        sample: None,
        manifest_path: None,
        iterations: 20_000,
        warmup_iterations: 2_000,
        repeats: 3,
    };
    let mut argv = std::env::args().skip(1);
    while let Some(flag) = argv.next() {
        match flag.as_str() {
            "--sample" => {
                cfg.sample = Some(argv.next().unwrap_or_else(|| usage_and_exit()));
            }
            "--manifest" => {
                let raw = argv.next().unwrap_or_else(|| usage_and_exit());
                cfg.manifest_path = Some(PathBuf::from(raw));
            }
            "--iterations" => {
                let raw = argv.next().unwrap_or_else(|| usage_and_exit());
                cfg.iterations = raw.parse().unwrap_or_else(|_| usage_and_exit());
            }
            "--warmup-iterations" => {
                let raw = argv.next().unwrap_or_else(|| usage_and_exit());
                cfg.warmup_iterations = raw.parse().unwrap_or_else(|_| usage_and_exit());
            }
            "--repeats" => {
                let raw = argv.next().unwrap_or_else(|| usage_and_exit());
                cfg.repeats = raw.parse().unwrap_or_else(|_| usage_and_exit());
            }
            "-h" | "--help" => usage_and_exit(),
            _ => usage_and_exit(),
        }
    }
    // Exactly one input source (named sample XOR explicit path) must be given.
    match (cfg.sample.is_some(), cfg.manifest_path.is_some()) {
        (true, false) | (false, true) => cfg,
        _ => usage_and_exit(),
    }
}
/// Build the fixture path for a named sample.
///
/// Assumes the current working directory is `rpki/benchmark/ours_manifest_bench`.
fn derive_manifest_path(sample: &str) -> PathBuf {
    let mut path = PathBuf::from("../../tests/benchmark/selected_der");
    path.push(format!("{sample}.mft"));
    path
}
/// Entry point: load the fixture, decode it repeatedly, and print a
/// one-row Markdown summary table (avg ns/op, ops/s, file count).
fn main() {
    let cfg = parse_args();
    // --manifest wins when given; otherwise derive the path from --sample.
    let fixture = match cfg.manifest_path.clone() {
        Some(p) => p,
        None => derive_manifest_path(cfg.sample.as_deref().unwrap()),
    };
    let bytes = match std::fs::read(&fixture) {
        Ok(b) => b,
        Err(e) => {
            eprintln!("read manifest fixture failed: {e}; path={}", fixture.display());
            std::process::exit(1);
        }
    };
    // Decode once up front: validates the fixture and supplies the file count.
    let first = match ManifestObject::decode_der(&bytes) {
        Ok(obj) => obj,
        Err(e) => {
            eprintln!("decode failed: {e}; path={}", fixture.display());
            std::process::exit(1);
        }
    };
    let file_count = first.manifest.file_count();
    let mut ns_rounds: Vec<f64> = Vec::with_capacity(cfg.repeats as usize);
    let mut ops_rounds: Vec<f64> = Vec::with_capacity(cfg.repeats as usize);
    for _ in 0..cfg.repeats {
        // Warmup: identical work to the timed loop, just unmeasured.
        for _ in 0..cfg.warmup_iterations {
            black_box(ManifestObject::decode_der(black_box(&bytes)).expect("warmup decode"));
        }
        let t0 = Instant::now();
        for _ in 0..cfg.iterations {
            black_box(ManifestObject::decode_der(black_box(&bytes)).expect("timed decode"));
        }
        let secs = t0.elapsed().as_secs_f64();
        ns_rounds.push(secs * 1e9 / cfg.iterations as f64);
        ops_rounds.push(cfg.iterations as f64 / secs);
    }
    let avg_ns_per_op = ns_rounds.iter().sum::<f64>() / (ns_rounds.len() as f64);
    let avg_ops_per_s = ops_rounds.iter().sum::<f64>() / (ops_rounds.len() as f64);
    // Prefer the --sample name; otherwise fall back to the fixture file name.
    let raw_name = cfg.sample.clone().unwrap_or_else(|| {
        fixture
            .file_name()
            .map(|s| s.to_string_lossy().to_string())
            .unwrap_or_else(|| fixture.display().to_string())
    });
    let sample_name = raw_name.strip_suffix(".mft").unwrap_or(&raw_name).to_string();
    println!("fixture: {}", fixture.display());
    println!();
    println!("| sample | avg ns/op | ops/s | file count |");
    println!("|---|---:|---:|---:|");
    println!(
        "| {} | {:.2} | {:.2} | {} |",
        sample_name, avg_ns_per_op, avg_ops_per_s, file_count
    );
}

View File

@ -0,0 +1,195 @@
#!/usr/bin/env bash
set -euo pipefail
# M2: Run per-sample decode+profile benchmark (Ours vs Routinator) on selected_der fixtures.
#
# Outputs:
# - specs/develop/20260224/data/m2_manifest_decode_profile_compare.csv
# - specs/develop/20260224/data/m2_raw.log
#
# Note: This script assumes Routinator benchmark repo exists at:
# /home/yuyr/dev/rust_playground/routinator/benchmark
#
# It also assumes fixtures exist under:
# rpki/tests/benchmark/selected_der/*.mft
# routinator/benchmark/fixtures/selected_der/*.mft
# Resolve directories relative to this script's own location.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
RPKI_DIR="$ROOT_DIR"
OURS_BENCH_DIR="$RPKI_DIR/benchmark/ours_manifest_bench"
# Routinator benchmark repo location; overridable via environment.
ROUT_BENCH_DIR="${ROUT_BENCH_DIR:-/home/yuyr/dev/rust_playground/routinator/benchmark}"
ROUT_BIN="$ROUT_BENCH_DIR/target/release/routinator-manifest-benchmark"
# Output locations (each overridable via environment).
DATE_TAG="${DATE_TAG:-20260224}"
OUT_DIR="$RPKI_DIR/../specs/develop/${DATE_TAG}/data"
OUT_CSV="${OUT_CSV:-$OUT_DIR/m2_manifest_decode_profile_compare.csv}"
OUT_RAW="${OUT_RAW:-$OUT_DIR/m2_raw.log}"
REPEATS="${REPEATS:-3}"
# Iterations / warmups (kept moderate for interactive iteration).
ITER_SMALL="${ITER_SMALL:-20000}"
ITER_MEDIUM="${ITER_MEDIUM:-20000}"
ITER_LARGE="${ITER_LARGE:-20000}"
ITER_XLARGE="${ITER_XLARGE:-2000}"
WARM_SMALL="${WARM_SMALL:-2000}"
WARM_MEDIUM="${WARM_MEDIUM:-2000}"
WARM_LARGE="${WARM_LARGE:-2000}"
WARM_XLARGE="${WARM_XLARGE:-200}"
# Sample fixtures to benchmark; the name prefix selects the size bucket.
SAMPLES=(
small-01
small-02
medium-01
medium-02
large-01
large-02
xlarge-01
xlarge-02
)
mkdir -p "$OUT_DIR"
# Truncate the raw log, then write the CSV header row.
: > "$OUT_RAW"
echo "sample,bucket,manifest_file_count,ours_avg_ns_per_op,ours_ops_per_s,rout_avg_ns_per_op,rout_ops_per_s,ratio_ours_over_rout,iterations,repeats,warmup" > "$OUT_CSV"
echo "[1/3] Build ours benchmark (release)..." | tee -a "$OUT_RAW"
(cd "$OURS_BENCH_DIR" && cargo build --release -q)
OURS_BIN="$OURS_BENCH_DIR/target/release/ours-manifest-bench"
echo "[2/3] Build routinator benchmark (release)..." | tee -a "$OUT_RAW"
(cd "$ROUT_BENCH_DIR" && cargo build --release -q)
# Optionally pin benchmark processes to one CPU for stabler numbers:
# set TASKSET_CPU to a CPU index (only used when taskset is installed).
taskset_prefix=""
if command -v taskset >/dev/null 2>&1; then
if [[ -n "${TASKSET_CPU:-}" ]]; then
taskset_prefix="taskset -c ${TASKSET_CPU}"
fi
fi
# Map a sample name to its size bucket, keyed off the name prefix.
bucket_for() {
    local sample_name="$1"
    if [[ "$sample_name" == small-* ]]; then
        echo "small"
    elif [[ "$sample_name" == medium-* ]]; then
        echo "medium"
    elif [[ "$sample_name" == large-* ]]; then
        echo "large"
    elif [[ "$sample_name" == xlarge-* ]]; then
        echo "xlarge"
    else
        echo "unknown"
    fi
}
# Timed iteration count for a size bucket (unknown buckets fall back to medium).
iters_for() {
    local bucket="$1"
    if [[ "$bucket" == "small" ]]; then
        echo "$ITER_SMALL"
    elif [[ "$bucket" == "medium" ]]; then
        echo "$ITER_MEDIUM"
    elif [[ "$bucket" == "large" ]]; then
        echo "$ITER_LARGE"
    elif [[ "$bucket" == "xlarge" ]]; then
        echo "$ITER_XLARGE"
    else
        echo "$ITER_MEDIUM"
    fi
}
# Warmup iteration count for a size bucket (unknown buckets fall back to medium).
warm_for() {
    local bucket="$1"
    if [[ "$bucket" == "small" ]]; then
        echo "$WARM_SMALL"
    elif [[ "$bucket" == "medium" ]]; then
        echo "$WARM_MEDIUM"
    elif [[ "$bucket" == "large" ]]; then
        echo "$WARM_LARGE"
    elif [[ "$bucket" == "xlarge" ]]; then
        echo "$WARM_XLARGE"
    else
        echo "$WARM_MEDIUM"
    fi
}
# Run the "ours" benchmark binary for one sample and echo "avg_ns,ops_per_s,file_count".
#
# Args: $1=sample name, $2=timed iterations, $3=warmup iterations.
# Appends the full benchmark output to $OUT_RAW.
#
# Fixes vs. previous version:
# - uses POSIX `grep -E` instead of `rg` (not everywhere installed);
# - `|| true` on the extraction pipeline: under `set -e -o pipefail` a
#   non-matching grep would abort the script before the explicit
#   "failed to parse" diagnostic below could ever run.
run_ours() {
    local sample="$1"
    local iters="$2"
    local warm="$3"
    local ours_fixture="$RPKI_DIR/tests/benchmark/selected_der/${sample}.mft"
    if [[ ! -f "$ours_fixture" ]]; then
        echo "ours fixture not found: $ours_fixture" >&2
        exit 1
    fi
    echo "### ours $sample" >> "$OUT_RAW"
    local out
    # $taskset_prefix is intentionally unquoted: empty -> no-op, else "taskset -c N".
    out=$($taskset_prefix "$OURS_BIN" --manifest "$ours_fixture" --iterations "$iters" --warmup-iterations "$warm" --repeats "$REPEATS")
    echo "$out" >> "$OUT_RAW"
    local line
    line=$(echo "$out" | grep -E "^\\| ${sample} \\|" | tail -n 1 || true)
    if [[ -z "${line:-}" ]]; then
        echo "failed to parse ours output for $sample" >&2
        exit 1
    fi
    # Expected final row: | sample | avg ns/op | ops/s | file count |
    local avg ops cnt
    avg=$(echo "$line" | awk -F'|' '{gsub(/^ +| +$/,"",$3); print $3}')
    ops=$(echo "$line" | awk -F'|' '{gsub(/^ +| +$/,"",$4); print $4}')
    cnt=$(echo "$line" | awk -F'|' '{gsub(/^ +| +$/,"",$5); print $5}')
    echo "$avg,$ops,$cnt"
}
# Run the Routinator benchmark binary for one sample and echo "avg_ns,ops_per_s,file_count".
#
# Args: $1=sample name, $2=timed iterations, $3=warmup iterations.
# Appends the full benchmark output to $OUT_RAW.
#
# Fixes vs. previous version:
# - uses POSIX `grep -E` instead of `rg` (not everywhere installed);
# - `|| true` + an explicit emptiness check: under `set -e -o pipefail` a
#   non-matching grep would previously kill the script with no diagnostic.
run_rout() {
    local sample="$1"
    local iters="$2"
    local warm="$3"
    local rout_fixture="$ROUT_BENCH_DIR/fixtures/selected_der/${sample}.mft"
    if [[ ! -f "$rout_fixture" ]]; then
        echo "routinator fixture not found: $rout_fixture" >&2
        exit 1
    fi
    echo "### routinator $sample" >> "$OUT_RAW"
    local out
    # $taskset_prefix is intentionally unquoted: empty -> no-op, else "taskset -c N".
    out=$(
        $taskset_prefix "$ROUT_BIN" \
            --target decode_only \
            --manifest "$rout_fixture" \
            --issuer "$ROUT_BENCH_DIR/fixtures/ta.cer" \
            --iterations "$iters" \
            --repeats "$REPEATS" \
            --warmup-iterations "$warm" \
            --strict false
    )
    echo "$out" >> "$OUT_RAW"
    local avg_line cnt_line
    avg_line=$(echo "$out" | grep -E "^ avg:" || true)
    cnt_line=$(echo "$out" | grep -E "^ manifest_file_count:" || true)
    if [[ -z "$avg_line" || -z "$cnt_line" ]]; then
        echo "failed to parse routinator output for $sample" >&2
        exit 1
    fi
    local avg_ns ops_s cnt
    avg_ns=$(echo "$avg_line" | awk '{print $2}')
    ops_s=$(echo "$avg_line" | awk '{gsub(/[()]/,"",$4); print $4}')
    cnt=$(echo "$cnt_line" | awk '{print $2}')
    echo "$avg_ns,$ops_s,$cnt"
}
echo "[3/3] Run per-sample benchmarks..." | tee -a "$OUT_RAW"
for s in "${SAMPLES[@]}"; do
b=$(bucket_for "$s")
it=$(iters_for "$b")
warm=$(warm_for "$b")
# Both runners echo "avg,ops,count"; split on commas into the three fields.
IFS=, read -r ours_avg ours_ops ours_cnt < <(run_ours "$s" "$it" "$warm")
IFS=, read -r rout_avg rout_ops rout_cnt < <(run_rout "$s" "$it" "$warm")
# Sanity check: both implementations should report the same fileList length.
if [[ "$ours_cnt" != "$rout_cnt" ]]; then
echo "WARNING: file count differs for $s (ours=$ours_cnt rout=$rout_cnt)" | tee -a "$OUT_RAW"
fi
# Compute the ours/routinator ratio with python3 for clean float formatting.
ratio=$(python3 - <<PY
o=float("$ours_avg")
r=float("$rout_avg")
print(f"{(o/r):.4f}" if r != 0 else "inf")
PY
)
echo "$s,$b,$ours_cnt,$ours_avg,$ours_ops,$rout_avg,$rout_ops,$ratio,$it,$REPEATS,$warm" >> "$OUT_CSV"
echo >> "$OUT_RAW"
done
echo "Done."
echo "- CSV: $OUT_CSV"
echo "- Raw: $OUT_RAW"

View File

@ -0,0 +1,142 @@
#!/usr/bin/env bash
set -euo pipefail
# M3: Generate flamegraphs + top hotspots for Manifest decode+profile (Ours vs Routinator).
#
# Outputs under:
# specs/develop/20260224/flamegraph/
# specs/develop/20260224/hotspots/
# specs/develop/20260224/perf/
#
# Notes:
# - On WSL2, /usr/bin/perf is often a wrapper that fails. This script uses a real perf binary
# from /usr/lib/linux-tools/*/perf (if present).
# - Ours profiling uses perf + flamegraph --perfdata to avoid rebuilding the whole crate graph
# with RocksDB.
# Resolve directories relative to this script's own location.
ROOT_REPO="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
RPKI_DIR="$ROOT_REPO/rpki"
DATE_TAG="${DATE_TAG:-20260224}"
OUT_BASE="$ROOT_REPO/specs/develop/${DATE_TAG}"
OUT_FLAME="$OUT_BASE/flamegraph"
OUT_HOT="$OUT_BASE/hotspots"
OUT_PERF="$OUT_BASE/perf"
# RUN_TAG distinguishes output files from different profiling runs.
RUN_TAG="${RUN_TAG:-p2}"
OURS_BENCH_DIR="$RPKI_DIR/benchmark/ours_manifest_bench"
OURS_BIN="$OURS_BENCH_DIR/target/release/ours-manifest-bench"
ROUT_BENCH_DIR="${ROUT_BENCH_DIR:-/home/yuyr/dev/rust_playground/routinator/benchmark}"
ROUT_BIN="$ROUT_BENCH_DIR/target/release/routinator-manifest-benchmark"
ROUT_ISSUER="$ROUT_BENCH_DIR/fixtures/ta.cer"
# Sampling frequency passed to perf record -F.
PROFILE_HZ="${PROFILE_HZ:-99}"
mkdir -p "$OUT_FLAME" "$OUT_HOT" "$OUT_PERF"
# Detect the WSL perf wrapper (prints a kernel-mismatch warning) and, if
# present, substitute a real perf binary from linux-tools.
PERF_WRAPPER_OUT="$(perf --version 2>&1 || true)"
PERF_REAL=""
if echo "${PERF_WRAPPER_OUT}" | grep -q "WARNING: perf not found for kernel"; then
PERF_REAL="$(ls -1 /usr/lib/linux-tools/*/perf 2>/dev/null | head -n 1 || true)"
else
PERF_REAL="$(command -v perf || true)"
fi
if [[ -z "${PERF_REAL}" ]]; then
echo "ERROR: usable perf binary not found (wrapper detected and no /usr/lib/linux-tools/*/perf)." >&2
exit 2
fi
# Install a tiny shim named "perf" ahead of the wrapper on PATH so that tools
# which shell out to "perf" (e.g. flamegraph) pick up the real binary.
SHIM_DIR="$RPKI_DIR/target/bench/tools"
mkdir -p "$SHIM_DIR"
cat > "$SHIM_DIR/perf" <<EOF
#!/usr/bin/env bash
exec "${PERF_REAL}" "\$@"
EOF
chmod +x "$SHIM_DIR/perf"
export PATH="$SHIM_DIR:$PATH"
echo "Using perf: $PERF_REAL"
echo "[1/3] Build ours benchmark with frame pointers..."
# Frame pointers make perf's -g call stacks reliable without DWARF unwinding.
(cd "$OURS_BENCH_DIR" && RUSTFLAGS="-C force-frame-pointers=yes" cargo build --release -q)
echo "[2/3] Build routinator benchmark (release)..."
(cd "$ROUT_BENCH_DIR" && cargo build --release -q)
# Pin profiled processes to CPU 0 (when taskset exists) for stable samples.
taskset_prefix=""
if command -v taskset >/dev/null 2>&1; then
taskset_prefix="taskset -c 0"
fi
# Profile one sample of the "ours" benchmark with perf, then render a
# flamegraph SVG and a "percent<TAB>symbol" hotspot TSV.
# Args: $1=sample name, $2=timed iterations, $3=warmup iterations.
profile_ours() {
local sample="$1"
local iters="$2"
local warm="$3"
local fixture="$RPKI_DIR/tests/benchmark/selected_der/${sample}.mft"
if [[ ! -f "$fixture" ]]; then
echo "ERROR: ours fixture not found: $fixture" >&2
exit 1
fi
local perfdata="$OUT_PERF/ours_${sample}_${RUN_TAG}.perf.data"
local svg="$OUT_FLAME/ours_${sample}_${RUN_TAG}.svg"
local tsv="$OUT_HOT/ours_${sample}_${RUN_TAG}.tsv"
echo "== ours $sample (iters=$iters warmup=$warm hz=$PROFILE_HZ)"
# Sample the benchmark run with call graphs (-g); $taskset_prefix is
# intentionally unquoted (empty -> no-op, else "taskset -c 0").
$taskset_prefix perf record -o "$perfdata" -F "$PROFILE_HZ" -g -- \
"$OURS_BIN" --manifest "$fixture" --iterations "$iters" --warmup-iterations "$warm" --repeats 1 >/dev/null
# Render the SVG from the captured perf data (no re-run needed).
flamegraph --perfdata "$perfdata" --output "$svg" --title "ours ${sample} ManifestObject::decode_der" --deterministic >/dev/null
# Reduce the perf report to "percent<TAB>symbol" rows for symbols >= 0.5%.
perf report -i "$perfdata" --stdio --no-children --sort symbol --percent-limit 0.5 \
| awk '/^[[:space:]]*[0-9.]+%/ {pct=$1; sub(/%/,"",pct); $1=""; sub(/^[[:space:]]+/,""); print pct "\t" $0}' \
> "$tsv"
}
# Profile one sample of the Routinator benchmark. The Routinator benchmark
# binary does its own profiling/flamegraph/hotspot output via flags, so this
# just forwards the output paths.
# Args: $1=sample name, $2=timed iterations, $3=warmup iterations.
profile_routinator() {
local sample="$1"
local iters="$2"
local warm="$3"
local fixture="$ROUT_BENCH_DIR/fixtures/selected_der/${sample}.mft"
if [[ ! -f "$fixture" ]]; then
echo "ERROR: routinator fixture not found: $fixture" >&2
exit 1
fi
local svg="$OUT_FLAME/routinator_${sample}_${RUN_TAG}.svg"
local tsv="$OUT_HOT/routinator_${sample}_${RUN_TAG}.tsv"
echo "== routinator $sample (iters=$iters warmup=$warm hz=$PROFILE_HZ)"
# $taskset_prefix is intentionally unquoted (empty -> no-op).
$taskset_prefix "$ROUT_BIN" \
--target decode_only \
--manifest "$fixture" \
--issuer "$ROUT_ISSUER" \
--iterations "$iters" \
--repeats 1 \
--warmup-iterations "$warm" \
--strict false \
--profile-hz "$PROFILE_HZ" \
--flamegraph "$svg" \
--hotspots "$tsv" \
>/dev/null
}
echo "[3/3] Profile samples..."
# Choose iterations so each capture runs ~10-20s serially.
# Warmup is 0 here: perf samples the whole process, so warmup decodes would
# pollute the profile rather than stabilize it.
profile_ours small-01 200000 0
profile_routinator small-01 200000 0
profile_ours large-02 50000 0
profile_routinator large-02 50000 0
profile_ours xlarge-02 5000 0
profile_routinator xlarge-02 5000 0
echo "Done."
echo "- Flamegraphs: $OUT_FLAME/"
echo "- Hotspots: $OUT_HOT/"
echo "- Perf data: $OUT_PERF/"

File diff suppressed because it is too large Load Diff

View File

@ -4,8 +4,6 @@ use crate::data_model::rc::ResourceCertificate;
use crate::data_model::signed_object::{ use crate::data_model::signed_object::{
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectParseError, SignedObjectValidateError, RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectParseError, SignedObjectValidateError,
}; };
use der_parser::ber::BerObjectContent;
use der_parser::der::{DerObject, Tag, parse_der};
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct ManifestObject { pub struct ManifestObject {
@ -28,7 +26,10 @@ pub struct ManifestEContent {
pub this_update: UtcTime, pub this_update: UtcTime,
pub next_update: UtcTime, pub next_update: UtcTime,
pub file_hash_alg: String, pub file_hash_alg: String,
pub files: Vec<FileAndHash>, /// DER-encoded content bytes of `Manifest.fileList` (SEQUENCE OF FileAndHash).
pub file_list_der: Vec<u8>,
/// Count of FileAndHash entries in `fileList`.
pub file_count: usize,
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
@ -39,7 +40,7 @@ pub struct ManifestEContentParsed {
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct FileAndHash { pub struct FileAndHash {
pub file_name: String, pub file_name: String,
pub hash_bytes: Vec<u8>, pub hash_bytes: [u8; 32],
} }
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
@ -246,7 +247,8 @@ impl ManifestObject {
impl ManifestEContent { impl ManifestEContent {
/// Parse step of scheme A (`parse → validate → verify`). /// Parse step of scheme A (`parse → validate → verify`).
pub fn parse_der(der: &[u8]) -> Result<ManifestEContentParsed, ManifestParseError> { pub fn parse_der(der: &[u8]) -> Result<ManifestEContentParsed, ManifestParseError> {
let (rem, _obj) = parse_der(der).map_err(|e| ManifestParseError::Parse(e.to_string()))?; let (_tag, _value, rem) =
der_take_tlv(der).map_err(|e| ManifestParseError::Parse(e))?;
if !rem.is_empty() { if !rem.is_empty() {
return Err(ManifestParseError::TrailingBytes(rem.len())); return Err(ManifestParseError::TrailingBytes(rem.len()));
} }
@ -265,6 +267,18 @@ impl ManifestEContent {
pub fn decode_der(der: &[u8]) -> Result<Self, ManifestDecodeError> { pub fn decode_der(der: &[u8]) -> Result<Self, ManifestDecodeError> {
Ok(Self::parse_der(der)?.validate_profile()?) Ok(Self::parse_der(der)?.validate_profile()?)
} }
/// Parse and return the manifest fileList.
///
/// Note: `ManifestEContent` is profile-validated when produced via `decode_der()`, so this
/// should only fail due to internal inconsistencies (or if constructed manually).
pub fn parse_files(&self) -> Result<Vec<FileAndHash>, ManifestProfileError> {
parse_file_list_sha256_fast(&self.file_list_der)
}
pub fn file_count(&self) -> usize {
self.file_count
}
} }
impl ManifestObjectParsed { impl ManifestObjectParsed {
@ -292,65 +306,215 @@ impl ManifestObjectParsed {
impl ManifestEContentParsed { impl ManifestEContentParsed {
pub fn validate_profile(self) -> Result<ManifestEContent, ManifestProfileError> { pub fn validate_profile(self) -> Result<ManifestEContent, ManifestProfileError> {
let (_rem, obj) = decode_manifest_econtent_fast(&self.der)
parse_der(&self.der).map_err(|e| ManifestProfileError::ProfileDecode(e.to_string()))?; }
}
let seq = obj
.as_sequence() fn validate_file_name_bytes(bytes: &[u8]) -> Result<(), ManifestProfileError> {
.map_err(|e| ManifestProfileError::ProfileDecode(e.to_string()))?; // RFC 9286 §4.2.2:
if seq.len() != 5 && seq.len() != 6 { // 1+ chars from a-zA-Z0-9-_ , then '.', then 3-letter extension.
return Err(ManifestProfileError::InvalidManifestSequenceLen(seq.len())); if bytes.len() < 5 {
return Err(ManifestProfileError::InvalidFileName(
String::from_utf8_lossy(bytes).into_owned(),
));
};
// "followed by a single . (DOT), followed by a three letter extension"
// -> the dot must be exactly 4 bytes from the end.
let dot_pos = bytes.len() - 4;
if bytes[dot_pos] != b'.' {
return Err(ManifestProfileError::InvalidFileName(
String::from_utf8_lossy(bytes).into_owned(),
));
}
#[inline(always)]
fn valid_base_char(b: u8) -> bool {
// RFC 9286 allowed set: a-zA-Z0-9-_
(b'0'..=b'9').contains(&b)
|| (b'a'..=b'z').contains(&b)
|| (b'A'..=b'Z').contains(&b)
|| b == b'-'
|| b == b'_'
}
for &b in &bytes[..dot_pos] {
if (b & 0x80) != 0 || !valid_base_char(b) {
return Err(ManifestProfileError::InvalidFileName(
String::from_utf8_lossy(bytes).into_owned(),
));
}
}
let e0 = bytes[dot_pos + 1];
let e1 = bytes[dot_pos + 2];
let e2 = bytes[dot_pos + 3];
if (e0 & 0x80) != 0 || (e1 & 0x80) != 0 || (e2 & 0x80) != 0 {
return Err(ManifestProfileError::InvalidFileName(
String::from_utf8_lossy(bytes).into_owned(),
));
}
#[inline(always)]
fn lower_if_alpha(b: u8) -> Option<u8> {
match b {
b'a'..=b'z' => Some(b),
b'A'..=b'Z' => Some(b + 32),
_ => None,
}
}
let Some(l0) = lower_if_alpha(e0) else {
return Err(ManifestProfileError::InvalidFileName(
String::from_utf8_lossy(bytes).into_owned(),
));
};
let Some(l1) = lower_if_alpha(e1) else {
return Err(ManifestProfileError::InvalidFileName(
String::from_utf8_lossy(bytes).into_owned(),
));
};
let Some(l2) = lower_if_alpha(e2) else {
return Err(ManifestProfileError::InvalidFileName(
String::from_utf8_lossy(bytes).into_owned(),
));
};
match [l0, l1, l2] {
// Full IANA list (see `common.rs`).
[b'a', b's', b'a']
| [b'c', b'e', b'r']
| [b'c', b'r', b'l']
| [b'g', b'b', b'r']
| [b'm', b'f', b't']
| [b'r', b'o', b'a']
| [b's', b'i', b'g']
| [b't', b'a', b'k'] => Ok(()),
_ => Err(ManifestProfileError::InvalidFileName(
String::from_utf8_lossy(bytes).into_owned(),
)),
}
}
fn decode_manifest_econtent_fast(der: &[u8]) -> Result<ManifestEContent, ManifestProfileError> {
let (tag, mut seq_content, rem) = der_take_tlv(der)
.map_err(|e| ManifestProfileError::ProfileDecode(format!("DER decode error: {e}")))?;
if !rem.is_empty() {
return Err(ManifestProfileError::ProfileDecode(format!(
"trailing bytes after DER object: {} bytes",
rem.len()
)));
}
if tag != 0x30 {
return Err(ManifestProfileError::ProfileDecode(
"Manifest eContent must be SEQUENCE".into(),
));
}
let seq_len = der_count_elements(seq_content)
.map_err(|e| ManifestProfileError::ProfileDecode(e))?;
if seq_len != 5 && seq_len != 6 {
return Err(ManifestProfileError::InvalidManifestSequenceLen(seq_len));
} }
let mut idx = 0;
let mut version: u32 = 0; let mut version: u32 = 0;
if seq.len() == 6 { if seq_len == 6 {
let v_obj = &seq[0]; let Some(&first_tag) = seq_content.first() else {
if v_obj.class() != der_parser::ber::Class::ContextSpecific || v_obj.tag() != Tag(0) { return Err(ManifestProfileError::InvalidManifestSequenceLen(0));
};
if first_tag != 0xA0 {
return Err(ManifestProfileError::ProfileDecode( return Err(ManifestProfileError::ProfileDecode(
"Manifest.version must be [0] EXPLICIT INTEGER".into(), "Manifest.version must be [0] EXPLICIT INTEGER".into(),
)); ));
} }
let inner_der = v_obj
.as_slice() let (_cs_tag, cs_value, after) = der_take_tlv(seq_content).map_err(|e| {
.map_err(|e| ManifestProfileError::ProfileDecode(e.to_string()))?; ManifestProfileError::ProfileDecode(format!("Manifest.version decode error: {e}"))
let (rem, inner) = parse_der(inner_der) })?;
.map_err(|e| ManifestProfileError::ProfileDecode(e.to_string()))?; seq_content = after;
if !rem.is_empty() {
let (inner_tag, inner_value, inner_rem) = der_take_tlv(cs_value).map_err(|e| {
ManifestProfileError::ProfileDecode(format!("Manifest.version inner decode error: {e}"))
})?;
if !inner_rem.is_empty() {
return Err(ManifestProfileError::ProfileDecode( return Err(ManifestProfileError::ProfileDecode(
"trailing bytes inside Manifest.version".into(), "trailing bytes inside Manifest.version".into(),
)); ));
} }
let v = inner if inner_tag != 0x02 {
.as_u64() return Err(ManifestProfileError::ProfileDecode(
.map_err(|e| ManifestProfileError::ProfileDecode(e.to_string()))?; "Manifest.version must be [0] EXPLICIT INTEGER".into(),
));
}
let v = der_integer_to_u64(inner_value).map_err(|e| {
ManifestProfileError::ProfileDecode(format!("Manifest.version decode error: {e}"))
})?;
if v != 0 { if v != 0 {
return Err(ManifestProfileError::InvalidManifestVersion(v)); return Err(ManifestProfileError::InvalidManifestVersion(v));
} }
version = 0; version = 0;
idx = 1;
} }
let manifest_number = parse_manifest_number(&seq[idx])?; let (mn_tag, mn_value, after) = der_take_tlv(seq_content).map_err(|e| {
idx += 1; ManifestProfileError::ProfileDecode(format!("Manifest.manifestNumber decode error: {e}"))
})?;
seq_content = after;
if mn_tag != 0x02 {
return Err(ManifestProfileError::InvalidManifestNumber);
}
let manifest_number = der_integer_to_bigunsigned(mn_value)?;
let this_update = let (tu_tag, tu_value, after) = der_take_tlv(seq_content).map_err(|e| {
parse_generalized_time(&seq[idx], ManifestProfileError::InvalidThisUpdate)?; ManifestProfileError::ProfileDecode(format!("Manifest.thisUpdate decode error: {e}"))
idx += 1; })?;
let next_update = seq_content = after;
parse_generalized_time(&seq[idx], ManifestProfileError::InvalidNextUpdate)?; if tu_tag != 0x18 {
idx += 1; return Err(ManifestProfileError::InvalidThisUpdate);
}
let this_update = parse_generalized_time_bytes(tu_value)
.map_err(|e| ManifestProfileError::ProfileDecode(e))?;
let (nu_tag, nu_value, after) = der_take_tlv(seq_content).map_err(|e| {
ManifestProfileError::ProfileDecode(format!("Manifest.nextUpdate decode error: {e}"))
})?;
seq_content = after;
if nu_tag != 0x18 {
return Err(ManifestProfileError::InvalidNextUpdate);
}
let next_update = parse_generalized_time_bytes(nu_value)
.map_err(|e| ManifestProfileError::ProfileDecode(e))?;
if next_update <= this_update { if next_update <= this_update {
return Err(ManifestProfileError::NextUpdateNotLater); return Err(ManifestProfileError::NextUpdateNotLater);
} }
let file_hash_alg = oid_to_string(&seq[idx])?; let (oid_tag, oid_value, after) = der_take_tlv(seq_content).map_err(|e| {
idx += 1; ManifestProfileError::ProfileDecode(format!("Manifest.fileHashAlg decode error: {e}"))
if file_hash_alg != OID_SHA256 { })?;
return Err(ManifestProfileError::InvalidFileHashAlg(file_hash_alg)); seq_content = after;
if oid_tag != 0x06 {
return Err(ManifestProfileError::ProfileDecode(
"Manifest.fileHashAlg must be OBJECT IDENTIFIER".into(),
));
}
if !oid_content_is_sha256(oid_value) {
return Err(ManifestProfileError::InvalidFileHashAlg(
oid_content_to_string(oid_value),
));
} }
let files = parse_file_list_sha256(&seq[idx])?; let (fl_tag, fl_value, after) = der_take_tlv(seq_content).map_err(|e| {
ManifestProfileError::ProfileDecode(format!("Manifest.fileList decode error: {e}"))
})?;
seq_content = after;
if fl_tag != 0x30 {
return Err(ManifestProfileError::InvalidFileList);
}
let file_count = validate_file_list_sha256_fast(fl_value)?;
let file_list_der = fl_value.to_vec();
if !seq_content.is_empty() {
return Err(ManifestProfileError::InvalidManifestSequenceLen(seq_len));
}
Ok(ManifestEContent { Ok(ManifestEContent {
version, version,
@ -358,45 +522,112 @@ impl ManifestEContentParsed {
this_update, this_update,
next_update, next_update,
file_hash_alg: OID_SHA256.to_string(), file_hash_alg: OID_SHA256.to_string(),
files, file_list_der,
file_count,
}) })
} }
fn validate_file_list_sha256_fast(content: &[u8]) -> Result<usize, ManifestProfileError> {
let mut cur = content;
let mut count: usize = 0;
while !cur.is_empty() {
let (tag, value, rem) = der_take_tlv(cur).map_err(|e| {
ManifestProfileError::ProfileDecode(format!("fileList entry decode error: {e}"))
})?;
cur = rem;
if tag != 0x30 {
return Err(ManifestProfileError::InvalidFileAndHash);
} }
fn parse_manifest_number(obj: &DerObject<'_>) -> Result<BigUnsigned, ManifestProfileError> { let mut entry = value;
let n = obj let (fn_tag, fn_value, entry_rem) = der_take_tlv(entry).map_err(|e| {
.as_biguint() ManifestProfileError::ProfileDecode(format!("fileList fileName decode error: {e}"))
.map_err(|_e| ManifestProfileError::InvalidManifestNumber)?; })?;
let out = BigUnsigned::from_biguint(&n); entry = entry_rem;
if out.bytes_be.len() > 20 { if fn_tag != 0x16 {
return Err(ManifestProfileError::ManifestNumberTooLong); return Err(ManifestProfileError::InvalidFileAndHash);
} }
Ok(out) if entry.is_empty() {
return Err(ManifestProfileError::InvalidFileAndHash);
}
validate_file_name_bytes(fn_value)?;
let (hash_tag, hash_value, entry_rem) = der_take_tlv(entry).map_err(|_e| {
// Missing second element should map to "SEQUENCE of 2" shape error.
ManifestProfileError::InvalidFileAndHash
})?;
entry = entry_rem;
if !entry.is_empty() {
return Err(ManifestProfileError::InvalidFileAndHash);
}
if hash_tag != 0x03 {
return Err(ManifestProfileError::InvalidHashType);
}
if hash_value.is_empty() {
return Err(ManifestProfileError::InvalidHashLength(0));
}
let unused_bits = hash_value[0];
if unused_bits != 0 {
return Err(ManifestProfileError::HashNotOctetAligned);
}
let bits = &hash_value[1..];
if bits.len() != 32 {
return Err(ManifestProfileError::InvalidHashLength(bits.len()));
}
count += 1;
}
Ok(count)
} }
fn parse_generalized_time( fn parse_file_list_sha256_fast(content: &[u8]) -> Result<Vec<FileAndHash>, ManifestProfileError> {
obj: &DerObject<'_>, // Heuristic initial capacity (avoid a full pre-scan, which is expensive for xlarge manifests).
err: ManifestProfileError, // Each FileAndHash entry is typically tens of bytes; 80 is a conservative average.
) -> Result<UtcTime, ManifestProfileError> { let est = (content.len() / 80).clamp(16, 4096);
match &obj.content { let mut cur = content;
BerObjectContent::GeneralizedTime(dt) => dt let mut out: Vec<FileAndHash> = Vec::with_capacity(est);
.to_datetime() while !cur.is_empty() {
.map_err(|e| ManifestProfileError::ProfileDecode(e.to_string())), let (tag, value, rem) = der_take_tlv(cur)
_ => Err(err), .map_err(|e| ManifestProfileError::ProfileDecode(format!("fileList entry decode error: {e}")))?;
cur = rem;
if tag != 0x30 {
return Err(ManifestProfileError::InvalidFileAndHash);
} }
let mut entry = value;
let (fn_tag, fn_value, entry_rem) = der_take_tlv(entry)
.map_err(|e| ManifestProfileError::ProfileDecode(format!("fileList fileName decode error: {e}")))?;
entry = entry_rem;
if fn_tag != 0x16 {
return Err(ManifestProfileError::InvalidFileAndHash);
} }
if entry.is_empty() {
return Err(ManifestProfileError::InvalidFileAndHash);
}
let file_name = validate_and_copy_file_name(fn_value)?;
fn parse_file_list_sha256(obj: &DerObject<'_>) -> Result<Vec<FileAndHash>, ManifestProfileError> { let (hash_tag, hash_value, entry_rem) = der_take_tlv(entry).map_err(|_e| {
let seq = obj // Missing second element should map to "SEQUENCE of 2" shape error.
.as_sequence() ManifestProfileError::InvalidFileAndHash
.map_err(|_e| ManifestProfileError::InvalidFileList)?; })?;
let mut out = Vec::with_capacity(seq.len()); entry = entry_rem;
for entry in seq { if !entry.is_empty() {
let (file_name, hash_bytes) = parse_file_and_hash(entry)?; return Err(ManifestProfileError::InvalidFileAndHash);
validate_file_name(&file_name)?;
if hash_bytes.len() != 32 {
return Err(ManifestProfileError::InvalidHashLength(hash_bytes.len()));
} }
if hash_tag != 0x03 {
return Err(ManifestProfileError::InvalidHashType);
}
if hash_value.is_empty() {
return Err(ManifestProfileError::InvalidHashLength(0));
}
let unused_bits = hash_value[0];
if unused_bits != 0 {
return Err(ManifestProfileError::HashNotOctetAligned);
}
let bits = &hash_value[1..];
if bits.len() != 32 {
return Err(ManifestProfileError::InvalidHashLength(bits.len()));
}
let mut hash_bytes = [0u8; 32];
hash_bytes.copy_from_slice(bits);
out.push(FileAndHash { out.push(FileAndHash {
file_name, file_name,
hash_bytes, hash_bytes,
@ -405,58 +636,346 @@ fn parse_file_list_sha256(obj: &DerObject<'_>) -> Result<Vec<FileAndHash>, Manif
Ok(out) Ok(out)
} }
fn parse_file_and_hash(obj: &DerObject<'_>) -> Result<(String, Vec<u8>), ManifestProfileError> { fn validate_and_copy_file_name(bytes: &[u8]) -> Result<String, ManifestProfileError> {
let seq = obj validate_file_name_bytes(bytes)?;
.as_sequence() Ok(unsafe { String::from_utf8_unchecked(bytes.to_vec()) })
.map_err(|e| ManifestProfileError::ProfileDecode(e.to_string()))?;
if seq.len() != 2 {
return Err(ManifestProfileError::InvalidFileAndHash);
}
let file_name = seq[0]
.as_str()
.map_err(|e| ManifestProfileError::ProfileDecode(e.to_string()))?
.to_string();
let (unused_bits, bits) = match &seq[1].content {
BerObjectContent::BitString(unused, bso) => (*unused, bso.data.to_vec()),
_ => return Err(ManifestProfileError::InvalidHashType),
};
if unused_bits != 0 {
return Err(ManifestProfileError::HashNotOctetAligned);
}
Ok((file_name, bits))
} }
fn validate_file_name(name: &str) -> Result<(), ManifestProfileError> { fn der_count_elements(mut input: &[u8]) -> Result<usize, String> {
// RFC 9286 §4.2.2: let mut count: usize = 0;
// 1+ chars from a-zA-Z0-9-_ , then '.', then 3-letter extension. while !input.is_empty() {
let Some((base, ext)) = name.rsplit_once('.') else { let (_tag, _value, rem) = der_take_tlv(input)?;
return Err(ManifestProfileError::InvalidFileName(name.to_string())); input = rem;
}; count += 1;
if base.is_empty() || ext.len() != 3 {
return Err(ManifestProfileError::InvalidFileName(name.to_string()));
} }
if !base Ok(count)
.bytes()
.all(|b| b.is_ascii_alphanumeric() || b == b'-' || b == b'_')
{
return Err(ManifestProfileError::InvalidFileName(name.to_string()));
}
if !ext.bytes().all(|b| b.is_ascii_alphabetic()) {
return Err(ManifestProfileError::InvalidFileName(name.to_string()));
}
let ext_lower = ext.to_ascii_lowercase();
if !crate::data_model::common::IANA_RPKI_REPOSITORY_FILENAME_EXTENSIONS
.iter()
.any(|&e| e == ext_lower)
{
return Err(ManifestProfileError::InvalidFileName(name.to_string()));
}
Ok(())
} }
fn oid_to_string(obj: &DerObject<'_>) -> Result<String, ManifestProfileError> { fn der_integer_to_u64(bytes: &[u8]) -> Result<u64, String> {
let oid = obj if bytes.is_empty() {
.as_oid() return Err("INTEGER empty".into());
.map_err(|e| ManifestProfileError::ProfileDecode(e.to_string()))?; }
Ok(oid.to_id_string()) // Reject negative (two's complement).
if bytes[0] & 0x80 != 0 {
return Err("INTEGER is negative".into());
}
if bytes.len() > 8 {
return Err("INTEGER too large".into());
}
let mut v: u64 = 0;
for &b in bytes {
v = (v << 8) | (b as u64);
}
Ok(v)
}
/// Decodes DER INTEGER content into a minimal big-endian unsigned value.
///
/// Rejects empty input, negative values (two's-complement MSB set), and
/// values whose minimal form exceeds 20 octets — the RFC 9286 bound on
/// manifestNumber. A leading 0x00 pad octet (used by DER to keep
/// non-negative values non-negative) is stripped.
fn der_integer_to_bigunsigned(bytes: &[u8]) -> Result<BigUnsigned, ManifestProfileError> {
    let (&first, _) = bytes
        .split_first()
        .ok_or(ManifestProfileError::InvalidManifestNumber)?;
    // Negative manifest numbers are invalid.
    if first & 0x80 != 0 {
        return Err(ManifestProfileError::InvalidManifestNumber);
    }
    // Count leading zero octets, always keeping at least the final octet so
    // the value 0 stays representable as [0x00].
    let leading_zeros = bytes[..bytes.len() - 1]
        .iter()
        .take_while(|&&b| b == 0)
        .count();
    let minimal = bytes[leading_zeros..].to_vec();
    if minimal.len() > 20 {
        return Err(ManifestProfileError::ManifestNumberTooLong);
    }
    Ok(BigUnsigned { bytes_be: minimal })
}
/// Parses DER GeneralizedTime content into a `UtcTime`.
///
/// Accepts "YYYYMMDDHHMMSSZ" and, deliberately more lenient than strict
/// DER, an optional fractional-seconds part: "YYYYMMDDHHMMSS.fff...Z".
/// Fractional digits beyond nanosecond precision are ignored.
fn parse_generalized_time_bytes(bytes: &[u8]) -> Result<UtcTime, String> {
    if !bytes.is_ascii() {
        return Err("GeneralizedTime not ASCII".into());
    }
    let text = std::str::from_utf8(bytes).map_err(|e| e.to_string())?;
    let Some(core) = text.strip_suffix('Z') else {
        return Err("GeneralizedTime must end with 'Z'".into());
    };
    let (main, frac) = match core.split_once('.') {
        Some((m, f)) => (m, Some(f)),
        None => (core, None),
    };
    if main.len() != 14 || !main.bytes().all(|b| b.is_ascii_digit()) {
        return Err("GeneralizedTime must be YYYYMMDDHHMMSS[.fff]Z".into());
    }
    let year: i32 = main[0..4].parse().map_err(|_| "bad year")?;
    let month: u8 = main[4..6].parse().map_err(|_| "bad month")?;
    let day: u8 = main[6..8].parse().map_err(|_| "bad day")?;
    let hour: u8 = main[8..10].parse().map_err(|_| "bad hour")?;
    let minute: u8 = main[10..12].parse().map_err(|_| "bad minute")?;
    let second: u8 = main[12..14].parse().map_err(|_| "bad second")?;
    // Fractional seconds: digit i (0-based) contributes 10^(8-i) ns, so at
    // most the first nine digits matter.
    let nanosecond: u32 = match frac {
        None => 0,
        Some(f) => {
            if f.is_empty() || !f.bytes().all(|b| b.is_ascii_digit()) {
                return Err("bad fractional seconds".into());
            }
            f.bytes()
                .take(9)
                .enumerate()
                .map(|(i, d)| u32::from(d - b'0') * 10u32.pow(8 - i as u32))
                .sum()
        }
    };
    let month = time::Month::try_from(month).map_err(|_| "bad month")?;
    let date =
        time::Date::from_calendar_date(year, month, day).map_err(|e| e.to_string())?;
    let clock =
        time::Time::from_hms_nano(hour, minute, second, nanosecond).map_err(|e| e.to_string())?;
    Ok(date.with_time(clock).assume_utc())
}
/// Returns true when `bytes` is the DER OID *content* encoding of
/// id-sha256 (2.16.840.1.101.3.4.2.1), with no extra trailing arcs.
fn oid_content_is_sha256(bytes: &[u8]) -> bool {
    const SHA256_ARCS: [u64; 9] = [2, 16, 840, 1, 101, 3, 4, 2, 1];
    // Iterator::eq checks both element equality and equal length, which
    // covers the old "no leftover arcs" check.
    oid_content_iter(bytes).eq(SHA256_ARCS.iter().copied())
}
/// Renders DER OID content bytes as dotted-decimal text ("2.5.4.3"), or a
/// placeholder when the content yields no arcs at all.
fn oid_content_to_string(bytes: &[u8]) -> String {
    let rendered: Vec<String> = oid_content_iter(bytes).map(|a| a.to_string()).collect();
    if rendered.is_empty() {
        "<invalid oid>".to_string()
    } else {
        rendered.join(".")
    }
}
/// Iterates the arcs of a DER OID *content* encoding (tag/length removed).
///
/// Every subidentifier is base-128 decoded (high bit = continuation). Per
/// X.690, the first subidentifier combines the first two arcs: values < 40
/// map to (0, v), values < 80 to (1, v - 40), everything else to (2, v - 80).
///
/// The previous implementation read only `bytes[0]` and used `/ 40` and
/// `% 40`, which mis-decoded first-byte values 120-127 (e.g. [120] = 2.40
/// came out as 3.0) and any multi-byte first subidentifier (e.g.
/// 2.999 = [0x88, 0x37]).
///
/// A trailing subidentifier whose continuation bit never clears is dropped,
/// matching the old tail behavior; arcs wider than 64 bits silently wrap,
/// as before.
fn oid_content_iter(bytes: &[u8]) -> impl Iterator<Item = u64> + '_ {
    let mut arcs: Vec<u64> = Vec::new();
    let mut value: u64 = 0;
    let mut first = true;
    for &b in bytes {
        value = (value << 7) | u64::from(b & 0x7F);
        if b & 0x80 != 0 {
            continue; // continuation bit: more base-128 digits follow
        }
        if first {
            first = false;
            // Split the combined first subidentifier into the leading two
            // arcs; the first arc is capped at 2 per X.690.
            let a0 = (value / 40).min(2);
            arcs.push(a0);
            arcs.push(value - 40 * a0);
        } else {
            arcs.push(value);
        }
        value = 0;
    }
    arcs.into_iter()
}
/// Splits one DER TLV off the front of `input`.
///
/// Returns `(tag, value, remainder)`. Restrictions (sufficient for the RPKI
/// profile checks in this module): low-tag-number form only, definite
/// lengths only, at most 8 length octets.
///
/// # Errors
/// Returns a description when the input is truncated, uses an unsupported
/// or invalid encoding, or declares a length that cannot fit in memory.
/// The previous version computed `hdr_len + len` unchecked, so a hostile
/// 8-octet length near `usize::MAX` overflowed (panic in debug builds,
/// wraparound in release); the length accumulation could also overflow
/// `usize` on 32-bit targets.
fn der_take_tlv(input: &[u8]) -> Result<(u8, &[u8], &[u8]), String> {
    if input.len() < 2 {
        return Err("truncated DER (need tag+len)".into());
    }
    let tag = input[0];
    if (tag & 0x1F) == 0x1F {
        return Err("high-tag-number form not supported".into());
    }
    let len0 = input[1];
    if len0 == 0x80 {
        return Err("indefinite length not allowed in DER".into());
    }
    let (len, hdr_len) = if len0 & 0x80 == 0 {
        (len0 as usize, 2usize)
    } else {
        let n = (len0 & 0x7F) as usize;
        if n == 0 || n > 8 {
            return Err("invalid DER length".into());
        }
        if input.len() < 2 + n {
            return Err("truncated DER (length bytes)".into());
        }
        // Accumulate in u64 so 8 length octets cannot overflow, then convert
        // checked to usize (usize may be narrower than u64 on 32-bit).
        let mut l: u64 = 0;
        for &b in &input[2..2 + n] {
            l = (l << 8) | u64::from(b);
        }
        let l = usize::try_from(l).map_err(|_| "DER length exceeds address space")?;
        (l, 2 + n)
    };
    // checked_add: a hostile length must not wrap the bounds comparison.
    let end = hdr_len
        .checked_add(len)
        .ok_or("DER length exceeds address space")?;
    if input.len() < end {
        return Err("truncated DER (value bytes)".into());
    }
    Ok((tag, &input[hdr_len..end], &input[end..]))
}
#[cfg(test)]
mod tests {
use super::*;
fn tlv(tag: u8, value: &[u8]) -> Vec<u8> {
assert!(value.len() < 128);
let mut out = Vec::with_capacity(2 + value.len());
out.push(tag);
out.push(value.len() as u8);
out.extend_from_slice(value);
out
}
fn tlv_long_len(tag: u8, len_bytes: &[u8], value: &[u8]) -> Vec<u8> {
let mut out = Vec::with_capacity(2 + len_bytes.len() + value.len());
out.push(tag);
out.push(0x80 | (len_bytes.len() as u8));
out.extend_from_slice(len_bytes);
out.extend_from_slice(value);
out
}
#[test]
fn der_take_tlv_supports_short_and_long_form_lengths_and_errors() {
let v = b"abc";
let der = tlv(0x04, v);
let (tag, val, rem) = der_take_tlv(&der).expect("short len");
assert_eq!(tag, 0x04);
assert_eq!(val, v);
assert!(rem.is_empty());
// Long-form length with 1 length byte (130).
let v = vec![b'x'; 130];
let der = tlv_long_len(0x04, &[0x82], &v);
let (tag, val, rem) = der_take_tlv(&der).expect("long len 1");
assert_eq!(tag, 0x04);
assert_eq!(val.len(), 130);
assert!(rem.is_empty());
// Long-form length with 2 length bytes (256).
let v = vec![b'y'; 256];
let der = tlv_long_len(0x04, &[0x01, 0x00], &v);
let (tag, val, rem) = der_take_tlv(&der).expect("long len 2");
assert_eq!(tag, 0x04);
assert_eq!(val.len(), 256);
assert!(rem.is_empty());
assert!(der_take_tlv(&[]).is_err());
assert!(der_take_tlv(&[0x04]).is_err());
// High-tag-number form not supported.
assert!(der_take_tlv(&[0x1F, 0x01, 0x00]).is_err());
// Indefinite length is not allowed in DER.
assert!(der_take_tlv(&[0x04, 0x80]).is_err());
// Invalid long-form length encoding.
assert!(der_take_tlv(&[0x04, 0x81]).is_err());
assert!(der_take_tlv(&[0x04, 0x89]).is_err());
}
#[test]
fn parse_generalized_time_bytes_accepts_fraction_and_rejects_invalid() {
let t = parse_generalized_time_bytes(b"20260101000000Z").expect("basic time");
assert_eq!(t.year(), 2026);
let t = parse_generalized_time_bytes(b"20260101000000.1Z").expect("fractional");
assert_eq!(t.nanosecond(), 100_000_000);
assert!(parse_generalized_time_bytes(b"20260101000000").is_err());
assert!(parse_generalized_time_bytes(b"20260101000000+00").is_err());
assert!(parse_generalized_time_bytes(b"2026010100000Z").is_err());
assert!(parse_generalized_time_bytes(b"20261301000000Z").is_err());
assert!(parse_generalized_time_bytes(b"20260132000000Z").is_err());
assert!(parse_generalized_time_bytes(&[0xFF]).is_err());
}
#[test]
fn oid_helpers_accept_sha256_and_format_invalid() {
// 2.16.840.1.101.3.4.2.1
let sha256_oid_content = [0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01];
assert!(oid_content_is_sha256(&sha256_oid_content));
assert!(!oid_content_is_sha256(&[0x55, 0x04, 0x03])); // 2.5.4.3
assert_eq!(oid_content_to_string(&[]), "<invalid oid>".to_string());
}
#[test]
fn validate_file_list_sha256_fast_counts_and_rejects_bad_hash() {
fn file_and_hash(file: &str, digest: u8) -> Vec<u8> {
let mut hash = vec![0u8; 33];
hash[0] = 0; // unused bits
for b in &mut hash[1..] {
*b = digest;
}
let ia5 = tlv(0x16, file.as_bytes());
let bit = tlv(0x03, &hash);
let mut entry = Vec::new();
entry.extend_from_slice(&ia5);
entry.extend_from_slice(&bit);
tlv(0x30, &entry)
}
let mut list = Vec::new();
list.extend_from_slice(&file_and_hash("A.cer", 0xAA));
list.extend_from_slice(&file_and_hash("B.roa", 0xBB));
assert_eq!(validate_file_list_sha256_fast(&list).expect("count"), 2);
// Wrong hash length.
let mut bad = Vec::new();
let ia5 = tlv(0x16, b"A.cer");
let bit = tlv(0x03, &[0u8; 2]); // too short
let mut entry = Vec::new();
entry.extend_from_slice(&ia5);
entry.extend_from_slice(&bit);
bad.extend_from_slice(&tlv(0x30, &entry));
assert!(matches!(
validate_file_list_sha256_fast(&bad),
Err(ManifestProfileError::InvalidHashLength(_))
));
}
} }

View File

@ -7,7 +7,7 @@ use crate::data_model::oid::{
use crate::data_model::rc::{ResourceCertificate, SubjectInfoAccess}; use crate::data_model::rc::{ResourceCertificate, SubjectInfoAccess};
use der_parser::ber::Class; use der_parser::ber::Class;
use der_parser::der::{DerObject, Tag, parse_der}; use der_parser::der::{DerObject, Tag, parse_der};
use sha2::{Digest, Sha256}; use ring::digest;
use x509_parser::prelude::FromDer; use x509_parser::prelude::FromDer;
use x509_parser::public_key::PublicKey; use x509_parser::public_key::PublicKey;
use x509_parser::x509::SubjectPublicKeyInfo; use x509_parser::x509::SubjectPublicKeyInfo;
@ -812,8 +812,8 @@ fn validate_signed_data_profile(
}); });
} }
let computed = Sha256::digest(&encap_content_info.econtent).to_vec(); let computed = digest::digest(&digest::SHA256, &encap_content_info.econtent);
if computed != signed_attrs.message_digest { if computed.as_ref() != signed_attrs.message_digest.as_slice() {
return Err(SignedObjectValidateError::MessageDigestMismatch); return Err(SignedObjectValidateError::MessageDigestMismatch);
} }

View File

@ -62,3 +62,52 @@ impl Fetcher for BlockingHttpFetcher {
self.fetch_bytes(uri) self.fetch_bytes(uri)
} }
} }
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::{Read, Write};
    use std::net::TcpListener;
    use std::thread;

    /// Binds an ephemeral localhost port and serves exactly one canned HTTP
    /// response on a background thread, then returns the base URL
    /// ("http://127.0.0.1:<port>/") to request.
    fn spawn_one_shot_http_server(status_line: &'static str, body: &'static [u8]) -> String {
        let listener = TcpListener::bind(("127.0.0.1", 0)).expect("bind");
        let addr = listener.local_addr().expect("addr");
        thread::spawn(move || {
            let (mut stream, _) = listener.accept().expect("accept");
            // Drain up to 1 KiB of the request; its content is irrelevant.
            let mut buf = [0u8; 1024];
            let _ = stream.read(&mut buf);
            let hdr = format!(
                "{status_line}\r\nContent-Length: {}\r\nConnection: close\r\n\r\n",
                body.len()
            );
            stream.write_all(hdr.as_bytes()).expect("write hdr");
            stream.write_all(body).expect("write body");
        });
        format!("http://{}/", addr)
    }

    #[test]
    fn fetch_bytes_returns_body_on_success() {
        let url = spawn_one_shot_http_server("HTTP/1.1 200 OK", b"hello");
        let http = BlockingHttpFetcher::new(HttpFetcherConfig {
            timeout: Duration::from_secs(2),
            ..HttpFetcherConfig::default()
        })
        .expect("http");
        let got = http.fetch_bytes(&url).expect("fetch");
        assert_eq!(got, b"hello");
    }

    #[test]
    fn fetch_bytes_rejects_non_success_status() {
        let url = spawn_one_shot_http_server("HTTP/1.1 404 Not Found", b"");
        let http = BlockingHttpFetcher::new(HttpFetcherConfig {
            timeout: Duration::from_secs(2),
            ..HttpFetcherConfig::default()
        })
        .expect("http");
        let err = http.fetch_bytes(&url).unwrap_err();
        // Only assert the stable marker substring, not the full message.
        assert!(err.contains("http status"), "{err}");
    }
}

View File

@ -80,3 +80,39 @@ fn normalize_rsync_base_uri(s: &str) -> String {
format!("{s}/") format!("{s}/")
} }
} }
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn local_dir_rsync_fetcher_collects_files_and_normalizes_base_uri() {
        let tmp = tempfile::tempdir().expect("tempdir");
        std::fs::create_dir_all(tmp.path().join("nested")).expect("mkdir");
        std::fs::write(tmp.path().join("a.mft"), b"a").expect("write");
        std::fs::write(tmp.path().join("nested").join("b.roa"), b"b").expect("write");
        let f = LocalDirRsyncFetcher::new(tmp.path());
        // Base URI is given without a trailing slash; the fetcher is
        // expected to normalize it before joining file paths.
        let mut objects = f
            .fetch_objects("rsync://example.net/repo")
            .expect("fetch_objects");
        // Directory iteration order is unspecified — sort for determinism.
        objects.sort_by(|(a, _), (b, _)| a.cmp(b));
        assert_eq!(objects.len(), 2);
        assert_eq!(objects[0].0, "rsync://example.net/repo/a.mft");
        assert_eq!(objects[0].1, b"a");
        assert_eq!(objects[1].0, "rsync://example.net/repo/nested/b.roa");
        assert_eq!(objects[1].1, b"b");
    }

    #[test]
    fn local_dir_rsync_fetcher_reports_read_dir_errors() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let missing = tmp.path().join("missing");
        let f = LocalDirRsyncFetcher::new(missing);
        let err = f.fetch_objects("rsync://example.net/repo").unwrap_err();
        match err {
            // Only variant today; the message text is platform-dependent,
            // so just require it to be non-empty.
            RsyncFetchError::Fetch(msg) => assert!(!msg.is_empty()),
        }
    }
}

View File

@ -142,3 +142,67 @@ fn walk_dir_collect(
} }
Ok(()) Ok(())
} }
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn normalize_rsync_base_uri_appends_slash_when_missing() {
        assert_eq!(
            normalize_rsync_base_uri("rsync://example.net/repo"),
            "rsync://example.net/repo/".to_string()
        );
        // Already-normalized input must be returned unchanged.
        assert_eq!(
            normalize_rsync_base_uri("rsync://example.net/repo/"),
            "rsync://example.net/repo/".to_string()
        );
    }

    #[test]
    fn walk_dir_collect_collects_files_and_normalizes_backslashes_in_uri() {
        let temp = tempfile::tempdir().expect("tempdir");
        let root = temp.path();
        std::fs::create_dir_all(root.join("sub")).expect("mkdir");
        std::fs::write(root.join("sub").join("a.cer"), b"x").expect("write");
        // A literal backslash in a file name must become '/' in the URI.
        // NOTE(review): a backslash file name cannot be created on Windows,
        // so this test assumes a Unix filesystem — confirm CI targets.
        std::fs::write(root.join("b\\c.mft"), b"y").expect("write backslash file");
        let mut out: Vec<(String, Vec<u8>)> = Vec::new();
        walk_dir_collect(root, root, "rsync://example.net/repo/", &mut out).expect("walk");
        out.sort_by(|a, b| a.0.cmp(&b.0));
        assert_eq!(out.len(), 2);
        assert_eq!(out[0].0, "rsync://example.net/repo/b/c.mft");
        assert_eq!(out[0].1, b"y");
        assert_eq!(out[1].0, "rsync://example.net/repo/sub/a.cer");
        assert_eq!(out[1].1, b"x");
    }

    #[test]
    fn system_rsync_fetcher_reports_spawn_and_exit_errors() {
        let dst = tempfile::tempdir().expect("tempdir");
        // 1) Spawn error.
        let f = SystemRsyncFetcher::new(SystemRsyncConfig {
            rsync_bin: PathBuf::from("/this/does/not/exist/rsync"),
            timeout: Duration::from_secs(1),
            extra_args: Vec::new(),
        });
        let e = f
            .run_rsync("rsync://example.net/repo/", dst.path())
            .expect_err("spawn must fail");
        assert!(e.contains("rsync spawn failed:"), "{e}");
        // 2) Non-zero exit status. `false` must be on PATH (Unix assumption).
        let f = SystemRsyncFetcher::new(SystemRsyncConfig {
            rsync_bin: PathBuf::from("false"),
            timeout: Duration::from_secs(1),
            extra_args: Vec::new(),
        });
        let e = f
            .run_rsync("rsync://example.net/repo/", dst.path())
            .expect_err("false must fail");
        assert!(e.contains("rsync failed:"), "{e}");
        assert!(e.contains("status="), "{e}");
    }
}

View File

@ -1,9 +1,18 @@
pub mod audit;
pub mod cli;
pub mod data_model; pub mod data_model;
#[cfg(feature = "full")]
pub mod audit;
#[cfg(feature = "full")]
pub mod cli;
#[cfg(feature = "full")]
pub mod fetch; pub mod fetch;
#[cfg(feature = "full")]
pub mod policy; pub mod policy;
#[cfg(feature = "full")]
pub mod report; pub mod report;
#[cfg(feature = "full")]
pub mod storage; pub mod storage;
#[cfg(feature = "full")]
pub mod sync; pub mod sync;
#[cfg(feature = "full")]
pub mod validation; pub mod validation;

View File

@ -288,3 +288,197 @@ fn collect_element_text(node: &roxmltree::Node<'_, '_>) -> Option<String> {
fn strip_all_ascii_whitespace(s: &str) -> String { fn strip_all_ascii_whitespace(s: &str) -> String {
s.chars().filter(|c| !c.is_ascii_whitespace()).collect() s.chars().filter(|c| !c.is_ascii_whitespace()).collect()
} }
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    /// Test double: serves fetches from an in-memory URI -> bytes map.
    struct MapFetcher {
        map: HashMap<String, Vec<u8>>,
    }

    impl Fetcher for MapFetcher {
        fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
            self.map
                .get(uri)
                .cloned()
                .ok_or_else(|| format!("not found: {uri}"))
        }
    }

    /// Builds a minimal RRDP notification document referencing one snapshot.
    fn notification_xml(session_id: &str, serial: u64, snapshot_uri: &str, snapshot_hash: &str) -> Vec<u8> {
        format!(
            r#"<notification xmlns="{RRDP_XMLNS}" version="1" session_id="{session_id}" serial="{serial}"><snapshot uri="{snapshot_uri}" hash="{snapshot_hash}"/></notification>"#
        )
        .into_bytes()
    }

    /// Builds an RRDP snapshot document publishing the given (uri, bytes)
    /// pairs; object contents are base64-encoded per the RRDP schema.
    fn snapshot_xml(session_id: &str, serial: u64, published: &[(&str, &[u8])]) -> Vec<u8> {
        let mut out = format!(
            r#"<snapshot xmlns="{RRDP_XMLNS}" version="1" session_id="{session_id}" serial="{serial}">"#
        );
        for (uri, bytes) in published {
            let b64 = base64::engine::general_purpose::STANDARD.encode(bytes);
            out.push_str(&format!(r#"<publish uri="{uri}">{b64}</publish>"#));
        }
        out.push_str("</snapshot>");
        out.into_bytes()
    }

    #[test]
    fn parse_notification_snapshot_rejects_non_ascii() {
        let mut xml = b"<notification/>".to_vec();
        // Append a non-ASCII byte to trigger the ASCII guard.
        xml.push(0x80);
        let err = parse_notification_snapshot(&xml).unwrap_err();
        assert!(matches!(err, RrdpError::NotAscii));
    }

    #[test]
    fn parse_notification_snapshot_parses_valid_minimal_notification() {
        let sid = "550e8400-e29b-41d4-a716-446655440000";
        let snapshot_uri = "https://example.net/snapshot.xml";
        let hash = "00".repeat(32);
        let xml = notification_xml(sid, 7, snapshot_uri, &hash);
        let n = parse_notification_snapshot(&xml).expect("parse");
        assert_eq!(n.session_id, Uuid::parse_str(sid).unwrap());
        assert_eq!(n.serial, 7);
        assert_eq!(n.snapshot_uri, snapshot_uri);
        assert_eq!(hex::encode(n.snapshot_hash_sha256), hash);
    }

    #[test]
    fn sync_from_notification_snapshot_applies_snapshot_and_stores_state() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let store = RocksStore::open(tmp.path()).expect("open rocksdb");
        let sid = "550e8400-e29b-41d4-a716-446655440000";
        let serial = 9u64;
        let notif_uri = "https://example.net/notification.xml";
        let snapshot_uri = "https://example.net/snapshot.xml";
        let snapshot = snapshot_xml(
            sid,
            serial,
            &[
                ("rsync://example.net/repo/a.mft", b"mft-bytes"),
                ("rsync://example.net/repo/b.roa", b"roa-bytes"),
            ],
        );
        // Notification carries the real snapshot hash so the sync succeeds.
        let snapshot_hash = hex::encode(sha2::Sha256::digest(&snapshot));
        let notif = notification_xml(sid, serial, snapshot_uri, &snapshot_hash);
        let fetcher = MapFetcher {
            map: HashMap::from([(snapshot_uri.to_string(), snapshot.clone())]),
        };
        let published = sync_from_notification_snapshot(&store, notif_uri, &notif, &fetcher)
            .expect("sync");
        assert_eq!(published, 2);
        let a = store
            .get_raw("rsync://example.net/repo/a.mft")
            .expect("get_raw")
            .expect("a present");
        assert_eq!(a, b"mft-bytes");
        let b = store
            .get_raw("rsync://example.net/repo/b.roa")
            .expect("get_raw")
            .expect("b present");
        assert_eq!(b, b"roa-bytes");
        // The persisted RRDP state must record the applied session/serial.
        let state_bytes = store
            .get_rrdp_state(notif_uri)
            .expect("get_rrdp_state")
            .expect("state present");
        let state = RrdpState::decode(&state_bytes).expect("decode state");
        assert_eq!(state.session_id, sid);
        assert_eq!(state.serial, serial);
    }

    #[test]
    fn sync_from_notification_snapshot_rejects_snapshot_hash_mismatch() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let store = RocksStore::open(tmp.path()).expect("open rocksdb");
        let sid = "550e8400-e29b-41d4-a716-446655440000";
        let serial = 1u64;
        let notif_uri = "https://example.net/notification.xml";
        let snapshot_uri = "https://example.net/snapshot.xml";
        let snapshot = snapshot_xml(sid, serial, &[("rsync://example.net/repo/a.mft", b"x")]);
        // All-zero hash deliberately disagrees with the snapshot bytes.
        let notif = notification_xml(sid, serial, snapshot_uri, &"00".repeat(32));
        let fetcher = MapFetcher {
            map: HashMap::from([(snapshot_uri.to_string(), snapshot)]),
        };
        let err = sync_from_notification_snapshot(&store, notif_uri, &notif, &fetcher).unwrap_err();
        assert!(matches!(err, RrdpSyncError::Rrdp(RrdpError::SnapshotHashMismatch)));
    }

    #[test]
    fn apply_snapshot_rejects_session_id_and_serial_mismatch() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let store = RocksStore::open(tmp.path()).expect("open rocksdb");
        let expected_sid = Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap();
        // Differs from expected_sid only in the final digit.
        let got_sid = "550e8400-e29b-41d4-a716-446655440001";
        let snapshot = snapshot_xml(got_sid, 2, &[("rsync://example.net/repo/a.mft", b"x")]);
        let err = apply_snapshot(&store, &snapshot, expected_sid, 2).unwrap_err();
        assert!(matches!(
            err,
            RrdpSyncError::Rrdp(RrdpError::SnapshotSessionIdMismatch { .. })
        ));
        // Matching session, wrong serial (3 vs expected 2).
        let snapshot = snapshot_xml(expected_sid.to_string().as_str(), 3, &[("rsync://example.net/repo/a.mft", b"x")]);
        let err = apply_snapshot(&store, &snapshot, expected_sid, 2).unwrap_err();
        assert!(matches!(
            err,
            RrdpSyncError::Rrdp(RrdpError::SnapshotSerialMismatch { .. })
        ));
    }

    #[test]
    fn strip_all_ascii_whitespace_removes_newlines_and_spaces() {
        assert_eq!(strip_all_ascii_whitespace(" a \n b\tc "), "abc");
    }

    #[test]
    fn apply_snapshot_reports_publish_errors() {
        let tmp = tempfile::tempdir().expect("tempdir");
        let store = RocksStore::open(tmp.path()).expect("open rocksdb");
        let sid = Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap();
        // Missing publish/@uri
        let xml = format!(
            r#"<snapshot xmlns="{RRDP_XMLNS}" version="1" session_id="{sid}" serial="1"><publish>AA==</publish></snapshot>"#
        )
        .into_bytes();
        let err = apply_snapshot(&store, &xml, sid, 1).unwrap_err();
        assert!(matches!(err, RrdpSyncError::Rrdp(RrdpError::PublishUriMissing)));
        // Missing base64 content (no text nodes).
        let xml = format!(
            r#"<snapshot xmlns="{RRDP_XMLNS}" version="1" session_id="{sid}" serial="1"><publish uri="rsync://example.net/repo/a.cer"></publish></snapshot>"#
        )
        .into_bytes();
        let err = apply_snapshot(&store, &xml, sid, 1).unwrap_err();
        assert!(matches!(
            err,
            RrdpSyncError::Rrdp(RrdpError::PublishContentMissing)
        ));
        // Invalid base64 content.
        let xml = format!(
            r#"<snapshot xmlns="{RRDP_XMLNS}" version="1" session_id="{sid}" serial="1"><publish uri="rsync://example.net/repo/a.cer">!!!</publish></snapshot>"#
        )
        .into_bytes();
        let err = apply_snapshot(&store, &xml, sid, 1).unwrap_err();
        assert!(matches!(
            err,
            RrdpSyncError::Rrdp(RrdpError::PublishBase64(_))
        ));
    }
}

View File

@ -260,13 +260,17 @@ fn revalidate_cached_pack_with_current_time(
.map(|f| (f.rsync_uri.as_str(), f)) .map(|f| (f.rsync_uri.as_str(), f))
.collect(); .collect();
for entry in &manifest.manifest.files { let entries = manifest
.manifest
.parse_files()
.map_err(|e| ManifestFreshError::Decode(ManifestDecodeError::Validate(e)))?;
for entry in &entries {
let rsync_uri = let rsync_uri =
join_rsync_dir_and_file(&pack.publication_point_rsync_uri, &entry.file_name); join_rsync_dir_and_file(&pack.publication_point_rsync_uri, entry.file_name.as_str());
let Some(file) = by_uri.get(rsync_uri.as_str()) else { let Some(file) = by_uri.get(rsync_uri.as_str()) else {
return Err(ManifestCachedError::CachedMissingFile { rsync_uri }); return Err(ManifestCachedError::CachedMissingFile { rsync_uri });
}; };
if file.sha256.as_slice() != entry.hash_bytes.as_slice() { if file.sha256.as_slice() != entry.hash_bytes.as_ref() {
return Err(ManifestCachedError::CachedHashMismatch { rsync_uri }); return Err(ManifestCachedError::CachedHashMismatch { rsync_uri });
} }
} }
@ -388,9 +392,11 @@ fn try_build_fresh_pack(
} }
} }
let mut files = Vec::with_capacity(manifest.manifest.files.len()); let entries = manifest.manifest.parse_files().map_err(ManifestDecodeError::Validate)?;
for entry in &manifest.manifest.files { let mut files = Vec::with_capacity(manifest.manifest.file_count());
let rsync_uri = join_rsync_dir_and_file(publication_point_rsync_uri, &entry.file_name); for entry in &entries {
let rsync_uri =
join_rsync_dir_and_file(publication_point_rsync_uri, entry.file_name.as_str());
let bytes = store let bytes = store
.get_raw(&rsync_uri) .get_raw(&rsync_uri)
.map_err(|_e| ManifestFreshError::MissingFile { .map_err(|_e| ManifestFreshError::MissingFile {
@ -401,7 +407,7 @@ fn try_build_fresh_pack(
})?; })?;
let computed = sha2::Sha256::digest(&bytes); let computed = sha2::Sha256::digest(&bytes);
if computed.as_slice() != entry.hash_bytes.as_slice() { if computed.as_slice() != entry.hash_bytes.as_ref() {
return Err(ManifestFreshError::HashMismatch { rsync_uri }); return Err(ManifestFreshError::HashMismatch { rsync_uri });
} }

View File

@ -0,0 +1,320 @@
use rpki::data_model::manifest::ManifestObject;
use std::path::{Path, PathBuf};
use std::time::Instant;
/// Directory holding the selected `.mft` benchmark samples, resolved
/// against the crate root captured at compile time.
fn default_samples_dir() -> PathBuf {
    let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    dir.push("tests/benchmark/selected_der");
    dir
}
/// Collects every `*.mft` file directly under `dir` as a named sample,
/// sorted by name for deterministic ordering.
///
/// Panics if the directory itself cannot be read; individual unreadable
/// entries are silently skipped (matching `flatten` on `ReadDir`).
fn read_samples(dir: &Path) -> Vec<Sample> {
    let entries =
        std::fs::read_dir(dir).unwrap_or_else(|e| panic!("read_dir {}: {e}", dir.display()));
    let mut samples: Vec<Sample> = entries
        .flatten()
        .filter_map(|ent| {
            let path = ent.path();
            if path.extension().and_then(|s| s.to_str()) != Some("mft") {
                return None;
            }
            // File stem is the sample's display name; non-UTF-8 stems get a
            // placeholder rather than being dropped.
            let name = path
                .file_stem()
                .and_then(|s| s.to_str())
                .unwrap_or("unknown")
                .to_string();
            Some(Sample { name, path })
        })
        .collect();
    samples.sort_by(|a, b| a.name.cmp(&b.name));
    samples
}
/// A benchmark input: a sample's display name plus the path to its
/// DER-encoded manifest file.
#[derive(Clone, Debug)]
struct Sample {
    // File stem (e.g. "small-01"); used for BENCH_SAMPLE filtering and as
    // the row label in reports.
    name: String,
    // Path to the `.mft` file on disk.
    path: PathBuf,
}
/// Reads `name` from the environment as a `u64`, falling back to `default`
/// when the variable is unset, non-UTF-8, or fails to parse.
fn env_u64(name: &str, default: u64) -> u64 {
    match std::env::var(name) {
        Ok(raw) => raw.parse::<u64>().unwrap_or(default),
        Err(_) => default,
    }
}
/// Reads `name` from the environment as a `u64`; `None` when the variable
/// is unset or does not parse.
fn env_u64_opt(name: &str) -> Option<u64> {
    let raw = std::env::var(name).ok()?;
    raw.parse::<u64>().ok()
}
/// True when `name` is set to an accepted truthy literal.
///
/// Exactly "1", "true", "TRUE", "yes", or "YES" count; mixed case
/// ("True", "Yes"), anything else, or an unset variable is false.
fn env_bool(name: &str) -> bool {
    const TRUTHY: [&str; 5] = ["1", "true", "TRUE", "yes", "YES"];
    std::env::var(name)
        .map(|v| TRUTHY.contains(&v.as_str()))
        .unwrap_or(false)
}
/// Reads `name` from the environment, treating unset or whitespace-only
/// values as absent. The returned string keeps its original whitespace.
fn env_string(name: &str) -> Option<String> {
    match std::env::var(name) {
        Ok(s) if !s.trim().is_empty() => Some(s),
        _ => None,
    }
}
/// Makes `s` safe inside a Markdown table cell: pipes become `\|` and
/// newlines collapse to single spaces.
fn escape_md(s: &str) -> String {
    let mut out = String::with_capacity(s.len());
    for ch in s.chars() {
        match ch {
            '|' => out.push_str("\\|"),
            '\n' => out.push(' '),
            other => out.push(other),
        }
    }
    out
}
/// Ensures the parent directory of `path` exists, creating intermediate
/// directories as needed. A path with no parent is a no-op; filesystem
/// errors panic (benchmark helper — failures should be loud).
fn create_parent_dirs(path: &Path) {
    let Some(parent) = path.parent() else { return };
    if let Err(e) = std::fs::create_dir_all(parent) {
        panic!("create_dir_all {}: {e}", parent.display());
    }
}
/// Manual benchmark: decodes + profile-validates every selected `.mft`
/// sample, prints a Markdown table, and optionally writes MD/JSON reports.
///
/// Controlled entirely through env vars: BENCH_DIR, BENCH_SAMPLE,
/// BENCH_ITERS (fixed per-round count), BENCH_WARMUP_ITERS, BENCH_ROUNDS,
/// BENCH_MIN_ROUND_MS, BENCH_MAX_ITERS, BENCH_VERBOSE, BENCH_OUT_MD,
/// BENCH_OUT_JSON. `#[ignore]` keeps it out of normal test runs.
#[test]
#[ignore = "manual performance benchmark; prints Markdown table"]
fn manifest_decode_profile_benchmark_selected_der() {
    let dir = env_string("BENCH_DIR")
        .map(PathBuf::from)
        .unwrap_or_else(default_samples_dir);
    let sample_filter = env_string("BENCH_SAMPLE");
    let fixed_iters = env_u64_opt("BENCH_ITERS");
    let warmup_iters = env_u64("BENCH_WARMUP_ITERS", 100);
    let rounds = env_u64("BENCH_ROUNDS", 5);
    let min_round_ms = env_u64("BENCH_MIN_ROUND_MS", 200);
    let max_adaptive_iters = env_u64("BENCH_MAX_ITERS", 1_000_000);
    let verbose = env_bool("BENCH_VERBOSE");
    let out_md = env_string("BENCH_OUT_MD").map(|p| PathBuf::from(p));
    let out_json = env_string("BENCH_OUT_JSON").map(|p| PathBuf::from(p));
    // Fail fast on nonsensical knob values.
    if let Some(n) = fixed_iters {
        assert!(n >= 1, "BENCH_ITERS must be >= 1");
    }
    assert!(rounds >= 1, "BENCH_ROUNDS must be >= 1");
    assert!(min_round_ms >= 1, "BENCH_MIN_ROUND_MS must be >= 1");
    assert!(max_adaptive_iters >= 1, "BENCH_MAX_ITERS must be >= 1");
    let mut samples = read_samples(&dir);
    assert!(
        !samples.is_empty(),
        "no .mft files found under: {}",
        dir.display()
    );
    // Optional exact-name filter (matches the file stem).
    if let Some(filter) = sample_filter.as_deref() {
        samples.retain(|s| s.name == filter);
        assert!(!samples.is_empty(), "no sample matched BENCH_SAMPLE={filter}");
    }
    // Header: echo the effective configuration so the output is
    // self-describing when copy-pasted.
    println!("# Manifest decode + profile validate benchmark (debug build)");
    println!();
    println!("- dir: {}", dir.display());
    if let Some(n) = fixed_iters {
        println!("- iters: {} (fixed)", n);
    } else {
        println!(
            "- warmup: {} iters, rounds: {}, min_round: {}ms (adaptive iters, max {})",
            warmup_iters, rounds, min_round_ms, max_adaptive_iters
        );
    }
    if let Some(filter) = sample_filter.as_deref() {
        println!("- sample: {}", filter);
    }
    if verbose {
        println!("- verbose: true");
    }
    if let Some(p) = out_md.as_ref() {
        println!("- out_md: {}", p.display());
    }
    if let Some(p) = out_json.as_ref() {
        println!("- out_json: {}", p.display());
    }
    println!();
    println!("Samples:");
    for s in &samples {
        println!("- {}", s.name);
    }
    println!();
    println!("| sample | file_count | avg ns/op | ops/s |");
    println!("|---|---:|---:|---:|");
    let mut rows: Vec<ResultRow> = Vec::with_capacity(samples.len());
    for s in samples {
        let bytes =
            std::fs::read(&s.path).unwrap_or_else(|e| panic!("read {}: {e}", s.path.display()));
        // One untimed decode to get the fileList size for the report row.
        let file_count = ManifestObject::decode_der(bytes.as_slice())
            .unwrap_or_else(|e| panic!("decode {}: {e}", s.name))
            .manifest
            .file_count();
        // Warm-up: exercise the exact decode path but don't time it.
        for _ in 0..warmup_iters {
            let input = std::hint::black_box(bytes.as_slice());
            let decoded = ManifestObject::decode_der(input).expect("decode");
            std::hint::black_box(decoded);
        }
        let mut per_round_ns_per_op = Vec::with_capacity(rounds as usize);
        for round in 0..rounds {
            // Either a fixed iteration count or one sized so the round runs
            // at least BENCH_MIN_ROUND_MS.
            let iters = if let Some(n) = fixed_iters {
                n
            } else {
                choose_iters_adaptive(
                    bytes.as_slice(),
                    min_round_ms,
                    max_adaptive_iters,
                )
            };
            let start = Instant::now();
            for _ in 0..iters {
                // black_box prevents the optimizer from hoisting or
                // eliding the decode under test.
                let input = std::hint::black_box(bytes.as_slice());
                let decoded = ManifestObject::decode_der(input).expect("decode");
                std::hint::black_box(decoded);
            }
            let elapsed = start.elapsed();
            let total_ns = elapsed.as_secs_f64() * 1e9;
            let ns_per_op = total_ns / (iters as f64);
            per_round_ns_per_op.push(ns_per_op);
            if verbose {
                println!(
                    "# {} round {}: iters={} total_ms={:.2} ns/op={:.2}",
                    s.name,
                    round + 1,
                    iters,
                    elapsed.as_secs_f64() * 1e3,
                    ns_per_op
                );
            }
        }
        // Plain mean across rounds (no outlier trimming).
        let avg_ns = per_round_ns_per_op.iter().sum::<f64>() / (per_round_ns_per_op.len() as f64);
        let ops_per_sec = 1e9_f64 / avg_ns;
        println!(
            "| {} | {} | {:.2} | {:.2} |",
            s.name, file_count, avg_ns, ops_per_sec
        );
        rows.push(ResultRow {
            sample: s.name,
            file_count,
            avg_ns_per_op: avg_ns,
            ops_per_sec,
        });
    }
    // Optional report files; only built when at least one output is wanted.
    if out_md.is_some() || out_json.is_some() {
        let timestamp_utc =
            time::OffsetDateTime::now_utc().format(&time::format_description::well_known::Rfc3339)
                .unwrap_or_else(|_| "unknown".to_string());
        let cfg = RunConfig {
            dir: dir.display().to_string(),
            sample: sample_filter,
            fixed_iters,
            warmup_iters,
            rounds,
            min_round_ms,
            max_adaptive_iters,
            timestamp_utc,
        };
        if let Some(path) = out_md {
            let md = render_markdown(&cfg, &rows);
            write_text_file(&path, &md);
            eprintln!("Wrote {}", path.display());
        }
        if let Some(path) = out_json {
            let json = serde_json::to_string_pretty(&BenchmarkOutput { config: cfg, rows })
                .expect("serialize json");
            write_text_file(&path, &json);
            eprintln!("Wrote {}", path.display());
        }
    }
}
/// Picks an iteration count for one benchmark round by doubling until a
/// timed run takes at least `min_round_ms`, capped at `max_iters`.
///
/// The probing runs use the exact decode path under test, so the returned
/// count reflects real per-op cost.
fn choose_iters_adaptive(bytes: &[u8], min_round_ms: u64, max_iters: u64) -> u64 {
    let target_secs = (min_round_ms as f64) / 1e3;
    let mut iters: u64 = 1;
    loop {
        let started = Instant::now();
        for _ in 0..iters {
            // black_box keeps the optimizer from eliding the decode.
            let decoded =
                ManifestObject::decode_der(std::hint::black_box(bytes)).expect("decode");
            std::hint::black_box(decoded);
        }
        let took = started.elapsed().as_secs_f64();
        if took >= target_secs || iters >= max_iters {
            return iters;
        }
        iters = iters.saturating_mul(2).min(max_iters);
    }
}
/// Renders the run configuration and per-sample rows as a Markdown report
/// using the same table layout as the console output.
fn render_markdown(cfg: &RunConfig, rows: &[ResultRow]) -> String {
    let mut md = String::new();
    md.push_str("# Manifest decode + profile validate benchmark (debug build)\n\n");
    md.push_str(&format!("- timestamp_utc: {}\n", cfg.timestamp_utc));
    md.push_str(&format!("- dir: `{}`\n", cfg.dir));
    if let Some(sample) = cfg.sample.as_deref() {
        md.push_str(&format!("- sample: `{}`\n", sample));
    }
    // Fixed vs adaptive iteration modes are reported differently, mirroring
    // the console header.
    match cfg.fixed_iters {
        Some(n) => md.push_str(&format!("- iters: {} (fixed)\n", n)),
        None => md.push_str(&format!(
            "- warmup: {} iters, rounds: {}, min_round: {}ms (adaptive iters, max {})\n",
            cfg.warmup_iters, cfg.rounds, cfg.min_round_ms, cfg.max_adaptive_iters
        )),
    }
    md.push('\n');
    md.push_str("| sample | file_count | avg ns/op | ops/s |\n");
    md.push_str("|---|---:|---:|---:|\n");
    for row in rows {
        md.push_str(&format!(
            "| {} | {} | {:.2} | {:.2} |\n",
            escape_md(&row.sample),
            row.file_count,
            row.avg_ns_per_op,
            row.ops_per_sec
        ));
    }
    md
}
/// Writes `content` to `path` after ensuring its parent directories exist;
/// panics with the offending path on any I/O failure.
fn write_text_file(path: &Path, content: &str) {
    create_parent_dirs(path);
    if let Err(e) = std::fs::write(path, content) {
        panic!("write {}: {e}", path.display());
    }
}
/// Benchmark run parameters, echoed into the Markdown/JSON reports so a
/// result file records exactly how it was produced.
#[derive(Clone, Debug, serde::Serialize)]
struct RunConfig {
    /// Input directory, in path display form.
    dir: String,
    /// Optional sample-name filter; `None` means all samples.
    sample: Option<String>,
    /// Fixed per-round iteration count, if one was pinned by the user.
    fixed_iters: Option<u64>,
    /// Warmup iterations run before timed rounds.
    warmup_iters: u64,
    /// Number of timed rounds per sample.
    rounds: u64,
    /// Minimum wall-clock length of one timed round, in milliseconds
    /// (drives the adaptive iteration chooser).
    min_round_ms: u64,
    /// Upper bound on adaptively chosen iterations per round.
    max_adaptive_iters: u64,
    /// UTC timestamp of the run, preformatted as a string.
    timestamp_utc: String,
}
/// One per-sample measurement summary, rendered as a table row in the
/// Markdown report and serialized into the JSON output.
#[derive(Clone, Debug, serde::Serialize)]
struct ResultRow {
    /// Sample name (Markdown-escaped when rendered into the report table).
    sample: String,
    /// File count reported for the sample — presumably the decoded
    /// manifest's fileList length; confirm against the measurement loop.
    file_count: usize,
    /// Mean nanoseconds per decode+validate operation.
    avg_ns_per_op: f64,
    /// Operations per second derived from the same measurements.
    ops_per_sec: f64,
}
/// Top-level JSON document written by the benchmark: the run configuration
/// plus one result row per sample.
#[derive(Clone, Debug, serde::Serialize)]
struct BenchmarkOutput {
    /// Echo of the run parameters for reproducibility.
    config: RunConfig,
    /// Measurement rows, one per benchmarked sample.
    rows: Vec<ResultRow>,
}

View File

@ -0,0 +1,87 @@
#!/usr/bin/env bash
set -euo pipefail

# Generates a flamegraph SVG for Manifest decode+profile validate.
#
# Prereqs:
# - `perf` installed and usable by current user
# - `cargo flamegraph` installed (`cargo install flamegraph`)
#
# Usage examples:
#   BENCH_SAMPLE=large-02 ./tests/benchmark/flamegraph_manifest_decode_profile.sh
#   BENCH_SAMPLE=large-02 BENCH_ITERS=5000 FLAMEGRAPH_PROFILE=dev ./tests/benchmark/flamegraph_manifest_decode_profile.sh
#   BENCH_SAMPLE=large-02 BENCH_ITERS=2000 FLAMEGRAPH_PROFILE=release ./tests/benchmark/flamegraph_manifest_decode_profile.sh

PROFILE="${FLAMEGRAPH_PROFILE:-dev}" # dev|release
SAMPLE="${BENCH_SAMPLE:-large-02}"
ITERS="${BENCH_ITERS:-2000}"
FREQ="${FLAMEGRAPH_FREQ:-99}"
WARMUP="${BENCH_WARMUP_ITERS:-1}"
ROUNDS="${BENCH_ROUNDS:-1}"
MIN_ROUND_MS="${BENCH_MIN_ROUND_MS:-1}"
MAX_ITERS="${BENCH_MAX_ITERS:-100000}"

if ! command -v perf >/dev/null 2>&1; then
  echo "ERROR: perf not found. Install linux-tools/perf first (may require sudo)." >&2
  exit 2
fi

# `cargo flamegraph` is driven by the `cargo-flamegraph` binary; a bare
# `flamegraph` binary alone is not enough, so check for the cargo subcommand
# directly. (The previous nested check was redundant and let a bare
# `flamegraph` install through, only to fail later at `cargo flamegraph`.)
if ! command -v cargo-flamegraph >/dev/null 2>&1; then
  echo "ERROR: cargo-flamegraph not found. Install with: cargo install flamegraph" >&2
  exit 2
fi

OUT="target/bench/flamegraph_mft_decode_profile_${SAMPLE}_${PROFILE}.svg"
# Ensure the output directory exists on every path (previously it was only
# created as a side effect of writing the WSL perf shim).
mkdir -p "$(dirname "${OUT}")"
echo "profile=${PROFILE} sample=${SAMPLE} iters=${ITERS} freq=${FREQ} out=${OUT}"

FLAGS=()
if [[ "${PROFILE}" == "release" ]]; then
  FLAGS+=(--release)
else
  FLAGS+=(--dev)
fi

# On WSL2, /usr/bin/perf is often a wrapper that exits 2 because there is no
# kernel-matched perf binary. In that case, prefer a real perf binary under
# /usr/lib/linux-tools/*/perf by putting a shim earlier in PATH.
PERF_WRAPPER_OK=1
PERF_VERSION_OUT="$(perf --version 2>&1 || true)"
if echo "${PERF_VERSION_OUT}" | grep -q "WARNING: perf not found for kernel"; then
  PERF_WRAPPER_OK=0
fi
if [[ "${PERF_WRAPPER_OK}" == "0" ]]; then
  PERF_REAL="$(ls -1 /usr/lib/linux-tools/*/perf 2>/dev/null | head -n 1 || true)"
  if [[ -z "${PERF_REAL}" ]]; then
    echo "ERROR: perf wrapper found, but no real perf binary under /usr/lib/linux-tools/*/perf" >&2
    exit 2
  fi
  SHIM_DIR="target/bench/tools"
  mkdir -p "${SHIM_DIR}"
  cat > "${SHIM_DIR}/perf" <<EOF
#!/usr/bin/env bash
exec "${PERF_REAL}" "\$@"
EOF
  chmod +x "${SHIM_DIR}/perf"
  export PATH="$(pwd)/${SHIM_DIR}:${PATH}"
  echo "Using perf shim -> ${PERF_REAL}"
fi

# Benchmark knobs are handed to the profiled test via environment variables.
BENCH_SAMPLE="${SAMPLE}" \
BENCH_ITERS="${ITERS}" \
BENCH_WARMUP_ITERS="${WARMUP}" \
BENCH_ROUNDS="${ROUNDS}" \
BENCH_MIN_ROUND_MS="${MIN_ROUND_MS}" \
BENCH_MAX_ITERS="${MAX_ITERS}" \
cargo flamegraph "${FLAGS[@]}" \
  -F "${FREQ}" \
  --output "${OUT}" \
  --test bench_manifest_decode_profile -- --ignored --nocapture

echo "Wrote ${OUT}"

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,9 @@
name input_path der_path input_size der_size source_format
large-01 benchmark/fixtures/selected/large-01.mft benchmark/fixtures/selected_der/large-01.mft 262869 2250 stored_point_container
large-02 benchmark/fixtures/selected/large-02.mft benchmark/fixtures/selected_der/large-02.mft 264331 10476 stored_point_container
medium-01 benchmark/fixtures/selected/medium-01.mft benchmark/fixtures/selected_der/medium-01.mft 16384 2681 stored_point_container
medium-02 benchmark/fixtures/selected/medium-02.mft benchmark/fixtures/selected_der/medium-02.mft 16384 2681 stored_point_container
small-01 benchmark/fixtures/selected/small-01.mft benchmark/fixtures/selected_der/small-01.mft 3160 1875 stored_point_container
small-02 benchmark/fixtures/selected/small-02.mft benchmark/fixtures/selected_der/small-02.mft 3160 1875 stored_point_container
xlarge-01 benchmark/fixtures/selected/xlarge-01.mft benchmark/fixtures/selected_der/xlarge-01.mft 4522145 144968 stored_point_container
xlarge-02 benchmark/fixtures/selected/xlarge-02.mft benchmark/fixtures/selected_der/xlarge-02.mft 4615094 150261 stored_point_container
1 name input_path der_path input_size der_size source_format
2 large-01 benchmark/fixtures/selected/large-01.mft benchmark/fixtures/selected_der/large-01.mft 262869 2250 stored_point_container
3 large-02 benchmark/fixtures/selected/large-02.mft benchmark/fixtures/selected_der/large-02.mft 264331 10476 stored_point_container
4 medium-01 benchmark/fixtures/selected/medium-01.mft benchmark/fixtures/selected_der/medium-01.mft 16384 2681 stored_point_container
5 medium-02 benchmark/fixtures/selected/medium-02.mft benchmark/fixtures/selected_der/medium-02.mft 16384 2681 stored_point_container
6 small-01 benchmark/fixtures/selected/small-01.mft benchmark/fixtures/selected_der/small-01.mft 3160 1875 stored_point_container
7 small-02 benchmark/fixtures/selected/small-02.mft benchmark/fixtures/selected_der/small-02.mft 3160 1875 stored_point_container
8 xlarge-01 benchmark/fixtures/selected/xlarge-01.mft benchmark/fixtures/selected_der/xlarge-01.mft 4522145 144968 stored_point_container
9 xlarge-02 benchmark/fixtures/selected/xlarge-02.mft benchmark/fixtures/selected_der/xlarge-02.mft 4615094 150261 stored_point_container

Binary file not shown.

Binary file not shown.

View File

@ -59,8 +59,15 @@ fn store_raw_publication_point_files(
store store
.put_raw(manifest_rsync_uri, manifest_bytes) .put_raw(manifest_rsync_uri, manifest_bytes)
.expect("store manifest raw"); .expect("store manifest raw");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);

View File

@ -80,8 +80,15 @@ fn cache_pack_publication_point_mismatch_is_rejected() {
store store
.put_raw(&manifest_rsync_uri, &manifest_bytes) .put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest"); .expect("store manifest");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);

View File

@ -18,13 +18,16 @@ fn decode_manifest_fixture_smoke() {
rpki::data_model::oid::OID_SHA256 rpki::data_model::oid::OID_SHA256
); );
assert!(mft.manifest.next_update > mft.manifest.this_update); assert!(mft.manifest.next_update > mft.manifest.this_update);
assert!(!mft.manifest.files.is_empty()); assert!(mft.manifest.file_count() > 0);
// The manifest file MUST NOT be listed in its own fileList. // The manifest file MUST NOT be listed in its own fileList.
let entries = mft
.manifest
.parse_files()
.expect("parse validated manifest fileList");
assert!( assert!(
mft.manifest entries
.files
.iter() .iter()
.all(|f| !f.file_name.to_ascii_lowercase().ends_with(".mft")) .all(|f| !f.file_name.as_str().to_ascii_lowercase().ends_with(".mft"))
); );
} }

View File

@ -57,8 +57,15 @@ fn manifest_success_writes_fetch_cache_pp_pack() {
store store
.put_raw(&manifest_rsync_uri, &manifest_bytes) .put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest"); .expect("store manifest");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
@ -111,8 +118,15 @@ fn manifest_hash_mismatch_falls_back_to_fetch_cache_pp_when_enabled() {
store store
.put_raw(&manifest_rsync_uri, &manifest_bytes) .put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest"); .expect("store manifest");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
@ -140,11 +154,11 @@ fn manifest_hash_mismatch_falls_back_to_fetch_cache_pp_when_enabled() {
.expect("fetch_cache_pp pack exists"); .expect("fetch_cache_pp pack exists");
let cached_pack = FetchCachePpPack::decode(&cached_bytes).expect("decode cached"); let cached_pack = FetchCachePpPack::decode(&cached_bytes).expect("decode cached");
let victim = manifest let entries = manifest
.manifest .manifest
.files .parse_files()
.first() .expect("parse validated manifest fileList");
.expect("non-empty file list"); let victim = entries.first().expect("non-empty file list");
let victim_uri = format!("{publication_point_rsync_uri}{}", victim.file_name); let victim_uri = format!("{publication_point_rsync_uri}{}", victim.file_name);
let mut tampered = store let mut tampered = store
.get_raw(&victim_uri) .get_raw(&victim_uri)
@ -186,8 +200,15 @@ fn manifest_failed_fetch_stop_all_output() {
store store
.put_raw(&manifest_rsync_uri, &manifest_bytes) .put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest"); .expect("store manifest");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
@ -208,11 +229,11 @@ fn manifest_failed_fetch_stop_all_output() {
) )
.expect("first run stores fetch_cache_pp pack"); .expect("first run stores fetch_cache_pp pack");
let victim = manifest let entries = manifest
.manifest .manifest
.files .parse_files()
.first() .expect("parse validated manifest fileList");
.expect("non-empty file list"); let victim = entries.first().expect("non-empty file list");
let victim_uri = format!("{publication_point_rsync_uri}{}", victim.file_name); let victim_uri = format!("{publication_point_rsync_uri}{}", victim.file_name);
let mut tampered = store let mut tampered = store
.get_raw(&victim_uri) .get_raw(&victim_uri)
@ -255,8 +276,15 @@ fn manifest_fallback_pack_is_revalidated_and_rejected_if_stale() {
store store
.put_raw(&manifest_rsync_uri, &manifest_bytes) .put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest"); .expect("store manifest");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
@ -314,8 +342,15 @@ fn manifest_replay_is_treated_as_failed_fetch_and_uses_fetch_cache_pp() {
store store
.put_raw(&manifest_rsync_uri, &manifest_bytes) .put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest"); .expect("store manifest");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);

View File

@ -41,8 +41,12 @@ fn manifest_outside_publication_point_is_failed_fetch_rfc9286_section6_1() {
// Store all referenced files under the (different) publication point so that §6.4/§6.5 // Store all referenced files under the (different) publication point so that §6.4/§6.5
// would otherwise succeed if §6.1 was not enforced. // would otherwise succeed if §6.1 was not enforced.
for entry in &manifest.manifest.files { let entries = manifest
let file_path = fixture_dir.join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = fixture_dir.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);

View File

@ -319,13 +319,16 @@ struct ManifestEContentPretty {
impl From<&ManifestEContent> for ManifestEContentPretty { impl From<&ManifestEContent> for ManifestEContentPretty {
fn from(v: &ManifestEContent) -> Self { fn from(v: &ManifestEContent) -> Self {
let entries = v
.parse_files()
.expect("parse validated manifest fileList");
Self { Self {
version: v.version, version: v.version,
manifest_number: v.manifest_number.to_hex_upper(), manifest_number: v.manifest_number.to_hex_upper(),
this_update: v.this_update, this_update: v.this_update,
next_update: v.next_update, next_update: v.next_update,
file_hash_alg: v.file_hash_alg.clone(), file_hash_alg: v.file_hash_alg.clone(),
files: v.files.iter().map(FileAndHashPretty::from).collect(), files: entries.iter().map(FileAndHashPretty::from).collect(),
} }
} }
} }
@ -339,8 +342,8 @@ struct FileAndHashPretty {
impl From<&FileAndHash> for FileAndHashPretty { impl From<&FileAndHash> for FileAndHashPretty {
fn from(v: &FileAndHash) -> Self { fn from(v: &FileAndHash) -> Self {
Self { Self {
file_name: v.file_name.clone(), file_name: v.file_name.as_str().to_string(),
hash_hex: hex::encode(&v.hash_bytes), hash_hex: hex::encode(v.hash_bytes.as_ref()),
} }
} }
} }

View File

@ -51,8 +51,15 @@ fn build_cernet_pack_and_validation_time() -> (
store store
.put_raw(&manifest_rsync_uri, &manifest_bytes) .put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest"); .expect("store manifest");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);

View File

@ -51,8 +51,15 @@ fn build_cernet_pack_and_validation_time() -> (
store store
.put_raw(&manifest_rsync_uri, &manifest_bytes) .put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest"); .expect("store manifest");
for entry in &manifest.manifest.files { let entries = manifest
let file_path = manifest_path.parent().unwrap().join(&entry.file_name); .manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path) let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}")); .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name); let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);

View File

@ -0,0 +1,33 @@
use std::process::Command;
#[test]
fn rpki_bin_help_exits_success_and_prints_usage() {
    // Executing the compiled binary also exercises `src/bin/rpki.rs` for coverage.
    let binary = env!("CARGO_BIN_EXE_rpki");
    let output = Command::new(binary)
        .arg("--help")
        .output()
        .expect("run rpki --help");
    assert!(output.status.success(), "status={}", output.status);
    let stdout = String::from_utf8_lossy(&output.stdout);
    // The CLI help text produced by `cli::usage()` contains "Usage:".
    let mentions_usage = stdout.contains("Usage:") || stdout.contains("USAGE:");
    assert!(mentions_usage, "stdout={stdout}");
}
#[test]
fn rpki_bin_without_args_exits_2_and_prints_error() {
    // Running with no arguments is a usage error: exit code 2 plus a
    // diagnostic message on stderr.
    let binary = env!("CARGO_BIN_EXE_rpki");
    let output = Command::new(binary).output().expect("run rpki");
    assert_eq!(output.status.code(), Some(2), "status={}", output.status);
    let stderr = String::from_utf8_lossy(&output.stderr);
    assert!(
        !stderr.trim().is_empty(),
        "expected non-empty stderr, got empty"
    );
}