手动执行全量同步

This commit is contained in:
yuyr 2026-03-04 11:12:53 +08:00
parent 0f3d65254e
commit 6276d13814
54 changed files with 3879 additions and 670 deletions

View File

@ -7,6 +7,7 @@ edition = "2024"
default = ["full"]
# Full build used by the main RP implementation (includes RocksDB-backed storage).
full = ["dep:rocksdb"]
profile = ["dep:pprof", "dep:flate2"]
[dependencies]
asn1-rs = "0.7.1"
@ -27,6 +28,8 @@ serde_cbor = "0.11.2"
roxmltree = "0.20.0"
uuid = { version = "1.7.0", features = ["v4"] }
reqwest = { version = "0.12.12", default-features = false, features = ["blocking", "rustls-tls"] }
pprof = { version = "0.14.1", optional = true, features = ["flamegraph", "prost-codec"] }
flate2 = { version = "1.0.35", optional = true }
[dev-dependencies]
tempfile = "3.16.0"

View File

@ -62,6 +62,7 @@ Both scripts accept overrides via env vars:
- `TAL_URL` (default: APNIC TAL URL)
- `HTTP_TIMEOUT_SECS` (default: 1800)
- `RSYNC_TIMEOUT_SECS` (default: 1800)
- `RSYNC_MIRROR_ROOT` (default: disabled; when set, passes `--rsync-mirror-root` to `rpki`)
- `VALIDATION_TIME` (RFC3339; default: now UTC)
- `OUT_DIR` (default: `rpki/target/live/manual_sync`)
- `RUN_NAME` (default: auto timestamped)

View File

@ -40,6 +40,7 @@ fi
TAL_URL="${TAL_URL:-https://tal.apnic.net/tal-archive/apnic-rfc7730-https.tal}"
HTTP_TIMEOUT_SECS="${HTTP_TIMEOUT_SECS:-1800}"
RSYNC_TIMEOUT_SECS="${RSYNC_TIMEOUT_SECS:-1800}"
RSYNC_MIRROR_ROOT="${RSYNC_MIRROR_ROOT:-}"
VALIDATION_TIME="${VALIDATION_TIME:-}"
OUT_DIR="${OUT_DIR:-$ROOT_DIR/target/live/manual_sync}"
@ -88,6 +89,9 @@ cmd=(cargo run --release --bin rpki -- \
--http-timeout-secs "$HTTP_TIMEOUT_SECS" \
--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS" \
--report-json "$DELTA_REPORT_JSON")
if [[ -n "${RSYNC_MIRROR_ROOT}" ]]; then
cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
fi
if [[ -n "${VALIDATION_TIME}" ]]; then
cmd+=(--validation-time "$VALIDATION_TIME")
fi

View File

@ -16,6 +16,7 @@ cd "$ROOT_DIR"
TAL_URL="${TAL_URL:-https://tal.apnic.net/tal-archive/apnic-rfc7730-https.tal}"
HTTP_TIMEOUT_SECS="${HTTP_TIMEOUT_SECS:-1800}"
RSYNC_TIMEOUT_SECS="${RSYNC_TIMEOUT_SECS:-1800}"
RSYNC_MIRROR_ROOT="${RSYNC_MIRROR_ROOT:-}"
VALIDATION_TIME="${VALIDATION_TIME:-}"
OUT_DIR="${OUT_DIR:-$ROOT_DIR/target/live/manual_sync}"
@ -45,6 +46,9 @@ cmd=(cargo run --release --bin rpki -- \
--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS" \
--report-json "$REPORT_JSON")
if [[ -n "${RSYNC_MIRROR_ROOT}" ]]; then
cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
fi
if [[ -n "${VALIDATION_TIME}" ]]; then
cmd+=(--validation-time "$VALIDATION_TIME")
fi

1
src/analysis/mod.rs Normal file
View File

@ -0,0 +1 @@
pub mod timing;

308
src/analysis/timing.rs Normal file
View File

@ -0,0 +1,308 @@
use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use serde::{Deserialize, Serialize};
/// Cheaply-cloneable handle to a shared timing collector.
///
/// All clones record into the same underlying [`TimingCollector`] behind
/// an `Arc<Mutex<..>>`, so a handle can be passed to every pipeline stage.
#[derive(Clone)]
pub struct TimingHandle {
    // Shared collector; every recording method takes this lock briefly.
    inner: Arc<Mutex<TimingCollector>>,
}
impl TimingHandle {
    /// Create a new handle over a fresh collector seeded with `meta`.
    pub fn new(meta: TimingMeta) -> Self {
        Self {
            inner: Arc::new(Mutex::new(TimingCollector::new(meta))),
        }
    }

    /// Start timing a named top-level phase; the duration is recorded when
    /// the returned guard is dropped.
    pub fn span_phase(&self, phase: &'static str) -> TimingSpanGuard<'_> {
        TimingSpanGuard {
            handle: self.clone(),
            kind: TimingSpanKind::Phase(phase),
            start: Instant::now(),
        }
    }

    /// Start timing one RRDP repository sync, keyed by `repo_uri`.
    pub fn span_rrdp_repo<'a>(&self, repo_uri: &'a str) -> TimingSpanGuard<'a> {
        TimingSpanGuard {
            handle: self.clone(),
            kind: TimingSpanKind::RrdpRepo(repo_uri),
            start: Instant::now(),
        }
    }

    /// Start timing a single step of an RRDP repository sync; recorded
    /// under the composite key `"<repo_uri>::<step>"` (see `record_duration`).
    pub fn span_rrdp_repo_step<'a>(
        &self,
        repo_uri: &'a str,
        step: &'static str,
    ) -> TimingSpanGuard<'a> {
        TimingSpanGuard {
            handle: self.clone(),
            kind: TimingSpanKind::RrdpRepoStep { repo_uri, step },
            start: Instant::now(),
        }
    }

    /// Start timing the processing of one publication point, keyed by its
    /// manifest rsync URI.
    pub fn span_publication_point<'a>(&self, manifest_rsync_uri: &'a str) -> TimingSpanGuard<'a> {
        TimingSpanGuard {
            handle: self.clone(),
            kind: TimingSpanKind::PublicationPoint(manifest_rsync_uri),
            start: Instant::now(),
        }
    }

    /// Fill in metadata fields that were unknown at construction time.
    /// `None` fields in `update` leave the existing value untouched.
    pub fn set_meta(&self, update: TimingMetaUpdate<'_>) {
        let mut g = self.inner.lock().expect("timing lock");
        if let Some(v) = update.tal_url {
            g.meta.tal_url = Some(v.to_string());
        }
        if let Some(v) = update.db_path {
            g.meta.db_path = Some(v.to_string());
        }
    }

    /// Add `inc` to the named counter, saturating at `u64::MAX`.
    pub fn record_count(&self, key: &'static str, inc: u64) {
        let mut g = self.inner.lock().expect("timing lock");
        g.counts
            .entry(key)
            .and_modify(|v| *v = v.saturating_add(inc))
            .or_insert(inc);
    }

    /// Record a phase duration directly in nanoseconds.
    ///
    /// This is useful when aggregating sub-phase timings locally (to reduce lock contention)
    /// and then emitting a single record per publication point.
    pub fn record_phase_nanos(&self, phase: &'static str, nanos: u64) {
        let mut g = self.inner.lock().expect("timing lock");
        g.phases.record(phase, nanos);
    }

    /// Serialize the current state as pretty-printed JSON to `path`, keeping
    /// the `top_n` heaviest entries of each keyed category.
    ///
    /// # Errors
    /// Returns a descriptive message if the file cannot be created or
    /// serialization fails.
    pub fn write_json(&self, path: &Path, top_n: usize) -> Result<(), String> {
        // Snapshot the report inside a short-lived lock scope, then perform
        // the file I/O without holding the mutex.
        let report = {
            let g = self.inner.lock().expect("timing lock");
            g.to_report(top_n)
        };
        let f = std::fs::File::create(path)
            .map_err(|e| format!("create timing json failed: {}: {e}", path.display()))?;
        serde_json::to_writer_pretty(f, &report)
            .map_err(|e| format!("write timing json failed: {e}"))?;
        Ok(())
    }

    /// Record a finished span's duration into the map matching its kind,
    /// clamping the `u128` nanosecond count into `u64` range.
    fn record_duration(&self, kind: TimingSpanKind<'_>, duration: Duration) {
        let nanos_u64 = duration.as_nanos().min(u128::from(u64::MAX)) as u64;
        let mut g = self.inner.lock().expect("timing lock");
        match kind {
            TimingSpanKind::Phase(name) => g.phases.record(name, nanos_u64),
            TimingSpanKind::RrdpRepo(uri) => g.rrdp_repos.record(uri, nanos_u64),
            TimingSpanKind::RrdpRepoStep { repo_uri, step } => g
                .rrdp_repo_steps
                .record(&format!("{repo_uri}::{step}"), nanos_u64),
            TimingSpanKind::PublicationPoint(uri) => g.publication_points.record(uri, nanos_u64),
        }
    }
}
/// Run metadata embedded in the emitted timing report.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TimingMeta {
    // When the report data was recorded (RFC3339, UTC).
    pub recorded_at_utc_rfc3339: String,
    // Validation time used for the run (RFC3339, UTC).
    pub validation_time_utc_rfc3339: String,
    // TAL URL; may be filled in later via `TimingHandle::set_meta`.
    pub tal_url: Option<String>,
    // Database path; may be filled in later via `TimingHandle::set_meta`.
    pub db_path: Option<String>,
}
/// Partial, borrow-based update for [`TimingMeta`]; fields left as `None`
/// are not modified by `TimingHandle::set_meta`.
#[derive(Clone, Debug, Default)]
pub struct TimingMetaUpdate<'a> {
    pub tal_url: Option<&'a str>,
    pub db_path: Option<&'a str>,
}
/// RAII guard returned by the `span_*` methods of [`TimingHandle`];
/// records the elapsed time into the handle when dropped.
pub struct TimingSpanGuard<'a> {
    // Owning clone of the handle so the guard can outlive the caller's borrow.
    handle: TimingHandle,
    kind: TimingSpanKind<'a>,
    // Timestamp taken when the span was opened.
    start: Instant,
}
impl Drop for TimingSpanGuard<'_> {
    /// On scope exit, hand the measured duration back to the shared collector.
    fn drop(&mut self) {
        // Same evaluation order as a single-expression form: clone the kind
        // first, then sample the elapsed time.
        let kind = self.kind.clone();
        let elapsed = self.start.elapsed();
        self.handle.record_duration(kind, elapsed);
    }
}
/// Internal discriminator telling `record_duration` which map a finished
/// span belongs to.
#[derive(Clone, Debug)]
enum TimingSpanKind<'a> {
    // A named top-level pipeline phase.
    Phase(&'static str),
    // One full RRDP repository sync, keyed by repository URI.
    RrdpRepo(&'a str),
    // A single step within an RRDP repository sync; recorded under the
    // composite key "<repo_uri>::<step>".
    RrdpRepoStep {
        repo_uri: &'a str,
        step: &'static str,
    },
    // Processing of one publication point, keyed by manifest rsync URI.
    PublicationPoint(&'a str),
}
/// Aggregated observation count and total elapsed time (in nanoseconds)
/// for a single key.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct DurationStats {
    pub count: u64,
    pub total_nanos: u64,
}

impl DurationStats {
    /// Fold one observation into the aggregate. Both fields clamp at
    /// `u64::MAX` instead of wrapping on overflow.
    fn record(&mut self, nanos: u64) {
        self.count = self.count.checked_add(1).unwrap_or(u64::MAX);
        self.total_nanos = self.total_nanos.checked_add(nanos).unwrap_or(u64::MAX);
    }
}
/// Map from a string key (phase name, repository URI, …) to its
/// aggregated duration stats.
#[derive(Clone, Debug, Default)]
struct DurationStatsMap {
    map: HashMap<String, DurationStats>,
}

impl DurationStatsMap {
    /// Fold one observation (in nanoseconds) into the entry for `key`,
    /// creating it on first use.
    fn record(&mut self, key: &str, nanos: u64) {
        self.map.entry(key.to_string()).or_default().record(nanos);
    }

    /// Return up to `n` entries with the largest total duration, descending.
    ///
    /// Ties on `total_nanos` are broken by key so the output is
    /// deterministic; previously equal totals surfaced in `HashMap`
    /// iteration order, which varies between runs.
    fn top(&self, n: usize) -> Vec<TopDurationEntry> {
        let mut v = self
            .map
            .iter()
            .map(|(k, s)| TopDurationEntry {
                key: k.clone(),
                count: s.count,
                total_nanos: s.total_nanos,
            })
            .collect::<Vec<_>>();
        // Descending by total, ascending by key — a total order, so the
        // non-allocating unstable sort is safe to use here.
        v.sort_unstable_by(|a, b| {
            b.total_nanos
                .cmp(&a.total_nanos)
                .then_with(|| a.key.cmp(&b.key))
        });
        v.truncate(n);
        v
    }
}
/// Accumulates all timing data for one run; accessed only through the
/// `Mutex` inside [`TimingHandle`].
struct TimingCollector {
    // Run metadata copied into every emitted report.
    meta: TimingMeta,
    // Monotonic event counters keyed by static names (e.g. "vrps").
    counts: HashMap<&'static str, u64>,
    // Durations of top-level phases.
    phases: DurationStatsMap,
    // Per-repository RRDP sync durations.
    rrdp_repos: DurationStatsMap,
    // Per-step RRDP durations, keyed as "<repo_uri>::<step>".
    rrdp_repo_steps: DurationStatsMap,
    // Per-publication-point durations keyed by manifest rsync URI.
    publication_points: DurationStatsMap,
}
impl TimingCollector {
fn new(meta: TimingMeta) -> Self {
Self {
meta,
counts: HashMap::new(),
phases: DurationStatsMap::default(),
rrdp_repos: DurationStatsMap::default(),
rrdp_repo_steps: DurationStatsMap::default(),
publication_points: DurationStatsMap::default(),
}
}
fn to_report(&self, top_n: usize) -> TimingReportV1 {
TimingReportV1 {
format_version: 1,
meta: self.meta.clone(),
counts: self
.counts
.iter()
.map(|(k, v)| ((*k).to_string(), *v))
.collect(),
phases: self
.phases
.map
.iter()
.map(|(k, s)| (k.clone(), s.clone()))
.collect(),
top_rrdp_repos: self.rrdp_repos.top(top_n),
top_rrdp_repo_steps: self.rrdp_repo_steps.top(top_n),
top_publication_points: self.publication_points.top(top_n),
}
}
}
/// Serialized timing report (schema version 1) as written by
/// `TimingHandle::write_json`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TimingReportV1 {
    // Schema version; always 1 for this struct.
    pub format_version: u64,
    pub meta: TimingMeta,
    // Named event counters (e.g. "vrps").
    pub counts: HashMap<String, u64>,
    // Aggregated duration per top-level phase (not truncated).
    pub phases: HashMap<String, DurationStats>,
    // Heaviest entries by total duration, truncated to top N.
    pub top_rrdp_repos: Vec<TopDurationEntry>,
    pub top_rrdp_repo_steps: Vec<TopDurationEntry>,
    pub top_publication_points: Vec<TopDurationEntry>,
}
/// One row in a "top N" listing: the key plus its aggregated
/// observation count and total duration in nanoseconds.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TopDurationEntry {
    pub key: String,
    pub count: u64,
    pub total_nanos: u64,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn timing_handle_writes_json_with_phases_and_tops() {
let meta = TimingMeta {
recorded_at_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
validation_time_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
tal_url: Some("https://example.test/x.tal".to_string()),
db_path: Some("db".to_string()),
};
let h = TimingHandle::new(meta);
{
let _p = h.span_phase("tal_bootstrap");
}
{
let _r = h.span_rrdp_repo("https://rrdp.example.test/notification.xml");
}
{
let _s = h.span_rrdp_repo_step(
"https://rrdp.example.test/notification.xml",
"fetch_notification",
);
}
{
let _pp = h.span_publication_point("rsync://example.test/repo/manifest.mft");
}
h.record_count("vrps", 42);
let dir = tempfile::tempdir().expect("tempdir");
let path = dir.path().join("timing.json");
h.write_json(&path, 10).expect("write_json");
let rep: TimingReportV1 =
serde_json::from_slice(&std::fs::read(&path).expect("read timing.json"))
.expect("parse timing.json");
assert_eq!(rep.format_version, 1);
assert!(rep.phases.contains_key("tal_bootstrap"));
assert_eq!(rep.counts.get("vrps").copied(), Some(42));
assert!(
rep.top_rrdp_repos
.iter()
.any(|e| e.key.contains("rrdp.example.test")),
"expected repo in top list"
);
assert!(
rep.top_rrdp_repo_steps
.iter()
.any(|e| e.key.contains("fetch_notification")),
"expected repo step in top list"
);
assert!(
rep.top_publication_points
.iter()
.any(|e| e.key.contains("manifest.mft")),
"expected PP in top list"
);
}
}

View File

@ -1,5 +1,6 @@
use std::path::{Path, PathBuf};
use crate::analysis::timing::{TimingHandle, TimingMeta, TimingMetaUpdate};
use crate::audit::{
AspaOutput, AuditReportV1, AuditRunMeta, AuditWarning, TreeSummary, VrpOutput,
format_roa_ip_prefix,
@ -11,7 +12,8 @@ use crate::policy::Policy;
use crate::storage::RocksStore;
use crate::validation::run_tree_from_tal::{
RunTreeFromTalAuditOutput, run_tree_from_tal_and_ta_der_serial_audit,
run_tree_from_tal_url_serial_audit,
run_tree_from_tal_and_ta_der_serial_audit_with_timing, run_tree_from_tal_url_serial_audit,
run_tree_from_tal_url_serial_audit_with_timing,
};
use crate::validation::tree::TreeRunConfig;
@ -29,10 +31,15 @@ pub struct CliArgs {
pub http_timeout_secs: u64,
pub rsync_timeout_secs: u64,
pub rsync_mirror_root: Option<PathBuf>,
pub max_depth: Option<usize>,
pub max_instances: Option<usize>,
pub validation_time: Option<time::OffsetDateTime>,
pub revalidate_only: bool,
pub analyze: bool,
pub profile_cpu: bool,
}
fn usage() -> String {
@ -55,9 +62,13 @@ Options:
--rsync-local-dir <path> Use LocalDirRsyncFetcher rooted at this directory (offline tests)
--http-timeout-secs <n> HTTP fetch timeout seconds (default: 20)
--rsync-timeout-secs <n> rsync I/O timeout seconds (default: 60)
--rsync-mirror-root <path> Persist rsync mirrors under this directory (default: disabled)
--max-depth <n> Max CA instance depth (0 = root only)
--max-instances <n> Max number of CA instances to process
--validation-time <rfc3339> Validation time in RFC3339 (default: now UTC)
--revalidate-only Skip RRDP/rsync sync; re-validate from existing DB cache
--analyze Write timing analysis JSON under target/live/analyze/<timestamp>/
--profile-cpu (Requires build feature 'profile') Write CPU flamegraph under analyze dir
--help Show this help
"
@ -76,9 +87,13 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let mut rsync_local_dir: Option<PathBuf> = None;
let mut http_timeout_secs: u64 = 20;
let mut rsync_timeout_secs: u64 = 60;
let mut rsync_mirror_root: Option<PathBuf> = None;
let mut max_depth: Option<usize> = None;
let mut max_instances: Option<usize> = None;
let mut validation_time: Option<time::OffsetDateTime> = None;
let mut revalidate_only: bool = false;
let mut analyze: bool = false;
let mut profile_cpu: bool = false;
let mut i = 1usize;
while i < argv.len() {
@ -134,6 +149,11 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
.parse::<u64>()
.map_err(|_| format!("invalid --rsync-timeout-secs: {v}"))?;
}
"--rsync-mirror-root" => {
i += 1;
let v = argv.get(i).ok_or("--rsync-mirror-root requires a value")?;
rsync_mirror_root = Some(PathBuf::from(v));
}
"--max-depth" => {
i += 1;
let v = argv.get(i).ok_or("--max-depth requires a value")?;
@ -158,6 +178,15 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
.map_err(|e| format!("invalid --validation-time (RFC3339 expected): {e}"))?;
validation_time = Some(t);
}
"--revalidate-only" => {
revalidate_only = true;
}
"--analyze" => {
analyze = true;
}
"--profile-cpu" => {
profile_cpu = true;
}
_ => return Err(format!("unknown argument: {arg}\n\n{}", usage())),
}
i += 1;
@ -189,9 +218,13 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
rsync_local_dir,
http_timeout_secs,
rsync_timeout_secs,
rsync_mirror_root,
max_depth,
max_instances,
validation_time,
revalidate_only,
analyze,
profile_cpu,
})
}
@ -321,6 +354,67 @@ pub fn run(argv: &[String]) -> Result<(), String> {
let config = TreeRunConfig {
max_depth: args.max_depth,
max_instances: args.max_instances,
revalidate_only: args.revalidate_only,
};
use time::format_description::well_known::Rfc3339;
let mut timing: Option<(std::path::PathBuf, TimingHandle)> = None;
if args.analyze {
let recorded_at_utc_rfc3339 = time::OffsetDateTime::now_utc()
.to_offset(time::UtcOffset::UTC)
.format(&Rfc3339)
.map_err(|e| format!("format recorded_at_utc failed: {e}"))?;
let validation_time_utc_rfc3339 = validation_time
.to_offset(time::UtcOffset::UTC)
.format(&Rfc3339)
.map_err(|e| format!("format validation_time failed: {e}"))?;
let ts_compact = {
let fmt = time::format_description::parse("[year][month][day]T[hour][minute][second]Z")
.map_err(|e| format!("format description parse failed: {e}"))?;
time::OffsetDateTime::now_utc()
.format(&fmt)
.map_err(|e| format!("format timestamp failed: {e}"))?
};
let out_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("target")
.join("live")
.join("analyze")
.join(ts_compact);
std::fs::create_dir_all(&out_dir)
.map_err(|e| format!("create analyze out dir failed: {}: {e}", out_dir.display()))?;
let handle = TimingHandle::new(TimingMeta {
recorded_at_utc_rfc3339,
validation_time_utc_rfc3339,
tal_url: None,
db_path: None,
});
handle.set_meta(TimingMetaUpdate {
tal_url: args.tal_url.as_deref(),
db_path: Some(args.db_path.to_string_lossy().as_ref()),
});
timing = Some((out_dir, handle));
}
if args.profile_cpu && !args.analyze {
return Err("--profile-cpu requires --analyze".to_string());
}
#[cfg(not(feature = "profile"))]
if args.profile_cpu {
return Err("CPU profiling requires building with: --features profile".to_string());
}
#[cfg(feature = "profile")]
let mut profiler_guard: Option<pprof::ProfilerGuard<'static>> = if args.profile_cpu {
Some(
pprof::ProfilerGuard::new(100)
.map_err(|e| format!("pprof ProfilerGuard init failed: {e}"))?,
)
} else {
None
};
let out = if let Some(dir) = args.rsync_local_dir.as_ref() {
@ -330,39 +424,72 @@ pub fn run(argv: &[String]) -> Result<(), String> {
args.tal_path.as_ref(),
args.ta_path.as_ref(),
) {
(Some(url), _, _) => run_tree_from_tal_url_serial_audit(
&store,
&policy,
url,
&http,
&rsync,
validation_time,
&config,
)
.map_err(|e| e.to_string())?,
(Some(url), _, _) => {
if let Some((_, t)) = timing.as_ref() {
run_tree_from_tal_url_serial_audit_with_timing(
&store,
&policy,
url,
&http,
&rsync,
validation_time,
&config,
t,
)
.map_err(|e| e.to_string())?
} else {
run_tree_from_tal_url_serial_audit(
&store,
&policy,
url,
&http,
&rsync,
validation_time,
&config,
)
.map_err(|e| e.to_string())?
}
}
(None, Some(tal_path), Some(ta_path)) => {
let tal_bytes = std::fs::read(tal_path)
.map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?;
let ta_der = std::fs::read(ta_path)
.map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?;
run_tree_from_tal_and_ta_der_serial_audit(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
&http,
&rsync,
validation_time,
&config,
)
.map_err(|e| e.to_string())?
if let Some((_, t)) = timing.as_ref() {
run_tree_from_tal_and_ta_der_serial_audit_with_timing(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
&http,
&rsync,
validation_time,
&config,
t,
)
.map_err(|e| e.to_string())?
} else {
run_tree_from_tal_and_ta_der_serial_audit(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
&http,
&rsync,
validation_time,
&config,
)
.map_err(|e| e.to_string())?
}
}
_ => unreachable!("validated by parse_args"),
}
} else {
let rsync = SystemRsyncFetcher::new(SystemRsyncConfig {
timeout: std::time::Duration::from_secs(args.rsync_timeout_secs.max(1)),
mirror_root: args.rsync_mirror_root.clone(),
..SystemRsyncConfig::default()
});
match (
@ -370,44 +497,132 @@ pub fn run(argv: &[String]) -> Result<(), String> {
args.tal_path.as_ref(),
args.ta_path.as_ref(),
) {
(Some(url), _, _) => run_tree_from_tal_url_serial_audit(
&store,
&policy,
url,
&http,
&rsync,
validation_time,
&config,
)
.map_err(|e| e.to_string())?,
(Some(url), _, _) => {
if let Some((_, t)) = timing.as_ref() {
run_tree_from_tal_url_serial_audit_with_timing(
&store,
&policy,
url,
&http,
&rsync,
validation_time,
&config,
t,
)
.map_err(|e| e.to_string())?
} else {
run_tree_from_tal_url_serial_audit(
&store,
&policy,
url,
&http,
&rsync,
validation_time,
&config,
)
.map_err(|e| e.to_string())?
}
}
(None, Some(tal_path), Some(ta_path)) => {
let tal_bytes = std::fs::read(tal_path)
.map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?;
let ta_der = std::fs::read(ta_path)
.map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?;
run_tree_from_tal_and_ta_der_serial_audit(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
&http,
&rsync,
validation_time,
&config,
)
.map_err(|e| e.to_string())?
if let Some((_, t)) = timing.as_ref() {
run_tree_from_tal_and_ta_der_serial_audit_with_timing(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
&http,
&rsync,
validation_time,
&config,
t,
)
.map_err(|e| e.to_string())?
} else {
run_tree_from_tal_and_ta_der_serial_audit(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
&http,
&rsync,
validation_time,
&config,
)
.map_err(|e| e.to_string())?
}
}
_ => unreachable!("validated by parse_args"),
}
};
if let Some((_out_dir, t)) = timing.as_ref() {
t.record_count("instances_processed", out.tree.instances_processed as u64);
t.record_count("instances_failed", out.tree.instances_failed as u64);
}
let report = build_report(&policy, validation_time, out);
if let Some(p) = args.report_json_path.as_deref() {
write_json(p, &report)?;
}
if let Some((out_dir, t)) = timing.as_ref() {
t.record_count("vrps", report.vrps.len() as u64);
t.record_count("aspas", report.aspas.len() as u64);
t.record_count(
"audit_publication_points",
report.publication_points.len() as u64,
);
let timing_json_path = out_dir.join("timing.json");
t.write_json(&timing_json_path, 20)?;
eprintln!("analysis: wrote {}", timing_json_path.display());
}
#[cfg(feature = "profile")]
if let (Some((out_dir, _)), Some(guard)) = (timing.as_ref(), profiler_guard.take()) {
let report = guard
.report()
.build()
.map_err(|e| format!("pprof report build failed: {e}"))?;
let svg_path = out_dir.join("flamegraph.svg");
let svg_file = std::fs::File::create(&svg_path)
.map_err(|e| format!("create flamegraph failed: {}: {e}", svg_path.display()))?;
report
.flamegraph(svg_file)
.map_err(|e| format!("write flamegraph failed: {e}"))?;
eprintln!("analysis: wrote {}", svg_path.display());
// Best-effort: write pprof protobuf as gzipped bytes.
let pb_path = out_dir.join("pprof.pb.gz");
let pprof_profile = report
.pprof()
.map_err(|e| format!("pprof export failed: {e}"))?;
use pprof::protos::Message;
let mut body = Vec::with_capacity(pprof_profile.encoded_len());
pprof_profile
.encode(&mut body)
.map_err(|e| format!("pprof encode failed: {e}"))?;
let gz = flate2::write::GzEncoder::new(
std::fs::File::create(&pb_path)
.map_err(|e| format!("create pprof.pb.gz failed: {}: {e}", pb_path.display()))?,
flate2::Compression::default(),
);
let mut gz = gz;
use std::io::Write;
gz.write_all(&body)
.map_err(|e| format!("write pprof.pb.gz failed: {e}"))?;
gz.finish()
.map_err(|e| format!("finish pprof.pb.gz failed: {e}"))?;
eprintln!("analysis: wrote {}", pb_path.display());
}
print_summary(&report);
Ok(())
}
@ -422,6 +637,7 @@ mod tests {
let err = parse_args(&argv).unwrap_err();
assert!(err.contains("Usage:"), "{err}");
assert!(err.contains("--db"), "{err}");
assert!(err.contains("--rsync-mirror-root"), "{err}");
}
#[test]

View File

@ -1,5 +1,5 @@
use crate::data_model::oid::OID_CT_ASPA;
use crate::data_model::common::{DerReader, der_take_tlv};
use crate::data_model::oid::OID_CT_ASPA;
use crate::data_model::rc::ResourceCertificate;
use crate::data_model::signed_object::{
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectParseError, SignedObjectValidateError,

View File

@ -1,6 +1,6 @@
use x509_parser::asn1_rs::Tag;
use x509_parser::x509::AlgorithmIdentifier;
use x509_parser::prelude::FromDer;
use x509_parser::x509::AlgorithmIdentifier;
pub type UtcTime = time::OffsetDateTime;
@ -169,7 +169,10 @@ impl<'a> DerReader<'a> {
}
pub(crate) fn peek_tag(&self) -> Result<u8, String> {
self.buf.first().copied().ok_or_else(|| "truncated DER".into())
self.buf
.first()
.copied()
.ok_or_else(|| "truncated DER".into())
}
pub(crate) fn take_any(&mut self) -> Result<(u8, &'a [u8]), String> {
@ -223,7 +226,10 @@ impl<'a> DerReader<'a> {
der_uint_from_bytes(v)
}
pub(crate) fn take_explicit(&mut self, expected_outer_tag: u8) -> Result<(u8, &'a [u8]), String> {
pub(crate) fn take_explicit(
&mut self,
expected_outer_tag: u8,
) -> Result<(u8, &'a [u8]), String> {
let inner_der = self.take_tag(expected_outer_tag)?;
let (tag, value, rem) = der_take_tlv(inner_der)?;
if !rem.is_empty() {

View File

@ -433,10 +433,7 @@ fn algorithm_identifier_value(ai: &AlgorithmIdentifier<'_>) -> AlgorithmIdentifi
} else {
ai.algorithm.to_id_string()
};
AlgorithmIdentifierValue {
oid,
parameters,
}
AlgorithmIdentifierValue { oid, parameters }
}
fn parse_extensions_parse(exts: &[X509Extension<'_>]) -> Result<Vec<CrlExtensionParsed>, String> {
@ -444,15 +441,15 @@ fn parse_extensions_parse(exts: &[X509Extension<'_>]) -> Result<Vec<CrlExtension
for ext in exts {
let oid = ext.oid.as_bytes();
if oid == OID_AUTHORITY_KEY_IDENTIFIER_RAW {
let ParsedExtension::AuthorityKeyIdentifier(aki) = ext.parsed_extension() else {
return Err("AKI extension parse failed".to_string());
};
out.push(CrlExtensionParsed::AuthorityKeyIdentifier {
key_identifier: aki.key_identifier.as_ref().map(|k| k.0.to_vec()),
has_other_fields: aki.authority_cert_issuer.is_some()
|| aki.authority_cert_serial.is_some(),
critical: ext.critical,
});
let ParsedExtension::AuthorityKeyIdentifier(aki) = ext.parsed_extension() else {
return Err("AKI extension parse failed".to_string());
};
out.push(CrlExtensionParsed::AuthorityKeyIdentifier {
key_identifier: aki.key_identifier.as_ref().map(|k| k.0.to_vec()),
has_other_fields: aki.authority_cert_issuer.is_some()
|| aki.authority_cert_serial.is_some(),
critical: ext.critical,
});
} else if oid == OID_CRL_NUMBER_RAW {
match ext.parsed_extension() {
ParsedExtension::CRLNumber(n) => out.push(CrlExtensionParsed::CrlNumber {

View File

@ -1,5 +1,5 @@
use crate::data_model::common::{BigUnsigned, UtcTime};
use crate::data_model::common::der_take_tlv;
use crate::data_model::common::{BigUnsigned, UtcTime};
use crate::data_model::oid::{OID_CT_RPKI_MANIFEST, OID_SHA256};
use crate::data_model::rc::ResourceCertificate;
use crate::data_model::signed_object::{
@ -248,8 +248,7 @@ impl ManifestObject {
impl ManifestEContent {
/// Parse step of scheme A (`parse → validate → verify`).
pub fn parse_der(der: &[u8]) -> Result<ManifestEContentParsed, ManifestParseError> {
let (_tag, _value, rem) =
der_take_tlv(der).map_err(|e| ManifestParseError::Parse(e))?;
let (_tag, _value, rem) = der_take_tlv(der).map_err(|e| ManifestParseError::Parse(e))?;
if !rem.is_empty() {
return Err(ManifestParseError::TrailingBytes(rem.len()));
}
@ -412,8 +411,8 @@ fn decode_manifest_econtent_fast(der: &[u8]) -> Result<ManifestEContent, Manifes
));
}
let seq_len = der_count_elements(seq_content)
.map_err(|e| ManifestProfileError::ProfileDecode(e))?;
let seq_len =
der_count_elements(seq_content).map_err(|e| ManifestProfileError::ProfileDecode(e))?;
if seq_len != 5 && seq_len != 6 {
return Err(ManifestProfileError::InvalidManifestSequenceLen(seq_len));
}
@ -587,15 +586,17 @@ fn parse_file_list_sha256_fast(content: &[u8]) -> Result<Vec<FileAndHash>, Manif
let mut cur = content;
let mut out: Vec<FileAndHash> = Vec::with_capacity(est);
while !cur.is_empty() {
let (tag, value, rem) = der_take_tlv(cur)
.map_err(|e| ManifestProfileError::ProfileDecode(format!("fileList entry decode error: {e}")))?;
let (tag, value, rem) = der_take_tlv(cur).map_err(|e| {
ManifestProfileError::ProfileDecode(format!("fileList entry decode error: {e}"))
})?;
cur = rem;
if tag != 0x30 {
return Err(ManifestProfileError::InvalidFileAndHash);
}
let mut entry = value;
let (fn_tag, fn_value, entry_rem) = der_take_tlv(entry)
.map_err(|e| ManifestProfileError::ProfileDecode(format!("fileList fileName decode error: {e}")))?;
let (fn_tag, fn_value, entry_rem) = der_take_tlv(entry).map_err(|e| {
ManifestProfileError::ProfileDecode(format!("fileList fileName decode error: {e}"))
})?;
entry = entry_rem;
if fn_tag != 0x16 {
return Err(ManifestProfileError::InvalidFileAndHash);
@ -702,7 +703,9 @@ fn parse_generalized_time_bytes(bytes: &[u8]) -> Result<UtcTime, String> {
return Err("GeneralizedTime must end with 'Z'".into());
}
let core = &s[..s.len() - 1];
let (main, frac) = core.split_once('.').map_or((core, None), |(a, b)| (a, Some(b)));
let (main, frac) = core
.split_once('.')
.map_or((core, None), |(a, b)| (a, Some(b)));
if main.len() != 14 || !main.bytes().all(|b| b.is_ascii_digit()) {
return Err("GeneralizedTime must be YYYYMMDDHHMMSS[.fff]Z".into());
}
@ -731,9 +734,14 @@ fn parse_generalized_time_bytes(bytes: &[u8]) -> Result<UtcTime, String> {
0
};
let date = time::Date::from_calendar_date(year, time::Month::try_from(month).map_err(|_| "bad month")?, day)
.map_err(|e| e.to_string())?;
let t = time::Time::from_hms_nano(hour, minute, second, nanosecond).map_err(|e| e.to_string())?;
let date = time::Date::from_calendar_date(
year,
time::Month::try_from(month).map_err(|_| "bad month")?,
day,
)
.map_err(|e| e.to_string())?;
let t =
time::Time::from_hms_nano(hour, minute, second, nanosecond).map_err(|e| e.to_string())?;
Ok(date.with_time(t).assume_utc())
}

View File

@ -38,11 +38,9 @@ pub const OID_SUBJECT_KEY_IDENTIFIER: &str = "2.5.29.14";
pub const OID_SUBJECT_KEY_IDENTIFIER_RAW: &[u8] = &asn1_rs::oid!(raw 2.5.29.14);
pub const OID_CT_RPKI_MANIFEST: &str = "1.2.840.113549.1.9.16.1.26";
pub const OID_CT_RPKI_MANIFEST_RAW: &[u8] =
&asn1_rs::oid!(raw 1.2.840.113549.1.9.16.1.26);
pub const OID_CT_RPKI_MANIFEST_RAW: &[u8] = &asn1_rs::oid!(raw 1.2.840.113549.1.9.16.1.26);
pub const OID_CT_ROUTE_ORIGIN_AUTHZ: &str = "1.2.840.113549.1.9.16.1.24";
pub const OID_CT_ROUTE_ORIGIN_AUTHZ_RAW: &[u8] =
&asn1_rs::oid!(raw 1.2.840.113549.1.9.16.1.24);
pub const OID_CT_ROUTE_ORIGIN_AUTHZ_RAW: &[u8] = &asn1_rs::oid!(raw 1.2.840.113549.1.9.16.1.24);
pub const OID_CT_ASPA: &str = "1.2.840.113549.1.9.16.1.49";
pub const OID_CT_ASPA_RAW: &[u8] = &asn1_rs::oid!(raw 1.2.840.113549.1.9.16.1.49);

View File

@ -648,10 +648,7 @@ impl RcExtensionsParsed {
let subject_key_identifier = match subject_key_identifier.len() {
0 => None,
1 => {
let (ski, critical) = subject_key_identifier
.into_iter()
.next()
.expect("len==1");
let (ski, critical) = subject_key_identifier.into_iter().next().expect("len==1");
if critical {
return Err(ResourceCertificateProfileError::SkiCriticality);
}
@ -673,10 +670,7 @@ impl RcExtensionsParsed {
}
}
1 => {
let (aki, critical) = authority_key_identifier
.into_iter()
.next()
.expect("len==1");
let (aki, critical) = authority_key_identifier.into_iter().next().expect("len==1");
if critical {
return Err(ResourceCertificateProfileError::AkiCriticality);
}
@ -716,10 +710,7 @@ impl RcExtensionsParsed {
}
}
1 => {
let (crldp, critical) = crl_distribution_points
.into_iter()
.next()
.expect("len==1");
let (crldp, critical) = crl_distribution_points.into_iter().next().expect("len==1");
if critical {
return Err(ResourceCertificateProfileError::CrlDistributionPointsCriticality);
}
@ -795,7 +786,11 @@ impl RcExtensionsParsed {
ResourceCertificateProfileError::AuthorityInfoAccessMissingCaIssuers,
);
}
if !aia.ca_issuers_uris.iter().any(|u| u.starts_with("rsync://")) {
if !aia
.ca_issuers_uris
.iter()
.any(|u| u.starts_with("rsync://"))
{
return Err(ResourceCertificateProfileError::AuthorityInfoAccessNoRsync);
}
Some(aia.ca_issuers_uris)
@ -932,10 +927,7 @@ fn algorithm_identifier_value(
} else {
ai.algorithm.to_id_string()
};
AlgorithmIdentifierValue {
oid,
parameters,
}
AlgorithmIdentifierValue { oid, parameters }
}
fn parse_extensions_parse(
@ -955,78 +947,78 @@ fn parse_extensions_parse(
for ext in exts {
let oid = ext.oid.as_bytes();
if oid == OID_BASIC_CONSTRAINTS_RAW {
let ParsedExtension::BasicConstraints(bc) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"basicConstraints parse failed".into(),
));
};
basic_constraints_ca.push(bc.ca);
} else if oid == OID_SUBJECT_KEY_IDENTIFIER_RAW {
let ParsedExtension::SubjectKeyIdentifier(s) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"subjectKeyIdentifier parse failed".into(),
));
};
ski.push((s.0.to_vec(), ext.critical));
} else if oid == OID_AUTHORITY_KEY_IDENTIFIER_RAW {
let ParsedExtension::AuthorityKeyIdentifier(a) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"authorityKeyIdentifier parse failed".into(),
));
};
aki.push((
AuthorityKeyIdentifierParsed {
key_identifier: a.key_identifier.as_ref().map(|k| k.0.to_vec()),
has_authority_cert_issuer: a.authority_cert_issuer.is_some(),
has_authority_cert_serial: a.authority_cert_serial.is_some(),
},
ext.critical,
let ParsedExtension::BasicConstraints(bc) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"basicConstraints parse failed".into(),
));
};
basic_constraints_ca.push(bc.ca);
} else if oid == OID_SUBJECT_KEY_IDENTIFIER_RAW {
let ParsedExtension::SubjectKeyIdentifier(s) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"subjectKeyIdentifier parse failed".into(),
));
};
ski.push((s.0.to_vec(), ext.critical));
} else if oid == OID_AUTHORITY_KEY_IDENTIFIER_RAW {
let ParsedExtension::AuthorityKeyIdentifier(a) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"authorityKeyIdentifier parse failed".into(),
));
};
aki.push((
AuthorityKeyIdentifierParsed {
key_identifier: a.key_identifier.as_ref().map(|k| k.0.to_vec()),
has_authority_cert_issuer: a.authority_cert_issuer.is_some(),
has_authority_cert_serial: a.authority_cert_serial.is_some(),
},
ext.critical,
));
} else if oid == OID_CRL_DISTRIBUTION_POINTS_RAW {
let ParsedExtension::CRLDistributionPoints(p) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"cRLDistributionPoints parse failed".into(),
));
};
crldp.push((parse_crldp_parse(p)?, ext.critical));
let ParsedExtension::CRLDistributionPoints(p) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"cRLDistributionPoints parse failed".into(),
));
};
crldp.push((parse_crldp_parse(p)?, ext.critical));
} else if oid == OID_AUTHORITY_INFO_ACCESS_RAW {
let ParsedExtension::AuthorityInfoAccess(p) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"authorityInfoAccess parse failed".into(),
));
};
aia.push((parse_aia_parse(p.accessdescs.as_slice())?, ext.critical));
let ParsedExtension::AuthorityInfoAccess(p) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"authorityInfoAccess parse failed".into(),
));
};
aia.push((parse_aia_parse(p.accessdescs.as_slice())?, ext.critical));
} else if oid == OID_SUBJECT_INFO_ACCESS_RAW {
let ParsedExtension::SubjectInfoAccess(s) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"subjectInfoAccess parse failed".into(),
));
};
sia.push((parse_sia_parse(s.accessdescs.as_slice())?, ext.critical));
let ParsedExtension::SubjectInfoAccess(s) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"subjectInfoAccess parse failed".into(),
));
};
sia.push((parse_sia_parse(s.accessdescs.as_slice())?, ext.critical));
} else if oid == OID_CERTIFICATE_POLICIES_RAW {
let ParsedExtension::CertificatePolicies(cp) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"certificatePolicies parse failed".into(),
));
};
let mut oids: Vec<String> = Vec::with_capacity(cp.len());
for p in cp.iter() {
let b = p.policy_id.as_bytes();
if b == OID_CP_IPADDR_ASNUMBER_RAW {
oids.push(OID_CP_IPADDR_ASNUMBER.to_string());
} else {
oids.push(p.policy_id.to_id_string());
}
let ParsedExtension::CertificatePolicies(cp) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"certificatePolicies parse failed".into(),
));
};
let mut oids: Vec<String> = Vec::with_capacity(cp.len());
for p in cp.iter() {
let b = p.policy_id.as_bytes();
if b == OID_CP_IPADDR_ASNUMBER_RAW {
oids.push(OID_CP_IPADDR_ASNUMBER.to_string());
} else {
oids.push(p.policy_id.to_id_string());
}
cert_policies.push((oids, ext.critical));
}
cert_policies.push((oids, ext.critical));
} else if oid == OID_IP_ADDR_BLOCKS_RAW {
let parsed = IpResourceSet::decode_extn_value(ext.value)
.map_err(|_e| ResourceCertificateParseError::InvalidIpResourcesEncoding)?;
ip_resources.push((parsed, ext.critical));
let parsed = IpResourceSet::decode_extn_value(ext.value)
.map_err(|_e| ResourceCertificateParseError::InvalidIpResourcesEncoding)?;
ip_resources.push((parsed, ext.critical));
} else if oid == OID_AUTONOMOUS_SYS_IDS_RAW {
let parsed = AsResourceSet::decode_extn_value(ext.value)
.map_err(|_e| ResourceCertificateParseError::InvalidAsResourcesEncoding)?;
as_resources.push((parsed, ext.critical));
let parsed = AsResourceSet::decode_extn_value(ext.value)
.map_err(|_e| ResourceCertificateParseError::InvalidAsResourcesEncoding)?;
as_resources.push((parsed, ext.critical));
}
}

View File

@ -1,5 +1,5 @@
use crate::data_model::oid::OID_CT_ROUTE_ORIGIN_AUTHZ;
use crate::data_model::common::{DerReader, der_take_tlv};
use crate::data_model::oid::OID_CT_ROUTE_ORIGIN_AUTHZ;
use crate::data_model::rc::{Afi as RcAfi, IpPrefix as RcIpPrefix, ResourceCertificate};
use crate::data_model::signed_object::{
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectParseError, SignedObjectValidateError,
@ -397,15 +397,18 @@ impl RoaEContentParsed {
return Err(RoaProfileError::AsIdOutOfRange(as_id_u64));
}
let as_id = as_id_u64 as u32;
let ip_addr_blocks = parse_ip_addr_blocks_cursor(seq.take_sequence().map_err(|e| {
RoaProfileError::ProfileDecode(format!("ipAddrBlocks: {e}"))
})?)?;
let ip_addr_blocks = parse_ip_addr_blocks_cursor(
seq.take_sequence()
.map_err(|e| RoaProfileError::ProfileDecode(format!("ipAddrBlocks: {e}")))?,
)?;
if !seq.is_empty() {
// Extra elements beyond the expected 2..3.
let extra =
count_elements(seq).map_err(|e| RoaProfileError::ProfileDecode(e.to_string()))?;
return Err(RoaProfileError::InvalidAttestationSequenceLen(elem_count + extra));
return Err(RoaProfileError::InvalidAttestationSequenceLen(
elem_count + extra,
));
}
let mut out = RoaEContent {
@ -517,11 +520,13 @@ fn parse_roa_ip_address_cursor(
let v = seq
.take_uint_u64()
.map_err(|e| RoaProfileError::ProfileDecode(e.to_string()))?;
let max_len: u16 = v.try_into().map_err(|_e| RoaProfileError::InvalidMaxLength {
afi,
prefix_len: prefix.prefix_len,
max_len: u16::MAX,
})?;
let max_len: u16 = v
.try_into()
.map_err(|_e| RoaProfileError::InvalidMaxLength {
afi,
prefix_len: prefix.prefix_len,
max_len: u16::MAX,
})?;
Some(max_len)
} else {
None

View File

@ -1,9 +1,9 @@
use crate::data_model::common::{der_take_tlv, Asn1TimeEncoding, Asn1TimeUtc, DerReader};
use crate::data_model::common::{Asn1TimeEncoding, Asn1TimeUtc, DerReader, der_take_tlv};
use crate::data_model::oid::{
OID_AD_SIGNED_OBJECT, OID_CMS_ATTR_CONTENT_TYPE, OID_CMS_ATTR_MESSAGE_DIGEST,
OID_CMS_ATTR_CONTENT_TYPE_RAW, OID_CMS_ATTR_MESSAGE_DIGEST_RAW, OID_CMS_ATTR_SIGNING_TIME,
OID_CMS_ATTR_SIGNING_TIME_RAW, OID_CT_ASPA, OID_CT_ASPA_RAW, OID_CT_RPKI_MANIFEST,
OID_CT_RPKI_MANIFEST_RAW, OID_CT_ROUTE_ORIGIN_AUTHZ, OID_CT_ROUTE_ORIGIN_AUTHZ_RAW,
OID_AD_SIGNED_OBJECT, OID_CMS_ATTR_CONTENT_TYPE, OID_CMS_ATTR_CONTENT_TYPE_RAW,
OID_CMS_ATTR_MESSAGE_DIGEST, OID_CMS_ATTR_MESSAGE_DIGEST_RAW, OID_CMS_ATTR_SIGNING_TIME,
OID_CMS_ATTR_SIGNING_TIME_RAW, OID_CT_ASPA, OID_CT_ASPA_RAW, OID_CT_ROUTE_ORIGIN_AUTHZ,
OID_CT_ROUTE_ORIGIN_AUTHZ_RAW, OID_CT_RPKI_MANIFEST, OID_CT_RPKI_MANIFEST_RAW,
OID_RSA_ENCRYPTION, OID_RSA_ENCRYPTION_RAW, OID_SHA256, OID_SHA256_RAW,
OID_SHA256_WITH_RSA_ENCRYPTION, OID_SHA256_WITH_RSA_ENCRYPTION_RAW, OID_SIGNED_DATA,
OID_SIGNED_DATA_RAW, OID_SUBJECT_INFO_ACCESS,
@ -429,7 +429,9 @@ fn parse_signed_data_from_contentinfo_cursor(
parse_signed_data_cursor(signed_data_seq)
}
fn parse_signed_data_cursor(mut seq: DerReader<'_>) -> Result<SignedDataParsed, SignedObjectParseError> {
fn parse_signed_data_cursor(
mut seq: DerReader<'_>,
) -> Result<SignedDataParsed, SignedObjectParseError> {
let version = seq
.take_uint_u64()
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
@ -496,8 +498,8 @@ fn parse_signed_data_cursor(mut seq: DerReader<'_>) -> Result<SignedDataParsed,
}
}
let signer_infos =
signer_infos.ok_or_else(|| SignedObjectParseError::Parse("SignedData.signerInfos missing".into()))?;
let signer_infos = signer_infos
.ok_or_else(|| SignedObjectParseError::Parse("SignedData.signerInfos missing".into()))?;
Ok(SignedDataParsed {
version,
@ -524,7 +526,9 @@ fn parse_encapsulated_content_info_cursor(
None
} else {
let inner_der = seq.take_explicit_der(0xA0).map_err(|_e| {
SignedObjectParseError::Parse("EncapsulatedContentInfo.eContent must be [0] EXPLICIT".into())
SignedObjectParseError::Parse(
"EncapsulatedContentInfo.eContent must be [0] EXPLICIT".into(),
)
})?;
let mut inner = DerReader::new(inner_der);
let octets = inner
@ -561,7 +565,9 @@ fn split_der_objects(mut input: &[u8]) -> Result<Vec<Vec<u8>>, SignedObjectParse
Ok(out)
}
fn parse_signer_infos_set_cursor(set_bytes: &[u8]) -> Result<Vec<SignerInfoParsed>, SignedObjectParseError> {
fn parse_signer_infos_set_cursor(
set_bytes: &[u8],
) -> Result<Vec<SignerInfoParsed>, SignedObjectParseError> {
let mut set = DerReader::new(set_bytes);
let mut out: Vec<SignerInfoParsed> = Vec::new();
while !set.is_empty() {
@ -624,7 +630,9 @@ fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedOb
})
}
fn parse_signer_info_cursor(mut seq: DerReader<'_>) -> Result<SignerInfoParsed, SignedObjectParseError> {
fn parse_signer_info_cursor(
mut seq: DerReader<'_>,
) -> Result<SignerInfoParsed, SignedObjectParseError> {
let version = seq
.take_uint_u64()
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
@ -880,10 +888,9 @@ fn parse_signed_attrs_implicit(
}
};
if count != 1 {
return Err(SignedObjectValidateError::InvalidSignedAttributeValuesCount {
oid,
count,
});
return Err(
SignedObjectValidateError::InvalidSignedAttributeValuesCount { oid, count },
);
}
// Re-parse the sole value.
@ -938,7 +945,10 @@ fn parse_signed_attrs_implicit(
})
}
fn parse_signing_time_value_tlv(tag: u8, value: &[u8]) -> Result<Asn1TimeUtc, SignedObjectValidateError> {
fn parse_signing_time_value_tlv(
tag: u8,
value: &[u8],
) -> Result<Asn1TimeUtc, SignedObjectValidateError> {
match tag {
0x17 => Ok(Asn1TimeUtc {
utc: parse_utctime(value)?,
@ -1070,7 +1080,8 @@ fn parse_algorithm_identifier_cursor(
}
fn parse_utctime(value: &[u8]) -> Result<time::OffsetDateTime, SignedObjectValidateError> {
let s = std::str::from_utf8(value).map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let s = std::str::from_utf8(value)
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
if !s.ends_with('Z') {
return Err(SignedObjectValidateError::InvalidSigningTimeValue);
}
@ -1081,25 +1092,41 @@ fn parse_utctime(value: &[u8]) -> Result<time::OffsetDateTime, SignedObjectValid
if !digits.as_bytes().iter().all(|b| b.is_ascii_digit()) {
return Err(SignedObjectValidateError::InvalidSigningTimeValue);
}
let yy: i32 = digits[0..2].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let yy: i32 = digits[0..2]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let year = if yy <= 49 { 2000 + yy } else { 1900 + yy };
let mon: u8 = digits[2..4].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let day: u8 = digits[4..6].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let hour: u8 = digits[6..8].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let min: u8 = digits[8..10].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let mon: u8 = digits[2..4]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let day: u8 = digits[4..6]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let hour: u8 = digits[6..8]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let min: u8 = digits[8..10]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let sec: u8 = if digits.len() == 12 {
digits[10..12].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?
digits[10..12]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?
} else {
0
};
let month = time::Month::try_from(mon).map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let date = time::Date::from_calendar_date(year, month, day).map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let time = time::Time::from_hms(hour, min, sec).map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let month = time::Month::try_from(mon)
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let date = time::Date::from_calendar_date(year, month, day)
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let time = time::Time::from_hms(hour, min, sec)
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
Ok(time::OffsetDateTime::new_utc(date, time))
}
fn parse_generalized_time(value: &[u8]) -> Result<time::OffsetDateTime, SignedObjectValidateError> {
let s = std::str::from_utf8(value).map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let s = std::str::from_utf8(value)
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
if !s.ends_with('Z') {
return Err(SignedObjectValidateError::InvalidSigningTimeValue);
}
@ -1110,19 +1137,34 @@ fn parse_generalized_time(value: &[u8]) -> Result<time::OffsetDateTime, SignedOb
if !digits.as_bytes().iter().all(|b| b.is_ascii_digit()) {
return Err(SignedObjectValidateError::InvalidSigningTimeValue);
}
let year: i32 = digits[0..4].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let mon: u8 = digits[4..6].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let day: u8 = digits[6..8].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let hour: u8 = digits[8..10].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let min: u8 = digits[10..12].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let year: i32 = digits[0..4]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let mon: u8 = digits[4..6]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let day: u8 = digits[6..8]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let hour: u8 = digits[8..10]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let min: u8 = digits[10..12]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let sec: u8 = if digits.len() == 14 {
digits[12..14].parse().map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?
digits[12..14]
.parse()
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?
} else {
0
};
let month = time::Month::try_from(mon).map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let date = time::Date::from_calendar_date(year, month, day).map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let time = time::Time::from_hms(hour, min, sec).map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let month = time::Month::try_from(mon)
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let date = time::Date::from_calendar_date(year, month, day)
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
let time = time::Time::from_hms(hour, min, sec)
.map_err(|_| SignedObjectValidateError::InvalidSigningTimeValue)?;
Ok(time::OffsetDateTime::new_utc(date, time))
}

View File

@ -2,6 +2,7 @@ use std::path::{Path, PathBuf};
use std::process::Command;
use std::time::Duration;
use sha2::Digest;
use uuid::Uuid;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetchResult, RsyncFetcher};
@ -11,6 +12,14 @@ pub struct SystemRsyncConfig {
pub rsync_bin: PathBuf,
pub timeout: Duration,
pub extra_args: Vec<String>,
/// Optional root directory for persistent rsync mirrors.
///
/// When set, callers may choose to sync into stable subdirectories under this
/// root (instead of a temporary directory) to benefit from rsync's incremental
/// behavior across runs.
///
/// Note: actual mirror behavior is implemented separately from config wiring.
pub mirror_root: Option<PathBuf>,
}
impl Default for SystemRsyncConfig {
@ -19,6 +28,7 @@ impl Default for SystemRsyncConfig {
rsync_bin: PathBuf::from("rsync"),
timeout: Duration::from_secs(60),
extra_args: Vec::new(),
mirror_root: None,
}
}
}
@ -37,6 +47,29 @@ impl SystemRsyncFetcher {
Self { config }
}
fn mirror_dst_dir(&self, normalized_rsync_base_uri: &str) -> Result<Option<PathBuf>, String> {
let Some(root) = self.config.mirror_root.as_ref() else {
return Ok(None);
};
std::fs::create_dir_all(root).map_err(|e| {
format!(
"create rsync mirror root failed: {}: {e}",
root.display()
)
})?;
let hash = hex::encode(sha2::Sha256::digest(normalized_rsync_base_uri.as_bytes()));
let dir = root.join(hash);
std::fs::create_dir_all(&dir).map_err(|e| {
format!(
"create rsync mirror directory failed: {}: {e}",
dir.display()
)
})?;
Ok(Some(dir))
}
fn run_rsync(&self, src: &str, dst: &Path) -> Result<(), String> {
// `--timeout` is I/O timeout in seconds (applies to network reads/writes).
let timeout_secs = self.config.timeout.as_secs().max(1).to_string();
@ -70,8 +103,17 @@ impl SystemRsyncFetcher {
impl RsyncFetcher for SystemRsyncFetcher {
fn fetch_objects(&self, rsync_base_uri: &str) -> RsyncFetchResult<Vec<(String, Vec<u8>)>> {
let base = normalize_rsync_base_uri(rsync_base_uri);
let tmp = TempDir::new().map_err(|e| RsyncFetchError::Fetch(e.to_string()))?;
if let Some(dst) = self
.mirror_dst_dir(&base)
.map_err(|e| RsyncFetchError::Fetch(e.to_string()))?
{
self.run_rsync(&base, &dst).map_err(RsyncFetchError::Fetch)?;
let mut out = Vec::new();
walk_dir_collect(&dst, &dst, &base, &mut out).map_err(RsyncFetchError::Fetch)?;
return Ok(out);
}
let tmp = TempDir::new().map_err(|e| RsyncFetchError::Fetch(e.to_string()))?;
self.run_rsync(&base, tmp.path())
.map_err(RsyncFetchError::Fetch)?;
@ -187,6 +229,7 @@ mod tests {
rsync_bin: PathBuf::from("/this/does/not/exist/rsync"),
timeout: Duration::from_secs(1),
extra_args: Vec::new(),
mirror_root: None,
});
let e = f
.run_rsync("rsync://example.net/repo/", dst.path())
@ -198,6 +241,7 @@ mod tests {
rsync_bin: PathBuf::from("false"),
timeout: Duration::from_secs(1),
extra_args: Vec::new(),
mirror_root: None,
});
let e = f
.run_rsync("rsync://example.net/repo/", dst.path())

View File

@ -1,5 +1,7 @@
pub mod data_model;
#[cfg(feature = "full")]
pub mod analysis;
#[cfg(feature = "full")]
pub mod audit;
#[cfg(feature = "full")]

View File

@ -105,6 +105,24 @@ impl RocksStore {
Ok(())
}
/// Store multiple raw objects in a single RocksDB write batch.
///
/// This is primarily used by rsync-based publication point sync to reduce write amplification
/// (many small `put_cf` calls) into one `DB::write()` call.
pub fn put_raw_batch(&self, objects: Vec<(String, Vec<u8>)>) -> StorageResult<usize> {
if objects.is_empty() {
return Ok(0);
}
let cf = self.cf(CF_RAW_OBJECTS)?;
let mut batch = WriteBatch::default();
for (rsync_uri, bytes) in &objects {
batch.put_cf(cf, rsync_uri.as_bytes(), bytes.as_slice());
}
self.write_batch(batch)?;
Ok(objects.len())
}
pub fn get_raw(&self, rsync_uri: &str) -> StorageResult<Option<Vec<u8>>> {
let cf = self.cf(CF_RAW_OBJECTS)?;
let v = self
@ -130,10 +148,7 @@ impl RocksStore {
Ok(())
}
pub fn get_fetch_cache_pp(
&self,
key: &FetchCachePpKey,
) -> StorageResult<Option<Vec<u8>>> {
pub fn get_fetch_cache_pp(&self, key: &FetchCachePpKey) -> StorageResult<Option<Vec<u8>>> {
let cf = self.cf(CF_FETCH_CACHE_PP)?;
let v = self
.db
@ -623,10 +638,7 @@ mod tests {
);
// Snapshot publishes two objects.
let published_v1 = vec![
(u1.clone(), vec![1u8, 2, 3]),
(u2.clone(), vec![9u8, 8, 7]),
];
let published_v1 = vec![(u1.clone(), vec![1u8, 2, 3]), (u2.clone(), vec![9u8, 8, 7])];
let n = store
.apply_rrdp_snapshot(notification_uri, &published_v1)
.expect("apply snapshot v1");
@ -676,7 +688,9 @@ mod tests {
rsync_uri: u3.clone(),
bytes: vec![4u8, 5, 6],
},
RrdpDeltaOp::Delete { rsync_uri: u2.clone() },
RrdpDeltaOp::Delete {
rsync_uri: u2.clone(),
},
];
let applied = store
.apply_rrdp_delta(notification_uri, &ops)
@ -772,7 +786,8 @@ mod tests {
let td = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(td.path()).expect("open rocksdb");
let key = FetchCachePpKey::from_manifest_rsync_uri("rsync://example.test/repo/pp/manifest.mft");
let key =
FetchCachePpKey::from_manifest_rsync_uri("rsync://example.test/repo/pp/manifest.mft");
let bytes = minimal_valid_pack().encode().expect("encode pack");
store
.put_fetch_cache_pp(&key, &bytes)
@ -783,6 +798,9 @@ mod tests {
.expect("iter all")
.map(|(k, _v)| String::from_utf8(k.to_vec()).expect("utf8 key"))
.collect();
assert!(keys.iter().any(|k| k == key.as_str()), "missing key in iterator");
assert!(
keys.iter().any(|k| k == key.as_str()),
"missing key in iterator"
);
}
}

View File

@ -1,10 +1,20 @@
use crate::analysis::timing::TimingHandle;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use crate::policy::{Policy, SyncPreference};
use crate::report::{RfcRef, Warning};
use crate::storage::RocksStore;
use crate::sync::rrdp::{
Fetcher as HttpFetcher, RrdpSyncError, sync_from_notification,
};
use crate::sync::rrdp::sync_from_notification_with_timing;
use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError};
use std::thread;
use std::time::Duration;
// Sleep durations between successive RRDP retry attempts in production
// builds; total attempts = backoffs.len() + 1 (see rrdp_retry_backoffs).
const RRDP_RETRY_BACKOFFS_PROD: [Duration; 3] = [
    Duration::from_millis(200),
    Duration::from_millis(500),
    Duration::from_secs(1),
];
// Zero-length backoffs keep the test suite fast while still driving the
// retry loop through multiple attempts.
const RRDP_RETRY_BACKOFFS_TEST: [Duration; 2] =
    [Duration::from_millis(0), Duration::from_millis(0)];
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RepoSyncSource {
@ -45,23 +55,37 @@ pub fn sync_publication_point(
rsync_base_uri: &str,
http_fetcher: &dyn HttpFetcher,
rsync_fetcher: &dyn RsyncFetcher,
timing: Option<&TimingHandle>,
) -> Result<RepoSyncResult, RepoSyncError> {
match (policy.sync_preference, rrdp_notification_uri) {
(SyncPreference::RrdpThenRsync, Some(notification_uri)) => {
match try_rrdp_sync(store, notification_uri, http_fetcher) {
Ok(written) => Ok(RepoSyncResult {
source: RepoSyncSource::Rrdp,
objects_written: written,
warnings: Vec::new(),
}),
match try_rrdp_sync_with_retry(store, notification_uri, http_fetcher, timing) {
Ok(written) => {
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rrdp_ok_total", 1);
t.record_count("repo_sync_rrdp_objects_written_total", written as u64);
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rrdp,
objects_written: written,
warnings: Vec::new(),
})
}
Err(err) => {
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rrdp_failed_total", 1);
}
let warnings = vec![
Warning::new(format!("RRDP failed; falling back to rsync: {err}"))
.with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5")])
.with_context(notification_uri),
];
let written =
rsync_sync_into_raw_objects(store, rsync_base_uri, rsync_fetcher)?;
rsync_sync_into_raw_objects(store, rsync_base_uri, rsync_fetcher, timing)?;
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rsync_fallback_ok_total", 1);
t.record_count("repo_sync_rsync_objects_written_total", written as u64);
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rsync,
objects_written: written,
@ -71,7 +95,12 @@ pub fn sync_publication_point(
}
}
_ => {
let written = rsync_sync_into_raw_objects(store, rsync_base_uri, rsync_fetcher)?;
let written =
rsync_sync_into_raw_objects(store, rsync_base_uri, rsync_fetcher, timing)?;
if let Some(t) = timing.as_ref() {
t.record_count("repo_sync_rsync_direct_total", 1);
t.record_count("repo_sync_rsync_objects_written_total", written as u64);
}
Ok(RepoSyncResult {
source: RepoSyncSource::Rsync,
objects_written: written,
@ -85,26 +114,505 @@ fn try_rrdp_sync(
store: &RocksStore,
notification_uri: &str,
http_fetcher: &dyn HttpFetcher,
timing: Option<&TimingHandle>,
) -> Result<usize, RrdpSyncError> {
let notification_xml = http_fetcher
.fetch(notification_uri)
.map_err(RrdpSyncError::Fetch)?;
let notification_xml = {
let _step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_notification"));
let _total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_fetch_notification_total"));
match http_fetcher.fetch(notification_uri) {
Ok(v) => {
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_notification_fetch_ok_total", 1);
}
v
}
Err(e) => {
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_notification_fetch_fail_total", 1);
}
return Err(RrdpSyncError::Fetch(e));
}
}
};
if let Some(t) = timing.as_ref() {
t.record_count(
"rrdp_notification_bytes_total",
notification_xml.len() as u64,
);
}
sync_from_notification(store, notification_uri, &notification_xml, http_fetcher)
sync_from_notification_with_timing(
store,
notification_uri,
&notification_xml,
http_fetcher,
timing,
)
}
/// Classify an HTTP fetch error message as transient (worth retrying) or not.
///
/// Retryable cases:
/// - transport-level failures ("http request failed:", "http read body failed:");
/// - "http status NNN …" where NNN is 408, 429, or any 5xx.
/// Anything else — including unparseable status codes — is treated as permanent.
fn is_retryable_http_fetch_error(msg: &str) -> bool {
    // Transport errors (connect/read failures) are always worth another try.
    let transport_markers = ["http request failed:", "http read body failed:"];
    if transport_markers.iter().any(|needle| msg.contains(needle)) {
        return true;
    }
    // Otherwise only retry on specific HTTP status codes.
    match msg.strip_prefix("http status ") {
        None => false,
        Some(tail) => {
            // First whitespace-separated token after the prefix is the code;
            // a non-numeric token falls back to 0 (never retryable).
            let status: u16 = tail
                .split_whitespace()
                .next()
                .and_then(|tok| tok.parse().ok())
                .unwrap_or(0);
            matches!(status, 408 | 429 | 500..=599)
        }
    }
}
/// Select the RRDP retry backoff schedule for the current build profile.
///
/// Test builds get zero-length backoffs so the retry loop runs instantly;
/// production builds use the staggered schedule.
fn rrdp_retry_backoffs() -> &'static [Duration] {
    if cfg!(test) {
        RRDP_RETRY_BACKOFFS_TEST.as_slice()
    } else {
        RRDP_RETRY_BACKOFFS_PROD.as_slice()
    }
}
/// Run `try_rrdp_sync`, retrying transient HTTP fetch failures with backoff.
///
/// Attempts = `rrdp_retry_backoffs().len() + 1`. Only `RrdpSyncError::Fetch`
/// errors whose message matches `is_retryable_http_fetch_error` are retried;
/// protocol and storage errors fail immediately. All attempts, sleeps,
/// successes-after-retry, terminal failures (by kind) and retry exhaustion are
/// recorded as counters on the optional `timing` handle.
fn try_rrdp_sync_with_retry(
    store: &RocksStore,
    notification_uri: &str,
    http_fetcher: &dyn HttpFetcher,
    timing: Option<&TimingHandle>,
) -> Result<usize, RrdpSyncError> {
    let backoffs = rrdp_retry_backoffs();
    // One initial attempt plus one per backoff entry; `.max(1)` guards an
    // empty schedule so the loop always runs at least once.
    let max_attempts = backoffs.len().saturating_add(1).max(1);
    let mut attempt: usize = 0;
    loop {
        attempt += 1;
        if let Some(t) = timing.as_ref() {
            t.record_count("rrdp_retry_attempt_total", 1);
        }
        match try_rrdp_sync(store, notification_uri, http_fetcher, timing) {
            Ok(written) => {
                // Only count a "retry success" when an earlier attempt failed.
                if attempt > 1 {
                    if let Some(t) = timing.as_ref() {
                        t.record_count("rrdp_retry_success_total", 1);
                    }
                }
                return Ok(written);
            }
            Err(err) => {
                // Only transient HTTP fetch failures are retried.
                let retryable = match &err {
                    RrdpSyncError::Fetch(msg) => is_retryable_http_fetch_error(msg),
                    _ => false,
                };
                if retryable && attempt < max_attempts {
                    if let Some(t) = timing.as_ref() {
                        t.record_count("rrdp_retry_sleep_total", 1);
                    }
                    // Backoff index is attempt-1; out-of-range (shouldn't
                    // happen given max_attempts) falls back to no sleep.
                    let backoff = backoffs
                        .get(attempt.saturating_sub(1))
                        .copied()
                        .unwrap_or_else(|| Duration::from_secs(0));
                    if !backoff.is_zero() {
                        thread::sleep(backoff);
                    }
                    continue;
                }
                // Terminal failure: classify it for metrics before returning.
                if let Some(t) = timing.as_ref() {
                    match &err {
                        RrdpSyncError::Fetch(_) => t.record_count("rrdp_failed_fetch_total", 1),
                        RrdpSyncError::Rrdp(_) => t.record_count("rrdp_failed_protocol_total", 1),
                        RrdpSyncError::Storage(_) => t.record_count("rrdp_failed_storage_total", 1),
                    }
                    // "Exhausted" means the error was retryable but we ran out
                    // of attempts after actually retrying at least once.
                    if retryable && attempt >= max_attempts && attempt > 1 {
                        t.record_count("rrdp_retry_exhausted_total", 1);
                    }
                }
                return Err(err);
            }
        }
    }
}
fn rsync_sync_into_raw_objects(
store: &RocksStore,
rsync_base_uri: &str,
rsync_fetcher: &dyn RsyncFetcher,
timing: Option<&TimingHandle>,
) -> Result<usize, RepoSyncError> {
let _s = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(rsync_base_uri, "rsync_fetch_objects"));
let _p = timing.as_ref().map(|t| t.span_phase("rsync_fetch_total"));
let objects = rsync_fetcher.fetch_objects(rsync_base_uri)?;
let mut written = 0usize;
for (rsync_uri, bytes) in objects {
store
.put_raw(&rsync_uri, &bytes)
.map_err(|e| RepoSyncError::Storage(e.to_string()))?;
written += 1;
if let Some(t) = timing.as_ref() {
t.record_count("rsync_objects_fetched_total", objects.len() as u64);
let bytes_total: u64 = objects.iter().map(|(_u, b)| b.len() as u64).sum::<u64>();
t.record_count("rsync_objects_bytes_total", bytes_total);
}
drop(_p);
let _w = timing
.as_ref()
.map(|t| t.span_phase("rsync_write_raw_objects_total"));
store
.put_raw_batch(objects)
.map_err(|e| RepoSyncError::Storage(e.to_string()))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::analysis::timing::{TimingHandle, TimingMeta};
use crate::fetch::rsync::LocalDirRsyncFetcher;
use crate::sync::rrdp::Fetcher as HttpFetcher;
use base64::Engine;
use sha2::Digest;
use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
    // HTTP fetcher for rsync-only test scenarios: any call is a test failure.
    struct DummyHttpFetcher;
    impl HttpFetcher for DummyHttpFetcher {
        fn fetch(&self, _url: &str) -> Result<Vec<u8>, String> {
            // RsyncOnly policy must never touch HTTP; panic makes that visible.
            panic!("http fetcher must not be used in rsync-only mode")
        }
    }
    // Rsync fetcher for RRDP-only test scenarios: any call is a test failure.
    struct PanicRsyncFetcher;
    impl RsyncFetcher for PanicRsyncFetcher {
        fn fetch_objects(
            &self,
            _rsync_base_uri: &str,
        ) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
            // Used to prove RRDP succeeded without falling back to rsync.
            panic!("rsync must not be used in this test")
        }
    }
    // In-memory HTTP fetcher serving canned response bodies keyed by exact URI.
    struct MapFetcher {
        // URI -> response body; any URI not present yields an error.
        map: HashMap<String, Vec<u8>>,
    }
    impl HttpFetcher for MapFetcher {
        fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
            self.map
                .get(uri)
                .cloned()
                .ok_or_else(|| format!("not found: {uri}"))
        }
    }
fn notification_xml(
session_id: &str,
serial: u64,
snapshot_uri: &str,
snapshot_hash: &str,
) -> Vec<u8> {
format!(
r#"<notification xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session_id}" serial="{serial}"><snapshot uri="{snapshot_uri}" hash="{snapshot_hash}"/></notification>"#
)
.into_bytes()
}
fn snapshot_xml(session_id: &str, serial: u64, published: &[(&str, &[u8])]) -> Vec<u8> {
let mut out = format!(
r#"<snapshot xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="{session_id}" serial="{serial}">"#
);
for (uri, bytes) in published {
let b64 = base64::engine::general_purpose::STANDARD.encode(bytes);
out.push_str(&format!(r#"<publish uri="{uri}">{b64}</publish>"#));
}
out.push_str("</snapshot>");
out.into_bytes()
}
fn timing_to_json(temp_dir: &std::path::Path, timing: &TimingHandle) -> serde_json::Value {
let timing_path = temp_dir.join("timing_retry.json");
timing.write_json(&timing_path, 50).expect("write json");
serde_json::from_slice(&std::fs::read(&timing_path).expect("read json"))
.expect("parse json")
}
#[test]
fn rsync_sync_writes_raw_objects_with_batch_and_records_counts() {
let temp = tempfile::tempdir().expect("tempdir");
let repo_dir = temp.path().join("repo");
std::fs::create_dir_all(repo_dir.join("sub")).expect("mkdir");
std::fs::write(repo_dir.join("a.mft"), b"mft").expect("write");
std::fs::write(repo_dir.join("sub").join("b.roa"), b"roa").expect("write");
std::fs::write(repo_dir.join("sub").join("c.cer"), b"cer").expect("write");
let store_dir = temp.path().join("db");
let store = RocksStore::open(&store_dir).expect("open rocksdb");
let timing = TimingHandle::new(TimingMeta {
recorded_at_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
validation_time_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
tal_url: None,
db_path: Some(store_dir.to_string_lossy().into_owned()),
});
let policy = Policy {
sync_preference: SyncPreference::RsyncOnly,
..Policy::default()
};
let http = DummyHttpFetcher;
let rsync = LocalDirRsyncFetcher::new(&repo_dir);
let out = sync_publication_point(
&store,
&policy,
None,
"rsync://example.test/repo/",
&http,
&rsync,
Some(&timing),
)
.expect("sync ok");
assert_eq!(out.source, RepoSyncSource::Rsync);
assert_eq!(out.objects_written, 3);
assert_eq!(
store.get_raw("rsync://example.test/repo/a.mft").unwrap(),
Some(b"mft".to_vec())
);
assert_eq!(
store
.get_raw("rsync://example.test/repo/sub/b.roa")
.unwrap(),
Some(b"roa".to_vec())
);
assert_eq!(
store
.get_raw("rsync://example.test/repo/sub/c.cer")
.unwrap(),
Some(b"cer".to_vec())
);
let timing_path = temp.path().join("timing.json");
timing.write_json(&timing_path, 5).expect("write json");
let v: serde_json::Value =
serde_json::from_slice(&std::fs::read(&timing_path).expect("read json"))
.expect("parse json");
let counts = v.get("counts").expect("counts");
assert_eq!(
counts
.get("rsync_objects_fetched_total")
.and_then(|v| v.as_u64()),
Some(3)
);
assert_eq!(
counts
.get("rsync_objects_bytes_total")
.and_then(|v| v.as_u64()),
Some(3 * 3)
);
}
#[test]
fn rrdp_retry_succeeds_without_rsync_when_notification_fetch_is_transient() {
let temp = tempfile::tempdir().expect("tempdir");
let store_dir = temp.path().join("db");
let store = RocksStore::open(&store_dir).expect("open rocksdb");
let timing = TimingHandle::new(TimingMeta {
recorded_at_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
validation_time_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
tal_url: None,
db_path: Some(store_dir.to_string_lossy().into_owned()),
});
let notification_uri = "https://example.test/notification.xml";
let snapshot_uri = "https://example.test/snapshot.xml";
let published_uri = "rsync://example.test/repo/a.mft";
let published_bytes = b"x";
let snapshot = snapshot_xml(
"9df4b597-af9e-4dca-bdda-719cce2c4e28",
1,
&[(published_uri, published_bytes)],
);
let snapshot_hash = hex::encode(sha2::Sha256::digest(&snapshot));
let notif = notification_xml(
"9df4b597-af9e-4dca-bdda-719cce2c4e28",
1,
snapshot_uri,
&snapshot_hash,
);
let mut map = HashMap::new();
map.insert(notification_uri.to_string(), notif);
map.insert(snapshot_uri.to_string(), snapshot);
struct RetryThenMap {
inner: MapFetcher,
notification_uri: String,
fail_times: usize,
notification_calls: AtomicUsize,
}
impl HttpFetcher for RetryThenMap {
fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
if uri == self.notification_uri {
let n = self.notification_calls.fetch_add(1, Ordering::SeqCst);
if n < self.fail_times {
return Err("http request failed: simulated transient".to_string());
}
}
self.inner.fetch(uri)
}
}
let http = RetryThenMap {
inner: MapFetcher { map },
notification_uri: notification_uri.to_string(),
fail_times: 2,
notification_calls: AtomicUsize::new(0),
};
let policy = Policy {
sync_preference: SyncPreference::RrdpThenRsync,
..Policy::default()
};
let out = sync_publication_point(
&store,
&policy,
Some(notification_uri),
"rsync://example.test/repo/",
&http,
&PanicRsyncFetcher,
Some(&timing),
)
.expect("sync ok");
assert_eq!(out.source, RepoSyncSource::Rrdp);
assert_eq!(
store.get_raw(published_uri).unwrap(),
Some(published_bytes.to_vec())
);
let v = timing_to_json(temp.path(), &timing);
let counts = v.get("counts").expect("counts");
assert_eq!(
counts
.get("rrdp_retry_attempt_total")
.and_then(|v| v.as_u64()),
Some(3)
);
assert_eq!(
counts
.get("rrdp_retry_success_total")
.and_then(|v| v.as_u64()),
Some(1)
);
assert_eq!(
counts.get("repo_sync_rrdp_ok_total").and_then(|v| v.as_u64()),
Some(1)
);
}
#[test]
fn rrdp_protocol_error_does_not_retry_and_falls_back_to_rsync() {
    // Scenario: the notification advertises a snapshot hash that cannot match
    // the served snapshot, so RRDP fails with a protocol error rather than a
    // transient transport error. Protocol errors must not be retried; the
    // publication point should be served via the rsync fallback instead.
    let temp = tempfile::tempdir().expect("tempdir");
    let store_dir = temp.path().join("db");
    let store = RocksStore::open(&store_dir).expect("open rocksdb");
    // Timing handle collects the retry/failure/fallback counters asserted below.
    let timing = TimingHandle::new(TimingMeta {
        recorded_at_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
        validation_time_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
        tal_url: None,
        db_path: Some(store_dir.to_string_lossy().into_owned()),
    });
    let notification_uri = "https://example.test/notification.xml";
    let snapshot_uri = "https://example.test/snapshot.xml";
    let published_uri = "rsync://example.test/repo/a.mft";
    let published_bytes = b"x";
    let snapshot = snapshot_xml(
        "9df4b597-af9e-4dca-bdda-719cce2c4e28",
        1,
        &[(published_uri, published_bytes)],
    );
    // Intentionally wrong hash to trigger protocol error (SnapshotHashMismatch).
    let notif = notification_xml(
        "9df4b597-af9e-4dca-bdda-719cce2c4e28",
        1,
        snapshot_uri,
        "00",
    );
    let mut map = HashMap::new();
    map.insert(notification_uri.to_string(), notif);
    map.insert(snapshot_uri.to_string(), snapshot);
    let http = MapFetcher { map };
    // Rsync fallback target: returns no objects, which still counts as a
    // successful rsync sync at this layer.
    struct EmptyRsyncFetcher;
    impl RsyncFetcher for EmptyRsyncFetcher {
        fn fetch_objects(
            &self,
            _rsync_base_uri: &str,
        ) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
            Ok(Vec::new())
        }
    }
    let policy = Policy {
        sync_preference: SyncPreference::RrdpThenRsync,
        ..Policy::default()
    };
    let out = sync_publication_point(
        &store,
        &policy,
        Some(notification_uri),
        "rsync://example.test/repo/",
        &http,
        &EmptyRsyncFetcher,
        Some(&timing),
    )
    .expect("sync ok");
    // The publication point must have been served by the rsync fallback, with
    // a warning recording why RRDP was abandoned.
    assert_eq!(out.source, RepoSyncSource::Rsync);
    assert!(
        out.warnings
            .iter()
            .any(|w| w.message.contains("RRDP failed; falling back to rsync")),
        "expected RRDP fallback warning"
    );
    let v = timing_to_json(temp.path(), &timing);
    let counts = v.get("counts").expect("counts");
    // Exactly one attempt: protocol errors are terminal and never retried.
    assert_eq!(
        counts
            .get("rrdp_retry_attempt_total")
            .and_then(|v| v.as_u64()),
        Some(1)
    );
    assert_eq!(
        counts
            .get("rrdp_failed_protocol_total")
            .and_then(|v| v.as_u64()),
        Some(1)
    );
    assert_eq!(
        counts
            .get("repo_sync_rrdp_failed_total")
            .and_then(|v| v.as_u64()),
        Some(1)
    );
    assert_eq!(
        counts
            .get("repo_sync_rsync_fallback_ok_total")
            .and_then(|v| v.as_u64()),
        Some(1)
    );
}
Ok(written)
}

View File

@ -1,3 +1,4 @@
use crate::analysis::timing::TimingHandle;
use crate::storage::RocksStore;
use crate::storage::RrdpDeltaOp;
use base64::Engine;
@ -93,9 +94,7 @@ pub enum RrdpError {
#[error("snapshot serial mismatch: expected {expected}, got {got} (RFC 8182 §3.5.2.3)")]
SnapshotSerialMismatch { expected: u64, got: u64 },
#[error(
"delta file hash mismatch (RFC 8182 §3.4.2; RFC 8182 §3.5.1.3)"
)]
#[error("delta file hash mismatch (RFC 8182 §3.4.2; RFC 8182 §3.5.1.3)")]
DeltaHashMismatch,
#[error("delta session_id mismatch: expected {expected}, got {got} (RFC 8182 §3.5.3.3)")]
@ -104,14 +103,10 @@ pub enum RrdpError {
#[error("delta serial mismatch: expected {expected}, got {got} (RFC 8182 §3.5.3.3)")]
DeltaSerialMismatch { expected: u64, got: u64 },
#[error(
"notification serial moved backwards: old={old} new={new} (RFC 8182 §3.4.1)"
)]
#[error("notification serial moved backwards: old={old} new={new} (RFC 8182 §3.4.1)")]
NotificationSerialRollback { old: u64, new: u64 },
#[error(
"delta publish without @hash for existing object: {rsync_uri} (RFC 8182 §3.4.2)"
)]
#[error("delta publish without @hash for existing object: {rsync_uri} (RFC 8182 §3.4.2)")]
DeltaPublishWithoutHashForExisting { rsync_uri: String },
#[error(
@ -122,9 +117,7 @@ pub enum RrdpError {
#[error("delta withdraw/replace target missing in local cache: {rsync_uri} (RFC 8182 §3.4.2)")]
DeltaTargetMissing { rsync_uri: String },
#[error(
"delta withdraw/replace target hash mismatch: {rsync_uri} (RFC 8182 §3.4.2)"
)]
#[error("delta withdraw/replace target hash mismatch: {rsync_uri} (RFC 8182 §3.4.2)")]
DeltaTargetHashMismatch { rsync_uri: String },
#[error("publish/@uri missing (RFC 8182 §3.5.2.3)")]
@ -145,7 +138,9 @@ pub enum RrdpError {
#[error("delta file <publish> base64 decode failed (RFC 8182 §3.5.3.3): {0}")]
DeltaPublishBase64(String),
#[error("delta file <publish> @hash must be hex encoding of SHA-256, got {0} (RFC 8182 §3.5.3.3)")]
#[error(
"delta file <publish> @hash must be hex encoding of SHA-256, got {0} (RFC 8182 §3.5.3.3)"
)]
DeltaPublishHashInvalid(String),
#[error("delta file <withdraw> missing @uri (RFC 8182 §3.5.3.3)")]
@ -154,7 +149,9 @@ pub enum RrdpError {
#[error("delta file <withdraw> missing @hash (RFC 8182 §3.5.3.3)")]
DeltaWithdrawHashMissing,
#[error("delta file <withdraw> @hash must be hex encoding of SHA-256, got {0} (RFC 8182 §3.5.3.3)")]
#[error(
"delta file <withdraw> @hash must be hex encoding of SHA-256, got {0} (RFC 8182 §3.5.3.3)"
)]
DeltaWithdrawHashInvalid(String),
#[error("delta file <withdraw> must not contain text content (RFC 8182 §3.5.3.3)")]
@ -296,9 +293,7 @@ pub fn parse_notification(xml: &[u8]) -> Result<Notification, RrdpError> {
});
}
let uri = d.attribute("uri").ok_or(RrdpError::DeltaRefUriMissing)?;
let hash = d
.attribute("hash")
.ok_or(RrdpError::DeltaRefHashMissing)?;
let hash = d.attribute("hash").ok_or(RrdpError::DeltaRefHashMissing)?;
let hash_sha256 = parse_sha256_hex_delta_ref(hash)?;
deltas.push(NotificationDeltaRef {
@ -377,7 +372,10 @@ pub fn parse_delta_file(xml: &[u8]) -> Result<DeltaFile, RrdpError> {
.attribute("uri")
.ok_or(RrdpError::DeltaPublishUriMissing)?
.to_string();
let hash_sha256 = child.attribute("hash").map(parse_sha256_hex_delta_publish).transpose()?;
let hash_sha256 = child
.attribute("hash")
.map(parse_sha256_hex_delta_publish)
.transpose()?;
let content_b64 =
collect_element_text(&child).ok_or(RrdpError::DeltaPublishContentMissing)?;
@ -400,7 +398,9 @@ pub fn parse_delta_file(xml: &[u8]) -> Result<DeltaFile, RrdpError> {
.attribute("uri")
.ok_or(RrdpError::DeltaWithdrawUriMissing)?
.to_string();
let hash = child.attribute("hash").ok_or(RrdpError::DeltaWithdrawHashMissing)?;
let hash = child
.attribute("hash")
.ok_or(RrdpError::DeltaWithdrawHashMissing)?;
let hash_sha256 = parse_sha256_hex_delta_withdraw(hash)?;
if let Some(s) = collect_element_text(&child) {
@ -432,19 +432,89 @@ pub fn sync_from_notification_snapshot(
notification_xml: &[u8],
fetcher: &dyn Fetcher,
) -> RrdpSyncResult<usize> {
let notif = parse_notification_snapshot(notification_xml)?;
let snapshot_xml = fetcher
.fetch(&notif.snapshot_uri)
.map_err(RrdpSyncError::Fetch)?;
sync_from_notification_snapshot_inner(store, notification_uri, notification_xml, fetcher, None)
}
/// Timing-aware variant of `sync_from_notification_snapshot`.
///
/// Behaves identically to the plain entry point but forwards `timing` to the
/// shared inner implementation so per-step spans and counters (notification
/// parse, snapshot fetch/hash/apply, state write) are recorded when a
/// `TimingHandle` is supplied.
pub fn sync_from_notification_snapshot_with_timing(
    store: &RocksStore,
    notification_uri: &str,
    notification_xml: &[u8],
    fetcher: &dyn Fetcher,
    timing: Option<&TimingHandle>,
) -> RrdpSyncResult<usize> {
    sync_from_notification_snapshot_inner(
        store,
        notification_uri,
        notification_xml,
        fetcher,
        timing,
    )
}
fn sync_from_notification_snapshot_inner(
store: &RocksStore,
notification_uri: &str,
notification_xml: &[u8],
fetcher: &dyn Fetcher,
timing: Option<&TimingHandle>,
) -> RrdpSyncResult<usize> {
let _parse_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "parse_notification_snapshot"));
let _parse_total = timing.as_ref().map(|t| t.span_phase("rrdp_parse_notification_total"));
let notif = parse_notification_snapshot(notification_xml)?;
drop(_parse_step);
drop(_parse_total);
let _fetch_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_snapshot"));
let _fetch_total = timing.as_ref().map(|t| t.span_phase("rrdp_fetch_snapshot_total"));
let snapshot_xml = fetcher.fetch(&notif.snapshot_uri).map_err(|e| {
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_snapshot_fetch_fail_total", 1);
}
RrdpSyncError::Fetch(e)
})?;
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_snapshot_fetch_ok_total", 1);
t.record_count("rrdp_snapshot_bytes_total", snapshot_xml.len() as u64);
}
drop(_fetch_step);
drop(_fetch_total);
let _hash_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "hash_snapshot"));
let _hash_total = timing.as_ref().map(|t| t.span_phase("rrdp_hash_snapshot_total"));
let computed = sha2::Sha256::digest(&snapshot_xml);
if computed.as_slice() != notif.snapshot_hash_sha256.as_slice() {
return Err(RrdpError::SnapshotHashMismatch.into());
}
drop(_hash_step);
drop(_hash_total);
let published =
apply_snapshot(store, notification_uri, &snapshot_xml, notif.session_id, notif.serial)?;
let _apply_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "apply_snapshot"));
let _apply_total = timing.as_ref().map(|t| t.span_phase("rrdp_apply_snapshot_total"));
let published = apply_snapshot(
store,
notification_uri,
&snapshot_xml,
notif.session_id,
notif.serial,
)?;
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_snapshot_objects_applied_total", published as u64);
}
drop(_apply_step);
drop(_apply_total);
let _write_state_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "write_state"));
let _write_state_total = timing.as_ref().map(|t| t.span_phase("rrdp_write_state_total"));
let state = RrdpState {
session_id: notif.session_id.to_string(),
serial: notif.serial,
@ -453,6 +523,8 @@ pub fn sync_from_notification_snapshot(
store
.put_rrdp_state(notification_uri, &bytes)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
drop(_write_state_step);
drop(_write_state_total);
Ok(published)
}
@ -463,12 +535,50 @@ pub fn sync_from_notification(
notification_xml: &[u8],
fetcher: &dyn Fetcher,
) -> RrdpSyncResult<usize> {
let notif = parse_notification(notification_xml)?;
sync_from_notification_inner(store, notification_uri, notification_xml, fetcher, None)
}
/// Timing-aware variant of `sync_from_notification`.
///
/// Delegates to the shared inner implementation, passing `timing` through so
/// the delta fetch/apply path and the snapshot fallback path are instrumented
/// when a `TimingHandle` is supplied.
pub fn sync_from_notification_with_timing(
    store: &RocksStore,
    notification_uri: &str,
    notification_xml: &[u8],
    fetcher: &dyn Fetcher,
    timing: Option<&TimingHandle>,
) -> RrdpSyncResult<usize> {
    sync_from_notification_inner(store, notification_uri, notification_xml, fetcher, timing)
}
fn sync_from_notification_inner(
store: &RocksStore,
notification_uri: &str,
notification_xml: &[u8],
fetcher: &dyn Fetcher,
timing: Option<&TimingHandle>,
) -> RrdpSyncResult<usize> {
let _parse_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "parse_notification"));
let _parse_total = timing.as_ref().map(|t| t.span_phase("rrdp_parse_notification_total"));
let notif = parse_notification(notification_xml)?;
drop(_parse_step);
drop(_parse_total);
if let Some(t) = timing.as_ref() {
t.record_count(
"rrdp_notification_delta_refs_total",
notif.deltas.len() as u64,
);
}
let _read_state_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "read_state"));
let _read_state_total = timing.as_ref().map(|t| t.span_phase("rrdp_read_state_total"));
let state = store
.get_rrdp_state(notification_uri)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?
.and_then(|bytes| RrdpState::decode(&bytes).ok());
drop(_read_state_step);
drop(_read_state_total);
let same_session_state = state
.as_ref()
@ -501,10 +611,18 @@ pub fn sync_from_notification(
if max_serial == notif.serial && want_first >= min_serial {
// Fetch all required delta files first so a network failure doesn't leave us with
// partially applied deltas and no snapshot fallback.
let _fetch_d_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_deltas"));
let _fetch_d_total =
timing.as_ref().map(|t| t.span_phase("rrdp_fetch_deltas_total"));
let mut fetched: Vec<(u64, [u8; 32], Vec<u8>)> =
Vec::with_capacity((want_last - want_first + 1) as usize);
let mut fetch_ok = true;
for serial in want_first..=want_last {
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_delta_fetch_attempted_total", 1);
}
let idx = (serial - min_serial) as usize;
let dref = match notif.deltas.get(idx) {
Some(v) if v.serial == serial => v,
@ -515,15 +633,31 @@ pub fn sync_from_notification(
};
match fetcher.fetch(&dref.uri) {
Ok(bytes) => fetched.push((serial, dref.hash_sha256, bytes)),
Ok(bytes) => {
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_delta_fetch_ok_total", 1);
t.record_count("rrdp_delta_bytes_total", bytes.len() as u64);
}
fetched.push((serial, dref.hash_sha256, bytes))
}
Err(_) => {
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_delta_fetch_fail_total", 1);
}
fetch_ok = false;
break;
}
}
}
drop(_fetch_d_step);
drop(_fetch_d_total);
if fetch_ok {
let _apply_d_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "apply_deltas"));
let _apply_d_total =
timing.as_ref().map(|t| t.span_phase("rrdp_apply_deltas_total"));
let mut applied_total = 0usize;
let mut ok = true;
for (serial, expected_hash, bytes) in &fetched {
@ -542,8 +676,15 @@ pub fn sync_from_notification(
}
}
}
drop(_apply_d_step);
drop(_apply_d_total);
if ok {
let _write_state_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "write_state"));
let _write_state_total =
timing.as_ref().map(|t| t.span_phase("rrdp_write_state_total"));
let new_state = RrdpState {
session_id: notif.session_id.to_string(),
serial: notif.serial,
@ -552,6 +693,11 @@ pub fn sync_from_notification(
store
.put_rrdp_state(notification_uri, &bytes)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
drop(_write_state_step);
drop(_write_state_total);
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_delta_ops_applied_total", applied_total as u64);
}
return Ok(applied_total);
}
}
@ -560,18 +706,55 @@ pub fn sync_from_notification(
}
// Snapshot fallback (RFC 8182 §3.4.3).
let snapshot_xml = fetcher
.fetch(&notif.snapshot_uri)
.map_err(RrdpSyncError::Fetch)?;
let _fetch_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_snapshot"));
let _fetch_total = timing.as_ref().map(|t| t.span_phase("rrdp_fetch_snapshot_total"));
let snapshot_xml = fetcher.fetch(&notif.snapshot_uri).map_err(|e| {
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_snapshot_fetch_fail_total", 1);
}
RrdpSyncError::Fetch(e)
})?;
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_snapshot_fetch_ok_total", 1);
t.record_count("rrdp_snapshot_bytes_total", snapshot_xml.len() as u64);
}
drop(_fetch_step);
drop(_fetch_total);
let _hash_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "hash_snapshot"));
let _hash_total = timing.as_ref().map(|t| t.span_phase("rrdp_hash_snapshot_total"));
let computed = sha2::Sha256::digest(&snapshot_xml);
if computed.as_slice() != notif.snapshot_hash_sha256.as_slice() {
return Err(RrdpError::SnapshotHashMismatch.into());
}
drop(_hash_step);
drop(_hash_total);
let published =
apply_snapshot(store, notification_uri, &snapshot_xml, notif.session_id, notif.serial)?;
let _apply_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "apply_snapshot"));
let _apply_total = timing.as_ref().map(|t| t.span_phase("rrdp_apply_snapshot_total"));
let published = apply_snapshot(
store,
notification_uri,
&snapshot_xml,
notif.session_id,
notif.serial,
)?;
if let Some(t) = timing.as_ref() {
t.record_count("rrdp_snapshot_objects_applied_total", published as u64);
}
drop(_apply_step);
drop(_apply_total);
let _write_state_step = timing
.as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "write_state"));
let _write_state_total = timing.as_ref().map(|t| t.span_phase("rrdp_write_state_total"));
let new_state = RrdpState {
session_id: notif.session_id.to_string(),
serial: notif.serial,
@ -580,6 +763,8 @@ pub fn sync_from_notification(
store
.put_rrdp_state(notification_uri, &bytes)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
drop(_write_state_step);
drop(_write_state_total);
Ok(published)
}
@ -635,9 +820,7 @@ fn apply_delta(
})?;
let old_computed = sha2::Sha256::digest(old_bytes.as_slice());
if old_computed.as_slice() != old_hash.as_slice() {
return Err(
RrdpError::DeltaTargetHashMismatch { rsync_uri: uri }.into(),
);
return Err(RrdpError::DeltaTargetHashMismatch { rsync_uri: uri }.into());
}
ops.push(RrdpDeltaOp::Upsert {
@ -678,9 +861,7 @@ fn apply_delta(
})?;
let old_computed = sha2::Sha256::digest(old_bytes.as_slice());
if old_computed.as_slice() != hash_sha256.as_slice() {
return Err(
RrdpError::DeltaTargetHashMismatch { rsync_uri: uri }.into(),
);
return Err(RrdpError::DeltaTargetHashMismatch { rsync_uri: uri }.into());
}
ops.push(RrdpDeltaOp::Delete { rsync_uri: uri });
}
@ -831,7 +1012,10 @@ fn strip_all_ascii_whitespace(s: &str) -> String {
#[cfg(test)]
mod tests {
use super::*;
use crate::analysis::timing::{TimingHandle, TimingMeta};
use crate::storage::RocksStore;
use std::collections::HashMap;
use std::time::Duration;
struct MapFetcher {
map: HashMap<String, Vec<u8>>,
@ -846,7 +1030,27 @@ mod tests {
}
}
fn notification_xml(session_id: &str, serial: u64, snapshot_uri: &str, snapshot_hash: &str) -> Vec<u8> {
// Test fetcher that injects an artificial delay before serving one specific
// URI, so the timing span wrapping that fetch has a measurable duration.
struct SleepyFetcher {
    inner: MapFetcher,
    // The single URI whose fetch is delayed.
    sleep_uri: String,
    // How long to sleep before delegating to `inner`.
    sleep: Duration,
}
impl Fetcher for SleepyFetcher {
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
        if uri == self.sleep_uri {
            std::thread::sleep(self.sleep);
        }
        self.inner.fetch(uri)
    }
}
fn notification_xml(
session_id: &str,
serial: u64,
snapshot_uri: &str,
snapshot_hash: &str,
) -> Vec<u8> {
format!(
r#"<notification xmlns="{RRDP_XMLNS}" version="1" session_id="{session_id}" serial="{serial}"><snapshot uri="{snapshot_uri}" hash="{snapshot_hash}"/></notification>"#
)
@ -884,6 +1088,71 @@ mod tests {
out.into_bytes()
}
#[test]
fn timing_rrdp_repo_step_spans_cover_snapshot_fetch_duration() {
    // Verifies that the per-repo "fetch_snapshot" timing span actually covers
    // the time spent inside Fetcher::fetch, by delaying the snapshot fetch and
    // asserting the recorded span duration reflects the delay.
    let temp = tempfile::tempdir().expect("tempdir");
    let store_dir = temp.path().join("db");
    let store = RocksStore::open(&store_dir).expect("open rocksdb");
    let notification_uri = "https://example.test/notification.xml";
    let snapshot_uri = "https://example.test/snapshot.xml";
    let published_uri = "rsync://example.test/repo/a.mft";
    let published_bytes = b"x";
    let session_id = "550e8400-e29b-41d4-a716-446655440000";
    let snapshot = snapshot_xml(session_id, 1, &[(published_uri, published_bytes)]);
    let snapshot_hash = hex::encode(sha2::Sha256::digest(&snapshot));
    let notif = notification_xml(session_id, 1, snapshot_uri, &snapshot_hash);
    let mut map = HashMap::new();
    map.insert(snapshot_uri.to_string(), snapshot);
    // Delay only the snapshot fetch by 25ms.
    let fetcher = SleepyFetcher {
        inner: MapFetcher { map },
        sleep_uri: snapshot_uri.to_string(),
        sleep: Duration::from_millis(25),
    };
    let timing = TimingHandle::new(TimingMeta {
        recorded_at_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
        validation_time_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
        tal_url: None,
        db_path: Some(store_dir.to_string_lossy().into_owned()),
    });
    sync_from_notification_snapshot_with_timing(
        &store,
        notification_uri,
        &notif,
        &fetcher,
        Some(&timing),
    )
    .expect("rrdp snapshot sync ok");
    // Serialize the timing report and locate the fetch_snapshot step entry
    // keyed by "<notification_uri>::fetch_snapshot".
    let timing_path = temp.path().join("timing.json");
    timing.write_json(&timing_path, 200).expect("write timing");
    let rep: serde_json::Value =
        serde_json::from_slice(&std::fs::read(&timing_path).expect("read timing"))
            .expect("parse timing");
    let want = format!("{notification_uri}::fetch_snapshot");
    let steps = rep
        .get("top_rrdp_repo_steps")
        .and_then(|v| v.as_array())
        .expect("top_rrdp_repo_steps array");
    let entry = steps
        .iter()
        .find(|e| e.get("key").and_then(|k| k.as_str()) == Some(want.as_str()))
        .unwrap_or_else(|| panic!("missing timing step entry for {want}"));
    let nanos = entry
        .get("total_nanos")
        .and_then(|v| v.as_u64())
        .expect("total_nanos");
    // 20ms threshold leaves headroom under the injected 25ms sleep so the
    // assertion is robust to scheduling jitter.
    assert!(
        nanos >= 20_000_000,
        "expected fetch_snapshot timing to include the fetch duration; got {nanos}ns"
    );
}
#[test]
fn parse_notification_snapshot_rejects_non_ascii() {
let mut xml = b"<notification/>".to_vec();
@ -991,7 +1260,11 @@ mod tests {
assert_eq!(d.serial, serial);
assert_eq!(d.elements.len(), 2);
match &d.elements[0] {
DeltaElement::Publish { uri, hash_sha256, bytes } => {
DeltaElement::Publish {
uri,
hash_sha256,
bytes,
} => {
assert_eq!(uri, "rsync://example.net/repo/a.mft");
assert_eq!(*hash_sha256, None);
assert_eq!(bytes, publish_bytes);
@ -1046,7 +1319,8 @@ mod tests {
let fetcher = MapFetcher {
map: HashMap::from([(snapshot_uri.to_string(), snapshot)]),
};
sync_from_notification_snapshot(&store, notif_uri, &notif, &fetcher).expect("sync snapshot");
sync_from_notification_snapshot(&store, notif_uri, &notif, &fetcher)
.expect("sync snapshot");
let old_b = store
.get_raw("rsync://example.net/repo/b.roa")
@ -1135,7 +1409,10 @@ mod tests {
let mut wrong = [0u8; 32];
wrong[0] = 1;
let err = apply_delta(&store, notif_uri, &delta, wrong, sid, 1).unwrap_err();
assert!(matches!(err, RrdpSyncError::Rrdp(RrdpError::DeltaHashMismatch)));
assert!(matches!(
err,
RrdpSyncError::Rrdp(RrdpError::DeltaHashMismatch)
));
}
#[test]
@ -1311,8 +1588,7 @@ mod tests {
// Session mismatch.
let other_sid = Uuid::parse_str("550e8400-e29b-41d4-a716-446655440001").unwrap();
let err =
apply_delta(&store, notif_uri, &delta, expected_hash, other_sid, 2).unwrap_err();
let err = apply_delta(&store, notif_uri, &delta, expected_hash, other_sid, 2).unwrap_err();
assert!(matches!(
err,
RrdpSyncError::Rrdp(RrdpError::DeltaSessionIdMismatch { .. })
@ -1359,8 +1635,8 @@ mod tests {
map: HashMap::from([(snapshot_uri.to_string(), snapshot.clone())]),
};
let published = sync_from_notification_snapshot(&store, notif_uri, &notif, &fetcher)
.expect("sync");
let published =
sync_from_notification_snapshot(&store, notif_uri, &notif, &fetcher).expect("sync");
assert_eq!(published, 2);
let a = store
@ -1431,7 +1707,10 @@ mod tests {
let a = store
.get_raw("rsync://example.net/repo/a.mft")
.expect("get_raw");
assert!(a.is_none(), "a should be deleted by full-state snapshot apply");
assert!(
a.is_none(),
"a should be deleted by full-state snapshot apply"
);
let b = store
.get_raw("rsync://example.net/repo/b.roa")
@ -1502,8 +1781,18 @@ mod tests {
snapshot_uri_3,
&snapshot_hash_3,
&[
("d3", 3, "https://example.net/delta-3.xml", &delta_3_hash_hex),
("d2", 2, "https://example.net/delta-2.xml", &delta_2_hash_hex),
(
"d3",
3,
"https://example.net/delta-3.xml",
&delta_3_hash_hex,
),
(
"d2",
2,
"https://example.net/delta-2.xml",
&delta_2_hash_hex,
),
],
);
@ -1563,7 +1852,12 @@ mod tests {
3,
snapshot_uri_3,
&snapshot_hash_3,
&[("d3", 3, "https://example.net/delta-3.xml", &delta_3_hash_hex)],
&[(
"d3",
3,
"https://example.net/delta-3.xml",
&delta_3_hash_hex,
)],
);
let fetcher = MapFetcher {
@ -1598,7 +1892,10 @@ mod tests {
map: HashMap::from([(snapshot_uri.to_string(), snapshot)]),
};
let err = sync_from_notification_snapshot(&store, notif_uri, &notif, &fetcher).unwrap_err();
assert!(matches!(err, RrdpSyncError::Rrdp(RrdpError::SnapshotHashMismatch)));
assert!(matches!(
err,
RrdpSyncError::Rrdp(RrdpError::SnapshotHashMismatch)
));
}
#[test]
@ -1617,7 +1914,11 @@ mod tests {
RrdpSyncError::Rrdp(RrdpError::SnapshotSessionIdMismatch { .. })
));
let snapshot = snapshot_xml(expected_sid.to_string().as_str(), 3, &[("rsync://example.net/repo/a.mft", b"x")]);
let snapshot = snapshot_xml(
expected_sid.to_string().as_str(),
3,
&[("rsync://example.net/repo/a.mft", b"x")],
);
let err = apply_snapshot(&store, notif_uri, &snapshot, expected_sid, 2).unwrap_err();
assert!(matches!(
err,
@ -1643,7 +1944,10 @@ mod tests {
)
.into_bytes();
let err = apply_snapshot(&store, notif_uri, &xml, sid, 1).unwrap_err();
assert!(matches!(err, RrdpSyncError::Rrdp(RrdpError::PublishUriMissing)));
assert!(matches!(
err,
RrdpSyncError::Rrdp(RrdpError::PublishUriMissing)
));
// Missing base64 content (no text nodes).
let xml = format!(

View File

@ -8,6 +8,8 @@ use crate::data_model::rc::{
use x509_parser::prelude::{FromDer, X509Certificate};
use crate::validation::x509_name::x509_names_equivalent;
use std::collections::HashSet;
use x509_parser::x509::SubjectPublicKeyInfo;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ValidatedSubordinateCa {
@ -18,6 +20,13 @@ pub struct ValidatedSubordinateCa {
pub effective_as_resources: Option<AsResourceSet>,
}
/// Result of validating a subordinate CA via
/// `validate_subordinate_ca_cert_with_prevalidated_issuer`: the accepted child
/// certificate plus its effective resource sets, resolved against the issuer's
/// effective resources.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ValidatedSubordinateCaLite {
    pub child_ca: ResourceCertificate,
    // None when the child carries no effective IP resources; at least one of
    // the two resource sets is always Some for a successfully validated CA.
    pub effective_ip_resources: Option<IpResourceSet>,
    pub effective_as_resources: Option<AsResourceSet>,
}
#[derive(Debug, thiserror::Error)]
pub enum CaPathError {
#[error("child CA certificate decode failed: {0} (RFC 6487 §4; RFC 5280 §4.1)")]
@ -50,6 +59,14 @@ pub enum CaPathError {
#[error("child CA certificate signature verification failed: {0} (RFC 5280 §6.1)")]
ChildSignatureInvalid(String),
#[error("issuer SubjectPublicKeyInfo parse error: {0} (RFC 5280 §4.1.2.7)")]
IssuerSpkiParse(String),
#[error(
"trailing bytes after issuer SubjectPublicKeyInfo DER: {0} bytes (DER; RFC 5280 §4.1.2.7)"
)]
IssuerSpkiTrailingBytes(usize),
#[error("certificate not valid at validation_time (RFC 5280 §4.1.2.5; RFC 5280 §6.1)")]
CertificateNotValidAtTime,
@ -136,6 +153,7 @@ pub fn validate_subordinate_ca_cert(
if issuer_ca.kind != ResourceCertKind::Ca {
return Err(CaPathError::IssuerNotCa);
}
let issuer_spki = parse_subject_pki_from_der(&issuer_ca.tbs.subject_public_key_info)?;
if !x509_names_equivalent(&child_ca.tbs.issuer_name, &issuer_ca.tbs.subject_name) {
return Err(CaPathError::IssuerSubjectMismatch {
@ -162,8 +180,9 @@ pub fn validate_subordinate_ca_cert(
return Err(CaPathError::CertificateNotValidAtTime);
}
verify_cert_signature_with_issuer(child_ca_der, issuer_ca_der)?;
validate_child_ca_key_usage(child_ca_der)?;
let child_x509 = parse_x509_cert(child_ca_der)?;
verify_child_signature(&child_x509, &issuer_spki)?;
validate_child_ca_key_usage(&child_x509)?;
let issuer_crl = RpkixCrl::decode_der(issuer_crl_der)?;
issuer_crl.verify_signature_with_issuer_certificate_der(issuer_ca_der)?;
@ -196,6 +215,116 @@ pub fn validate_subordinate_ca_cert(
})
}
/// Validate a subordinate child CA using *pre-decoded issuer CA* and *pre-decoded+verified issuer CRL*.
///
/// This avoids repeating issuer CA decode and issuer CRL decode+signature verification for every
/// child CA certificate discovered in a publication point.
///
/// `issuer_spki` is the issuer's already-parsed SubjectPublicKeyInfo (see
/// `parse_subject_pki_from_der`) and `issuer_crl_revoked_serials` the set of
/// revoked serial numbers (big-endian bytes) from the issuer CRL, both
/// precomputed once by the caller.
pub fn validate_subordinate_ca_cert_with_prevalidated_issuer(
    child_ca_der: &[u8],
    child_ca: ResourceCertificate,
    issuer_ca: &ResourceCertificate,
    issuer_spki: &SubjectPublicKeyInfo<'_>,
    issuer_crl: &RpkixCrl,
    issuer_crl_revoked_serials: &HashSet<Vec<u8>>,
    issuer_ca_rsync_uri: Option<&str>,
    issuer_crl_rsync_uri: &str,
    issuer_effective_ip: Option<&IpResourceSet>,
    issuer_effective_as: Option<&AsResourceSet>,
    validation_time: time::OffsetDateTime,
) -> Result<ValidatedSubordinateCaLite, CaPathError> {
    // Both ends of the link must be CA certificates.
    if child_ca.kind != ResourceCertKind::Ca {
        return Err(CaPathError::ChildNotCa);
    }
    if issuer_ca.kind != ResourceCertKind::Ca {
        return Err(CaPathError::IssuerNotCa);
    }
    // The child's issuer name must match the issuer's subject name.
    if !x509_names_equivalent(&child_ca.tbs.issuer_name, &issuer_ca.tbs.subject_name) {
        return Err(CaPathError::IssuerSubjectMismatch {
            child_issuer_dn: child_ca.tbs.issuer_name.to_string(),
            issuer_subject_dn: issuer_ca.tbs.subject_name.to_string(),
        });
    }
    // Key-identifier and URI pointers must be consistent with the issuer.
    // AIA is only checked when the caller knows the issuer's rsync URI.
    validate_child_aki_matches_issuer_ski(&child_ca, issuer_ca)?;
    if let Some(expected_issuer_uri) = issuer_ca_rsync_uri {
        validate_child_aia_points_to_issuer_uri(&child_ca, expected_issuer_uri)?;
    }
    validate_child_crldp_contains_issuer_crl_uri(&child_ca, issuer_crl_rsync_uri)?;
    // Both certificates must be inside their validity windows at `validation_time`.
    if !time_within_validity(
        validation_time,
        child_ca.tbs.validity_not_before,
        child_ca.tbs.validity_not_after,
    ) || !time_within_validity(
        validation_time,
        issuer_ca.tbs.validity_not_before,
        issuer_ca.tbs.validity_not_after,
    ) {
        return Err(CaPathError::CertificateNotValidAtTime);
    }
    // Cryptographic checks: verify the child's signature against the
    // pre-parsed issuer SPKI, then enforce key-usage constraints.
    let child_x509 = parse_x509_cert(child_ca_der)?;
    verify_child_signature(&child_x509, issuer_spki)?;
    validate_child_ca_key_usage(&child_x509)?;
    // Revocation: the issuer CRL (already signature-verified by the caller)
    // must be current, and the child's serial must not be listed.
    if !crl_valid_at_time(issuer_crl, validation_time) {
        return Err(CaPathError::CrlNotValidAtTime);
    }
    let serial = BigUnsigned::from_biguint(&child_ca.tbs.serial_number);
    if issuer_crl_revoked_serials.contains(&serial.bytes_be) {
        return Err(CaPathError::ChildRevoked);
    }
    // Resolve the child's effective resources against the issuer's effective
    // sets; a CA with neither IP nor AS resources is rejected.
    let effective_ip_resources = resolve_child_ip_resources(
        child_ca.tbs.extensions.ip_resources.as_ref(),
        issuer_effective_ip,
    )?;
    let effective_as_resources = resolve_child_as_resources(
        child_ca.tbs.extensions.as_resources.as_ref(),
        issuer_effective_as,
    )?;
    if effective_ip_resources.is_none() && effective_as_resources.is_none() {
        return Err(CaPathError::ResourcesMissing);
    }
    Ok(ValidatedSubordinateCaLite {
        child_ca,
        effective_ip_resources,
        effective_as_resources,
    })
}
/// Parse a SubjectPublicKeyInfo from DER, requiring that the input contains
/// exactly one DER value (no trailing bytes).
fn parse_subject_pki_from_der(der: &[u8]) -> Result<SubjectPublicKeyInfo<'_>, CaPathError> {
    match SubjectPublicKeyInfo::from_der(der) {
        // Accept only when the parser consumed the whole buffer.
        Ok((rest, spki)) if rest.is_empty() => Ok(spki),
        Ok((rest, _)) => Err(CaPathError::IssuerSpkiTrailingBytes(rest.len())),
        Err(e) => Err(CaPathError::IssuerSpkiParse(e.to_string())),
    }
}
/// Decode an X.509 certificate from DER, rejecting inputs with bytes left
/// over after the certificate; any failure is reported as
/// `ChildSignatureInvalid`, matching how callers use this on child certs.
fn parse_x509_cert(der: &[u8]) -> Result<X509Certificate<'_>, CaPathError> {
    match X509Certificate::from_der(der) {
        Ok((rest, cert)) if rest.is_empty() => Ok(cert),
        Ok(_) => Err(CaPathError::ChildSignatureInvalid(
            "trailing bytes after child certificate".to_string(),
        )),
        Err(e) => Err(CaPathError::ChildSignatureInvalid(e.to_string())),
    }
}
/// Verify the child certificate's signature using the issuer's public key,
/// mapping any verification failure into `ChildSignatureInvalid`.
fn verify_child_signature(
    child: &X509Certificate<'_>,
    issuer_spki: &SubjectPublicKeyInfo<'_>,
) -> Result<(), CaPathError> {
    match child.verify_signature(Some(issuer_spki)) {
        Ok(()) => Ok(()),
        Err(e) => Err(CaPathError::ChildSignatureInvalid(e.to_string())),
    }
}
fn validate_child_aki_matches_issuer_ski(
child: &ResourceCertificate,
issuer: &ResourceCertificate,
@ -238,15 +367,7 @@ fn validate_child_crldp_contains_issuer_crl_uri(
Ok(())
}
fn validate_child_ca_key_usage(child_ca_der: &[u8]) -> Result<(), CaPathError> {
let (rem, cert) = X509Certificate::from_der(child_ca_der)
.map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))?;
if !rem.is_empty() {
return Err(CaPathError::ChildSignatureInvalid(
"trailing bytes after child CA certificate".to_string(),
));
}
fn validate_child_ca_key_usage(cert: &X509Certificate<'_>) -> Result<(), CaPathError> {
let mut ku_critical: Option<bool> = None;
for ext in cert.extensions() {
if ext.oid.as_bytes() == OID_KEY_USAGE_RAW {
@ -286,30 +407,6 @@ fn validate_child_ca_key_usage(child_ca_der: &[u8]) -> Result<(), CaPathError> {
Ok(())
}
// Legacy helper: decodes BOTH certificates from DER on every call and then
// verifies the child's signature against the issuer's SubjectPublicKeyInfo.
// NOTE(review): superseded in this file by parse_x509_cert +
// verify_child_signature, which let callers reuse a pre-parsed issuer SPKI
// instead of re-decoding the issuer for every child.
fn verify_cert_signature_with_issuer(
    child_der: &[u8],
    issuer_ca_der: &[u8],
) -> Result<(), CaPathError> {
    let (rem, child) = X509Certificate::from_der(child_der)
        .map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))?;
    // Reject trailing garbage after the child certificate's DER value.
    if !rem.is_empty() {
        return Err(CaPathError::ChildSignatureInvalid(
            "trailing bytes after child certificate".to_string(),
        ));
    }
    let (rem, issuer) = X509Certificate::from_der(issuer_ca_der)
        .map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))?;
    // Same strictness for the issuer certificate.
    if !rem.is_empty() {
        return Err(CaPathError::ChildSignatureInvalid(
            "trailing bytes after issuer certificate".to_string(),
        ));
    }
    child
        .verify_signature(Some(&issuer.tbs_certificate.subject_pki))
        .map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))
}
fn time_within_validity(
t: time::OffsetDateTime,
not_before: time::OffsetDateTime,
@ -691,13 +788,13 @@ fn bytes_leq(a: &[u8], b: &[u8]) -> bool {
#[cfg(test)]
mod tests {
use super::*;
use crate::data_model::common::X509NameDer;
use crate::data_model::rc::{
Afi, AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpAddressFamily, IpResourceSet,
};
use crate::data_model::rc::{
RcExtensions, ResourceCertKind, ResourceCertificate, RpkixTbsCertificate,
};
use crate::data_model::common::X509NameDer;
use der_parser::num_bigint::BigUint;
use std::process::Command;
fn dummy_cert(
@ -823,7 +920,10 @@ mod tests {
};
// Issuer self-signed.
run(Command::new("openssl").args(["genrsa", "-out"]).arg(&issuer_key).arg("2048"));
run(Command::new("openssl")
.args(["genrsa", "-out"])
.arg(&issuer_key)
.arg("2048"));
run(Command::new("openssl")
.args(["req", "-new", "-key"])
.arg(&issuer_key)
@ -843,7 +943,10 @@ mod tests {
.arg(&issuer_der));
// Child signed by issuer.
run(Command::new("openssl").args(["genrsa", "-out"]).arg(&child_key).arg("2048"));
run(Command::new("openssl")
.args(["genrsa", "-out"])
.arg(&child_key)
.arg("2048"));
run(Command::new("openssl")
.args(["req", "-new", "-key"])
.arg(&child_key)
@ -865,7 +968,10 @@ mod tests {
.arg(&child_der));
// Other self-signed issuer.
run(Command::new("openssl").args(["genrsa", "-out"]).arg(&other_key).arg("2048"));
run(Command::new("openssl")
.args(["genrsa", "-out"])
.arg(&other_key)
.arg("2048"));
run(Command::new("openssl")
.args(["req", "-new", "-key"])
.arg(&other_key)
@ -1116,20 +1222,23 @@ mod tests {
td.path(),
Some("keyUsage = critical, keyCertSign, cRLSign"),
);
validate_child_ca_key_usage(&der).expect("key usage ok");
let cert = parse_x509_cert(&der).expect("x509 parse ok");
validate_child_ca_key_usage(&cert).expect("key usage ok");
}
#[test]
fn validate_child_ca_key_usage_rejects_missing_noncritical_and_invalid_bits() {
let td = tempfile::tempdir().expect("tempdir");
let missing = write_cert_der_with_addext(td.path(), None);
let err = validate_child_ca_key_usage(&missing).unwrap_err();
let cert = parse_x509_cert(&missing).expect("x509 parse ok");
let err = validate_child_ca_key_usage(&cert).unwrap_err();
assert!(matches!(err, CaPathError::KeyUsageMissing), "{err}");
let td = tempfile::tempdir().expect("tempdir");
let noncritical =
write_cert_der_with_addext(td.path(), Some("keyUsage = keyCertSign, cRLSign"));
let err = validate_child_ca_key_usage(&noncritical).unwrap_err();
let cert = parse_x509_cert(&noncritical).expect("x509 parse ok");
let err = validate_child_ca_key_usage(&cert).unwrap_err();
assert!(matches!(err, CaPathError::KeyUsageNotCritical), "{err}");
let td = tempfile::tempdir().expect("tempdir");
@ -1137,7 +1246,8 @@ mod tests {
td.path(),
Some("keyUsage = critical, keyCertSign, cRLSign, digitalSignature"),
);
let err = validate_child_ca_key_usage(&invalid).unwrap_err();
let cert = parse_x509_cert(&invalid).expect("x509 parse ok");
let err = validate_child_ca_key_usage(&cert).unwrap_err();
assert!(matches!(err, CaPathError::KeyUsageInvalidBits), "{err}");
}
@ -1145,9 +1255,17 @@ mod tests {
fn verify_cert_signature_with_issuer_accepts_valid_chain_and_rejects_wrong_issuer() {
let td = tempfile::tempdir().expect("tempdir");
let (issuer, child, other) = gen_issuer_and_child_der(td.path());
verify_cert_signature_with_issuer(&child, &issuer).expect("signature ok");
let err = verify_cert_signature_with_issuer(&child, &other).unwrap_err();
assert!(matches!(err, CaPathError::ChildSignatureInvalid(_)), "{err}");
let issuer_cert = parse_x509_cert(&issuer).expect("x509 parse issuer");
let child_cert = parse_x509_cert(&child).expect("x509 parse child");
verify_child_signature(&child_cert, &issuer_cert.tbs_certificate.subject_pki)
.expect("signature ok");
let other_cert = parse_x509_cert(&other).expect("x509 parse other");
let err = verify_child_signature(&child_cert, &other_cert.tbs_certificate.subject_pki)
.unwrap_err();
assert!(
matches!(err, CaPathError::ChildSignatureInvalid(_)),
"{err}"
);
}
#[test]
@ -1214,10 +1332,9 @@ mod tests {
assert!(matches!(err, CaPathError::ResourcesNotSubset), "{err}");
let parent_as = AsResourceSet {
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Range {
min: 1,
max: 100,
}])),
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![
AsIdOrRange::Range { min: 1, max: 100 },
])),
rdi: None,
};
let child_as_inherit = AsResourceSet {
@ -1238,7 +1355,9 @@ mod tests {
.expect("some");
let child_as_bad = AsResourceSet {
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(200)])),
asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
200,
)])),
rdi: None,
};
let err = resolve_child_as_resources(Some(&child_as_bad), Some(&parent_as)).unwrap_err();

View File

@ -6,6 +6,8 @@ use crate::data_model::rc::{
use x509_parser::prelude::{FromDer, X509Certificate};
use crate::validation::x509_name::x509_names_equivalent;
use std::collections::HashSet;
use x509_parser::x509::SubjectPublicKeyInfo;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ValidatedEeCertPath {
@ -44,6 +46,14 @@ pub enum CertPathError {
#[error("EE certificate signature verification failed: {0} (RFC 5280 §6.1)")]
EeSignatureInvalid(String),
#[error("issuer SubjectPublicKeyInfo parse error: {0} (RFC 5280 §4.1.2.7)")]
IssuerSpkiParse(String),
#[error(
"trailing bytes after issuer SubjectPublicKeyInfo DER: {0} bytes (DER; RFC 5280 §4.1.2.7)"
)]
IssuerSpkiTrailingBytes(usize),
#[error("EE KeyUsage extension missing (RFC 6487 §4.8.4; RFC 5280 §4.2.1.3)")]
KeyUsageMissing,
@ -113,6 +123,7 @@ pub fn validate_ee_cert_path(
if issuer_ca.kind != ResourceCertKind::Ca {
return Err(CertPathError::IssuerNotCa);
}
let issuer_spki = parse_subject_pki_from_der(&issuer_ca.tbs.subject_public_key_info)?;
if !x509_names_equivalent(&ee.tbs.issuer_name, &issuer_ca.tbs.subject_name) {
return Err(CertPathError::IssuerSubjectMismatch {
@ -141,8 +152,9 @@ pub fn validate_ee_cert_path(
return Err(CertPathError::CertificateNotValidAtTime);
}
verify_cert_signature_with_issuer(ee_cert_der, issuer_ca_der)?;
validate_ee_key_usage(ee_cert_der)?;
let ee_x509 = parse_x509_cert(ee_cert_der)?;
verify_ee_cert_signature(&ee_x509, &issuer_spki)?;
validate_ee_key_usage(&ee_x509)?;
let issuer_crl = RpkixCrl::decode_der(issuer_crl_der)?;
issuer_crl.verify_signature_with_issuer_certificate_der(issuer_ca_der)?;
@ -161,6 +173,104 @@ pub fn validate_ee_cert_path(
})
}
/// Validate the EE certificate path using a *pre-decoded issuer CA* and a *pre-decoded and
/// pre-verified issuer CRL*.
///
/// This is a performance-oriented helper for stage2 serial runs: it avoids repeating issuer CA
/// decode, issuer SPKI parsing, and issuer CRL decode + signature verification for every signed
/// object in a publication point.
///
/// The caller must ensure:
/// - `issuer_ca` and `issuer_spki` describe the same issuer certificate
/// - `issuer_crl`'s cryptographic signature/binding has already been verified against the issuer
/// - `issuer_crl_revoked_serials` holds the big-endian serial bytes of `issuer_crl`'s entries
///
/// Returns the decoded EE `ResourceCertificate` on success.
pub fn validate_ee_cert_path_with_prevalidated_issuer(
    ee_cert_der: &[u8],
    issuer_ca: &ResourceCertificate,
    issuer_spki: &SubjectPublicKeyInfo<'_>,
    issuer_crl: &RpkixCrl,
    issuer_crl_revoked_serials: &HashSet<Vec<u8>>,
    issuer_ca_rsync_uri: Option<&str>,
    issuer_crl_rsync_uri: Option<&str>,
    validation_time: time::OffsetDateTime,
) -> Result<ResourceCertificate, CertPathError> {
    let ee = ResourceCertificate::decode_der(ee_cert_der)?;
    // Role checks: the subject certificate must be an EE, the issuer a CA.
    if ee.kind != ResourceCertKind::Ee {
        return Err(CertPathError::EeNotEe);
    }
    if issuer_ca.kind != ResourceCertKind::Ca {
        return Err(CertPathError::IssuerNotCa);
    }
    // The EE's issuer DN must be equivalent to the issuer CA's subject DN.
    if !x509_names_equivalent(&ee.tbs.issuer_name, &issuer_ca.tbs.subject_name) {
        return Err(CertPathError::IssuerSubjectMismatch {
            ee_issuer_dn: ee.tbs.issuer_name.to_string(),
            issuer_subject_dn: issuer_ca.tbs.subject_name.to_string(),
        });
    }
    // Key-identifier binding: EE AKI must reference the issuer's SKI.
    validate_ee_aki_matches_issuer_ski(&ee, issuer_ca)?;
    // Optional URI cross-checks: AIA must point at the issuer CA object, and
    // the CRLDP must contain the selected issuer CRL URI, when known.
    if let Some(expected_issuer_uri) = issuer_ca_rsync_uri {
        validate_ee_aia_points_to_issuer_uri(&ee, expected_issuer_uri)?;
    }
    if let Some(expected_crl_uri) = issuer_crl_rsync_uri {
        validate_ee_crldp_contains_issuer_crl_uri(&ee, expected_crl_uri)?;
    }
    // Both the EE and the issuer CA must be within their validity windows at
    // `validation_time`.
    if !time_within_validity(
        validation_time,
        ee.tbs.validity_not_before,
        ee.tbs.validity_not_after,
    ) || !time_within_validity(
        validation_time,
        issuer_ca.tbs.validity_not_before,
        issuer_ca.tbs.validity_not_after,
    ) {
        return Err(CertPathError::CertificateNotValidAtTime);
    }
    // Parse the EE once as an X.509 structure and reuse it for both the
    // signature verification (against the pre-parsed issuer SPKI) and the
    // KeyUsage profile check.
    let ee_x509 = parse_x509_cert(ee_cert_der)?;
    verify_ee_cert_signature(&ee_x509, issuer_spki)?;
    validate_ee_key_usage(&ee_x509)?;
    // The (already signature-verified) issuer CRL must be current.
    if !crl_valid_at_time(issuer_crl, validation_time) {
        return Err(CertPathError::CrlNotValidAtTime);
    }
    // Revocation: look up the EE serial in the precomputed revoked-serial set.
    let serial = BigUnsigned::from_biguint(&ee.tbs.serial_number);
    if issuer_crl_revoked_serials.contains(&serial.bytes_be) {
        return Err(CertPathError::EeRevoked);
    }
    Ok(ee)
}
/// Parse a SubjectPublicKeyInfo from strict DER, rejecting trailing bytes.
fn parse_subject_pki_from_der(der: &[u8]) -> Result<SubjectPublicKeyInfo<'_>, CertPathError> {
    match SubjectPublicKeyInfo::from_der(der) {
        Ok((rem, _)) if !rem.is_empty() => Err(CertPathError::IssuerSpkiTrailingBytes(rem.len())),
        Ok((_, spki)) => Ok(spki),
        Err(e) => Err(CertPathError::IssuerSpkiParse(e.to_string())),
    }
}
/// Parse an EE certificate from strict DER; trailing bytes are an error.
fn parse_x509_cert(der: &[u8]) -> Result<X509Certificate<'_>, CertPathError> {
    let (rem, cert) = X509Certificate::from_der(der)
        .map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))?;
    if rem.is_empty() {
        Ok(cert)
    } else {
        Err(CertPathError::EeSignatureInvalid(
            "trailing bytes after EE certificate".to_string(),
        ))
    }
}
/// Verify the EE certificate's signature against a pre-parsed issuer SPKI.
fn verify_ee_cert_signature(
    ee: &X509Certificate<'_>,
    issuer_spki: &SubjectPublicKeyInfo<'_>,
) -> Result<(), CertPathError> {
    match ee.verify_signature(Some(issuer_spki)) {
        Ok(()) => Ok(()),
        Err(e) => Err(CertPathError::EeSignatureInvalid(e.to_string())),
    }
}
fn validate_ee_aki_matches_issuer_ski(
ee: &ResourceCertificate,
issuer_ca: &ResourceCertificate,
@ -203,15 +313,7 @@ fn validate_ee_crldp_contains_issuer_crl_uri(
Ok(())
}
fn validate_ee_key_usage(ee_cert_der: &[u8]) -> Result<(), CertPathError> {
let (rem, cert) = X509Certificate::from_der(ee_cert_der)
.map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))?;
if !rem.is_empty() {
return Err(CertPathError::EeSignatureInvalid(
"trailing bytes after EE certificate".to_string(),
));
}
fn validate_ee_key_usage(cert: &X509Certificate<'_>) -> Result<(), CertPathError> {
let mut ku_critical: Option<bool> = None;
for ext in cert.extensions() {
if ext.oid.as_bytes() == crate::data_model::oid::OID_KEY_USAGE_RAW {
@ -250,29 +352,6 @@ fn validate_ee_key_usage(ee_cert_der: &[u8]) -> Result<(), CertPathError> {
Ok(())
}
fn verify_cert_signature_with_issuer(
ee_cert_der: &[u8],
issuer_ca_der: &[u8],
) -> Result<(), CertPathError> {
let (rem, ee) = X509Certificate::from_der(ee_cert_der)
.map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))?;
if !rem.is_empty() {
return Err(CertPathError::EeSignatureInvalid(
"trailing bytes after EE certificate".to_string(),
));
}
let (rem, issuer) = X509Certificate::from_der(issuer_ca_der)
.map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))?;
if !rem.is_empty() {
return Err(CertPathError::EeSignatureInvalid(
"trailing bytes after issuer certificate".to_string(),
));
}
ee.verify_signature(Some(&issuer.tbs_certificate.subject_pki))
.map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))
}
fn time_within_validity(
t: time::OffsetDateTime,
not_before: time::OffsetDateTime,
@ -301,10 +380,10 @@ fn is_serial_revoked_by_crl(ee: &ResourceCertificate, crl: &RpkixCrl) -> bool {
#[cfg(test)]
mod tests {
use super::*;
use crate::data_model::common::X509NameDer;
use crate::data_model::rc::{
RcExtensions, ResourceCertKind, ResourceCertificate, RpkixTbsCertificate,
};
use crate::data_model::common::X509NameDer;
use der_parser::num_bigint::BigUint;
fn dummy_cert(

View File

@ -119,9 +119,7 @@ pub enum ManifestCachedError {
)]
CachedMissingFile { rsync_uri: String },
#[error(
"cached fetch_cache_pp file hash mismatch: {rsync_uri} (RFC 9286 §6.5; RFC 9286 §6.6)"
)]
#[error("cached fetch_cache_pp file hash mismatch: {rsync_uri} (RFC 9286 §6.5; RFC 9286 §6.6)")]
CachedHashMismatch { rsync_uri: String },
}
@ -242,7 +240,7 @@ pub fn process_manifest_publication_point_after_repo_sync(
}
}
fn load_and_revalidate_cached_pack(
pub fn load_and_revalidate_cached_pack(
store: &RocksStore,
manifest_rsync_uri: &str,
publication_point_rsync_uri: &str,
@ -283,8 +281,9 @@ fn revalidate_cached_pack_with_current_time(
validation_time: time::OffsetDateTime,
) -> Result<(), ManifestCachedError> {
// First, re-validate the cached manifest itself with the current time.
let manifest = decode_and_validate_manifest_with_current_time(&pack.manifest_bytes, validation_time)
.map_err(ManifestCachedError::from)?;
let manifest =
decode_and_validate_manifest_with_current_time(&pack.manifest_bytes, validation_time)
.map_err(ManifestCachedError::from)?;
// Then, re-bind the manifest fileList to the cached pack contents, as per RFC 9286 §6.4-§6.5.
let by_uri: HashMap<&str, &crate::storage::PackFile> = pack
@ -379,10 +378,17 @@ fn try_build_fresh_pack(
manifest_rsync_uri: manifest_rsync_uri.to_string(),
})?;
let manifest = decode_and_validate_manifest_with_current_time(&manifest_bytes, validation_time)?;
let manifest =
decode_and_validate_manifest_with_current_time(&manifest_bytes, validation_time)?;
let this_update = manifest.manifest.this_update.to_offset(time::UtcOffset::UTC);
let next_update = manifest.manifest.next_update.to_offset(time::UtcOffset::UTC);
let this_update = manifest
.manifest
.this_update
.to_offset(time::UtcOffset::UTC);
let next_update = manifest
.manifest
.next_update
.to_offset(time::UtcOffset::UTC);
let now = validation_time.to_offset(time::UtcOffset::UTC);
// RFC 9286 §4.2.1: replay/rollback detection for manifestNumber and thisUpdate.
@ -443,7 +449,10 @@ fn try_build_fresh_pack(
}
}
let entries = manifest.manifest.parse_files().map_err(ManifestDecodeError::Validate)?;
let entries = manifest
.manifest
.parse_files()
.map_err(ManifestDecodeError::Validate)?;
let mut files = Vec::with_capacity(manifest.manifest.file_count());
for entry in &entries {
let rsync_uri =
@ -493,9 +502,7 @@ fn try_build_fresh_pack(
fn cmp_minimal_be_unsigned(a: &[u8], b: &[u8]) -> Ordering {
// Compare two minimal big-endian byte strings as unsigned integers.
// (Leading zeros are not expected; callers store minimal big-endian.)
a.len()
.cmp(&b.len())
.then_with(|| a.cmp(b))
a.len().cmp(&b.len()).then_with(|| a.cmp(b))
}
fn join_rsync_dir_and_file(base: &str, file_name: &str) -> String {

View File

@ -1,3 +1,4 @@
use crate::analysis::timing::TimingHandle;
use crate::audit::{AuditObjectKind, AuditObjectResult, ObjectAuditEntry, sha256_hex_from_32};
use crate::data_model::aspa::{AspaDecodeError, AspaObject, AspaValidateError};
use crate::data_model::manifest::ManifestObject;
@ -10,13 +11,35 @@ use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{Policy, SignedObjectFailurePolicy};
use crate::report::{RfcRef, Warning};
use crate::storage::{FetchCachePpPack, PackFile};
use crate::validation::cert_path::{CertPathError, validate_ee_cert_path};
use crate::validation::cert_path::{CertPathError, validate_ee_cert_path_with_prevalidated_issuer};
use x509_parser::prelude::FromDer;
use x509_parser::x509::SubjectPublicKeyInfo;
const RFC_NONE: &[RfcRef] = &[];
const RFC_CRLDP: &[RfcRef] = &[RfcRef("RFC 6487 §4.8.6")];
const RFC_CRLDP_AND_LOCKED_PACK: &[RfcRef] =
&[RfcRef("RFC 6487 §4.8.6"), RfcRef("RFC 9286 §4.2.1")];
/// An issuer CRL that has been decoded and whose signature has been verified
/// against the issuer CA certificate.
#[derive(Clone, Debug)]
struct VerifiedIssuerCrl {
    // The decoded CRL, used for freshness (validity-window) checks.
    crl: crate::data_model::crl::RpkixCrl,
    // Big-endian serial bytes (`bytes_be`) of every revoked certificate,
    // precomputed so per-object revocation lookups are O(1).
    revoked_serials: std::collections::HashSet<Vec<u8>>,
}
/// Lazily-verified CRL cache entry for one publication-point pack: starts as
/// raw DER bytes and is upgraded in place to a verified CRL on first use.
#[derive(Clone, Debug)]
enum CachedIssuerCrl {
    // Raw CRL DER, not yet decoded or signature-checked.
    Pending(Vec<u8>),
    // Decoded, signature-verified CRL plus its revoked-serial index.
    Ok(VerifiedIssuerCrl),
}
/// Pre-sorted, merged interval index of the issuer's effective resources,
/// built once per publication point so per-object subset checks are cheap.
/// A `None` field means the issuer holds no effective resources of that kind;
/// any child claim of that kind then fails the indexed subset check.
#[derive(Clone, Debug, Default)]
struct IssuerResourcesIndex {
    // IPv4 (min, max) address-byte intervals, sorted and merged.
    ip_v4: Option<Vec<(Vec<u8>, Vec<u8>)>>,
    // IPv6 (min, max) address-byte intervals, sorted and merged.
    ip_v6: Option<Vec<(Vec<u8>, Vec<u8>)>>,
    // AS number (min, max) intervals, sorted and merged.
    asnum: Option<Vec<(u32, u32)>>,
    // RDI (min, max) intervals, sorted and merged.
    rdi: Option<Vec<(u32, u32)>>,
}
fn extra_rfc_refs_for_crl_selection(e: &ObjectValidateError) -> &'static [RfcRef] {
match e {
ObjectValidateError::MissingCrlDpUris => RFC_CRLDP,
@ -68,6 +91,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
issuer_effective_ip: Option<&crate::data_model::rc::IpResourceSet>,
issuer_effective_as: Option<&crate::data_model::rc::AsResourceSet>,
validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>,
) -> ObjectsOutput {
let mut warnings: Vec<Warning> = Vec::new();
let mut stats = ObjectsStats::default();
@ -87,16 +111,105 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
let _manifest =
ManifestObject::decode_der(&pack.manifest_bytes).expect("fetch_cache_pp manifest decodes");
let crl_files = pack
// Decode issuer CA once; if it fails we cannot validate ROA/ASPA EE certificates.
let issuer_ca = match ResourceCertificate::decode_der(issuer_ca_der) {
Ok(v) => v,
Err(e) => {
stats.publication_point_dropped = true;
warnings.push(
Warning::new(format!(
"dropping publication point: issuer CA decode failed: {e}"
))
.with_rfc_refs(&[RfcRef("RFC 6487 §7.2"), RfcRef("RFC 5280 §6.1")])
.with_context(&pack.manifest_rsync_uri),
);
for f in &pack.files {
if f.rsync_uri.ends_with(".roa") {
audit.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(),
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Roa,
result: AuditObjectResult::Skipped,
detail: Some("skipped: issuer CA decode failed".to_string()),
});
} else if f.rsync_uri.ends_with(".asa") {
audit.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(),
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Aspa,
result: AuditObjectResult::Skipped,
detail: Some("skipped: issuer CA decode failed".to_string()),
});
}
}
return ObjectsOutput {
vrps: Vec::new(),
aspas: Vec::new(),
warnings,
stats,
audit,
};
}
};
// Parse issuer SubjectPublicKeyInfo once and reuse for all EE certificate signature checks.
let issuer_spki = match SubjectPublicKeyInfo::from_der(&issuer_ca.tbs.subject_public_key_info) {
Ok((rem, spki)) if rem.is_empty() => spki,
Ok((rem, _)) => {
stats.publication_point_dropped = true;
warnings.push(
Warning::new(format!(
"dropping publication point: trailing bytes after issuer SPKI DER: {} bytes",
rem.len()
))
.with_rfc_refs(&[RfcRef("RFC 5280 §4.1.2.7")])
.with_context(&pack.manifest_rsync_uri),
);
return ObjectsOutput {
vrps: Vec::new(),
aspas: Vec::new(),
warnings,
stats,
audit,
};
}
Err(e) => {
stats.publication_point_dropped = true;
warnings.push(
Warning::new(format!(
"dropping publication point: issuer SPKI parse failed: {e}"
))
.with_rfc_refs(&[RfcRef("RFC 5280 §4.1.2.7")])
.with_context(&pack.manifest_rsync_uri),
);
return ObjectsOutput {
vrps: Vec::new(),
aspas: Vec::new(),
warnings,
stats,
audit,
};
}
};
let mut crl_cache: std::collections::HashMap<String, CachedIssuerCrl> = pack
.files
.iter()
.filter(|f| f.rsync_uri.ends_with(".crl"))
.map(|f| (f.rsync_uri.clone(), f.bytes.clone()))
.collect::<Vec<_>>();
.map(|f| {
(
f.rsync_uri.clone(),
CachedIssuerCrl::Pending(f.bytes.clone()),
)
})
.collect();
let issuer_resources_index =
build_issuer_resources_index(issuer_effective_ip, issuer_effective_as);
// If the pack has signed objects but no CRLs at all, we cannot validate any embedded EE
// certificate paths deterministically (EE CRLDP must reference an rsync URI in the pack).
if crl_files.is_empty() && (stats.roa_total > 0 || stats.aspa_total > 0) {
if crl_cache.is_empty() && (stats.roa_total > 0 || stats.aspa_total > 0) {
stats.publication_point_dropped = true;
warnings.push(
Warning::new("dropping publication point: no CRL files in fetch_cache_pp")
@ -110,9 +223,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Roa,
result: AuditObjectResult::Skipped,
detail: Some(
"skipped due to missing CRL files in fetch_cache_pp".to_string(),
),
detail: Some("skipped due to missing CRL files in fetch_cache_pp".to_string()),
});
} else if f.rsync_uri.ends_with(".asa") {
audit.push(ObjectAuditEntry {
@ -120,9 +231,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Aspa,
result: AuditObjectResult::Skipped,
detail: Some(
"skipped due to missing CRL files in fetch_cache_pp".to_string(),
),
detail: Some("skipped due to missing CRL files in fetch_cache_pp".to_string()),
});
}
}
@ -140,14 +249,19 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
for (idx, file) in pack.files.iter().enumerate() {
if file.rsync_uri.ends_with(".roa") {
let _t = timing.as_ref().map(|t| t.span_phase("objects_roa_total"));
match process_roa_with_issuer(
file,
issuer_ca_der,
&issuer_ca,
&issuer_spki,
issuer_ca_rsync_uri,
&crl_files,
&mut crl_cache,
&issuer_resources_index,
issuer_effective_ip,
issuer_effective_as,
validation_time,
timing,
) {
Ok(mut out) => {
stats.roa_ok += 1;
@ -232,14 +346,19 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
},
}
} else if file.rsync_uri.ends_with(".asa") {
let _t = timing.as_ref().map(|t| t.span_phase("objects_aspa_total"));
match process_aspa_with_issuer(
file,
issuer_ca_der,
&issuer_ca,
&issuer_spki,
issuer_ca_rsync_uri,
&crl_files,
&mut crl_cache,
&issuer_resources_index,
issuer_effective_ip,
issuer_effective_as,
validation_time,
timing,
) {
Ok(att) => {
stats.aspa_ok += 1;
@ -389,15 +508,33 @@ enum ObjectValidateError {
fn process_roa_with_issuer(
file: &PackFile,
issuer_ca_der: &[u8],
issuer_ca: &ResourceCertificate,
issuer_spki: &SubjectPublicKeyInfo<'_>,
issuer_ca_rsync_uri: Option<&str>,
crl_files: &[(String, Vec<u8>)],
crl_cache: &mut std::collections::HashMap<String, CachedIssuerCrl>,
issuer_resources_index: &IssuerResourcesIndex,
issuer_effective_ip: Option<&crate::data_model::rc::IpResourceSet>,
issuer_effective_as: Option<&crate::data_model::rc::AsResourceSet>,
validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>,
) -> Result<Vec<Vrp>, ObjectValidateError> {
let _decode = timing
.as_ref()
.map(|t| t.span_phase("objects_roa_decode_and_validate_total"));
let roa = RoaObject::decode_der(&file.bytes)?;
drop(_decode);
let _ee_profile = timing
.as_ref()
.map(|t| t.span_phase("objects_roa_validate_embedded_ee_total"));
roa.validate_embedded_ee_cert()?;
drop(_ee_profile);
let _verify = timing
.as_ref()
.map(|t| t.span_phase("objects_roa_verify_signature_total"));
roa.signed_object.verify()?;
drop(_verify);
let ee_der = &roa.signed_object.signed_data.certificates[0].raw_der;
let ee_crldp_uris = roa.signed_object.signed_data.certificates[0]
@ -406,18 +543,34 @@ fn process_roa_with_issuer(
.extensions
.crl_distribution_points_uris
.as_ref();
let (issuer_crl_rsync_uri, issuer_crl_der) =
choose_crl_for_certificate(ee_crldp_uris, crl_files)?;
let validated = validate_ee_cert_path(
let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?;
let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?;
let _cert_path = timing
.as_ref()
.map(|t| t.span_phase("objects_roa_validate_ee_cert_path_total"));
let ee = validate_ee_cert_path_with_prevalidated_issuer(
ee_der,
issuer_ca_der,
&issuer_crl_der,
issuer_ca,
issuer_spki,
&verified_crl.crl,
&verified_crl.revoked_serials,
issuer_ca_rsync_uri,
Some(issuer_crl_rsync_uri.as_str()),
Some(issuer_crl_rsync_uri),
validation_time,
)?;
drop(_cert_path);
validate_ee_resources_subset(&validated.ee, issuer_effective_ip, issuer_effective_as)?;
let _subset = timing
.as_ref()
.map(|t| t.span_phase("objects_roa_validate_ee_resources_subset_total"));
validate_ee_resources_subset(
&ee,
issuer_effective_ip,
issuer_effective_as,
issuer_resources_index,
)?;
drop(_subset);
Ok(roa_to_vrps(&roa))
}
@ -425,15 +578,33 @@ fn process_roa_with_issuer(
fn process_aspa_with_issuer(
file: &PackFile,
issuer_ca_der: &[u8],
issuer_ca: &ResourceCertificate,
issuer_spki: &SubjectPublicKeyInfo<'_>,
issuer_ca_rsync_uri: Option<&str>,
crl_files: &[(String, Vec<u8>)],
crl_cache: &mut std::collections::HashMap<String, CachedIssuerCrl>,
issuer_resources_index: &IssuerResourcesIndex,
issuer_effective_ip: Option<&crate::data_model::rc::IpResourceSet>,
issuer_effective_as: Option<&crate::data_model::rc::AsResourceSet>,
validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>,
) -> Result<AspaAttestation, ObjectValidateError> {
let _decode = timing
.as_ref()
.map(|t| t.span_phase("objects_aspa_decode_and_validate_total"));
let aspa = AspaObject::decode_der(&file.bytes)?;
drop(_decode);
let _ee_profile = timing
.as_ref()
.map(|t| t.span_phase("objects_aspa_validate_embedded_ee_total"));
aspa.validate_embedded_ee_cert()?;
drop(_ee_profile);
let _verify = timing
.as_ref()
.map(|t| t.span_phase("objects_aspa_verify_signature_total"));
aspa.signed_object.verify()?;
drop(_verify);
let ee_der = &aspa.signed_object.signed_data.certificates[0].raw_der;
let ee_crldp_uris = aspa.signed_object.signed_data.certificates[0]
@ -442,18 +613,34 @@ fn process_aspa_with_issuer(
.extensions
.crl_distribution_points_uris
.as_ref();
let (issuer_crl_rsync_uri, issuer_crl_der) =
choose_crl_for_certificate(ee_crldp_uris, crl_files)?;
let validated = validate_ee_cert_path(
let issuer_crl_rsync_uri = choose_crl_uri_for_certificate(ee_crldp_uris, crl_cache)?;
let verified_crl = ensure_issuer_crl_verified(issuer_crl_rsync_uri, crl_cache, issuer_ca_der)?;
let _cert_path = timing
.as_ref()
.map(|t| t.span_phase("objects_aspa_validate_ee_cert_path_total"));
let ee = validate_ee_cert_path_with_prevalidated_issuer(
ee_der,
issuer_ca_der,
&issuer_crl_der,
issuer_ca,
issuer_spki,
&verified_crl.crl,
&verified_crl.revoked_serials,
issuer_ca_rsync_uri,
Some(issuer_crl_rsync_uri.as_str()),
Some(issuer_crl_rsync_uri),
validation_time,
)?;
drop(_cert_path);
validate_ee_resources_subset(&validated.ee, issuer_effective_ip, issuer_effective_as)?;
let _subset = timing
.as_ref()
.map(|t| t.span_phase("objects_aspa_validate_ee_resources_subset_total"));
validate_ee_resources_subset(
&ee,
issuer_effective_ip,
issuer_effective_as,
issuer_resources_index,
)?;
drop(_subset);
Ok(AspaAttestation {
customer_as_id: aspa.aspa.customer_as_id,
@ -461,11 +648,11 @@ fn process_aspa_with_issuer(
})
}
fn choose_crl_for_certificate(
crldp_uris: Option<&Vec<String>>,
crl_files: &[(String, Vec<u8>)],
) -> Result<(String, Vec<u8>), ObjectValidateError> {
if crl_files.is_empty() {
fn choose_crl_uri_for_certificate<'a>(
crldp_uris: Option<&'a Vec<String>>,
crl_cache: &std::collections::HashMap<String, CachedIssuerCrl>,
) -> Result<&'a str, ObjectValidateError> {
if crl_cache.is_empty() {
return Err(ObjectValidateError::MissingCrlInPack);
}
@ -475,8 +662,8 @@ fn choose_crl_for_certificate(
for u in crldp_uris {
let s = u.as_str();
if let Some((uri, bytes)) = crl_files.iter().find(|(uri, _)| uri.as_str() == s) {
return Ok((uri.clone(), bytes.clone()));
if crl_cache.contains_key(s) {
return Ok(s);
}
}
Err(ObjectValidateError::CrlNotFound(
@ -488,16 +675,52 @@ fn choose_crl_for_certificate(
))
}
/// Decode and signature-verify the CRL at `crl_rsync_uri` on first use,
/// upgrading the cache entry in place so later calls return the verified
/// result without re-verifying.
///
/// Fix over the previous version: the pending DER bytes are no longer
/// `mem::take`n before decoding, so a decode/verify failure leaves the entry
/// intact and a retry reports the same error (instead of a misleading
/// "empty DER" decode failure against an emptied buffer).
///
/// # Panics
/// Panics if `crl_rsync_uri` is not a key of `crl_cache`; callers obtain the
/// URI from `choose_crl_uri_for_certificate`, which only returns cache keys.
fn ensure_issuer_crl_verified<'a>(
    crl_rsync_uri: &str,
    crl_cache: &'a mut std::collections::HashMap<String, CachedIssuerCrl>,
    issuer_ca_der: &[u8],
) -> Result<&'a VerifiedIssuerCrl, CertPathError> {
    // Upgrade a Pending entry to Ok in a scoped block so the mutable borrow
    // ends before the final (immutable) lookup.
    {
        let entry = crl_cache
            .get_mut(crl_rsync_uri)
            .expect("CRL must exist in cache");
        if let CachedIssuerCrl::Pending(bytes) = entry {
            // Decode and verify from the bytes in place; only replace the
            // entry once both steps have succeeded.
            let crl = crate::data_model::crl::RpkixCrl::decode_der(bytes.as_slice())
                .map_err(CertPathError::CrlDecode)?;
            crl.verify_signature_with_issuer_certificate_der(issuer_ca_der)
                .map_err(CertPathError::CrlVerify)?;
            // Index revoked serials once for O(1) per-object revocation checks.
            let revoked_serials: std::collections::HashSet<Vec<u8>> = crl
                .revoked_certs
                .iter()
                .map(|rc| rc.serial_number.bytes_be.clone())
                .collect();
            *entry = CachedIssuerCrl::Ok(VerifiedIssuerCrl {
                crl,
                revoked_serials,
            });
        }
    }
    match crl_cache.get(crl_rsync_uri) {
        Some(CachedIssuerCrl::Ok(v)) => Ok(v),
        // The entry existed (expect above) and was either already Ok or was
        // upgraded just now; Pending is impossible here.
        _ => unreachable!("cache entry was verified above"),
    }
}
fn validate_ee_resources_subset(
ee: &ResourceCertificate,
issuer_effective_ip: Option<&crate::data_model::rc::IpResourceSet>,
issuer_effective_as: Option<&crate::data_model::rc::AsResourceSet>,
issuer_resources_index: &IssuerResourcesIndex,
) -> Result<(), ObjectValidateError> {
if let Some(child_ip) = ee.tbs.extensions.ip_resources.as_ref() {
let Some(parent_ip) = issuer_effective_ip else {
return Err(ObjectValidateError::MissingIssuerEffectiveIp);
};
if !ip_resources_is_subset(child_ip, parent_ip) {
if !ip_resources_is_subset_indexed(child_ip, parent_ip, issuer_resources_index) {
return Err(ObjectValidateError::EeResourcesNotSubset);
}
}
@ -506,7 +729,7 @@ fn validate_ee_resources_subset(
let Some(parent_as) = issuer_effective_as else {
return Err(ObjectValidateError::MissingIssuerEffectiveAs);
};
if !as_resources_is_subset(child_as, parent_as) {
if !as_resources_is_subset_indexed(child_as, parent_as, issuer_resources_index) {
return Err(ObjectValidateError::EeResourcesNotSubset);
}
}
@ -519,6 +742,40 @@ fn as_resources_is_subset(child: &AsResourceSet, parent: &AsResourceSet) -> bool
&& as_choice_subset(child.rdi.as_ref(), parent.rdi.as_ref())
}
/// Indexed variant of `as_resources_is_subset`: checks the child's asnum and
/// rdi claims against the issuer's pre-merged interval index.
fn as_resources_is_subset_indexed(
    child: &AsResourceSet,
    parent: &AsResourceSet,
    idx: &IssuerResourcesIndex,
) -> bool {
    // `parent` is retained for signature parity with the non-indexed variant;
    // the pre-merged `idx` intervals are authoritative here.
    let _ = parent;
    if !as_choice_subset_indexed(child.asnum.as_ref(), idx.asnum.as_deref()) {
        return false;
    }
    as_choice_subset_indexed(child.rdi.as_ref(), idx.rdi.as_deref())
}
/// Check one AS identifier choice (asnum or rdi) against the issuer's
/// pre-merged parent intervals.
fn as_choice_subset_indexed(
    child: Option<&AsIdentifierChoice>,
    parent_intervals: Option<&[(u32, u32)]>,
) -> bool {
    // A child with no claim of this kind is trivially a subset.
    let child = match child {
        None => return true,
        Some(c) => c,
    };
    // A claim with no corresponding issuer resources can never be a subset.
    let parent_intervals = match parent_intervals {
        None => return false,
        Some(p) => p,
    };
    // "inherit" cannot be resolved against a flattened interval index.
    if matches!(child, AsIdentifierChoice::Inherit) {
        return false;
    }
    // Every merged child interval must be covered by some parent interval.
    as_choice_to_merged_intervals(child)
        .iter()
        .all(|&(cmin, cmax)| as_interval_is_covered(parent_intervals, cmin, cmax))
}
fn as_choice_subset(
child: Option<&AsIdentifierChoice>,
parent: Option<&AsIdentifierChoice>,
@ -614,6 +871,45 @@ fn ip_resources_is_subset(
true
}
/// Indexed variant of `ip_resources_is_subset`: checks each address family of
/// the child against the issuer's pre-merged interval index.
fn ip_resources_is_subset_indexed(
    child: &crate::data_model::rc::IpResourceSet,
    parent: &crate::data_model::rc::IpResourceSet,
    idx: &IssuerResourcesIndex,
) -> bool {
    // `parent` is retained for signature parity; the pre-merged `idx`
    // intervals are authoritative here.
    let _ = parent;
    for fam in &child.families {
        let covering = match fam.afi {
            crate::data_model::rc::Afi::Ipv4 => idx.ip_v4.as_deref(),
            crate::data_model::rc::Afi::Ipv6 => idx.ip_v6.as_deref(),
        };
        // The issuer holds nothing in this AFI: any child claim fails.
        let Some(covering) = covering else {
            return false;
        };
        // "inherit" cannot be compared against a flattened interval index.
        let IpAddressChoice::AddressesOrRanges(items) = &fam.choice else {
            return false;
        };
        // Normalize the child's claims to (min, max) byte intervals.
        let mut claimed: Vec<(Vec<u8>, Vec<u8>)> = items
            .iter()
            .map(|item| match item {
                IpAddressOrRange::Prefix(p) => prefix_to_range(p),
                IpAddressOrRange::Range(r) => (r.min.clone(), r.max.clone()),
            })
            .collect();
        if claimed.is_empty() {
            continue;
        }
        claimed.sort_by(|(a, _), (b, _)| a.cmp(b));
        merge_ip_intervals_in_place(&mut claimed);
        if !intervals_are_covered(covering, &claimed) {
            return false;
        }
    }
    true
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum AfiKey {
V4,
@ -650,7 +946,7 @@ fn ip_resources_to_merged_intervals(
for (_afi, v) in m.iter_mut() {
v.sort_by(|(a, _), (b, _)| a.cmp(b));
*v = merge_ip_intervals(v);
merge_ip_intervals_in_place(v);
}
m
@ -683,28 +979,31 @@ fn ip_resources_to_merged_intervals_strict(
for (_afi, v) in m.iter_mut() {
v.sort_by(|(a, _), (b, _)| a.cmp(b));
*v = merge_ip_intervals(v);
merge_ip_intervals_in_place(v);
}
Ok(m)
}
fn merge_ip_intervals(v: &[(Vec<u8>, Vec<u8>)]) -> Vec<(Vec<u8>, Vec<u8>)> {
let mut out: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
for (min, max) in v {
fn merge_ip_intervals_in_place(v: &mut Vec<(Vec<u8>, Vec<u8>)>) {
if v.is_empty() {
return;
}
let mut out: Vec<(Vec<u8>, Vec<u8>)> = Vec::with_capacity(v.len());
for (min, max) in v.drain(..) {
let Some(last) = out.last_mut() else {
out.push((min.clone(), max.clone()));
out.push((min, max));
continue;
};
if bytes_leq(min, &increment_bytes(&last.1)) {
if bytes_leq(&last.1, max) {
last.1 = max.clone();
if bytes_leq(&min, &last.1) || bytes_is_next(&min, &last.1) {
if bytes_leq(&last.1, &max) {
last.1 = max;
}
continue;
}
out.push((min.clone(), max.clone()));
out.push((min, max));
}
out
*v = out;
}
fn interval_is_covered(parent: &[(Vec<u8>, Vec<u8>)], min: &[u8], max: &[u8]) -> bool {
@ -719,6 +1018,23 @@ fn interval_is_covered(parent: &[(Vec<u8>, Vec<u8>)], min: &[u8], max: &[u8]) ->
false
}
/// Return true iff every `child` interval lies inside some `parent` interval.
///
/// Both lists must be sorted by lower bound and merged (non-overlapping,
/// non-adjacent), so a single forward merge-scan suffices.
fn intervals_are_covered(parent: &[(Vec<u8>, Vec<u8>)], child: &[(Vec<u8>, Vec<u8>)]) -> bool {
    let mut candidates = parent.iter().peekable();
    for (cmin, cmax) in child {
        // Discard parent intervals that end strictly before this child begins.
        while candidates
            .peek()
            .is_some_and(|(_, pmax)| pmax.as_slice() < cmin.as_slice())
        {
            candidates.next();
        }
        // The first remaining parent interval must fully contain the child.
        let Some((pmin, pmax)) = candidates.peek() else {
            return false;
        };
        if !bytes_leq(pmin, cmin) || !bytes_leq(cmax, pmax) {
            return false;
        }
    }
    true
}
fn prefix_to_range(prefix: &RcIpPrefix) -> (Vec<u8>, Vec<u8>) {
let mut min = prefix.addr.clone();
let mut max = prefix.addr.clone();
@ -752,6 +1068,78 @@ fn increment_bytes(v: &[u8]) -> Vec<u8> {
vec![0u8; out.len()]
}
/// Return true iff `a == b + 1` for two equal-length big-endian byte strings.
///
/// NOTE(review): an all-0xFF `b` wraps around to all zeros (carry propagates
/// off the top), mirroring `increment_bytes`' overflow behavior.
fn bytes_is_next(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false;
    }
    // Walk from the least-significant byte, propagating the +1 carry and
    // comparing against `a` as we go; bail on the first mismatch.
    let mut carry: u16 = 1;
    a.iter().zip(b.iter()).rev().all(|(&ai, &bi)| {
        let sum = u16::from(bi) + carry;
        carry = sum >> 8;
        ai == (sum & 0xFF) as u8
    })
}
/// Builds a sorted, merged interval index over an issuer's effective IP and
/// AS resources, used for fast subset checks.
///
/// Inherit choices are deliberately not indexed: effective resources should
/// not contain inherit, and leaving the slot empty makes any subset check
/// against it fail.
fn build_issuer_resources_index(
    issuer_effective_ip: Option<&crate::data_model::rc::IpResourceSet>,
    issuer_effective_as: Option<&crate::data_model::rc::AsResourceSet>,
) -> IssuerResourcesIndex {
    let mut idx = IssuerResourcesIndex::default();
    if let Some(ip) = issuer_effective_ip {
        let mut ranges_v4: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
        let mut ranges_v6: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
        for fam in &ip.families {
            // Route each address family into its own interval bucket.
            let bucket = match fam.afi {
                crate::data_model::rc::Afi::Ipv4 => &mut ranges_v4,
                crate::data_model::rc::Afi::Ipv6 => &mut ranges_v6,
            };
            // Inherit is intentionally skipped (see doc comment above).
            if let IpAddressChoice::AddressesOrRanges(items) = &fam.choice {
                for item in items {
                    bucket.push(match item {
                        IpAddressOrRange::Prefix(p) => prefix_to_range(p),
                        IpAddressOrRange::Range(r) => (r.min.clone(), r.max.clone()),
                    });
                }
            }
        }
        if !ranges_v4.is_empty() {
            ranges_v4.sort_by(|(a, _), (b, _)| a.cmp(b));
            merge_ip_intervals_in_place(&mut ranges_v4);
            idx.ip_v4 = Some(ranges_v4);
        }
        if !ranges_v6.is_empty() {
            ranges_v6.sort_by(|(a, _), (b, _)| a.cmp(b));
            merge_ip_intervals_in_place(&mut ranges_v6);
            idx.ip_v6 = Some(ranges_v6);
        }
    }
    if let Some(asr) = issuer_effective_as {
        match asr.asnum.as_ref() {
            Some(choice) if !matches!(choice, AsIdentifierChoice::Inherit) => {
                idx.asnum = Some(as_choice_to_merged_intervals(choice));
            }
            _ => {}
        }
        match asr.rdi.as_ref() {
            Some(choice) if !matches!(choice, AsIdentifierChoice::Inherit) => {
                idx.rdi = Some(as_choice_to_merged_intervals(choice));
            }
            _ => {}
        }
    }
    idx
}
fn roa_to_vrps(roa: &RoaObject) -> Vec<Vrp> {
let asn = roa.roa.as_id;
let mut out = Vec::new();
@ -783,6 +1171,7 @@ mod tests {
Afi, AsIdOrRange, AsIdentifierChoice, IpAddressFamily, IpAddressOrRange, IpAddressRange,
IpPrefix, IpResourceSet,
};
use std::collections::HashMap;
fn fixture_bytes(path: &str) -> Vec<u8> {
std::fs::read(std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(path))
@ -885,12 +1274,12 @@ mod tests {
#[test]
fn merge_ip_intervals_merges_contiguous() {
let v = vec![
let mut v = vec![
(vec![0, 0, 0, 0], vec![0, 0, 0, 10]),
(vec![0, 0, 0, 11], vec![0, 0, 0, 20]),
];
let merged = merge_ip_intervals(&v);
assert_eq!(merged, vec![(vec![0, 0, 0, 0], vec![0, 0, 0, 20])]);
merge_ip_intervals_in_place(&mut v);
assert_eq!(v, vec![(vec![0, 0, 0, 0], vec![0, 0, 0, 20])]);
}
#[test]
@ -904,14 +1293,19 @@ mod tests {
.extensions
.crl_distribution_points_uris
.as_ref();
let err = choose_crl_for_certificate(ee_crldp_uris, &[]).unwrap_err();
let crl_cache: HashMap<String, CachedIssuerCrl> = HashMap::new();
let err = choose_crl_uri_for_certificate(ee_crldp_uris, &crl_cache).unwrap_err();
assert!(matches!(err, ObjectValidateError::MissingCrlInPack));
}
#[test]
fn choose_crl_for_certificate_reports_missing_crldp_uris() {
let crl_a = ("rsync://example.test/a.crl".to_string(), vec![0x01]);
let err = choose_crl_for_certificate(None, &[crl_a]).unwrap_err();
let mut crl_cache: HashMap<String, CachedIssuerCrl> = HashMap::new();
crl_cache.insert(
"rsync://example.test/a.crl".to_string(),
CachedIssuerCrl::Pending(vec![0x01]),
);
let err = choose_crl_uri_for_certificate(None, &crl_cache).unwrap_err();
assert!(matches!(err, ObjectValidateError::MissingCrlDpUris));
}
@ -928,24 +1322,16 @@ mod tests {
.as_ref()
.expect("fixture ee has crldp");
// Use two CRLs, only one matches the first CRLDP URI.
let other_crl_der = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl",
);
let matching_uri = ee_crldp_uris[0].as_str().to_string();
let matching_crl_der = vec![0x01, 0x02, 0x03];
let (uri, bytes) = choose_crl_for_certificate(
Some(ee_crldp_uris),
&[
("rsync://example.test/other.crl".to_string(), other_crl_der),
(matching_uri.clone(), matching_crl_der.clone()),
],
)
.unwrap();
let mut crl_cache: HashMap<String, CachedIssuerCrl> = HashMap::new();
crl_cache.insert(
"rsync://example.test/other.crl".to_string(),
CachedIssuerCrl::Pending(vec![0x00]),
);
crl_cache.insert(matching_uri.clone(), CachedIssuerCrl::Pending(vec![0x01]));
let uri = choose_crl_uri_for_certificate(Some(ee_crldp_uris), &crl_cache).unwrap();
assert_eq!(uri, matching_uri);
assert_eq!(bytes, matching_crl_der);
}
#[test]
@ -960,11 +1346,12 @@ mod tests {
.crl_distribution_points_uris
.as_ref();
let err = choose_crl_for_certificate(
ee_crldp_uris,
&[("rsync://example.test/other.crl".to_string(), vec![0x01])],
)
.unwrap_err();
let mut crl_cache: HashMap<String, CachedIssuerCrl> = HashMap::new();
crl_cache.insert(
"rsync://example.test/other.crl".to_string(),
CachedIssuerCrl::Pending(vec![0x01]),
);
let err = choose_crl_uri_for_certificate(ee_crldp_uris, &crl_cache).unwrap_err();
assert!(matches!(err, ObjectValidateError::CrlNotFound(_)));
}
@ -976,7 +1363,8 @@ mod tests {
let roa = RoaObject::decode_der(&roa_der).expect("decode roa");
let ee = &roa.signed_object.signed_data.certificates[0].resource_cert;
let err = validate_ee_resources_subset(ee, None, None).unwrap_err();
let idx = IssuerResourcesIndex::default();
let err = validate_ee_resources_subset(ee, None, None, &idx).unwrap_err();
assert!(matches!(err, ObjectValidateError::MissingIssuerEffectiveIp));
}
@ -1002,7 +1390,8 @@ mod tests {
}],
};
let err = validate_ee_resources_subset(ee, Some(&issuer_ip), None).unwrap_err();
let idx = build_issuer_resources_index(Some(&issuer_ip), None);
let err = validate_ee_resources_subset(ee, Some(&issuer_ip), None, &idx).unwrap_err();
assert!(matches!(err, ObjectValidateError::MissingIssuerEffectiveAs));
}
@ -1028,7 +1417,8 @@ mod tests {
}],
};
let err = validate_ee_resources_subset(ee, Some(&issuer_ip), None).unwrap_err();
let idx = build_issuer_resources_index(Some(&issuer_ip), None);
let err = validate_ee_resources_subset(ee, Some(&issuer_ip), None, &idx).unwrap_err();
assert!(matches!(err, ObjectValidateError::EeResourcesNotSubset));
}
}

View File

@ -5,9 +5,7 @@ use crate::storage::{FetchCachePpKey, RocksStore};
use crate::sync::repo::{RepoSyncResult, sync_publication_point};
use crate::sync::rrdp::Fetcher as HttpFetcher;
use crate::validation::manifest::{PublicationPointResult, process_manifest_publication_point};
use crate::validation::objects::{
ObjectsOutput, process_fetch_cache_pp_pack_for_issuer,
};
use crate::validation::objects::{ObjectsOutput, process_fetch_cache_pp_pack_for_issuer};
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RunOutput {
@ -53,6 +51,7 @@ pub fn run_publication_point_once(
rsync_base_uri,
http_fetcher,
rsync_fetcher,
None,
)?;
let publication_point = process_manifest_publication_point(
@ -73,6 +72,7 @@ pub fn run_publication_point_once(
issuer_effective_ip,
issuer_effective_as,
validation_time,
None,
);
Ok(RunOutput {

View File

@ -1,5 +1,6 @@
use url::Url;
use crate::analysis::timing::TimingHandle;
use crate::audit::PublicationPointAudit;
use crate::data_model::ta::TrustAnchor;
use crate::sync::rrdp::Fetcher;
@ -12,6 +13,8 @@ use crate::validation::tree::{
run_tree_serial, run_tree_serial_audit,
};
use crate::validation::tree_runner::Rpkiv1PublicationPointRunner;
use std::collections::HashMap;
use std::sync::Mutex;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RunTreeFromTalOutput {
@ -71,6 +74,12 @@ pub fn run_tree_from_tal_url_serial(
http_fetcher,
rsync_fetcher,
validation_time,
timing: None,
revalidate_only: config.revalidate_only,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()),
};
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
@ -96,6 +105,12 @@ pub fn run_tree_from_tal_url_serial_audit(
http_fetcher,
rsync_fetcher,
validation_time,
timing: None,
revalidate_only: config.revalidate_only,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()),
};
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
@ -111,6 +126,48 @@ pub fn run_tree_from_tal_url_serial_audit(
})
}
/// Audit-mode tree run bootstrapped from a TAL URL, with per-phase timing
/// recorded into `timing` (the "tal_bootstrap" and "tree_run_total" spans).
///
/// Mirrors `run_tree_from_tal_url_serial_audit`, additionally threading a
/// cloned `TimingHandle` into the publication-point runner.
pub fn run_tree_from_tal_url_serial_audit_with_timing(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_url: &str,
    http_fetcher: &dyn Fetcher,
    rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
    timing: &TimingHandle,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
    // Scope the bootstrap span to TAL discovery only.
    let discovery = {
        let _span = timing.span_phase("tal_bootstrap");
        discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?
    };

    let runner = Rpkiv1PublicationPointRunner {
        store,
        policy,
        http_fetcher,
        rsync_fetcher,
        validation_time,
        timing: Some(timing.clone()),
        revalidate_only: config.revalidate_only,
        rrdp_dedup: true,
        rrdp_repo_cache: Mutex::new(HashMap::new()),
        rsync_dedup: true,
        rsync_repo_cache: Mutex::new(HashMap::new()),
    };

    let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
    // Guard stays alive until return, so the span covers the whole tree walk.
    let _span = timing.span_phase("tree_run_total");
    let TreeRunAuditOutput {
        tree,
        publication_points,
    } = run_tree_serial_audit(root, &runner, config)?;
    Ok(RunTreeFromTalAuditOutput {
        discovery,
        tree,
        publication_points,
    })
}
pub fn run_tree_from_tal_and_ta_der_serial(
store: &crate::storage::RocksStore,
policy: &crate::policy::Policy,
@ -131,6 +188,12 @@ pub fn run_tree_from_tal_and_ta_der_serial(
http_fetcher,
rsync_fetcher,
validation_time,
timing: None,
revalidate_only: config.revalidate_only,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()),
};
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
@ -159,6 +222,12 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit(
http_fetcher,
rsync_fetcher,
validation_time,
timing: None,
revalidate_only: config.revalidate_only,
rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()),
};
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
@ -173,3 +242,48 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit(
publication_points,
})
}
/// Audit-mode tree run bootstrapped from in-memory TAL and TA DER bytes,
/// with per-phase timing recorded into `timing` (the "tal_bootstrap" and
/// "tree_run_total" spans).
///
/// Mirrors `run_tree_from_tal_and_ta_der_serial_audit`, additionally
/// threading a cloned `TimingHandle` into the publication-point runner.
pub fn run_tree_from_tal_and_ta_der_serial_audit_with_timing(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_bytes: &[u8],
    ta_der: &[u8],
    resolved_ta_uri: Option<&Url>,
    http_fetcher: &dyn Fetcher,
    rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
    timing: &TimingHandle,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
    // Scope the bootstrap span to TA discovery only.
    let discovery = {
        let _span = timing.span_phase("tal_bootstrap");
        discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?
    };

    let runner = Rpkiv1PublicationPointRunner {
        store,
        policy,
        http_fetcher,
        rsync_fetcher,
        validation_time,
        timing: Some(timing.clone()),
        revalidate_only: config.revalidate_only,
        rrdp_dedup: true,
        rrdp_repo_cache: Mutex::new(HashMap::new()),
        rsync_dedup: true,
        rsync_repo_cache: Mutex::new(HashMap::new()),
    };

    let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
    // Guard stays alive until return, so the span covers the whole tree walk.
    let _span = timing.span_phase("tree_run_total");
    let TreeRunAuditOutput {
        tree,
        publication_points,
    } = run_tree_serial_audit(root, &runner, config)?;
    Ok(RunTreeFromTalAuditOutput {
        discovery,
        tree,
        publication_points,
    })
}

View File

@ -12,6 +12,10 @@ pub struct TreeRunConfig {
pub max_depth: Option<usize>,
/// Max number of CA instances to process.
pub max_instances: Option<usize>,
/// Skip RRDP/rsync fetch and re-validate from existing `fetch_cache_pp` packs in the DB.
///
/// This is primarily intended for profiling/analysis runs to remove network noise.
pub revalidate_only: bool,
}
impl Default for TreeRunConfig {
@ -19,6 +23,7 @@ impl Default for TreeRunConfig {
Self {
max_depth: None,
max_instances: None,
revalidate_only: false,
}
}
}
@ -182,7 +187,8 @@ pub fn run_tree_serial_audit(
audit.discovered_from = node.discovered_from.clone();
publication_points.push(audit);
let enqueue_children = res.source == PublicationPointSource::Fresh;
let enqueue_children =
res.source == PublicationPointSource::Fresh || config.revalidate_only;
if !enqueue_children && !res.discovered_children.is_empty() {
warnings.push(
Warning::new("skipping child CA discovery due to failed fetch cache use")

File diff suppressed because it is too large Load Diff

View File

@ -3,8 +3,7 @@ use std::path::{Path, PathBuf};
use std::time::Instant;
fn default_samples_dir() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("tests/benchmark/selected_der")
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/benchmark/selected_der")
}
fn read_samples(dir: &Path) -> Vec<Sample> {
@ -40,9 +39,7 @@ fn env_u64(name: &str, default: u64) -> u64 {
}
fn env_u64_opt(name: &str) -> Option<u64> {
std::env::var(name)
.ok()
.and_then(|s| s.parse::<u64>().ok())
std::env::var(name).ok().and_then(|s| s.parse::<u64>().ok())
}
fn env_bool(name: &str) -> bool {
@ -101,7 +98,10 @@ fn manifest_decode_profile_benchmark_selected_der() {
if let Some(filter) = sample_filter.as_deref() {
samples.retain(|s| s.name == filter);
assert!(!samples.is_empty(), "no sample matched BENCH_SAMPLE={filter}");
assert!(
!samples.is_empty(),
"no sample matched BENCH_SAMPLE={filter}"
);
}
println!("# Manifest decode + profile validate benchmark (debug build)");
@ -161,11 +161,7 @@ fn manifest_decode_profile_benchmark_selected_der() {
let iters = if let Some(n) = fixed_iters {
n
} else {
choose_iters_adaptive(
bytes.as_slice(),
min_round_ms,
max_adaptive_iters,
)
choose_iters_adaptive(bytes.as_slice(), min_round_ms, max_adaptive_iters)
};
let start = Instant::now();
@ -208,9 +204,9 @@ fn manifest_decode_profile_benchmark_selected_der() {
}
if out_md.is_some() || out_json.is_some() {
let timestamp_utc =
time::OffsetDateTime::now_utc().format(&time::format_description::well_known::Rfc3339)
.unwrap_or_else(|_| "unknown".to_string());
let timestamp_utc = time::OffsetDateTime::now_utc()
.format(&time::format_description::well_known::Rfc3339)
.unwrap_or_else(|_| "unknown".to_string());
let cfg = RunConfig {
dir: dir.display().to_string(),
sample: sample_filter,

View File

@ -6,7 +6,9 @@ use std::path::{Path, PathBuf};
use rpki::data_model::aspa::AspaObject;
use rpki::data_model::crl::RpkixCrl;
use rpki::data_model::manifest::ManifestObject;
use rpki::data_model::rc::{AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpResourceSet, ResourceCertificate};
use rpki::data_model::rc::{
AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpResourceSet, ResourceCertificate,
};
use rpki::data_model::roa::RoaObject;
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
@ -243,7 +245,9 @@ fn scan_pack_candidates(
let cand = CandidateRef {
pack_index,
part: Part::StoredObject { index: object_index },
part: Part::StoredObject {
index: object_index,
},
size_bytes: content_len as u32,
};
if let Some(ext) = ext {
@ -272,7 +276,8 @@ struct Extracted {
}
fn extract_candidate(pack_path: &Path, cand: CandidateRef) -> Result<Extracted, String> {
let bytes = std::fs::read(pack_path).map_err(|e| format!("read {}: {e}", pack_path.display()))?;
let bytes =
std::fs::read(pack_path).map_err(|e| format!("read {}: {e}", pack_path.display()))?;
let mut cur = ByteCursor::new(bytes.as_slice());
let _version = cur.read_u8()?;
@ -349,9 +354,7 @@ fn ip_resource_count(set: &IpResourceSet) -> u32 {
for fam in &set.families {
match &fam.choice {
IpAddressChoice::Inherit => n = n.saturating_add(1),
IpAddressChoice::AddressesOrRanges(items) => {
n = n.saturating_add(items.len() as u32)
}
IpAddressChoice::AddressesOrRanges(items) => n = n.saturating_add(items.len() as u32),
}
}
n
@ -556,7 +559,9 @@ fn stage2_collect_selected_der_v2_from_routinator_store() {
(Part::PackCrl, Part::PackCrl) => std::cmp::Ordering::Equal,
(Part::PackCrl, Part::StoredObject { .. }) => std::cmp::Ordering::Less,
(Part::StoredObject { .. }, Part::PackCrl) => std::cmp::Ordering::Greater,
(Part::StoredObject { index: ai }, Part::StoredObject { index: bi }) => ai.cmp(&bi),
(Part::StoredObject { index: ai }, Part::StoredObject { index: bi }) => {
ai.cmp(&bi)
}
})
});
@ -791,11 +796,18 @@ fn stage2_collect_selected_der_v2_from_routinator_store() {
}
}
per_type_out.insert(format!("{:?}", obj_type).to_lowercase(), selected_sha.len() as u32);
per_type_out.insert(
format!("{:?}", obj_type).to_lowercase(),
selected_sha.len() as u32,
);
println!();
}
samples.sort_by(|a, b| a.obj_type.cmp(&b.obj_type).then_with(|| a.label.cmp(&b.label)));
samples.sort_by(|a, b| {
a.obj_type
.cmp(&b.obj_type)
.then_with(|| a.label.cmp(&b.label))
});
let created_at_rfc3339_utc = fmt_rfc3339_utc(time::OffsetDateTime::now_utc());
let manifest = SamplesManifest {
@ -885,9 +897,8 @@ impl<'a> ByteCursor<'a> {
fn read_time_utc_i64_be(&mut self) -> Result<time::OffsetDateTime, String> {
let ts = self.read_i64_be()?;
time::OffsetDateTime::from_unix_timestamp(ts).map_err(|e| {
format!("invalid unix timestamp {ts}: {e}")
})
time::OffsetDateTime::from_unix_timestamp(ts)
.map_err(|e| format!("invalid unix timestamp {ts}: {e}"))
}
fn read_bytes_u32(&mut self) -> Result<&'a [u8], String> {
@ -917,17 +928,15 @@ impl<'a> ByteCursor<'a> {
fn skip_bytes_u64(&mut self) -> Result<(), String> {
let len_u64 = self.read_u64_be()?;
let len = usize::try_from(len_u64).map_err(|_| {
format!("data block too large for this system: {len_u64}")
})?;
let len = usize::try_from(len_u64)
.map_err(|_| format!("data block too large for this system: {len_u64}"))?;
self.skip(len)
}
fn read_bytes_u64_vec(&mut self) -> Result<Vec<u8>, String> {
let len_u64 = self.read_u64_be()?;
let len = usize::try_from(len_u64).map_err(|_| {
format!("data block too large for this system: {len_u64}")
})?;
let len = usize::try_from(len_u64)
.map_err(|_| format!("data block too large for this system: {len_u64}"))?;
Ok(self.read_exact(len)?.to_vec())
}
}

View File

@ -1,11 +1,13 @@
use rpki::data_model::aspa::AspaObject;
use rpki::data_model::crl::RpkixCrl;
use rpki::data_model::manifest::ManifestObject;
use rpki::data_model::rc::{AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpResourceSet, ResourceCertificate};
use rpki::data_model::rc::{
AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpResourceSet, ResourceCertificate,
};
use rpki::data_model::roa::RoaObject;
use rpki::storage::pack::PackFile;
use rpki::storage::RocksStore;
use rpki::storage::pack::PackFile;
use std::path::{Path, PathBuf};
use std::time::Instant;
@ -76,10 +78,18 @@ fn read_samples(root: &Path) -> Vec<Sample> {
.and_then(|s| s.to_str())
.unwrap_or("unknown")
.to_string();
out.push(Sample { obj_type, name, path });
out.push(Sample {
obj_type,
name,
path,
});
}
}
out.sort_by(|a, b| a.obj_type.cmp(&b.obj_type).then_with(|| a.name.cmp(&b.name)));
out.sort_by(|a, b| {
a.obj_type
.cmp(&b.obj_type)
.then_with(|| a.name.cmp(&b.name))
});
out
}
@ -260,8 +270,8 @@ fn complexity_main(obj_type: ObjType, bytes: &[u8]) -> u64 {
fn decode_validate(obj_type: ObjType, bytes: &[u8]) {
match obj_type {
ObjType::Cer => {
let decoded = ResourceCertificate::decode_der(std::hint::black_box(bytes))
.expect("decode cert");
let decoded =
ResourceCertificate::decode_der(std::hint::black_box(bytes)).expect("decode cert");
std::hint::black_box(decoded);
}
ObjType::Crl => {
@ -284,13 +294,13 @@ fn decode_validate(obj_type: ObjType, bytes: &[u8]) {
}
}
fn landing_packfile_cbor_put(
store: &RocksStore,
obj_type: ObjType,
sample: &str,
bytes: &[u8],
) {
let rsync_uri = format!("rsync://bench.invalid/{}/{}.{}", obj_type.as_str(), sample, obj_type.ext());
fn landing_packfile_cbor_put(store: &RocksStore, obj_type: ObjType, sample: &str, bytes: &[u8]) {
let rsync_uri = format!(
"rsync://bench.invalid/{}/{}.{}",
obj_type.as_str(),
sample,
obj_type.ext()
);
let pf = PackFile::from_bytes_compute_sha256(rsync_uri, bytes.to_vec());
let encoded = serde_cbor::to_vec(std::hint::black_box(&pf)).expect("cbor encode packfile");
let key = format!("bench:packfile:{}:{}", obj_type.as_str(), sample);
@ -414,7 +424,10 @@ fn stage2_decode_validate_and_landing_benchmark_selected_der_v2() {
}
if let Some(filter) = sample_filter.as_deref() {
samples.retain(|s| s.name == filter);
assert!(!samples.is_empty(), "no sample matched BENCH_SAMPLE={filter}");
assert!(
!samples.is_empty(),
"no sample matched BENCH_SAMPLE={filter}"
);
}
println!("# Stage2 decode+validate benchmark (selected_der_v2)");
@ -473,7 +486,8 @@ fn stage2_decode_validate_and_landing_benchmark_selected_der_v2() {
let mut landing_rows: Vec<ResultRow> = Vec::with_capacity(samples.len());
for s in &samples {
let bytes = std::fs::read(&s.path).unwrap_or_else(|e| panic!("read {}: {e}", s.path.display()));
let bytes =
std::fs::read(&s.path).unwrap_or_else(|e| panic!("read {}: {e}", s.path.display()));
let size_bytes = bytes.len();
let complexity = if mode.do_decode() {
complexity_main(s.obj_type, bytes.as_slice())
@ -637,7 +651,10 @@ fn stage2_decode_validate_and_landing_benchmark_selected_der_v2() {
};
if let Some(path) = out_md {
let md = render_markdown("Stage2 decode+validate benchmark (selected_der_v2)", &decode_rows);
let md = render_markdown(
"Stage2 decode+validate benchmark (selected_der_v2)",
&decode_rows,
);
write_text_file(&path, &md);
eprintln!("Wrote {}", path.display());
}

View File

@ -228,8 +228,6 @@ fn stage2_inventory_routinator_sap_store() {
if let Some(path) = out_json.as_ref() {
create_parent_dirs(path);
let bytes = serde_json::to_vec_pretty(&report).expect("encode json");
std::fs::write(path, bytes)
.unwrap_or_else(|e| panic!("write {}: {e}", path.display()));
std::fs::write(path, bytes).unwrap_or_else(|e| panic!("write {}: {e}", path.display()));
}
}

View File

@ -199,9 +199,8 @@ impl<'a> Cursor<'a> {
fn read_time_utc_i64_be(&mut self) -> Result<time::OffsetDateTime, SapDecodeError> {
let ts = self.read_i64_be()?;
time::OffsetDateTime::from_unix_timestamp(ts).map_err(|e| {
SapDecodeError::Decode(format!("invalid unix timestamp {ts}: {e}"))
})
time::OffsetDateTime::from_unix_timestamp(ts)
.map_err(|e| SapDecodeError::Decode(format!("invalid unix timestamp {ts}: {e}")))
}
fn read_string_u32(&mut self) -> Result<String, SapDecodeError> {
@ -252,4 +251,3 @@ impl<'a> Cursor<'a> {
Ok(out)
}
}

View File

@ -7,8 +7,8 @@ use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use rpki::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use rpki::policy::{CaFailedFetchPolicy, Policy, SyncPreference};
use rpki::storage::{FetchCachePpKey, RocksStore};
use rpki::sync::rrdp::{Fetcher, parse_notification, sync_from_notification};
use rpki::sync::repo::{RepoSyncSource, sync_publication_point};
use rpki::sync::rrdp::{Fetcher, parse_notification, sync_from_notification};
use rpki::validation::from_tal::discover_root_ca_instance_from_tal_url;
use rpki::validation::manifest::{PublicationPointSource, process_manifest_publication_point};
@ -36,8 +36,13 @@ fn live_http_fetcher() -> BlockingHttpFetcher {
struct AlwaysFailRsyncFetcher;
impl RsyncFetcher for AlwaysFailRsyncFetcher {
fn fetch_objects(&self, _rsync_base_uri: &str) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
Err(RsyncFetchError::Fetch("rsync disabled for this test".to_string()))
fn fetch_objects(
&self,
_rsync_base_uri: &str,
) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
Err(RsyncFetchError::Fetch(
"rsync disabled for this test".to_string(),
))
}
}
@ -108,6 +113,7 @@ fn apnic_live_bootstrap_snapshot_and_fetch_cache_pp_pack_to_persistent_db() {
&ca_instance.rsync_base_uri,
&http,
&rsync,
None,
)
.expect("repo sync");
@ -130,13 +136,16 @@ fn apnic_live_bootstrap_snapshot_and_fetch_cache_pp_pack_to_persistent_db() {
assert_eq!(pp.source, PublicationPointSource::Fresh);
let key = FetchCachePpKey::from_manifest_rsync_uri(&ca_instance.manifest_rsync_uri);
let cached = store
.get_fetch_cache_pp(&key)
.expect("get fetch_cache_pp");
let cached = store.get_fetch_cache_pp(&key).expect("get fetch_cache_pp");
assert!(cached.is_some(), "expected fetch_cache_pp to be stored");
eprintln!("OK: bootstrap complete; persistent db at: {}", db_dir.display());
eprintln!("Next: run `cargo test --release -q --test test_apnic_rrdp_delta_live_20260226 -- --ignored` later to exercise delta sync.");
eprintln!(
"OK: bootstrap complete; persistent db at: {}",
db_dir.display()
);
eprintln!(
"Next: run `cargo test --release -q --test test_apnic_rrdp_delta_live_20260226 -- --ignored` later to exercise delta sync."
);
}
#[test]
@ -184,9 +193,7 @@ fn apnic_live_delta_only_from_persistent_db() {
if start.elapsed() > Duration::from_secs(max_wait_secs) {
panic!(
"timed out waiting for APNIC RRDP serial to advance for delta sync; old_session={} old_serial={} waited={}s",
old_session,
old_serial,
max_wait_secs
old_session, old_serial, max_wait_secs
);
}
@ -198,8 +205,7 @@ fn apnic_live_delta_only_from_persistent_db() {
if notif.session_id.to_string() != old_session {
panic!(
"RRDP session_id changed; this delta-only test assumes same snapshot baseline. old_session={} new_session={}",
old_session,
notif.session_id
old_session, notif.session_id
);
}
@ -217,10 +223,7 @@ fn apnic_live_delta_only_from_persistent_db() {
if notif.deltas.is_empty() || min_delta > want_first {
panic!(
"notification deltas do not cover required serial gap for delta-only sync; old_serial={} want_first={} min_delta={} current_serial={}. rerun bootstrap to refresh snapshot baseline.",
old_serial,
want_first,
min_delta,
notif.serial
old_serial, want_first, min_delta, notif.serial
);
}
@ -276,9 +279,7 @@ fn apnic_root_repo_sync_failure_uses_fetch_cache_pp_pack() {
// Ensure cache exists (created by bootstrap).
let key = FetchCachePpKey::from_manifest_rsync_uri(&ca_instance.manifest_rsync_uri);
let cached = store
.get_fetch_cache_pp(&key)
.expect("get fetch_cache_pp");
let cached = store.get_fetch_cache_pp(&key).expect("get fetch_cache_pp");
assert!(
cached.is_some(),
"missing fetch_cache_pp; run bootstrap test first. db_dir={}",
@ -303,8 +304,9 @@ fn apnic_root_repo_sync_failure_uses_fetch_cache_pp_pack() {
assert_eq!(pp.source, PublicationPointSource::FetchCachePp);
assert!(
pp.warnings.iter().any(|w| w.message.contains("using fetch_cache_pp")),
pp.warnings
.iter()
.any(|w| w.message.contains("using fetch_cache_pp")),
"expected cache-use warning"
);
}

View File

@ -166,6 +166,12 @@ fn apnic_tree_full_stats_serial() {
http_fetcher: &http,
rsync_fetcher: &rsync,
validation_time,
timing: None,
revalidate_only: false,
rrdp_dedup: true,
rrdp_repo_cache: std::sync::Mutex::new(std::collections::HashMap::new()),
rsync_dedup: true,
rsync_repo_cache: std::sync::Mutex::new(std::collections::HashMap::new()),
};
let stats = RefCell::new(LiveStats::default());
@ -190,6 +196,7 @@ fn apnic_tree_full_stats_serial() {
&TreeRunConfig {
max_depth,
max_instances,
revalidate_only: false,
},
)
.expect("run tree");

View File

@ -36,6 +36,7 @@ fn apnic_tree_depth1_processes_more_than_root() {
&TreeRunConfig {
max_depth: Some(1),
max_instances: Some(2),
revalidate_only: false,
},
)
.expect("run tree from tal");
@ -74,6 +75,7 @@ fn apnic_tree_root_only_processes_root_with_long_timeouts() {
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
revalidate_only: false,
},
)
.expect("run APNIC root-only");

View File

@ -1,6 +1,7 @@
use rpki::data_model::oid::{OID_AD_CA_REPOSITORY, OID_AD_RPKI_MANIFEST, OID_AD_RPKI_NOTIFY};
use rpki::data_model::rc::{
AccessDescription, ResourceCertKind, ResourceCertificate, SubjectInfoAccess, SubjectInfoAccessCa,
AccessDescription, ResourceCertKind, ResourceCertificate, SubjectInfoAccess,
SubjectInfoAccessCa,
};
use rpki::validation::ca_instance::{CaInstanceUrisError, ca_instance_uris_from_ca_certificate};
@ -12,8 +13,9 @@ fn apnic_child_ca_fixture_der() -> Vec<u8> {
}
fn set_sia_ca(cert: &mut ResourceCertificate, ads: Vec<AccessDescription>) {
cert.tbs.extensions.subject_info_access =
Some(SubjectInfoAccess::Ca(SubjectInfoAccessCa { access_descriptions: ads }));
cert.tbs.extensions.subject_info_access = Some(SubjectInfoAccess::Ca(SubjectInfoAccessCa {
access_descriptions: ads,
}));
}
#[test]
@ -48,7 +50,10 @@ fn ca_instance_uris_success_and_error_branches() {
}],
);
let err = ca_instance_uris_from_ca_certificate(&cert).unwrap_err();
assert!(matches!(err, CaInstanceUrisError::MissingCaRepository), "{err}");
assert!(
matches!(err, CaInstanceUrisError::MissingCaRepository),
"{err}"
);
set_sia_ca(
&mut cert,
@ -58,7 +63,10 @@ fn ca_instance_uris_success_and_error_branches() {
}],
);
let err = ca_instance_uris_from_ca_certificate(&cert).unwrap_err();
assert!(matches!(err, CaInstanceUrisError::MissingRpkiManifest), "{err}");
assert!(
matches!(err, CaInstanceUrisError::MissingRpkiManifest),
"{err}"
);
// Scheme validation branches.
set_sia_ca(
@ -69,21 +77,29 @@ fn ca_instance_uris_success_and_error_branches() {
}],
);
let err = ca_instance_uris_from_ca_certificate(&cert).unwrap_err();
assert!(matches!(err, CaInstanceUrisError::CaRepositoryNotRsync(_)), "{err}");
assert!(
matches!(err, CaInstanceUrisError::CaRepositoryNotRsync(_)),
"{err}"
);
set_sia_ca(
&mut cert,
vec![AccessDescription {
access_method_oid: OID_AD_CA_REPOSITORY.to_string(),
access_location: "rsync://example.test/repo/".to_string(),
},
AccessDescription {
access_method_oid: OID_AD_RPKI_MANIFEST.to_string(),
access_location: "http://example.test/repo/x.mft".to_string(),
}],
vec![
AccessDescription {
access_method_oid: OID_AD_CA_REPOSITORY.to_string(),
access_location: "rsync://example.test/repo/".to_string(),
},
AccessDescription {
access_method_oid: OID_AD_RPKI_MANIFEST.to_string(),
access_location: "http://example.test/repo/x.mft".to_string(),
},
],
);
let err = ca_instance_uris_from_ca_certificate(&cert).unwrap_err();
assert!(matches!(err, CaInstanceUrisError::RpkiManifestNotRsync(_)), "{err}");
assert!(
matches!(err, CaInstanceUrisError::RpkiManifestNotRsync(_)),
"{err}"
);
set_sia_ca(
&mut cert,
@ -103,7 +119,10 @@ fn ca_instance_uris_success_and_error_branches() {
],
);
let err = ca_instance_uris_from_ca_certificate(&cert).unwrap_err();
assert!(matches!(err, CaInstanceUrisError::RpkiNotifyNotHttps(_)), "{err}");
assert!(
matches!(err, CaInstanceUrisError::RpkiNotifyNotHttps(_)),
"{err}"
);
// ManifestNotUnderPublicationPoint.
set_sia_ca(
@ -120,6 +139,11 @@ fn ca_instance_uris_success_and_error_branches() {
],
);
let err = ca_instance_uris_from_ca_certificate(&cert).unwrap_err();
assert!(matches!(err, CaInstanceUrisError::ManifestNotUnderPublicationPoint { .. }), "{err}");
assert!(
matches!(
err,
CaInstanceUrisError::ManifestNotUnderPublicationPoint { .. }
),
"{err}"
);
}

View File

@ -50,6 +50,7 @@ impl PublicationPointRunner for SinglePackRunner {
ca.effective_ip_resources.as_ref(),
ca.effective_as_resources.as_ref(),
time::OffsetDateTime::now_utc(),
None,
);
Ok(PublicationPointRunResult {
@ -81,7 +82,9 @@ fn crl_mismatch_drops_publication_point_and_cites_rfc_sections() {
let root = CaInstanceHandle {
depth: 0,
ca_certificate_der: vec![0x01, 0x02, 0x03],
// Use a real, parseable CA certificate DER so objects processing can reach CRL selection.
// The test only asserts CRLDP/locked-pack error handling, not signature chaining.
ca_certificate_der: fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
ca_certificate_rsync_uri: None,
effective_ip_resources: None,
effective_as_resources: None,
@ -99,6 +102,7 @@ fn crl_mismatch_drops_publication_point_and_cites_rfc_sections() {
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
revalidate_only: false,
},
)
.expect("run tree audit");

View File

@ -213,7 +213,10 @@ fn cached_pack_revalidation_rejects_hash_mismatch_against_manifest_filelist() {
)
.expect_err("cache pack hash mismatch must be rejected");
let msg = err.to_string();
assert!(msg.contains("cached fetch_cache_pp file hash mismatch"), "{msg}");
assert!(
msg.contains("cached fetch_cache_pp file hash mismatch"),
"{msg}"
);
assert!(msg.contains("RFC 9286 §6.5"), "{msg}");
}

View File

@ -36,3 +36,47 @@ fn system_rsync_fetcher_can_sync_from_local_directory_for_tests() {
assert!(saw_a, "expected a.txt");
assert!(saw_b, "expected sub/b.txt");
}
#[test]
fn system_rsync_fetcher_can_reuse_persistent_mirror_directory() {
    // Sync the same local source twice through a persistent mirror root:
    // the first run must create exactly one mirror directory and the second
    // run must map the same source URI back onto it (stable hash mapping).
    let tmp = tempfile::tempdir().expect("tempdir");
    let src = tmp.path().join("src");
    let nested = src.join("sub");
    fs::create_dir_all(&nested).expect("mkdir");
    fs::write(src.join("a.txt"), b"aaa").expect("write a");
    fs::write(nested.join("b.txt"), b"bbb").expect("write b");

    let mirror_root = tmp.path().join("mirror");
    let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
        rsync_bin: "rsync".into(),
        mirror_root: Some(mirror_root.clone()),
        ..Default::default()
    });
    let base = src.to_string_lossy().to_string();

    // Counts top-level subdirectories of the mirror root.
    let count_mirror_dirs = || {
        std::fs::read_dir(&mirror_root)
            .expect("read mirror root")
            .filter_map(|e| e.ok())
            .filter(|e| e.path().is_dir())
            .count()
    };

    // First sync creates the mirror directory.
    let first = fetcher.fetch_objects(&base).expect("fetch objects #1");
    assert!(first.iter().any(|(u, _)| u.ends_with("a.txt")));
    assert!(first.iter().any(|(u, _)| u.ends_with("sub/b.txt")));
    assert_eq!(
        count_mirror_dirs(),
        1,
        "expected exactly one mirror directory"
    );

    // Second sync should reuse the same mirror directory (stable hash mapping).
    let second = fetcher.fetch_objects(&base).expect("fetch objects #2");
    assert!(second.iter().any(|(u, _)| u.ends_with("a.txt")));
    assert!(second.iter().any(|(u, _)| u.ends_with("sub/b.txt")));
    assert_eq!(
        count_mirror_dirs(),
        1,
        "expected mirror directory to be reused"
    );
}

View File

@ -31,7 +31,9 @@ fn discover_root_ca_instance_from_tal_url_succeeds_with_apnic_fixtures() {
.map
.insert(tal_url.to_string(), Ok(tal_bytes.clone()));
for u in &tal.ta_uris {
fetcher.map.insert(u.as_str().to_string(), Ok(ta_der.clone()));
fetcher
.map
.insert(u.as_str().to_string(), Ok(ta_der.clone()));
}
let out = discover_root_ca_instance_from_tal_url(&fetcher, tal_url).expect("discover root");
@ -49,9 +51,7 @@ fn discover_root_ca_instance_from_tal_url_returns_ta_fetch_error_when_all_candid
let tal = Tal::decode_bytes(&tal_bytes).expect("decode tal fixture");
let mut fetcher = MapFetcher::default();
fetcher
.map
.insert(tal_url.to_string(), Ok(tal_bytes));
fetcher.map.insert(tal_url.to_string(), Ok(tal_bytes));
for u in &tal.ta_uris {
fetcher.map.insert(
u.as_str().to_string(),
@ -67,4 +67,3 @@ fn discover_root_ca_instance_from_tal_url_returns_ta_fetch_error_when_all_candid
other => panic!("unexpected error: {other}"),
}
}

View File

@ -385,7 +385,10 @@ fn manifest_revalidation_with_unchanged_manifest_is_fresh() {
assert_eq!(second.source, PublicationPointSource::Fresh);
assert!(second.warnings.is_empty());
assert_eq!(second.pack.manifest_bytes, first.pack.manifest_bytes);
assert_eq!(second.pack.manifest_number_be, first.pack.manifest_number_be);
assert_eq!(
second.pack.manifest_number_be,
first.pack.manifest_number_be
);
assert_eq!(second.pack.files, first.pack.files);
}

View File

@ -3,7 +3,9 @@ use std::path::Path;
use rpki::data_model::manifest::ManifestObject;
use rpki::policy::Policy;
use rpki::storage::RocksStore;
use rpki::validation::manifest::{ManifestProcessError, PublicationPointSource, process_manifest_publication_point};
use rpki::validation::manifest::{
ManifestProcessError, PublicationPointSource, process_manifest_publication_point,
};
fn issuer_ca_fixture_der() -> Vec<u8> {
std::fs::read(
@ -89,7 +91,10 @@ fn manifest_outside_publication_point_yields_no_usable_cache() {
.unwrap_err();
// With no cached pack available for this wrong publication point, we get NoUsableCache.
assert!(matches!(err, ManifestProcessError::NoUsableCache { .. }), "{err}");
assert!(
matches!(err, ManifestProcessError::NoUsableCache { .. }),
"{err}"
);
}
#[test]
@ -154,6 +159,8 @@ fn manifest_outside_publication_point_detects_cached_pack_pp_mismatch() {
validation_time,
)
.unwrap_err();
assert!(matches!(err, ManifestProcessError::NoUsableCache { .. }), "{err}");
assert!(
matches!(err, ManifestProcessError::NoUsableCache { .. }),
"{err}"
);
}

View File

@ -3,7 +3,10 @@ use std::path::Path;
use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{FetchCachePpKey, FetchCachePpPack, RocksStore};
use rpki::validation::manifest::{ManifestProcessError, PublicationPointSource, process_manifest_publication_point, process_manifest_publication_point_after_repo_sync};
use rpki::validation::manifest::{
ManifestProcessError, PublicationPointSource, process_manifest_publication_point,
process_manifest_publication_point_after_repo_sync,
};
fn issuer_ca_fixture_der() -> Vec<u8> {
std::fs::read(
@ -522,7 +525,8 @@ fn cached_pack_missing_file_is_rejected_during_revalidation() {
"{err}"
);
assert!(
err.to_string().contains("cached fetch_cache_pp missing file"),
err.to_string()
.contains("cached fetch_cache_pp missing file"),
"unexpected error: {err}"
);
}
@ -597,7 +601,8 @@ fn cached_pack_hash_mismatch_is_rejected_during_revalidation() {
"{err}"
);
assert!(
err.to_string().contains("cached fetch_cache_pp file hash mismatch"),
err.to_string()
.contains("cached fetch_cache_pp file hash mismatch"),
"unexpected error: {err}"
);
}

View File

@ -319,9 +319,7 @@ struct ManifestEContentPretty {
impl From<&ManifestEContent> for ManifestEContentPretty {
fn from(v: &ManifestEContent) -> Self {
let entries = v
.parse_files()
.expect("parse validated manifest fileList");
let entries = v.parse_files().expect("parse validated manifest fileList");
Self {
version: v.version,
manifest_number: v.manifest_number.to_hex_upper(),

View File

@ -121,6 +121,7 @@ fn missing_crl_causes_roas_to_be_dropped_under_drop_object_policy() {
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
None,
);
assert!(out.vrps.is_empty());
assert!(!out.warnings.is_empty());
@ -146,6 +147,7 @@ fn wrong_issuer_ca_cert_causes_roas_to_be_dropped_under_drop_object_policy() {
None,
None,
validation_time,
None,
);
assert!(out.vrps.is_empty());
assert!(!out.warnings.is_empty());
@ -173,6 +175,7 @@ fn invalid_aspa_object_is_reported_as_warning_under_drop_object_policy() {
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
None,
);
assert!(

View File

@ -144,6 +144,7 @@ fn drop_object_policy_drops_only_failing_object() {
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
None,
);
assert!(
@ -186,6 +187,7 @@ fn drop_publication_point_policy_drops_the_publication_point() {
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
None,
);
assert!(out.stats.publication_point_dropped);
assert!(out.vrps.is_empty(), "expected publication point dropped");

View File

@ -91,6 +91,7 @@ fn build_fetch_cache_pp_from_local_rsync_fixture(
rsync_base_uri,
&NoopHttpFetcher,
&LocalDirRsyncFetcher::new(dir),
None,
)
.expect("sync into raw_objects");
@ -134,6 +135,7 @@ fn process_pack_for_issuer_extracts_vrps_from_real_cernet_fixture() {
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
None,
);
assert!(
@ -188,6 +190,7 @@ fn signed_object_failure_policy_drop_object_drops_only_bad_object() {
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
None,
);
assert!(out.vrps.len() > 0);
@ -250,6 +253,7 @@ fn signed_object_failure_policy_drop_publication_point_drops_all_output() {
issuer_ca.tbs.extensions.ip_resources.as_ref(),
issuer_ca.tbs.extensions.as_resources.as_ref(),
validation_time,
None,
);
assert!(out.vrps.is_empty());
@ -304,6 +308,7 @@ fn process_pack_for_issuer_without_crl_drops_publication_point() {
None,
None,
validation_time,
None,
);
assert!(out.vrps.is_empty());
@ -346,6 +351,7 @@ fn process_pack_for_issuer_handles_invalid_aspa_bytes() {
None,
None,
validation_time,
None,
);
assert!(out.aspas.is_empty());
assert!(!out.warnings.is_empty());
@ -388,6 +394,7 @@ fn process_pack_for_issuer_drop_publication_point_on_invalid_aspa_bytes() {
None,
None,
validation_time,
None,
);
assert!(out.vrps.is_empty());
assert!(out.aspas.is_empty());

View File

@ -68,6 +68,7 @@ fn process_pack_drop_object_on_wrong_issuer_ca_for_roa() {
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert_eq!(out.stats.roa_total, 1);
@ -130,6 +131,7 @@ fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_roa_skips_rest() {
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert!(out.stats.publication_point_dropped);
assert_eq!(out.warnings.len(), 1);
@ -179,6 +181,7 @@ fn process_pack_drop_object_on_wrong_issuer_ca_for_aspa() {
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert_eq!(out.stats.aspa_total, 1);
@ -234,6 +237,7 @@ fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_aspa_skips_rest()
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert!(out.stats.publication_point_dropped);
}
@ -265,11 +269,12 @@ fn process_pack_for_issuer_marks_objects_skipped_when_missing_issuer_crl() {
let out = process_fetch_cache_pp_pack_for_issuer(
&pack,
&policy,
&[0x01, 0x02, 0x03],
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
None,
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert!(out.stats.publication_point_dropped);
@ -307,11 +312,12 @@ fn process_pack_for_issuer_drop_object_records_errors_and_continues() {
let out = process_fetch_cache_pp_pack_for_issuer(
&pack,
&policy,
&[0x01, 0x02, 0x03],
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
None,
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert!(!out.stats.publication_point_dropped);
@ -348,11 +354,12 @@ fn process_pack_for_issuer_drop_publication_point_records_skips_for_rest() {
let out = process_fetch_cache_pp_pack_for_issuer(
&pack,
&policy,
&[0x01, 0x02, 0x03],
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
None,
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert!(out.stats.publication_point_dropped);
@ -398,11 +405,12 @@ fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_roa() {
let out = process_fetch_cache_pp_pack_for_issuer(
&pack,
&policy,
&[0x01, 0x02, 0x03],
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
None,
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert_eq!(out.stats.roa_total, 1);
@ -439,11 +447,12 @@ fn process_pack_for_issuer_rejects_roa_when_crldp_crl_missing() {
let out = process_fetch_cache_pp_pack_for_issuer(
&pack,
&policy,
&[0x01, 0x02, 0x03],
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
None,
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert_eq!(out.stats.roa_total, 1);
@ -494,11 +503,12 @@ fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_aspa() {
let out = process_fetch_cache_pp_pack_for_issuer(
&pack,
&policy,
&[0x01, 0x02, 0x03],
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
None,
None,
None,
time::OffsetDateTime::now_utc(),
None,
);
assert_eq!(out.stats.aspa_total, 1);

View File

@ -83,6 +83,7 @@ fn repo_sync_uses_rrdp_when_available() {
"rsync://example.net/repo/",
&http_fetcher,
&rsync_fetcher,
None,
)
.expect("sync");
@ -135,6 +136,7 @@ fn repo_sync_skips_snapshot_when_state_unchanged() {
"rsync://example.net/repo/",
&http_fetcher,
&rsync_fetcher,
None,
)
.expect("sync 1");
assert_eq!(out1.source, RepoSyncSource::Rrdp);
@ -147,6 +149,7 @@ fn repo_sync_skips_snapshot_when_state_unchanged() {
"rsync://example.net/repo/",
&http_fetcher,
&rsync_fetcher,
None,
)
.expect("sync 2");
assert_eq!(out2.source, RepoSyncSource::Rrdp);
@ -199,6 +202,7 @@ fn repo_sync_falls_back_to_rsync_on_rrdp_failure() {
"rsync://example.net/repo/",
&http_fetcher,
&rsync_fetcher,
None,
)
.expect("fallback sync");
@ -247,6 +251,7 @@ fn repo_sync_rsync_populates_raw_objects() {
"rsync://example.net/repo/",
&http_fetcher,
&rsync_fetcher,
None,
)
.expect("rsync-only sync");

View File

@ -30,4 +30,3 @@ fn rpki_bin_without_args_exits_2_and_prints_error() {
"expected non-empty stderr, got empty"
);
}

View File

@ -0,0 +1,128 @@
use std::path::{Path, PathBuf};
use std::process::Command;
use std::time::{Duration, Instant};
use rpki::storage::RocksStore;
const RSYNC_BASE_URI: &str = "rsync://rpki.luys.cloud/repo/LY-RPKI/1/";
/// Run the system `rsync` binary copying `src` into `dst`.
///
/// Returns `Err` with a descriptive message when the process cannot be
/// spawned or exits with a non-zero status (stdout/stderr are included
/// to aid debugging of network failures).
fn run_rsync(src: &str, dst: &Path, timeout: Duration) -> Result<(), String> {
    // rsync treats `--timeout 0` as "no timeout", so clamp to at least 1s.
    let timeout_secs = timeout.as_secs().max(1).to_string();
    let mut cmd = Command::new("rsync");
    cmd.args(["-rt", "--delete", "--timeout"])
        .arg(&timeout_secs)
        .arg(src)
        .arg(dst);
    let out = cmd
        .output()
        .map_err(|e| format!("rsync spawn failed: {e}"))?;
    if out.status.success() {
        return Ok(());
    }
    Err(format!(
        "rsync failed: status={} stdout={} stderr={}",
        out.status,
        String::from_utf8_lossy(&out.stdout).trim(),
        String::from_utf8_lossy(&out.stderr).trim()
    ))
}
/// Ensure an rsync base URI ends with a trailing `/`, appending one if absent.
fn normalize_rsync_base_uri(s: &str) -> String {
    let mut base = s.to_string();
    if !base.ends_with('/') {
        base.push('/');
    }
    base
}
/// Recursively collect every regular file under `current` into `out`.
///
/// `root` is threaded through unchanged to recursive calls (callers pass the
/// tree root). Any I/O error is stringified and returned. Symlinks and other
/// non-file, non-directory entries are skipped. Order is directory-iteration
/// order, i.e. not guaranteed.
fn walk_files(root: &Path, current: &Path, out: &mut Vec<PathBuf>) -> Result<(), String> {
    let entries = std::fs::read_dir(current).map_err(|e| e.to_string())?;
    for entry in entries {
        let entry = entry.map_err(|e| e.to_string())?;
        let meta = entry.metadata().map_err(|e| e.to_string())?;
        let path = entry.path();
        if meta.is_file() {
            out.push(path);
        } else if meta.is_dir() {
            walk_files(root, &path, out)?;
        }
    }
    Ok(())
}
#[test]
#[ignore = "live network rsync breakdown; prints timing table"]
fn rsync_fallback_breakdown_luys_cloud() {
    // Benchmark-style breakdown of where wall-clock time goes in the rsync
    // fallback path: network transfer, directory walk, file reads, and
    // RocksDB raw_objects writes. Requires network access and a local
    // `rsync` binary, hence #[ignore]; run manually to print the table.
    let rsync_base_uri = normalize_rsync_base_uri(RSYNC_BASE_URI);
    let timeout = Duration::from_secs(30 * 60);
    let temp_dir = tempfile::tempdir().expect("tempdir");
    let dst = temp_dir.path().join("rsync");
    std::fs::create_dir_all(&dst).expect("create dst dir");

    // Phase 1: raw rsync transfer into a scratch directory (network-bound).
    let t0 = Instant::now();
    run_rsync(&rsync_base_uri, &dst, timeout).expect("rsync fetch");
    let rsync_wall = t0.elapsed();

    // Phase 2: enumerate every file rsync wrote.
    let t1 = Instant::now();
    let mut files: Vec<PathBuf> = Vec::new();
    walk_files(&dst, &dst, &mut files).expect("walk files");
    let walk_wall = t1.elapsed();

    // Phase 3: read all bytes into memory and map each local path back to
    // its rsync URI (backslash replacement keeps URIs stable on Windows).
    let t2 = Instant::now();
    let mut objects: Vec<(String, Vec<u8>)> = Vec::with_capacity(files.len());
    let mut total_bytes: u64 = 0;
    for path in &files {
        let rel = path
            .strip_prefix(&dst)
            .expect("strip prefix")
            .to_string_lossy()
            .replace('\\', "/");
        let uri = format!("{rsync_base_uri}{rel}");
        let bytes = std::fs::read(path).expect("read file");
        total_bytes = total_bytes.saturating_add(bytes.len() as u64);
        objects.push((uri, bytes));
    }
    let read_wall = t2.elapsed();

    // Phase 4: write every object into a fresh RocksDB store via put_raw.
    let db_dir = tempfile::tempdir().expect("db tempdir");
    let store = RocksStore::open(db_dir.path()).expect("open rocksdb");
    let t3 = Instant::now();
    for (uri, bytes) in &objects {
        store.put_raw(uri, bytes).expect("put_raw");
    }
    let write_wall = t3.elapsed();

    // Emit a markdown timing table; percentages are relative to the sum of
    // the four measured phases (not the test's total runtime).
    println!("rsync_base_uri: {rsync_base_uri}");
    println!("| phase | duration(ms) | pct | notes |\n|---|---:|---:|---|");
    let total = rsync_wall + walk_wall + read_wall + write_wall;
    let pct = |d: Duration| (d.as_secs_f64() / total.as_secs_f64()) * 100.0;
    println!(
        "| rsync_network | {:>11.3} | {:>5.1}% | rsync command wall time |",
        rsync_wall.as_secs_f64() * 1000.0,
        pct(rsync_wall)
    );
    println!(
        "| walk_files | {:>11.3} | {:>5.1}% | enumerate local files |",
        walk_wall.as_secs_f64() * 1000.0,
        pct(walk_wall)
    );
    println!(
        "| read_files | {:>11.3} | {:>5.1}% | read all bytes into memory |",
        read_wall.as_secs_f64() * 1000.0,
        pct(read_wall)
    );
    println!(
        "| rocksdb_put_raw | {:>11.3} | {:>5.1}% | write raw_objects ({} keys) |",
        write_wall.as_secs_f64() * 1000.0,
        pct(write_wall),
        objects.len()
    );
    println!(
        "| TOTAL | {:>11.3} | 100.0% | files={} bytes={} |",
        total.as_secs_f64() * 1000.0,
        objects.len(),
        total_bytes
    );
}

View File

@ -91,6 +91,7 @@ fn run_tree_from_tal_url_entry_executes_and_records_failure_when_repo_empty() {
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
revalidate_only: false,
},
)
.expect("run tree");
@ -134,6 +135,7 @@ fn run_tree_from_tal_and_ta_der_entry_executes_and_records_failure_when_repo_emp
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
revalidate_only: false,
},
)
.expect("run tree");
@ -185,6 +187,7 @@ fn run_tree_from_tal_url_audit_entry_collects_no_publication_points_when_repo_em
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
revalidate_only: false,
},
)
.expect("run tree audit");
@ -224,6 +227,7 @@ fn run_tree_from_tal_and_ta_der_audit_entry_collects_no_publication_points_when_
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
revalidate_only: false,
},
)
.expect("run tree audit");

View File

@ -236,6 +236,7 @@ fn tree_respects_max_depth_and_max_instances() {
&TreeRunConfig {
max_depth: Some(0),
max_instances: None,
revalidate_only: false,
},
)
.expect("run tree depth-limited");
@ -248,6 +249,7 @@ fn tree_respects_max_depth_and_max_instances() {
&TreeRunConfig {
max_depth: None,
max_instances: Some(1),
revalidate_only: false,
},
)
.expect("run tree instance-limited");
@ -316,3 +318,69 @@ fn tree_audit_includes_parent_and_discovered_from_for_non_root_nodes() {
.expect("child discovered_from");
assert_eq!(df.parent_manifest_rsync_uri, root_manifest);
}
#[test]
fn tree_revalidate_only_enqueues_children_from_fetch_cache_pp() {
    // Run the same mocked tree twice. With revalidate_only disabled only the
    // root publication point is processed; with it enabled, the child
    // discovered from the cached (FetchCachePp) pack is walked as well.
    let root_manifest = "rsync://example.test/repo/root.mft";
    let child_manifest = "rsync://example.test/repo/child.mft";

    // Empty per-publication-point objects output shared by both mock nodes.
    let empty_objects = || ObjectsOutput {
        vrps: Vec::new(),
        aspas: Vec::new(),
        warnings: Vec::new(),
        stats: ObjectsStats::default(),
        audit: Vec::new(),
    };

    let runner = MockRunner::default()
        .with(
            root_manifest,
            PublicationPointRunResult {
                source: PublicationPointSource::FetchCachePp,
                pack: empty_pack(root_manifest, "rsync://example.test/repo/"),
                warnings: Vec::new(),
                objects: empty_objects(),
                audit: PublicationPointAudit::default(),
                discovered_children: vec![discovered_child(root_manifest, child_manifest)],
            },
        )
        .with(
            child_manifest,
            PublicationPointRunResult {
                source: PublicationPointSource::FetchCachePp,
                pack: empty_pack(child_manifest, "rsync://example.test/repo/child/"),
                warnings: Vec::new(),
                objects: empty_objects(),
                audit: PublicationPointAudit::default(),
                discovered_children: Vec::new(),
            },
        );

    for (revalidate_only, expected) in [(false, 1), (true, 2)] {
        let out = run_tree_serial(
            ca_handle(root_manifest),
            &runner,
            &TreeRunConfig {
                max_depth: None,
                max_instances: None,
                revalidate_only,
            },
        )
        .expect("run tree");
        assert_eq!(out.instances_processed, expected);
    }
}