串行验证通过

This commit is contained in:
yuyr 2026-02-09 19:35:54 +08:00
parent 7be865d7f1
commit afc31c02ab
66 changed files with 12049 additions and 4 deletions

View File

@ -13,3 +13,14 @@ time = "0.3.45"
ring = "0.17.14" ring = "0.17.14"
x509-parser = { version = "0.18.0", features = ["verify"] } x509-parser = { version = "0.18.0", features = ["verify"] }
url = "2.5.8" url = "2.5.8"
serde = { version = "1.0.218", features = ["derive"] }
serde_json = "1.0.140"
toml = "0.8.20"
rocksdb = { version = "0.22.0", default-features = false, features = ["lz4"] }
serde_cbor = "0.11.2"
roxmltree = "0.20.0"
uuid = { version = "1.7.0", features = ["v4"] }
reqwest = { version = "0.12.12", default-features = false, features = ["blocking", "rustls-tls"] }
[dev-dependencies]
tempfile = "3.16.0"

144
src/audit.rs Normal file
View File

@ -0,0 +1,144 @@
use serde::Serialize;
use sha2::Digest;
use crate::policy::Policy;
/// Classification of an RPKI repository object, as recorded in the audit trail.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AuditObjectKind {
    Manifest,
    Crl,
    Certificate,
    Roa,
    Aspa,
    // Catch-all for objects that are none of the known kinds.
    Other,
}

/// Outcome of processing a single object at a publication point.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AuditObjectResult {
    Ok,
    Skipped,
    Error,
}

/// Audit record for one object: where it lives, its digest, and how it fared.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct ObjectAuditEntry {
    pub rsync_uri: String,
    // Lowercase hex SHA-256 of the object bytes (see `sha256_hex`).
    pub sha256_hex: String,
    pub kind: AuditObjectKind,
    pub result: AuditObjectResult,
    // Optional free-form detail; omitted from JSON when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub detail: Option<String>,
}

/// Serializable warning: message plus the RFC references backing it.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditWarning {
    pub message: String,
    pub rfc_refs: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub context: Option<String>,
}

impl From<&crate::report::Warning> for AuditWarning {
    /// Convert an internal `report::Warning` into its serializable audit form,
    /// flattening each `RfcRef` into its string representation.
    fn from(w: &crate::report::Warning) -> Self {
        Self {
            message: w.message.clone(),
            rfc_refs: w.rfc_refs.iter().map(|r| r.0.to_string()).collect(),
            context: w.context.clone(),
        }
    }
}

/// Per-publication-point audit: identifying URIs, manifest validity window,
/// accumulated warnings, and the per-object entries.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct PublicationPointAudit {
    pub rsync_base_uri: String,
    pub manifest_rsync_uri: String,
    pub publication_point_rsync_uri: String,
    // RRDP notification URI when the point advertises one; used by
    // `unique_rrdp_repos` in the CLI summary.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rrdp_notification_uri: Option<String>,
    pub source: String,
    pub this_update_rfc3339_utc: String,
    pub next_update_rfc3339_utc: String,
    pub verified_at_rfc3339_utc: String,
    pub warnings: Vec<AuditWarning>,
    pub objects: Vec<ObjectAuditEntry>,
}

/// Whole-tree counters plus tree-level warnings.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct TreeSummary {
    pub instances_processed: usize,
    pub instances_failed: usize,
    pub warnings: Vec<AuditWarning>,
}

/// Run-level metadata attached to every report.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditRunMeta {
    pub validation_time_rfc3339_utc: String,
}

/// Top-level audit report (format_version = 1) serialized by the CLI's
/// `--report-json` option.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditReportV1 {
    pub format_version: u32,
    pub meta: AuditRunMeta,
    pub policy: Policy,
    pub tree: TreeSummary,
    pub publication_points: Vec<PublicationPointAudit>,
    pub vrps: Vec<VrpOutput>,
    pub aspas: Vec<AspaOutput>,
}

/// Validated ROA payload (VRP) in report form; `prefix` is the string
/// produced by `format_roa_ip_prefix`.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct VrpOutput {
    pub asn: u32,
    pub prefix: String,
    pub max_length: u16,
}

/// ASPA attestation in report form.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AspaOutput {
    pub customer_as_id: u32,
    pub provider_as_ids: Vec<u32>,
}
/// Render a 32-byte SHA-256 digest as a 64-character lowercase hex string.
pub fn sha256_hex_from_32(bytes: &[u8; 32]) -> String {
    bytes.iter().map(|b| format!("{b:02x}")).collect()
}
/// SHA-256 the given bytes and return the digest as lowercase hex.
pub fn sha256_hex(bytes: &[u8]) -> String {
    hex::encode(sha2::Sha256::digest(bytes))
}
/// Format a ROA IP prefix as a human-readable `addr/len` string.
///
/// IPv4 renders dotted-quad (`192.0.2.0/24`); IPv6 renders all eight
/// 16-bit groups joined by `:` with no zero-compression (e.g.
/// `2001:db8:0:0:0:0:0:0/32`), matching the original output exactly.
/// If the stored address has an unexpected byte length for its AFI, a
/// debug-style fallback (`ipv4:…` / `ipv6:…`) is returned instead of
/// panicking on out-of-range indexing.
pub fn format_roa_ip_prefix(p: &crate::data_model::roa::IpPrefix) -> String {
    match p.afi {
        crate::data_model::roa::RoaAfi::Ipv4 => {
            if p.addr.len() != 4 {
                // Malformed length: emit a recognizable debug form.
                return format!("ipv4:{:02X?}/{}", p.addr, p.prefix_len);
            }
            // Single format string instead of a nested `format!` allocation.
            format!(
                "{}.{}.{}.{}/{}",
                p.addr[0], p.addr[1], p.addr[2], p.addr[3], p.prefix_len
            )
        }
        crate::data_model::roa::RoaAfi::Ipv6 => {
            if p.addr.len() != 16 {
                return format!("ipv6:{:02X?}/{}", p.addr, p.prefix_len);
            }
            // Big-endian byte pairs -> 16-bit groups; chunks_exact avoids
            // manual index arithmetic and per-access bounds checks.
            let groups: Vec<String> = p
                .addr
                .chunks_exact(2)
                .map(|pair| format!("{:x}", (u16::from(pair[0]) << 8) | u16::from(pair[1])))
                .collect();
            format!("{}/{}", groups.join(":"), p.prefix_len)
        }
    }
}

12
src/bin/rpki.rs Normal file
View File

@ -0,0 +1,12 @@
/// Binary entry point: delegate to `rpki::cli::run` and map its result to
/// an exit status.
fn main() {
    let argv: Vec<String> = std::env::args().collect();
    match rpki::cli::run(&argv) {
        Ok(()) => {}
        // `parse_args` signals `--help` through `Err(usage())`, so a help
        // request is reported on stdout and treated as success.
        Err(msg) if argv.iter().any(|a| a == "--help" || a == "-h") => {
            println!("{msg}");
        }
        Err(msg) => {
            eprintln!("{msg}");
            std::process::exit(2);
        }
    }
}

652
src/cli.rs Normal file
View File

@ -0,0 +1,652 @@
use std::path::{Path, PathBuf};
use crate::audit::{
AspaOutput, AuditReportV1, AuditRunMeta, AuditWarning, TreeSummary, VrpOutput,
format_roa_ip_prefix,
};
use crate::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use crate::fetch::rsync::LocalDirRsyncFetcher;
use crate::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
use crate::policy::Policy;
use crate::storage::RocksStore;
use crate::validation::run_tree_from_tal::{
RunTreeFromTalAuditOutput, run_tree_from_tal_and_ta_der_serial_audit,
run_tree_from_tal_url_serial_audit,
};
use crate::validation::tree::TreeRunConfig;
/// Parsed command-line arguments for the `rpki` binary.
///
/// After a successful `parse_args`, exactly one of `tal_url` / `tal_path`
/// is set, and `tal_path` implies `ta_path` is also set.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CliArgs {
    // `--tal-url`: fetch TAL + TA over HTTPS.
    pub tal_url: Option<String>,
    // `--tal-path`: offline TAL file; requires `ta_path`.
    pub tal_path: Option<PathBuf>,
    // `--ta-path`: TA certificate DER file.
    pub ta_path: Option<PathBuf>,
    // `--db`: RocksDB directory (required).
    pub db_path: PathBuf,
    // `--policy`: optional policy TOML.
    pub policy_path: Option<PathBuf>,
    // `--report-json`: optional JSON report output path.
    pub report_json_path: Option<PathBuf>,
    // `--rsync-local-dir`: use the local-directory rsync fetcher (offline tests).
    pub rsync_local_dir: Option<PathBuf>,
    // `--max-depth`: max CA instance depth (0 = root only).
    pub max_depth: Option<usize>,
    // `--max-instances`: cap on CA instances processed.
    pub max_instances: Option<usize>,
    // `--validation-time` (RFC3339); `run` defaults this to now-UTC.
    pub validation_time: Option<time::OffsetDateTime>,
}
/// Build the usage/help text shown for `--help` and for argument errors.
/// Also doubles as the `--help` "error" payload returned by `parse_args`.
fn usage() -> String {
    let bin = "rpki";
    format!(
        "\
Usage:
{bin} --db <path> --tal-url <url> [options]
{bin} --db <path> --tal-path <path> --ta-path <path> [options]
Options:
--db <path> RocksDB directory path (required)
--policy <path> Policy TOML path (optional)
--report-json <path> Write full audit report as JSON (optional)
--tal-url <url> TAL URL (downloads TAL + TA over HTTPS)
--tal-path <path> TAL file path (offline-friendly; requires --ta-path)
--ta-path <path> TA certificate DER file path (offline-friendly)
--rsync-local-dir <path> Use LocalDirRsyncFetcher rooted at this directory (offline tests)
--max-depth <n> Max CA instance depth (0 = root only)
--max-instances <n> Max number of CA instances to process
--validation-time <rfc3339> Validation time in RFC3339 (default: now UTC)
--help Show this help
"
    )
}
/// Parse `argv` (argv[0] is the binary name) into `CliArgs`.
///
/// Returns `Err` with a user-facing message on any problem; `--help`/`-h`
/// is reported as `Err(usage())` and must be special-cased by the caller.
pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
    // Consume and return the value following `flag`, advancing the cursor.
    fn take<'a>(argv: &'a [String], idx: &mut usize, flag: &str) -> Result<&'a str, String> {
        *idx += 1;
        match argv.get(*idx) {
            Some(v) => Ok(v.as_str()),
            None => Err(format!("{flag} requires a value")),
        }
    }

    let mut tal_url = None;
    let mut tal_path = None;
    let mut ta_path = None;
    let mut db_path = None;
    let mut policy_path = None;
    let mut report_json_path = None;
    let mut rsync_local_dir = None;
    let mut max_depth = None;
    let mut max_instances = None;
    let mut validation_time = None;

    let mut idx = 1usize;
    while idx < argv.len() {
        let flag = argv[idx].as_str();
        match flag {
            "--help" | "-h" => return Err(usage()),
            "--tal-url" => tal_url = Some(take(argv, &mut idx, flag)?.to_string()),
            "--tal-path" => tal_path = Some(PathBuf::from(take(argv, &mut idx, flag)?)),
            "--ta-path" => ta_path = Some(PathBuf::from(take(argv, &mut idx, flag)?)),
            "--db" => db_path = Some(PathBuf::from(take(argv, &mut idx, flag)?)),
            "--policy" => policy_path = Some(PathBuf::from(take(argv, &mut idx, flag)?)),
            "--report-json" => report_json_path = Some(PathBuf::from(take(argv, &mut idx, flag)?)),
            "--rsync-local-dir" => {
                rsync_local_dir = Some(PathBuf::from(take(argv, &mut idx, flag)?));
            }
            "--max-depth" => {
                let v = take(argv, &mut idx, flag)?;
                max_depth = Some(
                    v.parse::<usize>()
                        .map_err(|_| format!("invalid --max-depth: {v}"))?,
                );
            }
            "--max-instances" => {
                let v = take(argv, &mut idx, flag)?;
                max_instances = Some(
                    v.parse::<usize>()
                        .map_err(|_| format!("invalid --max-instances: {v}"))?,
                );
            }
            "--validation-time" => {
                let v = take(argv, &mut idx, flag)?;
                use time::format_description::well_known::Rfc3339;
                validation_time = Some(
                    time::OffsetDateTime::parse(v, &Rfc3339)
                        .map_err(|e| format!("invalid --validation-time (RFC3339 expected): {e}"))?,
                );
            }
            other => return Err(format!("unknown argument: {other}\n\n{}", usage())),
        }
        idx += 1;
    }

    let db_path = db_path.ok_or_else(|| format!("--db is required\n\n{}", usage()))?;

    // Exactly one TAL source must be chosen.
    let tal_mode_count = tal_url.is_some() as u8 + tal_path.is_some() as u8;
    if tal_mode_count != 1 {
        return Err(format!(
            "must specify exactly one of --tal-url or --tal-path\n\n{}",
            usage()
        ));
    }
    // Offline mode needs the TA certificate alongside the TAL file.
    if tal_path.is_some() && ta_path.is_none() {
        return Err(format!(
            "--tal-path requires --ta-path (offline-friendly mode)\n\n{}",
            usage()
        ));
    }

    Ok(CliArgs {
        tal_url,
        tal_path,
        ta_path,
        db_path,
        policy_path,
        report_json_path,
        rsync_local_dir,
        max_depth,
        max_instances,
        validation_time,
    })
}
/// Load the policy: default when no path was given, otherwise parse the
/// TOML file, mapping both I/O and parse errors to user-facing strings.
fn read_policy(path: Option<&Path>) -> Result<Policy, String> {
    match path {
        Some(p) => std::fs::read_to_string(p)
            .map_err(|e| format!("read policy file failed: {}: {e}", p.display()))
            .and_then(|s| Policy::from_toml_str(&s).map_err(|e| e.to_string())),
        None => Ok(Policy::default()),
    }
}
/// Serialize the audit report as pretty-printed JSON to `path`.
fn write_json(path: &Path, report: &AuditReportV1) -> Result<(), String> {
    let file = std::fs::File::create(path)
        .map_err(|e| format!("create report file failed: {}: {e}", path.display()))?;
    serde_json::to_writer_pretty(file, report)
        .map_err(|e| format!("write report json failed: {e}"))
}
/// Count distinct RRDP notification URIs across all publication points.
fn unique_rrdp_repos(report: &AuditReportV1) -> usize {
    report
        .publication_points
        .iter()
        .filter_map(|pp| pp.rrdp_notification_uri.as_deref())
        .collect::<std::collections::HashSet<_>>()
        .len()
}
/// Print the one-screen run summary to stdout.
fn print_summary(report: &AuditReportV1) {
    // Warnings are counted across the tree and every publication point.
    let pp_warnings: usize = report
        .publication_points
        .iter()
        .map(|pp| pp.warnings.len())
        .sum();
    let warnings_total = report.tree.warnings.len() + pp_warnings;

    println!("RPKI stage2 serial run summary");
    println!("validation_time={}", report.meta.validation_time_rfc3339_utc);
    println!(
        "publication_points_processed={} publication_points_failed={}",
        report.tree.instances_processed, report.tree.instances_failed
    );
    println!("rrdp_repos_unique={}", unique_rrdp_repos(report));
    println!("vrps={}", report.vrps.len());
    println!("aspas={}", report.aspas.len());
    println!("audit_publication_points={}", report.publication_points.len());
    println!("warnings_total={warnings_total}");
}
/// Assemble the final `AuditReportV1` from the policy, the chosen
/// validation time, and the tree-run output.
fn build_report(
    policy: &Policy,
    validation_time: time::OffsetDateTime,
    out: RunTreeFromTalAuditOutput,
) -> AuditReportV1 {
    use time::format_description::well_known::Rfc3339;

    // Normalize the timestamp to UTC before formatting for the report header.
    let stamp = validation_time
        .to_offset(time::UtcOffset::UTC)
        .format(&Rfc3339)
        .expect("format validation_time");

    let mut vrps = Vec::with_capacity(out.tree.vrps.len());
    for v in &out.tree.vrps {
        vrps.push(VrpOutput {
            asn: v.asn,
            prefix: format_roa_ip_prefix(&v.prefix),
            max_length: v.max_length,
        });
    }

    let mut aspas = Vec::with_capacity(out.tree.aspas.len());
    for a in &out.tree.aspas {
        aspas.push(AspaOutput {
            customer_as_id: a.customer_as_id,
            provider_as_ids: a.provider_as_ids.clone(),
        });
    }

    let warnings = out.tree.warnings.iter().map(AuditWarning::from).collect();

    AuditReportV1 {
        format_version: 1,
        meta: AuditRunMeta {
            validation_time_rfc3339_utc: stamp,
        },
        policy: policy.clone(),
        tree: TreeSummary {
            instances_processed: out.tree.instances_processed,
            instances_failed: out.tree.instances_failed,
            warnings,
        },
        publication_points: out.publication_points,
        vrps,
        aspas,
    }
}
/// CLI entry point: parse args, run the validation tree serially, and emit
/// the summary (and optionally the JSON report).
///
/// Returns `Err(String)` with a user-facing message on any failure;
/// `--help` also surfaces here as `Err(usage())` (see `parse_args`), which
/// the binary treats as success.
pub fn run(argv: &[String]) -> Result<(), String> {
    let args = parse_args(argv)?;
    let policy = read_policy(args.policy_path.as_deref())?;
    // Default to "now" so replays can pin a fixed time via --validation-time.
    let validation_time = args.validation_time.unwrap_or_else(time::OffsetDateTime::now_utc);
    let store = RocksStore::open(&args.db_path).map_err(|e| e.to_string())?;
    let http = BlockingHttpFetcher::new(HttpFetcherConfig::default()).map_err(|e| e.to_string())?;
    let config = TreeRunConfig {
        max_depth: args.max_depth,
        max_instances: args.max_instances,
    };
    // NOTE(review): the two branches below are identical except for the
    // concrete rsync fetcher type — presumably the run_tree_* helpers are
    // generic over the fetcher, forcing this duplication. Verify against
    // their signatures before attempting to deduplicate.
    let out = if let Some(dir) = args.rsync_local_dir.as_ref() {
        // Offline/test mode: serve rsync URIs from a local directory tree.
        let rsync = LocalDirRsyncFetcher::new(dir);
        match (args.tal_url.as_ref(), args.tal_path.as_ref(), args.ta_path.as_ref()) {
            (Some(url), _, _) => run_tree_from_tal_url_serial_audit(
                &store,
                &policy,
                url,
                &http,
                &rsync,
                validation_time,
                &config,
            )
            .map_err(|e| e.to_string())?,
            (None, Some(tal_path), Some(ta_path)) => {
                let tal_bytes = std::fs::read(tal_path)
                    .map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?;
                let ta_der = std::fs::read(ta_path)
                    .map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?;
                run_tree_from_tal_and_ta_der_serial_audit(
                    &store,
                    &policy,
                    &tal_bytes,
                    &ta_der,
                    None,
                    &http,
                    &rsync,
                    validation_time,
                    &config,
                )
                .map_err(|e| e.to_string())?
            }
            // parse_args guarantees exactly one TAL mode (and ta_path with tal_path).
            _ => unreachable!("validated by parse_args"),
        }
    } else {
        // Normal mode: shell out to the system rsync binary.
        let rsync = SystemRsyncFetcher::new(SystemRsyncConfig::default());
        match (args.tal_url.as_ref(), args.tal_path.as_ref(), args.ta_path.as_ref()) {
            (Some(url), _, _) => run_tree_from_tal_url_serial_audit(
                &store,
                &policy,
                url,
                &http,
                &rsync,
                validation_time,
                &config,
            )
            .map_err(|e| e.to_string())?,
            (None, Some(tal_path), Some(ta_path)) => {
                let tal_bytes = std::fs::read(tal_path)
                    .map_err(|e| format!("read tal failed: {}: {e}", tal_path.display()))?;
                let ta_der = std::fs::read(ta_path)
                    .map_err(|e| format!("read ta failed: {}: {e}", ta_path.display()))?;
                run_tree_from_tal_and_ta_der_serial_audit(
                    &store,
                    &policy,
                    &tal_bytes,
                    &ta_der,
                    None,
                    &http,
                    &rsync,
                    validation_time,
                    &config,
                )
                .map_err(|e| e.to_string())?
            }
            _ => unreachable!("validated by parse_args"),
        }
    };
    let report = build_report(&policy, validation_time, out);
    // Write the full JSON report only when requested.
    if let Some(p) = args.report_json_path.as_deref() {
        write_json(p, &report)?;
    }
    print_summary(&report);
    Ok(())
}
#[cfg(test)]
mod tests {
    //! Unit tests for argument parsing, policy loading, and report assembly.
    use super::*;

    // --help is signalled as Err(usage()); the text must look like help output.
    #[test]
    fn parse_help_returns_usage() {
        let argv = vec!["rpki".to_string(), "--help".to_string()];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("Usage:"), "{err}");
        assert!(err.contains("--db"), "{err}");
    }

    #[test]
    fn parse_rejects_unknown_argument() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-url".to_string(),
            "https://example.test/x.tal".to_string(),
            "--nope".to_string(),
        ];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("unknown argument"), "{err}");
    }

    // The two TAL modes are mutually exclusive.
    #[test]
    fn parse_rejects_both_tal_url_and_tal_path() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-url".to_string(),
            "https://example.test/x.tal".to_string(),
            "--tal-path".to_string(),
            "x.tal".to_string(),
        ];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("exactly one of --tal-url or --tal-path"), "{err}");
    }

    #[test]
    fn parse_rejects_invalid_max_depth() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-url".to_string(),
            "https://example.test/x.tal".to_string(),
            "--max-depth".to_string(),
            "nope".to_string(),
        ];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("invalid --max-depth"), "{err}");
    }

    #[test]
    fn parse_rejects_invalid_validation_time() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-url".to_string(),
            "https://example.test/x.tal".to_string(),
            "--validation-time".to_string(),
            "not-a-time".to_string(),
        ];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("invalid --validation-time"), "{err}");
    }

    #[test]
    fn parse_rejects_invalid_max_instances() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-url".to_string(),
            "https://example.test/x.tal".to_string(),
            "--max-instances".to_string(),
            "nope".to_string(),
        ];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("invalid --max-instances"), "{err}");
    }

    // A flag at the end of argv with no following value must be rejected.
    #[test]
    fn parse_rejects_missing_value_for_db() {
        let argv = vec!["rpki".to_string(), "--db".to_string()];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("--db requires a value"), "{err}");
    }

    #[test]
    fn parse_rejects_missing_value_for_tal_url() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-url".to_string(),
        ];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("--tal-url requires a value"), "{err}");
    }

    #[test]
    fn parse_rejects_missing_db() {
        let argv = vec!["rpki".to_string(), "--tal-url".to_string(), "x".to_string()];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("--db is required"), "{err}");
    }

    // Neither --tal-url nor --tal-path given: must fail with a TAL-mode error.
    #[test]
    fn parse_rejects_missing_tal_mode() {
        let argv = vec!["rpki".to_string(), "--db".to_string(), "db".to_string()];
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("--tal-url") || err.contains("--tal-path"), "{err}");
    }

    #[test]
    fn parse_accepts_tal_url_mode() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-url".to_string(),
            "https://example.test/x.tal".to_string(),
        ];
        let args = parse_args(&argv).expect("parse");
        assert_eq!(args.tal_url.as_deref(), Some("https://example.test/x.tal"));
        assert!(args.tal_path.is_none());
        assert!(args.ta_path.is_none());
    }

    // Offline mode: --tal-path plus --ta-path, with --max-depth 0 accepted.
    #[test]
    fn parse_accepts_offline_mode_requires_ta() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-path".to_string(),
            "a.tal".to_string(),
            "--ta-path".to_string(),
            "ta.cer".to_string(),
            "--max-depth".to_string(),
            "0".to_string(),
        ];
        let args = parse_args(&argv).expect("parse");
        assert_eq!(args.tal_path.as_deref(), Some(Path::new("a.tal")));
        assert_eq!(args.ta_path.as_deref(), Some(Path::new("ta.cer")));
        assert_eq!(args.max_depth, Some(0));
    }

    #[test]
    fn parse_accepts_validation_time_rfc3339() {
        let argv = vec![
            "rpki".to_string(),
            "--db".to_string(),
            "db".to_string(),
            "--tal-url".to_string(),
            "https://example.test/x.tal".to_string(),
            "--validation-time".to_string(),
            "2026-01-01T00:00:00Z".to_string(),
        ];
        let args = parse_args(&argv).expect("parse");
        assert!(args.validation_time.is_some());
    }

    #[test]
    fn read_policy_accepts_valid_toml() {
        let dir = tempfile::tempdir().expect("tmpdir");
        let p = dir.path().join("policy.toml");
        std::fs::write(
            &p,
            "signed_object_failure_policy = \"drop_publication_point\"\n",
        )
        .expect("write policy");
        let policy = read_policy(Some(&p)).expect("parse policy");
        assert_eq!(
            policy.signed_object_failure_policy,
            crate::policy::SignedObjectFailurePolicy::DropPublicationPoint
        );
    }

    #[test]
    fn read_policy_reports_missing_file() {
        let dir = tempfile::tempdir().expect("tmpdir");
        let p = dir.path().join("missing.toml");
        let err = read_policy(Some(&p)).unwrap_err();
        assert!(err.contains("read policy file failed"), "{err}");
    }

    // End-to-end over build_report / unique_rrdp_repos / print_summary using
    // fixture TAL+TA files and hand-built tree output.
    #[test]
    fn build_report_and_helpers_work_on_synthetic_output() {
        let tal_bytes = std::fs::read(
            std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
                .join("tests/fixtures/tal/apnic-rfc7730-https.tal"),
        )
        .expect("read tal fixture");
        let ta_der = std::fs::read(
            std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
                .join("tests/fixtures/ta/apnic-ta.cer"),
        )
        .expect("read ta fixture");
        let discovery = crate::validation::from_tal::discover_root_ca_instance_from_tal_and_ta_der(
            &tal_bytes,
            &ta_der,
            None,
        )
        .expect("discover root");
        let tree = crate::validation::tree::TreeRunOutput {
            instances_processed: 1,
            instances_failed: 0,
            warnings: vec![crate::report::Warning::new("synthetic warning")
                .with_rfc_refs(&[crate::report::RfcRef("RFC 6487 §4.8.8.1")])
                .with_context("rsync://example.test/repo/pp/")],
            vrps: vec![crate::validation::objects::Vrp {
                asn: 64496,
                prefix: crate::data_model::roa::IpPrefix {
                    afi: crate::data_model::roa::RoaAfi::Ipv4,
                    prefix_len: 24,
                    addr: vec![192, 0, 2, 0],
                },
                max_length: 24,
            }],
            aspas: vec![crate::validation::objects::AspaAttestation {
                customer_as_id: 64496,
                provider_as_ids: vec![64497, 64498],
            }],
        };
        // Two points share one notification URI, a third has its own:
        // unique count must be 2.
        let mut pp1 = crate::audit::PublicationPointAudit::default();
        pp1.rrdp_notification_uri = Some("https://example.test/n1.xml".to_string());
        let mut pp2 = crate::audit::PublicationPointAudit::default();
        pp2.rrdp_notification_uri = Some("https://example.test/n1.xml".to_string());
        let mut pp3 = crate::audit::PublicationPointAudit::default();
        pp3.rrdp_notification_uri = Some("https://example.test/n2.xml".to_string());
        let out = crate::validation::run_tree_from_tal::RunTreeFromTalAuditOutput {
            discovery,
            tree,
            publication_points: vec![pp1, pp2, pp3],
        };
        let policy = Policy::default();
        let validation_time = time::OffsetDateTime::now_utc();
        let report = build_report(&policy, validation_time, out);
        assert_eq!(unique_rrdp_repos(&report), 2);
        assert_eq!(report.vrps.len(), 1);
        assert_eq!(report.aspas.len(), 1);
        print_summary(&report);
    }

    #[test]
    fn write_json_writes_report() {
        let report = AuditReportV1 {
            format_version: 1,
            meta: AuditRunMeta {
                validation_time_rfc3339_utc: "2026-01-01T00:00:00Z".to_string(),
            },
            policy: Policy::default(),
            tree: TreeSummary {
                instances_processed: 0,
                instances_failed: 0,
                warnings: Vec::new(),
            },
            publication_points: Vec::new(),
            vrps: Vec::new(),
            aspas: Vec::new(),
        };
        let dir = tempfile::tempdir().expect("tmpdir");
        let p = dir.path().join("report.json");
        write_json(&p, &report).expect("write json");
        let s = std::fs::read_to_string(&p).expect("read report");
        assert!(s.contains("\"format_version\""));
        assert!(s.contains("\"policy\""));
    }
}

View File

@ -10,8 +10,10 @@ use crate::data_model::common::{
Asn1TimeUtc, InvalidTimeEncodingError, UtcTime, asn1_time_to_model, Asn1TimeUtc, InvalidTimeEncodingError, UtcTime, asn1_time_to_model,
}; };
use crate::data_model::oid::{ use crate::data_model::oid::{
OID_AD_SIGNED_OBJECT, OID_AUTONOMOUS_SYS_IDS, OID_CP_IPADDR_ASNUMBER, OID_IP_ADDR_BLOCKS, OID_AD_CA_ISSUERS, OID_AD_SIGNED_OBJECT, OID_AUTHORITY_INFO_ACCESS,
OID_SHA256_WITH_RSA_ENCRYPTION, OID_SUBJECT_INFO_ACCESS, OID_SUBJECT_KEY_IDENTIFIER, OID_AUTHORITY_KEY_IDENTIFIER, OID_AUTONOMOUS_SYS_IDS, OID_CRL_DISTRIBUTION_POINTS,
OID_CP_IPADDR_ASNUMBER, OID_IP_ADDR_BLOCKS, OID_SHA256_WITH_RSA_ENCRYPTION,
OID_SUBJECT_INFO_ACCESS, OID_SUBJECT_KEY_IDENTIFIER,
}; };
/// Resource Certificate kind (semantic classification). /// Resource Certificate kind (semantic classification).
@ -54,6 +56,12 @@ pub struct RpkixTbsCertificate {
pub struct RcExtensions { pub struct RcExtensions {
pub basic_constraints_ca: bool, pub basic_constraints_ca: bool,
pub subject_key_identifier: Option<Vec<u8>>, pub subject_key_identifier: Option<Vec<u8>>,
/// Authority Key Identifier (AKI) keyIdentifier value.
pub authority_key_identifier: Option<Vec<u8>>,
/// CRL Distribution Points URIs (fullName).
pub crl_distribution_points_uris: Option<Vec<Url>>,
/// Authority Information Access (AIA) caIssuers URIs.
pub ca_issuers_uris: Option<Vec<Url>>,
pub subject_info_access: Option<SubjectInfoAccess>, pub subject_info_access: Option<SubjectInfoAccess>,
pub certificate_policies_oid: Option<String>, pub certificate_policies_oid: Option<String>,
@ -104,12 +112,44 @@ impl AlgorithmIdentifierValue {
pub struct RcExtensionsParsed { pub struct RcExtensionsParsed {
pub basic_constraints_ca: Vec<bool>, pub basic_constraints_ca: Vec<bool>,
pub subject_key_identifier: Vec<(Vec<u8>, bool)>, pub subject_key_identifier: Vec<(Vec<u8>, bool)>,
pub authority_key_identifier: Vec<(AuthorityKeyIdentifierParsed, bool)>,
pub crl_distribution_points: Vec<(CrlDistributionPointsParsed, bool)>,
pub authority_info_access: Vec<(AuthorityInfoAccessParsed, bool)>,
pub subject_info_access: Vec<(SubjectInfoAccessParsed, bool)>, pub subject_info_access: Vec<(SubjectInfoAccessParsed, bool)>,
pub certificate_policies: Vec<(Vec<String>, bool)>, pub certificate_policies: Vec<(Vec<String>, bool)>,
pub ip_resources: Vec<(IpResourceSet, bool)>, pub ip_resources: Vec<(IpResourceSet, bool)>,
pub as_resources: Vec<(AsResourceSet, bool)>, pub as_resources: Vec<(AsResourceSet, bool)>,
} }
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct AuthorityKeyIdentifierParsed {
pub key_identifier: Option<Vec<u8>>,
pub has_authority_cert_issuer: bool,
pub has_authority_cert_serial: bool,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct AuthorityInfoAccessParsed {
pub ca_issuers_uris: Vec<Url>,
pub ca_issuers_access_location_not_uri: bool,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CrlDistributionPointsParsed {
pub distribution_points: Vec<CrlDistributionPointParsed>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CrlDistributionPointParsed {
pub distribution_point_present: bool,
pub reasons_present: bool,
pub crl_issuer_present: bool,
pub name_relative_to_crl_issuer_present: bool,
pub full_name_uris: Vec<Url>,
pub full_name_not_uri: bool,
pub full_name_present: bool,
}
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct SubjectInfoAccessParsed { pub struct SubjectInfoAccessParsed {
pub access_descriptions: Vec<AccessDescription>, pub access_descriptions: Vec<AccessDescription>,
@ -375,6 +415,103 @@ pub enum ResourceCertificateProfileError {
#[error("autonomousSysIds criticality must be critical when present (RFC 6487 §4.8.11)")] #[error("autonomousSysIds criticality must be critical when present (RFC 6487 §4.8.11)")]
AsResourcesCriticality, AsResourcesCriticality,
#[error(
"authorityKeyIdentifier must be present in non-self-signed certificates (RFC 6487 §4.8.3; RFC 5280 §4.2.1.1)"
)]
AkiMissing,
#[error(
"authorityKeyIdentifier criticality must be non-critical (RFC 6487 §4.8.3; RFC 5280 §4.2.1.1)"
)]
AkiCriticality,
#[error(
"authorityKeyIdentifier authorityCertIssuer MUST NOT be present (RFC 6487 §4.8.3; RFC 5280 §4.2.1.1)"
)]
AkiAuthorityCertIssuerPresent,
#[error(
"authorityKeyIdentifier authorityCertSerialNumber MUST NOT be present (RFC 6487 §4.8.3; RFC 5280 §4.2.1.1)"
)]
AkiAuthorityCertSerialPresent,
#[error(
"self-signed certificate authorityKeyIdentifier must equal subjectKeyIdentifier when present (RFC 6487 §4.8.3)"
)]
AkiSelfSignedNotEqualSki,
#[error(
"CRLDistributionPoints must be present in non-self-signed certificates (RFC 6487 §4.8.6; RFC 5280 §4.2.1.13)"
)]
CrlDistributionPointsMissing,
#[error(
"CRLDistributionPoints criticality must be non-critical (RFC 6487 §4.8.6; RFC 5280 §4.2.1.13)"
)]
CrlDistributionPointsCriticality,
#[error(
"CRLDistributionPoints MUST be omitted in self-signed certificates (RFC 6487 §4.8.6)"
)]
CrlDistributionPointsSelfSignedMustOmit,
#[error(
"CRLDistributionPoints must contain exactly one DistributionPoint (RFC 6487 §4.8.6)"
)]
CrlDistributionPointsNotSingle,
#[error(
"CRLDistributionPoints distributionPoint field MUST be present (RFC 6487 §4.8.6)"
)]
CrlDistributionPointsNoDistributionPoint,
#[error("CRLDistributionPoints reasons field MUST be omitted (RFC 6487 §4.8.6)")]
CrlDistributionPointsHasReasons,
#[error("CRLDistributionPoints cRLIssuer field MUST be omitted (RFC 6487 §4.8.6)")]
CrlDistributionPointsHasCrlIssuer,
#[error(
"CRLDistributionPoints distributionPoint MUST contain fullName and MUST NOT contain nameRelativeToCRLIssuer (RFC 6487 §4.8.6)"
)]
CrlDistributionPointsInvalidName,
#[error(
"CRLDistributionPoints fullName must contain only URI GeneralNames (RFC 6487 §4.8.6; RFC 5280 §4.2.1.6)"
)]
CrlDistributionPointsFullNameNotUri,
#[error("CRLDistributionPoints must include at least one rsync:// URI (RFC 6487 §4.8.6)")]
CrlDistributionPointsNoRsync,
#[error(
"authorityInfoAccess must be present in non-self-signed certificates (RFC 6487 §4.8.7; RFC 5280 §4.2.2.1)"
)]
AuthorityInfoAccessMissing,
#[error(
"authorityInfoAccess criticality must be non-critical (RFC 6487 §4.8.7; RFC 5280 §4.2.2.1)"
)]
AuthorityInfoAccessCriticality,
#[error(
"authorityInfoAccess MUST be omitted in self-signed certificates (RFC 6487 §4.8.7)"
)]
AuthorityInfoAccessSelfSignedMustOmit,
#[error(
"authorityInfoAccess id-ad-caIssuers accessLocation must be URI (RFC 6487 §4.8.7; RFC 5280 §4.2.2.1)"
)]
AuthorityInfoAccessCaIssuersNotUri,
#[error(
"authorityInfoAccess must include at least one id-ad-caIssuers URI (RFC 6487 §4.8.7)"
)]
AuthorityInfoAccessMissingCaIssuers,
#[error("authorityInfoAccess must include at least one rsync:// URI (RFC 6487 §4.8.7)")]
AuthorityInfoAccessNoRsync,
} }
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
@ -464,7 +601,8 @@ impl ResourceCertificateParsed {
return Err(ResourceCertificateProfileError::InvalidSignatureAlgorithmParameters); return Err(ResourceCertificateProfileError::InvalidSignatureAlgorithmParameters);
} }
let extensions = self.extensions.validate_profile()?; let is_self_signed = self.issuer_dn == self.subject_dn;
let extensions = self.extensions.validate_profile(is_self_signed)?;
let kind = if extensions.basic_constraints_ca { let kind = if extensions.basic_constraints_ca {
ResourceCertKind::Ca ResourceCertKind::Ca
} else { } else {
@ -490,7 +628,10 @@ impl ResourceCertificateParsed {
} }
impl RcExtensionsParsed { impl RcExtensionsParsed {
pub fn validate_profile(self) -> Result<RcExtensions, ResourceCertificateProfileError> { pub fn validate_profile(
self,
is_self_signed: bool,
) -> Result<RcExtensions, ResourceCertificateProfileError> {
if self.basic_constraints_ca.len() > 1 { if self.basic_constraints_ca.len() > 1 {
return Err(ResourceCertificateProfileError::DuplicateExtension( return Err(ResourceCertificateProfileError::DuplicateExtension(
"basicConstraints", "basicConstraints",
@ -513,6 +654,123 @@ impl RcExtensionsParsed {
} }
}; };
let authority_key_identifier = match self.authority_key_identifier.as_slice() {
[] => {
if is_self_signed {
None
} else {
return Err(ResourceCertificateProfileError::AkiMissing);
}
}
[(aki, critical)] => {
if *critical {
return Err(ResourceCertificateProfileError::AkiCriticality);
}
if aki.has_authority_cert_issuer {
return Err(ResourceCertificateProfileError::AkiAuthorityCertIssuerPresent);
}
if aki.has_authority_cert_serial {
return Err(ResourceCertificateProfileError::AkiAuthorityCertSerialPresent);
}
let keyid = aki.key_identifier.clone();
if is_self_signed {
if let (Some(keyid), Some(ski)) = (keyid.as_ref(), subject_key_identifier.as_ref())
{
if keyid != ski {
return Err(ResourceCertificateProfileError::AkiSelfSignedNotEqualSki);
}
}
} else if keyid.is_none() {
return Err(ResourceCertificateProfileError::AkiMissing);
}
keyid
}
_ => {
return Err(ResourceCertificateProfileError::DuplicateExtension(
"authorityKeyIdentifier",
));
}
};
let crl_distribution_points_uris = match self.crl_distribution_points.as_slice() {
[] => {
if is_self_signed {
None
} else {
return Err(ResourceCertificateProfileError::CrlDistributionPointsMissing);
}
}
[(crldp, critical)] => {
if *critical {
return Err(ResourceCertificateProfileError::CrlDistributionPointsCriticality);
}
if is_self_signed {
return Err(ResourceCertificateProfileError::CrlDistributionPointsSelfSignedMustOmit);
}
if crldp.distribution_points.len() != 1 {
return Err(ResourceCertificateProfileError::CrlDistributionPointsNotSingle);
}
let dp = &crldp.distribution_points[0];
if dp.reasons_present {
return Err(ResourceCertificateProfileError::CrlDistributionPointsHasReasons);
}
if dp.crl_issuer_present {
return Err(ResourceCertificateProfileError::CrlDistributionPointsHasCrlIssuer);
}
if !dp.distribution_point_present {
return Err(ResourceCertificateProfileError::CrlDistributionPointsNoDistributionPoint);
}
if dp.name_relative_to_crl_issuer_present || !dp.full_name_present {
return Err(ResourceCertificateProfileError::CrlDistributionPointsInvalidName);
}
if dp.full_name_not_uri {
return Err(ResourceCertificateProfileError::CrlDistributionPointsFullNameNotUri);
}
if !dp.full_name_uris.iter().any(|u| u.scheme() == "rsync") {
return Err(ResourceCertificateProfileError::CrlDistributionPointsNoRsync);
}
Some(dp.full_name_uris.clone())
}
_ => {
return Err(ResourceCertificateProfileError::DuplicateExtension(
"cRLDistributionPoints",
));
}
};
let ca_issuers_uris = match self.authority_info_access.as_slice() {
[] => {
if is_self_signed {
None
} else {
return Err(ResourceCertificateProfileError::AuthorityInfoAccessMissing);
}
}
[(aia, critical)] => {
if *critical {
return Err(ResourceCertificateProfileError::AuthorityInfoAccessCriticality);
}
if is_self_signed {
return Err(ResourceCertificateProfileError::AuthorityInfoAccessSelfSignedMustOmit);
}
if aia.ca_issuers_access_location_not_uri {
return Err(ResourceCertificateProfileError::AuthorityInfoAccessCaIssuersNotUri);
}
if aia.ca_issuers_uris.is_empty() {
return Err(ResourceCertificateProfileError::AuthorityInfoAccessMissingCaIssuers);
}
if !aia.ca_issuers_uris.iter().any(|u| u.scheme() == "rsync") {
return Err(ResourceCertificateProfileError::AuthorityInfoAccessNoRsync);
}
Some(aia.ca_issuers_uris.clone())
}
_ => {
return Err(ResourceCertificateProfileError::DuplicateExtension(
"authorityInfoAccess",
));
}
};
let subject_info_access = match self.subject_info_access.as_slice() { let subject_info_access = match self.subject_info_access.as_slice() {
[] => None, [] => None,
[(sia, critical)] => { [(sia, critical)] => {
@ -604,6 +862,9 @@ impl RcExtensionsParsed {
Ok(RcExtensions { Ok(RcExtensions {
basic_constraints_ca, basic_constraints_ca,
subject_key_identifier, subject_key_identifier,
authority_key_identifier,
crl_distribution_points_uris,
ca_issuers_uris,
subject_info_access, subject_info_access,
certificate_policies_oid, certificate_policies_oid,
ip_resources, ip_resources,
@ -631,6 +892,9 @@ fn parse_extensions_parse(
) -> Result<RcExtensionsParsed, ResourceCertificateParseError> { ) -> Result<RcExtensionsParsed, ResourceCertificateParseError> {
let mut basic_constraints_ca: Vec<bool> = Vec::new(); let mut basic_constraints_ca: Vec<bool> = Vec::new();
let mut ski: Vec<(Vec<u8>, bool)> = Vec::new(); let mut ski: Vec<(Vec<u8>, bool)> = Vec::new();
let mut aki: Vec<(AuthorityKeyIdentifierParsed, bool)> = Vec::new();
let mut crldp: Vec<(CrlDistributionPointsParsed, bool)> = Vec::new();
let mut aia: Vec<(AuthorityInfoAccessParsed, bool)> = Vec::new();
let mut sia: Vec<(SubjectInfoAccessParsed, bool)> = Vec::new(); let mut sia: Vec<(SubjectInfoAccessParsed, bool)> = Vec::new();
let mut cert_policies: Vec<(Vec<String>, bool)> = Vec::new(); let mut cert_policies: Vec<(Vec<String>, bool)> = Vec::new();
@ -656,6 +920,37 @@ fn parse_extensions_parse(
}; };
ski.push((s.0.to_vec(), ext.critical)); ski.push((s.0.to_vec(), ext.critical));
} }
OID_AUTHORITY_KEY_IDENTIFIER => {
let ParsedExtension::AuthorityKeyIdentifier(a) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"authorityKeyIdentifier parse failed".into(),
));
};
aki.push((
AuthorityKeyIdentifierParsed {
key_identifier: a.key_identifier.as_ref().map(|k| k.0.to_vec()),
has_authority_cert_issuer: a.authority_cert_issuer.is_some(),
has_authority_cert_serial: a.authority_cert_serial.is_some(),
},
ext.critical,
));
}
OID_CRL_DISTRIBUTION_POINTS => {
let ParsedExtension::CRLDistributionPoints(p) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"cRLDistributionPoints parse failed".into(),
));
};
crldp.push((parse_crldp_parse(p)?, ext.critical));
}
OID_AUTHORITY_INFO_ACCESS => {
let ParsedExtension::AuthorityInfoAccess(p) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse(
"authorityInfoAccess parse failed".into(),
));
};
aia.push((parse_aia_parse(p.accessdescs.as_slice())?, ext.critical));
}
OID_SUBJECT_INFO_ACCESS => { OID_SUBJECT_INFO_ACCESS => {
let ParsedExtension::SubjectInfoAccess(s) = ext.parsed_extension() else { let ParsedExtension::SubjectInfoAccess(s) = ext.parsed_extension() else {
return Err(ResourceCertificateParseError::Parse( return Err(ResourceCertificateParseError::Parse(
@ -690,6 +985,9 @@ fn parse_extensions_parse(
Ok(RcExtensionsParsed { Ok(RcExtensionsParsed {
basic_constraints_ca, basic_constraints_ca,
subject_key_identifier: ski, subject_key_identifier: ski,
authority_key_identifier: aki,
crl_distribution_points: crldp,
authority_info_access: aia,
subject_info_access: sia, subject_info_access: sia,
certificate_policies: cert_policies, certificate_policies: cert_policies,
ip_resources, ip_resources,
@ -697,6 +995,88 @@ fn parse_extensions_parse(
}) })
} }
/// Extract caIssuers entries from an AIA extension's AccessDescription list.
///
/// Access methods other than id-ad-caIssuers are ignored. A caIssuers entry
/// whose accessLocation is not a URI GeneralName is recorded via the
/// `ca_issuers_access_location_not_uri` flag rather than treated as fatal;
/// a URI that fails to parse is an error.
fn parse_aia_parse(
    access: &[x509_parser::extensions::AccessDescription<'_>],
) -> Result<AuthorityInfoAccessParsed, ResourceCertificateParseError> {
    let mut uris: Vec<Url> = Vec::new();
    let mut saw_non_uri_location = false;
    for desc in access {
        // Only the id-ad-caIssuers access method is relevant here.
        if desc.access_method.to_id_string() != OID_AD_CA_ISSUERS {
            continue;
        }
        match &desc.access_location {
            x509_parser::extensions::GeneralName::URI(raw) => {
                let parsed = Url::parse(raw).map_err(|_| {
                    ResourceCertificateParseError::Parse(format!("invalid URI: {raw}"))
                })?;
                uris.push(parsed);
            }
            _ => saw_non_uri_location = true,
        }
    }
    Ok(AuthorityInfoAccessParsed {
        ca_issuers_uris: uris,
        ca_issuers_access_location_not_uri: saw_non_uri_location,
    })
}
/// Parse a cRLDistributionPoints extension into flag/URI form.
///
/// This only records what each DistributionPoint contains; the caller's
/// profile checks decide which combinations are errors. A malformed URI in
/// a FullName is the only fatal condition here.
fn parse_crldp_parse(
    crldp: &x509_parser::extensions::CRLDistributionPoints<'_>,
) -> Result<CrlDistributionPointsParsed, ResourceCertificateParseError> {
    let mut out: Vec<CrlDistributionPointParsed> = Vec::new();
    for p in crldp.iter() {
        // Per-DistributionPoint observations, all defaulting to "absent".
        let mut full_name_uris: Vec<Url> = Vec::new();
        let mut full_name_not_uri = false;
        let mut full_name_present = false;
        let mut name_relative_to_crl_issuer_present = false;
        let mut distribution_point_present = false;
        if let Some(dp) = &p.distribution_point {
            distribution_point_present = true;
            match dp {
                x509_parser::extensions::DistributionPointName::FullName(names) => {
                    full_name_present = true;
                    for n in names {
                        match n {
                            x509_parser::extensions::GeneralName::URI(u) => {
                                // URI GeneralNames must parse; malformed URIs are fatal.
                                let url = Url::parse(u).map_err(|_| {
                                    ResourceCertificateParseError::Parse(format!(
                                        "invalid URI: {u}"
                                    ))
                                })?;
                                full_name_uris.push(url);
                            }
                            _ => {
                                // Non-URI names are recorded as a flag, not an error.
                                full_name_not_uri = true;
                            }
                        }
                    }
                }
                x509_parser::extensions::DistributionPointName::NameRelativeToCRLIssuer(_) => {
                    name_relative_to_crl_issuer_present = true;
                }
            }
        }
        out.push(CrlDistributionPointParsed {
            distribution_point_present,
            reasons_present: p.reasons.is_some(),
            crl_issuer_present: p.crl_issuer.is_some(),
            name_relative_to_crl_issuer_present,
            full_name_uris,
            full_name_not_uri,
            full_name_present,
        });
    }
    Ok(CrlDistributionPointsParsed {
        distribution_points: out,
    })
}
fn parse_sia_parse( fn parse_sia_parse(
access: &[x509_parser::extensions::AccessDescription<'_>], access: &[x509_parser::extensions::AccessDescription<'_>],
) -> Result<SubjectInfoAccessParsed, ResourceCertificateParseError> { ) -> Result<SubjectInfoAccessParsed, ResourceCertificateParseError> {

65
src/fetch/http.rs Normal file
View File

@ -0,0 +1,65 @@
use std::time::Duration;
use reqwest::blocking::Client;
use crate::sync::rrdp::Fetcher;
/// Configuration for `BlockingHttpFetcher`.
#[derive(Clone, Debug)]
pub struct HttpFetcherConfig {
    // Timeout applied to each HTTP request via the client builder.
    pub timeout: Duration,
    // Value sent as the User-Agent header.
    pub user_agent: String,
}

impl Default for HttpFetcherConfig {
    fn default() -> Self {
        Self {
            // Conservative default for TAL / RRDP document fetches.
            timeout: Duration::from_secs(20),
            user_agent: "rpki-dev/0.1 (stage2)".to_string(),
        }
    }
}
/// Minimal blocking HTTP(S) fetcher for stage2.
///
/// Used to download TAL / TA certificates (RFC 8630 §2) and to fetch RRDP
/// notification/snapshot files (RFC 8182 §3.4).
#[derive(Clone, Debug)]
pub struct BlockingHttpFetcher {
    client: Client,
}

impl BlockingHttpFetcher {
    /// Build a fetcher from `config`; errors from the client builder are
    /// rendered to a string.
    pub fn new(config: HttpFetcherConfig) -> Result<Self, String> {
        Client::builder()
            .timeout(config.timeout)
            .user_agent(config.user_agent)
            .build()
            .map(|client| Self { client })
            .map_err(|e| e.to_string())
    }

    /// GET `uri` and return the full response body.
    ///
    /// Any non-2xx status is reported as an error string.
    pub fn fetch_bytes(&self, uri: &str) -> Result<Vec<u8>, String> {
        let response = self
            .client
            .get(uri)
            .send()
            .map_err(|e| format!("http request failed: {e}"))?;
        let status = response.status();
        if !status.is_success() {
            return Err(format!("http status {status}"));
        }
        let body = response
            .bytes()
            .map_err(|e| format!("http read body failed: {e}"))?;
        Ok(body.to_vec())
    }
}
impl Fetcher for BlockingHttpFetcher {
    // The RRDP `Fetcher` trait is satisfied by a plain byte-for-byte GET.
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
        self.fetch_bytes(uri)
    }
}

3
src/fetch/mod.rs Normal file
View File

@ -0,0 +1,3 @@
pub mod http;
pub mod rsync;
pub mod rsync_system;

82
src/fetch/rsync.rs Normal file
View File

@ -0,0 +1,82 @@
use std::path::{Path, PathBuf};
/// Error type for the rsync fetch layer.
#[derive(Debug, thiserror::Error)]
pub enum RsyncFetchError {
    // Single catch-all variant; failures carry a free-form detail string.
    #[error("rsync fetch error: {0}")]
    Fetch(String),
}

pub type RsyncFetchResult<T> = Result<T, RsyncFetchError>;

/// Fetch repository objects from a publication point.
///
/// v1: this is intentionally abstract so unit tests can use a mock, and later we can
/// back it by calling the system `rsync` binary (RFC 6481 §5; RFC 8182 §3.4.5).
pub trait RsyncFetcher {
    /// Return a list of objects as `(rsync_uri, bytes)` pairs.
    fn fetch_objects(&self, rsync_base_uri: &str) -> RsyncFetchResult<Vec<(String, Vec<u8>)>>;
}
/// A simple "rsync" implementation backed by a local directory.
///
/// This is primarily meant for offline tests and fixtures. The key generation mimics rsync URIs:
/// `rsync_base_uri` + relative path (with `/` separators).
pub struct LocalDirRsyncFetcher {
    // Directory whose files stand in for the remote module contents.
    pub root_dir: PathBuf,
}

impl LocalDirRsyncFetcher {
    /// Create a fetcher rooted at `root_dir`; the path is not checked here.
    pub fn new(root_dir: impl Into<PathBuf>) -> Self {
        Self {
            root_dir: root_dir.into(),
        }
    }
}
impl RsyncFetcher for LocalDirRsyncFetcher {
    /// Walk `root_dir` recursively and return each regular file as a
    /// `(rsync_uri, bytes)` pair; the URI is the normalized base URI plus
    /// the file's `/`-separated relative path.
    fn fetch_objects(&self, rsync_base_uri: &str) -> RsyncFetchResult<Vec<(String, Vec<u8>)>> {
        let base = normalize_rsync_base_uri(rsync_base_uri);
        let mut out = Vec::new();
        // Pass the variant constructor directly instead of a redundant closure
        // (consistent with `SystemRsyncFetcher::fetch_objects`).
        walk_dir_collect(&self.root_dir, &self.root_dir, &base, &mut out)
            .map_err(RsyncFetchError::Fetch)?;
        Ok(out)
    }
}
/// Recursively collect every regular file under `current` (rooted at `root`)
/// as a `(rsync_uri, bytes)` pair.
///
/// The URI is `rsync_base_uri` followed by the file's path relative to `root`,
/// always using `/` separators. Directories are descended into; entries that
/// are neither file nor directory are skipped. Any I/O error is rendered to a
/// string and returned.
fn walk_dir_collect(
    root: &Path,
    current: &Path,
    rsync_base_uri: &str,
    out: &mut Vec<(String, Vec<u8>)>,
) -> Result<(), String> {
    for dirent in std::fs::read_dir(current).map_err(|e| e.to_string())? {
        let dirent = dirent.map_err(|e| e.to_string())?;
        let entry_path = dirent.path();
        let metadata = dirent.metadata().map_err(|e| e.to_string())?;
        if metadata.is_dir() {
            walk_dir_collect(root, &entry_path, rsync_base_uri, out)?;
        } else if metadata.is_file() {
            // Relative path with `/` separators so URIs are platform-independent.
            let relative = entry_path
                .strip_prefix(root)
                .map_err(|e| e.to_string())?
                .to_string_lossy()
                .replace('\\', "/");
            let contents = std::fs::read(&entry_path).map_err(|e| e.to_string())?;
            out.push((format!("{rsync_base_uri}{relative}"), contents));
        }
    }
    Ok(())
}
/// Ensure an rsync base URI ends with a trailing `/` so relative paths can
/// be appended directly.
fn normalize_rsync_base_uri(s: &str) -> String {
    let mut normalized = s.to_string();
    if !normalized.ends_with('/') {
        normalized.push('/');
    }
    normalized
}

142
src/fetch/rsync_system.rs Normal file
View File

@ -0,0 +1,142 @@
use std::path::{Path, PathBuf};
use std::process::Command;
use std::time::Duration;
use uuid::Uuid;
use crate::fetch::rsync::{RsyncFetchError, RsyncFetchResult, RsyncFetcher};
/// Configuration for `SystemRsyncFetcher`.
#[derive(Clone, Debug)]
pub struct SystemRsyncConfig {
    // Path (or bare name resolved via PATH) of the rsync executable.
    pub rsync_bin: PathBuf,
    // Passed to rsync as `--timeout` (whole seconds, minimum 1).
    pub timeout: Duration,
    // Extra CLI arguments inserted before the source/destination pair.
    pub extra_args: Vec<String>,
}

impl Default for SystemRsyncConfig {
    fn default() -> Self {
        Self {
            rsync_bin: PathBuf::from("rsync"),
            timeout: Duration::from_secs(60),
            extra_args: Vec::new(),
        }
    }
}
/// A `RsyncFetcher` implementation backed by the system `rsync` binary.
///
/// This is intended for live stage2 runs. For unit tests and offline fixtures,
/// prefer `LocalDirRsyncFetcher`.
#[derive(Clone, Debug)]
pub struct SystemRsyncFetcher {
    // Binary path, timeout, and extra CLI arguments used for every invocation.
    config: SystemRsyncConfig,
}

impl SystemRsyncFetcher {
    /// Wrap a config; `rsync_bin` is not validated until the first run.
    pub fn new(config: SystemRsyncConfig) -> Self {
        Self { config }
    }

    /// Run `rsync -rt --delete --timeout <secs> [extra_args] <src> <dst>`.
    ///
    /// On a non-zero exit status, trimmed stdout and stderr are folded into
    /// the returned error string for diagnosis.
    fn run_rsync(&self, src: &str, dst: &Path) -> Result<(), String> {
        // `--timeout` is I/O timeout in seconds (applies to network reads/writes).
        let timeout_secs = self.config.timeout.as_secs().max(1).to_string();
        let mut cmd = Command::new(&self.config.rsync_bin);
        cmd.arg("-rt")
            .arg("--delete")
            .arg("--timeout")
            .arg(timeout_secs)
            .args(&self.config.extra_args)
            .arg(src)
            .arg(dst);
        let out = cmd.output().map_err(|e| format!("rsync spawn failed: {e}"))?;
        if !out.status.success() {
            let stderr = String::from_utf8_lossy(&out.stderr);
            let stdout = String::from_utf8_lossy(&out.stdout);
            return Err(format!(
                "rsync failed: status={} stdout={} stderr={}",
                out.status,
                stdout.trim(),
                stderr.trim()
            ));
        }
        Ok(())
    }
}
impl RsyncFetcher for SystemRsyncFetcher {
    /// Mirror the remote module into a throwaway temp dir via rsync, then
    /// read every file back as `(rsync_uri, bytes)` pairs. The temp dir is
    /// removed when `tmp` drops.
    fn fetch_objects(&self, rsync_base_uri: &str) -> RsyncFetchResult<Vec<(String, Vec<u8>)>> {
        let base = normalize_rsync_base_uri(rsync_base_uri);
        // `TempDir::new` already yields a String error, so no extra
        // `to_string()` conversion is needed (the original called
        // `.to_string()` on a `String`).
        let tmp = TempDir::new().map_err(RsyncFetchError::Fetch)?;
        self.run_rsync(&base, tmp.path())
            .map_err(RsyncFetchError::Fetch)?;
        let mut out = Vec::new();
        walk_dir_collect(tmp.path(), tmp.path(), &base, &mut out)
            .map_err(RsyncFetchError::Fetch)?;
        Ok(out)
    }
}
/// RAII temp directory: created under the system temp dir with a random
/// UUID-suffixed name, removed (best effort) on drop.
struct TempDir {
    path: PathBuf,
}

impl TempDir {
    /// Create a fresh, uniquely named directory; errors are stringified.
    fn new() -> Result<Self, String> {
        let path = std::env::temp_dir().join(format!("rpki-system-rsync-{}", Uuid::new_v4()));
        std::fs::create_dir_all(&path).map_err(|e| e.to_string())?;
        Ok(Self { path })
    }

    /// Borrow the directory path.
    fn path(&self) -> &Path {
        &self.path
    }
}

impl Drop for TempDir {
    fn drop(&mut self) {
        // Ignore failures: drop must not panic, and a leftover dir is harmless.
        let _ = std::fs::remove_dir_all(&self.path);
    }
}
/// Return `s` guaranteed to end with a trailing `/` separator.
fn normalize_rsync_base_uri(s: &str) -> String {
    let mut base = String::with_capacity(s.len() + 1);
    base.push_str(s);
    if !base.ends_with('/') {
        base.push('/');
    }
    base
}
/// Depth-first walk of `current` (relative to `root`), pushing every regular
/// file as a `(rsync_uri, bytes)` pair; the URI is the base plus the
/// `/`-separated relative path. Entries that are neither directory nor
/// regular file are ignored.
fn walk_dir_collect(
    root: &Path,
    current: &Path,
    rsync_base_uri: &str,
    out: &mut Vec<(String, Vec<u8>)>,
) -> Result<(), String> {
    let entries = std::fs::read_dir(current).map_err(|e| e.to_string())?;
    for item in entries {
        let item = item.map_err(|e| e.to_string())?;
        let item_path = item.path();
        let kind = item.metadata().map_err(|e| e.to_string())?;
        if kind.is_dir() {
            // Recurse into subdirectories.
            walk_dir_collect(root, &item_path, rsync_base_uri, out)?;
            continue;
        }
        if !kind.is_file() {
            continue;
        }
        // Use `/` separators so the generated URI is platform-independent.
        let rel_path = item_path
            .strip_prefix(root)
            .map_err(|e| e.to_string())?
            .to_string_lossy()
            .replace('\\', "/");
        let body = std::fs::read(&item_path).map_err(|e| e.to_string())?;
        out.push((format!("{rsync_base_uri}{rel_path}"), body));
    }
    Ok(())
}

View File

@ -1 +1,9 @@
pub mod audit;
pub mod cli;
pub mod data_model; pub mod data_model;
pub mod fetch;
pub mod policy;
pub mod report;
pub mod storage;
pub mod sync;
pub mod validation;

70
src/policy.rs Normal file
View File

@ -0,0 +1,70 @@
use serde::{Deserialize, Serialize};
/// Transport preference when syncing a publication point.
///
/// Uses `#[derive(Default)]` with the `#[default]` variant attribute instead
/// of a hand-written impl; the default remains `RrdpThenRsync`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SyncPreference {
    /// Try RRDP first, fall back to rsync on failure.
    #[default]
    RrdpThenRsync,
    /// Use rsync exclusively.
    RsyncOnly,
}
/// What to do for a CA whose publication point could not be fetched.
///
/// Derived `Default` (with `#[default]`) replaces the manual impl; the
/// default remains `UseVerifiedCache`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CaFailedFetchPolicy {
    /// Fall back to the last verified cached publication point.
    #[default]
    UseVerifiedCache,
    /// Suppress all output for the affected CA.
    StopAllOutput,
}
/// How to treat a publication point when a signed object fails validation.
///
/// Derived `Default` (with `#[default]`) replaces the manual impl; the
/// default remains `DropObject`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SignedObjectFailurePolicy {
    /// Drop only the failing object.
    #[default]
    DropObject,
    /// Drop the entire publication point.
    DropPublicationPoint,
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct Policy {
pub sync_preference: SyncPreference,
pub ca_failed_fetch_policy: CaFailedFetchPolicy,
pub signed_object_failure_policy: SignedObjectFailurePolicy,
}
impl Default for Policy {
fn default() -> Self {
Self {
sync_preference: SyncPreference::default(),
ca_failed_fetch_policy: CaFailedFetchPolicy::default(),
signed_object_failure_policy: SignedObjectFailurePolicy::default(),
}
}
}
/// Error produced when a policy TOML document cannot be parsed.
#[derive(Debug, thiserror::Error)]
pub enum PolicyParseError {
    // TOML syntax/schema error, carried as a rendered string.
    #[error("policy TOML parse error: {0}")]
    Toml(String),
}

impl Policy {
    /// Parse a policy from TOML text. Missing keys fall back to their
    /// `Default` values (the struct is `#[serde(default)]`).
    pub fn from_toml_str(s: &str) -> Result<Self, PolicyParseError> {
        toml::from_str(s).map_err(|e| PolicyParseError::Toml(e.to_string()))
    }
}

29
src/report.rs Normal file
View File

@ -0,0 +1,29 @@
/// A static reference to an RFC section, e.g. `RfcRef("RFC 8182 §3.4.5")`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RfcRef(pub &'static str);

/// A non-fatal diagnostic produced during sync/validation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Warning {
    // Human-readable description of the condition.
    pub message: String,
    // RFC sections that motivated this warning (may be empty).
    pub rfc_refs: Vec<RfcRef>,
    // Optional context string, typically a URI.
    pub context: Option<String>,
}
impl Warning {
    /// Build a warning carrying only a message; refs and context start empty.
    pub fn new(message: impl Into<String>) -> Self {
        Self {
            message: message.into(),
            rfc_refs: Vec::new(),
            context: None,
        }
    }

    /// Append RFC references (builder style); `RfcRef` is `Copy`.
    pub fn with_rfc_refs(mut self, refs: &[RfcRef]) -> Self {
        for reference in refs {
            self.rfc_refs.push(*reference);
        }
        self
    }

    /// Attach a context string (builder style), replacing any existing one.
    pub fn with_context(mut self, context: impl Into<String>) -> Self {
        self.context = Some(context.into());
        self
    }
}

396
src/storage.rs Normal file
View File

@ -0,0 +1,396 @@
use std::path::Path;
use rocksdb::{
ColumnFamily, ColumnFamilyDescriptor, DB, DBCompressionType, Direction, IteratorMode, Options,
WriteBatch,
};
use serde::{Deserialize, Serialize};
use sha2::Digest;
use std::collections::HashSet;
// Column family names: raw fetched objects, verified publication-point
// packs, and per-notification-URI RRDP state.
const CF_RAW_OBJECTS: &str = "raw_objects";
const CF_VERIFIED_PUBLICATION_POINTS: &str = "verified_publication_points";
const CF_RRDP_STATE: &str = "rrdp_state";
/// Storage key for a verified publication-point pack.
///
/// Keys are the manifest's rsync URI prefixed with `verified:`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct VerifiedKey(String);

impl VerifiedKey {
    /// Build the key for the pack anchored at `manifest_rsync_uri`.
    pub fn from_manifest_rsync_uri(manifest_rsync_uri: &str) -> Self {
        let mut key = String::with_capacity("verified:".len() + manifest_rsync_uri.len());
        key.push_str("verified:");
        key.push_str(manifest_rsync_uri);
        Self(key)
    }

    /// Borrow the raw key string.
    pub fn as_str(&self) -> &str {
        &self.0
    }
}
/// Errors surfaced by the storage layer.
#[derive(Debug, thiserror::Error)]
pub enum StorageError {
    // Any rocksdb failure, rendered to a string.
    #[error("rocksdb error: {0}")]
    RocksDb(String),
    // A required column family handle was not found after open.
    #[error("missing column family: {0}")]
    MissingColumnFamily(&'static str),
    // Verified pack encode/decode/validation failure.
    #[error("verified publication point pack error: {0}")]
    Pack(#[from] PackDecodeError),
}

pub type StorageResult<T> = Result<T, StorageError>;

/// Wrapper around a RocksDB handle with typed accessors per column family.
pub struct RocksStore {
    db: DB,
}

// Re-export the pack types under a `pack` namespace for callers.
pub mod pack {
    pub use super::{PackDecodeError, PackFile, PackTime, VerifiedPublicationPointPack};
}
impl RocksStore {
    /// Open (or create) the database at `path` with the three expected
    /// column families.
    pub fn open(path: &Path) -> StorageResult<Self> {
        let mut base_opts = Options::default();
        base_opts.create_if_missing(true);
        base_opts.create_missing_column_families(true);
        // Prefer conservative compression; may be overridden later.
        base_opts.set_compression_type(DBCompressionType::Lz4);
        // Best-effort BlobDB enablement (ignored if bindings don't support it).
        enable_blobdb_if_supported(&mut base_opts);
        let cfs = vec![
            ColumnFamilyDescriptor::new(CF_RAW_OBJECTS, Options::default()),
            ColumnFamilyDescriptor::new(CF_VERIFIED_PUBLICATION_POINTS, Options::default()),
            ColumnFamilyDescriptor::new(CF_RRDP_STATE, Options::default()),
        ];
        let db = DB::open_cf_descriptors(&base_opts, path, cfs)
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(Self { db })
    }

    /// Resolve a column family handle by name; fails if the CF was not opened.
    fn cf(&self, name: &'static str) -> StorageResult<&ColumnFamily> {
        self.db
            .cf_handle(name)
            .ok_or(StorageError::MissingColumnFamily(name))
    }

    /// Store raw object bytes keyed by rsync URI.
    pub fn put_raw(&self, rsync_uri: &str, bytes: &[u8]) -> StorageResult<()> {
        let cf = self.cf(CF_RAW_OBJECTS)?;
        self.db
            .put_cf(cf, rsync_uri.as_bytes(), bytes)
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(())
    }

    /// Fetch raw object bytes; `None` if the URI is unknown.
    pub fn get_raw(&self, rsync_uri: &str) -> StorageResult<Option<Vec<u8>>> {
        let cf = self.cf(CF_RAW_OBJECTS)?;
        let v = self
            .db
            .get_cf(cf, rsync_uri.as_bytes())
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(v)
    }

    /// Delete a raw object by rsync URI.
    pub fn delete_raw(&self, rsync_uri: &str) -> StorageResult<()> {
        let cf = self.cf(CF_RAW_OBJECTS)?;
        self.db
            .delete_cf(cf, rsync_uri.as_bytes())
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(())
    }

    /// Store an encoded verified publication-point pack under `key`.
    pub fn put_verified(&self, key: &VerifiedKey, bytes: &[u8]) -> StorageResult<()> {
        let cf = self.cf(CF_VERIFIED_PUBLICATION_POINTS)?;
        self.db
            .put_cf(cf, key.as_str().as_bytes(), bytes)
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(())
    }

    /// Fetch an encoded verified publication-point pack.
    pub fn get_verified(&self, key: &VerifiedKey) -> StorageResult<Option<Vec<u8>>> {
        let cf = self.cf(CF_VERIFIED_PUBLICATION_POINTS)?;
        let v = self
            .db
            .get_cf(cf, key.as_str().as_bytes())
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(v)
    }

    /// Store RRDP state bytes keyed by notification URI.
    pub fn put_rrdp_state(&self, notification_uri: &str, bytes: &[u8]) -> StorageResult<()> {
        let cf = self.cf(CF_RRDP_STATE)?;
        self.db
            .put_cf(cf, notification_uri.as_bytes(), bytes)
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(())
    }

    /// Fetch RRDP state bytes for a notification URI.
    pub fn get_rrdp_state(&self, notification_uri: &str) -> StorageResult<Option<Vec<u8>>> {
        let cf = self.cf(CF_RRDP_STATE)?;
        let v = self
            .db
            .get_cf(cf, notification_uri.as_bytes())
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(v)
    }

    /// Delete RRDP state for a notification URI.
    #[allow(dead_code)]
    pub fn delete_rrdp_state(&self, notification_uri: &str) -> StorageResult<()> {
        let cf = self.cf(CF_RRDP_STATE)?;
        self.db
            .delete_cf(cf, notification_uri.as_bytes())
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(())
    }

    /// Iterate raw objects whose key starts with `prefix`.
    ///
    /// NOTE(review): iteration stops at the first rocksdb iterator error as
    /// well as at the first non-matching key; errors are silently dropped.
    #[allow(dead_code)]
    pub fn raw_iter_prefix<'a>(
        &'a self,
        prefix: &'a [u8],
    ) -> StorageResult<impl Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
        let cf = self.cf(CF_RAW_OBJECTS)?;
        let mode = IteratorMode::From(prefix, Direction::Forward);
        Ok(self
            .db
            .iterator_cf(cf, mode)
            .take_while(move |res| match res {
                Ok((k, _v)) => k.starts_with(prefix),
                Err(_) => false,
            })
            .filter_map(|res| res.ok()))
    }

    /// Iterate all raw objects; iterator errors are silently skipped.
    #[allow(dead_code)]
    pub fn raw_iter_all<'a>(
        &'a self,
    ) -> StorageResult<impl Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
        let cf = self.cf(CF_RAW_OBJECTS)?;
        let mode = IteratorMode::Start;
        Ok(self
            .db
            .iterator_cf(cf, mode)
            .filter_map(|res| res.ok()))
    }

    /// Iterate all verified packs; iterator errors are silently skipped.
    #[allow(dead_code)]
    pub fn verified_iter_all<'a>(
        &'a self,
    ) -> StorageResult<impl Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
        let cf = self.cf(CF_VERIFIED_PUBLICATION_POINTS)?;
        let mode = IteratorMode::Start;
        Ok(self
            .db
            .iterator_cf(cf, mode)
            .filter_map(|res| res.ok()))
    }

    /// Apply a prepared batch as a single rocksdb write.
    #[allow(dead_code)]
    pub fn write_batch(&self, batch: WriteBatch) -> StorageResult<()> {
        self.db
            .write(batch)
            .map_err(|e| StorageError::RocksDb(e.to_string()))?;
        Ok(())
    }
}
fn enable_blobdb_if_supported(opts: &mut Options) {
// Rust bindings may or may not expose BlobDB options depending on RocksDB build.
// We enable them in a best-effort way using method detection via trait bounds is
// not possible in stable Rust; so we keep this minimal.
//
// If the crate exposes `set_enable_blob_files`, use it; otherwise do nothing.
#[allow(unused_mut)]
let mut _enabled = false;
#[allow(dead_code)]
fn _set(opts: &mut Options) {
// If this method exists, this compiles and enables BlobDB.
opts.set_enable_blob_files(true);
opts.set_min_blob_size(1024);
}
// Call the helper in a way that fails to compile if methods don't exist.
// To keep compilation stable, this file is expected to compile with a rocksdb
// crate version that exposes these setters. If not, remove the call and use
// plain RocksDB (still compatible with later enabling).
_set(opts);
}
/// Everything needed to serve one verified publication point from cache.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct VerifiedPublicationPointPack {
    // Must equal `FORMAT_VERSION_V1`; checked in `validate_internal`.
    pub format_version: u32,
    pub manifest_rsync_uri: String,
    pub publication_point_rsync_uri: String,
    // Manifest validity window and the local verification time, all stored
    // as RFC 3339 strings (see `PackTime`).
    pub this_update: PackTime,
    pub next_update: PackTime,
    pub verified_at: PackTime,
    pub manifest_bytes: Vec<u8>,
    /// Objects listed in the Manifest fileList (RFC 9286 §4.2.1).
    ///
    /// Note: the manifest object itself is *not* listed in fileList; it is stored separately
    /// in `manifest_bytes`.
    pub files: Vec<PackFile>,
}
impl VerifiedPublicationPointPack {
    pub const FORMAT_VERSION_V1: u32 = 1;

    /// Serialize the pack to CBOR.
    pub fn encode(&self) -> StorageResult<Vec<u8>> {
        serde_cbor::to_vec(self).map_err(|e| PackDecodeError::Encode(e.to_string()).into())
    }

    /// Deserialize a pack from CBOR and run the internal consistency checks.
    pub fn decode(bytes: &[u8]) -> StorageResult<Self> {
        let pack: Self =
            serde_cbor::from_slice(bytes).map_err(|e| PackDecodeError::Decode(e.to_string()))?;
        pack.validate_internal()?;
        Ok(pack)
    }

    /// Check structural invariants: supported format version, non-empty URIs
    /// and manifest bytes, parseable timestamps, unique file URIs, non-empty
    /// file bodies, and file hashes matching their bytes. Returns the first
    /// violation found, in the order checked here.
    pub fn validate_internal(&self) -> StorageResult<()> {
        if self.format_version != Self::FORMAT_VERSION_V1 {
            return Err(PackDecodeError::UnsupportedFormatVersion(self.format_version).into());
        }
        if self.manifest_rsync_uri.is_empty() {
            return Err(PackDecodeError::MissingField("manifest_rsync_uri").into());
        }
        if self.publication_point_rsync_uri.is_empty() {
            return Err(PackDecodeError::MissingField("publication_point_rsync_uri").into());
        }
        // Timestamps are stored as strings; ensure each round-trips.
        self.this_update
            .parse()
            .map_err(|e| PackDecodeError::InvalidTimeField {
                field: "this_update",
                detail: e,
            })?;
        self.next_update
            .parse()
            .map_err(|e| PackDecodeError::InvalidTimeField {
                field: "next_update",
                detail: e,
            })?;
        self.verified_at
            .parse()
            .map_err(|e| PackDecodeError::InvalidTimeField {
                field: "verified_at",
                detail: e,
            })?;
        if self.manifest_bytes.is_empty() {
            return Err(PackDecodeError::MissingManifestBytes.into());
        }
        // Track URIs already seen so duplicates are rejected.
        let mut seen: HashSet<&str> = HashSet::with_capacity(self.files.len());
        for file in &self.files {
            if file.rsync_uri.is_empty() {
                return Err(PackDecodeError::MissingField("files[].rsync_uri").into());
            }
            if !seen.insert(file.rsync_uri.as_str()) {
                return Err(PackDecodeError::DuplicateFileRsyncUri(file.rsync_uri.clone()).into());
            }
            if file.bytes.is_empty() {
                return Err(PackDecodeError::EmptyFileBytes(file.rsync_uri.clone()).into());
            }
            // Recompute and compare the stored SHA-256.
            let computed = file.compute_sha256();
            if computed != file.sha256 {
                return Err(PackDecodeError::FileHashMismatch {
                    rsync_uri: file.rsync_uri.clone(),
                }
                .into());
            }
        }
        Ok(())
    }
}
/// One repository object stored inside a verified pack.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct PackFile {
    pub rsync_uri: String,
    pub bytes: Vec<u8>,
    // SHA-256 of `bytes`; checked by `VerifiedPublicationPointPack::validate_internal`.
    pub sha256: [u8; 32],
}

impl PackFile {
    /// Build from parts; the supplied hash is stored as-is (not recomputed).
    pub fn new(rsync_uri: impl Into<String>, bytes: Vec<u8>, sha256: [u8; 32]) -> Self {
        Self {
            rsync_uri: rsync_uri.into(),
            bytes,
            sha256,
        }
    }

    /// Build from bytes, computing the SHA-256 here.
    pub fn from_bytes_compute_sha256(rsync_uri: impl Into<String>, bytes: Vec<u8>) -> Self {
        let sha256 = compute_sha256_32(&bytes);
        Self::new(rsync_uri, bytes, sha256)
    }

    /// Recompute the SHA-256 of the stored bytes.
    pub fn compute_sha256(&self) -> [u8; 32] {
        compute_sha256_32(&self.bytes)
    }
}
/// A timestamp persisted as a string for stable serialization.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct PackTime {
    /// RFC 3339 timestamp in UTC, e.g. "2026-02-06T00:00:00Z".
    pub rfc3339_utc: String,
}

impl PackTime {
    /// Convert an offset datetime to UTC and format it as RFC 3339.
    pub fn from_utc_offset_datetime(t: time::OffsetDateTime) -> Self {
        use time::format_description::well_known::Rfc3339;
        let utc = t.to_offset(time::UtcOffset::UTC);
        // Formatting a valid OffsetDateTime as RFC 3339 is expected to succeed.
        let s = utc.format(&Rfc3339).expect("format RFC 3339 UTC time");
        Self { rfc3339_utc: s }
    }

    /// Parse the stored string back into an `OffsetDateTime`.
    pub fn parse(&self) -> Result<time::OffsetDateTime, String> {
        use time::format_description::well_known::Rfc3339;
        time::OffsetDateTime::parse(&self.rfc3339_utc, &Rfc3339).map_err(|e| e.to_string())
    }
}
/// Failure modes when encoding, decoding, or validating a verified pack.
#[derive(Debug, thiserror::Error)]
pub enum PackDecodeError {
    #[error("encode verified publication point pack failed: {0}")]
    Encode(String),
    #[error("decode verified publication point pack failed: {0}")]
    Decode(String),
    #[error("unsupported pack format_version: {0}")]
    UnsupportedFormatVersion(u32),
    #[error("missing required field: {0}")]
    MissingField(&'static str),
    #[error("missing manifest_bytes in verified pack")]
    MissingManifestBytes,
    #[error("duplicate file rsync uri in verified pack: {0}")]
    DuplicateFileRsyncUri(String),
    #[error("empty file bytes in verified pack: {0}")]
    EmptyFileBytes(String),
    #[error("file hash mismatch in verified pack: {rsync_uri}")]
    FileHashMismatch { rsync_uri: String },
    #[error("invalid time field {field}: {detail}")]
    InvalidTimeField { field: &'static str, detail: String },
}
/// SHA-256 of `bytes` as a fixed 32-byte array.
fn compute_sha256_32(bytes: &[u8]) -> [u8; 32] {
    let digest = sha2::Sha256::digest(bytes);
    digest
        .as_slice()
        .try_into()
        .expect("SHA-256 digest is exactly 32 bytes")
}

2
src/sync/mod.rs Normal file
View File

@ -0,0 +1,2 @@
pub mod repo;
pub mod rrdp;

128
src/sync/repo.rs Normal file
View File

@ -0,0 +1,128 @@
use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use crate::policy::{Policy, SyncPreference};
use crate::report::{RfcRef, Warning};
use crate::storage::RocksStore;
use crate::sync::rrdp::{
Fetcher as HttpFetcher, RrdpState, RrdpSyncError, parse_notification_snapshot,
sync_from_notification_snapshot,
};
/// Which transport ultimately populated `raw_objects` for this sync.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RepoSyncSource {
    Rrdp,
    Rsync,
}

/// Outcome of syncing one publication point.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepoSyncResult {
    // Transport that succeeded.
    pub source: RepoSyncSource,
    // Objects written to raw storage (0 when RRDP state was already current).
    pub objects_written: usize,
    // Non-fatal diagnostics, e.g. an RRDP-to-rsync fallback note.
    pub warnings: Vec<Warning>,
}

/// Fatal sync failures; RRDP and rsync errors convert via `#[from]`.
#[derive(Debug, thiserror::Error)]
pub enum RepoSyncError {
    #[error("RRDP sync failed: {0}")]
    Rrdp(#[from] RrdpSyncError),
    #[error("rsync fallback failed: {0}")]
    Rsync(#[from] RsyncFetchError),
    #[error("storage error: {0}")]
    Storage(String),
}
/// Sync a publication point into `raw_objects`.
///
/// v1 behavior:
/// - If `rrdp_notification_uri` is present and `policy.sync_preference` is `rrdp_then_rsync`,
///   try RRDP snapshot sync first (RFC 8182 §3.4.1-§3.4.3).
/// - On RRDP failure, fall back to rsync (RFC 8182 §3.4.5).
/// - If `sync_preference` is `rsync_only` or there is no RRDP URI, use rsync.
pub fn sync_publication_point(
    store: &RocksStore,
    policy: &Policy,
    rrdp_notification_uri: Option<&str>,
    rsync_base_uri: &str,
    http_fetcher: &dyn HttpFetcher,
    rsync_fetcher: &dyn RsyncFetcher,
) -> Result<RepoSyncResult, RepoSyncError> {
    match (policy.sync_preference, rrdp_notification_uri) {
        (SyncPreference::RrdpThenRsync, Some(notification_uri)) => {
            match try_rrdp_sync(store, notification_uri, http_fetcher) {
                Ok(written) => Ok(RepoSyncResult {
                    source: RepoSyncSource::Rrdp,
                    objects_written: written,
                    warnings: Vec::new(),
                }),
                Err(err) => {
                    // RRDP failure is non-fatal here: record a warning and
                    // fall back to rsync; only an rsync failure becomes Err.
                    let warnings = vec![
                        Warning::new(format!("RRDP failed; falling back to rsync: {err}"))
                            .with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5")])
                            .with_context(notification_uri),
                    ];
                    let written =
                        rsync_sync_into_raw_objects(store, rsync_base_uri, rsync_fetcher)?;
                    Ok(RepoSyncResult {
                        source: RepoSyncSource::Rsync,
                        objects_written: written,
                        warnings,
                    })
                }
            }
        }
        _ => {
            // rsync_only, or no RRDP notification URI available.
            let written = rsync_sync_into_raw_objects(store, rsync_base_uri, rsync_fetcher)?;
            Ok(RepoSyncResult {
                source: RepoSyncSource::Rsync,
                objects_written: written,
                warnings: Vec::new(),
            })
        }
    }
}
/// Fetch the notification document and apply the referenced snapshot,
/// returning the number of objects written (0 when stored state already
/// matches the notification's session/serial).
fn try_rrdp_sync(
    store: &RocksStore,
    notification_uri: &str,
    http_fetcher: &dyn HttpFetcher,
) -> Result<usize, RrdpSyncError> {
    let notification_xml = http_fetcher
        .fetch(notification_uri)
        .map_err(RrdpSyncError::Fetch)?;
    // Stage2 snapshot-only optimization: if the stored RRDP state matches the current notification's
    // session_id+serial, skip snapshot fetch/apply. This avoids repeatedly downloading/applying the
    // same snapshot when traversing many CA instances sharing an RRDP endpoint.
    //
    // RFC 8182 §3.4.1-§3.4.3: clients use notification to discover snapshot and can avoid fetching
    // snapshot when serial hasn't advanced.
    //
    // Any failure in this pre-check (parse, storage read, decode) simply
    // falls through to the full snapshot sync below.
    if let Ok(notif) = parse_notification_snapshot(&notification_xml) {
        if let Ok(Some(state_bytes)) = store.get_rrdp_state(notification_uri) {
            if let Ok(state) = RrdpState::decode(&state_bytes) {
                if state.session_id == notif.session_id.to_string() && state.serial == notif.serial
                {
                    // Up to date: report zero objects written.
                    return Ok(0);
                }
            }
        }
    }
    sync_from_notification_snapshot(store, notification_uri, &notification_xml, http_fetcher)
}
/// Fetch all objects under `rsync_base_uri` via `rsync_fetcher` and persist
/// each into raw storage, returning how many were written.
fn rsync_sync_into_raw_objects(
    store: &RocksStore,
    rsync_base_uri: &str,
    rsync_fetcher: &dyn RsyncFetcher,
) -> Result<usize, RepoSyncError> {
    let fetched = rsync_fetcher.fetch_objects(rsync_base_uri)?;
    // On success every fetched object is written, so the count is just the
    // fetched length; a mid-loop storage failure returns Err instead.
    let total = fetched.len();
    for (uri, body) in fetched {
        store
            .put_raw(&uri, &body)
            .map_err(|e| RepoSyncError::Storage(e.to_string()))?;
    }
    Ok(total)
}

290
src/sync/rrdp.rs Normal file
View File

@ -0,0 +1,290 @@
use crate::storage::RocksStore;
use base64::Engine;
use serde::{Deserialize, Serialize};
use sha2::Digest;
use uuid::Uuid;
// XML namespace required on RRDP documents.
const RRDP_XMLNS: &str = "http://www.ripe.net/rpki/rrdp";

/// Protocol-level errors for RRDP notification/snapshot handling; each
/// message cites the relevant RFC 8182 section.
#[derive(Debug, thiserror::Error)]
pub enum RrdpError {
    #[error("RRDP XML must be US-ASCII encoded (RFC 8182 §3.5.1.3, §3.5.2.3), got non-ASCII bytes")]
    NotAscii,
    #[error("RRDP XML parse error: {0} (RFC 8182 §3.5.1.3, §3.5.2.3)")]
    Xml(String),
    #[error(
        "RRDP root element must be <notification> or <snapshot>, got <{0}> (RFC 8182 §3.5.1.3, §3.5.2.3)"
    )]
    UnexpectedRoot(String),
    #[error("RRDP XML namespace must be {RRDP_XMLNS}, got {0} (RFC 8182 §3.5.1.3, §3.5.2.3)")]
    InvalidNamespace(String),
    #[error("RRDP version must be 1, got {0} (RFC 8182 §3.5.1.3, §3.5.2.3)")]
    InvalidVersion(String),
    #[error("RRDP session_id invalid UUID: {0} (RFC 8182 §3.5.1.3)")]
    InvalidSessionId(String),
    #[error("RRDP serial invalid unsigned integer: {0} (RFC 8182 §3.5.1.3)")]
    InvalidSerial(String),
    #[error("notification must contain exactly one <snapshot> element (RFC 8182 §3.5.1.3)")]
    SnapshotCountInvalid,
    #[error("snapshot/@uri missing (RFC 8182 §3.5.1.3)")]
    SnapshotUriMissing,
    #[error("snapshot/@hash missing (RFC 8182 §3.5.1.3)")]
    SnapshotHashMissing,
    #[error("snapshot/@hash must be hex encoding of SHA-256, got {0} (RFC 8182 §3.5.1.3)")]
    SnapshotHashInvalid(String),
    #[error("snapshot file hash mismatch (RFC 8182 §3.5.1.3)")]
    SnapshotHashMismatch,
    #[error("snapshot session_id mismatch: expected {expected}, got {got} (RFC 8182 §3.5.2.3)")]
    SnapshotSessionIdMismatch { expected: String, got: String },
    #[error("snapshot serial mismatch: expected {expected}, got {got} (RFC 8182 §3.5.2.3)")]
    SnapshotSerialMismatch { expected: u64, got: u64 },
    #[error("publish/@uri missing (RFC 8182 §3.5.2.3)")]
    PublishUriMissing,
    #[error("publish element missing base64 content (RFC 8182 §3.5.2.3)")]
    PublishContentMissing,
    #[error("publish base64 decode failed (RFC 8182 §3.5.2.3): {0}")]
    PublishBase64(String),
}
/// Errors from a full RRDP sync: protocol, transport, or storage.
#[derive(Debug, thiserror::Error)]
pub enum RrdpSyncError {
    #[error("{0}")]
    Rrdp(#[from] RrdpError),
    #[error("fetch failed: {0}")]
    Fetch(String),
    #[error("storage error: {0}")]
    Storage(String),
}

pub type RrdpSyncResult<T> = Result<T, RrdpSyncError>;

/// Transport abstraction for fetching RRDP documents by URI.
pub trait Fetcher {
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String>;
}
/// Persisted per-notification-URI RRDP state (RFC 8182 session identifier
/// plus the last applied serial), serialised as CBOR for storage.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct RrdpState {
    /// RRDP session identifier (UUID, stored in string form).
    pub session_id: String,
    /// Last serial successfully applied for this session.
    pub serial: u64,
}
impl RrdpState {
    /// Serialise to CBOR bytes for storage.
    pub fn encode(&self) -> Result<Vec<u8>, String> {
        serde_cbor::to_vec(self).map_err(|e| e.to_string())
    }
    /// Deserialise CBOR bytes previously produced by [`Self::encode`].
    pub fn decode(bytes: &[u8]) -> Result<Self, String> {
        serde_cbor::from_slice(bytes).map_err(|e| e.to_string())
    }
}
/// The snapshot reference extracted from an RRDP notification file
/// (RFC 8182 §3.5.1): where to fetch the snapshot and how to verify it.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct NotificationSnapshot {
    /// Session the notification belongs to.
    pub session_id: Uuid,
    /// Serial advertised by the notification (zero is rejected at parse time).
    pub serial: u64,
    /// URI of the snapshot file.
    pub snapshot_uri: String,
    /// Expected SHA-256 digest of the snapshot file contents.
    pub snapshot_hash_sha256: [u8; 32],
}
/// Parse an RRDP notification document (RFC 8182 §3.5.1) and extract the
/// single `<snapshot>` reference it is required to contain.
pub fn parse_notification_snapshot(xml: &[u8]) -> Result<NotificationSnapshot, RrdpError> {
    let doc = parse_rrdp_xml(xml)?;
    let root = doc.root_element();
    let root_name = root.tag_name().name();
    if root_name != "notification" {
        return Err(RrdpError::UnexpectedRoot(root_name.to_string()));
    }
    validate_root_common(&root)?;
    let session_id = parse_uuid_attr(&root, "session_id")?;
    let serial = parse_u64_attr(&root, "serial")?;
    // RFC 8182 requires exactly one <snapshot> child element.
    let mut snapshot_elems = root
        .children()
        .filter(|n| n.is_element() && n.tag_name().name() == "snapshot");
    let snapshot = match (snapshot_elems.next(), snapshot_elems.next()) {
        (Some(only), None) => only,
        _ => return Err(RrdpError::SnapshotCountInvalid),
    };
    let snapshot_uri = snapshot
        .attribute("uri")
        .ok_or(RrdpError::SnapshotUriMissing)?
        .to_string();
    let hash_hex = snapshot
        .attribute("hash")
        .ok_or(RrdpError::SnapshotHashMissing)?;
    Ok(NotificationSnapshot {
        session_id,
        serial,
        snapshot_uri,
        snapshot_hash_sha256: parse_sha256_hex(hash_hex)?,
    })
}
/// Fetch the snapshot referenced by `notification_xml`, verify its SHA-256
/// against the advertised hash (RFC 8182 §3.5.1.3), apply its publish
/// elements to `store`, and persist the resulting session state keyed by
/// `notification_uri`. Returns the number of objects published.
pub fn sync_from_notification_snapshot(
    store: &RocksStore,
    notification_uri: &str,
    notification_xml: &[u8],
    fetcher: &dyn Fetcher,
) -> RrdpSyncResult<usize> {
    let notif = parse_notification_snapshot(notification_xml)?;
    let snapshot_xml = fetcher
        .fetch(&notif.snapshot_uri)
        .map_err(RrdpSyncError::Fetch)?;
    // The snapshot body must hash to exactly what the notification promised.
    let digest = sha2::Sha256::digest(&snapshot_xml);
    if digest.as_slice() != notif.snapshot_hash_sha256.as_slice() {
        return Err(RrdpError::SnapshotHashMismatch.into());
    }
    let published = apply_snapshot(store, &snapshot_xml, notif.session_id, notif.serial)?;
    // Record session/serial only after the snapshot applied successfully.
    let state_bytes = RrdpState {
        session_id: notif.session_id.to_string(),
        serial: notif.serial,
    }
    .encode()
    .map_err(RrdpSyncError::Storage)?;
    store
        .put_rrdp_state(notification_uri, &state_bytes)
        .map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
    Ok(published)
}
/// Validate a snapshot document against the expected session/serial
/// (RFC 8182 §3.5.2.3) and write every `<publish>` element's decoded
/// content into `store`. Returns how many objects were written.
fn apply_snapshot(
    store: &RocksStore,
    snapshot_xml: &[u8],
    expected_session_id: Uuid,
    expected_serial: u64,
) -> Result<usize, RrdpSyncError> {
    let doc = parse_rrdp_xml(snapshot_xml)?;
    let root = doc.root_element();
    let root_name = root.tag_name().name();
    if root_name != "snapshot" {
        return Err(RrdpError::UnexpectedRoot(root_name.to_string()).into());
    }
    validate_root_common(&root)?;
    // The snapshot must belong to the session the notification promised.
    let got_session_id = parse_uuid_attr(&root, "session_id")?;
    if got_session_id != expected_session_id {
        return Err(RrdpError::SnapshotSessionIdMismatch {
            expected: expected_session_id.to_string(),
            got: got_session_id.to_string(),
        }
        .into());
    }
    // ... and carry the exact serial the notification advertised.
    let got_serial = parse_u64_attr(&root, "serial")?;
    if got_serial != expected_serial {
        return Err(RrdpError::SnapshotSerialMismatch {
            expected: expected_serial,
            got: got_serial,
        }
        .into());
    }
    let publish_elems = root
        .children()
        .filter(|n| n.is_element() && n.tag_name().name() == "publish");
    let mut count = 0usize;
    for publish in publish_elems {
        let uri = publish
            .attribute("uri")
            .ok_or(RrdpError::PublishUriMissing)?;
        // The base64 payload may contain arbitrary whitespace; strip it
        // before handing it to the strict decoder.
        let b64 = collect_element_text(&publish).ok_or(RrdpError::PublishContentMissing)?;
        let b64 = strip_all_ascii_whitespace(&b64);
        let decoded = base64::engine::general_purpose::STANDARD
            .decode(b64.as_bytes())
            .map_err(|e| RrdpError::PublishBase64(e.to_string()))?;
        store
            .put_raw(uri, &decoded)
            .map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
        count += 1;
    }
    Ok(count)
}
/// Parse RRDP XML after enforcing RFC 8182's US-ASCII encoding requirement.
fn parse_rrdp_xml(xml: &[u8]) -> Result<roxmltree::Document<'_>, RrdpError> {
    if !xml.is_ascii() {
        return Err(RrdpError::NotAscii);
    }
    // Every byte is ASCII at this point, so UTF-8 conversion cannot fail;
    // the error path is kept for defence in depth.
    let text = std::str::from_utf8(xml).map_err(|e| RrdpError::Xml(e.to_string()))?;
    roxmltree::Document::parse(text).map_err(|e| RrdpError::Xml(e.to_string()))
}
/// Check the default namespace and `version="1"` attribute shared by
/// notification and snapshot root elements (RFC 8182 §3.5.1.3, §3.5.2.3).
fn validate_root_common(root: &roxmltree::Node<'_, '_>) -> Result<(), RrdpError> {
    let ns = root.default_namespace().unwrap_or("");
    if ns != RRDP_XMLNS {
        return Err(RrdpError::InvalidNamespace(ns.to_string()));
    }
    match root.attribute("version") {
        Some("1") => Ok(()),
        other => Err(RrdpError::InvalidVersion(other.unwrap_or("").to_string())),
    }
}
/// Parse a UUID-valued attribute; a missing attribute is treated as an
/// empty (and therefore invalid) UUID string.
fn parse_uuid_attr(root: &roxmltree::Node<'_, '_>, name: &'static str) -> Result<Uuid, RrdpError> {
    let raw = root.attribute(name).unwrap_or_default();
    Uuid::parse_str(raw).map_err(|_| RrdpError::InvalidSessionId(raw.to_string()))
}
/// Parse an unsigned decimal attribute; zero is rejected because RRDP
/// serials start at 1 (RFC 8182 §3.5.1.3).
fn parse_u64_attr(root: &roxmltree::Node<'_, '_>, name: &'static str) -> Result<u64, RrdpError> {
    let raw = root.attribute(name).unwrap_or_default();
    match raw.parse::<u64>() {
        Ok(v) if v != 0 => Ok(v),
        _ => Err(RrdpError::InvalidSerial(raw.to_string())),
    }
}
/// Decode a hex-encoded SHA-256 digest into its fixed 32-byte form.
fn parse_sha256_hex(s: &str) -> Result<[u8; 32], RrdpError> {
    let invalid = || RrdpError::SnapshotHashInvalid(s.to_string());
    let decoded = hex::decode(s).map_err(|_| invalid())?;
    // The array conversion enforces the exact 32-byte length.
    <[u8; 32]>::try_from(decoded).map_err(|_| invalid())
}
/// Concatenate the direct text children of an element, returning `None`
/// when the result is empty.
fn collect_element_text(node: &roxmltree::Node<'_, '_>) -> Option<String> {
    let text: String = node
        .children()
        .filter(|c| c.is_text())
        .filter_map(|c| c.text())
        .collect();
    if text.is_empty() { None } else { Some(text) }
}
/// Remove every ASCII whitespace character (space, tab, CR, LF, FF) from `s`.
fn strip_all_ascii_whitespace(s: &str) -> String {
    // Splitting on ASCII-whitespace runs and concatenating the pieces is
    // equivalent to filtering the whitespace characters out.
    s.split_ascii_whitespace().collect()
}

View File

@ -0,0 +1,123 @@
use crate::data_model::oid::{OID_AD_CA_REPOSITORY, OID_AD_RPKI_MANIFEST, OID_AD_RPKI_NOTIFY};
use crate::data_model::rc::{ResourceCertKind, ResourceCertificate, SubjectInfoAccess};
/// The publication URIs of a CA instance, derived from the SIA extension
/// of its CA certificate (see `ca_instance_uris_from_ca_certificate`).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CaInstanceUris {
    /// CA repository base rsync URI (must end with `/`).
    pub rsync_base_uri: String,
    /// rsync URI for the manifest object (`.mft`).
    pub manifest_rsync_uri: String,
    /// Publication point rsync URI (RFC 9286 terminology). In v1 this equals `rsync_base_uri`.
    pub publication_point_rsync_uri: String,
    /// Optional RRDP notification URI (https).
    pub rrdp_notification_uri: Option<String>,
}
/// Reasons a CA certificate's SIA cannot be turned into [`CaInstanceUris`].
#[derive(Debug, thiserror::Error)]
pub enum CaInstanceUrisError {
    #[error("certificate must be a CA certificate (RFC 6487 §4.8.1)")]
    NotCa,
    #[error(
        "CA certificate must contain Subject Information Access extension (RFC 6487 §4.8.8; RFC 5280 §4.2.2.2)"
    )]
    MissingSia,
    #[error("CA certificate SIA must contain id-ad-caRepository rsync URI (RFC 6487 §4.8.8.1)")]
    MissingCaRepository,
    #[error("CA certificate SIA must contain id-ad-rpkiManifest rsync URI (RFC 6487 §4.8.8.2)")]
    MissingRpkiManifest,
    #[error(
        "SIA id-ad-caRepository accessLocation must be rsync:// URI, got {0} (RFC 6487 §4.8.8.1)"
    )]
    CaRepositoryNotRsync(String),
    #[error(
        "SIA id-ad-rpkiManifest accessLocation must be rsync:// URI, got {0} (RFC 6487 §4.8.8.2)"
    )]
    RpkiManifestNotRsync(String),
    #[error(
        "SIA id-ad-rpkiNotify accessLocation must be https:// URI, got {0} (RFC 8182 §3.4.1; RFC 6487 §4.8.8.3)"
    )]
    RpkiNotifyNotHttps(String),
    #[error(
        "manifest rsync URI must be under CA publication point: manifest={manifest_rsync_uri} publication_point={publication_point_rsync_uri} (RFC 9286 §6.1)"
    )]
    ManifestNotUnderPublicationPoint {
        manifest_rsync_uri: String,
        publication_point_rsync_uri: String,
    },
}
/// Derive a CA instance's publication URIs from the SIA extension of its
/// CA certificate (RFC 6487 §4.8.8).
///
/// For each access method the first accessLocation wins, but the URI
/// scheme of every occurrence is still validated. The returned repository
/// base is normalised to end with `/`, and the manifest URI must live
/// under it (RFC 9286 §6.1).
pub fn ca_instance_uris_from_ca_certificate(
    cert: &ResourceCertificate,
) -> Result<CaInstanceUris, CaInstanceUrisError> {
    if cert.kind != ResourceCertKind::Ca {
        return Err(CaInstanceUrisError::NotCa);
    }
    let access_descriptions = match cert.tbs.extensions.subject_info_access.as_ref() {
        Some(SubjectInfoAccess::Ca(ca)) => &ca.access_descriptions,
        // An EE-style SIA on a CA certificate is treated the same as a
        // missing extension.
        Some(SubjectInfoAccess::Ee(_)) | None => return Err(CaInstanceUrisError::MissingSia),
    };
    let mut ca_repo: Option<String> = None;
    let mut manifest: Option<String> = None;
    let mut notify: Option<String> = None;
    for ad in access_descriptions {
        let scheme = ad.access_location.scheme();
        if ad.access_method_oid == OID_AD_CA_REPOSITORY {
            let uri = ad.access_location.to_string();
            if scheme != "rsync" {
                return Err(CaInstanceUrisError::CaRepositoryNotRsync(uri));
            }
            ca_repo.get_or_insert(uri);
        } else if ad.access_method_oid == OID_AD_RPKI_MANIFEST {
            let uri = ad.access_location.to_string();
            if scheme != "rsync" {
                return Err(CaInstanceUrisError::RpkiManifestNotRsync(uri));
            }
            manifest.get_or_insert(uri);
        } else if ad.access_method_oid == OID_AD_RPKI_NOTIFY {
            let uri = ad.access_location.to_string();
            if scheme != "https" {
                return Err(CaInstanceUrisError::RpkiNotifyNotHttps(uri));
            }
            notify.get_or_insert(uri);
        }
    }
    let mut publication_point_rsync_uri =
        ca_repo.ok_or(CaInstanceUrisError::MissingCaRepository)?;
    if !publication_point_rsync_uri.ends_with('/') {
        publication_point_rsync_uri.push('/');
    }
    let manifest_rsync_uri = manifest.ok_or(CaInstanceUrisError::MissingRpkiManifest)?;
    // With the trailing '/' guaranteed, a simple prefix test establishes
    // "under the publication point".
    if !manifest_rsync_uri.starts_with(&publication_point_rsync_uri) {
        return Err(CaInstanceUrisError::ManifestNotUnderPublicationPoint {
            manifest_rsync_uri,
            publication_point_rsync_uri,
        });
    }
    Ok(CaInstanceUris {
        rsync_base_uri: publication_point_rsync_uri.clone(),
        manifest_rsync_uri,
        publication_point_rsync_uri,
        rrdp_notification_uri: notify,
    })
}

965
src/validation/ca_path.rs Normal file
View File

@ -0,0 +1,965 @@
use crate::data_model::common::BigUnsigned;
use crate::data_model::crl::{CrlDecodeError, CrlVerifyError, RpkixCrl};
use crate::data_model::oid::OID_KEY_USAGE;
use crate::data_model::rc::{
AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpResourceSet, ResourceCertKind,
ResourceCertificate, ResourceCertificateDecodeError,
};
use x509_parser::prelude::{FromDer, X509Certificate};
/// Successful output of subordinate-CA validation: the decoded child and
/// issuer certificates, the issuer CRL, and the child's resolved
/// (inherit-free) RFC 3779 resource sets.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ValidatedSubordinateCa {
    /// Decoded child CA certificate.
    pub child_ca: ResourceCertificate,
    /// Decoded issuer CA certificate.
    pub issuer_ca: ResourceCertificate,
    /// Issuer CRL used for the revocation check.
    pub issuer_crl: RpkixCrl,
    /// Child's effective IP resources after inherit resolution (`None` if absent).
    pub effective_ip_resources: Option<IpResourceSet>,
    /// Child's effective AS resources after inherit resolution (`None` if absent).
    pub effective_as_resources: Option<AsResourceSet>,
}
/// Every way subordinate-CA path validation can fail; messages cite the
/// RFC sections the corresponding check implements.
#[derive(Debug, thiserror::Error)]
pub enum CaPathError {
    #[error("child CA certificate decode failed: {0} (RFC 6487 §4; RFC 5280 §4.1)")]
    ChildDecode(#[from] ResourceCertificateDecodeError),
    #[error("issuer CA certificate decode failed: {0} (RFC 6487 §4; RFC 5280 §4.1)")]
    IssuerDecode(ResourceCertificateDecodeError),
    #[error("issuer CRL decode failed: {0} (RFC 6487 §5; RFC 9829 §3.1; RFC 5280 §5.1)")]
    CrlDecode(#[from] CrlDecodeError),
    #[error(
        "child certificate must be a CA resource certificate (RFC 6487 §4.8.1; RFC 5280 §4.2.1.9)"
    )]
    ChildNotCa,
    #[error(
        "issuer certificate must be a CA resource certificate (RFC 6487 §4.8.1; RFC 5280 §4.2.1.9)"
    )]
    IssuerNotCa,
    #[error(
        "child issuer DN does not match issuer CA subject DN: child.issuer={child_issuer_dn} issuer.subject={issuer_subject_dn} (RFC 5280 §6.1)"
    )]
    IssuerSubjectMismatch {
        child_issuer_dn: String,
        issuer_subject_dn: String,
    },
    #[error("child CA certificate signature verification failed: {0} (RFC 5280 §6.1)")]
    ChildSignatureInvalid(String),
    #[error("certificate not valid at validation_time (RFC 5280 §4.1.2.5; RFC 5280 §6.1)")]
    CertificateNotValidAtTime,
    #[error(
        "child CA KeyUsage extension missing (RFC 6487 §4.8.4; RFC 5280 §4.2.1.3)"
    )]
    KeyUsageMissing,
    #[error("child CA KeyUsage criticality must be critical (RFC 6487 §4.8.4; RFC 5280 §4.2.1.3)")]
    KeyUsageNotCritical,
    #[error(
        "child CA KeyUsage must have only keyCertSign and cRLSign set (RFC 6487 §4.8.4)"
    )]
    KeyUsageInvalidBits,
    #[error(
        "CRL signature/binding verification failed: {0} (RFC 5280 §6.3.3; RFC 6487 §5; RFC 9829 §3.1)"
    )]
    CrlVerify(#[from] CrlVerifyError),
    #[error(
        "CRL not valid at validation_time (RFC 5280 §6.3.3(g); RFC 5280 §5.1.2.4-§5.1.2.5; RFC 6487 §5)"
    )]
    CrlNotValidAtTime,
    #[error("child CA certificate is revoked by issuer CRL (RFC 5280 §6.3.3; RFC 6487 §5)")]
    ChildRevoked,
    #[error(
        "child CA certificate must contain at least one RFC 3779 resource extension (IP or AS) (RFC 6487 §4.8.10-§4.8.11)"
    )]
    ResourcesMissing,
    #[error(
        "resource extension inheritance cannot be resolved (parent missing resources) (RFC 6487 §7.2)"
    )]
    InheritWithoutParentResources,
    #[error("child CA resources are not a subset of issuer resources (RFC 6487 §7.2)")]
    ResourcesNotSubset,
    #[error("issuer CA subjectKeyIdentifier missing (RFC 6487 §4.8.2)")]
    IssuerSkiMissing,
    #[error("child CA authorityKeyIdentifier missing (RFC 6487 §4.8.3; RFC 5280 §4.2.1.1)")]
    ChildAkiMissing,
    #[error(
        "child CA authorityKeyIdentifier does not match issuer subjectKeyIdentifier (RFC 6487 §4.8.3)"
    )]
    ChildAkiMismatch,
    #[error("child CA authorityInfoAccess missing (RFC 6487 §4.8.7; RFC 5280 §4.2.2.1)")]
    ChildAiaMissing,
    #[error(
        "child CA authorityInfoAccess does not reference issuer certificate rsync URI (RFC 6487 §4.8.7)"
    )]
    ChildAiaIssuerUriMismatch,
    #[error("child CA CRLDistributionPoints missing (RFC 6487 §4.8.6; RFC 5280 §4.2.1.13)")]
    ChildCrlDpMissing,
    #[error(
        "child CA CRLDistributionPoints does not reference issuer CRL rsync URI (RFC 6487 §4.8.6)"
    )]
    ChildCrlDpUriMismatch,
}
/// Validate a subordinate CA certificate against its issuer CA and the
/// issuer's CRL at `validation_time`, returning the decoded artefacts and
/// the child's effective (inherit-free) RFC 3779 resource sets.
///
/// Checks run in a fixed order, so the first failing check determines the
/// reported error: decode, DN chaining, AKI/SKI binding, AIA binding
/// (only when `issuer_ca_rsync_uri` is given), CRLDP binding, validity
/// windows, child signature, KeyUsage bits, CRL verification/currency,
/// revocation, then resource subset/inherit resolution.
pub fn validate_subordinate_ca_cert(
    child_ca_der: &[u8],
    issuer_ca_der: &[u8],
    issuer_crl_der: &[u8],
    issuer_ca_rsync_uri: Option<&str>,
    issuer_crl_rsync_uri: &str,
    issuer_effective_ip: Option<&IpResourceSet>,
    issuer_effective_as: Option<&AsResourceSet>,
    validation_time: time::OffsetDateTime,
) -> Result<ValidatedSubordinateCa, CaPathError> {
    // Decode both certificates and confirm each really is a CA certificate.
    let child_ca = ResourceCertificate::decode_der(child_ca_der)?;
    if child_ca.kind != ResourceCertKind::Ca {
        return Err(CaPathError::ChildNotCa);
    }
    let issuer_ca =
        ResourceCertificate::decode_der(issuer_ca_der).map_err(CaPathError::IssuerDecode)?;
    if issuer_ca.kind != ResourceCertKind::Ca {
        return Err(CaPathError::IssuerNotCa);
    }
    // Name chaining (RFC 5280 §6.1): child.issuer must equal issuer.subject.
    if child_ca.tbs.issuer_dn != issuer_ca.tbs.subject_dn {
        return Err(CaPathError::IssuerSubjectMismatch {
            child_issuer_dn: child_ca.tbs.issuer_dn.clone(),
            issuer_subject_dn: issuer_ca.tbs.subject_dn.clone(),
        });
    }
    validate_child_aki_matches_issuer_ski(&child_ca, &issuer_ca)?;
    // AIA binding is only checked when the caller knows the issuer
    // certificate's rsync URI.
    if let Some(expected_issuer_uri) = issuer_ca_rsync_uri {
        validate_child_aia_points_to_issuer_uri(&child_ca, expected_issuer_uri)?;
    }
    validate_child_crldp_contains_issuer_crl_uri(&child_ca, issuer_crl_rsync_uri)?;
    // Both certificates must be inside their validity windows.
    if !time_within_validity(
        validation_time,
        child_ca.tbs.validity_not_before,
        child_ca.tbs.validity_not_after,
    ) || !time_within_validity(
        validation_time,
        issuer_ca.tbs.validity_not_before,
        issuer_ca.tbs.validity_not_after,
    ) {
        return Err(CaPathError::CertificateNotValidAtTime);
    }
    verify_cert_signature_with_issuer(child_ca_der, issuer_ca_der)?;
    validate_child_ca_key_usage(child_ca_der)?;
    // CRL: decode, verify the issuer signed it, check it is current, then
    // look for the child's serial on it.
    let issuer_crl = RpkixCrl::decode_der(issuer_crl_der)?;
    issuer_crl.verify_signature_with_issuer_certificate_der(issuer_ca_der)?;
    if !crl_valid_at_time(&issuer_crl, validation_time) {
        return Err(CaPathError::CrlNotValidAtTime);
    }
    if is_serial_revoked_by_crl(&child_ca, &issuer_crl) {
        return Err(CaPathError::ChildRevoked);
    }
    // Resolve inherit and enforce the RFC 6487 §7.2 subset requirement;
    // at least one resource family must remain.
    let effective_ip_resources = resolve_child_ip_resources(
        child_ca.tbs.extensions.ip_resources.as_ref(),
        issuer_effective_ip,
    )?;
    let effective_as_resources = resolve_child_as_resources(
        child_ca.tbs.extensions.as_resources.as_ref(),
        issuer_effective_as,
    )?;
    if effective_ip_resources.is_none() && effective_as_resources.is_none() {
        return Err(CaPathError::ResourcesMissing);
    }
    Ok(ValidatedSubordinateCa {
        child_ca,
        issuer_ca,
        issuer_crl,
        effective_ip_resources,
        effective_as_resources,
    })
}
/// Require the child's authorityKeyIdentifier to equal the issuer's
/// subjectKeyIdentifier (RFC 6487 §4.8.2-§4.8.3).
fn validate_child_aki_matches_issuer_ski(
    child: &ResourceCertificate,
    issuer: &ResourceCertificate,
) -> Result<(), CaPathError> {
    let issuer_ski = issuer
        .tbs
        .extensions
        .subject_key_identifier
        .as_deref()
        .ok_or(CaPathError::IssuerSkiMissing)?;
    let child_aki = child
        .tbs
        .extensions
        .authority_key_identifier
        .as_deref()
        .ok_or(CaPathError::ChildAkiMissing)?;
    if child_aki == issuer_ski {
        Ok(())
    } else {
        Err(CaPathError::ChildAkiMismatch)
    }
}
/// Require the child's AIA caIssuers list to reference the issuer
/// certificate's rsync URI (RFC 6487 §4.8.7).
fn validate_child_aia_points_to_issuer_uri(
    child: &ResourceCertificate,
    issuer_ca_rsync_uri: &str,
) -> Result<(), CaPathError> {
    match child.tbs.extensions.ca_issuers_uris.as_ref() {
        None => Err(CaPathError::ChildAiaMissing),
        Some(uris) if uris.iter().any(|u| u.as_str() == issuer_ca_rsync_uri) => Ok(()),
        Some(_) => Err(CaPathError::ChildAiaIssuerUriMismatch),
    }
}
/// Require the child's CRLDistributionPoints to reference the issuer
/// CRL's rsync URI (RFC 6487 §4.8.6).
fn validate_child_crldp_contains_issuer_crl_uri(
    child: &ResourceCertificate,
    issuer_crl_rsync_uri: &str,
) -> Result<(), CaPathError> {
    match child.tbs.extensions.crl_distribution_points_uris.as_ref() {
        None => Err(CaPathError::ChildCrlDpMissing),
        Some(uris) if uris.iter().any(|u| u.as_str() == issuer_crl_rsync_uri) => Ok(()),
        Some(_) => Err(CaPathError::ChildCrlDpUriMismatch),
    }
}
/// Enforce RFC 6487 §4.8.4 KeyUsage rules for a CA certificate: the
/// extension must be present, marked critical, and set exactly
/// keyCertSign and cRLSign.
fn validate_child_ca_key_usage(child_ca_der: &[u8]) -> Result<(), CaPathError> {
    let (rem, cert) = X509Certificate::from_der(child_ca_der)
        .map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))?;
    if !rem.is_empty() {
        return Err(CaPathError::ChildSignatureInvalid(
            "trailing bytes after child CA certificate".to_string(),
        ));
    }
    // Criticality has to be read from the raw extension list; the parsed
    // KeyUsage accessor below does not expose it.
    let critical = cert
        .extensions()
        .iter()
        .find(|ext| ext.oid.to_id_string() == OID_KEY_USAGE)
        .map(|ext| ext.critical)
        .ok_or(CaPathError::KeyUsageMissing)?;
    if !critical {
        return Err(CaPathError::KeyUsageNotCritical);
    }
    let ku = cert
        .key_usage()
        .map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))?
        .ok_or(CaPathError::KeyUsageMissing)?;
    let bits = &ku.value;
    // Exactly keyCertSign + cRLSign; every other bit must be clear.
    let forbidden_bit_set = bits.digital_signature()
        || bits.non_repudiation()
        || bits.key_encipherment()
        || bits.data_encipherment()
        || bits.key_agreement()
        || bits.encipher_only()
        || bits.decipher_only();
    if !bits.key_cert_sign() || !bits.crl_sign() || forbidden_bit_set {
        return Err(CaPathError::KeyUsageInvalidBits);
    }
    Ok(())
}
fn verify_cert_signature_with_issuer(
child_der: &[u8],
issuer_ca_der: &[u8],
) -> Result<(), CaPathError> {
let (rem, child) = X509Certificate::from_der(child_der)
.map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))?;
if !rem.is_empty() {
return Err(CaPathError::ChildSignatureInvalid(
"trailing bytes after child certificate".to_string(),
));
}
let (rem, issuer) = X509Certificate::from_der(issuer_ca_der)
.map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))?;
if !rem.is_empty() {
return Err(CaPathError::ChildSignatureInvalid(
"trailing bytes after issuer certificate".to_string(),
));
}
child
.verify_signature(Some(&issuer.tbs_certificate.subject_pki))
.map_err(|e| CaPathError::ChildSignatureInvalid(e.to_string()))
}
/// True when `t` lies within the inclusive [not_before, not_after] window,
/// with all three instants normalised to UTC before comparison.
fn time_within_validity(
    t: time::OffsetDateTime,
    not_before: time::OffsetDateTime,
    not_after: time::OffsetDateTime,
) -> bool {
    let utc = |x: time::OffsetDateTime| x.to_offset(time::UtcOffset::UTC);
    let (t, lo, hi) = (utc(t), utc(not_before), utc(not_after));
    (lo..=hi).contains(&t)
}
/// True when `t` is in the CRL's half-open [thisUpdate, nextUpdate)
/// window, compared in UTC.
fn crl_valid_at_time(crl: &RpkixCrl, t: time::OffsetDateTime) -> bool {
    let t = t.to_offset(time::UtcOffset::UTC);
    let lo = crl.this_update.utc.to_offset(time::UtcOffset::UTC);
    let hi = crl.next_update.utc.to_offset(time::UtcOffset::UTC);
    lo <= t && t < hi
}
/// True when the certificate's serial number appears on the CRL's revoked
/// list.
fn is_serial_revoked_by_crl(cert: &ResourceCertificate, crl: &RpkixCrl) -> bool {
    let needle = BigUnsigned::from_biguint(&cert.tbs.serial_number);
    crl.revoked_certs
        .iter()
        .map(|rc| &rc.serial_number)
        .any(|serial| *serial == needle)
}
/// Resolve the child's IP resources against the issuer's effective set:
/// per-AFI `inherit` is replaced by the parent's items for that AFI, and
/// explicit items must be a subset of the parent's (RFC 6487 §7.2).
/// Returns `Ok(None)` when the child carries no IP resource extension.
fn resolve_child_ip_resources(
    child_ip: Option<&IpResourceSet>,
    issuer_effective: Option<&IpResourceSet>,
) -> Result<Option<IpResourceSet>, CaPathError> {
    let Some(child_ip) = child_ip else {
        return Ok(None);
    };
    let Some(parent) = issuer_effective else {
        if child_ip.has_any_inherit() {
            return Err(CaPathError::InheritWithoutParentResources);
        }
        // With no parent effective resources, we cannot validate subset.
        return Err(CaPathError::ResourcesNotSubset);
    };
    // Resolve per-AFI inherit, producing an effective set with no inherit.
    let parent_by_afi = ip_resources_by_afi_items(parent)?;
    let mut out_families: Vec<crate::data_model::rc::IpAddressFamily> = Vec::new();
    for fam in &child_ip.families {
        match &fam.choice {
            IpAddressChoice::Inherit => {
                // Inherit is only resolvable when the parent covers this AFI.
                let items = parent_by_afi
                    .get(&fam.afi)
                    .ok_or(CaPathError::InheritWithoutParentResources)?;
                out_families.push(crate::data_model::rc::IpAddressFamily {
                    afi: fam.afi,
                    choice: IpAddressChoice::AddressesOrRanges(items.clone()),
                });
            }
            IpAddressChoice::AddressesOrRanges(items) => {
                // Subset check against parent union for that AFI.
                let parent_set = ip_resources_single_afi(parent, fam.afi, parent_by_afi.get(&fam.afi));
                if !ip_family_items_subset(items, &parent_set) {
                    return Err(CaPathError::ResourcesNotSubset);
                }
                out_families.push(crate::data_model::rc::IpAddressFamily {
                    afi: fam.afi,
                    choice: IpAddressChoice::AddressesOrRanges(items.clone()),
                });
            }
        }
    }
    Ok(Some(IpResourceSet { families: out_families }))
}
fn resolve_child_as_resources(
child_as: Option<&AsResourceSet>,
issuer_effective: Option<&AsResourceSet>,
) -> Result<Option<AsResourceSet>, CaPathError> {
let Some(child_as) = child_as else {
return Ok(None);
};
let Some(parent) = issuer_effective else {
if matches!(child_as.asnum, Some(AsIdentifierChoice::Inherit))
|| matches!(child_as.rdi, Some(AsIdentifierChoice::Inherit))
{
return Err(CaPathError::InheritWithoutParentResources);
}
return Err(CaPathError::ResourcesNotSubset);
};
let asnum = match child_as.asnum.as_ref() {
None => None,
Some(AsIdentifierChoice::Inherit) => parent.asnum.clone().ok_or(CaPathError::InheritWithoutParentResources).map(Some)?,
Some(_) => {
if !as_choice_subset(child_as.asnum.as_ref(), parent.asnum.as_ref()) {
return Err(CaPathError::ResourcesNotSubset);
}
child_as.asnum.clone()
}
};
let rdi = match child_as.rdi.as_ref() {
None => None,
Some(AsIdentifierChoice::Inherit) => parent.rdi.clone().ok_or(CaPathError::InheritWithoutParentResources).map(Some)?,
Some(_) => {
if !as_choice_subset(child_as.rdi.as_ref(), parent.rdi.as_ref()) {
return Err(CaPathError::ResourcesNotSubset);
}
child_as.rdi.clone()
}
};
Ok(Some(AsResourceSet { asnum, rdi }))
}
/// True when every AS interval in `child` is covered by `parent`.
/// `inherit` on either side short-circuits to true; actual resolution of
/// inherit is handled by the caller.
fn as_choice_subset(
    child: Option<&AsIdentifierChoice>,
    parent: Option<&AsIdentifierChoice>,
) -> bool {
    match (child, parent) {
        (None, _) => true,
        (Some(_), None) => false,
        (Some(AsIdentifierChoice::Inherit), Some(_))
        | (Some(_), Some(AsIdentifierChoice::Inherit)) => true,
        (Some(c), Some(p)) => {
            let child_intervals = as_choice_to_merged_intervals(c);
            let parent_intervals = as_choice_to_merged_intervals(p);
            child_intervals
                .iter()
                .all(|&(lo, hi)| as_interval_is_covered(&parent_intervals, lo, hi))
        }
    }
}
/// Flatten an AS choice into `(min, max)` intervals, sorted by start and
/// merged. `Inherit` yields no intervals.
fn as_choice_to_merged_intervals(choice: &AsIdentifierChoice) -> Vec<(u32, u32)> {
    let mut intervals: Vec<(u32, u32)> = match choice {
        AsIdentifierChoice::Inherit => Vec::new(),
        AsIdentifierChoice::AsIdsOrRanges(items) => items
            .iter()
            .map(|item| match item {
                crate::data_model::rc::AsIdOrRange::Id(id) => (*id, *id),
                crate::data_model::rc::AsIdOrRange::Range { min, max } => (*min, *max),
            })
            .collect(),
    };
    intervals.sort_by_key(|&(start, _)| start);
    merge_as_intervals(&intervals)
}
/// Merge AS intervals (pre-sorted by start) that overlap or are adjacent;
/// e.g. (1,2) and (3,4) merge because 3 <= 2 + 1. Saturating addition
/// keeps an upper bound of u32::MAX from wrapping.
fn merge_as_intervals(v: &[(u32, u32)]) -> Vec<(u32, u32)> {
    let mut merged: Vec<(u32, u32)> = Vec::with_capacity(v.len());
    for &(start, end) in v {
        match merged.last_mut() {
            Some(prev) if start <= prev.1.saturating_add(1) => {
                prev.1 = prev.1.max(end);
            }
            _ => merged.push((start, end)),
        }
    }
    merged
}
/// True when [min, max] lies entirely inside one interval of `parent`,
/// which must be sorted by start and merged (so at most one interval can
/// contain `min`, allowing the early cut-off).
fn as_interval_is_covered(parent: &[(u32, u32)], min: u32, max: u32) -> bool {
    parent
        .iter()
        .take_while(|&&(pmin, _)| pmin <= min)
        .any(|&(pmin, pmax)| pmin <= min && max <= pmax)
}
/// Address-family discriminator used as a hash-map key when grouping IP
/// intervals by AFI.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum AfiKey {
    V4,
    V6,
}
/// Index an inherit-free IP resource set by AFI. Errors if any family
/// still says `inherit`, since such a set cannot serve as a parent
/// baseline for inherit resolution.
fn ip_resources_by_afi_items(
    set: &IpResourceSet,
) -> Result<std::collections::BTreeMap<crate::data_model::rc::Afi, Vec<crate::data_model::rc::IpAddressOrRange>>, CaPathError> {
    let mut by_afi = std::collections::BTreeMap::new();
    for fam in &set.families {
        let IpAddressChoice::AddressesOrRanges(items) = &fam.choice else {
            return Err(CaPathError::InheritWithoutParentResources);
        };
        by_afi.insert(fam.afi, items.clone());
    }
    Ok(by_afi)
}
/// Build a single-family `IpResourceSet` for `afi` from the parent's items
/// for that AFI. When the parent has no entry for this AFI the result is
/// empty, which makes any later subset check fail.
fn ip_resources_single_afi(
    parent: &IpResourceSet,
    afi: crate::data_model::rc::Afi,
    items: Option<&Vec<crate::data_model::rc::IpAddressOrRange>>,
) -> IpResourceSet {
    // `parent` is kept in the signature for call-site context but is not
    // consulted here.
    let _ = parent;
    let families = items
        .map(|items| {
            vec![crate::data_model::rc::IpAddressFamily {
                afi,
                choice: IpAddressChoice::AddressesOrRanges(items.clone()),
            }]
        })
        .unwrap_or_default();
    IpResourceSet { families }
}
/// True when every child prefix/range lies inside the parent set's merged
/// intervals.
///
/// NOTE(review): `parent_set` is expected to contain exactly one AFI (the
/// caller builds it via `ip_resources_single_afi`); with more than one
/// family the HashMap iteration order would make the picked entry
/// arbitrary — confirm callers uphold the single-AFI invariant.
fn ip_family_items_subset(
    child_items: &[crate::data_model::rc::IpAddressOrRange],
    parent_set: &IpResourceSet,
) -> bool {
    let parent_by_afi = ip_resources_to_merged_intervals(parent_set);
    // parent_set should contain exactly one AFI.
    let (afi_key, parent_intervals) = match parent_by_afi.into_iter().next() {
        // Empty parent (AFI absent): nothing can be a subset.
        None => return false,
        Some(v) => v,
    };
    // Flatten the child's prefixes/ranges into byte-string intervals.
    let mut child_intervals: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
    for item in child_items {
        match item {
            crate::data_model::rc::IpAddressOrRange::Prefix(p) => child_intervals.push(prefix_to_range(p)),
            crate::data_model::rc::IpAddressOrRange::Range(r) => child_intervals.push((r.min.clone(), r.max.clone())),
        }
    }
    child_intervals.sort_by(|(a, _), (b, _)| a.cmp(b));
    let child_intervals = merge_ip_intervals(&child_intervals);
    let _ = afi_key;
    for (cmin, cmax) in &child_intervals {
        if !interval_is_covered(&parent_intervals, cmin, cmax) {
            return false;
        }
    }
    true
}
/// Group an IP resource set's prefixes/ranges into per-AFI byte-interval
/// lists, each sorted by lower bound and merged. Families marked
/// `inherit` contribute nothing — resolution is expected to have happened
/// earlier, and an absent entry acts as "all" in subset checks.
fn ip_resources_to_merged_intervals(
    set: &IpResourceSet,
) -> std::collections::HashMap<AfiKey, Vec<(Vec<u8>, Vec<u8>)>> {
    let mut by_afi: std::collections::HashMap<AfiKey, Vec<(Vec<u8>, Vec<u8>)>> =
        std::collections::HashMap::new();
    for fam in &set.families {
        let key = match fam.afi {
            crate::data_model::rc::Afi::Ipv4 => AfiKey::V4,
            crate::data_model::rc::Afi::Ipv6 => AfiKey::V6,
        };
        let IpAddressChoice::AddressesOrRanges(items) = &fam.choice else {
            // Inherit: intentionally no entry for this family.
            continue;
        };
        let intervals = by_afi.entry(key).or_default();
        for item in items {
            let interval = match item {
                crate::data_model::rc::IpAddressOrRange::Prefix(p) => prefix_to_range(p),
                crate::data_model::rc::IpAddressOrRange::Range(r) => {
                    (r.min.clone(), r.max.clone())
                }
            };
            intervals.push(interval);
        }
    }
    for intervals in by_afi.values_mut() {
        intervals.sort_by(|(a, _), (b, _)| a.cmp(b));
        *intervals = merge_ip_intervals(intervals);
    }
    by_afi
}
/// Merge byte-string intervals (pre-sorted by lower bound) that overlap or
/// are adjacent; adjacency is detected by comparing against
/// `increment_bytes` of the previous upper bound.
fn merge_ip_intervals(v: &[(Vec<u8>, Vec<u8>)]) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut merged: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
    for (lo, hi) in v {
        match merged.last_mut() {
            Some(prev) if bytes_leq(lo, &increment_bytes(&prev.1)) => {
                // Extend only when this interval actually reaches further.
                if bytes_leq(&prev.1, hi) {
                    prev.1 = hi.clone();
                }
            }
            _ => merged.push((lo.clone(), hi.clone())),
        }
    }
    merged
}
/// True when [min, max] lies inside one interval of `parent`, which must
/// be sorted by lower bound and merged (so at most one interval can
/// contain `min`, allowing the early cut-off).
fn interval_is_covered(parent: &[(Vec<u8>, Vec<u8>)], min: &[u8], max: &[u8]) -> bool {
    for (pmin, pmax) in parent {
        if pmin.as_slice() <= min && max <= pmax.as_slice() {
            return true;
        }
        if pmin.as_slice() > min {
            // Sorted input: no later interval can start at or before `min`.
            break;
        }
    }
    false
}
/// Expand an IP prefix into its inclusive (lowest, highest) address pair:
/// every host bit is cleared for the lower bound and set for the upper
/// bound. Prefix lengths beyond the AFI's bit width are clamped.
fn prefix_to_range(prefix: &crate::data_model::rc::IpPrefix) -> (Vec<u8>, Vec<u8>) {
    let total_bits = match prefix.afi {
        crate::data_model::rc::Afi::Ipv4 => 32u16,
        crate::data_model::rc::Afi::Ipv6 => 128u16,
    };
    let prefix_len = prefix.prefix_len.min(total_bits);
    let mut lo = prefix.addr.clone();
    let mut hi = prefix.addr.clone();
    for bit in prefix_len..total_bits {
        let byte_idx = usize::from(bit / 8);
        let mask = 1u8 << (7 - (bit % 8));
        lo[byte_idx] &= !mask;
        hi[byte_idx] |= mask;
    }
    (lo, hi)
}
/// Lexicographic `a <= b` on byte strings (matches numeric order for
/// equal-length big-endian values).
fn bytes_leq(a: &[u8], b: &[u8]) -> bool {
    // Slices are totally ordered, so "not greater" is exactly "<=".
    !(b < a)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::data_model::rc::{
Afi, AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpAddressFamily, IpResourceSet,
};
use crate::data_model::rc::{RcExtensions, ResourceCertKind, ResourceCertificate, RpkixTbsCertificate};
use der_parser::num_bigint::BigUint;
use url::Url;
    /// Construct a minimal in-memory certificate for exercising the
    /// AKI/SKI, AIA and CRLDP binding checks. DER, key material and
    /// resources are left empty, and validity is a zero-length window at
    /// the epoch, so this helper is unsuitable for time-based checks.
    fn dummy_cert(
        kind: ResourceCertKind,
        subject_dn: &str,
        issuer_dn: &str,
        ski: Option<Vec<u8>>,
        aki: Option<Vec<u8>>,
        aia: Option<Vec<&str>>,
        crldp: Option<Vec<&str>>,
    ) -> ResourceCertificate {
        // Parse fixture URIs eagerly so a bad literal fails the test loudly.
        let aia = aia.map(|v| {
            v.into_iter()
                .map(|s| Url::parse(s).expect("url"))
                .collect::<Vec<_>>()
        });
        let crldp = crldp.map(|v| {
            v.into_iter()
                .map(|s| Url::parse(s).expect("url"))
                .collect::<Vec<_>>()
        });
        ResourceCertificate {
            raw_der: Vec::new(),
            kind,
            tbs: RpkixTbsCertificate {
                version: 2,
                serial_number: BigUint::from(1u8),
                signature_algorithm: "1.2.840.113549.1.1.11".to_string(),
                issuer_dn: issuer_dn.to_string(),
                subject_dn: subject_dn.to_string(),
                validity_not_before: time::OffsetDateTime::UNIX_EPOCH,
                validity_not_after: time::OffsetDateTime::UNIX_EPOCH,
                subject_public_key_info: Vec::new(),
                extensions: RcExtensions {
                    basic_constraints_ca: kind == ResourceCertKind::Ca,
                    subject_key_identifier: ski,
                    authority_key_identifier: aki,
                    crl_distribution_points_uris: crldp,
                    ca_issuers_uris: aia,
                    subject_info_access: None,
                    certificate_policies_oid: None,
                    ip_resources: None,
                    as_resources: None,
                },
            },
        }
    }
#[test]
fn resolve_child_ip_resources_rejects_inherit_without_parent_effective_resources() {
let child = IpResourceSet {
families: vec![IpAddressFamily {
afi: Afi::Ipv4,
choice: IpAddressChoice::Inherit,
}],
};
let err = resolve_child_ip_resources(Some(&child), None).unwrap_err();
assert!(matches!(err, CaPathError::InheritWithoutParentResources));
}
#[test]
fn resolve_child_ip_resources_rejects_non_inherit_without_parent_effective_resources() {
let child = IpResourceSet {
families: vec![IpAddressFamily {
afi: Afi::Ipv4,
choice: IpAddressChoice::AddressesOrRanges(vec![]),
}],
};
let err = resolve_child_ip_resources(Some(&child), None).unwrap_err();
assert!(matches!(err, CaPathError::ResourcesNotSubset));
}
#[test]
fn ip_resources_by_afi_items_rejects_inherit_families() {
let parent = IpResourceSet {
families: vec![IpAddressFamily {
afi: Afi::Ipv6,
choice: IpAddressChoice::Inherit,
}],
};
let err = ip_resources_by_afi_items(&parent).unwrap_err();
assert!(matches!(err, CaPathError::InheritWithoutParentResources));
}
#[test]
fn resolve_child_as_resources_rejects_inherit_without_parent_effective_resources() {
let child = AsResourceSet {
asnum: Some(AsIdentifierChoice::Inherit),
rdi: None,
};
let err = resolve_child_as_resources(Some(&child), None).unwrap_err();
assert!(matches!(err, CaPathError::InheritWithoutParentResources));
}
#[test]
fn child_aki_mismatch_is_rejected() {
let issuer = dummy_cert(
ResourceCertKind::Ca,
"CN=issuer",
"CN=issuer",
Some(vec![1]),
None,
None,
None,
);
let child = dummy_cert(
ResourceCertKind::Ca,
"CN=child",
"CN=issuer",
Some(vec![2]),
Some(vec![9]),
Some(vec!["rsync://example.test/issuer.cer"]),
Some(vec!["rsync://example.test/issuer.crl"]),
);
let err = validate_child_aki_matches_issuer_ski(&child, &issuer).unwrap_err();
assert!(matches!(err, CaPathError::ChildAkiMismatch), "{err}");
}
#[test]
fn child_aia_missing_is_rejected() {
let _issuer = dummy_cert(
ResourceCertKind::Ca,
"CN=issuer",
"CN=issuer",
Some(vec![1]),
None,
None,
None,
);
let child = dummy_cert(
ResourceCertKind::Ca,
"CN=child",
"CN=issuer",
Some(vec![2]),
Some(vec![1]),
None,
Some(vec!["rsync://example.test/issuer.crl"]),
);
let err =
validate_child_aia_points_to_issuer_uri(&child, "rsync://example.test/issuer.cer")
.unwrap_err();
assert!(matches!(err, CaPathError::ChildAiaMissing), "{err}");
// Also cover issuer ski missing.
let issuer_missing_ski = dummy_cert(
ResourceCertKind::Ca,
"CN=issuer",
"CN=issuer",
None,
None,
None,
None,
);
let err = validate_child_aki_matches_issuer_ski(&child, &issuer_missing_ski).unwrap_err();
assert!(matches!(err, CaPathError::IssuerSkiMissing), "{err}");
}
#[test]
fn child_aia_issuer_uri_mismatch_is_rejected() {
    let _parent = dummy_cert(
        ResourceCertKind::Ca,
        "CN=issuer",
        "CN=issuer",
        Some(vec![1]),
        None,
        None,
        None,
    );
    // AIA is present but names a different object than the issuer's URI.
    let wrong_aia_child = dummy_cert(
        ResourceCertKind::Ca,
        "CN=child",
        "CN=issuer",
        Some(vec![2]),
        Some(vec![1]),
        Some(vec!["rsync://example.test/other.cer"]),
        Some(vec!["rsync://example.test/issuer.crl"]),
    );
    let outcome = validate_child_aia_points_to_issuer_uri(
        &wrong_aia_child,
        "rsync://example.test/issuer.cer",
    )
    .unwrap_err();
    assert!(
        matches!(outcome, CaPathError::ChildAiaIssuerUriMismatch),
        "{outcome}"
    );
}
#[test]
fn child_crldp_mismatch_is_rejected() {
    let parent = dummy_cert(
        ResourceCertKind::Ca,
        "CN=issuer",
        "CN=issuer",
        Some(vec![1]),
        None,
        None,
        None,
    );
    // No CRLDP at all -> ChildCrlDpMissing.
    let no_crldp_child = dummy_cert(
        ResourceCertKind::Ca,
        "CN=child",
        "CN=issuer",
        Some(vec![2]),
        Some(vec![1]),
        Some(vec!["rsync://example.test/issuer.cer"]),
        None,
    );
    let outcome = validate_child_crldp_contains_issuer_crl_uri(
        &no_crldp_child,
        "rsync://example.test/issuer.crl",
    )
    .unwrap_err();
    assert!(matches!(outcome, CaPathError::ChildCrlDpMissing), "{outcome}");
    // CRLDP present but naming the wrong CRL -> ChildCrlDpUriMismatch.
    let wrong_crldp_child = dummy_cert(
        ResourceCertKind::Ca,
        "CN=child",
        "CN=issuer",
        Some(vec![2]),
        Some(vec![1]),
        Some(vec!["rsync://example.test/issuer.cer"]),
        Some(vec!["rsync://example.test/other.crl"]),
    );
    let outcome = validate_child_crldp_contains_issuer_crl_uri(
        &wrong_crldp_child,
        "rsync://example.test/issuer.crl",
    )
    .unwrap_err();
    assert!(
        matches!(outcome, CaPathError::ChildCrlDpUriMismatch),
        "{outcome}"
    );
    // A child without an AKI -> ChildAkiMissing.
    let aki_less_child = dummy_cert(
        ResourceCertKind::Ca,
        "CN=child",
        "CN=issuer",
        Some(vec![2]),
        None,
        Some(vec!["rsync://example.test/issuer.cer"]),
        Some(vec!["rsync://example.test/issuer.crl"]),
    );
    let outcome = validate_child_aki_matches_issuer_ski(&aki_less_child, &parent).unwrap_err();
    assert!(matches!(outcome, CaPathError::ChildAkiMissing), "{outcome}");
}
#[test]
fn child_binding_checks_accept_when_matching() {
    // A fully consistent issuer/child pair: every binding check must pass.
    let parent = dummy_cert(
        ResourceCertKind::Ca,
        "CN=issuer",
        "CN=issuer",
        Some(vec![1]),
        None,
        None,
        None,
    );
    let well_bound_child = dummy_cert(
        ResourceCertKind::Ca,
        "CN=child",
        "CN=issuer",
        Some(vec![2]),
        Some(vec![1]),
        Some(vec!["rsync://example.test/issuer.cer"]),
        Some(vec!["rsync://example.test/issuer.crl"]),
    );
    validate_child_aki_matches_issuer_ski(&well_bound_child, &parent).expect("aki ok");
    validate_child_aia_points_to_issuer_uri(&well_bound_child, "rsync://example.test/issuer.cer")
        .expect("aia ok");
    validate_child_crldp_contains_issuer_crl_uri(
        &well_bound_child,
        "rsync://example.test/issuer.crl",
    )
    .expect("crldp ok");
}
}
/// Treats `v` as a fixed-width big-endian unsigned integer and returns it
/// incremented by one. On overflow (all bytes 0xFF) the value wraps to all
/// zeros of the same length; the width never changes.
fn increment_bytes(v: &[u8]) -> Vec<u8> {
    let mut out = v.to_vec();
    // Find the rightmost byte that can absorb a +1 without carrying.
    match out.iter().rposition(|&b| b != 0xFF) {
        Some(i) => {
            out[i] += 1;
            // Everything to the right of the incremented byte wrapped to zero.
            out[i + 1..].fill(0);
            out
        }
        // Every byte was 0xFF: the increment wraps around to all zeros.
        None => vec![0u8; out.len()],
    }
}

497
src/validation/cert_path.rs Normal file
View File

@ -0,0 +1,497 @@
use crate::data_model::common::BigUnsigned;
use crate::data_model::crl::{CrlDecodeError, CrlVerifyError, RpkixCrl};
use crate::data_model::rc::{
ResourceCertKind, ResourceCertificate, ResourceCertificateDecodeError,
};
use x509_parser::prelude::{FromDer, X509Certificate};
/// Result of a successful EE certificate path validation: the decoded EE
/// certificate together with the issuing CA certificate and the CRL that was
/// consulted for the revocation check.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ValidatedEeCertPath {
    // The validated end-entity certificate.
    pub ee: ResourceCertificate,
    // The CA certificate that issued `ee`.
    pub issuer_ca: ResourceCertificate,
    // The CRL issued by `issuer_ca`, already signature-verified.
    pub issuer_crl: RpkixCrl,
}
/// Errors raised while validating an EE certificate path: decode failures,
/// issuer/child binding mismatches (AKI/SKI, AIA, CRLDP), KeyUsage violations,
/// time-validity failures, and CRL/revocation failures. Each `#[error]`
/// message carries the RFC section the check implements.
#[derive(Debug, thiserror::Error)]
pub enum CertPathError {
    #[error("EE certificate decode failed: {0} (RFC 6487 §4; RFC 5280 §4.1)")]
    EeDecode(#[from] ResourceCertificateDecodeError),
    #[error("issuer CA certificate decode failed: {0} (RFC 6487 §4; RFC 5280 §4.1)")]
    IssuerDecode(ResourceCertificateDecodeError),
    #[error("issuer CRL decode failed: {0} (RFC 6487 §5; RFC 9829 §3.1; RFC 5280 §5.1)")]
    CrlDecode(#[from] CrlDecodeError),
    #[error(
        "issuer certificate must be a CA resource certificate (RFC 6487 §4.8.1; RFC 5280 §4.2.1.9)"
    )]
    IssuerNotCa,
    #[error("EE certificate must be an EE resource certificate (RFC 6487 §4)")]
    EeNotEe,
    #[error(
        "EE issuer DN does not match issuer CA subject DN: ee.issuer={ee_issuer_dn} issuer.subject={issuer_subject_dn} (RFC 5280 §6.1)"
    )]
    IssuerSubjectMismatch {
        ee_issuer_dn: String,
        issuer_subject_dn: String,
    },
    // Also used for X.509 parse failures encountered during signature and
    // KeyUsage checks, not only for a bad signature itself.
    #[error("EE certificate signature verification failed: {0} (RFC 5280 §6.1)")]
    EeSignatureInvalid(String),
    #[error(
        "EE KeyUsage extension missing (RFC 6487 §4.8.4; RFC 5280 §4.2.1.3)"
    )]
    KeyUsageMissing,
    #[error("EE KeyUsage criticality must be critical (RFC 6487 §4.8.4; RFC 5280 §4.2.1.3)")]
    KeyUsageNotCritical,
    #[error(
        "EE KeyUsage must have only digitalSignature set (RFC 6487 §4.8.4)"
    )]
    KeyUsageInvalidBits,
    #[error("issuer CA subjectKeyIdentifier missing (RFC 6487 §4.8.2)")]
    IssuerSkiMissing,
    #[error("EE authorityKeyIdentifier missing (RFC 6487 §4.8.3; RFC 5280 §4.2.1.1)")]
    EeAkiMissing,
    #[error(
        "EE authorityKeyIdentifier does not match issuer subjectKeyIdentifier (RFC 6487 §4.8.3)"
    )]
    EeAkiMismatch,
    #[error("EE authorityInfoAccess missing (RFC 6487 §4.8.7; RFC 5280 §4.2.2.1)")]
    EeAiaMissing,
    #[error(
        "EE authorityInfoAccess does not reference issuer certificate rsync URI (RFC 6487 §4.8.7)"
    )]
    EeAiaIssuerUriMismatch,
    #[error("EE CRLDistributionPoints missing (RFC 6487 §4.8.6; RFC 5280 §4.2.1.13)")]
    EeCrlDpMissing,
    #[error("EE CRLDistributionPoints does not reference issuer CRL rsync URI (RFC 6487 §4.8.6)")]
    EeCrlDpUriMismatch,
    #[error("certificate not valid at validation_time (RFC 5280 §4.1.2.5; RFC 5280 §6.1)")]
    CertificateNotValidAtTime,
    #[error(
        "CRL signature/binding verification failed: {0} (RFC 5280 §6.3.3; RFC 6487 §5; RFC 9829 §3.1)"
    )]
    CrlVerify(#[from] CrlVerifyError),
    #[error(
        "CRL not valid at validation_time (RFC 5280 §6.3.3(g); RFC 5280 §5.1.2.4-§5.1.2.5; RFC 6487 §5)"
    )]
    CrlNotValidAtTime,
    #[error("EE certificate is revoked by issuer CRL (RFC 5280 §6.3.3; RFC 6487 §5)")]
    EeRevoked,
}
/// Validates the full EE certificate path for an RPKI signed object:
/// decode EE and issuer CA, check issuer/subject DN chaining, AKI/SKI binding,
/// optional AIA/CRLDP URI binding, time validity of both certificates, the EE
/// signature and KeyUsage, then the issuer CRL (signature, time window,
/// revocation of the EE serial).
///
/// `issuer_ca_rsync_uri` / `issuer_crl_rsync_uri` are optional: when `None`,
/// the corresponding AIA/CRLDP URI check is skipped.
///
/// # Errors
/// Returns the first failing check as a [`CertPathError`]; the order of checks
/// above is significant — callers observe the earliest failure.
pub fn validate_ee_cert_path(
    ee_cert_der: &[u8],
    issuer_ca_der: &[u8],
    issuer_crl_der: &[u8],
    issuer_ca_rsync_uri: Option<&str>,
    issuer_crl_rsync_uri: Option<&str>,
    validation_time: time::OffsetDateTime,
) -> Result<ValidatedEeCertPath, CertPathError> {
    let ee = ResourceCertificate::decode_der(ee_cert_der)?;
    if ee.kind != ResourceCertKind::Ee {
        return Err(CertPathError::EeNotEe);
    }
    let issuer_ca =
        ResourceCertificate::decode_der(issuer_ca_der).map_err(CertPathError::IssuerDecode)?;
    if issuer_ca.kind != ResourceCertKind::Ca {
        return Err(CertPathError::IssuerNotCa);
    }
    // Name chaining: EE's issuer DN must equal the CA's subject DN (RFC 5280 §6.1).
    if ee.tbs.issuer_dn != issuer_ca.tbs.subject_dn {
        return Err(CertPathError::IssuerSubjectMismatch {
            ee_issuer_dn: ee.tbs.issuer_dn.clone(),
            issuer_subject_dn: issuer_ca.tbs.subject_dn.clone(),
        });
    }
    validate_ee_aki_matches_issuer_ski(&ee, &issuer_ca)?;
    // URI binding checks run only when the caller knows the expected rsync
    // locations of the issuer certificate / CRL.
    if let Some(expected_issuer_uri) = issuer_ca_rsync_uri {
        validate_ee_aia_points_to_issuer_uri(&ee, expected_issuer_uri)?;
    }
    if let Some(expected_crl_uri) = issuer_crl_rsync_uri {
        validate_ee_crldp_contains_issuer_crl_uri(&ee, expected_crl_uri)?;
    }
    // Both the EE and the issuer CA must be inside their validity windows.
    if !time_within_validity(
        validation_time,
        ee.tbs.validity_not_before,
        ee.tbs.validity_not_after,
    ) || !time_within_validity(
        validation_time,
        issuer_ca.tbs.validity_not_before,
        issuer_ca.tbs.validity_not_after,
    ) {
        return Err(CertPathError::CertificateNotValidAtTime);
    }
    verify_cert_signature_with_issuer(ee_cert_der, issuer_ca_der)?;
    validate_ee_key_usage(ee_cert_der)?;
    // Revocation: decode and verify the issuer CRL, then check the EE serial.
    let issuer_crl = RpkixCrl::decode_der(issuer_crl_der)?;
    issuer_crl.verify_signature_with_issuer_certificate_der(issuer_ca_der)?;
    if !crl_valid_at_time(&issuer_crl, validation_time) {
        return Err(CertPathError::CrlNotValidAtTime);
    }
    if is_serial_revoked_by_crl(&ee, &issuer_crl) {
        return Err(CertPathError::EeRevoked);
    }
    Ok(ValidatedEeCertPath {
        ee,
        issuer_ca,
        issuer_crl,
    })
}
/// Checks that the EE's authorityKeyIdentifier equals the issuer CA's
/// subjectKeyIdentifier (RFC 6487 §4.8.3). A missing issuer SKI is reported
/// before a missing EE AKI.
fn validate_ee_aki_matches_issuer_ski(
    ee: &ResourceCertificate,
    issuer_ca: &ResourceCertificate,
) -> Result<(), CertPathError> {
    match (
        issuer_ca.tbs.extensions.subject_key_identifier.as_deref(),
        ee.tbs.extensions.authority_key_identifier.as_deref(),
    ) {
        (None, _) => Err(CertPathError::IssuerSkiMissing),
        (_, None) => Err(CertPathError::EeAkiMissing),
        (Some(ski), Some(aki)) if aki != ski => Err(CertPathError::EeAkiMismatch),
        _ => Ok(()),
    }
}
/// Checks that the EE's AIA (caIssuers) lists the expected issuer-certificate
/// rsync URI (RFC 6487 §4.8.7).
fn validate_ee_aia_points_to_issuer_uri(
    ee: &ResourceCertificate,
    issuer_ca_rsync_uri: &str,
) -> Result<(), CertPathError> {
    let uris = ee
        .tbs
        .extensions
        .ca_issuers_uris
        .as_ref()
        .ok_or(CertPathError::EeAiaMissing)?;
    if uris.iter().any(|u| u.as_str() == issuer_ca_rsync_uri) {
        Ok(())
    } else {
        Err(CertPathError::EeAiaIssuerUriMismatch)
    }
}
/// Checks that the EE's CRLDistributionPoints lists the expected issuer CRL
/// rsync URI (RFC 6487 §4.8.6).
fn validate_ee_crldp_contains_issuer_crl_uri(
    ee: &ResourceCertificate,
    issuer_crl_rsync_uri: &str,
) -> Result<(), CertPathError> {
    let uris = ee
        .tbs
        .extensions
        .crl_distribution_points_uris
        .as_ref()
        .ok_or(CertPathError::EeCrlDpMissing)?;
    if uris.iter().any(|u| u.as_str() == issuer_crl_rsync_uri) {
        Ok(())
    } else {
        Err(CertPathError::EeCrlDpUriMismatch)
    }
}
/// Enforces RFC 6487 §4.8.4 on the EE certificate: KeyUsage must be present,
/// critical, and have exactly the digitalSignature bit set.
fn validate_ee_key_usage(ee_cert_der: &[u8]) -> Result<(), CertPathError> {
    let (rem, cert) = X509Certificate::from_der(ee_cert_der)
        .map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))?;
    if !rem.is_empty() {
        return Err(CertPathError::EeSignatureInvalid(
            "trailing bytes after EE certificate".to_string(),
        ));
    }
    // Criticality lives on the raw extension; locate the KeyUsage OID first.
    let Some(ku_ext) = cert
        .extensions()
        .iter()
        .find(|ext| ext.oid.to_id_string() == crate::data_model::oid::OID_KEY_USAGE)
    else {
        return Err(CertPathError::KeyUsageMissing);
    };
    if !ku_ext.critical {
        return Err(CertPathError::KeyUsageNotCritical);
    }
    // Decode the bit string via the parser's typed accessor.
    let ku = cert
        .key_usage()
        .map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))?
        .ok_or(CertPathError::KeyUsageMissing)?;
    let bits = &ku.value;
    let only_digital_signature = bits.digital_signature()
        && !bits.key_cert_sign()
        && !bits.crl_sign()
        && !bits.non_repudiation()
        && !bits.key_encipherment()
        && !bits.data_encipherment()
        && !bits.key_agreement()
        && !bits.encipher_only()
        && !bits.decipher_only();
    if only_digital_signature {
        Ok(())
    } else {
        Err(CertPathError::KeyUsageInvalidBits)
    }
}
fn verify_cert_signature_with_issuer(
ee_cert_der: &[u8],
issuer_ca_der: &[u8],
) -> Result<(), CertPathError> {
let (rem, ee) = X509Certificate::from_der(ee_cert_der)
.map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))?;
if !rem.is_empty() {
return Err(CertPathError::EeSignatureInvalid(
"trailing bytes after EE certificate".to_string(),
));
}
let (rem, issuer) = X509Certificate::from_der(issuer_ca_der)
.map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))?;
if !rem.is_empty() {
return Err(CertPathError::EeSignatureInvalid(
"trailing bytes after issuer certificate".to_string(),
));
}
ee.verify_signature(Some(&issuer.tbs_certificate.subject_pki))
.map_err(|e| CertPathError::EeSignatureInvalid(e.to_string()))
}
/// True when `t` falls inside the inclusive `[not_before, not_after]` window.
/// All operands are normalized to UTC before comparing.
fn time_within_validity(
    t: time::OffsetDateTime,
    not_before: time::OffsetDateTime,
    not_after: time::OffsetDateTime,
) -> bool {
    let utc = time::UtcOffset::UTC;
    (not_before.to_offset(utc)..=not_after.to_offset(utc)).contains(&t.to_offset(utc))
}
/// True when `t` lies in `[thisUpdate, nextUpdate)` — note the exclusive
/// upper bound, unlike certificate validity.
fn crl_valid_at_time(crl: &RpkixCrl, t: time::OffsetDateTime) -> bool {
    let utc = time::UtcOffset::UTC;
    let at = t.to_offset(utc);
    at >= crl.this_update.utc.to_offset(utc) && at < crl.next_update.utc.to_offset(utc)
}
/// True when the EE certificate's serial number appears in the CRL's revoked
/// entries.
fn is_serial_revoked_by_crl(ee: &ResourceCertificate, crl: &RpkixCrl) -> bool {
    let target = BigUnsigned::from_biguint(&ee.tbs.serial_number);
    crl.revoked_certs
        .iter()
        .any(|entry| entry.serial_number == target)
}
#[cfg(test)]
mod tests {
    //! Unit tests for the EE binding helpers (AKI/SKI, AIA, CRLDP). These use
    //! hand-built in-memory certificates, so no DER parsing or signature
    //! verification is exercised here.
    use super::*;
    use crate::data_model::rc::{RcExtensions, ResourceCertKind, ResourceCertificate, RpkixTbsCertificate};
    use der_parser::num_bigint::BigUint;
    use url::Url;

    /// Builds a minimal in-memory `ResourceCertificate` with just enough
    /// fields populated to exercise the binding helpers. `raw_der` is left
    /// empty, so the result is NOT usable for signature checks.
    fn dummy_cert(
        kind: ResourceCertKind,
        subject_dn: &str,
        issuer_dn: &str,
        ski: Option<Vec<u8>>,
        aki: Option<Vec<u8>>,
        aia: Option<Vec<&str>>,
        crldp: Option<Vec<&str>>,
    ) -> ResourceCertificate {
        let aia = aia.map(|v| {
            v.into_iter()
                .map(|s| Url::parse(s).expect("url"))
                .collect::<Vec<_>>()
        });
        let crldp = crldp.map(|v| {
            v.into_iter()
                .map(|s| Url::parse(s).expect("url"))
                .collect::<Vec<_>>()
        });
        ResourceCertificate {
            raw_der: Vec::new(),
            kind,
            tbs: RpkixTbsCertificate {
                version: 2,
                serial_number: BigUint::from(1u8),
                signature_algorithm: "1.2.840.113549.1.1.11".to_string(),
                issuer_dn: issuer_dn.to_string(),
                subject_dn: subject_dn.to_string(),
                validity_not_before: time::OffsetDateTime::UNIX_EPOCH,
                validity_not_after: time::OffsetDateTime::UNIX_EPOCH,
                subject_public_key_info: Vec::new(),
                extensions: RcExtensions {
                    // CA flag follows the requested certificate kind.
                    basic_constraints_ca: kind == ResourceCertKind::Ca,
                    subject_key_identifier: ski,
                    authority_key_identifier: aki,
                    crl_distribution_points_uris: crldp,
                    ca_issuers_uris: aia,
                    subject_info_access: None,
                    certificate_policies_oid: None,
                    ip_resources: None,
                    as_resources: None,
                },
            },
        }
    }

    /// Exercises every failure branch of the three binding helpers: AKI
    /// mismatch/missing, issuer SKI missing, AIA missing/mismatch, and
    /// CRLDP missing/mismatch.
    #[test]
    fn ee_aki_and_aia_and_crldp_binding_checks_report_errors() {
        let issuer = dummy_cert(
            ResourceCertKind::Ca,
            "CN=issuer",
            "CN=issuer",
            Some(vec![1]),
            None,
            None,
            None,
        );
        // EE whose AKI ([9]) does not match the issuer SKI ([1]).
        let ee = dummy_cert(
            ResourceCertKind::Ee,
            "CN=ee",
            "CN=issuer",
            Some(vec![2]),
            Some(vec![9]),
            Some(vec!["rsync://example.test/issuer.cer"]),
            Some(vec!["rsync://example.test/issuer.crl"]),
        );
        let err = validate_ee_aki_matches_issuer_ski(&ee, &issuer).unwrap_err();
        assert!(matches!(err, CertPathError::EeAkiMismatch), "{err}");
        let issuer_missing_ski = dummy_cert(
            ResourceCertKind::Ca,
            "CN=issuer",
            "CN=issuer",
            None,
            None,
            None,
            None,
        );
        let err = validate_ee_aki_matches_issuer_ski(&ee, &issuer_missing_ski).unwrap_err();
        assert!(matches!(err, CertPathError::IssuerSkiMissing), "{err}");
        let ee_missing_aki = dummy_cert(
            ResourceCertKind::Ee,
            "CN=ee",
            "CN=issuer",
            Some(vec![2]),
            None,
            Some(vec!["rsync://example.test/issuer.cer"]),
            Some(vec!["rsync://example.test/issuer.crl"]),
        );
        let err = validate_ee_aki_matches_issuer_ski(&ee_missing_aki, &issuer).unwrap_err();
        assert!(matches!(err, CertPathError::EeAkiMissing), "{err}");
        let ee_missing_aia = dummy_cert(
            ResourceCertKind::Ee,
            "CN=ee",
            "CN=issuer",
            Some(vec![2]),
            Some(vec![1]),
            None,
            Some(vec!["rsync://example.test/issuer.crl"]),
        );
        let err =
            validate_ee_aia_points_to_issuer_uri(&ee_missing_aia, "rsync://example.test/issuer.cer")
                .unwrap_err();
        assert!(matches!(err, CertPathError::EeAiaMissing), "{err}");
        let ee_wrong_aia = dummy_cert(
            ResourceCertKind::Ee,
            "CN=ee",
            "CN=issuer",
            Some(vec![2]),
            Some(vec![1]),
            Some(vec!["rsync://example.test/other.cer"]),
            Some(vec!["rsync://example.test/issuer.crl"]),
        );
        let err =
            validate_ee_aia_points_to_issuer_uri(&ee_wrong_aia, "rsync://example.test/issuer.cer")
                .unwrap_err();
        assert!(
            matches!(err, CertPathError::EeAiaIssuerUriMismatch),
            "{err}"
        );
        let ee_missing_crldp = dummy_cert(
            ResourceCertKind::Ee,
            "CN=ee",
            "CN=issuer",
            Some(vec![2]),
            Some(vec![1]),
            Some(vec!["rsync://example.test/issuer.cer"]),
            None,
        );
        let err = validate_ee_crldp_contains_issuer_crl_uri(
            &ee_missing_crldp,
            "rsync://example.test/issuer.crl",
        )
        .unwrap_err();
        assert!(matches!(err, CertPathError::EeCrlDpMissing), "{err}");
        let ee_wrong_crldp = dummy_cert(
            ResourceCertKind::Ee,
            "CN=ee",
            "CN=issuer",
            Some(vec![2]),
            Some(vec![1]),
            Some(vec!["rsync://example.test/issuer.cer"]),
            Some(vec!["rsync://example.test/other.crl"]),
        );
        let err = validate_ee_crldp_contains_issuer_crl_uri(
            &ee_wrong_crldp,
            "rsync://example.test/issuer.crl",
        )
        .unwrap_err();
        assert!(
            matches!(err, CertPathError::EeCrlDpUriMismatch),
            "{err}"
        );
    }

    /// Happy path: a consistent issuer/EE pair passes all three binding checks.
    #[test]
    fn ee_binding_checks_accept_when_matching() {
        let issuer = dummy_cert(
            ResourceCertKind::Ca,
            "CN=issuer",
            "CN=issuer",
            Some(vec![1]),
            None,
            None,
            None,
        );
        let ee = dummy_cert(
            ResourceCertKind::Ee,
            "CN=ee",
            "CN=issuer",
            Some(vec![2]),
            Some(vec![1]),
            Some(vec!["rsync://example.test/issuer.cer"]),
            Some(vec!["rsync://example.test/issuer.crl"]),
        );
        validate_ee_aki_matches_issuer_ski(&ee, &issuer).expect("aki ok");
        validate_ee_aia_points_to_issuer_uri(&ee, "rsync://example.test/issuer.cer")
            .expect("aia ok");
        validate_ee_crldp_contains_issuer_crl_uri(&ee, "rsync://example.test/issuer.crl")
            .expect("crldp ok");
    }
}

146
src/validation/from_tal.rs Normal file
View File

@ -0,0 +1,146 @@
use url::Url;
use crate::data_model::ta::{TrustAnchor, TrustAnchorError};
use crate::data_model::tal::{Tal, TalDecodeError};
use crate::sync::rrdp::Fetcher;
use crate::validation::ca_instance::{CaInstanceUris, CaInstanceUrisError, ca_instance_uris_from_ca_certificate};
use crate::validation::objects::IssuerCaCertificateResolver;
use crate::validation::run::{RunError, RunOutput, run_publication_point_once};
/// A root CA instance discovered from a TAL: the TAL's URL (when the TAL was
/// fetched by URL, `None` otherwise), the bound trust anchor, and the
/// publication-point URIs derived from the TA certificate.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct DiscoveredRootCaInstance {
    pub tal_url: Option<String>,
    pub trust_anchor: TrustAnchor,
    pub ca_instance: CaInstanceUris,
}
/// Result of `run_root_from_tal_url_once`: the TAL discovery outcome plus the
/// single publication-point run output.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RunFromTalOutput {
    pub discovery: DiscoveredRootCaInstance,
    pub run: RunOutput,
}
/// Errors for TAL-driven root discovery and execution: fetching/decoding the
/// TAL, fetching and binding the TA certificate, deriving CA instance URIs,
/// and running the publication point.
#[derive(Debug, thiserror::Error)]
pub enum FromTalError {
    #[error("TAL fetch failed: {0} (RFC 8630 §2.2)")]
    TalFetch(String),
    #[error("TAL decode failed: {0} (RFC 8630 §2.2)")]
    TalDecode(#[from] TalDecodeError),
    // Carries the LAST candidate failure when every TA URI fails.
    #[error("failed to fetch TA certificate from TAL: {0} (RFC 8630 §2.3)")]
    TaFetch(String),
    #[error("failed to bind TAL and TA certificate: {0} (RFC 8630 §2.3)")]
    Bind(#[from] TrustAnchorError),
    #[error("failed to discover CA instance URIs from TA certificate: {0}")]
    CaInstanceUris(#[from] CaInstanceUrisError),
    #[error("run failed: {0}")]
    Run(#[from] RunError),
    #[error("TAL contains no TA URIs (RFC 8630 §2.2)")]
    NoTaUris,
}
/// Fetches the TAL at `tal_url`, decodes it, and delegates to
/// [`discover_root_ca_instance_from_tal`], recording the URL it came from.
pub fn discover_root_ca_instance_from_tal_url(
    http_fetcher: &dyn Fetcher,
    tal_url: &str,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    let raw_tal = http_fetcher
        .fetch(tal_url)
        .map_err(FromTalError::TalFetch)?;
    discover_root_ca_instance_from_tal(
        http_fetcher,
        Tal::decode_bytes(&raw_tal)?,
        Some(tal_url.to_string()),
    )
}
/// Tries each TA URI listed in the TAL, in order; the first candidate that can
/// be fetched, bound against the TAL key, and yields CA instance URIs wins.
///
/// # Errors
/// * [`FromTalError::NoTaUris`] when the TAL lists no candidates.
/// * [`FromTalError::TaFetch`] carrying the *last* candidate's failure text
///   when every candidate fails (fetch, bind, or URI discovery).
pub fn discover_root_ca_instance_from_tal(
    http_fetcher: &dyn Fetcher,
    tal: Tal,
    tal_url: Option<String>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    if tal.ta_uris.is_empty() {
        return Err(FromTalError::NoTaUris);
    }
    // Remember the most recent failure so the final error is actionable.
    let mut last_err: Option<String> = None;
    for ta_uri in tal.ta_uris.iter() {
        let ta_der = match http_fetcher.fetch(ta_uri.as_str()) {
            Ok(b) => b,
            Err(e) => {
                last_err = Some(format!("fetch {ta_uri} failed: {e}"));
                continue;
            }
        };
        // Bind the fetched certificate against the TAL's key (RFC 8630 §2.3).
        let trust_anchor = match TrustAnchor::bind_der(tal.clone(), &ta_der, Some(ta_uri)) {
            Ok(ta) => ta,
            Err(e) => {
                last_err = Some(format!("bind {ta_uri} failed: {e}"));
                continue;
            }
        };
        let ca_instance = match ca_instance_uris_from_ca_certificate(&trust_anchor.ta_certificate.rc_ca)
        {
            Ok(v) => v,
            Err(e) => {
                last_err = Some(format!("CA instance discovery failed: {e}"));
                continue;
            }
        };
        return Ok(DiscoveredRootCaInstance {
            tal_url,
            trust_anchor,
            ca_instance,
        });
    }
    Err(FromTalError::TaFetch(
        last_err.unwrap_or_else(|| "unknown TA candidate error".to_string()),
    ))
}
/// Offline discovery variant: both the TAL bytes and the TA certificate DER
/// are supplied directly, so no fetching occurs and `tal_url` is `None`.
pub fn discover_root_ca_instance_from_tal_and_ta_der(
    tal_bytes: &[u8],
    ta_der: &[u8],
    resolved_ta_uri: Option<&Url>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    let anchor = TrustAnchor::bind_der(Tal::decode_bytes(tal_bytes)?, ta_der, resolved_ta_uri)?;
    let uris = ca_instance_uris_from_ca_certificate(&anchor.ta_certificate.rc_ca)?;
    Ok(DiscoveredRootCaInstance {
        tal_url: None,
        trust_anchor: anchor,
        ca_instance: uris,
    })
}
/// Convenience entry point: discovers the root CA instance from `tal_url`,
/// then runs one serial publication-point pass over it via
/// `run_publication_point_once`, returning both the discovery and run output.
pub fn run_root_from_tal_url_once(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_url: &str,
    http_fetcher: &dyn Fetcher,
    rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
    issuer_resolver: &dyn IssuerCaCertificateResolver,
    validation_time: time::OffsetDateTime,
) -> Result<RunFromTalOutput, FromTalError> {
    let discovery = discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?;
    // All URIs for the run come from the discovered CA instance.
    let run = run_publication_point_once(
        store,
        policy,
        discovery.ca_instance.rrdp_notification_uri.as_deref(),
        &discovery.ca_instance.rsync_base_uri,
        &discovery.ca_instance.manifest_rsync_uri,
        &discovery.ca_instance.publication_point_rsync_uri,
        http_fetcher,
        rsync_fetcher,
        issuer_resolver,
        validation_time,
    )?;
    Ok(RunFromTalOutput { discovery, run })
}

295
src/validation/manifest.rs Normal file
View File

@ -0,0 +1,295 @@
use crate::data_model::manifest::{ManifestDecodeError, ManifestObject, ManifestValidateError};
use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{CaFailedFetchPolicy, Policy};
use crate::report::{RfcRef, Warning};
use crate::storage::{RocksStore, StorageError, VerifiedKey, VerifiedPublicationPointPack};
use sha2::Digest;
/// Where a publication-point pack came from: freshly built from raw objects,
/// or loaded from the verified cache after a fresh-build failure.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PublicationPointSource {
    Fresh,
    VerifiedCache,
}
/// Outcome of processing one publication point: the verified pack, its source,
/// and any warnings emitted (cache-fallback runs carry warnings; fresh runs
/// carry none).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct PublicationPointResult {
    pub source: PublicationPointSource,
    pub pack: VerifiedPublicationPointPack,
    pub warnings: Vec<Warning>,
}
/// Failures while building a fresh publication-point pack from raw objects:
/// missing/undecodable manifest, EE/CMS verification failures, an out-of-window
/// manifest, or a listed file that is missing or hash-mismatched (RFC 9286 §6).
#[derive(Debug, thiserror::Error)]
pub enum ManifestFreshError {
    #[error(
        "manifest not found in raw_objects: {manifest_rsync_uri} (RFC 9286 §6.2; RFC 9286 §6.6)"
    )]
    MissingManifest { manifest_rsync_uri: String },
    #[error("manifest decode failed: {0} (RFC 9286 §4; RFC 9286 §6.2; RFC 9286 §6.6)")]
    Decode(#[from] ManifestDecodeError),
    #[error(
        "manifest embedded EE certificate resources invalid: {0} (RFC 9286 §5.1; RFC 9286 §6.2; RFC 9286 §6.6)"
    )]
    EeResources(#[from] ManifestValidateError),
    #[error(
        "manifest CMS signature verification failed: {0} (RFC 6488 §3; RFC 9589 §4; RFC 9286 §6.2; RFC 9286 §6.6)"
    )]
    Signature(#[from] SignedObjectVerifyError),
    #[error(
        "manifest is not valid at validation_time: this_update={this_update_rfc3339_utc} next_update={next_update_rfc3339_utc} validation_time={validation_time_rfc3339_utc} (RFC 9286 §6.3; RFC 9286 §6.6)"
    )]
    StaleOrEarly {
        this_update_rfc3339_utc: String,
        next_update_rfc3339_utc: String,
        validation_time_rfc3339_utc: String,
    },
    #[error(
        "manifest referenced file missing in raw_objects: {rsync_uri} (RFC 9286 §6.4; RFC 9286 §6.6)"
    )]
    MissingFile { rsync_uri: String },
    #[error("manifest file hash mismatch: {rsync_uri} (RFC 9286 §6.5; RFC 9286 §6.6)")]
    HashMismatch { rsync_uri: String },
}
/// Failures while falling back to the verified cache: no cached pack, a pack
/// that decodes but is inconsistent, or a cached manifest that no longer
/// revalidates at the current time.
#[derive(Debug, thiserror::Error)]
pub enum ManifestCachedError {
    #[error("verified cache entry missing: {0} (RFC 9286 §6.6)")]
    MissingVerifiedCache(String),
    #[error("verified cache pack invalid: {0}")]
    InvalidPack(#[from] StorageError),
    #[error("cached manifest revalidation failed: {0}")]
    CachedManifestFresh(#[from] ManifestFreshError),
}
/// Top-level manifest-processing failure, after the configured failed-fetch
/// policy has been applied: either the fresh error alone (cache disabled) or
/// the pair of fresh + cache errors (fallback also failed).
#[derive(Debug, thiserror::Error)]
pub enum ManifestProcessError {
    #[error("manifest processing failed and cache use is disabled: {0}")]
    StopAllOutput(#[from] ManifestFreshError),
    #[error(
        "manifest processing failed and no usable verified cache is available: fresh={fresh}; cached={cached}"
    )]
    NoUsableCache {
        fresh: ManifestFreshError,
        cached: ManifestCachedError,
    },
    #[error("storage error during manifest processing: {0}")]
    Storage(#[from] StorageError),
}
/// Processes one publication point: first attempts a fresh manifest-driven
/// build from `raw_objects`, then falls back according to
/// `policy.ca_failed_fetch_policy`.
///
/// * Fresh success: the pack is persisted under the manifest's verified key
///   and returned with `source == Fresh` and no warnings.
/// * Fresh failure + `StopAllOutput`: the fresh error is propagated.
/// * Fresh failure + `UseVerifiedCache`: the previously persisted pack is
///   loaded and revalidated; success yields `source == VerifiedCache` with
///   warnings describing the fallback, failure yields `NoUsableCache`
///   carrying both the fresh and the cached error (RFC 9286 §6.6).
pub fn process_manifest_publication_point(
    store: &RocksStore,
    policy: &Policy,
    manifest_rsync_uri: &str,
    publication_point_rsync_uri: &str,
    validation_time: time::OffsetDateTime,
) -> Result<PublicationPointResult, ManifestProcessError> {
    let fresh = try_build_fresh_pack(
        store,
        manifest_rsync_uri,
        publication_point_rsync_uri,
        validation_time,
    );
    match fresh {
        Ok(pack) => {
            // Persist the freshly verified pack so later runs can fall back to it.
            let key = VerifiedKey::from_manifest_rsync_uri(manifest_rsync_uri);
            let bytes = pack.encode()?;
            store.put_verified(&key, &bytes)?;
            Ok(PublicationPointResult {
                source: PublicationPointSource::Fresh,
                pack,
                warnings: Vec::new(),
            })
        }
        Err(fresh_err) => match policy.ca_failed_fetch_policy {
            CaFailedFetchPolicy::StopAllOutput => {
                Err(ManifestProcessError::StopAllOutput(fresh_err))
            }
            CaFailedFetchPolicy::UseVerifiedCache => {
                // First warning records WHY the fresh build failed; a second is
                // appended only when the cache fallback actually succeeds.
                let mut warnings = vec![
                    Warning::new(format!("manifest failed fetch: {fresh_err}"))
                        .with_rfc_refs(&[RfcRef("RFC 9286 §6.6")])
                        .with_context(manifest_rsync_uri),
                ];
                match load_and_revalidate_cached_pack(
                    store,
                    manifest_rsync_uri,
                    publication_point_rsync_uri,
                    validation_time,
                ) {
                    Ok(pack) => {
                        warnings.push(
                            Warning::new("using verified cache for publication point")
                                .with_rfc_refs(&[RfcRef("RFC 9286 §6.6")])
                                .with_context(manifest_rsync_uri),
                        );
                        Ok(PublicationPointResult {
                            source: PublicationPointSource::VerifiedCache,
                            pack,
                            warnings,
                        })
                    }
                    Err(cached_err) => Err(ManifestProcessError::NoUsableCache {
                        fresh: fresh_err,
                        cached: cached_err,
                    }),
                }
            }
        },
    }
}
/// Loads the persisted verified pack for `manifest_rsync_uri`, checks that it
/// really belongs to this manifest/publication point, and revalidates the
/// contained manifest at `validation_time`.
fn load_and_revalidate_cached_pack(
    store: &RocksStore,
    manifest_rsync_uri: &str,
    publication_point_rsync_uri: &str,
    validation_time: time::OffsetDateTime,
) -> Result<VerifiedPublicationPointPack, ManifestCachedError> {
    let key = VerifiedKey::from_manifest_rsync_uri(manifest_rsync_uri);
    let bytes = store
        .get_verified(&key)?
        .ok_or_else(|| ManifestCachedError::MissingVerifiedCache(key.as_str().to_string()))?;
    let pack = VerifiedPublicationPointPack::decode(&bytes)?;
    // Guard against key/pack drift in the store.
    // NOTE(review): StorageError::RocksDb is reused to signal these
    // mismatches; a dedicated error variant would be clearer.
    if pack.manifest_rsync_uri != manifest_rsync_uri {
        return Err(ManifestCachedError::InvalidPack(StorageError::RocksDb(
            "cached pack manifest_rsync_uri does not match key".to_string(),
        )));
    }
    if pack.publication_point_rsync_uri != publication_point_rsync_uri {
        return Err(ManifestCachedError::InvalidPack(StorageError::RocksDb(
            "cached pack publication_point_rsync_uri does not match expected".to_string(),
        )));
    }
    revalidate_pack_with_current_time(&pack, validation_time).map_err(ManifestCachedError::from)?;
    Ok(pack)
}
/// Re-runs the manifest checks on a cached pack at the current time: decode,
/// embedded-EE validation, CMS signature, and the thisUpdate/nextUpdate
/// window. Mirrors the freshness checks performed in `try_build_fresh_pack`
/// (file hashes were already verified when the pack was built).
fn revalidate_pack_with_current_time(
    pack: &VerifiedPublicationPointPack,
    validation_time: time::OffsetDateTime,
) -> Result<(), ManifestFreshError> {
    let manifest = ManifestObject::decode_der(&pack.manifest_bytes)?;
    manifest.validate_embedded_ee_cert()?;
    manifest.signed_object.verify()?;
    let this_update = manifest
        .manifest
        .this_update
        .to_offset(time::UtcOffset::UTC);
    let next_update = manifest
        .manifest
        .next_update
        .to_offset(time::UtcOffset::UTC);
    let now = validation_time.to_offset(time::UtcOffset::UTC);
    // Both bounds are inclusive here (RFC 9286 §6.3).
    if now < this_update || now > next_update {
        return Err(ManifestFreshError::StaleOrEarly {
            this_update_rfc3339_utc: this_update
                .format(&time::format_description::well_known::Rfc3339)
                .expect("format thisUpdate"),
            next_update_rfc3339_utc: next_update
                .format(&time::format_description::well_known::Rfc3339)
                .expect("format nextUpdate"),
            validation_time_rfc3339_utc: now
                .format(&time::format_description::well_known::Rfc3339)
                .expect("format validation_time"),
        });
    }
    Ok(())
}
/// Builds a verified publication-point pack from `raw_objects`: load and fully
/// verify the manifest (decode, embedded EE cert, CMS signature, time window),
/// then load every listed file and check its SHA-256 against the manifest
/// entry (RFC 9286 §6.2–§6.5). Any failure aborts the whole pack.
fn try_build_fresh_pack(
    store: &RocksStore,
    manifest_rsync_uri: &str,
    publication_point_rsync_uri: &str,
    validation_time: time::OffsetDateTime,
) -> Result<VerifiedPublicationPointPack, ManifestFreshError> {
    let manifest_bytes = store
        .get_raw(manifest_rsync_uri)
        .map_err(|e| ManifestFreshError::MissingManifest {
            manifest_rsync_uri: format!("{manifest_rsync_uri} ({e})"),
        })?
        .ok_or_else(|| ManifestFreshError::MissingManifest {
            manifest_rsync_uri: manifest_rsync_uri.to_string(),
        })?;
    let manifest = ManifestObject::decode_der(&manifest_bytes)?;
    manifest.validate_embedded_ee_cert()?;
    manifest.signed_object.verify()?;
    let this_update = manifest
        .manifest
        .this_update
        .to_offset(time::UtcOffset::UTC);
    let next_update = manifest
        .manifest
        .next_update
        .to_offset(time::UtcOffset::UTC);
    let now = validation_time.to_offset(time::UtcOffset::UTC);
    // Inclusive thisUpdate/nextUpdate window (RFC 9286 §6.3).
    if now < this_update || now > next_update {
        return Err(ManifestFreshError::StaleOrEarly {
            this_update_rfc3339_utc: this_update
                .format(&time::format_description::well_known::Rfc3339)
                .expect("format thisUpdate"),
            next_update_rfc3339_utc: next_update
                .format(&time::format_description::well_known::Rfc3339)
                .expect("format nextUpdate"),
            validation_time_rfc3339_utc: now
                .format(&time::format_description::well_known::Rfc3339)
                .expect("format validation_time"),
        });
    }
    let mut files = Vec::with_capacity(manifest.manifest.files.len());
    for entry in &manifest.manifest.files {
        // Manifest entries are bare file names relative to the publication point.
        let rsync_uri = join_rsync_dir_and_file(publication_point_rsync_uri, &entry.file_name);
        let bytes = store
            .get_raw(&rsync_uri)
            .map_err(|_e| ManifestFreshError::MissingFile {
                rsync_uri: rsync_uri.clone(),
            })?
            .ok_or_else(|| ManifestFreshError::MissingFile {
                rsync_uri: rsync_uri.clone(),
            })?;
        // Hash check per RFC 9286 §6.5: content must match the manifest entry.
        let computed = sha2::Sha256::digest(&bytes);
        if computed.as_slice() != entry.hash_bytes.as_slice() {
            return Err(ManifestFreshError::HashMismatch { rsync_uri });
        }
        files.push(crate::storage::PackFile::from_bytes_compute_sha256(
            rsync_uri, bytes,
        ));
    }
    Ok(VerifiedPublicationPointPack {
        format_version: VerifiedPublicationPointPack::FORMAT_VERSION_V1,
        manifest_rsync_uri: manifest_rsync_uri.to_string(),
        publication_point_rsync_uri: publication_point_rsync_uri.to_string(),
        this_update: crate::storage::PackTime::from_utc_offset_datetime(this_update),
        next_update: crate::storage::PackTime::from_utc_offset_datetime(next_update),
        verified_at: crate::storage::PackTime::from_utc_offset_datetime(now),
        manifest_bytes,
        files,
    })
}
/// Joins a publication-point base URI and a manifest file name, inserting a
/// single `/` separator only when `base` does not already end with one.
fn join_rsync_dir_and_file(base: &str, file_name: &str) -> String {
    match base.strip_suffix('/') {
        Some(dir) => format!("{dir}/{file_name}"),
        None => format!("{base}/{file_name}"),
    }
}

10
src/validation/mod.rs Normal file
View File

@ -0,0 +1,10 @@
// Validation submodules, kept in alphabetical order (declaration order has no
// semantic effect in Rust).
pub mod ca_instance;
pub mod ca_path;
pub mod cert_path;
pub mod from_tal;
pub mod manifest;
pub mod objects;
pub mod run;
pub mod run_tree_from_tal;
pub mod tree;
pub mod tree_runner;

1293
src/validation/objects.rs Normal file

File diff suppressed because it is too large Load Diff

86
src/validation/run.rs Normal file
View File

@ -0,0 +1,86 @@
use crate::fetch::rsync::RsyncFetcher;
use crate::policy::Policy;
use crate::storage::{RocksStore, VerifiedKey};
use crate::sync::repo::{RepoSyncResult, sync_publication_point};
use crate::sync::rrdp::Fetcher as HttpFetcher;
use crate::validation::manifest::{PublicationPointResult, process_manifest_publication_point};
use crate::validation::objects::{
IssuerCaCertificateResolver, ObjectsOutput, ObjectsProcessError,
process_verified_publication_point_pack,
};
/// Combined output of one end-to-end publication-point run: the repo sync
/// result, the manifest-processing result, and the signed-object processing
/// result.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RunOutput {
    pub repo_sync: RepoSyncResult,
    pub publication_point: PublicationPointResult,
    pub objects: ObjectsOutput,
}
/// Failure of any of the three run stages (sync, manifest, objects); each
/// wraps the stage's own error type.
#[derive(Debug, thiserror::Error)]
pub enum RunError {
    #[error("repo sync failed: {0}")]
    RepoSync(#[from] crate::sync::repo::RepoSyncError),
    #[error("manifest processing failed: {0}")]
    Manifest(#[from] crate::validation::manifest::ManifestProcessError),
    #[error("objects processing failed: {0}")]
    Objects(#[from] ObjectsProcessError),
}
/// v1 serial offline-friendly end-to-end execution for a single publication point.
///
/// This orchestrates:
/// 1) repo sync (RRDP or rsync fallback) into `raw_objects`
/// 2) manifest RP processing into a verified pack (`verified:<manifest-rsync-uri>`)
/// 3) signed object processing (ROA/ASPA) from the verified pack
///
/// # Errors
/// Stops at the first failing stage and returns it as the matching
/// [`RunError`] variant; later stages are not attempted.
pub fn run_publication_point_once(
    store: &RocksStore,
    policy: &Policy,
    rrdp_notification_uri: Option<&str>,
    rsync_base_uri: &str,
    manifest_rsync_uri: &str,
    publication_point_rsync_uri: &str,
    http_fetcher: &dyn HttpFetcher,
    rsync_fetcher: &dyn RsyncFetcher,
    issuer_resolver: &dyn IssuerCaCertificateResolver,
    validation_time: time::OffsetDateTime,
) -> Result<RunOutput, RunError> {
    // Stage 1: bring raw objects up to date (RRDP preferred, rsync fallback).
    let repo_sync = sync_publication_point(
        store,
        policy,
        rrdp_notification_uri,
        rsync_base_uri,
        http_fetcher,
        rsync_fetcher,
    )?;
    // Stage 2: verify the manifest and build/load the verified pack.
    let publication_point = process_manifest_publication_point(
        store,
        policy,
        manifest_rsync_uri,
        publication_point_rsync_uri,
        validation_time,
    )?;
    // Stage 3: validate the signed objects referenced by the pack.
    let objects = process_verified_publication_point_pack(
        &publication_point.pack,
        policy,
        issuer_resolver,
        validation_time,
    )?;
    Ok(RunOutput {
        repo_sync,
        publication_point,
        objects,
    })
}
/// Returns whether a verified pack is stored for `manifest_rsync_uri`.
/// Storage errors are stringified for callers that don't handle `StorageError`.
pub fn verified_pack_exists(store: &RocksStore, manifest_rsync_uri: &str) -> Result<bool, String> {
    let key = VerifiedKey::from_manifest_rsync_uri(manifest_rsync_uri);
    match store.get_verified(&key) {
        Ok(found) => Ok(found.is_some()),
        Err(e) => Err(e.to_string()),
    }
}

View File

@ -0,0 +1,202 @@
use url::Url;
use crate::data_model::ta::TrustAnchor;
use crate::sync::rrdp::Fetcher;
use crate::audit::PublicationPointAudit;
use crate::validation::from_tal::{
DiscoveredRootCaInstance, FromTalError, discover_root_ca_instance_from_tal_and_ta_der,
discover_root_ca_instance_from_tal_url,
};
use crate::validation::tree::{CaInstanceHandle, TreeRunConfig, TreeRunError, TreeRunOutput, run_tree_serial};
use crate::validation::tree_runner::Rpkiv1PublicationPointRunner;
/// Result of a serial tree run started from a TAL.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RunTreeFromTalOutput {
    // Root CA instance discovered from the TAL (trust anchor plus its URIs).
    pub discovery: DiscoveredRootCaInstance,
    // Aggregated output of walking the CA tree below the trust anchor.
    pub tree: TreeRunOutput,
}
/// Like [`RunTreeFromTalOutput`], with a per-publication-point audit trail.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RunTreeFromTalAuditOutput {
    // Root CA instance discovered from the TAL.
    pub discovery: DiscoveredRootCaInstance,
    // Aggregated tree-walk output.
    pub tree: TreeRunOutput,
    // One audit record per publication point, in processing order.
    pub publication_points: Vec<PublicationPointAudit>,
}
/// Errors surfaced by the `run_tree_from_tal_*` entry points.
#[derive(Debug, thiserror::Error)]
pub enum RunTreeFromTalError {
    /// TAL fetch/parse or root CA instance discovery failed.
    #[error("{0}")]
    FromTal(#[from] FromTalError),
    /// The tree walk itself failed.
    #[error("{0}")]
    Tree(#[from] TreeRunError),
}
/// Builds the depth-0 [`CaInstanceHandle`] for a trust anchor.
///
/// `ca_certificate_rsync_uri` may be `None` for trust anchor certificates
/// downloaded from TAL URIs, which have no known rsync publication URI.
pub fn root_handle_from_trust_anchor(
    trust_anchor: &TrustAnchor,
    ca_certificate_rsync_uri: Option<String>,
    ca_instance: &crate::validation::ca_instance::CaInstanceUris,
) -> CaInstanceHandle {
    // Borrow the parsed TA certificate instead of cloning the whole
    // `ResourceCertificate`: only the two resource extensions are read here,
    // so cloning just those fields avoids a full deep copy.
    let ta_extensions = &trust_anchor.ta_certificate.rc_ca.tbs.extensions;
    CaInstanceHandle {
        depth: 0,
        ca_certificate_der: trust_anchor.ta_certificate.raw_der.clone(),
        ca_certificate_rsync_uri,
        // For a trust anchor the effective resources are its own extensions.
        effective_ip_resources: ta_extensions.ip_resources.clone(),
        effective_as_resources: ta_extensions.as_resources.clone(),
        rsync_base_uri: ca_instance.rsync_base_uri.clone(),
        manifest_rsync_uri: ca_instance.manifest_rsync_uri.clone(),
        publication_point_rsync_uri: ca_instance.publication_point_rsync_uri.clone(),
        rrdp_notification_uri: ca_instance.rrdp_notification_uri.clone(),
    }
}
/// Discovers the root CA instance from a TAL URL and runs the serial tree walk.
pub fn run_tree_from_tal_url_serial(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_url: &str,
    http_fetcher: &dyn Fetcher,
    rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
) -> Result<RunTreeFromTalOutput, RunTreeFromTalError> {
    // Resolve the TAL into a trust anchor and its publication-point URIs.
    let discovery = discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?;
    // TA certificates fetched from a TAL URL have no rsync publication URI.
    let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
    let pipeline = Rpkiv1PublicationPointRunner {
        store,
        policy,
        http_fetcher,
        rsync_fetcher,
        validation_time,
    };
    let tree = run_tree_serial(root, &pipeline, config)?;
    Ok(RunTreeFromTalOutput { discovery, tree })
}
/// Same pipeline as [`run_tree_from_tal_url_serial`], additionally collecting
/// one [`PublicationPointAudit`] per publication point.
pub fn run_tree_from_tal_url_serial_audit(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_url: &str,
    http_fetcher: &dyn Fetcher,
    rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
    /// Decorator runner: records each audit as the inner runner produces it.
    struct Recorder<'a> {
        inner: &'a Rpkiv1PublicationPointRunner<'a>,
        sink: &'a std::cell::RefCell<Vec<PublicationPointAudit>>,
    }
    impl<'a> crate::validation::tree::PublicationPointRunner for Recorder<'a> {
        fn run_publication_point(
            &self,
            ca: &crate::validation::tree::CaInstanceHandle,
        ) -> Result<crate::validation::tree::PublicationPointRunResult, String> {
            let result = self.inner.run_publication_point(ca)?;
            self.sink.borrow_mut().push(result.audit.clone());
            Ok(result)
        }
    }
    let discovery = discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?;
    let inner = Rpkiv1PublicationPointRunner {
        store,
        policy,
        http_fetcher,
        rsync_fetcher,
        validation_time,
    };
    // RefCell because the runner trait takes `&self`; the walk is single-threaded.
    let sink: std::cell::RefCell<Vec<PublicationPointAudit>> = std::cell::RefCell::new(Vec::new());
    let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
    let tree = run_tree_serial(root, &Recorder { inner: &inner, sink: &sink }, config)?;
    Ok(RunTreeFromTalAuditOutput {
        discovery,
        tree,
        publication_points: sink.into_inner(),
    })
}
/// Runs the serial tree walk from in-memory TAL and TA certificate bytes,
/// enabling fully offline discovery.
pub fn run_tree_from_tal_and_ta_der_serial(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_bytes: &[u8],
    ta_der: &[u8],
    resolved_ta_uri: Option<&Url>,
    http_fetcher: &dyn Fetcher,
    rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
) -> Result<RunTreeFromTalOutput, RunTreeFromTalError> {
    let discovery =
        discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
    let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
    let pipeline = Rpkiv1PublicationPointRunner {
        store,
        policy,
        http_fetcher,
        rsync_fetcher,
        validation_time,
    };
    let tree = run_tree_serial(root, &pipeline, config)?;
    Ok(RunTreeFromTalOutput { discovery, tree })
}
/// Audit-collecting variant of [`run_tree_from_tal_and_ta_der_serial`].
pub fn run_tree_from_tal_and_ta_der_serial_audit(
    store: &crate::storage::RocksStore,
    policy: &crate::policy::Policy,
    tal_bytes: &[u8],
    ta_der: &[u8],
    resolved_ta_uri: Option<&Url>,
    http_fetcher: &dyn Fetcher,
    rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
    validation_time: time::OffsetDateTime,
    config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
    /// Decorator runner: records each audit as the inner runner produces it.
    struct Recorder<'a> {
        inner: &'a Rpkiv1PublicationPointRunner<'a>,
        sink: &'a std::cell::RefCell<Vec<PublicationPointAudit>>,
    }
    impl<'a> crate::validation::tree::PublicationPointRunner for Recorder<'a> {
        fn run_publication_point(
            &self,
            ca: &crate::validation::tree::CaInstanceHandle,
        ) -> Result<crate::validation::tree::PublicationPointRunResult, String> {
            let result = self.inner.run_publication_point(ca)?;
            self.sink.borrow_mut().push(result.audit.clone());
            Ok(result)
        }
    }
    let discovery =
        discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
    let inner = Rpkiv1PublicationPointRunner {
        store,
        policy,
        http_fetcher,
        rsync_fetcher,
        validation_time,
    };
    // RefCell because the runner trait takes `&self`; the walk is single-threaded.
    let sink: std::cell::RefCell<Vec<PublicationPointAudit>> = std::cell::RefCell::new(Vec::new());
    let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
    let tree = run_tree_serial(root, &Recorder { inner: &inner, sink: &sink }, config)?;
    Ok(RunTreeFromTalAuditOutput {
        discovery,
        tree,
        publication_points: sink.into_inner(),
    })
}

163
src/validation/tree.rs Normal file
View File

@ -0,0 +1,163 @@
use crate::report::{RfcRef, Warning};
use crate::storage::VerifiedPublicationPointPack;
use crate::validation::manifest::PublicationPointSource;
use crate::audit::PublicationPointAudit;
use crate::data_model::rc::{AsResourceSet, IpResourceSet};
use crate::validation::objects::{AspaAttestation, ObjectsOutput, Vrp};
/// Traversal limits for a tree run; `None` in a field means unlimited.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TreeRunConfig {
    /// Max CA instance depth to process (0 = root only).
    pub max_depth: Option<usize>,
    /// Max number of CA instances to process.
    pub max_instances: Option<usize>,
}
impl Default for TreeRunConfig {
fn default() -> Self {
Self {
max_depth: None,
max_instances: None,
}
}
}
/// One unit of work for the tree engine: a CA certificate plus the URIs of
/// the publication point it signs.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CaInstanceHandle {
    /// Distance from the trust anchor (root = 0).
    pub depth: usize,
    /// DER bytes of the CA certificate for this CA instance.
    pub ca_certificate_der: Vec<u8>,
    /// rsync URI of this CA certificate object (where it is published).
    ///
    /// This is used for strict AIA binding checks (RFC 6487 §4.8.7) when validating
    /// subordinate CA and EE certificates. Trust anchor certificates downloaded from
    /// TAL URIs may not have an rsync publication URI, so this can be `None`.
    pub ca_certificate_rsync_uri: Option<String>,
    /// Effective (fully resolved) resources for this CA instance, used for resource-path
    /// validation of subordinate CA certificates.
    pub effective_ip_resources: Option<IpResourceSet>,
    pub effective_as_resources: Option<AsResourceSet>,
    // rsync base URI of the CA's repository publication point.
    pub rsync_base_uri: String,
    // rsync URI of the CA's manifest object.
    pub manifest_rsync_uri: String,
    // rsync URI (directory) of the publication point itself.
    pub publication_point_rsync_uri: String,
    // RRDP notification URI when the CA advertises one; `None` for rsync-only CAs.
    pub rrdp_notification_uri: Option<String>,
}
impl CaInstanceHandle {
    /// Returns this handle re-tagged with `depth` (builder style, consumes `self`).
    pub fn with_depth(self, depth: usize) -> Self {
        Self { depth, ..self }
    }
}
/// Everything one publication-point run produced, as returned by a
/// [`PublicationPointRunner`] implementation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct PublicationPointRunResult {
    // Whether the pack is fresh or came from the verified cache.
    pub source: PublicationPointSource,
    // The manifest-verified set of publication-point files.
    pub pack: VerifiedPublicationPointPack,
    // Warnings raised while syncing/verifying this publication point.
    pub warnings: Vec<Warning>,
    // Signed-object (ROA/ASPA) processing output for this pack.
    pub objects: ObjectsOutput,
    // Per-publication-point audit record assembled by the runner.
    pub audit: PublicationPointAudit,
    /// Candidate child CA instances discovered from this publication point.
    ///
    /// RFC 9286 §6.6 restriction is enforced by the tree engine: if this
    /// publication point used verified cache due to failed fetch, children MUST NOT
    /// be enqueued/processed in this run.
    pub discovered_children: Vec<CaInstanceHandle>,
}
/// Aggregated result of walking the CA tree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TreeRunOutput {
    // Publication points processed successfully.
    pub instances_processed: usize,
    // Publication points whose runner invocation failed.
    pub instances_failed: usize,
    // All warnings accumulated across the run (runner, objects, engine).
    pub warnings: Vec<Warning>,
    // Validated ROA payloads collected from every processed publication point.
    pub vrps: Vec<Vrp>,
    // Validated ASPA attestations collected across the run.
    pub aspas: Vec<AspaAttestation>,
}
/// Fatal errors from the tree engine itself.
///
/// Note: individual publication-point failures are downgraded to warnings by
/// `run_tree_serial`; this error type is reserved for engine-level failures.
#[derive(Debug, thiserror::Error)]
pub enum TreeRunError {
    #[error("publication point runner failed: {0}")]
    Runner(String),
}
/// Strategy interface the tree engine calls once per CA instance.
///
/// Implementations perform the full sync/verify/process cycle for the CA's
/// publication point and report discovered child CA instances.
pub trait PublicationPointRunner {
    fn run_publication_point(
        &self,
        ca: &CaInstanceHandle,
    ) -> Result<PublicationPointRunResult, String>;
}
/// Walks the CA tree breadth-first from `root`, running each publication point
/// through `runner` and aggregating warnings, VRPs and ASPA attestations.
///
/// A publication point whose runner invocation fails is counted in
/// `instances_failed` and recorded as a warning; the walk continues. Children
/// of a publication point that was served from the verified cache are not
/// enqueued (RFC 9286 §6.6).
pub fn run_tree_serial(
    root: CaInstanceHandle,
    runner: &dyn PublicationPointRunner,
    config: &TreeRunConfig,
) -> Result<TreeRunOutput, TreeRunError> {
    let mut queue: std::collections::VecDeque<CaInstanceHandle> = std::collections::VecDeque::new();
    queue.push_back(root);
    // The manifest URI identifies a publication point. BFS order means the
    // first visit happens at the shallowest depth, so a plain set suffices.
    let mut visited_manifest_uris: std::collections::HashSet<String> = std::collections::HashSet::new();
    let mut instances_processed = 0usize;
    let mut instances_failed = 0usize;
    let mut warnings: Vec<Warning> = Vec::new();
    let mut vrps: Vec<Vrp> = Vec::new();
    let mut aspas: Vec<AspaAttestation> = Vec::new();
    while let Some(ca) = queue.pop_front() {
        if !visited_manifest_uris.insert(ca.manifest_rsync_uri.clone()) {
            continue;
        }
        if let Some(max_instances) = config.max_instances {
            if instances_processed >= max_instances {
                break;
            }
        }
        if let Some(max_depth) = config.max_depth {
            if ca.depth > max_depth {
                continue;
            }
        }
        let res = match runner.run_publication_point(&ca) {
            Ok(v) => v,
            Err(e) => {
                // Record the failure and keep walking the rest of the tree.
                instances_failed += 1;
                warnings.push(
                    Warning::new(format!("publication point failed: {e}"))
                        .with_context(&ca.manifest_rsync_uri),
                );
                continue;
            }
        };
        instances_processed += 1;
        let enqueue_children = res.source == PublicationPointSource::Fresh;
        // `res` is owned and these fields are never read again, so move them
        // into the accumulators instead of cloning each Vec.
        warnings.extend(res.warnings);
        warnings.extend(res.objects.warnings);
        vrps.extend(res.objects.vrps);
        aspas.extend(res.objects.aspas);
        if !enqueue_children && !res.discovered_children.is_empty() {
            warnings.push(
                Warning::new("skipping child CA discovery due to failed fetch cache use")
                    .with_rfc_refs(&[RfcRef("RFC 9286 §6.6")])
                    .with_context(&ca.manifest_rsync_uri),
            );
        }
        if enqueue_children {
            for child in res.discovered_children {
                queue.push_back(child.with_depth(ca.depth + 1));
            }
        }
    }
    Ok(TreeRunOutput {
        instances_processed,
        instances_failed,
        warnings,
        vrps,
        aspas,
    })
}

View File

@ -0,0 +1,836 @@
use crate::fetch::rsync::RsyncFetcher;
use crate::policy::Policy;
use crate::report::{RfcRef, Warning};
use crate::storage::RocksStore;
use crate::sync::repo::sync_publication_point;
use crate::sync::rrdp::Fetcher;
use crate::audit::{
AuditObjectKind, AuditObjectResult, AuditWarning, ObjectAuditEntry, PublicationPointAudit,
sha256_hex, sha256_hex_from_32,
};
use crate::validation::ca_instance::ca_instance_uris_from_ca_certificate;
use crate::validation::ca_path::{CaPathError, validate_subordinate_ca_cert};
use crate::validation::manifest::{PublicationPointSource, process_manifest_publication_point};
use crate::validation::objects::process_verified_publication_point_pack_for_issuer;
use crate::validation::tree::{CaInstanceHandle, PublicationPointRunResult, PublicationPointRunner};
/// `PublicationPointRunner` implementation wired to the v1 serial pipeline:
/// RocksDB-backed storage plus injected HTTP (RRDP) and rsync fetchers.
pub struct Rpkiv1PublicationPointRunner<'a> {
    pub store: &'a RocksStore,
    pub policy: &'a Policy,
    pub http_fetcher: &'a dyn Fetcher,
    pub rsync_fetcher: &'a dyn RsyncFetcher,
    // Reference instant for all validity-window checks in this run.
    pub validation_time: time::OffsetDateTime,
}
impl<'a> PublicationPointRunner for Rpkiv1PublicationPointRunner<'a> {
    /// Runs one publication point end-to-end: sync, manifest verification,
    /// signed-object processing, child CA discovery, and audit assembly.
    ///
    /// A failed repo sync is downgraded to a warning so validation can continue
    /// from cached raw data (RFC 8182 §3.4.5 / RFC 9286 §6.6); a failed
    /// manifest stage is a hard error for this publication point.
    fn run_publication_point(
        &self,
        ca: &CaInstanceHandle,
    ) -> Result<PublicationPointRunResult, String> {
        let mut warnings: Vec<Warning> = Vec::new();
        // Stage 1: sync. Soft-fail — cached raw objects may still let stage 2 succeed.
        if let Err(e) = sync_publication_point(
            self.store,
            self.policy,
            ca.rrdp_notification_uri.as_deref(),
            &ca.rsync_base_uri,
            self.http_fetcher,
            self.rsync_fetcher,
        ) {
            warnings.push(
                Warning::new(format!("repo sync failed (continuing with cached/raw data): {e}"))
                    .with_rfc_refs(&[RfcRef("RFC 8182 §3.4.5"), RfcRef("RFC 9286 §6.6")])
                    .with_context(&ca.rsync_base_uri),
            );
        }
        // Stage 2: manifest verification. Hard-fail — without a verified pack
        // there is nothing further to process for this publication point.
        let pp = match process_manifest_publication_point(
            self.store,
            self.policy,
            &ca.manifest_rsync_uri,
            &ca.publication_point_rsync_uri,
            self.validation_time,
        ) {
            Ok(v) => v,
            Err(e) => return Err(format!("{e}")),
        };
        warnings.extend(pp.warnings.clone());
        // Stage 3: ROA/ASPA processing under this CA's effective resources.
        let objects = process_verified_publication_point_pack_for_issuer(
            &pp.pack,
            self.policy,
            &ca.ca_certificate_der,
            ca.ca_certificate_rsync_uri.as_deref(),
            ca.effective_ip_resources.as_ref(),
            ca.effective_as_resources.as_ref(),
            self.validation_time,
        );
        // Stage 4: child discovery — only from a fresh pack (RFC 9286 §6.6).
        // Discovery failure degrades to a warning; stage-3 output is kept.
        let (discovered_children, child_audits) = if pp.source == PublicationPointSource::Fresh {
            match discover_children_from_fresh_pack_with_audit(ca, &pp.pack, self.validation_time) {
                Ok(out) => (out.children, out.audits),
                Err(e) => {
                    warnings.push(
                        Warning::new(format!("child CA discovery failed: {e}"))
                            .with_rfc_refs(&[RfcRef("RFC 6487 §7.2")])
                            .with_context(&ca.manifest_rsync_uri),
                    );
                    (Vec::new(), Vec::new())
                }
            }
        } else {
            (Vec::new(), Vec::new())
        };
        let audit = build_publication_point_audit(ca, &pp, &warnings, &objects, &child_audits);
        Ok(PublicationPointRunResult {
            source: pp.source,
            pack: pp.pack,
            warnings,
            objects,
            audit,
            discovered_children,
        })
    }
}
// Output of child-CA discovery: enqueue-able handles plus per-.cer audit entries.
struct ChildDiscoveryOutput {
    children: Vec<CaInstanceHandle>,
    audits: Vec<ObjectAuditEntry>,
}
fn discover_children_from_fresh_pack_with_audit(
issuer: &CaInstanceHandle,
pack: &crate::storage::VerifiedPublicationPointPack,
validation_time: time::OffsetDateTime,
) -> Result<ChildDiscoveryOutput, String> {
let issuer_ca_der = issuer.ca_certificate_der.as_slice();
let (issuer_crl_uri, issuer_crl_der) = select_issuer_crl_from_pack(issuer, pack)?;
let mut out: Vec<CaInstanceHandle> = Vec::new();
let mut audits: Vec<ObjectAuditEntry> = Vec::new();
for f in &pack.files {
if !f.rsync_uri.ends_with(".cer") {
continue;
}
let child_der = f.bytes.as_slice();
let validated = match validate_subordinate_ca_cert(
child_der,
issuer_ca_der,
issuer_crl_der,
issuer.ca_certificate_rsync_uri.as_deref(),
issuer_crl_uri,
issuer.effective_ip_resources.as_ref(),
issuer.effective_as_resources.as_ref(),
validation_time,
) {
Ok(v) => v,
Err(CaPathError::ChildNotCa) => {
audits.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(),
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Certificate,
result: AuditObjectResult::Skipped,
detail: Some("skipped: not a CA resource certificate".to_string()),
});
continue;
}
Err(e) => {
audits.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(),
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Certificate,
result: AuditObjectResult::Error,
detail: Some(e.to_string()),
});
continue;
}
};
let uris = match ca_instance_uris_from_ca_certificate(&validated.child_ca) {
Ok(v) => v,
Err(e) => {
audits.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(),
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Certificate,
result: AuditObjectResult::Error,
detail: Some(format!("CA instance URI discovery failed: {e}")),
});
continue;
}
};
out.push(CaInstanceHandle {
depth: 0,
ca_certificate_der: child_der.to_vec(),
ca_certificate_rsync_uri: Some(f.rsync_uri.clone()),
effective_ip_resources: validated.effective_ip_resources.clone(),
effective_as_resources: validated.effective_as_resources.clone(),
rsync_base_uri: uris.rsync_base_uri,
manifest_rsync_uri: uris.manifest_rsync_uri,
publication_point_rsync_uri: uris.publication_point_rsync_uri,
rrdp_notification_uri: uris.rrdp_notification_uri,
});
audits.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(),
sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Certificate,
result: AuditObjectResult::Ok,
detail: Some("validated subordinate CA certificate; enqueued CA instance".to_string()),
});
}
Ok(ChildDiscoveryOutput { children: out, audits })
}
/// Locates the issuer's CRL inside the verified pack.
///
/// Preference order: an exact URI match from the issuer's CRL distribution
/// points extension, then any decodable `.crl` whose issuer DN equals the
/// issuer CA's subject DN.
fn select_issuer_crl_from_pack<'a>(
    issuer: &CaInstanceHandle,
    pack: &'a crate::storage::VerifiedPublicationPointPack,
) -> Result<(&'a str, &'a [u8]), String> {
    let issuer_ca =
        crate::data_model::rc::ResourceCertificate::decode_der(&issuer.ca_certificate_der)
            .map_err(|e| e.to_string())?;
    // First choice: a pack file at one of the CRLDP URIs.
    if let Some(uris) = issuer_ca.tbs.extensions.crl_distribution_points_uris.as_ref() {
        let by_crldp = uris
            .iter()
            .find_map(|u| pack.files.iter().find(|f| f.rsync_uri == u.as_str()));
        if let Some(f) = by_crldp {
            return Ok((f.rsync_uri.as_str(), f.bytes.as_slice()));
        }
    }
    // Fallback: match by issuer DN over every decodable .crl in the pack.
    let subject_dn = issuer_ca.tbs.subject_dn;
    pack.files
        .iter()
        .filter(|f| f.rsync_uri.ends_with(".crl"))
        .find(|f| {
            crate::data_model::crl::RpkixCrl::decode_der(&f.bytes)
                .map(|crl| crl.issuer_dn == subject_dn)
                .unwrap_or(false)
        })
        .map(|f| (f.rsync_uri.as_str(), f.bytes.as_slice()))
        .ok_or_else(|| "issuer CRL not found in verified pack (RFC 9286 §7)".to_string())
}
/// Maps an rsync URI to an audit object kind by file extension; anything that
/// is not `.crl`/`.cer`/`.roa`/`.asa` is classified as `Other`.
fn kind_from_rsync_uri(uri: &str) -> AuditObjectKind {
    let table = [
        (".crl", AuditObjectKind::Crl),
        (".cer", AuditObjectKind::Certificate),
        (".roa", AuditObjectKind::Roa),
        (".asa", AuditObjectKind::Aspa),
    ];
    table
        .iter()
        .find(|(ext, _)| uri.ends_with(*ext))
        .map(|(_, kind)| kind.clone())
        .unwrap_or(AuditObjectKind::Other)
}
/// Assembles the audit record for one publication point.
///
/// Entries are built by overlaying, in order of increasing specificity: a
/// "skipped" default for every pack file, best-effort CRL decode status,
/// child-CA discovery entries (.cer), and signed-object entries (.roa/.asa).
/// The manifest itself is emitted first, then pack files in pack order.
fn build_publication_point_audit(
    ca: &CaInstanceHandle,
    pp: &crate::validation::manifest::PublicationPointResult,
    runner_warnings: &[Warning],
    objects: &crate::validation::objects::ObjectsOutput,
    child_audits: &[ObjectAuditEntry],
) -> PublicationPointAudit {
    use crate::data_model::crl::RpkixCrl;
    use crate::validation::manifest::PublicationPointSource;
    use std::collections::HashMap;
    // Baseline: every pack file starts as "skipped" until an overlay replaces it.
    let mut audit_by_uri: HashMap<String, ObjectAuditEntry> = HashMap::new();
    for f in &pp.pack.files {
        audit_by_uri.insert(
            f.rsync_uri.clone(),
            ObjectAuditEntry {
                rsync_uri: f.rsync_uri.clone(),
                sha256_hex: sha256_hex_from_32(&f.sha256),
                kind: kind_from_rsync_uri(&f.rsync_uri),
                result: AuditObjectResult::Skipped,
                detail: Some("skipped: not processed in stage2".to_string()),
            },
        );
    }
    // CRL decode status (best-effort).
    for f in &pp.pack.files {
        if !f.rsync_uri.ends_with(".crl") {
            continue;
        }
        let ok = RpkixCrl::decode_der(&f.bytes).is_ok();
        audit_by_uri.insert(
            f.rsync_uri.clone(),
            ObjectAuditEntry {
                rsync_uri: f.rsync_uri.clone(),
                sha256_hex: sha256_hex_from_32(&f.sha256),
                kind: AuditObjectKind::Crl,
                result: if ok {
                    AuditObjectResult::Ok
                } else {
                    AuditObjectResult::Error
                },
                detail: if ok {
                    None
                } else {
                    Some("CRL decode failed".to_string())
                },
            },
        );
    }
    // Child discovery audits (.cer).
    for e in child_audits {
        audit_by_uri.insert(e.rsync_uri.clone(), e.clone());
    }
    // Signed object audits (.roa/.asa).
    for e in &objects.audit {
        audit_by_uri.insert(e.rsync_uri.clone(), e.clone());
    }
    // Emit as a stable order: manifest first, then pack files as listed in the pack.
    let mut objects_out: Vec<ObjectAuditEntry> = Vec::with_capacity(pp.pack.files.len() + 1);
    objects_out.push(ObjectAuditEntry {
        rsync_uri: pp.pack.manifest_rsync_uri.clone(),
        sha256_hex: sha256_hex(&pp.pack.manifest_bytes),
        kind: AuditObjectKind::Manifest,
        result: AuditObjectResult::Ok,
        detail: None,
    });
    for f in &pp.pack.files {
        if let Some(e) = audit_by_uri.remove(&f.rsync_uri) {
            objects_out.push(e);
        } else {
            // Defensive: should not happen since the baseline covers every file.
            objects_out.push(ObjectAuditEntry {
                rsync_uri: f.rsync_uri.clone(),
                sha256_hex: sha256_hex_from_32(&f.sha256),
                kind: kind_from_rsync_uri(&f.rsync_uri),
                result: AuditObjectResult::Skipped,
                detail: Some("skipped: no audit entry".to_string()),
            });
        }
    }
    let mut warnings = Vec::new();
    // `runner_warnings` already includes `pp.warnings` (mirrors the runtime propagation behavior).
    warnings.extend(runner_warnings.iter().map(AuditWarning::from));
    warnings.extend(objects.warnings.iter().map(AuditWarning::from));
    PublicationPointAudit {
        rsync_base_uri: ca.rsync_base_uri.clone(),
        manifest_rsync_uri: ca.manifest_rsync_uri.clone(),
        publication_point_rsync_uri: ca.publication_point_rsync_uri.clone(),
        rrdp_notification_uri: ca.rrdp_notification_uri.clone(),
        source: match pp.source {
            PublicationPointSource::Fresh => "fresh".to_string(),
            PublicationPointSource::VerifiedCache => "verified_cache".to_string(),
        },
        this_update_rfc3339_utc: pp.pack.this_update.rfc3339_utc.clone(),
        next_update_rfc3339_utc: pp.pack.next_update.rfc3339_utc.clone(),
        verified_at_rfc3339_utc: pp.pack.verified_at.rfc3339_utc.clone(),
        warnings,
        objects: objects_out,
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::data_model::rc::ResourceCertificate;
use crate::fetch::rsync::LocalDirRsyncFetcher;
use crate::storage::{PackFile, PackTime, VerifiedPublicationPointPack};
use crate::sync::rrdp::Fetcher;
use crate::validation::tree::PublicationPointRunner;
use std::process::Command;
    // Test fetcher that fails every HTTP request, forcing the rsync path.
    struct NeverHttpFetcher;
    impl Fetcher for NeverHttpFetcher {
        fn fetch(&self, _uri: &str) -> Result<Vec<u8>, String> {
            Err("http fetch disabled in test".to_string())
        }
    }
fn openssl_available() -> bool {
Command::new("openssl")
.arg("version")
.output()
.map(|o| o.status.success())
.unwrap_or(false)
}
struct Generated {
issuer_ca_der: Vec<u8>,
child_ca_der: Vec<u8>,
issuer_crl_der: Vec<u8>,
}
fn run(cmd: &mut Command) {
let out = cmd.output().expect("run command");
if !out.status.success() {
panic!(
"command failed: {:?}\nstdout={}\nstderr={}",
cmd,
String::from_utf8_lossy(&out.stdout),
String::from_utf8_lossy(&out.stderr)
);
}
}
    // Uses the host `openssl` binary to mint an RPKI-style self-signed issuer
    // CA, a subordinate CA signed by it, and an issuer CRL, returning all
    // three as DER. Everything lives in a throwaway temp directory.
    fn generate_chain_and_crl() -> Generated {
        assert!(openssl_available(), "openssl is required for this test");
        let td = tempfile::tempdir().expect("tempdir");
        let dir = td.path();
        // `openssl ca` needs its CA database layout: newcerts dir, index, serial, crlnumber.
        std::fs::create_dir_all(dir.join("newcerts")).expect("newcerts");
        std::fs::write(dir.join("index.txt"), b"").expect("index");
        std::fs::write(dir.join("serial"), b"1000\n").expect("serial");
        std::fs::write(dir.join("crlnumber"), b"1000\n").expect("crlnumber");
        // Config: issuer gets RPKI extensions (SIA, RPKI certificate policy,
        // RFC 3779 IP/AS resources); child adds CRLDP/AIA pointing at the issuer.
        let cnf = format!(
            r#"
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = {dir}
database = $dir/index.txt
new_certs_dir = $dir/newcerts
certificate = $dir/issuer.pem
private_key = $dir/issuer.key
serial = $dir/serial
crlnumber = $dir/crlnumber
default_md = sha256
default_days = 365
default_crl_days = 1
policy = policy_any
x509_extensions = v3_issuer_ca
crl_extensions = crl_ext
unique_subject = no
copy_extensions = none
[ policy_any ]
commonName = supplied
[ req ]
prompt = no
distinguished_name = dn
[ dn ]
CN = Test Issuer CA
[ v3_issuer_ca ]
basicConstraints = critical,CA:true
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always
certificatePolicies = critical, 1.3.6.1.5.5.7.14.2
subjectInfoAccess = caRepository;URI:rsync://example.test/repo/issuer/, rpkiManifest;URI:rsync://example.test/repo/issuer/issuer.mft, rpkiNotify;URI:https://example.test/notification.xml
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/8
sbgp-autonomousSysNum = critical, AS:64496-64511
[ v3_child_ca ]
basicConstraints = critical,CA:true
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always
crlDistributionPoints = URI:rsync://example.test/repo/issuer/issuer.crl
authorityInfoAccess = caIssuers;URI:rsync://example.test/repo/issuer/issuer.cer
certificatePolicies = critical, 1.3.6.1.5.5.7.14.2
subjectInfoAccess = caRepository;URI:rsync://example.test/repo/child/, rpkiManifest;URI:rsync://example.test/repo/child/child.mft, rpkiNotify;URI:https://example.test/notification.xml
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16
sbgp-autonomousSysNum = critical, AS:64496
[ crl_ext ]
authorityKeyIdentifier = keyid:always
"#,
            dir = dir.display()
        );
        std::fs::write(dir.join("openssl.cnf"), cnf.as_bytes()).expect("write cnf");
        // Issuer key + self-signed issuer certificate.
        run(
            Command::new("openssl")
                .arg("genrsa")
                .arg("-out")
                .arg(dir.join("issuer.key"))
                .arg("2048"),
        );
        run(
            Command::new("openssl")
                .arg("req")
                .arg("-new")
                .arg("-x509")
                .arg("-sha256")
                .arg("-days")
                .arg("365")
                .arg("-key")
                .arg(dir.join("issuer.key"))
                .arg("-config")
                .arg(dir.join("openssl.cnf"))
                .arg("-extensions")
                .arg("v3_issuer_ca")
                .arg("-out")
                .arg(dir.join("issuer.pem")),
        );
        // Child key + CSR, then sign the child with the issuer via `openssl ca`.
        run(
            Command::new("openssl")
                .arg("genrsa")
                .arg("-out")
                .arg(dir.join("child.key"))
                .arg("2048"),
        );
        run(
            Command::new("openssl")
                .arg("req")
                .arg("-new")
                .arg("-key")
                .arg(dir.join("child.key"))
                .arg("-subj")
                .arg("/CN=Test Child CA")
                .arg("-out")
                .arg(dir.join("child.csr")),
        );
        run(
            Command::new("openssl")
                .arg("ca")
                .arg("-batch")
                .arg("-config")
                .arg(dir.join("openssl.cnf"))
                .arg("-in")
                .arg(dir.join("child.csr"))
                .arg("-extensions")
                .arg("v3_child_ca")
                .arg("-out")
                .arg(dir.join("child.pem")),
        );
        // Convert both certificates from PEM to DER.
        run(
            Command::new("openssl")
                .arg("x509")
                .arg("-in")
                .arg(dir.join("issuer.pem"))
                .arg("-outform")
                .arg("DER")
                .arg("-out")
                .arg(dir.join("issuer.cer")),
        );
        run(
            Command::new("openssl")
                .arg("x509")
                .arg("-in")
                .arg(dir.join("child.pem"))
                .arg("-outform")
                .arg("DER")
                .arg("-out")
                .arg(dir.join("child.cer")),
        );
        // Generate the issuer CRL and convert it to DER as well.
        run(
            Command::new("openssl")
                .arg("ca")
                .arg("-gencrl")
                .arg("-config")
                .arg(dir.join("openssl.cnf"))
                .arg("-out")
                .arg(dir.join("issuer.crl.pem")),
        );
        run(
            Command::new("openssl")
                .arg("crl")
                .arg("-in")
                .arg(dir.join("issuer.crl.pem"))
                .arg("-outform")
                .arg("DER")
                .arg("-out")
                .arg(dir.join("issuer.crl")),
        );
        Generated {
            issuer_ca_der: std::fs::read(dir.join("issuer.cer")).expect("read issuer der"),
            child_ca_der: std::fs::read(dir.join("child.cer")).expect("read child der"),
            issuer_crl_der: std::fs::read(dir.join("issuer.crl")).expect("read crl der"),
        }
    }
fn dummy_pack_with_files(files: Vec<PackFile>) -> VerifiedPublicationPointPack {
let now = time::OffsetDateTime::now_utc();
VerifiedPublicationPointPack {
format_version: VerifiedPublicationPointPack::FORMAT_VERSION_V1,
manifest_rsync_uri: "rsync://example.test/repo/issuer/issuer.mft".to_string(),
publication_point_rsync_uri: "rsync://example.test/repo/issuer/".to_string(),
this_update: PackTime::from_utc_offset_datetime(now),
next_update: PackTime::from_utc_offset_datetime(now + time::Duration::hours(1)),
verified_at: PackTime::from_utc_offset_datetime(now),
manifest_bytes: vec![0x01],
files,
}
}
#[test]
fn select_issuer_crl_from_pack_finds_matching_crl() {
let g = generate_chain_and_crl();
let pack = dummy_pack_with_files(vec![PackFile::from_bytes_compute_sha256(
"rsync://example.test/repo/issuer/issuer.crl",
g.issuer_crl_der.clone(),
)]);
let issuer_ca = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer");
let issuer = CaInstanceHandle {
depth: 0,
ca_certificate_der: g.issuer_ca_der.clone(),
ca_certificate_rsync_uri: None,
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
manifest_rsync_uri: "rsync://example.test/repo/issuer/issuer.mft".to_string(),
publication_point_rsync_uri: "rsync://example.test/repo/issuer/".to_string(),
rrdp_notification_uri: None,
};
let (uri, found) = select_issuer_crl_from_pack(&issuer, &pack).expect("find crl");
assert_eq!(uri, "rsync://example.test/repo/issuer/issuer.crl");
assert_eq!(found, g.issuer_crl_der.as_slice());
}
#[test]
fn discover_children_from_fresh_pack_discovers_child_ca() {
let g = generate_chain_and_crl();
let pack = dummy_pack_with_files(vec![
PackFile::from_bytes_compute_sha256(
"rsync://example.test/repo/issuer/issuer.crl",
g.issuer_crl_der.clone(),
),
PackFile::from_bytes_compute_sha256(
"rsync://example.test/repo/issuer/child.cer",
g.child_ca_der.clone(),
),
]);
let issuer_ca = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer");
let issuer = CaInstanceHandle {
depth: 0,
ca_certificate_der: g.issuer_ca_der.clone(),
ca_certificate_rsync_uri: None,
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
manifest_rsync_uri: "rsync://example.test/repo/issuer/issuer.mft".to_string(),
publication_point_rsync_uri: "rsync://example.test/repo/issuer/".to_string(),
rrdp_notification_uri: None,
};
let now = time::OffsetDateTime::now_utc();
let children = discover_children_from_fresh_pack_with_audit(&issuer, &pack, now)
.expect("discover children")
.children;
assert_eq!(children.len(), 1);
assert_eq!(children[0].rsync_base_uri, "rsync://example.test/repo/child/".to_string());
assert_eq!(
children[0].manifest_rsync_uri,
"rsync://example.test/repo/child/child.mft".to_string()
);
assert_eq!(
children[0].publication_point_rsync_uri,
"rsync://example.test/repo/child/".to_string()
);
assert_eq!(
children[0].rrdp_notification_uri.as_deref(),
Some("https://example.test/notification.xml")
);
}
#[test]
fn runner_offline_rsync_fixture_produces_pack_and_warnings() {
let fixture_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0",
);
assert!(fixture_dir.is_dir(), "fixture directory must exist");
let rsync_base_uri = "rsync://rpki.cernet.net/repo/cernet/0/".to_string();
let manifest_file = "05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft";
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
// Pick a validation_time inside the fixture manifest's validity window to keep this
// test stable across wall-clock time.
let fixture_manifest_bytes =
std::fs::read(fixture_dir.join(manifest_file)).expect("read manifest fixture");
let fixture_manifest =
crate::data_model::manifest::ManifestObject::decode_der(&fixture_manifest_bytes)
.expect("decode manifest fixture");
let validation_time = {
let this_update = fixture_manifest.manifest.this_update;
let next_update = fixture_manifest.manifest.next_update;
let candidate = this_update + time::Duration::seconds(60);
if candidate < next_update {
candidate
} else {
this_update
}
};
let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
let policy = Policy {
sync_preference: crate::policy::SyncPreference::RsyncOnly,
..Policy::default()
};
let runner = Rpkiv1PublicationPointRunner {
store: &store,
policy: &policy,
http_fetcher: &NeverHttpFetcher,
rsync_fetcher: &LocalDirRsyncFetcher::new(&fixture_dir),
validation_time,
};
// For this fixture-driven smoke, we provide the correct issuer CA certificate (the CA for
// this publication point) so ROA EE certificate paths can validate.
let issuer_ca_der = std::fs::read(
std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
),
)
.expect("read issuer ca fixture");
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca");
let handle = CaInstanceHandle {
depth: 0,
ca_certificate_der: issuer_ca_der,
ca_certificate_rsync_uri: Some("rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer".to_string()),
effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
rsync_base_uri: rsync_base_uri.clone(),
manifest_rsync_uri: manifest_rsync_uri.clone(),
publication_point_rsync_uri: rsync_base_uri.clone(),
rrdp_notification_uri: None,
};
let out = runner
.run_publication_point(&handle)
.expect("run publication point");
assert_eq!(out.source, PublicationPointSource::Fresh);
assert_eq!(out.pack.manifest_rsync_uri, manifest_rsync_uri);
assert!(out.pack.files.len() > 1);
assert!(
out.objects.vrps.len() > 1,
"expected to extract VRPs from ROAs"
);
}
// Builds a publication-point audit from a synthetic pack containing an
// undecodable CRL and a ROA, and checks that:
//  - the manifest entry leads the audit object list,
//  - the bad CRL is reported as an Error,
//  - the ROA entry supplied by the objects pass is overlaid with result Ok,
//  - the audit timestamp fields are populated RFC 3339 strings.
#[test]
fn build_publication_point_audit_marks_invalid_crl_as_error_and_overlays_roa_audit() {
    let now = time::OffsetDateTime::now_utc();
    // Pack with a one-byte (hence undecodable) CRL and a dummy ROA payload.
    let pack = dummy_pack_with_files(vec![
        PackFile::from_bytes_compute_sha256(
            "rsync://example.test/repo/issuer/bad.crl",
            vec![0u8],
        ),
        PackFile::from_bytes_compute_sha256(
            "rsync://example.test/repo/issuer/x.roa",
            vec![1u8],
        ),
    ]);
    let pp = crate::validation::manifest::PublicationPointResult {
        source: crate::validation::manifest::PublicationPointSource::Fresh,
        pack: pack.clone(),
        warnings: Vec::new(),
    };
    // Issuer handle with no resources; only the URIs matter for the audit.
    let issuer = CaInstanceHandle {
        depth: 0,
        ca_certificate_der: vec![1],
        ca_certificate_rsync_uri: None,
        effective_ip_resources: None,
        effective_as_resources: None,
        rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
        manifest_rsync_uri: pack.manifest_rsync_uri.clone(),
        publication_point_rsync_uri: pack.publication_point_rsync_uri.clone(),
        rrdp_notification_uri: None,
    };
    // Pre-existing per-object audit entry for the ROA, as the objects pass
    // would produce; build_publication_point_audit should carry it through.
    let objects = crate::validation::objects::ObjectsOutput {
        vrps: Vec::new(),
        aspas: Vec::new(),
        warnings: Vec::new(),
        stats: crate::validation::objects::ObjectsStats::default(),
        audit: vec![ObjectAuditEntry {
            rsync_uri: "rsync://example.test/repo/issuer/x.roa".to_string(),
            sha256_hex: sha256_hex_from_32(&pack.files[1].sha256),
            kind: AuditObjectKind::Roa,
            result: AuditObjectResult::Ok,
            detail: None,
        }],
    };
    let audit = build_publication_point_audit(&issuer, &pp, &[], &objects, &[]);
    // The manifest itself is always the first audited object.
    assert_eq!(audit.objects[0].kind, AuditObjectKind::Manifest);
    let crl = audit
        .objects
        .iter()
        .find(|e| e.rsync_uri.ends_with("bad.crl"))
        .expect("crl entry");
    assert!(matches!(crl.result, AuditObjectResult::Error));
    let roa = audit
        .objects
        .iter()
        .find(|e| e.rsync_uri.ends_with("x.roa"))
        .expect("roa entry");
    assert!(matches!(roa.result, AuditObjectResult::Ok));
    // Smoke that time fields are populated from pack.
    assert!(audit.verified_at_rfc3339_utc.contains('T'));
    assert!(audit.this_update_rfc3339_utc.contains('T'));
    assert!(audit.next_update_rfc3339_utc.contains('T'));
    let _ = now;
}
// A corrupt child candidate (.cer) must not abort discovery: it yields no
// children but leaves a decode-error entry in the per-object audit trail.
#[test]
fn discover_children_with_audit_records_decode_error_for_corrupt_cer() {
    let g = generate_chain_and_crl();
    let pack = dummy_pack_with_files(vec![
        PackFile::from_bytes_compute_sha256(
            "rsync://example.test/repo/issuer/issuer.crl",
            g.issuer_crl_der.clone(),
        ),
        // One-byte payload: cannot be DER-decoded as a certificate.
        PackFile::from_bytes_compute_sha256(
            "rsync://example.test/repo/issuer/corrupt.cer",
            vec![0u8],
        ),
    ]);
    let issuer_ca = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer");
    let issuer = CaInstanceHandle {
        depth: 0,
        ca_certificate_der: g.issuer_ca_der.clone(),
        ca_certificate_rsync_uri: None,
        effective_ip_resources: issuer_ca.tbs.extensions.ip_resources.clone(),
        effective_as_resources: issuer_ca.tbs.extensions.as_resources.clone(),
        rsync_base_uri: "rsync://example.test/repo/issuer/".to_string(),
        manifest_rsync_uri: "rsync://example.test/repo/issuer/issuer.mft".to_string(),
        publication_point_rsync_uri: "rsync://example.test/repo/issuer/".to_string(),
        rrdp_notification_uri: None,
    };
    let now = time::OffsetDateTime::now_utc();
    let out = discover_children_from_fresh_pack_with_audit(&issuer, &pack, now)
        .expect("discover children");
    // No valid children, but exactly one audit entry describing the failure.
    assert!(out.children.is_empty());
    assert_eq!(out.audits.len(), 1);
    assert!(matches!(out.audits[0].result, AuditObjectResult::Error));
}
}

7
tests/fixtures/rrdp/notification.xml vendored Normal file
View File

@ -0,0 +1,7 @@
<notification xmlns="http://www.ripe.net/rpki/rrdp"
version="1"
session_id="9df4b597-af9e-4dca-bdda-719cce2c4e28"
serial="1">
<snapshot uri="https://example.net/rrdp/snapshot.xml"
hash="dcb1ce91401d568d7ddf7a4c9f70c65d8428c3a5e7135f82db99c4de30413551"/>
</notification>

12
tests/fixtures/rrdp/snapshot.xml vendored Normal file
View File

@ -0,0 +1,12 @@
<snapshot xmlns="http://www.ripe.net/rpki/rrdp"
version="1"
session_id="9df4b597-af9e-4dca-bdda-719cce2c4e28"
serial="1">
<publish uri="rsync://example.net/repo/obj1.cer">
YWJj
</publish>
<publish uri="rsync://example.net/repo/obj2.crl">
ZGVm
</publish>
</snapshot>

View File

@ -0,0 +1,37 @@
use rpki::data_model::ta::TrustAnchor;
use rpki::data_model::tal::Tal;
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use rpki::validation::ca_instance::ca_instance_uris_from_ca_certificate;
use rpki::validation::from_tal::discover_root_ca_instance_from_tal_url;
const APNIC_TAL_URL: &str = "https://tal.apnic.net/tal-archive/apnic-rfc7730-https.tal";
/// Live smoke test: download the APNIC TAL, fetch the TA certificate it
/// points at, bind the two, and check the derived CA instance URIs look sane.
#[test]
#[ignore = "live network smoke test (APNIC TAL)"]
fn apnic_tal_downloads_and_binds_to_ta_certificate() {
    let http = BlockingHttpFetcher::new(HttpFetcherConfig::default()).expect("build fetcher");
    let raw_tal = http.fetch_bytes(APNIC_TAL_URL).expect("download TAL");
    let tal = Tal::decode_bytes(&raw_tal).expect("decode TAL");
    let ta_uri = tal.ta_uris.first().expect("TAL has TA URIs").clone();
    let ta_der = http.fetch_bytes(ta_uri.as_str()).expect("download TA cert");
    let anchor = TrustAnchor::bind_der(tal, &ta_der, Some(&ta_uri)).expect("bind");
    let uris = ca_instance_uris_from_ca_certificate(&anchor.ta_certificate.rc_ca)
        .expect("extract CA instance URIs");
    assert!(uris.rsync_base_uri.starts_with("rsync://"));
    assert!(uris.manifest_rsync_uri.starts_with("rsync://"));
}
/// Live smoke test for the one-shot discovery API against the APNIC TAL.
#[test]
#[ignore = "live network smoke test (APNIC TAL)"]
fn apnic_tal_discovery_api_smoke() {
    let http = BlockingHttpFetcher::new(HttpFetcherConfig::default()).expect("build fetcher");
    let discovery = discover_root_ca_instance_from_tal_url(&http, APNIC_TAL_URL).expect("discover");
    assert!(discovery.ca_instance.rsync_base_uri.starts_with("rsync://"));
    assert!(discovery.ca_instance.manifest_rsync_uri.starts_with("rsync://"));
}

View File

@ -0,0 +1,253 @@
use std::cell::RefCell;
use std::collections::{BTreeMap, HashSet};
use rpki::data_model::crl::RpkixCrl;
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use rpki::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
use rpki::policy::Policy;
use rpki::storage::RocksStore;
use rpki::validation::from_tal::discover_root_ca_instance_from_tal_url;
use rpki::validation::run_tree_from_tal::root_handle_from_trust_anchor;
use rpki::validation::tree::{
CaInstanceHandle, PublicationPointRunResult, PublicationPointRunner, TreeRunConfig,
run_tree_serial,
};
use rpki::validation::tree_runner::Rpkiv1PublicationPointRunner;
const APNIC_TAL_URL: &str = "https://tal.apnic.net/tal-archive/apnic-rfc7730-https.tal";
// Running tallies collected while walking the APNIC tree serially.
#[derive(Default, Debug)]
struct LiveStats {
    // Publication points processed, split by how their pack was obtained.
    publication_points_processed: usize,
    publication_points_fresh: usize,
    publication_points_cached: usize,
    // Distinct RRDP notification URIs seen on CA handles.
    rrdp_repos_unique: HashSet<String>,
    // Pack URI accounting: uniqueness set, raw total, per-extension totals.
    pack_file_uris_unique: HashSet<String>,
    pack_uris_total: usize,
    pack_uris_by_ext_total: BTreeMap<String, usize>,
    // CRLs seen vs. CRLs that DER-decode cleanly.
    crl_total: usize,
    crl_decode_ok: usize,
    // .cer files seen in packs vs. children the runner actually validated.
    child_ca_cert_candidates_total: usize,
    child_ca_cert_validated_ok: usize,
    // ROA/ASPA totals vs. the subset that validated.
    roa_total: usize,
    roa_ok: usize,
    aspa_total: usize,
    aspa_ok: usize,
    // Publication points whose object set was dropped wholesale.
    objects_dropped_publication_points: usize,
}
/// Lower-cased extension of the final path segment of `uri`, or `"(none)"`
/// when that segment has no non-empty suffix after its last dot.
fn ext_of_uri(uri: &str) -> String {
    let segment = uri.rsplit('/').next().unwrap_or(uri);
    if let Some(dot) = segment.rfind('.') {
        let suffix = &segment[dot + 1..];
        if !suffix.is_empty() {
            return suffix.to_ascii_lowercase();
        }
    }
    "(none)".to_string()
}
impl LiveStats {
    /// Note per-CA inputs before the publication point is run (currently just
    /// the RRDP notification URI, tracked for repository uniqueness).
    fn record_ca_inputs(&mut self, ca: &CaInstanceHandle) {
        if let Some(notify) = ca.rrdp_notification_uri.as_ref() {
            self.rrdp_repos_unique.insert(notify.clone());
        }
    }
    /// Account one pack URI in the raw total, the uniqueness set, and the
    /// per-extension totals.
    fn count_uri(&mut self, uri: &str) {
        self.pack_uris_total += 1;
        self.pack_file_uris_unique.insert(uri.to_owned());
        *self
            .pack_uris_by_ext_total
            .entry(ext_of_uri(uri))
            .or_insert(0) += 1;
    }
    /// Fold one finished publication-point run into the aggregate counters,
    /// printing a progress line every 25 publication points.
    fn record(&mut self, res: &PublicationPointRunResult) {
        self.publication_points_processed += 1;
        match res.source {
            rpki::validation::manifest::PublicationPointSource::Fresh => {
                self.publication_points_fresh += 1
            }
            rpki::validation::manifest::PublicationPointSource::VerifiedCache => {
                self.publication_points_cached += 1
            }
        }
        // The manifest object's own URI counts alongside the listed files.
        self.count_uri(&res.pack.manifest_rsync_uri);
        for file in &res.pack.files {
            self.count_uri(&file.rsync_uri);
            if file.rsync_uri.ends_with(".crl") {
                self.crl_total += 1;
                if RpkixCrl::decode_der(&file.bytes).is_ok() {
                    self.crl_decode_ok += 1;
                }
            }
            if file.rsync_uri.ends_with(".cer") {
                self.child_ca_cert_candidates_total += 1;
            }
        }
        self.child_ca_cert_validated_ok += res.discovered_children.len();
        self.roa_total += res.objects.stats.roa_total;
        self.roa_ok += res.objects.stats.roa_ok;
        self.aspa_total += res.objects.stats.aspa_total;
        self.aspa_ok += res.objects.stats.aspa_ok;
        if res.objects.stats.publication_point_dropped {
            self.objects_dropped_publication_points += 1;
        }
        if self.publication_points_processed % 25 == 0 {
            println!(
                "progress: processed={} fresh={} cached={} failed_objects_pps={} vrps={} aspas={}",
                self.publication_points_processed,
                self.publication_points_fresh,
                self.publication_points_cached,
                self.objects_dropped_publication_points,
                self.roa_ok,
                self.aspa_ok,
            );
        }
    }
}
// Decorator around a `PublicationPointRunner` that records per-run stats
// into a shared `RefCell<LiveStats>` while delegating the actual work.
struct CountingRunner<'a> {
    inner: &'a dyn PublicationPointRunner,
    stats: &'a RefCell<LiveStats>,
}
impl<'a> PublicationPointRunner for CountingRunner<'a> {
    // Delegates to the wrapped runner, tallying the CA inputs before the run
    // and the result after it. Each `borrow_mut` is scoped to a single
    // statement so the RefCell is never borrowed across the inner call.
    fn run_publication_point(
        &self,
        ca: &CaInstanceHandle,
    ) -> Result<PublicationPointRunResult, String> {
        self.stats.borrow_mut().record_ca_inputs(ca);
        let res = self.inner.run_publication_point(ca)?;
        self.stats.borrow_mut().record(&res);
        Ok(res)
    }
}
// Full serial walk of the APNIC tree with a stats report printed at the end.
// Depth / instance counts can be capped through the RPKI_APNIC_MAX_DEPTH and
// RPKI_APNIC_MAX_INSTANCES environment variables.
#[test]
#[ignore = "live network + rsync full-tree stats (APNIC TAL); may take minutes"]
fn apnic_tree_full_stats_serial() {
    let http = BlockingHttpFetcher::new(HttpFetcherConfig::default()).expect("http fetcher");
    let rsync = SystemRsyncFetcher::new(SystemRsyncConfig::default());
    let validation_time = time::OffsetDateTime::now_utc();
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = Policy::default();
    let discovery =
        discover_root_ca_instance_from_tal_url(&http, APNIC_TAL_URL).expect("discover APNIC TAL");
    let runner = Rpkiv1PublicationPointRunner {
        store: &store,
        policy: &policy,
        http_fetcher: &http,
        rsync_fetcher: &rsync,
        validation_time,
    };
    // Wrap the real runner so every publication point is tallied as it runs.
    let stats = RefCell::new(LiveStats::default());
    let counting = CountingRunner {
        inner: &runner,
        stats: &stats,
    };
    let root: CaInstanceHandle =
        root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
    let max_depth = std::env::var("RPKI_APNIC_MAX_DEPTH")
        .ok()
        .and_then(|s| s.parse::<usize>().ok());
    let max_instances = std::env::var("RPKI_APNIC_MAX_INSTANCES")
        .ok()
        .and_then(|s| s.parse::<usize>().ok());
    let out = run_tree_serial(
        root,
        &counting,
        &TreeRunConfig {
            max_depth,
            max_instances,
        },
    )
    .expect("run tree");
    let stats = stats.into_inner();
    // Cross-check against the store: count raw objects (keyed by rsync URI)
    // per extension, plus the number of verified packs.
    let mut raw_total = 0usize;
    let mut raw_by_ext: BTreeMap<String, usize> = BTreeMap::new();
    for (k, _v) in store.raw_iter_all().expect("raw_iter_all") {
        raw_total += 1;
        if let Ok(uri) = std::str::from_utf8(&k) {
            *raw_by_ext.entry(ext_of_uri(uri)).or_insert(0) += 1;
        }
    }
    let verified_total = store
        .verified_iter_all()
        .expect("verified_iter_all")
        .count();
    println!("APNIC Stage2 full-tree serial stats");
    println!("tal_url={APNIC_TAL_URL}");
    println!("validation_time={}", validation_time.format(&time::format_description::well_known::Rfc3339).unwrap());
    println!();
    println!(
        "publication_points_processed={} publication_points_failed={} fresh={} cached={}",
        stats.publication_points_processed,
        out.instances_failed,
        stats.publication_points_fresh,
        stats.publication_points_cached
    );
    println!("rrdp_repos_unique={}", stats.rrdp_repos_unique.len());
    println!("objects_dropped_publication_points={}", stats.objects_dropped_publication_points);
    println!();
    println!(
        "pack_uris_total={} pack_uris_unique={}",
        stats.pack_uris_total,
        stats.pack_file_uris_unique.len()
    );
    println!("pack_uris_by_ext_total={:?}", stats.pack_uris_by_ext_total);
    println!();
    println!(
        "crl_total={} crl_decode_ok={}",
        stats.crl_total, stats.crl_decode_ok
    );
    println!(
        "child_ca_cert_candidates_total={} child_ca_cert_validated_ok={}",
        stats.child_ca_cert_candidates_total, stats.child_ca_cert_validated_ok
    );
    println!(
        "roa_total={} roa_ok={} vrps_out={}",
        stats.roa_total,
        stats.roa_ok,
        out.vrps.len()
    );
    println!(
        "aspa_total={} aspa_ok={} aspas_out={}",
        stats.aspa_total,
        stats.aspa_ok,
        out.aspas.len()
    );
    println!();
    println!("rocksdb_raw_objects_total={} raw_by_ext={:?}", raw_total, raw_by_ext);
    println!("rocksdb_verified_packs_total={}", verified_total);
    // Loose sanity assertions (avoid flakiness due to repository churn).
    assert!(
        out.instances_processed >= 2,
        "expected to process root + at least one child"
    );
}

View File

@ -0,0 +1,37 @@
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use rpki::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
use rpki::policy::Policy;
use rpki::storage::RocksStore;
use rpki::validation::run_tree_from_tal::run_tree_from_tal_url_serial;
use rpki::validation::tree::TreeRunConfig;
const APNIC_TAL_URL: &str = "https://tal.apnic.net/tal-archive/apnic-rfc7730-https.tal";
// Live smoke test: a depth-capped tree run from the APNIC TAL must process
// the root plus at least one child publication point.
#[test]
#[ignore = "live network + rsync smoke test (APNIC TAL)"]
fn apnic_tree_depth1_processes_more_than_root() {
    let http = BlockingHttpFetcher::new(HttpFetcherConfig::default()).expect("http fetcher");
    let rsync = SystemRsyncFetcher::new(SystemRsyncConfig::default());
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = Policy::default();
    // Depth and instance caps keep the smoke test small and fast.
    let out = run_tree_from_tal_url_serial(
        &store,
        &policy,
        APNIC_TAL_URL,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &TreeRunConfig {
            max_depth: Some(1),
            max_instances: Some(2),
        },
    )
    .expect("run tree from tal");
    assert!(
        out.tree.instances_processed >= 2,
        "expected to process root + at least one child"
    );
}

View File

@ -25,6 +25,9 @@ fn dummy_ee(
extensions: RcExtensions { extensions: RcExtensions {
basic_constraints_ca: false, basic_constraints_ca: false,
subject_key_identifier: Some(vec![0x01]), subject_key_identifier: Some(vec![0x01]),
authority_key_identifier: None,
crl_distribution_points_uris: None,
ca_issuers_uris: None,
subject_info_access: Some(SubjectInfoAccess::Ca( subject_info_access: Some(SubjectInfoAccess::Ca(
rpki::data_model::rc::SubjectInfoAccessCa { rpki::data_model::rc::SubjectInfoAccessCa {
access_descriptions: vec![], access_descriptions: vec![],

View File

@ -0,0 +1,65 @@
use rpki::data_model::manifest::ManifestObject;
use rpki::data_model::ta::TrustAnchor;
use rpki::data_model::tal::Tal;
use rpki::validation::ca_instance::{CaInstanceUrisError, ca_instance_uris_from_ca_certificate};
use url::Url;
/// Load a TAL and its matching TA certificate from the fixture tree and bind
/// them into a `TrustAnchor`, using the TAL's first TA URI as the resolved
/// source URI.
fn load_tal_and_ta_fixture(tal_name: &str, ta_name: &str) -> TrustAnchor {
    let tal_path = format!("tests/fixtures/tal/{tal_name}");
    let ta_path = format!("tests/fixtures/ta/{ta_name}");
    let tal = Tal::decode_bytes(&std::fs::read(tal_path).expect("read TAL fixture"))
        .expect("decode TAL");
    let ta_der = std::fs::read(ta_path).expect("read TA fixture");
    let resolved = tal.ta_uris[0].clone();
    TrustAnchor::bind_der(tal, &ta_der, Some(&resolved)).expect("bind TAL and TA")
}
// Every bundled RIR trust-anchor certificate must yield well-formed CA
// instance URIs: rsync base/publication-point URIs end with '/', the
// manifest URI looks like an .mft, and any RRDP notification URI is https.
#[test]
fn ca_instance_uris_are_discoverable_from_trust_anchor_certificates() {
    let cases = [
        ("afrinic.tal", "afrinic-ta.cer"),
        ("apnic-rfc7730-https.tal", "apnic-ta.cer"),
        ("arin.tal", "arin-ta.cer"),
        ("lacnic.tal", "lacnic-ta.cer"),
        ("ripe-ncc.tal", "ripe-ncc-ta.cer"),
    ];
    for (tal, ta) in cases {
        let trust_anchor = load_tal_and_ta_fixture(tal, ta);
        let uris = ca_instance_uris_from_ca_certificate(&trust_anchor.ta_certificate.rc_ca)
            .expect("extract CA instance URIs");
        assert!(uris.rsync_base_uri.starts_with("rsync://"));
        assert!(uris.rsync_base_uri.ends_with('/'));
        assert!(uris.publication_point_rsync_uri.starts_with("rsync://"));
        assert!(uris.publication_point_rsync_uri.ends_with('/'));
        assert!(uris.manifest_rsync_uri.starts_with("rsync://"));
        assert!(
            uris.manifest_rsync_uri.ends_with(".mft"),
            "manifest URI should look like an mft: {}",
            uris.manifest_rsync_uri
        );
        // rpkiNotify is optional; when present it must be an https URL.
        if let Some(n) = &uris.rrdp_notification_uri {
            assert_eq!(Url::parse(n).unwrap().scheme(), "https");
        }
    }
}
// Extracting CA instance URIs from an EE certificate (here the EE cert
// embedded in a manifest's signed object) must fail with `NotCa`.
#[test]
fn ca_instance_uris_rejects_ee_certificate() {
    let mft_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    )
    .expect("read manifest fixture");
    let mft = ManifestObject::decode_der(&mft_der).expect("decode manifest");
    let ee = &mft.signed_object.signed_data.certificates[0].resource_cert;
    let err = ca_instance_uris_from_ca_certificate(ee).unwrap_err();
    assert!(
        matches!(err, CaInstanceUrisError::NotCa),
        "expected NotCa, got: {err}"
    );
}

489
tests/test_ca_path_m15.rs Normal file
View File

@ -0,0 +1,489 @@
use std::process::Command;
use rpki::data_model::rc::ResourceCertificate;
use rpki::validation::ca_path::{CaPathError, validate_subordinate_ca_cert};
/// Whether an `openssl` binary is runnable on PATH (`openssl version` exits 0).
fn openssl_available() -> bool {
    matches!(
        Command::new("openssl").arg("version").output(),
        Ok(out) if out.status.success()
    )
}
// DER artifacts produced by the openssl-driven generator: the self-signed
// issuer CA certificate, the subordinate (child) CA certificate it issued,
// and the issuer's CRL.
struct Generated {
    issuer_ca_der: Vec<u8>,
    child_ca_der: Vec<u8>,
    issuer_crl_der: Vec<u8>,
}
/// Write `s` to `path`, panicking on any I/O failure.
fn write(path: &std::path::Path, s: &str) {
    // `fs::write` accepts any `AsRef<[u8]>`, so `&str` goes in directly.
    std::fs::write(path, s).expect("write file");
}
/// Run `cmd` to completion, panicking with its captured stdout/stderr if it
/// exits non-zero (or cannot be spawned at all).
fn run(cmd: &mut Command) {
    let out = cmd.output().expect("run command");
    assert!(
        out.status.success(),
        "command failed: {:?}\nstdout={}\nstderr={}",
        cmd,
        String::from_utf8_lossy(&out.stdout),
        String::from_utf8_lossy(&out.stderr)
    );
}
/// Generate, via the system `openssl` binary, a self-signed issuer CA, a
/// subordinate (child) CA issued by it with `child_ext` appended to the
/// child's extension section, and the issuer's CRL (with the child revoked
/// when `revoke_child` is true). All three artifacts are returned as DER.
fn generate_chain_and_crl(child_ext: &str, revoke_child: bool) -> Generated {
    assert!(openssl_available(), "openssl is required for this test");
    let td = tempfile::tempdir().expect("tempdir");
    let dir = td.path();
    // Minimal CA database layout required by `openssl ca`.
    std::fs::create_dir_all(dir.join("newcerts")).expect("newcerts");
    std::fs::write(dir.join("index.txt"), b"").expect("index");
    std::fs::write(dir.join("serial"), b"1000\n").expect("serial");
    std::fs::write(dir.join("crlnumber"), b"1000\n").expect("crlnumber");
    // openssl.cnf template; {dir} and {child_ext} are filled in by format!.
    let cnf = format!(
        r#"
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = {dir}
database = $dir/index.txt
new_certs_dir = $dir/newcerts
certificate = $dir/issuer.pem
private_key = $dir/issuer.key
serial = $dir/serial
crlnumber = $dir/crlnumber
default_md = sha256
default_days = 365
default_crl_days = 1
policy = policy_any
x509_extensions = v3_issuer_ca
crl_extensions = crl_ext
unique_subject = no
copy_extensions = none
[ policy_any ]
commonName = supplied
[ req ]
prompt = no
distinguished_name = dn
[ dn ]
CN = Test Issuer CA
[ v3_issuer_ca ]
basicConstraints = critical,CA:true
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always
certificatePolicies = critical, 1.3.6.1.5.5.7.14.2
subjectInfoAccess = caRepository;URI:rsync://example.test/repo/issuer/, rpkiManifest;URI:rsync://example.test/repo/issuer/issuer.mft, rpkiNotify;URI:https://example.test/notification.xml
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/8
sbgp-autonomousSysNum = critical, AS:64496-64511
[ v3_child_ca ]
basicConstraints = critical,CA:true
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always
crlDistributionPoints = URI:rsync://example.test/repo/issuer/issuer.crl
authorityInfoAccess = caIssuers;URI:rsync://example.test/repo/issuer/issuer.cer
certificatePolicies = critical, 1.3.6.1.5.5.7.14.2
subjectInfoAccess = caRepository;URI:rsync://example.test/repo/child/, rpkiManifest;URI:rsync://example.test/repo/child/child.mft, rpkiNotify;URI:https://example.test/notification.xml
{child_ext}
[ crl_ext ]
authorityKeyIdentifier = keyid:always
"#,
        dir = dir.display(),
        child_ext = child_ext
    );
    write(&dir.join("openssl.cnf"), &cnf);
    // Issuer CA key + self-signed CA cert (DER later).
    run(
        Command::new("openssl")
            .arg("genrsa")
            .arg("-out")
            .arg(dir.join("issuer.key"))
            .arg("2048"),
    );
    run(
        Command::new("openssl")
            .arg("req")
            .arg("-new")
            .arg("-x509")
            .arg("-sha256")
            .arg("-days")
            .arg("365")
            .arg("-key")
            .arg(dir.join("issuer.key"))
            .arg("-out")
            .arg(dir.join("issuer.pem"))
            .arg("-config")
            .arg(dir.join("openssl.cnf"))
            .arg("-extensions")
            .arg("v3_issuer_ca"),
    );
    // Child CA key + CSR.
    run(
        Command::new("openssl")
            .arg("genrsa")
            .arg("-out")
            .arg(dir.join("child.key"))
            .arg("2048"),
    );
    run(
        Command::new("openssl")
            .arg("req")
            .arg("-new")
            .arg("-key")
            .arg(dir.join("child.key"))
            .arg("-subj")
            .arg("/CN=Child CA")
            .arg("-out")
            .arg(dir.join("child.csr"))
            .arg("-config")
            .arg(dir.join("openssl.cnf")),
    );
    // Issue child CA cert using openssl ca (so it appears in the CA database for CRL).
    run(
        Command::new("openssl")
            .arg("ca")
            .arg("-batch")
            .arg("-config")
            .arg(dir.join("openssl.cnf"))
            .arg("-extensions")
            .arg("v3_child_ca")
            .arg("-in")
            .arg(dir.join("child.csr"))
            .arg("-out")
            .arg(dir.join("child.pem"))
            .arg("-notext"),
    );
    // Optionally mark the child revoked before the CRL is generated.
    if revoke_child {
        run(
            Command::new("openssl")
                .arg("ca")
                .arg("-config")
                .arg(dir.join("openssl.cnf"))
                .arg("-revoke")
                .arg(dir.join("child.pem")),
        );
    }
    // Generate CRL.
    run(
        Command::new("openssl")
            .arg("ca")
            .arg("-gencrl")
            .arg("-config")
            .arg(dir.join("openssl.cnf"))
            .arg("-out")
            .arg(dir.join("issuer.crl.pem")),
    );
    // Convert to DER.
    run(
        Command::new("openssl")
            .arg("x509")
            .arg("-in")
            .arg(dir.join("issuer.pem"))
            .arg("-outform")
            .arg("DER")
            .arg("-out")
            .arg(dir.join("issuer.cer")),
    );
    run(
        Command::new("openssl")
            .arg("x509")
            .arg("-in")
            .arg(dir.join("child.pem"))
            .arg("-outform")
            .arg("DER")
            .arg("-out")
            .arg(dir.join("child.cer")),
    );
    run(
        Command::new("openssl")
            .arg("crl")
            .arg("-in")
            .arg(dir.join("issuer.crl.pem"))
            .arg("-outform")
            .arg("DER")
            .arg("-out")
            .arg(dir.join("issuer.crl")),
    );
    Generated {
        issuer_ca_der: std::fs::read(dir.join("issuer.cer")).expect("read issuer der"),
        child_ca_der: std::fs::read(dir.join("child.cer")).expect("read child der"),
        issuer_crl_der: std::fs::read(dir.join("issuer.crl")).expect("read crl der"),
    }
}
// Happy path: a child CA with correct key-usage bits and IP/AS resources
// that are a subset of the issuer's validates, and the validated handle
// carries populated effective resources.
#[test]
fn validate_subordinate_ca_succeeds_for_valid_child_and_subset_resources() {
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign\nsbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\nsbgp-autonomousSysNum = critical, AS:64496\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let validated = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .expect("validate subordinate");
    assert!(validated.effective_ip_resources.is_some());
    assert!(validated.effective_as_resources.is_some());
}
// A CA certificate asserting digitalSignature instead of keyCertSign+cRLSign
// must be rejected with KeyUsageInvalidBits.
#[test]
fn validate_subordinate_ca_rejects_wrong_key_usage_bits() {
    let generated = generate_chain_and_crl(
        "keyUsage = critical, digitalSignature\nsbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::KeyUsageInvalidBits));
}
// A child claiming 11.0.0.0/8, which is outside the issuer's 10.0.0.0/8,
// must be rejected with ResourcesNotSubset.
#[test]
fn validate_subordinate_ca_rejects_out_of_scope_resources() {
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign\nsbgp-ipAddrBlock = critical, IPv4:11.0.0.0/8\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ResourcesNotSubset));
}
// With the child listed on the issuer's CRL (revoke_child = true),
// validation must fail with ChildRevoked.
#[test]
fn validate_subordinate_ca_rejects_revoked_child() {
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign\nsbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\n",
        true,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ChildRevoked));
}
// A child CA certificate with no keyUsage extension at all must be rejected
// with KeyUsageMissing.
#[test]
fn validate_subordinate_ca_rejects_missing_key_usage_extension() {
    let generated = generate_chain_and_crl(
        "sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\nsbgp-autonomousSysNum = critical, AS:64496\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::KeyUsageMissing));
}
// keyUsage is present with the right bits but not flagged critical; this
// must be rejected with KeyUsageNotCritical.
#[test]
fn validate_subordinate_ca_rejects_non_critical_key_usage() {
    let generated = generate_chain_and_crl(
        "keyUsage = keyCertSign, cRLSign\nsbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::KeyUsageNotCritical));
}
// A child CA without any sbgp IP/AS resource extensions must be rejected
// with ResourcesMissing.
#[test]
fn validate_subordinate_ca_rejects_when_child_has_no_resources() {
    let generated = generate_chain_and_crl("keyUsage = critical, keyCertSign, cRLSign\n", false);
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ResourcesMissing));
}
// Validation 400 days in the future is past the 365-day certificate
// lifetime, so validation must fail with CertificateNotValidAtTime.
#[test]
fn validate_subordinate_ca_rejects_when_cert_not_valid_at_validation_time() {
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign\nsbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let validation_time = time::OffsetDateTime::now_utc() + time::Duration::days(400);
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        validation_time,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::CertificateNotValidAtTime));
}
// Validation 2 days in the future: the certificate (365 days) is still
// valid but the CRL (default_crl_days = 1) has expired, so validation must
// fail with CrlNotValidAtTime.
#[test]
fn validate_subordinate_ca_rejects_when_crl_not_valid_at_validation_time() {
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign\nsbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let validation_time = time::OffsetDateTime::now_utc() + time::Duration::days(2);
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        validation_time,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::CrlNotValidAtTime));
}
// Flipping the last byte of the child's DER (inside the signature bytes)
// must surface as ChildSignatureInvalid.
#[test]
fn validate_subordinate_ca_rejects_tampered_child_signature() {
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign\nsbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let mut tampered = generated.child_ca_der.clone();
    if let Some(last) = tampered.last_mut() {
        *last ^= 0x01;
    }
    let err = validate_subordinate_ca_cert(
        &tampered,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ChildSignatureInvalid(_)));
}
// Flipping the last byte of the CRL's DER (inside its signature bytes)
// must surface as CrlVerify.
#[test]
fn validate_subordinate_ca_rejects_tampered_crl_signature() {
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign\nsbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16\n",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let now = time::OffsetDateTime::now_utc();
    let mut tampered = generated.issuer_crl_der.clone();
    if let Some(last) = tampered.last_mut() {
        *last ^= 0x01;
    }
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &generated.issuer_ca_der,
        &tampered,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::CrlVerify(_)));
}

View File

@ -0,0 +1,235 @@
use std::process::Command;
use rpki::validation::cert_path::{CertPathError, validate_ee_cert_path};
/// Whether an `openssl` binary is runnable on PATH (`openssl version` exits 0).
fn openssl_available() -> bool {
    matches!(
        Command::new("openssl").arg("version").output(),
        Ok(out) if out.status.success()
    )
}
/// Run `cmd` to completion, panicking with its captured stdout/stderr if it
/// exits non-zero (or cannot be spawned at all).
fn run(cmd: &mut Command) {
    let out = cmd.output().expect("run command");
    assert!(
        out.status.success(),
        "command failed: {:?}\nstdout={}\nstderr={}",
        cmd,
        String::from_utf8_lossy(&out.stdout),
        String::from_utf8_lossy(&out.stderr)
    );
}
// DER artifacts produced by the openssl-driven generator: the self-signed
// issuer CA certificate, an end-entity certificate it issued, and the
// issuer's CRL.
struct Generated {
    issuer_ca_der: Vec<u8>,
    ee_der: Vec<u8>,
    issuer_crl_der: Vec<u8>,
}
/// Generate, via the system `openssl` binary, a self-signed issuer CA, an
/// end-entity certificate issued by it with `ee_ext` appended to the EE
/// extension section, and the issuer's CRL. All three are returned as DER.
fn generate_issuer_ca_ee_and_crl(ee_ext: &str) -> Generated {
    assert!(openssl_available(), "openssl is required for this test");
    let td = tempfile::tempdir().expect("tempdir");
    let dir = td.path();
    // Minimal CA database layout required by `openssl ca`.
    std::fs::create_dir_all(dir.join("newcerts")).expect("newcerts");
    std::fs::write(dir.join("index.txt"), b"").expect("index");
    std::fs::write(dir.join("serial"), b"1000\n").expect("serial");
    std::fs::write(dir.join("crlnumber"), b"1000\n").expect("crlnumber");
    // openssl.cnf template; {dir} and {ee_ext} are filled in by format!.
    let cnf = format!(
        r#"
[ ca ]
default_ca = CA_default
[ CA_default ]
dir = {dir}
database = $dir/index.txt
new_certs_dir = $dir/newcerts
certificate = $dir/issuer.pem
private_key = $dir/issuer.key
serial = $dir/serial
crlnumber = $dir/crlnumber
default_md = sha256
default_days = 365
default_crl_days = 1
policy = policy_any
x509_extensions = v3_issuer_ca
crl_extensions = crl_ext
unique_subject = no
copy_extensions = none
[ policy_any ]
commonName = supplied
[ req ]
prompt = no
distinguished_name = dn
[ dn ]
CN = Test Issuer CA
[ v3_issuer_ca ]
basicConstraints = critical,CA:true
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always
[ v3_ee ]
basicConstraints = critical,CA:false
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always
crlDistributionPoints = URI:rsync://example.test/repo/issuer/issuer.crl
authorityInfoAccess = caIssuers;URI:rsync://example.test/repo/issuer/issuer.cer
{ee_ext}
[ crl_ext ]
authorityKeyIdentifier = keyid:always
"#,
        dir = dir.display(),
        ee_ext = ee_ext
    );
    std::fs::write(dir.join("openssl.cnf"), cnf.as_bytes()).expect("write cnf");
    // Issuer CA key + self-signed CA cert.
    run(
        Command::new("openssl")
            .arg("genrsa")
            .arg("-out")
            .arg(dir.join("issuer.key"))
            .arg("2048"),
    );
    run(
        Command::new("openssl")
            .arg("req")
            .arg("-new")
            .arg("-x509")
            .arg("-sha256")
            .arg("-days")
            .arg("365")
            .arg("-key")
            .arg(dir.join("issuer.key"))
            .arg("-config")
            .arg(dir.join("openssl.cnf"))
            .arg("-extensions")
            .arg("v3_issuer_ca")
            .arg("-out")
            .arg(dir.join("issuer.pem")),
    );
    // EE key + CSR.
    run(
        Command::new("openssl")
            .arg("genrsa")
            .arg("-out")
            .arg(dir.join("ee.key"))
            .arg("2048"),
    );
    run(
        Command::new("openssl")
            .arg("req")
            .arg("-new")
            .arg("-key")
            .arg(dir.join("ee.key"))
            .arg("-subj")
            .arg("/CN=Test EE")
            .arg("-out")
            .arg(dir.join("ee.csr")),
    );
    // Issue the EE cert through `openssl ca` so it lands in the CA database.
    run(
        Command::new("openssl")
            .arg("ca")
            .arg("-batch")
            .arg("-config")
            .arg(dir.join("openssl.cnf"))
            .arg("-in")
            .arg(dir.join("ee.csr"))
            .arg("-extensions")
            .arg("v3_ee")
            .arg("-out")
            .arg(dir.join("ee.pem")),
    );
    // Convert certs to DER.
    run(
        Command::new("openssl")
            .arg("x509")
            .arg("-in")
            .arg(dir.join("issuer.pem"))
            .arg("-outform")
            .arg("DER")
            .arg("-out")
            .arg(dir.join("issuer.cer")),
    );
    run(
        Command::new("openssl")
            .arg("x509")
            .arg("-in")
            .arg(dir.join("ee.pem"))
            .arg("-outform")
            .arg("DER")
            .arg("-out")
            .arg(dir.join("ee.cer")),
    );
    // Generate the CRL and convert it to DER.
    run(
        Command::new("openssl")
            .arg("ca")
            .arg("-gencrl")
            .arg("-config")
            .arg(dir.join("openssl.cnf"))
            .arg("-out")
            .arg(dir.join("issuer.crl.pem")),
    );
    run(
        Command::new("openssl")
            .arg("crl")
            .arg("-in")
            .arg(dir.join("issuer.crl.pem"))
            .arg("-outform")
            .arg("DER")
            .arg("-out")
            .arg(dir.join("issuer.crl")),
    );
    Generated {
        issuer_ca_der: std::fs::read(dir.join("issuer.cer")).expect("read issuer der"),
        ee_der: std::fs::read(dir.join("ee.cer")).expect("read ee der"),
        issuer_crl_der: std::fs::read(dir.join("issuer.crl")).expect("read crl der"),
    }
}
// An EE cert with critical keyUsage = digitalSignature (and nothing else)
// is the valid shape and must pass path validation.
#[test]
fn ee_key_usage_digital_signature_only_is_accepted() {
    let g = generate_issuer_ca_ee_and_crl("keyUsage = critical, digitalSignature\n");
    let now = time::OffsetDateTime::now_utc();
    validate_ee_cert_path(&g.ee_der, &g.issuer_ca_der, &g.issuer_crl_der, None, None, now)
        .expect("valid EE path");
}
/// An EE certificate that carries no keyUsage extension at all must be
/// rejected with `KeyUsageMissing`.
#[test]
fn ee_key_usage_missing_is_rejected() {
    let generated = generate_issuer_ca_ee_and_crl("");
    let err = validate_ee_cert_path(
        &generated.ee_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        None,
        None,
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::KeyUsageMissing), "{err}");
}
/// A keyUsage extension that is present but not flagged critical must be
/// rejected with `KeyUsageNotCritical`.
#[test]
fn ee_key_usage_not_critical_is_rejected() {
    let generated = generate_issuer_ca_ee_and_crl("keyUsage = digitalSignature\n");
    let err = validate_ee_cert_path(
        &generated.ee_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        None,
        None,
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::KeyUsageNotCritical), "{err}");
}
/// A critical keyUsage with extra bits beyond digitalSignature (here
/// keyEncipherment) must be rejected with `KeyUsageInvalidBits`.
#[test]
fn ee_key_usage_wrong_bits_is_rejected() {
    let generated =
        generate_issuer_ca_ee_and_crl("keyUsage = critical, digitalSignature, keyEncipherment\n");
    let err = validate_ee_cert_path(
        &generated.ee_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        None,
        None,
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::KeyUsageInvalidBits), "{err}");
}

120
tests/test_cert_path_m7.rs Normal file
View File

@ -0,0 +1,120 @@
use rpki::data_model::crl::RpkixCrl;
use rpki::data_model::manifest::ManifestObject;
use rpki::data_model::rc::ResourceCertificate;
use rpki::validation::cert_path::{CertPathError, validate_ee_cert_path};
/// Returns the later of two timestamps.
///
/// Used by the tests below to pick a validation instant that lies inside
/// the intersection of several validity windows.
fn max_time(t: time::OffsetDateTime, other: time::OffsetDateTime) -> time::OffsetDateTime {
    // `OffsetDateTime` implements `Ord`, so the std `max` combinator
    // replaces the manual compare-and-assign (and the needless `mut`).
    t.max(other)
}
/// Returns the earlier of two timestamps.
///
/// Counterpart of `max_time`; used to compute the upper bound of the
/// overlapping validity window in the tests below.
fn min_time(t: time::OffsetDateTime, other: time::OffsetDateTime) -> time::OffsetDateTime {
    // `OffsetDateTime` implements `Ord`, so the std `min` combinator
    // replaces the manual compare-and-assign (and the needless `mut`).
    t.min(other)
}
/// Happy path: the manifest EE certificate from the CERNET fixture chains
/// to its APNIC-issued CA certificate and is not revoked by the CRL, at a
/// validation time chosen inside every object's validity window.
#[test]
fn ee_cert_must_be_issued_by_ca_and_not_revoked() {
    let manifest_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    )
    .expect("read manifest fixture");
    let crl_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl",
    )
    .expect("read CRL fixture");
    let issuer_ca_der = std::fs::read(
        "tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
    )
    .expect("read issuer CA cert fixture");
    let manifest = ManifestObject::decode_der(&manifest_der).expect("decode manifest");
    // The EE certificate travels inside the manifest's CMS SignedData.
    let ee_der = &manifest.signed_object.signed_data.certificates[0].raw_der;
    let ee = ResourceCertificate::decode_der(ee_der).expect("decode EE cert");
    let issuer = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer CA cert");
    let crl = RpkixCrl::decode_der(&crl_der).expect("decode CRL");
    // Lower bound: strictly after every notBefore / thisUpdate.
    let mut t = ee.tbs.validity_not_before;
    t = max_time(t, issuer.tbs.validity_not_before);
    t = max_time(t, crl.this_update.utc);
    t = max_time(t, manifest.manifest.this_update);
    t += time::Duration::seconds(1);
    // Upper bound: before every notAfter / nextUpdate.
    let mut upper = ee.tbs.validity_not_after;
    upper = min_time(upper, issuer.tbs.validity_not_after);
    upper = min_time(upper, crl.next_update.utc);
    upper = min_time(upper, manifest.manifest.next_update);
    // Sanity check that the fixtures' windows actually overlap, otherwise
    // `t` would not be a usable validation instant.
    assert!(t < upper);
    validate_ee_cert_path(ee_der, &issuer_ca_der, &crl_der, None, None, t)
        .expect("cert path validates");
}
/// Presenting an unrelated CA certificate (RGnet fixture) as the issuer of
/// the CERNET manifest EE must fail. Depending on which check fires first,
/// the error may be a name mismatch, a bad signature, or a non-CA issuer —
/// all three are acceptable rejections here.
#[test]
fn wrong_issuer_ca_is_rejected() {
    let manifest_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    )
    .expect("read manifest fixture");
    let crl_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl",
    )
    .expect("read CRL fixture");
    let wrong_issuer_ca_der = std::fs::read(
        "tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/R-lVU1XGsAeqzV1Fv0HjOD6ZFkE.cer",
    )
    .expect("read wrong issuer CA cert fixture");
    let manifest = ManifestObject::decode_der(&manifest_der).expect("decode manifest");
    // The EE certificate travels inside the manifest's CMS SignedData.
    let ee_der = &manifest.signed_object.signed_data.certificates[0].raw_der;
    let ee = ResourceCertificate::decode_der(ee_der).expect("decode EE cert");
    let crl = RpkixCrl::decode_der(&crl_der).expect("decode CRL");
    // Pick a time after both the EE's notBefore and the CRL's thisUpdate so
    // the failure can only come from the issuer mismatch.
    let t = max_time(ee.tbs.validity_not_before, crl.this_update.utc) + time::Duration::seconds(1);
    let err = validate_ee_cert_path(ee_der, &wrong_issuer_ca_der, &crl_der, None, None, t)
        .expect_err("wrong issuer must be rejected");
    assert!(
        matches!(
            err,
            CertPathError::IssuerSubjectMismatch { .. }
                | CertPathError::EeSignatureInvalid(_)
                | CertPathError::IssuerNotCa
        ),
        "{err}"
    );
}
/// Validating one second past the EE certificate's notAfter must fail with
/// `CertificateNotValidAtTime`.
#[test]
fn ee_not_valid_after_not_after_is_rejected() {
    let manifest_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    )
    .expect("read manifest fixture");
    let crl_der = std::fs::read(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl",
    )
    .expect("read CRL fixture");
    let issuer_ca_der = std::fs::read(
        "tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
    )
    .expect("read issuer CA cert fixture");
    let manifest = ManifestObject::decode_der(&manifest_der).expect("decode manifest");
    // The EE certificate travels inside the manifest's CMS SignedData.
    let ee_der = &manifest.signed_object.signed_data.certificates[0].raw_der;
    let ee = ResourceCertificate::decode_der(ee_der).expect("decode EE cert");
    // One second past the EE's notAfter: just outside the validity window.
    let too_late = ee.tbs.validity_not_after + time::Duration::seconds(1);
    let err = validate_ee_cert_path(ee_der, &issuer_ca_der, &crl_der, None, None, too_late)
        .expect_err("expired ee rejected");
    assert!(
        matches!(err, CertPathError::CertificateNotValidAtTime),
        "{err}"
    );
}

View File

@ -0,0 +1,42 @@
/// Drives `rpki::cli::run` in-process in fully offline mode (empty local
/// rsync dir, fixture TAL/TA, depth 0, one instance) and checks that a
/// versioned JSON report is written.
#[test]
fn cli_run_offline_mode_executes_and_writes_json() {
    let db_dir = tempfile::tempdir().expect("db tempdir");
    let repo_dir = tempfile::tempdir().expect("repo tempdir");
    let out_dir = tempfile::tempdir().expect("out tempdir");
    let report_path = out_dir.path().join("report.json");
    let policy_path = out_dir.path().join("policy.toml");
    // Minimal policy: rsync only, so no RRDP/HTTPS fetching is attempted.
    std::fs::write(&policy_path, "sync_preference = \"rsync_only\"\n").expect("write policy");
    // Fixture paths are anchored on the crate root so the test is
    // independent of the working directory.
    let tal_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/tal/apnic-rfc7730-https.tal");
    let ta_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/ta/apnic-ta.cer");
    // argv[0] is the program name; the rest mirrors a real CLI invocation.
    let argv = vec![
        "rpki".to_string(),
        "--db".to_string(),
        db_dir.path().to_string_lossy().to_string(),
        "--policy".to_string(),
        policy_path.to_string_lossy().to_string(),
        "--tal-path".to_string(),
        tal_path.to_string_lossy().to_string(),
        "--ta-path".to_string(),
        ta_path.to_string_lossy().to_string(),
        "--rsync-local-dir".to_string(),
        repo_dir.path().to_string_lossy().to_string(),
        "--max-depth".to_string(),
        "0".to_string(),
        "--max-instances".to_string(),
        "1".to_string(),
        "--report-json".to_string(),
        report_path.to_string_lossy().to_string(),
    ];
    rpki::cli::run(&argv).expect("cli run");
    // The run must produce a parseable report with the expected version.
    let bytes = std::fs::read(&report_path).expect("read report json");
    let v: serde_json::Value = serde_json::from_slice(&bytes).expect("parse report json");
    assert_eq!(v["format_version"], 1);
}

View File

@ -0,0 +1,54 @@
use std::process::Command;
/// Smoke test of the compiled `rpki` binary: runs it offline against an
/// empty local repository and asserts the JSON report exists and carries
/// the expected top-level sections.
#[test]
fn cli_offline_smoke_writes_report_json() {
    // Path to the binary built by Cargo for this test run.
    let bin = env!("CARGO_BIN_EXE_rpki");
    let db_dir = tempfile::tempdir().expect("db tempdir");
    let repo_dir = tempfile::tempdir().expect("repo tempdir");
    let out_dir = tempfile::tempdir().expect("out tempdir");
    let report_path = out_dir.path().join("report.json");
    // Fixture paths anchored on the crate root, independent of cwd.
    let tal_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/tal/apnic-rfc7730-https.tal");
    let ta_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/ta/apnic-ta.cer");
    let out = Command::new(bin)
        .args([
            "--db",
            db_dir.path().to_string_lossy().as_ref(),
            "--tal-path",
            tal_path.to_string_lossy().as_ref(),
            "--ta-path",
            ta_path.to_string_lossy().as_ref(),
            "--rsync-local-dir",
            repo_dir.path().to_string_lossy().as_ref(),
            "--max-depth",
            "0",
            "--max-instances",
            "1",
            "--report-json",
            report_path.to_string_lossy().as_ref(),
        ])
        .output()
        .expect("run cli");
    // On failure, include the child's stdout/stderr in the panic message
    // to make CI diagnostics useful.
    assert!(
        out.status.success(),
        "cli failed: status={}\nstdout={}\nstderr={}",
        out.status,
        String::from_utf8_lossy(&out.stdout),
        String::from_utf8_lossy(&out.stderr)
    );
    let bytes = std::fs::read(&report_path).expect("read report json");
    let v: serde_json::Value = serde_json::from_slice(&bytes).expect("parse report json");
    assert_eq!(v["format_version"], 1);
    // The report's top-level sections must all be present.
    assert!(v.get("policy").is_some());
    assert!(v.get("tree").is_some());
    assert!(v.get("publication_points").is_some());
    assert!(v.get("vrps").is_some());
    assert!(v.get("aspas").is_some());
}

64
tests/test_fetch_http.rs Normal file
View File

@ -0,0 +1,64 @@
use std::io::{Read, Write};
use std::net::TcpListener;
use std::thread;
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use rpki::sync::rrdp::Fetcher;
/// Spawns a minimal HTTP/1.1 server on an ephemeral localhost port.
///
/// Returns the `host:port` address string and the server thread's join
/// handle. NOTE(review): despite the "one_shot" name, the thread serves
/// exactly three sequential requests before exiting, so the join handle
/// only resolves after three connections have been handled — the test
/// below issues exactly three fetches.
fn start_one_shot_http_server() -> (String, thread::JoinHandle<()>) {
    // Port 0 lets the OS pick a free port, avoiding collisions in CI.
    let listener = TcpListener::bind("127.0.0.1:0").expect("bind");
    let addr = listener.local_addr().expect("local addr");
    let addr_s = addr.to_string();
    let h = thread::spawn(move || {
        // Serve exactly three sequential requests, then let the thread end.
        for _ in 0..3 {
            let (mut sock, _peer) = listener.accept().expect("accept");
            let mut buf = [0u8; 4096];
            // Single read: assumes the whole request arrives in one chunk,
            // which holds for the small GETs issued by the test client.
            let n = sock.read(&mut buf).expect("read request");
            let req = String::from_utf8_lossy(&buf[..n]);
            // Request path is the second token of the request line,
            // e.g. "GET /ok HTTP/1.1".
            let path = req
                .lines()
                .next()
                .and_then(|l| l.split_whitespace().nth(1))
                .unwrap_or("/");
            // Only "/ok" succeeds; everything else is a 404.
            let (status, body) = if path == "/ok" {
                ("200 OK", b"ok".as_slice())
            } else {
                ("404 Not Found", b"nope".as_slice())
            };
            let resp = format!(
                "HTTP/1.1 {status}\r\nContent-Length: {}\r\nConnection: close\r\n\r\n",
                body.len()
            );
            sock.write_all(resp.as_bytes()).expect("write headers");
            sock.write_all(body).expect("write body");
        }
    });
    (addr_s, h)
}
/// Exercises `BlockingHttpFetcher` against the local test server: a 200
/// response yields the body, a non-2xx status becomes an error mentioning
/// the status code. Exactly three requests are issued so the three-request
/// server thread (see `start_one_shot_http_server`) can finish and be
/// joined without hanging.
#[test]
fn http_fetcher_handles_success_and_status_errors() {
    let (addr, h) = start_one_shot_http_server();
    let fetcher = BlockingHttpFetcher::new(HttpFetcherConfig::default()).expect("build fetcher");
    // Request 1: direct success path.
    let ok = fetcher
        .fetch_bytes(&format!("http://{addr}/ok"))
        .expect("fetch ok");
    assert_eq!(ok, b"ok");
    // Also exercise the trait method wrapper.
    let ok2 = Fetcher::fetch(&fetcher, &format!("http://{addr}/ok")).expect("fetch via trait");
    assert_eq!(ok2, b"ok");
    // Request 3: a 404 must surface as an error naming the status.
    let err = fetcher
        .fetch_bytes(&format!("http://{addr}/missing"))
        .unwrap_err();
    assert!(err.contains("404"), "error should mention 404, got: {err}");
    h.join().expect("server thread join");
}

View File

@ -0,0 +1,25 @@
use std::os::unix::net::UnixListener;
use rpki::fetch::rsync::{LocalDirRsyncFetcher, RsyncFetcher};
/// `LocalDirRsyncFetcher` must treat a base URI without a trailing slash
/// the same as one with it, and must return only regular files — special
/// directory entries such as Unix sockets are skipped.
#[test]
fn local_dir_rsync_fetcher_normalizes_base_and_skips_non_files() {
    let tmp = tempfile::tempdir().expect("tempdir");
    let root = tmp.path();
    std::fs::write(root.join("a.txt"), b"a").expect("write file");
    // A Unix domain socket is a directory entry whose metadata is not a
    // regular file.
    let _listener = UnixListener::bind(root.join("sock")).expect("bind unix socket");
    // Base URI intentionally lacks the trailing slash.
    let fetched = LocalDirRsyncFetcher::new(root)
        .fetch_objects("rsync://example.test/repo")
        .expect("fetch objects");
    let has_regular_file = fetched
        .iter()
        .any(|(uri, _)| uri.as_str() == "rsync://example.test/repo/a.txt");
    assert!(has_regular_file);
    let has_socket = fetched.iter().any(|(uri, _)| uri.ends_with("/sock"));
    assert!(!has_socket);
}

View File

@ -0,0 +1,38 @@
use std::fs;
use rpki::fetch::rsync::RsyncFetcher;
use rpki::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
/// Points the system-rsync fetcher at a plain local directory and checks
/// that every file below it — including nested ones — comes back with the
/// expected contents.
#[test]
fn system_rsync_fetcher_can_sync_from_local_directory_for_tests() {
    let tmp = tempfile::tempdir().expect("tempdir");
    let src = tmp.path().join("src");
    let nested = src.join("sub");
    fs::create_dir_all(&nested).expect("mkdir");
    fs::write(src.join("a.txt"), b"aaa").expect("write a");
    fs::write(nested.join("b.txt"), b"bbb").expect("write b");
    let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
        rsync_bin: "rsync".into(),
        ..Default::default()
    });
    let listing = fetcher
        .fetch_objects(&src.to_string_lossy())
        .expect("fetch objects");
    let (mut found_a, mut found_b) = (false, false);
    for (uri, bytes) in listing {
        if uri.ends_with("a.txt") {
            assert_eq!(bytes, b"aaa");
            found_a = true;
        }
        if uri.ends_with("sub/b.txt") {
            assert_eq!(bytes, b"bbb");
            found_b = true;
        }
    }
    assert!(found_a, "expected a.txt");
    assert!(found_b, "expected sub/b.txt");
}

View File

@ -0,0 +1,181 @@
use std::collections::HashMap;
use rpki::data_model::tal::Tal;
use rpki::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use rpki::policy::{Policy, SyncPreference};
use rpki::storage::RocksStore;
use rpki::sync::rrdp::Fetcher;
use rpki::validation::from_tal::{
FromTalError, discover_root_ca_instance_from_tal, discover_root_ca_instance_from_tal_and_ta_der,
discover_root_ca_instance_from_tal_url, run_root_from_tal_url_once,
};
use rpki::validation::objects::IssuerCaCertificateResolver;
use url::Url;
/// Test double for [`Fetcher`]: serves responses out of a fixed
/// URI-to-bytes map.
struct MapFetcher {
    by_uri: HashMap<String, Vec<u8>>,
}
impl MapFetcher {
    /// Wraps a prepared URI-to-bytes table.
    fn new(by_uri: HashMap<String, Vec<u8>>) -> Self {
        MapFetcher { by_uri }
    }
}
impl Fetcher for MapFetcher {
    /// Returns the mapped bytes, or an error naming the unmapped URI.
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
        match self.by_uri.get(uri) {
            Some(bytes) => Ok(bytes.clone()),
            None => Err(format!("missing mapping for {uri}")),
        }
    }
}
/// Test double for [`RsyncFetcher`] that always succeeds with an empty
/// listing, simulating an unreachable-but-not-erroring repository.
struct EmptyRsync;
impl RsyncFetcher for EmptyRsync {
    fn fetch_objects(&self, _rsync_base_uri: &str) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
        // No repository contents: an empty, successful listing.
        Ok(vec![])
    }
}
/// Test double for [`IssuerCaCertificateResolver`] that never resolves any
/// subject DN.
struct NullResolver;
impl IssuerCaCertificateResolver for NullResolver {
    fn resolve_by_subject_dn(&self, _subject_dn: &str) -> Option<Vec<u8>> {
        None
    }
}
/// Loads the checked-in APNIC TAL fixture (RFC 7730 format, HTTPS URIs,
/// per the fixture file name).
fn apnic_tal_bytes() -> Vec<u8> {
    let path = "tests/fixtures/tal/apnic-rfc7730-https.tal";
    std::fs::read(path).expect("read apnic TAL fixture")
}
/// Loads the checked-in APNIC trust-anchor certificate fixture (DER).
fn apnic_ta_der() -> Vec<u8> {
    let path = "tests/fixtures/ta/apnic-ta.cer";
    std::fs::read(path).expect("read apnic TA fixture")
}
/// Offline discovery from the APNIC TAL plus a pre-fetched TA certificate
/// must yield a well-formed root CA instance: an rsync base directory URI,
/// a `.mft` manifest URI, and — when present — an HTTPS RRDP notification
/// URI.
#[test]
fn offline_discovery_from_apnic_tal_and_ta_der_fixture_works() {
    let tal_bytes = apnic_tal_bytes();
    let ta_der = apnic_ta_der();
    let discovered = discover_root_ca_instance_from_tal_and_ta_der(&tal_bytes, &ta_der, None)
        .expect("discover root from fixtures");
    let ca = &discovered.ca_instance;
    assert!(ca.rsync_base_uri.starts_with("rsync://"));
    assert!(ca.rsync_base_uri.ends_with('/'));
    assert!(ca.manifest_rsync_uri.starts_with("rsync://"));
    assert!(ca.manifest_rsync_uri.ends_with(".mft"));
    match &ca.rrdp_notification_uri {
        Some(notification) => assert!(notification.starts_with("https://")),
        None => {}
    }
}
/// End-to-end discovery starting from a TAL URL, with both the TAL and the
/// TA certificate served by the in-memory map fetcher.
#[test]
fn discover_root_from_tal_url_works_with_mock_fetcher() {
    let tal_bytes = apnic_tal_bytes();
    let tal = Tal::decode_bytes(&tal_bytes).expect("decode TAL");
    let ta_uri = tal.ta_uris[0].as_str().to_string();
    let fetcher = MapFetcher::new(HashMap::from([
        ("https://example.test/apnic.tal".to_string(), tal_bytes),
        (ta_uri, apnic_ta_der()),
    ]));
    let discovered =
        discover_root_ca_instance_from_tal_url(&fetcher, "https://example.test/apnic.tal")
            .expect("discover");
    assert!(discovered.ca_instance.rsync_base_uri.starts_with("rsync://"));
}
/// When the TAL lists several TA URIs, discovery must try them in order,
/// succeed on the first fetchable one, and record that URI as resolved.
#[test]
fn discover_root_tries_multiple_ta_uris_until_one_succeeds() {
    let mut tal = Tal::decode_bytes(&apnic_tal_bytes()).expect("decode TAL");
    let good_uri = tal.ta_uris[0].clone();
    // Prepend an unfetchable URI so the good one is only reached second.
    let bad_uri = Url::parse("https://example.invalid/bad.cer").unwrap();
    tal.ta_uris.insert(0, bad_uri);
    let fetcher =
        MapFetcher::new(HashMap::from([(good_uri.as_str().to_string(), apnic_ta_der())]));
    let discovered = discover_root_ca_instance_from_tal(&fetcher, tal, None).expect("discover");
    assert_eq!(discovered.trust_anchor.resolved_ta_uri.as_ref(), Some(&good_uri));
}
/// A TAL that lists no TA URIs at all cannot be used for discovery and
/// must produce `NoTaUris`.
#[test]
fn discover_root_errors_when_no_ta_uris_present() {
    let mut tal = Tal::decode_bytes(&apnic_tal_bytes()).expect("decode TAL");
    tal.ta_uris.clear();
    let empty_fetcher = MapFetcher::new(HashMap::new());
    let err = discover_root_ca_instance_from_tal(&empty_fetcher, tal, None).unwrap_err();
    assert!(matches!(err, FromTalError::NoTaUris));
}
/// If every listed TA URI fails to fetch, discovery surfaces `TaFetch`.
#[test]
fn discover_root_errors_when_all_ta_fetches_fail() {
    let tal = Tal::decode_bytes(&apnic_tal_bytes()).expect("decode TAL");
    // The fetcher maps nothing, so every TA fetch fails.
    let empty_fetcher = MapFetcher::new(HashMap::new());
    let err = discover_root_ca_instance_from_tal(&empty_fetcher, tal, None).unwrap_err();
    assert!(matches!(err, FromTalError::TaFetch(_)));
}
/// A fetched TA certificate whose key does not bind to the TAL's embedded
/// SPKI must be treated like a failed TA fetch.
#[test]
fn discover_root_errors_when_ta_does_not_bind_to_tal_spki() {
    let tal = Tal::decode_bytes(&apnic_tal_bytes()).expect("decode TAL");
    // Serve a TA certificate belonging to a different trust anchor (ARIN)
    // so the SPKI comparison fails.
    let wrong_ta = std::fs::read("tests/fixtures/ta/arin-ta.cer").expect("read arin ta");
    let fetcher =
        MapFetcher::new(HashMap::from([(tal.ta_uris[0].as_str().to_string(), wrong_ta)]));
    let err = discover_root_ca_instance_from_tal(&fetcher, tal, None).unwrap_err();
    assert!(matches!(err, FromTalError::TaFetch(_)));
}
/// A TAL URL that cannot be fetched at all yields `TalFetch`.
#[test]
fn discover_root_from_tal_url_errors_when_tal_fetch_fails() {
    let empty_fetcher = MapFetcher::new(HashMap::new());
    let err =
        discover_root_ca_instance_from_tal_url(&empty_fetcher, "https://example.test/missing.tal")
            .unwrap_err();
    assert!(matches!(err, FromTalError::TalFetch(_)));
}
/// Discovery itself succeeds (TAL and TA are both served by the mock
/// fetcher), but the subsequent root run must fail with `Run` because the
/// rsync fetcher returns an empty repository.
#[test]
fn run_root_from_tal_url_once_propagates_run_error_when_repo_is_empty() {
    let tal_bytes = apnic_tal_bytes();
    let tal = Tal::decode_bytes(&tal_bytes).expect("decode TAL");
    let ta_uri = tal.ta_uris[0].as_str().to_string();
    // Map both the TAL URL and the TA URI so the discovery phase succeeds.
    let mut map = HashMap::new();
    map.insert("https://example.test/apnic.tal".to_string(), tal_bytes);
    map.insert(ta_uri, apnic_ta_der());
    let fetcher = MapFetcher::new(map);
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    // Force the rsync path so the EmptyRsync stub is actually exercised.
    let mut policy = Policy::default();
    policy.sync_preference = SyncPreference::RsyncOnly;
    let err = run_root_from_tal_url_once(
        &store,
        &policy,
        "https://example.test/apnic.tal",
        &fetcher,
        &EmptyRsync,
        &NullResolver,
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, FromTalError::Run(_)));
}

View File

@ -0,0 +1,118 @@
use std::path::Path;
use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{RocksStore, VerifiedKey, VerifiedPublicationPointPack};
use rpki::validation::manifest::process_manifest_publication_point;
/// Maps a fixture path of the form
/// `tests/fixtures/repository/<host>/<rest>` to the rsync URI
/// `rsync://<host>/<rest>`.
fn fixture_to_rsync_uri(path: &Path) -> String {
    let relative = path
        .strip_prefix("tests/fixtures/repository")
        .expect("path under tests/fixtures/repository");
    let mut components = relative.components();
    // First path component is the repository host name.
    let host_component = components.next().expect("host component");
    let host = host_component.as_os_str().to_string_lossy();
    // Whatever remains is the path below the host.
    let remainder = components.as_path().to_string_lossy();
    format!("rsync://{host}/{remainder}")
}
/// Directory variant of [`fixture_to_rsync_uri`]: the returned URI is
/// guaranteed to end with a trailing `/`.
fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
    let base = fixture_to_rsync_uri(dir);
    if base.ends_with('/') {
        base
    } else {
        base + "/"
    }
}
/// With a completely empty store — no raw manifest and no verified pack —
/// processing must fail even though the policy permits falling back to the
/// verified cache.
#[test]
fn cache_is_not_used_when_missing_and_fresh_manifest_is_missing() {
    let db_dir = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(db_dir.path()).expect("open rocksdb");
    let mut policy = Policy::default();
    policy.ca_failed_fetch_policy = CaFailedFetchPolicy::UseVerifiedCache;
    let epoch = time::OffsetDateTime::from_unix_timestamp(0).unwrap();
    let result = process_manifest_publication_point(
        &store,
        &policy,
        "rsync://example.net/repo/manifest.mft",
        "rsync://example.net/repo/",
        epoch,
    );
    let err = result.expect_err("no raw and no verified cache should fail");
    assert!(err.to_string().contains("verified cache entry missing"));
}
/// A cached verified pack whose recorded publication point does not match
/// the expected one must be rejected when the cache is consulted. The test
/// first produces a genuine pack, then corrupts its publication-point URI
/// in place and removes the raw manifest to force the cache path.
#[test]
fn cache_pack_publication_point_mismatch_is_rejected() {
    let manifest_path = Path::new(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
    let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
    // Validate just after thisUpdate so the fixture manifest is current.
    let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
    let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw(&manifest_rsync_uri, &manifest_bytes)
        .expect("store manifest");
    // Seed the store with every object the manifest lists.
    for entry in &manifest.manifest.files {
        let file_path = manifest_path.parent().unwrap().join(&entry.file_name);
        let bytes = std::fs::read(&file_path)
            .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
        let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
        store.put_raw(&rsync_uri, &bytes).expect("store file");
    }
    let policy = Policy::default();
    let _ = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        validation_time,
    )
    .expect("first run stores verified pack");
    // Corrupt the cached pack by changing the publication point.
    let key = VerifiedKey::from_manifest_rsync_uri(&manifest_rsync_uri);
    let bytes = store
        .get_verified(&key)
        .expect("get verified")
        .expect("verified exists");
    let mut pack = VerifiedPublicationPointPack::decode(&bytes).expect("decode pack");
    pack.publication_point_rsync_uri = "rsync://evil.invalid/repo/".to_string();
    let bytes = pack.encode().expect("re-encode pack");
    store
        .put_verified(&key, &bytes)
        .expect("overwrite verified");
    // Remove raw manifest to force cache path.
    store
        .delete_raw(&manifest_rsync_uri)
        .expect("delete raw manifest");
    let err = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        validation_time,
    )
    .expect_err("cache pack mismatch should fail");
    assert!(
        err.to_string()
            .contains("publication_point_rsync_uri does not match expected")
    );
}

View File

@ -0,0 +1,266 @@
use std::path::Path;
use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{RocksStore, VerifiedKey, VerifiedPublicationPointPack};
use rpki::validation::manifest::{PublicationPointSource, process_manifest_publication_point};
/// Translates `tests/fixtures/repository/<host>/<rest>` into
/// `rsync://<host>/<rest>`.
fn fixture_to_rsync_uri(path: &Path) -> String {
    let rel = path
        .strip_prefix("tests/fixtures/repository")
        .expect("path under tests/fixtures/repository");
    let mut walk = rel.components();
    // The leading component under the fixture root is the host name.
    let host = walk
        .next()
        .expect("host component")
        .as_os_str()
        .to_string_lossy()
        .into_owned();
    let tail = walk.as_path().to_string_lossy();
    format!("rsync://{host}/{tail}")
}
/// Like [`fixture_to_rsync_uri`] but for directories: the result always
/// ends with `/`.
fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
    let uri = fixture_to_rsync_uri(dir);
    match uri.ends_with('/') {
        true => uri,
        false => format!("{uri}/"),
    }
}
/// Happy path: with the manifest and every listed file stored as raw
/// objects, processing validates the publication point, reports the source
/// as `Fresh` with no warnings, and persists a verified pack keyed by the
/// manifest URI that round-trips from storage.
#[test]
fn manifest_success_writes_verified_pack() {
    let manifest_path = Path::new(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
    let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
    // Validate just after thisUpdate so the fixture manifest is current.
    let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
    let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw(&manifest_rsync_uri, &manifest_bytes)
        .expect("store manifest");
    // Seed the store with every object the manifest lists.
    for entry in &manifest.manifest.files {
        let file_path = manifest_path.parent().unwrap().join(&entry.file_name);
        let bytes = std::fs::read(&file_path)
            .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
        let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
        store.put_raw(&rsync_uri, &bytes).expect("store file");
    }
    let policy = Policy::default();
    let out = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        validation_time,
    )
    .expect("process manifest publication point");
    assert_eq!(out.source, PublicationPointSource::Fresh);
    assert!(out.warnings.is_empty());
    // The verified pack must be retrievable, decodable, and carry both URIs.
    let key = VerifiedKey::from_manifest_rsync_uri(&manifest_rsync_uri);
    let stored = store
        .get_verified(&key)
        .expect("get verified")
        .expect("verified pack exists");
    let decoded = VerifiedPublicationPointPack::decode(&stored).expect("decode stored pack");
    assert_eq!(decoded.manifest_rsync_uri, manifest_rsync_uri);
    assert_eq!(
        decoded.publication_point_rsync_uri,
        publication_point_rsync_uri
    );
}
/// After a successful run has populated the verified cache, tampering with
/// one raw object (so its hash no longer matches the manifest's file list)
/// must make the next run fall back to the previously verified pack,
/// reported via `VerifiedCache` source plus at least one warning.
#[test]
fn manifest_hash_mismatch_falls_back_to_verified_cache_when_enabled() {
    let manifest_path = Path::new(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
    let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
    // Validate just after thisUpdate so the fixture manifest is current.
    let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
    let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw(&manifest_rsync_uri, &manifest_bytes)
        .expect("store manifest");
    // Seed the store with every object the manifest lists.
    for entry in &manifest.manifest.files {
        let file_path = manifest_path.parent().unwrap().join(&entry.file_name);
        let bytes = std::fs::read(&file_path)
            .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
        let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
        store.put_raw(&rsync_uri, &bytes).expect("store file");
    }
    let policy = Policy::default();
    // First run: everything intact, so the result is fresh and a verified
    // pack is written.
    let first = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        validation_time,
    )
    .expect("first run stores verified pack");
    assert_eq!(first.source, PublicationPointSource::Fresh);
    // Snapshot the cached pack to compare against the fallback result.
    let key = VerifiedKey::from_manifest_rsync_uri(&manifest_rsync_uri);
    let cached_bytes = store
        .get_verified(&key)
        .expect("get verified")
        .expect("verified pack exists");
    let cached_pack = VerifiedPublicationPointPack::decode(&cached_bytes).expect("decode cached");
    // Flip a byte in the first listed object so its digest no longer
    // matches the manifest entry.
    let victim = manifest
        .manifest
        .files
        .first()
        .expect("non-empty file list");
    let victim_uri = format!("{publication_point_rsync_uri}{}", victim.file_name);
    let mut tampered = store
        .get_raw(&victim_uri)
        .expect("get victim raw")
        .expect("victim raw exists");
    tampered[0] ^= 0xFF;
    store.put_raw(&victim_uri, &tampered).expect("tamper raw");
    // Second run: fresh validation fails, so the cached pack is returned
    // with a warning attached.
    let second = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        validation_time,
    )
    .expect("second run falls back to verified cache");
    assert_eq!(second.source, PublicationPointSource::VerifiedCache);
    assert!(!second.warnings.is_empty());
    assert_eq!(second.pack, cached_pack);
}
/// With `StopAllOutput`, a failed fresh validation must NOT fall back to
/// the verified cache even though a valid pack exists: processing errors
/// out with a message saying cache use is disabled.
#[test]
fn manifest_failed_fetch_stop_all_output() {
    let manifest_path = Path::new(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
    let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
    // Validate just after thisUpdate so the fixture manifest is current.
    let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
    let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw(&manifest_rsync_uri, &manifest_bytes)
        .expect("store manifest");
    // Seed the store with every object the manifest lists.
    for entry in &manifest.manifest.files {
        let file_path = manifest_path.parent().unwrap().join(&entry.file_name);
        let bytes = std::fs::read(&file_path)
            .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
        let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
        store.put_raw(&rsync_uri, &bytes).expect("store file");
    }
    // First run under UseVerifiedCache to populate the verified pack.
    let mut policy = Policy::default();
    policy.ca_failed_fetch_policy = CaFailedFetchPolicy::UseVerifiedCache;
    let _ = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        validation_time,
    )
    .expect("first run stores verified pack");
    // Break one raw object so fresh validation fails on the next run.
    let victim = manifest
        .manifest
        .files
        .first()
        .expect("non-empty file list");
    let victim_uri = format!("{publication_point_rsync_uri}{}", victim.file_name);
    let mut tampered = store
        .get_raw(&victim_uri)
        .expect("get victim raw")
        .expect("victim raw exists");
    tampered[0] ^= 0xFF;
    store.put_raw(&victim_uri, &tampered).expect("tamper raw");
    // Switch the policy: now a fresh failure must be fatal.
    policy.ca_failed_fetch_policy = CaFailedFetchPolicy::StopAllOutput;
    let err = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        validation_time,
    )
    .expect_err("stop_all_output should not use verified cache");
    let msg = err.to_string();
    assert!(msg.contains("cache use is disabled"));
}
/// A cached verified pack is re-validated at use time: if the validation
/// time has moved past the manifest's nextUpdate, the fallback pack must
/// be rejected as stale rather than silently reused.
#[test]
fn manifest_fallback_pack_is_revalidated_and_rejected_if_stale() {
    let manifest_path = Path::new(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
    let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
    // Two instants: one inside the manifest window, one past nextUpdate.
    let ok_time = manifest.manifest.this_update + time::Duration::seconds(1);
    let stale_time = manifest.manifest.next_update + time::Duration::seconds(1);
    let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw(&manifest_rsync_uri, &manifest_bytes)
        .expect("store manifest");
    // Seed the store with every object the manifest lists.
    for entry in &manifest.manifest.files {
        let file_path = manifest_path.parent().unwrap().join(&entry.file_name);
        let bytes = std::fs::read(&file_path)
            .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
        let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
        store.put_raw(&rsync_uri, &bytes).expect("store file");
    }
    let policy = Policy::default();
    // First run at a valid time populates the verified cache.
    let _ = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        ok_time,
    )
    .expect("first run stores verified pack");
    store
        .delete_raw(&manifest_rsync_uri)
        .expect("delete manifest raw to force fallback");
    // Second run at a stale time must reject the cached pack.
    let err = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        stale_time,
    )
    .expect_err("stale validation_time must reject verified cache pack");
    let msg = err.to_string();
    assert!(msg.contains("not valid at validation_time"));
}

View File

@ -0,0 +1,172 @@
use std::collections::HashMap;
use std::path::Path;
use rpki::data_model::crl::RpkixCrl;
use rpki::data_model::manifest::ManifestObject;
use rpki::data_model::rc::ResourceCertificate;
use rpki::policy::{Policy, SignedObjectFailurePolicy};
use rpki::storage::{PackFile, RocksStore};
use rpki::validation::manifest::process_manifest_publication_point;
use rpki::validation::objects::{
IssuerCaCertificateResolver, process_verified_publication_point_pack,
};
/// Converts a fixture path `tests/fixtures/repository/<host>/<rest>` into
/// the rsync URI `rsync://<host>/<rest>`.
fn fixture_to_rsync_uri(path: &Path) -> String {
    let under_repo = path
        .strip_prefix("tests/fixtures/repository")
        .expect("path under tests/fixtures/repository");
    let mut parts = under_repo.components();
    // First component below the fixture root names the repository host.
    let host = parts
        .next()
        .expect("host component")
        .as_os_str()
        .to_string_lossy()
        .into_owned();
    format!("rsync://{}/{}", host, parts.as_path().to_string_lossy())
}
/// Directory form of [`fixture_to_rsync_uri`]: appends a `/` when the
/// translated URI does not already end with one.
fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
    let mut uri = fixture_to_rsync_uri(dir);
    if !uri.ends_with('/') {
        uri.push('/');
    }
    uri
}
/// Test double for [`IssuerCaCertificateResolver`] that resolves nothing.
struct EmptyResolver;
impl IssuerCaCertificateResolver for EmptyResolver {
    fn resolve_by_subject_dn(&self, _subject_dn: &str) -> Option<Vec<u8>> {
        None
    }
}
/// Test double for [`IssuerCaCertificateResolver`] backed by a fixed
/// subject-DN-to-DER map.
struct MapResolver {
    by_subject_dn: HashMap<String, Vec<u8>>,
}
impl IssuerCaCertificateResolver for MapResolver {
    fn resolve_by_subject_dn(&self, subject_dn: &str) -> Option<Vec<u8>> {
        // `?` short-circuits to `None` for unknown subjects.
        let der = self.by_subject_dn.get(subject_dn)?;
        Some(der.clone())
    }
}
/// Build a verified publication-point pack from the CERNET fixture, plus a
/// validation time inside every relevant validity window and a resolver
/// able to supply the issuer CA certificate.
fn build_cernet_pack_and_validation_time() -> (
    rpki::storage::VerifiedPublicationPointPack,
    time::OffsetDateTime,
    MapResolver,
) {
    let manifest_path = Path::new(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
    let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
    let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    // Load the manifest and every file it lists into a fresh RocksDB store.
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw(&manifest_rsync_uri, &manifest_bytes)
        .expect("store manifest");
    for entry in &manifest.manifest.files {
        let file_path = manifest_path.parent().unwrap().join(&entry.file_name);
        let bytes = std::fs::read(&file_path)
            .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
        let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
        store.put_raw(&rsync_uri, &bytes).expect("store file");
    }
    let policy = Policy::default();
    // Validate the publication point just after the manifest's thisUpdate.
    let out = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        manifest.manifest.this_update + time::Duration::seconds(1),
    )
    .expect("process manifest publication point");
    let issuer_ca_der = std::fs::read(
        "tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
    )
    .expect("read issuer CA cert fixture");
    let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer CA cert");
    let crl_file = out
        .pack
        .files
        .iter()
        .find(|f| f.rsync_uri.ends_with(".crl"))
        .expect("crl present in pack");
    let crl = RpkixCrl::decode_der(&crl_file.bytes).expect("decode crl");
    // Pick a validation time strictly later than the manifest thisUpdate,
    // the issuer CA notBefore, and the CRL thisUpdate, so that all three
    // artifacts are simultaneously valid.
    let mut t = manifest.manifest.this_update;
    if issuer_ca.tbs.validity_not_before > t {
        t = issuer_ca.tbs.validity_not_before;
    }
    if crl.this_update.utc > t {
        t = crl.this_update.utc;
    }
    t += time::Duration::seconds(1);
    let resolver = MapResolver {
        by_subject_dn: HashMap::from([(issuer_ca.tbs.subject_dn, issuer_ca_der)]),
    };
    (out.pack, t, resolver)
}
/// Without any CRL in the pack, every ROA must fail revocation checking
/// and be dropped under the `drop_object` policy.
#[test]
fn missing_crl_causes_roas_to_be_dropped_under_drop_object_policy() {
    let (mut pack, validation_time, resolver) = build_cernet_pack_and_validation_time();
    // Strip the CRL from the pack to trigger the failure.
    pack.files.retain(|f| !f.rsync_uri.ends_with(".crl"));
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let outcome =
        process_verified_publication_point_pack(&pack, &policy, &resolver, validation_time)
            .expect("drop_object should not fail the publication point");
    assert!(outcome.vrps.is_empty());
    assert!(!outcome.warnings.is_empty());
}
/// When the resolver cannot supply the issuer CA, every ROA fails and is
/// dropped under the `drop_object` policy.
#[test]
fn missing_issuer_ca_cert_causes_roas_to_be_dropped_under_drop_object_policy() {
    let (pack, validation_time, _resolver) = build_cernet_pack_and_validation_time();
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    // EmptyResolver never supplies the issuer CA certificate.
    let outcome =
        process_verified_publication_point_pack(&pack, &policy, &EmptyResolver, validation_time)
            .expect("drop_object should not fail the publication point");
    assert!(outcome.vrps.is_empty());
    assert!(!outcome.warnings.is_empty());
}
/// An unparseable `.asa` file must produce a warning whose context names
/// the offending URI, without failing the whole publication point.
#[test]
fn invalid_aspa_object_is_reported_as_warning_under_drop_object_policy() {
    let (mut pack, validation_time, resolver) = build_cernet_pack_and_validation_time();
    // Inject a file with an .asa extension whose bytes cannot be parsed.
    let uri = "rsync://rpki.cernet.net/repo/cernet/0/INVALID.asa".to_string();
    let bogus = PackFile::from_bytes_compute_sha256(uri.clone(), b"\0\0".to_vec());
    pack.files.push(bogus);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let outcome =
        process_verified_publication_point_pack(&pack, &policy, &resolver, validation_time)
            .expect("drop_object should not fail");
    let has_warning = outcome
        .warnings
        .iter()
        .any(|w| w.context.as_deref() == Some(&uri));
    assert!(has_warning, "expected warning for invalid ASPA");
}

View File

@ -0,0 +1,189 @@
use std::collections::HashMap;
use std::path::Path;
use rpki::data_model::crl::RpkixCrl;
use rpki::data_model::manifest::ManifestObject;
use rpki::data_model::rc::ResourceCertificate;
use rpki::policy::{Policy, SignedObjectFailurePolicy};
use rpki::storage::{PackFile, RocksStore};
use rpki::validation::manifest::process_manifest_publication_point;
use rpki::validation::objects::{
IssuerCaCertificateResolver, ObjectsProcessError, process_verified_publication_point_pack,
};
/// Map a fixture path under `tests/fixtures/repository` to its rsync URI.
///
/// The first path component below the fixture root is treated as the host
/// name; the remainder becomes the path portion of the URI.
fn fixture_to_rsync_uri(path: &Path) -> String {
    let relative = path
        .strip_prefix("tests/fixtures/repository")
        .expect("path under tests/fixtures/repository");
    let mut components = relative.components();
    let host_component = components.next().expect("host component");
    let host = host_component.as_os_str().to_string_lossy();
    let remainder = components.as_path().to_string_lossy();
    format!("rsync://{host}/{remainder}")
}
/// Like `fixture_to_rsync_uri`, but guarantees a trailing slash so the
/// result can serve as a publication-point base URI.
fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
    let base = fixture_to_rsync_uri(dir);
    if base.ends_with('/') {
        base
    } else {
        format!("{base}/")
    }
}
/// Resolver backed by an in-memory map from issuer subject DN to DER bytes.
struct MapResolver {
    by_subject_dn: HashMap<String, Vec<u8>>,
}
impl IssuerCaCertificateResolver for MapResolver {
    fn resolve_by_subject_dn(&self, subject_dn: &str) -> Option<Vec<u8>> {
        // Clone so the resolver retains ownership of its stored DER bytes.
        self.by_subject_dn.get(subject_dn).cloned()
    }
}
/// Build a verified publication-point pack from the CERNET fixture, plus a
/// validation time inside every relevant validity window and a resolver
/// able to supply the issuer CA certificate.
fn build_cernet_pack_and_validation_time() -> (
    rpki::storage::VerifiedPublicationPointPack,
    time::OffsetDateTime,
    MapResolver,
) {
    let manifest_path = Path::new(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
    let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
    let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    // Load the manifest and every file it lists into a fresh RocksDB store.
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw(&manifest_rsync_uri, &manifest_bytes)
        .expect("store manifest");
    for entry in &manifest.manifest.files {
        let file_path = manifest_path.parent().unwrap().join(&entry.file_name);
        let bytes = std::fs::read(&file_path)
            .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
        let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
        store.put_raw(&rsync_uri, &bytes).expect("store file");
    }
    let policy = Policy::default();
    // Validate the publication point just after the manifest's thisUpdate.
    let out = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        manifest.manifest.this_update + time::Duration::seconds(1),
    )
    .expect("process manifest publication point");
    let issuer_ca_der = std::fs::read(
        "tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
    )
    .expect("read issuer CA cert fixture");
    let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer CA cert");
    let crl_file = out
        .pack
        .files
        .iter()
        .find(|f| f.rsync_uri.ends_with(".crl"))
        .expect("crl present in pack");
    let crl = RpkixCrl::decode_der(&crl_file.bytes).expect("decode crl");
    // Choose a validation_time that is within:
    // - manifest thisUpdate..nextUpdate (RFC 9286 §6.3)
    // - issuer CA validity
    // - CRL thisUpdate..nextUpdate
    // - and should be within EE validity for most objects.
    let mut t = manifest.manifest.this_update;
    if issuer_ca.tbs.validity_not_before > t {
        t = issuer_ca.tbs.validity_not_before;
    }
    if crl.this_update.utc > t {
        t = crl.this_update.utc;
    }
    t += time::Duration::seconds(1);
    // Construct the map in one step (consistent with the sibling test file)
    // instead of `HashMap::new()` followed by a separate `insert`.
    let resolver = MapResolver {
        by_subject_dn: HashMap::from([(issuer_ca.tbs.subject_dn, issuer_ca_der)]),
    };
    (out.pack, t, resolver)
}
/// Corrupting one ROA under `drop_object` must drop only that object:
/// the untouched AS4538 ROA still produces VRPs and the tampered one
/// yields a warning.
#[test]
fn drop_object_policy_drops_only_failing_object() {
    let (mut pack, validation_time, resolver) = build_cernet_pack_and_validation_time();
    // Pick a known-good ROA, then corrupt a *different* ROA.
    let valid_roa_uri = pack
        .files
        .iter()
        .find(|f| f.rsync_uri.ends_with("AS4538.roa"))
        .map(|f| f.rsync_uri.clone())
        .expect("AS4538.roa present in pack");
    let corrupt_idx = pack
        .files
        .iter()
        .position(|f| f.rsync_uri.ends_with(".roa") && f.rsync_uri != valid_roa_uri)
        .expect("another ROA present in pack");
    let victim_uri = pack.files[corrupt_idx].rsync_uri.clone();
    // Flip the final byte so the signature check fails.
    let mut corrupted = pack.files[corrupt_idx].bytes.clone();
    let last_byte = corrupted.len() - 1;
    corrupted[last_byte] ^= 0xFF;
    pack.files[corrupt_idx] = PackFile::from_bytes_compute_sha256(victim_uri.clone(), corrupted);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let outcome =
        process_verified_publication_point_pack(&pack, &policy, &resolver, validation_time)
            .expect("drop_object should succeed");
    assert!(
        outcome.vrps.iter().any(|v| v.asn == 4538),
        "expected at least one VRP for AS4538"
    );
    assert!(
        outcome
            .warnings
            .iter()
            .any(|w| w.context.as_deref() == Some(&victim_uri)),
        "expected a warning for the tampered object"
    );
}
/// Under `drop_publication_point`, one corrupted ROA must fail the entire
/// publication point, and the error must identify the corrupted URI.
#[test]
fn drop_publication_point_policy_fails_the_publication_point() {
    let (mut pack, validation_time, resolver) = build_cernet_pack_and_validation_time();
    // Flip the final byte of the first ROA so its signature check fails.
    let corrupt_idx = pack
        .files
        .iter()
        .position(|f| f.rsync_uri.ends_with(".roa"))
        .expect("a ROA present in pack");
    let victim_uri = pack.files[corrupt_idx].rsync_uri.clone();
    let mut corrupted = pack.files[corrupt_idx].bytes.clone();
    let last_byte = corrupted.len() - 1;
    corrupted[last_byte] ^= 0xFF;
    pack.files[corrupt_idx] = PackFile::from_bytes_compute_sha256(victim_uri.clone(), corrupted);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint,
        ..Policy::default()
    };
    let err = process_verified_publication_point_pack(&pack, &policy, &resolver, validation_time)
        .expect_err("drop_publication_point should fail");
    match err {
        ObjectsProcessError::PublicationPointDropped { rsync_uri, .. } => {
            assert_eq!(rsync_uri, victim_uri);
        }
    }
}

View File

@ -0,0 +1,463 @@
use rpki::fetch::rsync::LocalDirRsyncFetcher;
use rpki::policy::{Policy, SignedObjectFailurePolicy, SyncPreference};
use rpki::storage::{PackFile, PackTime, RocksStore, VerifiedPublicationPointPack};
use rpki::sync::repo::sync_publication_point;
use rpki::sync::rrdp::Fetcher;
use rpki::validation::manifest::process_manifest_publication_point;
use rpki::validation::objects::{
IssuerCaCertificateResolver, process_verified_publication_point_pack,
process_verified_publication_point_pack_for_issuer,
};
/// HTTP fetcher stub: every fetch fails, forcing the rsync-only code paths.
struct NoopHttpFetcher;
impl Fetcher for NoopHttpFetcher {
    fn fetch(&self, _uri: &str) -> Result<Vec<u8>, String> {
        Err("http disabled in test".to_string())
    }
}
/// Locations of the CERNET publication-point fixture:
/// (local directory, rsync base URI, manifest file name).
fn cernet_fixture() -> (std::path::PathBuf, String, String) {
    let fixture_dir = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .join("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0");
    (
        fixture_dir,
        "rsync://rpki.cernet.net/repo/cernet/0/".to_string(),
        "05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft".to_string(),
    )
}
/// Derive a validation time from the fixture manifest: 60 seconds after
/// thisUpdate when that still falls before nextUpdate, otherwise
/// thisUpdate itself.
fn validation_time_from_manifest_fixture(dir: &std::path::Path, manifest_file: &str) -> time::OffsetDateTime {
    let bytes = std::fs::read(dir.join(manifest_file)).expect("read manifest fixture");
    let mft = rpki::data_model::manifest::ManifestObject::decode_der(&bytes).expect("decode mft");
    let this_update = mft.manifest.this_update;
    let next_update = mft.manifest.next_update;
    let candidate = this_update + time::Duration::seconds(60);
    if candidate < next_update { candidate } else { this_update }
}
/// DER bytes of the APNIC-issued CA certificate fixture for the CERNET repo.
fn issuer_ca_fixture() -> Vec<u8> {
    let cert_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
        "tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer",
    );
    std::fs::read(cert_path).expect("read issuer ca fixture")
}
/// Rsync URI under which the issuer CA certificate fixture is published.
fn issuer_ca_rsync_uri() -> &'static str {
    const URI: &str = "rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer";
    URI
}
/// Assemble a `VerifiedPublicationPointPack` directly from raw parts,
/// bypassing manifest processing. `this_update`/`next_update` bracket
/// `validation_time` with a one-hour window so the pack's internal time
/// validation passes.
fn minimal_pack(
    manifest_rsync_uri: &str,
    publication_point_rsync_uri: &str,
    manifest_bytes: Vec<u8>,
    files: Vec<PackFile>,
    validation_time: time::OffsetDateTime,
) -> VerifiedPublicationPointPack {
    // Keep times consistent enough to pass internal pack validation.
    VerifiedPublicationPointPack {
        format_version: VerifiedPublicationPointPack::FORMAT_VERSION_V1,
        manifest_rsync_uri: manifest_rsync_uri.to_string(),
        publication_point_rsync_uri: publication_point_rsync_uri.to_string(),
        this_update: PackTime::from_utc_offset_datetime(validation_time),
        next_update: PackTime::from_utc_offset_datetime(validation_time + time::Duration::hours(1)),
        verified_at: PackTime::from_utc_offset_datetime(validation_time),
        manifest_bytes,
        files,
    }
}
/// Sync a local fixture directory into a fresh RocksDB store over the
/// rsync-only path, run manifest processing, and return the resulting
/// verified publication-point pack.
fn build_verified_pack_from_local_rsync_fixture(
    dir: &std::path::Path,
    rsync_base_uri: &str,
    manifest_rsync_uri: &str,
    validation_time: time::OffsetDateTime,
) -> rpki::storage::VerifiedPublicationPointPack {
    let store_dir = tempfile::tempdir().expect("store dir");
    let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
    let policy = Policy {
        sync_preference: SyncPreference::RsyncOnly,
        ..Policy::default()
    };
    // HTTP fetcher is a stub; only the local-dir rsync fetcher is exercised.
    sync_publication_point(
        &store,
        &policy,
        None,
        rsync_base_uri,
        &NoopHttpFetcher,
        &LocalDirRsyncFetcher::new(dir),
    )
    .expect("sync into raw_objects");
    let pp = process_manifest_publication_point(
        &store,
        &policy,
        manifest_rsync_uri,
        rsync_base_uri,
        validation_time,
    )
    .expect("process manifest");
    pp.pack
}
/// End-to-end: the real CERNET fixture processed against its real issuer
/// CA must yield a substantial number of VRPs and no ASPAs.
#[test]
fn process_pack_for_issuer_extracts_vrps_from_real_cernet_fixture() {
    let (fixture_dir, rsync_base_uri, manifest_file) = cernet_fixture();
    let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
    let validation_time = validation_time_from_manifest_fixture(&fixture_dir, &manifest_file);
    let pack = build_verified_pack_from_local_rsync_fixture(
        &fixture_dir,
        &rsync_base_uri,
        &manifest_rsync_uri,
        validation_time,
    );
    let issuer_ca_der = issuer_ca_fixture();
    let issuer_ca = rpki::data_model::rc::ResourceCertificate::decode_der(&issuer_ca_der)
        .expect("decode issuer ca");
    let policy = Policy::default();
    let outcome = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &issuer_ca_der,
        Some(issuer_ca_rsync_uri()),
        issuer_ca.tbs.extensions.ip_resources.as_ref(),
        issuer_ca.tbs.extensions.as_resources.as_ref(),
        validation_time,
    );
    assert!(
        outcome.vrps.len() > 10,
        "expected many VRPs, got {}",
        outcome.vrps.len()
    );
    assert!(outcome.aspas.is_empty());
}
/// With `drop_object`, a single corrupted ROA must be dropped (recorded as
/// an audit error) while the remaining ROAs still yield VRPs.
#[test]
fn signed_object_failure_policy_drop_object_drops_only_bad_object() {
    let (dir, rsync_base_uri, manifest_file) = cernet_fixture();
    let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
    let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file);
    let mut pack = build_verified_pack_from_local_rsync_fixture(
        &dir,
        &rsync_base_uri,
        &manifest_rsync_uri,
        validation_time,
    );
    // Remember every ROA URI so we can assert one audit entry per ROA.
    let roa_uris = pack
        .files
        .iter()
        .filter(|f| f.rsync_uri.ends_with(".roa"))
        .map(|f| f.rsync_uri.clone())
        .collect::<Vec<_>>();
    // Corrupt the first ROA in place (URI kept, bytes replaced).
    let bad_idx = pack
        .files
        .iter()
        .position(|f| f.rsync_uri.ends_with(".roa"))
        .expect("pack contains roa");
    let bad_uri = pack.files[bad_idx].rsync_uri.clone();
    pack.files[bad_idx] = PackFile::from_bytes_compute_sha256(bad_uri, vec![0u8]);
    let issuer_ca_der = issuer_ca_fixture();
    let issuer_ca = rpki::data_model::rc::ResourceCertificate::decode_der(&issuer_ca_der)
        .expect("decode issuer ca");
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let out = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &issuer_ca_der,
        Some(issuer_ca_rsync_uri()),
        issuer_ca.tbs.extensions.ip_resources.as_ref(),
        issuer_ca.tbs.extensions.as_resources.as_ref(),
        validation_time,
    );
    // Idiomatic emptiness check (was `out.vrps.len() > 0`).
    assert!(!out.vrps.is_empty());
    assert!(!out.warnings.is_empty());
    assert_eq!(
        out.audit.len(),
        roa_uris.len(),
        "expected one audit entry per ROA"
    );
    assert!(
        out.audit.iter().any(|e| e.rsync_uri == pack.files[bad_idx].rsync_uri
            && matches!(e.result, rpki::audit::AuditObjectResult::Error)),
        "expected audit error for the corrupted ROA"
    );
}
/// With `drop_publication_point`, one corrupted ROA must suppress all VRP
/// and ASPA output; the audit trail records the error plus skips for the
/// remaining objects.
#[test]
fn signed_object_failure_policy_drop_publication_point_drops_all_output() {
    let (fixture_dir, rsync_base_uri, manifest_file) = cernet_fixture();
    let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
    let validation_time = validation_time_from_manifest_fixture(&fixture_dir, &manifest_file);
    let mut pack = build_verified_pack_from_local_rsync_fixture(
        &fixture_dir,
        &rsync_base_uri,
        &manifest_rsync_uri,
        validation_time,
    );
    // Record every ROA URI before corrupting one of them.
    let roa_uris: Vec<String> = pack
        .files
        .iter()
        .filter(|f| f.rsync_uri.ends_with(".roa"))
        .map(|f| f.rsync_uri.clone())
        .collect();
    let corrupt_idx = pack
        .files
        .iter()
        .position(|f| f.rsync_uri.ends_with(".roa"))
        .expect("pack contains roa");
    let corrupt_uri = pack.files[corrupt_idx].rsync_uri.clone();
    pack.files[corrupt_idx] = PackFile::from_bytes_compute_sha256(corrupt_uri, vec![0u8]);
    let issuer_ca_der = issuer_ca_fixture();
    let issuer_ca = rpki::data_model::rc::ResourceCertificate::decode_der(&issuer_ca_der)
        .expect("decode issuer ca");
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint,
        ..Policy::default()
    };
    let outcome = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &issuer_ca_der,
        Some(issuer_ca_rsync_uri()),
        issuer_ca.tbs.extensions.ip_resources.as_ref(),
        issuer_ca.tbs.extensions.as_resources.as_ref(),
        validation_time,
    );
    assert!(outcome.vrps.is_empty());
    assert!(outcome.aspas.is_empty());
    assert!(!outcome.warnings.is_empty());
    assert_eq!(
        outcome.audit.len(),
        roa_uris.len(),
        "expected audit entries for all ROAs (error + skipped due to policy)"
    );
    let bad_entry = outcome
        .audit
        .iter()
        .find(|e| e.rsync_uri == pack.files[corrupt_idx].rsync_uri)
        .expect("bad roa audit entry");
    assert!(
        matches!(bad_entry.result, rpki::audit::AuditObjectResult::Error),
        "expected error for corrupted ROA"
    );
    let any_skipped = outcome
        .audit
        .iter()
        .any(|e| matches!(e.result, rpki::audit::AuditObjectResult::Skipped));
    assert!(
        any_skipped,
        "expected at least one skipped ROA due to drop_publication_point policy"
    );
}
/// A pack with no CRL must produce no VRPs/ASPAs and at least one warning,
/// since CRL selection fails before any object is processed.
#[test]
fn process_pack_for_issuer_without_crl_drops_publication_point() {
    let (fixture_dir, rsync_base_uri, manifest_file) = cernet_fixture();
    let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
    let validation_time = validation_time_from_manifest_fixture(&fixture_dir, &manifest_file);
    let manifest_bytes = std::fs::read(fixture_dir.join(&manifest_file)).expect("read mft");
    // A lone ROA-looking file; it won't be reached because CRL selection fails first.
    let files = vec![PackFile::from_bytes_compute_sha256(
        format!("{rsync_base_uri}dummy.roa"),
        vec![1],
    )];
    let pack = minimal_pack(
        &manifest_rsync_uri,
        &rsync_base_uri,
        manifest_bytes,
        files,
        validation_time,
    );
    let policy = Policy::default();
    let outcome = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &[],
        None,
        None,
        None,
        validation_time,
    );
    assert!(outcome.vrps.is_empty());
    assert!(outcome.aspas.is_empty());
    assert!(!outcome.warnings.is_empty());
}
/// An unparseable `.asa` file under `drop_object` yields no ASPAs and at
/// least one warning, without failing the publication point.
#[test]
fn process_pack_for_issuer_handles_invalid_aspa_bytes() {
    let (fixture_dir, rsync_base_uri, manifest_file) = cernet_fixture();
    let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
    let validation_time = validation_time_from_manifest_fixture(&fixture_dir, &manifest_file);
    let manifest_bytes = std::fs::read(fixture_dir.join(&manifest_file)).expect("read mft");
    let crl_bytes = std::fs::read(
        fixture_dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl"),
    )
    .expect("read crl");
    // A real CRL plus an unparseable ASPA file.
    let files = vec![
        PackFile::from_bytes_compute_sha256(
            format!("{rsync_base_uri}05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl"),
            crl_bytes,
        ),
        PackFile::from_bytes_compute_sha256(format!("{rsync_base_uri}bad.asa"), vec![0u8]),
    ];
    let pack = minimal_pack(
        &manifest_rsync_uri,
        &rsync_base_uri,
        manifest_bytes,
        files,
        validation_time,
    );
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let outcome = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &[],
        None,
        None,
        None,
        validation_time,
    );
    assert!(outcome.aspas.is_empty());
    assert!(!outcome.warnings.is_empty());
}
/// Under `drop_publication_point`, one unparseable ASPA must suppress all
/// output, including objects listed after it in the pack.
#[test]
fn process_pack_for_issuer_drop_publication_point_on_invalid_aspa_bytes() {
    let (fixture_dir, rsync_base_uri, manifest_file) = cernet_fixture();
    let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
    let validation_time = validation_time_from_manifest_fixture(&fixture_dir, &manifest_file);
    let manifest_bytes = std::fs::read(fixture_dir.join(&manifest_file)).expect("read mft");
    let crl_bytes = std::fs::read(
        fixture_dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl"),
    )
    .expect("read crl");
    let files = vec![
        PackFile::from_bytes_compute_sha256(
            format!("{rsync_base_uri}05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl"),
            crl_bytes,
        ),
        PackFile::from_bytes_compute_sha256(format!("{rsync_base_uri}bad.asa"), vec![0u8]),
        // A ROA-like file: we should not reach it due to policy.
        PackFile::from_bytes_compute_sha256(format!("{rsync_base_uri}dummy.roa"), vec![1u8]),
    ];
    let pack = minimal_pack(
        &manifest_rsync_uri,
        &rsync_base_uri,
        manifest_bytes,
        files,
        validation_time,
    );
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint,
        ..Policy::default()
    };
    let outcome = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &[],
        None,
        None,
        None,
        validation_time,
    );
    assert!(outcome.vrps.is_empty());
    assert!(outcome.aspas.is_empty());
    assert!(!outcome.warnings.is_empty());
}
/// Resolver that never supplies an issuer CA certificate.
struct NoIssuerResolver;
impl IssuerCaCertificateResolver for NoIssuerResolver {
    fn resolve_by_subject_dn(&self, _subject_dn: &str) -> Option<Vec<u8>> {
        None
    }
}
/// Resolver that returns the same issuer CA DER for every subject DN.
struct AlwaysIssuerResolver {
    issuer_ca_der: Vec<u8>,
}
impl IssuerCaCertificateResolver for AlwaysIssuerResolver {
    fn resolve_by_subject_dn(&self, _subject_dn: &str) -> Option<Vec<u8>> {
        Some(self.issuer_ca_der.clone())
    }
}
/// A pack containing only a CA certificate produces no VRPs/ASPAs but
/// must still be processed without error (exercises CA indexing).
#[test]
fn process_verified_pack_indexes_ca_certs_by_subject() {
    let (fixture_dir, rsync_base_uri, manifest_file) = cernet_fixture();
    let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
    let validation_time = validation_time_from_manifest_fixture(&fixture_dir, &manifest_file);
    let manifest_bytes = std::fs::read(fixture_dir.join(&manifest_file)).expect("read mft");
    // Add a real CA certificate to exercise CA indexing logic.
    let ca_der = issuer_ca_fixture();
    let ca_file =
        PackFile::from_bytes_compute_sha256(format!("{rsync_base_uri}some-ca.cer"), ca_der);
    let pack = minimal_pack(
        &manifest_rsync_uri,
        &rsync_base_uri,
        manifest_bytes,
        vec![ca_file],
        validation_time,
    );
    let policy = Policy::default();
    let outcome = process_verified_publication_point_pack(
        &pack,
        &policy,
        &NoIssuerResolver,
        validation_time,
    )
    .expect("process pack");
    assert!(outcome.vrps.is_empty());
    assert!(outcome.aspas.is_empty());
}
/// With a resolver that always supplies the real issuer CA, the full
/// CERNET fixture must yield many VRPs and an audit entry for each ROA.
#[test]
fn process_pack_with_resolver_extracts_vrps_from_real_cernet_fixture() {
    let (fixture_dir, rsync_base_uri, manifest_file) = cernet_fixture();
    let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
    let validation_time = validation_time_from_manifest_fixture(&fixture_dir, &manifest_file);
    let pack = build_verified_pack_from_local_rsync_fixture(
        &fixture_dir,
        &rsync_base_uri,
        &manifest_rsync_uri,
        validation_time,
    );
    let policy = Policy::default();
    let issuer_ca_der = issuer_ca_fixture();
    let resolver = AlwaysIssuerResolver {
        issuer_ca_der: issuer_ca_der.clone(),
    };
    let outcome =
        process_verified_publication_point_pack(&pack, &policy, &resolver, validation_time)
            .expect("process pack");
    assert!(
        outcome.vrps.len() > 10,
        "expected many VRPs, got {}",
        outcome.vrps.len()
    );
    let roa_count = pack
        .files
        .iter()
        .filter(|f| f.rsync_uri.ends_with(".roa"))
        .count();
    assert!(outcome.audit.len() >= roa_count, "expected ROA audit entries");
}

View File

@ -0,0 +1,304 @@
use rpki::policy::{Policy, SignedObjectFailurePolicy};
use rpki::storage::{PackFile, PackTime, VerifiedPublicationPointPack};
use rpki::validation::objects::{
IssuerCaCertificateResolver, ObjectsProcessError, process_verified_publication_point_pack,
process_verified_publication_point_pack_for_issuer,
};
/// Read a fixture file relative to the crate root, panicking with context
/// on failure.
fn fixture_bytes(path: &str) -> Vec<u8> {
    let full_path = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(path);
    std::fs::read(full_path).unwrap_or_else(|e| panic!("read fixture {path}: {e}"))
}
/// Build a pack anchored at "now" with a one-hour validity window and
/// fixed example URIs; callers supply the manifest bytes and file list.
fn dummy_pack(manifest_bytes: Vec<u8>, files: Vec<PackFile>) -> VerifiedPublicationPointPack {
    let now = time::OffsetDateTime::now_utc();
    VerifiedPublicationPointPack {
        format_version: VerifiedPublicationPointPack::FORMAT_VERSION_V1,
        manifest_rsync_uri: "rsync://example.test/repo/pp/manifest.mft".to_string(),
        publication_point_rsync_uri: "rsync://example.test/repo/pp/".to_string(),
        this_update: PackTime::from_utc_offset_datetime(now),
        next_update: PackTime::from_utc_offset_datetime(now + time::Duration::hours(1)),
        verified_at: PackTime::from_utc_offset_datetime(now),
        manifest_bytes,
        files,
    }
}
/// Resolver that can never supply an issuer CA certificate.
struct NoneResolver;
impl IssuerCaCertificateResolver for NoneResolver {
    fn resolve_by_subject_dn(&self, _subject_dn: &str) -> Option<Vec<u8>> {
        None
    }
}
/// A valid ROA whose issuer cannot be resolved is counted but produces no
/// output under `drop_object`: one audit entry and one warning.
#[test]
fn process_pack_drop_object_on_missing_issuer_ca_for_roa() {
    let manifest_bytes = fixture_bytes(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let roa_bytes =
        fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
    let roa_file = PackFile::from_bytes_compute_sha256(
        "rsync://rpki.cernet.net/repo/cernet/0/AS4538.roa",
        roa_bytes,
    );
    let pack = dummy_pack(manifest_bytes, vec![roa_file]);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let now = time::OffsetDateTime::now_utc();
    let outcome = process_verified_publication_point_pack(&pack, &policy, &NoneResolver, now)
        .expect("drop_object should not error");
    assert_eq!(outcome.stats.roa_total, 1);
    assert_eq!(outcome.stats.roa_ok, 0);
    assert_eq!(outcome.audit.len(), 1);
    assert_eq!(outcome.warnings.len(), 1);
}
/// Under `drop_publication_point`, a missing issuer CA for the first ROA
/// fails the whole publication point with a descriptive error.
#[test]
fn process_pack_drop_publication_point_on_missing_issuer_ca_for_roa_skips_rest() {
    let manifest_bytes = fixture_bytes(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let roa_bytes =
        fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
    let aspa_bytes = fixture_bytes(
        "tests/fixtures/repository/chloe.sobornost.net/rpki/RIPE-nljobsnijders/5m80fwYws_3FiFD7JiQjAqZ1RYQ.asa",
    );
    // Two ROAs plus an ASPA: the first failure short-circuits the rest.
    let files = vec![
        PackFile::from_bytes_compute_sha256(
            "rsync://example.test/repo/pp/first.roa",
            roa_bytes.clone(),
        ),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/second.roa", roa_bytes),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/x.asa", aspa_bytes),
    ];
    let pack = dummy_pack(manifest_bytes, files);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint,
        ..Policy::default()
    };
    let now = time::OffsetDateTime::now_utc();
    let err = process_verified_publication_point_pack(&pack, &policy, &NoneResolver, now)
        .unwrap_err();
    assert!(matches!(err, ObjectsProcessError::PublicationPointDropped { .. }));
    assert!(err.to_string().contains("drop_publication_point"));
}
/// A valid ASPA whose issuer cannot be resolved is counted but produces no
/// output under `drop_object`: one audit entry and one warning.
#[test]
fn process_pack_drop_object_on_missing_issuer_ca_for_aspa() {
    let manifest_bytes = fixture_bytes(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let aspa_bytes = fixture_bytes(
        "tests/fixtures/repository/chloe.sobornost.net/rpki/RIPE-nljobsnijders/5m80fwYws_3FiFD7JiQjAqZ1RYQ.asa",
    );
    let aspa_file =
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/x.asa", aspa_bytes);
    let pack = dummy_pack(manifest_bytes, vec![aspa_file]);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let now = time::OffsetDateTime::now_utc();
    let outcome = process_verified_publication_point_pack(&pack, &policy, &NoneResolver, now)
        .expect("drop_object should not error");
    assert_eq!(outcome.stats.aspa_total, 1);
    assert_eq!(outcome.stats.aspa_ok, 0);
    assert_eq!(outcome.audit.len(), 1);
    assert_eq!(outcome.warnings.len(), 1);
}
/// Under `drop_publication_point`, a missing issuer CA for the first ASPA
/// fails the whole publication point before the ROA is processed.
#[test]
fn process_pack_drop_publication_point_on_missing_issuer_ca_for_aspa_skips_rest() {
    let manifest_bytes = fixture_bytes(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let roa_bytes =
        fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
    let aspa_bytes = fixture_bytes(
        "tests/fixtures/repository/chloe.sobornost.net/rpki/RIPE-nljobsnijders/5m80fwYws_3FiFD7JiQjAqZ1RYQ.asa",
    );
    let files = vec![
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/x.asa", aspa_bytes),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/y.roa", roa_bytes),
    ];
    let pack = dummy_pack(manifest_bytes, files);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint,
        ..Policy::default()
    };
    let now = time::OffsetDateTime::now_utc();
    let err = process_verified_publication_point_pack(&pack, &policy, &NoneResolver, now)
        .unwrap_err();
    assert!(matches!(err, ObjectsProcessError::PublicationPointDropped { .. }));
}
/// An issuer without a usable CRL drops the publication point: both
/// objects are counted and audited, but nothing is emitted.
#[test]
fn process_pack_for_issuer_marks_objects_skipped_when_missing_issuer_crl() {
    let manifest_bytes = fixture_bytes(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let roa_bytes =
        fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
    let aspa_bytes = fixture_bytes(
        "tests/fixtures/repository/chloe.sobornost.net/rpki/RIPE-nljobsnijders/5m80fwYws_3FiFD7JiQjAqZ1RYQ.asa",
    );
    let files = vec![
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/a.roa", roa_bytes),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/a.asa", aspa_bytes),
    ];
    let pack = dummy_pack(manifest_bytes, files);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let outcome = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &[0x01, 0x02, 0x03],
        None,
        None,
        None,
        time::OffsetDateTime::now_utc(),
    );
    assert!(outcome.stats.publication_point_dropped);
    assert_eq!(outcome.stats.roa_total, 1);
    assert_eq!(outcome.stats.aspa_total, 1);
    assert_eq!(outcome.vrps.len(), 0);
    assert_eq!(outcome.aspas.len(), 0);
    assert_eq!(outcome.audit.len(), 2);
    assert_eq!(outcome.warnings.len(), 1);
}
/// Two unparseable signed objects under `drop_object`: the publication
/// point survives, with one audit entry and one warning per object.
#[test]
fn process_pack_for_issuer_drop_object_records_errors_and_continues() {
    let manifest_bytes = fixture_bytes(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    // A bogus CRL plus two unparseable signed objects.
    let files = vec![
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/issuer.crl", vec![0x01]),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/a.roa", vec![0x00]),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/b.asa", vec![0x00]),
    ];
    let pack = dummy_pack(manifest_bytes, files);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
        ..Policy::default()
    };
    let outcome = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &[0x01, 0x02, 0x03],
        None,
        None,
        None,
        time::OffsetDateTime::now_utc(),
    );
    assert!(!outcome.stats.publication_point_dropped);
    assert_eq!(outcome.stats.roa_total, 1);
    assert_eq!(outcome.stats.aspa_total, 1);
    assert_eq!(outcome.audit.len(), 2);
    assert_eq!(outcome.warnings.len(), 2);
}
/// Under `drop_publication_point`, the first parse failure drops the whole
/// point; the remaining objects are still audited as skipped.
#[test]
fn process_pack_for_issuer_drop_publication_point_records_skips_for_rest() {
    let manifest_bytes = fixture_bytes(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    // A bogus CRL plus three unparseable signed objects.
    let files = vec![
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/issuer.crl", vec![0x01]),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/a.roa", vec![0x00]),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/b.roa", vec![0x00]),
        PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/c.asa", vec![0x00]),
    ];
    let pack = dummy_pack(manifest_bytes, files);
    let policy = Policy {
        signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint,
        ..Policy::default()
    };
    let outcome = process_verified_publication_point_pack_for_issuer(
        &pack,
        &policy,
        &[0x01, 0x02, 0x03],
        None,
        None,
        None,
        time::OffsetDateTime::now_utc(),
    );
    assert!(outcome.stats.publication_point_dropped);
    assert_eq!(outcome.stats.roa_total, 2);
    assert_eq!(outcome.stats.aspa_total, 1);
    assert!(outcome.audit.len() >= 3, "expected error + skipped entries");
    assert_eq!(outcome.warnings.len(), 1);
}

37
tests/test_policy.rs Normal file
View File

@ -0,0 +1,37 @@
use rpki::policy::{CaFailedFetchPolicy, Policy, SignedObjectFailurePolicy, SyncPreference};
/// The out-of-the-box policy must prefer RRDP with rsync fallback, reuse the
/// verified cache when a CA fetch fails, and drop only the failing object.
#[test]
fn policy_defaults_are_correct() {
    let defaults = Policy::default();
    assert_eq!(defaults.sync_preference, SyncPreference::RrdpThenRsync);
    assert_eq!(
        defaults.ca_failed_fetch_policy,
        CaFailedFetchPolicy::UseVerifiedCache
    );
    assert_eq!(
        defaults.signed_object_failure_policy,
        SignedObjectFailurePolicy::DropObject
    );
}
/// Every policy field can be overridden from a TOML document.
#[test]
fn policy_toml_parsing_supports_overrides() {
    let toml_text = r#"
sync_preference = "rsync_only"
ca_failed_fetch_policy = "stop_all_output"
signed_object_failure_policy = "drop_publication_point"
"#;
    let parsed = Policy::from_toml_str(toml_text).expect("parse TOML policy");
    assert_eq!(parsed.sync_preference, SyncPreference::RsyncOnly);
    assert_eq!(parsed.ca_failed_fetch_policy, CaFailedFetchPolicy::StopAllOutput);
    assert_eq!(
        parsed.signed_object_failure_policy,
        SignedObjectFailurePolicy::DropPublicationPoint
    );
}
/// An empty TOML document must yield the default policy for every field.
#[test]
fn policy_toml_parsing_uses_defaults_when_missing() {
    let parsed = Policy::from_toml_str("").expect("parse empty TOML policy");
    assert_eq!(parsed, Policy::default());
}

270
tests/test_repo_sync_m6.rs Normal file
View File

@ -0,0 +1,270 @@
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use rpki::fetch::rsync::{LocalDirRsyncFetcher, RsyncFetchError, RsyncFetcher};
use rpki::policy::{Policy, SyncPreference};
use rpki::storage::RocksStore;
use rpki::sync::repo::{RepoSyncSource, sync_publication_point};
use rpki::sync::rrdp::Fetcher;
/// HTTP fetcher test double that serves canned response bodies keyed by URI.
struct MapFetcher {
    by_uri: HashMap<String, Vec<u8>>,
}
impl Fetcher for MapFetcher {
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
        match self.by_uri.get(uri) {
            Some(body) => Ok(body.clone()),
            None => Err(format!("not found: {uri}")),
        }
    }
}
/// Decorator around an `RsyncFetcher` that records how many times
/// `fetch_objects` is invoked, so tests can assert whether the rsync
/// fallback path was exercised.
struct CountingRsyncFetcher<F> {
    inner: F,
    calls: Arc<Mutex<usize>>,
}
impl<F> CountingRsyncFetcher<F> {
    /// Wrap `inner`, returning the wrapper plus a shared handle to the counter.
    fn new(inner: F) -> (Self, Arc<Mutex<usize>>) {
        let counter = Arc::new(Mutex::new(0usize));
        let wrapper = Self {
            inner,
            calls: Arc::clone(&counter),
        };
        (wrapper, counter)
    }
}
impl<F: RsyncFetcher> RsyncFetcher for CountingRsyncFetcher<F> {
    fn fetch_objects(
        &self,
        rsync_base_uri: &str,
    ) -> Result<Vec<(String, Vec<u8>)>, RsyncFetchError> {
        {
            let mut count = self.calls.lock().unwrap();
            *count += 1;
        }
        self.inner.fetch_objects(rsync_base_uri)
    }
}
// Happy path: when the RRDP notification and snapshot are both available, the
// sync must use RRDP, write the snapshot objects, and never call rsync.
#[test]
fn repo_sync_uses_rrdp_when_available() {
    let notification_xml =
        std::fs::read("tests/fixtures/rrdp/notification.xml").expect("read notification");
    let snapshot_xml = std::fs::read("tests/fixtures/rrdp/snapshot.xml").expect("read snapshot");
    let http_fetcher = MapFetcher {
        by_uri: HashMap::from([
            (
                "https://example.net/rrdp/notification.xml".to_string(),
                notification_xml,
            ),
            (
                "https://example.net/rrdp/snapshot.xml".to_string(),
                snapshot_xml,
            ),
        ]),
    };
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    // A local rsync repo exists but must not be consulted on the RRDP path.
    let local_repo = tempfile::tempdir().expect("local repo dir");
    std::fs::write(local_repo.path().join("x.cer"), b"x").unwrap();
    let (rsync_fetcher, calls) =
        CountingRsyncFetcher::new(LocalDirRsyncFetcher::new(local_repo.path()));
    let policy = Policy::default();
    let out = sync_publication_point(
        &store,
        &policy,
        Some("https://example.net/rrdp/notification.xml"),
        "rsync://example.net/repo/",
        &http_fetcher,
        &rsync_fetcher,
    )
    .expect("sync");
    assert_eq!(out.source, RepoSyncSource::Rrdp);
    assert_eq!(out.objects_written, 2);
    // Zero recorded calls proves the rsync fallback was never taken.
    assert_eq!(*calls.lock().unwrap(), 0);
    assert_eq!(
        store
            .get_raw("rsync://example.net/repo/obj1.cer")
            .unwrap()
            .unwrap(),
        b"abc"
    );
}
// Running the same RRDP sync twice with an unchanged notification must skip
// the snapshot on the second pass (0 objects written) while leaving the
// previously stored objects intact, and never fall back to rsync.
#[test]
fn repo_sync_skips_snapshot_when_state_unchanged() {
    let notification_xml =
        std::fs::read("tests/fixtures/rrdp/notification.xml").expect("read notification");
    let snapshot_xml = std::fs::read("tests/fixtures/rrdp/snapshot.xml").expect("read snapshot");
    let http_fetcher = MapFetcher {
        by_uri: HashMap::from([
            (
                "https://example.net/rrdp/notification.xml".to_string(),
                notification_xml,
            ),
            (
                "https://example.net/rrdp/snapshot.xml".to_string(),
                snapshot_xml,
            ),
        ]),
    };
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let local_repo = tempfile::tempdir().expect("local repo dir");
    std::fs::write(local_repo.path().join("x.cer"), b"x").unwrap();
    let (rsync_fetcher, calls) =
        CountingRsyncFetcher::new(LocalDirRsyncFetcher::new(local_repo.path()));
    let policy = Policy::default();
    // First pass applies the snapshot and persists the RRDP state.
    let out1 = sync_publication_point(
        &store,
        &policy,
        Some("https://example.net/rrdp/notification.xml"),
        "rsync://example.net/repo/",
        &http_fetcher,
        &rsync_fetcher,
    )
    .expect("sync 1");
    assert_eq!(out1.source, RepoSyncSource::Rrdp);
    assert_eq!(out1.objects_written, 2);
    let out2 = sync_publication_point(
        &store,
        &policy,
        Some("https://example.net/rrdp/notification.xml"),
        "rsync://example.net/repo/",
        &http_fetcher,
        &rsync_fetcher,
    )
    .expect("sync 2");
    assert_eq!(out2.source, RepoSyncSource::Rrdp);
    assert_eq!(
        out2.objects_written, 0,
        "expected to skip snapshot apply when state unchanged"
    );
    assert_eq!(
        *calls.lock().unwrap(),
        0,
        "expected no rsync fallback calls"
    );
    assert_eq!(
        store
            .get_raw("rsync://example.net/repo/obj1.cer")
            .unwrap()
            .unwrap(),
        b"abc"
    );
}
// When RRDP fails mid-way (notification served, snapshot missing), the sync
// must fall back to rsync, record a warning citing RFC 8182 §3.4.5, and still
// populate the store from the rsync source.
#[test]
fn repo_sync_falls_back_to_rsync_on_rrdp_failure() {
    // Provide notification, but omit snapshot, causing RRDP fetch failure.
    let notification_xml =
        std::fs::read("tests/fixtures/rrdp/notification.xml").expect("read notification");
    let http_fetcher = MapFetcher {
        by_uri: HashMap::from([(
            "https://example.net/rrdp/notification.xml".to_string(),
            notification_xml,
        )]),
    };
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let local_repo = tempfile::tempdir().expect("local repo dir");
    std::fs::create_dir_all(local_repo.path().join("sub")).unwrap();
    std::fs::write(local_repo.path().join("sub/obj.cer"), b"hello").unwrap();
    let (rsync_fetcher, calls) =
        CountingRsyncFetcher::new(LocalDirRsyncFetcher::new(local_repo.path()));
    let policy = Policy::default();
    let out = sync_publication_point(
        &store,
        &policy,
        Some("https://example.net/rrdp/notification.xml"),
        "rsync://example.net/repo/",
        &http_fetcher,
        &rsync_fetcher,
    )
    .expect("fallback sync");
    assert_eq!(out.source, RepoSyncSource::Rsync);
    assert_eq!(out.objects_written, 1);
    // Exactly one rsync call proves the fallback path ran.
    assert_eq!(*calls.lock().unwrap(), 1);
    assert!(!out.warnings.is_empty());
    assert!(
        out.warnings[0]
            .rfc_refs
            .iter()
            .any(|r| r.0 == "RFC 8182 §3.4.5")
    );
    assert_eq!(
        store
            .get_raw("rsync://example.net/repo/sub/obj.cer")
            .unwrap()
            .unwrap(),
        b"hello"
    );
}
/// Rsync-only sync must mirror every file from the local repo directory into
/// the raw-object store, keyed by its full rsync URI (including subdirs).
#[test]
fn repo_sync_rsync_populates_raw_objects() {
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let local_repo = tempfile::tempdir().expect("local repo dir");
    std::fs::create_dir_all(local_repo.path().join("a/b")).unwrap();
    std::fs::write(local_repo.path().join("a/one.cer"), b"1").unwrap();
    std::fs::write(local_repo.path().join("a/b/two.crl"), b"2").unwrap();
    let http_fetcher = MapFetcher {
        by_uri: HashMap::new(),
    };
    let rsync_fetcher = LocalDirRsyncFetcher::new(local_repo.path());
    // Struct-update syntax instead of mutating a default value
    // (clippy::field_reassign_with_default); consistent with sibling tests.
    let policy = Policy {
        sync_preference: SyncPreference::RsyncOnly,
        ..Policy::default()
    };
    let out = sync_publication_point(
        &store,
        &policy,
        None,
        "rsync://example.net/repo/",
        &http_fetcher,
        &rsync_fetcher,
    )
    .expect("rsync-only sync");
    assert_eq!(out.source, RepoSyncSource::Rsync);
    assert_eq!(out.objects_written, 2);
    assert_eq!(
        store
            .get_raw("rsync://example.net/repo/a/one.cer")
            .unwrap()
            .unwrap(),
        b"1"
    );
    assert_eq!(
        store
            .get_raw("rsync://example.net/repo/a/b/two.crl")
            .unwrap()
            .unwrap(),
        b"2"
    );
}

View File

@ -28,6 +28,9 @@ fn dummy_ee(
extensions: RcExtensions { extensions: RcExtensions {
basic_constraints_ca: false, basic_constraints_ca: false,
subject_key_identifier: Some(vec![0x01]), subject_key_identifier: Some(vec![0x01]),
authority_key_identifier: None,
crl_distribution_points_uris: None,
ca_issuers_uris: None,
subject_info_access: Some(SubjectInfoAccess::Ca( subject_info_access: Some(SubjectInfoAccess::Ca(
rpki::data_model::rc::SubjectInfoAccessCa { rpki::data_model::rc::SubjectInfoAccessCa {
access_descriptions: vec![], access_descriptions: vec![],

169
tests/test_rrdp_sync_m5.rs Normal file
View File

@ -0,0 +1,169 @@
use std::collections::HashMap;
use rpki::storage::RocksStore;
use rpki::sync::rrdp::{Fetcher, parse_notification_snapshot, sync_from_notification_snapshot};
use sha2::Digest;
/// HTTP fetcher test double backed by a URI → response-body map.
struct MapFetcher {
    by_uri: HashMap<String, Vec<u8>>,
}
impl Fetcher for MapFetcher {
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
        match self.by_uri.get(uri) {
            Some(body) => Ok(body.clone()),
            None => Err(format!("not found: {uri}")),
        }
    }
}
// End-to-end snapshot application: the notification parses, the snapshot's
// objects land in the raw store keyed by rsync URI, and the RRDP session
// state (session_id + serial) is persisted under the notification URI.
#[test]
fn notification_parses_and_snapshot_is_applied_and_state_written() {
    let notification_xml =
        std::fs::read("tests/fixtures/rrdp/notification.xml").expect("read notification");
    let snapshot_xml = std::fs::read("tests/fixtures/rrdp/snapshot.xml").expect("read snapshot");
    let notif = parse_notification_snapshot(&notification_xml).expect("parse notification");
    assert_eq!(notif.serial, 1);
    assert_eq!(notif.snapshot_uri, "https://example.net/rrdp/snapshot.xml");
    let mut fetcher = MapFetcher {
        by_uri: HashMap::new(),
    };
    fetcher.by_uri.insert(
        "https://example.net/rrdp/snapshot.xml".to_string(),
        snapshot_xml,
    );
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let published = sync_from_notification_snapshot(
        &store,
        "https://example.net/rrdp/notification.xml",
        &notification_xml,
        &fetcher,
    )
    .expect("sync");
    assert_eq!(published, 2);
    let obj1 = store
        .get_raw("rsync://example.net/repo/obj1.cer")
        .expect("get obj1")
        .expect("obj1 exists");
    assert_eq!(obj1, b"abc");
    let obj2 = store
        .get_raw("rsync://example.net/repo/obj2.crl")
        .expect("get obj2")
        .expect("obj2 exists");
    assert_eq!(obj2, b"def");
    // The persisted state must echo the fixture's session_id and serial.
    let state_bytes = store
        .get_rrdp_state("https://example.net/rrdp/notification.xml")
        .expect("get state")
        .expect("state exists");
    let state = rpki::sync::rrdp::RrdpState::decode(&state_bytes).expect("decode state");
    assert_eq!(
        state.session_id,
        "9df4b597-af9e-4dca-bdda-719cce2c4e28".to_string()
    );
    assert_eq!(state.serial, 1);
}
// Corrupting the snapshot hash inside the notification must make the sync
// fail with a "hash mismatch" error.
#[test]
fn snapshot_hash_mismatch_is_rejected() {
    let mut notification_xml =
        String::from_utf8(std::fs::read("tests/fixtures/rrdp/notification.xml").unwrap()).unwrap();
    // Replace the fixture's correct snapshot hash with an all-zero hash.
    notification_xml = notification_xml.replace(
        "dcb1ce91401d568d7ddf7a4c9f70c65d8428c3a5e7135f82db99c4de30413551",
        "0000000000000000000000000000000000000000000000000000000000000000",
    );
    let snapshot_xml = std::fs::read("tests/fixtures/rrdp/snapshot.xml").expect("read snapshot");
    let fetcher = MapFetcher {
        by_uri: HashMap::from([(
            "https://example.net/rrdp/snapshot.xml".to_string(),
            snapshot_xml,
        )]),
    };
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let err = sync_from_notification_snapshot(
        &store,
        "https://example.net/rrdp/notification.xml",
        notification_xml.as_bytes(),
        &fetcher,
    )
    .expect_err("hash mismatch rejected");
    assert!(err.to_string().contains("hash mismatch"));
}
// The snapshot's session_id must match the notification's. The snapshot hash
// in the notification is re-computed after tampering, so only the session_id
// check can be the cause of failure.
#[test]
fn session_id_mismatch_is_rejected() {
    let notification_xml =
        std::fs::read("tests/fixtures/rrdp/notification.xml").expect("read notification");
    let mut snapshot_xml =
        String::from_utf8(std::fs::read("tests/fixtures/rrdp/snapshot.xml").unwrap()).unwrap();
    snapshot_xml = snapshot_xml.replace(
        "9df4b597-af9e-4dca-bdda-719cce2c4e28",
        "00000000-0000-4000-8000-000000000000",
    );
    let snapshot_xml = snapshot_xml.into_bytes();
    // Recompute snapshot hash and patch notification to keep hash correct, so we test
    // the session_id mismatch check.
    let mut notif_str = String::from_utf8(notification_xml).unwrap();
    let digest = sha2::Sha256::digest(&snapshot_xml);
    let hex_hash = hex::encode(digest);
    notif_str = notif_str.replace(
        "dcb1ce91401d568d7ddf7a4c9f70c65d8428c3a5e7135f82db99c4de30413551",
        &hex_hash,
    );
    let fetcher = MapFetcher {
        by_uri: HashMap::from([(
            "https://example.net/rrdp/snapshot.xml".to_string(),
            snapshot_xml,
        )]),
    };
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let err = sync_from_notification_snapshot(
        &store,
        "https://example.net/rrdp/notification.xml",
        notif_str.as_bytes(),
        &fetcher,
    )
    .expect_err("session_id mismatch rejected");
    assert!(err.to_string().contains("session_id mismatch"));
}
// A notification carrying serial="0" must be rejected as invalid.
#[test]
fn serial_zero_is_rejected() {
    let notification_xml =
        String::from_utf8(std::fs::read("tests/fixtures/rrdp/notification.xml").unwrap()).unwrap();
    let notification_xml = notification_xml.replace("serial=\"1\"", "serial=\"0\"");
    let snapshot_xml = std::fs::read("tests/fixtures/rrdp/snapshot.xml").expect("read snapshot");
    let fetcher = MapFetcher {
        by_uri: HashMap::from([(
            "https://example.net/rrdp/snapshot.xml".to_string(),
            snapshot_xml,
        )]),
    };
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let err = sync_from_notification_snapshot(
        &store,
        "https://example.net/rrdp/notification.xml",
        notification_xml.as_bytes(),
        &fetcher,
    )
    .expect_err("serial=0 rejected");
    assert!(err.to_string().contains("serial invalid"));
}

128
tests/test_run_m9.rs Normal file
View File

@ -0,0 +1,128 @@
use std::collections::HashMap;
use std::path::Path;
use rpki::data_model::crl::RpkixCrl;
use rpki::data_model::manifest::ManifestObject;
use rpki::data_model::rc::ResourceCertificate;
use rpki::fetch::rsync::LocalDirRsyncFetcher;
use rpki::policy::{Policy, SyncPreference};
use rpki::storage::RocksStore;
use rpki::sync::rrdp::Fetcher;
use rpki::validation::objects::IssuerCaCertificateResolver;
use rpki::validation::run::{run_publication_point_once, verified_pack_exists};
/// Translate a fixture path of the form
/// `tests/fixtures/repository/<host>/<rest...>` into `rsync://<host>/<rest...>`.
///
/// Panics if `path` is not under `tests/fixtures/repository` or has no host
/// component — both indicate a broken test fixture layout.
fn fixture_to_rsync_uri(path: &Path) -> String {
    let relative = path
        .strip_prefix("tests/fixtures/repository")
        .expect("path under tests/fixtures/repository");
    let mut parts = relative.components();
    let host_part = parts.next().expect("host component");
    let host = host_part.as_os_str().to_string_lossy();
    let tail = parts.as_path().to_string_lossy();
    format!("rsync://{host}/{tail}")
}
/// Like `fixture_to_rsync_uri`, but guarantees a trailing slash so the result
/// can serve as a publication-point base URI.
fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
    let base = fixture_to_rsync_uri(dir);
    if base.ends_with('/') {
        base
    } else {
        base + "/"
    }
}
/// HTTP fetcher that always fails, guaranteeing this test stays offline.
struct NeverHttpFetcher;
impl Fetcher for NeverHttpFetcher {
    fn fetch(&self, _uri: &str) -> Result<Vec<u8>, String> {
        Err(String::from("http fetch disabled in offline test"))
    }
}
/// Issuer-CA resolver test double backed by a subject-DN → DER map.
struct MapResolver {
    by_subject_dn: HashMap<String, Vec<u8>>,
}
impl IssuerCaCertificateResolver for MapResolver {
    fn resolve_by_subject_dn(&self, subject_dn: &str) -> Option<Vec<u8>> {
        self.by_subject_dn.get(subject_dn).map(|der| der.to_vec())
    }
}
/// Offline end-to-end run over the CERNET fixture publication point:
/// rsync-only sync mirrors the fixture directory, the run writes a verified
/// pack under the manifest URI, and the output contains VRPs for AS4538.
#[test]
fn e2e_offline_uses_rsync_then_writes_verified_pack_then_outputs_vrps() {
    let fixture_dir = Path::new("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0");
    let rsync_base_uri = "rsync://rpki.cernet.net/repo/cernet/0/";
    let manifest_path = fixture_dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft");
    let manifest_rsync_uri = fixture_to_rsync_uri(&manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(fixture_dir);
    let issuer_ca_der = std::fs::read("tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer")
        .expect("read issuer ca");
    let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer ca");
    // Choose a validation_time that is safely inside:
    // - manifest thisUpdate..nextUpdate
    // - issuer CA validity
    // - CRL thisUpdate..nextUpdate
    let manifest_der = std::fs::read(&manifest_path).expect("read manifest");
    let manifest = ManifestObject::decode_der(&manifest_der).expect("decode manifest");
    let crl_der = std::fs::read(fixture_dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl"))
        .expect("read crl");
    let crl = RpkixCrl::decode_der(&crl_der).expect("decode crl");
    let mut t = manifest.manifest.this_update;
    if issuer_ca.tbs.validity_not_before > t {
        t = issuer_ca.tbs.validity_not_before;
    }
    if crl.this_update.utc > t {
        t = crl.this_update.utc;
    }
    t += time::Duration::seconds(1);
    // Struct-update syntax instead of mutating a default value
    // (clippy::field_reassign_with_default); consistent with sibling tests.
    let policy = Policy {
        sync_preference: SyncPreference::RsyncOnly,
        ..Policy::default()
    };
    let rsync_fetcher = LocalDirRsyncFetcher::new(fixture_dir);
    let http_fetcher = NeverHttpFetcher;
    let resolver = MapResolver {
        by_subject_dn: HashMap::from([(issuer_ca.tbs.subject_dn, issuer_ca_der)]),
    };
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let expected_files = std::fs::read_dir(fixture_dir)
        .expect("read fixture dir")
        .filter_map(|e| e.ok())
        .filter_map(|e| e.metadata().ok().map(|m| (e, m)))
        .filter(|(_e, m)| m.is_file())
        .count();
    assert!(expected_files >= 3, "fixture dir seems incomplete");
    let out = run_publication_point_once(
        &store,
        &policy,
        None,
        rsync_base_uri,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        &http_fetcher,
        &rsync_fetcher,
        &resolver,
        t,
    )
    .expect("run publication point once");
    assert!(verified_pack_exists(&store, &manifest_rsync_uri).expect("exists check"));
    assert_eq!(out.repo_sync.objects_written, expected_files);
    assert!(
        out.objects.vrps.iter().any(|v| v.asn == 4538),
        "expected VRPs for AS4538"
    );
}

View File

@ -0,0 +1,222 @@
use rpki::validation::from_tal::discover_root_ca_instance_from_tal_and_ta_der;
use rpki::validation::run_tree_from_tal::root_handle_from_trust_anchor;
use rpki::validation::run_tree_from_tal::{
run_tree_from_tal_and_ta_der_serial, run_tree_from_tal_url_serial,
run_tree_from_tal_and_ta_der_serial_audit, run_tree_from_tal_url_serial_audit,
};
use rpki::validation::tree::TreeRunConfig;
use std::collections::HashMap;
/// HTTP fetcher test double backed by a URI → bytes map.
struct MapHttpFetcher {
    map: HashMap<String, Vec<u8>>,
}
impl rpki::sync::rrdp::Fetcher for MapHttpFetcher {
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
        match self.map.get(uri) {
            Some(bytes) => Ok(bytes.clone()),
            None => Err(format!("no fixture mapped for {uri}")),
        }
    }
}
/// Rsync fetcher test double that always returns an empty object list,
/// simulating an empty repository.
struct EmptyRsyncFetcher;
impl rpki::fetch::rsync::RsyncFetcher for EmptyRsyncFetcher {
    fn fetch_objects(
        &self,
        _rsync_base_uri: &str,
    ) -> Result<Vec<(String, Vec<u8>)>, rpki::fetch::rsync::RsyncFetchError> {
        Ok(Vec::new())
    }
}
// A root handle built from the fixture TAL + TA must sit at depth 0 and carry
// the URIs taken from the discovered CA instance.
#[test]
fn root_handle_is_constructible_from_fixture_tal_and_ta() {
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let discovery =
        discover_root_ca_instance_from_tal_and_ta_der(&tal_bytes, &ta_der, None).expect("discover");
    let root =
        root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance);
    assert_eq!(root.depth, 0);
    assert_eq!(root.manifest_rsync_uri, discovery.ca_instance.manifest_rsync_uri);
    assert_eq!(root.rsync_base_uri, discovery.ca_instance.rsync_base_uri);
    assert!(root.ca_certificate_der.len() > 100, "TA der should be non-empty");
}
/// TAL and TA fetches succeed via the mock HTTP map, but the rsync repository
/// is empty, so the single root publication point must fail and that failure
/// must surface as a warning.
#[test]
fn run_tree_from_tal_url_entry_executes_and_records_failure_when_repo_empty() {
    let tal_url = "mock:apnic.tal";
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let mut map = HashMap::new();
    map.insert(tal_url.to_string(), tal_bytes);
    map.insert(
        "https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
        ta_der.clone(),
    );
    // Last use of `ta_der`: move it instead of taking a redundant clone.
    map.insert(
        "rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
        ta_der,
    );
    let http = MapHttpFetcher { map };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let out = run_tree_from_tal_url_serial(
        &store,
        &policy,
        tal_url,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &TreeRunConfig {
            max_depth: Some(0),
            max_instances: Some(1),
        },
    )
    .expect("run tree");
    assert_eq!(out.tree.instances_processed, 0);
    assert_eq!(out.tree.instances_failed, 1);
    assert!(
        out.tree
            .warnings
            .iter()
            .any(|w| w.message.contains("publication point failed")),
        "expected failure warning"
    );
}
// Entry point taking TAL bytes + TA DER directly (no fetch for the TA): with
// an empty rsync repository the lone root instance must fail and the failure
// must surface as a warning.
#[test]
fn run_tree_from_tal_and_ta_der_entry_executes_and_records_failure_when_repo_empty() {
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let http = MapHttpFetcher { map: HashMap::new() };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let out = run_tree_from_tal_and_ta_der_serial(
        &store,
        &policy,
        &tal_bytes,
        &ta_der,
        None,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &TreeRunConfig {
            max_depth: Some(0),
            max_instances: Some(1),
        },
    )
    .expect("run tree");
    assert_eq!(out.tree.instances_processed, 0);
    assert_eq!(out.tree.instances_failed, 1);
    assert!(
        out.tree
            .warnings
            .iter()
            .any(|w| w.message.contains("publication point failed")),
        "expected failure warning"
    );
}
/// Audit variant of the TAL-URL entry point: with an empty rsync repository
/// the root instance fails and no publication-point audits are collected.
#[test]
fn run_tree_from_tal_url_audit_entry_collects_no_publication_points_when_repo_empty() {
    let tal_url = "mock:apnic.tal";
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let mut map = HashMap::new();
    map.insert(tal_url.to_string(), tal_bytes);
    map.insert(
        "https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
        ta_der.clone(),
    );
    // Last use of `ta_der`: move it instead of taking a redundant clone.
    map.insert(
        "rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
        ta_der,
    );
    let http = MapHttpFetcher { map };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let out = run_tree_from_tal_url_serial_audit(
        &store,
        &policy,
        tal_url,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &TreeRunConfig {
            max_depth: Some(0),
            max_instances: Some(1),
        },
    )
    .expect("run tree audit");
    assert_eq!(out.tree.instances_processed, 0);
    assert_eq!(out.tree.instances_failed, 1);
    assert!(out.publication_points.is_empty());
}
// Audit variant of the TAL-bytes entry point: with an empty rsync repository
// the root instance fails and no publication-point audits are collected.
#[test]
fn run_tree_from_tal_and_ta_der_audit_entry_collects_no_publication_points_when_repo_empty() {
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let http = MapHttpFetcher { map: HashMap::new() };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let out = run_tree_from_tal_and_ta_der_serial_audit(
        &store,
        &policy,
        &tal_bytes,
        &ta_der,
        None,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &TreeRunConfig {
            max_depth: Some(0),
            max_instances: Some(1),
        },
    )
    .expect("run tree audit");
    assert_eq!(out.tree.instances_processed, 0);
    assert_eq!(out.tree.instances_failed, 1);
    assert!(out.publication_points.is_empty());
}

View File

@ -0,0 +1,34 @@
use rpki::storage::{RocksStore, VerifiedKey};
// `raw_iter_all` must list exactly the raw-object keys and
// `verified_iter_all` exactly the verified-pack keys, with no cross-talk
// between the two key spaces.
#[test]
fn storage_iter_all_lists_raw_and_verified_entries() {
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw("rsync://example.test/repo/a.cer", b"a")
        .expect("put_raw a");
    store
        .put_raw("rsync://example.test/repo/b.roa", b"b")
        .expect("put_raw b");
    let key = VerifiedKey::from_manifest_rsync_uri("rsync://example.test/repo/m.mft");
    store.put_verified(&key, b"x").expect("put_verified");
    let raw_keys = store
        .raw_iter_all()
        .expect("raw_iter_all")
        .map(|(k, _v)| String::from_utf8(k.to_vec()).expect("utf8 key"))
        .collect::<Vec<_>>();
    assert_eq!(raw_keys.len(), 2);
    assert!(raw_keys.contains(&"rsync://example.test/repo/a.cer".to_string()));
    assert!(raw_keys.contains(&"rsync://example.test/repo/b.roa".to_string()));
    let verified_keys = store
        .verified_iter_all()
        .expect("verified_iter_all")
        .map(|(k, _v)| String::from_utf8(k.to_vec()).expect("utf8 key"))
        .collect::<Vec<_>>();
    assert_eq!(verified_keys, vec![key.as_str().to_string()]);
}

View File

@ -0,0 +1,73 @@
use rocksdb::WriteBatch;
use rpki::storage::{RocksStore, VerifiedKey};
// RRDP state entries can be deleted; a subsequent get returns None.
#[test]
fn storage_delete_rrdp_state_works() {
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_rrdp_state("https://example.net/rrdp/notification.xml", b"state")
        .expect("put state");
    assert_eq!(
        store
            .get_rrdp_state("https://example.net/rrdp/notification.xml")
            .unwrap()
            .unwrap(),
        b"state"
    );
    store
        .delete_rrdp_state("https://example.net/rrdp/notification.xml")
        .expect("delete state");
    assert!(
        store
            .get_rrdp_state("https://example.net/rrdp/notification.xml")
            .unwrap()
            .is_none()
    );
}
// `raw_iter_prefix` must yield only keys under the given byte prefix,
// excluding entries from sibling directories.
#[test]
fn storage_raw_iter_prefix_filters_by_prefix() {
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store
        .put_raw("rsync://example.net/repo/a/1.cer", b"1")
        .unwrap();
    store
        .put_raw("rsync://example.net/repo/a/2.cer", b"2")
        .unwrap();
    store
        .put_raw("rsync://example.net/repo/b/1.cer", b"3")
        .unwrap();
    let prefix = b"rsync://example.net/repo/a/";
    let items = store
        .raw_iter_prefix(prefix)
        .expect("iter")
        .map(|(k, v)| (String::from_utf8_lossy(&k).to_string(), v.to_vec()))
        .collect::<Vec<_>>();
    assert_eq!(items.len(), 2);
    for (k, _v) in &items {
        assert!(k.starts_with("rsync://example.net/repo/a/"));
    }
}
/// The verified-pack key is the manifest rsync URI behind a fixed
/// "verified:" prefix; this layout must not change silently.
#[test]
fn storage_verified_key_format_is_stable() {
    let manifest_uri = "rsync://example.net/repo/manifest.mft";
    let key = VerifiedKey::from_manifest_rsync_uri(manifest_uri);
    assert_eq!(key.as_str(), format!("verified:{manifest_uri}"));
}
/// Committing an empty `WriteBatch` must succeed rather than error.
#[test]
fn storage_write_batch_accepts_empty_batch() {
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    let empty_batch = WriteBatch::default();
    store.write_batch(empty_batch).expect("write batch");
}

View File

@ -0,0 +1,76 @@
use std::path::Path;
use rpki::storage::{RocksStore, VerifiedKey};
/// Opening a store in a fresh directory must succeed (the open path is
/// responsible for creating any missing column families).
#[test]
fn storage_opens_and_creates_column_families() {
    let temp = tempfile::tempdir().expect("tempdir");
    let _store = RocksStore::open(temp.path()).expect("open rocksdb");
}
/// Raw objects round-trip through put/get keyed by rsync URI, and delete
/// removes them.
#[test]
fn raw_objects_roundtrip_by_rsync_uri() {
    let dir = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(dir.path()).expect("open rocksdb");
    let uri = "rsync://example.invalid/repo/a.cer";
    let payload = b"hello";
    store.put_raw(uri, payload).expect("put raw");
    assert_eq!(
        store.get_raw(uri).expect("get raw").as_deref(),
        Some(payload.as_slice())
    );
    store.delete_raw(uri).expect("delete raw");
    assert!(store.get_raw(uri).expect("get raw after delete").is_none());
}
// Verified packs round-trip through put/get using the
// "verified:<manifest rsync URI>" key format.
#[test]
fn verified_pack_roundtrip_by_manifest_uri() {
    let dir = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(dir.path()).expect("open rocksdb");
    let manifest_uri = "rsync://example.invalid/repo/manifest.mft";
    let verified_key = VerifiedKey::from_manifest_rsync_uri(manifest_uri);
    assert_eq!(
        verified_key.as_str(),
        "verified:rsync://example.invalid/repo/manifest.mft"
    );
    let bytes = b"pack";
    store
        .put_verified(&verified_key, bytes)
        .expect("put verified");
    let got = store.get_verified(&verified_key).expect("get verified");
    assert_eq!(got.as_deref(), Some(bytes.as_slice()));
}
/// RRDP session state round-trips through put/get keyed by notification URI.
#[test]
fn rrdp_state_roundtrip_by_notification_uri() {
    let dir = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(dir.path()).expect("open rocksdb");
    let notification_uri = "https://example.invalid/rrdp/notification.xml";
    let state = b"{\"session_id\":\"00000000-0000-0000-0000-000000000000\",\"last_serial\":1}";
    store
        .put_rrdp_state(notification_uri, state)
        .expect("put rrdp_state");
    let loaded = store.get_rrdp_state(notification_uri).expect("get rrdp_state");
    assert_eq!(loaded.as_deref(), Some(state.as_slice()));
}
// Data written before dropping the store must survive a reopen of the same
// directory.
#[test]
fn store_is_reopenable() {
    let dir = tempfile::tempdir().expect("tempdir");
    let path: &Path = dir.path();
    let store = RocksStore::open(path).expect("open rocksdb");
    store
        .put_raw("rsync://example.invalid/repo/x", b"x")
        .expect("put");
    // Drop closes the store so the same path can be opened again.
    drop(store);
    let store = RocksStore::open(path).expect("reopen rocksdb");
    let got = store
        .get_raw("rsync://example.invalid/repo/x")
        .expect("get after reopen");
    assert_eq!(got.as_deref(), Some(b"x".as_slice()));
}

View File

@ -68,6 +68,9 @@ fn ta_constraints_require_policies_and_ski() {
let rc = dummy_rc_ca(RcExtensions { let rc = dummy_rc_ca(RcExtensions {
basic_constraints_ca: true, basic_constraints_ca: true,
subject_key_identifier: None, subject_key_identifier: None,
authority_key_identifier: None,
crl_distribution_points_uris: None,
ca_issuers_uris: None,
subject_info_access: None, subject_info_access: None,
certificate_policies_oid: None, certificate_policies_oid: None,
ip_resources: Some(rpki::data_model::rc::IpResourceSet { families: vec![] }), ip_resources: Some(rpki::data_model::rc::IpResourceSet { families: vec![] }),
@ -94,6 +97,9 @@ fn ta_constraints_require_non_empty_resources_and_no_inherit() {
let rc = dummy_rc_ca(RcExtensions { let rc = dummy_rc_ca(RcExtensions {
basic_constraints_ca: true, basic_constraints_ca: true,
subject_key_identifier: Some(vec![1]), subject_key_identifier: Some(vec![1]),
authority_key_identifier: None,
crl_distribution_points_uris: None,
ca_issuers_uris: None,
subject_info_access: None, subject_info_access: None,
certificate_policies_oid: Some(OID_CP_IPADDR_ASNUMBER.to_string()), certificate_policies_oid: Some(OID_CP_IPADDR_ASNUMBER.to_string()),
ip_resources: None, ip_resources: None,

View File

@ -0,0 +1,103 @@
use rpki::data_model::rc::{
Afi, AsIdentifierChoice, AsResourceSet, IpAddressChoice, IpAddressFamily, IpResourceSet,
ResourceCertKind,
};
use rpki::data_model::ta::{TaCertificate, TaCertificateProfileError};
/// Load and decode the APNIC trust-anchor certificate fixture.
fn apnic_ta() -> TaCertificate {
    let der_bytes = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta");
    TaCertificate::decode_der(&der_bytes).expect("decode apnic ta")
}
// TA profile: the certificate must be a CA; an EE kind yields `NotCa`.
#[test]
fn ta_rc_constraints_reject_wrong_kind() {
    let ta = apnic_ta();
    let mut rc = ta.rc_ca.clone();
    rc.kind = ResourceCertKind::Ee;
    assert!(matches!(
        TaCertificate::validate_rc_constraints(&rc),
        Err(TaCertificateProfileError::NotCa)
    ));
}
// TA profile: a missing certificatePolicies OID is rejected.
#[test]
fn ta_rc_constraints_reject_missing_policies_oid() {
    let ta = apnic_ta();
    let mut rc = ta.rc_ca.clone();
    rc.tbs.extensions.certificate_policies_oid = None;
    assert!(matches!(
        TaCertificate::validate_rc_constraints(&rc),
        Err(TaCertificateProfileError::MissingOrInvalidCertificatePolicies)
    ));
}
// TA profile: a missing SubjectKeyIdentifier extension is rejected.
#[test]
fn ta_rc_constraints_reject_missing_subject_key_identifier() {
    let ta = apnic_ta();
    let mut rc = ta.rc_ca.clone();
    rc.tbs.extensions.subject_key_identifier = None;
    assert!(matches!(
        TaCertificate::validate_rc_constraints(&rc),
        Err(TaCertificateProfileError::MissingSubjectKeyIdentifier)
    ));
}
// TA profile: dropping both IP and AS resources yields `ResourcesMissing`.
#[test]
fn ta_rc_constraints_reject_missing_resources() {
    let ta = apnic_ta();
    let mut rc = ta.rc_ca.clone();
    rc.tbs.extensions.ip_resources = None;
    rc.tbs.extensions.as_resources = None;
    assert!(matches!(
        TaCertificate::validate_rc_constraints(&rc),
        Err(TaCertificateProfileError::ResourcesMissing)
    ));
}
// TA profile: a present-but-empty resource set (IPv4 family with no ranges,
// no AS resources) yields `ResourcesEmpty`.
#[test]
fn ta_rc_constraints_reject_empty_resources() {
    let ta = apnic_ta();
    let mut rc = ta.rc_ca.clone();
    rc.tbs.extensions.ip_resources = Some(IpResourceSet {
        families: vec![IpAddressFamily {
            afi: Afi::Ipv4,
            choice: IpAddressChoice::AddressesOrRanges(vec![]),
        }],
    });
    rc.tbs.extensions.as_resources = None;
    assert!(matches!(
        TaCertificate::validate_rc_constraints(&rc),
        Err(TaCertificateProfileError::ResourcesEmpty)
    ));
}
// TA profile: IP resources must not use "inherit" on a trust anchor.
#[test]
fn ta_rc_constraints_reject_ip_inherit() {
    let ta = apnic_ta();
    let mut rc = ta.rc_ca.clone();
    rc.tbs.extensions.ip_resources = Some(IpResourceSet {
        families: vec![IpAddressFamily {
            afi: Afi::Ipv6,
            choice: IpAddressChoice::Inherit,
        }],
    });
    assert!(matches!(
        TaCertificate::validate_rc_constraints(&rc),
        Err(TaCertificateProfileError::IpResourcesInherit)
    ));
}
#[test]
fn ta_rc_constraints_reject_as_inherit() {
    // A TA must list concrete AS resources; an "inherit" asnum is invalid.
    let mut cert = apnic_ta().rc_ca;
    cert.tbs.extensions.as_resources = Some(AsResourceSet {
        asnum: Some(AsIdentifierChoice::Inherit),
        rdi: None,
    });
    let res = TaCertificate::validate_rc_constraints(&cert);
    assert!(matches!(
        res,
        Err(TaCertificateProfileError::AsResourcesInherit)
    ));
}

View File

@ -0,0 +1,47 @@
use rpki::data_model::rc::ResourceCertificate;
use rpki::data_model::ta::{TaCertificate, TaCertificateVerifyError};
#[test]
fn ta_verify_self_signature_succeeds_for_fixture() {
    // The unmodified APNIC TA fixture must pass self-signature verification.
    let raw = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta");
    TaCertificate::decode_der(&raw)
        .expect("decode TA")
        .verify_self_signature()
        .expect("verify self signature");
}
#[test]
fn ta_verify_self_signature_rejects_trailing_bytes() {
    // Appending three bytes after the DER certificate must be detected and
    // reported with the exact trailing-byte count.
    let raw = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta");
    let rc_ca = ResourceCertificate::decode_der(&raw).expect("decode rc");
    let mut padded = raw;
    padded.extend_from_slice(&[0, 1, 2]);
    let ta = TaCertificate {
        raw_der: padded,
        rc_ca,
    };
    assert!(matches!(
        ta.verify_self_signature().unwrap_err(),
        TaCertificateVerifyError::TrailingBytes(3)
    ));
}
#[test]
fn ta_verify_self_signature_rejects_tampered_signature() {
    // Flipping the low bit of the final byte must break verification, either
    // as an invalid self-signature or as a parse failure.
    let raw = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta");
    let rc_ca = ResourceCertificate::decode_der(&raw).expect("decode rc");
    let mut tampered = raw;
    if let Some(last) = tampered.last_mut() {
        *last ^= 0x01;
    }
    let ta = TaCertificate {
        raw_der: tampered,
        rc_ca,
    };
    let err = ta.verify_self_signature().unwrap_err();
    assert!(matches!(
        err,
        TaCertificateVerifyError::InvalidSelfSignature(_) | TaCertificateVerifyError::Parse(_)
    ));
}

View File

@ -0,0 +1,136 @@
use std::collections::HashMap;
use rpki::report::Warning;
use rpki::storage::{PackTime, VerifiedPublicationPointPack};
use rpki::validation::manifest::PublicationPointSource;
use rpki::validation::objects::{ObjectsOutput, ObjectsStats};
use rpki::validation::tree::{
CaInstanceHandle, PublicationPointRunResult, PublicationPointRunner, TreeRunConfig,
run_tree_serial,
};
use rpki::audit::PublicationPointAudit;
/// Builds a minimal verified pack with placeholder manifest bytes and no files.
fn empty_pack(manifest_uri: &str, pp_uri: &str) -> VerifiedPublicationPointPack {
    // Small helper so each timestamp field reads as a single expression.
    let at = |rfc3339: &str| PackTime {
        rfc3339_utc: rfc3339.to_string(),
    };
    VerifiedPublicationPointPack {
        format_version: VerifiedPublicationPointPack::FORMAT_VERSION_V1,
        publication_point_rsync_uri: pp_uri.to_owned(),
        manifest_rsync_uri: manifest_uri.to_owned(),
        this_update: at("2026-01-01T00:00:00Z"),
        next_update: at("2026-12-31T00:00:00Z"),
        verified_at: at("2026-02-06T00:00:00Z"),
        manifest_bytes: vec![1, 2, 3],
        files: Vec::new(),
    }
}
/// Builds a minimal root-depth CA handle pointing at `manifest_uri`;
/// resource sets and RRDP are left unset since the mock runner ignores them.
fn ca_handle(manifest_uri: &str) -> CaInstanceHandle {
    CaInstanceHandle {
        depth: 0,
        ca_certificate_der: Vec::new(),
        ca_certificate_rsync_uri: None,
        effective_ip_resources: None,
        effective_as_resources: None,
        rsync_base_uri: String::from("rsync://example.test/repo/"),
        manifest_rsync_uri: String::from(manifest_uri),
        publication_point_rsync_uri: String::from("rsync://example.test/repo/"),
        rrdp_notification_uri: None,
    }
}
/// Mock runner that serves a pre-registered `Ok`/`Err` result per manifest URI.
#[derive(Default)]
struct ResultRunner {
    // Keyed by manifest rsync URI; unregistered URIs yield a synthetic error.
    by_manifest: HashMap<String, Result<PublicationPointRunResult, String>>,
}
impl ResultRunner {
    /// Registers a successful canned result for `manifest`.
    fn with_ok(mut self, manifest: &str, res: PublicationPointRunResult) -> Self {
        self.by_manifest.insert(manifest.to_owned(), Ok(res));
        self
    }

    /// Registers a failing canned result for `manifest`.
    fn with_err(mut self, manifest: &str, err: &str) -> Self {
        self.by_manifest.insert(manifest.to_owned(), Err(err.to_owned()));
        self
    }
}
impl PublicationPointRunner for ResultRunner {
    fn run_publication_point(
        &self,
        ca: &CaInstanceHandle,
    ) -> Result<PublicationPointRunResult, String> {
        // Serve the canned result, or a synthetic error for unmocked URIs.
        match self.by_manifest.get(&ca.manifest_rsync_uri) {
            Some(res) => res.clone(),
            None => Err(format!("no mock for {}", ca.manifest_rsync_uri)),
        }
    }
}
#[test]
fn tree_continues_when_a_publication_point_fails() {
    // One failing child must not abort the traversal: the sibling still runs,
    // the failure is counted, and both the failure warning and the sibling's
    // own warning surface in the aggregated output.
    let root_manifest = "rsync://example.test/repo/root.mft";
    let bad_child_manifest = "rsync://example.test/repo/bad-child.mft";
    let ok_child_manifest = "rsync://example.test/repo/ok-child.mft";
    // Root succeeds and discovers two children: one mocked to fail, one to succeed.
    let runner = ResultRunner::default()
        .with_ok(
            root_manifest,
            PublicationPointRunResult {
                source: PublicationPointSource::Fresh,
                pack: empty_pack(root_manifest, "rsync://example.test/repo/"),
                warnings: Vec::new(),
                objects: ObjectsOutput {
                    vrps: Vec::new(),
                    aspas: Vec::new(),
                    warnings: Vec::new(),
                    stats: ObjectsStats::default(),
                    audit: Vec::new(),
                },
                audit: PublicationPointAudit::default(),
                discovered_children: vec![ca_handle(bad_child_manifest), ca_handle(ok_child_manifest)],
            },
        )
        .with_err(bad_child_manifest, "synthetic failure")
        .with_ok(
            ok_child_manifest,
            PublicationPointRunResult {
                source: PublicationPointSource::Fresh,
                pack: empty_pack(ok_child_manifest, "rsync://example.test/repo/ok-child/"),
                warnings: vec![Warning::new("ok child warning")],
                objects: ObjectsOutput {
                    vrps: Vec::new(),
                    aspas: Vec::new(),
                    warnings: Vec::new(),
                    stats: ObjectsStats::default(),
                    audit: Vec::new(),
                },
                audit: PublicationPointAudit::default(),
                discovered_children: Vec::new(),
            },
        );
    let out = run_tree_serial(ca_handle(root_manifest), &runner, &TreeRunConfig::default())
        .expect("run tree");
    // Two publication points complete (root + ok child); the bad child is the
    // single failure and is not counted as processed.
    assert_eq!(out.instances_processed, 2);
    assert_eq!(out.instances_failed, 1);
    assert!(
        out.warnings
            .iter()
            .any(|w| w.message.contains("publication point failed")),
        "expected failure warning"
    );
    assert!(
        out.warnings
            .iter()
            .any(|w| w.message.contains("ok child warning")),
        "expected ok child warning propagated"
    );
}

View File

@ -0,0 +1,231 @@
use std::collections::HashMap;
use rpki::report::Warning;
use rpki::storage::{PackFile, PackTime, VerifiedPublicationPointPack};
use rpki::validation::manifest::PublicationPointSource;
use rpki::validation::tree::{
CaInstanceHandle, PublicationPointRunResult, PublicationPointRunner, TreeRunConfig,
run_tree_serial,
};
use rpki::validation::objects::{ObjectsOutput, ObjectsStats};
use rpki::audit::PublicationPointAudit;
/// Mock runner that records call order and serves canned results per manifest URI.
#[derive(Default)]
struct MockRunner {
    // Canned results keyed by manifest rsync URI.
    by_manifest: HashMap<String, PublicationPointRunResult>,
    // Manifest URIs in the order `run_publication_point` was invoked.
    calls: std::sync::Mutex<Vec<String>>,
}
impl MockRunner {
    /// Registers the canned result served for `manifest`.
    fn with(mut self, manifest: &str, res: PublicationPointRunResult) -> Self {
        self.by_manifest.insert(manifest.to_owned(), res);
        self
    }

    /// Returns a snapshot of the manifest URIs the runner was invoked with,
    /// in call order.
    fn called(&self) -> Vec<String> {
        self.calls.lock().unwrap().clone()
    }
}
impl PublicationPointRunner for MockRunner {
    fn run_publication_point(
        &self,
        ca: &CaInstanceHandle,
    ) -> Result<PublicationPointRunResult, String> {
        // Record the invocation first so call order is observable even for
        // unmocked URIs, then serve the canned result.
        let manifest = ca.manifest_rsync_uri.clone();
        self.calls.lock().unwrap().push(manifest.clone());
        match self.by_manifest.get(&manifest) {
            Some(res) => Ok(res.clone()),
            None => Err(format!("no mock for {}", manifest)),
        }
    }
}
fn empty_pack(manifest_uri: &str, pp_uri: &str) -> VerifiedPublicationPointPack {
VerifiedPublicationPointPack {
format_version: 1,
publication_point_rsync_uri: pp_uri.to_string(),
manifest_rsync_uri: manifest_uri.to_string(),
this_update: PackTime {
rfc3339_utc: "2026-01-01T00:00:00Z".to_string(),
},
next_update: PackTime {
rfc3339_utc: "2026-12-31T00:00:00Z".to_string(),
},
verified_at: PackTime {
rfc3339_utc: "2026-02-06T00:00:00Z".to_string(),
},
manifest_bytes: vec![1, 2, 3],
files: vec![PackFile::from_bytes_compute_sha256(
manifest_uri,
vec![1],
)],
}
}
/// Builds a minimal root-depth CA handle pointing at `manifest_uri`;
/// resource sets and RRDP are left unset since the mock runner ignores them.
fn ca_handle(manifest_uri: &str) -> CaInstanceHandle {
    CaInstanceHandle {
        depth: 0,
        ca_certificate_der: Vec::new(),
        ca_certificate_rsync_uri: None,
        effective_ip_resources: None,
        effective_as_resources: None,
        rsync_base_uri: String::from("rsync://example.test/repo/"),
        manifest_rsync_uri: String::from(manifest_uri),
        publication_point_rsync_uri: String::from("rsync://example.test/repo/"),
        rrdp_notification_uri: None,
    }
}
#[test]
fn tree_enqueues_children_only_for_fresh_publication_points() {
    // Children discovered by a publication point served from the verified
    // cache must NOT be enqueued; only `Fresh` results expand the traversal.
    // Here child1 comes from cache, so its grandchild is never visited.
    let root_manifest = "rsync://example.test/repo/root.mft";
    let child1_manifest = "rsync://example.test/repo/child1.mft";
    let child2_manifest = "rsync://example.test/repo/child2.mft";
    let grandchild_manifest = "rsync://example.test/repo/grandchild.mft";
    let root_children = vec![ca_handle(child1_manifest), ca_handle(child2_manifest)];
    let child1_children = vec![ca_handle(grandchild_manifest)];
    let runner = MockRunner::default()
        .with(
            root_manifest,
            PublicationPointRunResult {
                source: PublicationPointSource::Fresh,
                pack: empty_pack(root_manifest, "rsync://example.test/repo/"),
                warnings: Vec::new(),
                objects: ObjectsOutput {
                    vrps: Vec::new(),
                    aspas: Vec::new(),
                    warnings: Vec::new(),
                    stats: ObjectsStats::default(),
                    audit: Vec::new(),
                },
                audit: PublicationPointAudit::default(),
                discovered_children: root_children,
            },
        )
        .with(
            child1_manifest,
            PublicationPointRunResult {
                // Served from cache: this node's discovered children must be skipped.
                source: PublicationPointSource::VerifiedCache,
                pack: empty_pack(child1_manifest, "rsync://example.test/repo/child1/"),
                warnings: vec![Warning::new("child1 warning")],
                objects: ObjectsOutput {
                    vrps: Vec::new(),
                    aspas: Vec::new(),
                    warnings: Vec::new(),
                    stats: ObjectsStats::default(),
                    audit: Vec::new(),
                },
                audit: PublicationPointAudit::default(),
                discovered_children: child1_children,
            },
        )
        .with(
            child2_manifest,
            PublicationPointRunResult {
                source: PublicationPointSource::Fresh,
                pack: empty_pack(child2_manifest, "rsync://example.test/repo/child2/"),
                warnings: Vec::new(),
                objects: ObjectsOutput {
                    vrps: Vec::new(),
                    aspas: Vec::new(),
                    warnings: Vec::new(),
                    stats: ObjectsStats::default(),
                    audit: Vec::new(),
                },
                audit: PublicationPointAudit::default(),
                discovered_children: Vec::new(),
            },
        );
    let out = run_tree_serial(ca_handle(root_manifest), &runner, &TreeRunConfig::default())
        .expect("run tree");
    // root + child1 + child2. grandchild must NOT be processed because child1 used cache.
    assert_eq!(out.instances_processed, 3);
    assert_eq!(out.instances_failed, 0);
    // Root is visited first, then both children; the grandchild never appears.
    let called = runner.called();
    assert_eq!(called, vec![root_manifest, child1_manifest, child2_manifest]);
    assert!(
        out.warnings.iter().any(|w| w.message.contains("child1 warning")),
        "expected child1 warning propagated"
    );
    assert!(
        out.warnings
            .iter()
            .any(|w| w.message.contains("skipping child CA discovery")),
        "expected RFC 9286 §6.6 enforcement warning"
    );
}
#[test]
fn tree_respects_max_depth_and_max_instances() {
    // Both limits must stop traversal after the root: max_depth = 0 forbids
    // descending to the child, and max_instances = 1 caps total work at one CA.
    let root_manifest = "rsync://example.test/repo/root.mft";
    let child_manifest = "rsync://example.test/repo/child.mft";
    let runner = MockRunner::default()
        .with(
            root_manifest,
            PublicationPointRunResult {
                source: PublicationPointSource::Fresh,
                pack: empty_pack(root_manifest, "rsync://example.test/repo/"),
                warnings: Vec::new(),
                objects: ObjectsOutput {
                    vrps: Vec::new(),
                    aspas: Vec::new(),
                    warnings: Vec::new(),
                    stats: ObjectsStats::default(),
                    audit: Vec::new(),
                },
                audit: PublicationPointAudit::default(),
                discovered_children: vec![ca_handle(child_manifest)],
            },
        )
        .with(
            child_manifest,
            PublicationPointRunResult {
                source: PublicationPointSource::Fresh,
                pack: empty_pack(child_manifest, "rsync://example.test/repo/child/"),
                warnings: Vec::new(),
                objects: ObjectsOutput {
                    vrps: Vec::new(),
                    aspas: Vec::new(),
                    warnings: Vec::new(),
                    stats: ObjectsStats::default(),
                    audit: Vec::new(),
                },
                audit: PublicationPointAudit::default(),
                discovered_children: Vec::new(),
            },
        );
    // Depth limit: only the root (depth 0) may run.
    let out = run_tree_serial(
        ca_handle(root_manifest),
        &runner,
        &TreeRunConfig {
            max_depth: Some(0),
            max_instances: None,
        },
    )
    .expect("run tree depth-limited");
    assert_eq!(out.instances_processed, 1);
    assert_eq!(out.instances_failed, 0);
    // Instance limit: processing stops once one CA instance has run.
    let out = run_tree_serial(
        ca_handle(root_manifest),
        &runner,
        &TreeRunConfig {
            max_depth: None,
            max_instances: Some(1),
        },
    )
    .expect("run tree instance-limited");
    assert_eq!(out.instances_processed, 1);
    assert_eq!(out.instances_failed, 0);
}

View File

@ -0,0 +1,137 @@
use rpki::data_model::manifest::ManifestObject;
use rpki::data_model::rc::{AsIdOrRange, AsIdentifierChoice, AsResourceSet, ResourceCertificate};
use rpki::data_model::ta::{TaCertificate, TaCertificateParsed, TaCertificateProfileError};
use rpki::data_model::tal::{Tal, TalDecodeError, TalProfileError};
use rpki::data_model::roa::{IpPrefix, RoaAfi};
#[test]
fn tal_validate_profile_noop_is_callable() {
    // Decoding the TAL fixture and running validate_profile must both succeed.
    let raw = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal").expect("read tal");
    Tal::decode_bytes(&raw)
        .expect("decode tal")
        .validate_profile()
        .expect("validate_profile is no-op");
}
#[test]
fn tal_invalid_uri_after_valid_uri_is_reported_as_missing_separator() {
    // The non-URI second line ends the URI section, so the decoder expects
    // the blank separator there and reports it as missing.
    let input = b"https://example.invalid/ta.cer\nnot a url\n\nAQ==\n";
    let res = Tal::decode_bytes(input);
    assert!(matches!(
        res,
        Err(TalDecodeError::Validate(
            TalProfileError::MissingSeparatorEmptyLine
        ))
    ));
}
#[test]
fn tal_missing_separator_empty_line_is_rejected() {
    // URI line followed directly by SPKI data, with no blank separator line.
    let input = b"https://example.invalid/ta.cer\nAQ==\n";
    let res = Tal::decode_bytes(input);
    assert!(matches!(
        res,
        Err(TalDecodeError::Validate(
            TalProfileError::MissingSeparatorEmptyLine
        ))
    ));
}
#[test]
fn tal_missing_spki_is_rejected() {
    // URI plus separator but no SPKI body at all.
    let input = b"https://example.invalid/ta.cer\n\n\n";
    let res = Tal::decode_bytes(input);
    assert!(matches!(
        res,
        Err(TalDecodeError::Validate(TalProfileError::MissingSpki))
    ));
}
#[test]
fn tal_missing_separator_due_to_eof_is_rejected() {
    // One URI and then EOF: should fail at the mandatory empty-line separator check.
    let input = b"https://example.invalid/ta.cer";
    let res = Tal::decode_bytes(input);
    assert!(matches!(
        res,
        Err(TalDecodeError::Validate(
            TalProfileError::MissingSeparatorEmptyLine
        ))
    ));
}
#[test]
fn ta_validate_profile_noop_is_callable() {
    // Decoding the TA fixture and running validate_profile must both succeed.
    let raw = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read ta");
    TaCertificate::decode_der(&raw)
        .expect("decode ta")
        .validate_profile()
        .expect("validate_profile is no-op");
}
#[test]
fn ta_parsed_validate_profile_rejects_ee_resource_certificate() {
    // Extract an embedded EE certificate from a real manifest fixture and feed it into the TA
    // profile validator to hit the `NotCa` branch in `TaCertificateParsed::validate_profile`.
    let mft_der = std::fs::read(
        "tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/bW-_qXU9uNhGQz21NR2ansB8lr0.mft",
    )
    .expect("read manifest fixture");
    let manifest = ManifestObject::decode_der(&mft_der).expect("decode manifest");
    let embedded_ee = &manifest.signed_object.signed_data.certificates[0].resource_cert;
    let rc_parsed =
        ResourceCertificate::parse_der(&embedded_ee.raw_der).expect("parse embedded EE cert");
    let ta_parsed = TaCertificateParsed { rc_parsed };
    assert!(matches!(
        ta_parsed.validate_profile().unwrap_err(),
        TaCertificateProfileError::NotCa
    ));
}
#[test]
fn ta_rc_constraints_exercise_rdi_non_empty_branch() {
    // Keep the fixture's existing ASN choice but add a non-empty RDI list;
    // the constraints check must still accept the certificate.
    let der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read ta");
    let ta = TaCertificate::decode_der(&der).expect("decode ta");
    let mut rc = ta.rc_ca.clone();
    rc.tbs.extensions.as_resources = Some(AsResourceSet {
        // Re-derive the original asnum choice (if any) so only `rdi` changes.
        asnum: rc
            .tbs
            .extensions
            .as_resources
            .as_ref()
            .and_then(|v| v.asnum.clone()),
        rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(
            64496,
        )])),
    });
    TaCertificate::validate_rc_constraints(&rc).expect("constraints should still pass");
}
#[test]
fn audit_helpers_format_roa_ip_prefix_smoke() {
    // IPv4 renders as dotted-quad with the prefix length appended.
    let four = IpPrefix {
        afi: RoaAfi::Ipv4,
        prefix_len: 24,
        addr: vec![192, 0, 2, 0],
    };
    assert_eq!(rpki::audit::format_roa_ip_prefix(&four), "192.0.2.0/24");
    // IPv6 output shape is not pinned here; only the "/len" suffix is checked.
    let six = IpPrefix {
        afi: RoaAfi::Ipv6,
        prefix_len: 32,
        addr: vec![0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    };
    assert!(rpki::audit::format_roa_ip_prefix(&six).ends_with("/32"));
    // A malformed (3-byte) IPv4 address falls back to an "ipv4:"-prefixed form.
    let malformed = IpPrefix {
        afi: RoaAfi::Ipv4,
        prefix_len: 8,
        addr: vec![1, 2, 3],
    };
    assert!(rpki::audit::format_roa_ip_prefix(&malformed).starts_with("ipv4:"));
}
#[test]
fn audit_helpers_sha256_hex_smoke() {
    // A SHA-256 hex digest is always 64 characters.
    assert_eq!(rpki::audit::sha256_hex(b"abc").len(), 64);
    // A 32-byte array of 0x11 renders as "11" repeated 32 times.
    let digest = [0x11u8; 32];
    assert_eq!(rpki::audit::sha256_hex_from_32(&digest), "11".repeat(32));
}

View File

@ -0,0 +1,68 @@
use rpki::storage::{PackFile, PackTime, VerifiedPublicationPointPack};
/// Builds a well-formed two-file pack used by the roundtrip tests.
fn sample_pack() -> VerifiedPublicationPointPack {
    // Build each pack timestamp from a fixed unix timestamp.
    let ts = |secs: i64| {
        PackTime::from_utc_offset_datetime(
            time::OffsetDateTime::from_unix_timestamp(secs).unwrap(),
        )
    };
    VerifiedPublicationPointPack {
        format_version: VerifiedPublicationPointPack::FORMAT_VERSION_V1,
        manifest_rsync_uri: "rsync://example.net/repo/CA/manifest.mft".to_owned(),
        publication_point_rsync_uri: "rsync://example.net/repo/CA/".to_owned(),
        this_update: ts(0),
        next_update: ts(3600),
        verified_at: ts(10),
        manifest_bytes: b"manifest-bytes".to_vec(),
        files: vec![
            PackFile::from_bytes_compute_sha256(
                "rsync://example.net/repo/CA/1.crl",
                b"crl-bytes".to_vec(),
            ),
            PackFile::from_bytes_compute_sha256(
                "rsync://example.net/repo/CA/2.cer",
                b"cer-bytes".to_vec(),
            ),
        ],
    }
}
#[test]
fn pack_encode_decode_roundtrip() {
    // Encoding then decoding must reproduce the pack exactly.
    let original = sample_pack();
    let encoded = original.encode().expect("encode");
    let roundtripped = VerifiedPublicationPointPack::decode(&encoded).expect("decode");
    assert_eq!(roundtripped, original);
}
#[test]
fn pack_rejects_missing_manifest() {
    // A pack with empty manifest bytes must fail decode-time validation.
    let mut pack = sample_pack();
    pack.manifest_bytes = Vec::new();
    let encoded = pack.encode().expect("encode");
    assert!(VerifiedPublicationPointPack::decode(&encoded).is_err());
}
#[test]
fn pack_rejects_duplicate_rsync_uri_entries() {
    // Two file entries sharing one rsync URI must be rejected on decode.
    let mut pack = sample_pack();
    pack.files.push(PackFile::from_bytes_compute_sha256(
        "rsync://example.net/repo/CA/1.crl",
        b"other".to_vec(),
    ));
    let encoded = pack.encode().expect("encode");
    assert!(VerifiedPublicationPointPack::decode(&encoded).is_err());
}
#[test]
fn pack_includes_this_update_next_update() {
    // Both timestamps must survive the roundtrip, parse, and stay ordered.
    let encoded = sample_pack().encode().expect("encode");
    let decoded = VerifiedPublicationPointPack::decode(&encoded).expect("decode");
    let this_update = decoded.this_update.parse().expect("parse this_update");
    let next_update = decoded.next_update.parse().expect("parse next_update");
    assert!(next_update > this_update);
}

View File

@ -0,0 +1,101 @@
use rpki::storage::{PackFile, PackTime, VerifiedPublicationPointPack};
/// Builds a minimal valid pack that each rejection test mutates one field of.
fn base_pack() -> VerifiedPublicationPointPack {
    // Build each pack timestamp from a fixed unix timestamp.
    let ts = |secs: i64| {
        PackTime::from_utc_offset_datetime(
            time::OffsetDateTime::from_unix_timestamp(secs).unwrap(),
        )
    };
    VerifiedPublicationPointPack {
        format_version: VerifiedPublicationPointPack::FORMAT_VERSION_V1,
        manifest_rsync_uri: "rsync://example.net/repo/manifest.mft".to_owned(),
        publication_point_rsync_uri: "rsync://example.net/repo/".to_owned(),
        this_update: ts(0),
        next_update: ts(3600),
        verified_at: ts(10),
        manifest_bytes: b"manifest".to_vec(),
        files: vec![PackFile::from_bytes_compute_sha256(
            "rsync://example.net/repo/obj.cer",
            b"x".to_vec(),
        )],
    }
}
#[test]
fn pack_rejects_unsupported_format_version() {
    // An unknown format_version must be refused with a descriptive error.
    let mut pack = base_pack();
    pack.format_version = 999;
    let encoded = pack.encode().expect("encode");
    let err = VerifiedPublicationPointPack::decode(&encoded).unwrap_err();
    assert!(err.to_string().contains("unsupported pack format_version"));
}
#[test]
fn pack_rejects_missing_manifest_rsync_uri() {
    // An empty manifest URI must fail decode-time validation.
    let mut pack = base_pack();
    pack.manifest_rsync_uri = String::new();
    let encoded = pack.encode().expect("encode");
    assert!(VerifiedPublicationPointPack::decode(&encoded).is_err());
}
#[test]
fn pack_rejects_missing_publication_point_rsync_uri() {
    // An empty publication-point URI must fail decode-time validation.
    let mut pack = base_pack();
    pack.publication_point_rsync_uri = String::new();
    let encoded = pack.encode().expect("encode");
    assert!(VerifiedPublicationPointPack::decode(&encoded).is_err());
}
#[test]
fn pack_rejects_invalid_time_fields() {
    // A non-RFC-3339 this_update timestamp must be rejected on decode.
    let mut pack = base_pack();
    pack.this_update = PackTime {
        rfc3339_utc: String::from("not-a-time"),
    };
    let encoded = pack.encode().expect("encode");
    assert!(VerifiedPublicationPointPack::decode(&encoded).is_err());
}
#[test]
fn pack_rejects_empty_file_bytes() {
    // A file entry with zero-length bytes must be rejected even when it
    // carries a (nonzero) declared hash.
    let mut pack = base_pack();
    let mut digest = [0u8; 32];
    digest[0] = 1;
    pack.files = vec![PackFile::new(
        "rsync://example.net/repo/empty.cer",
        Vec::new(),
        digest,
    )];
    let encoded = pack.encode().expect("encode");
    assert!(VerifiedPublicationPointPack::decode(&encoded).is_err());
}
#[test]
fn pack_rejects_file_hash_mismatch() {
    // A declared all-zero hash cannot match the actual digest of "abc".
    let mut pack = base_pack();
    pack.files = vec![PackFile::new(
        "rsync://example.net/repo/bad.cer",
        b"abc".to_vec(),
        [0u8; 32],
    )];
    let encoded = pack.encode().expect("encode");
    let err = VerifiedPublicationPointPack::decode(&encoded).unwrap_err();
    assert!(err.to_string().contains("file hash mismatch"));
}
#[test]
fn pack_rejects_missing_file_rsync_uri() {
    // A file entry with an empty rsync URI must be reported as a missing field.
    let mut pack = base_pack();
    pack.files = vec![PackFile::from_bytes_compute_sha256("", b"x".to_vec())];
    let encoded = pack.encode().expect("encode");
    let err = VerifiedPublicationPointPack::decode(&encoded).unwrap_err();
    assert!(err.to_string().contains("missing required field"));
}