//! rpki/tests/test_run_tree_from_tal_offline_m17.rs
//!
//! Offline entry-point tests for the `run_tree_from_tal` family (serial,
//! audit, and timing variants), driven entirely by on-disk fixtures and
//! in-memory fetcher doubles — no network access required.

use rpki::analysis::timing::{TimingHandle, TimingMeta};
use rpki::validation::from_tal::discover_root_ca_instance_from_tal_and_ta_der;
use rpki::validation::run_tree_from_tal::root_handle_from_trust_anchor;
use rpki::validation::run_tree_from_tal::{
run_tree_from_tal_and_ta_der_serial, run_tree_from_tal_and_ta_der_serial_audit,
run_tree_from_tal_and_ta_der_serial_audit_with_timing, run_tree_from_tal_url_serial,
run_tree_from_tal_url_serial_audit, run_tree_from_tal_url_serial_audit_with_timing,
};
use rpki::validation::tree::TreeRunConfig;
use std::collections::HashMap;
/// Test double for the HTTP/RRDP fetcher: serves responses from an in-memory
/// URI -> bytes map instead of the network. Any URI not in the map fails.
struct MapHttpFetcher {
    // Fixture bytes keyed by the exact URI string requested.
    map: HashMap<String, Vec<u8>>,
}
/// Looks the URI up in the fixture map; unmapped URIs produce an error so
/// tests can simulate unreachable repositories.
impl rpki::sync::rrdp::Fetcher for MapHttpFetcher {
    fn fetch(&self, uri: &str) -> Result<Vec<u8>, String> {
        match self.map.get(uri) {
            Some(bytes) => Ok(bytes.clone()),
            None => Err(format!("no fixture mapped for {uri}")),
        }
    }
}
/// Test double for the rsync fetcher: reports every rsync module as empty.
struct EmptyRsyncFetcher;
/// Always succeeds with zero objects, simulating an empty rsync repository.
impl rpki::fetch::rsync::RsyncFetcher for EmptyRsyncFetcher {
    fn fetch_objects(
        &self,
        _rsync_base_uri: &str,
    ) -> Result<Vec<(String, Vec<u8>)>, rpki::fetch::rsync::RsyncFetchError> {
        Ok(vec![])
    }
}
/// Builds a `TimingHandle` with a fixed recorded/validation timestamp and the
/// temp dir as `db_path`, so timing reports are deterministic across runs.
fn test_timing_handle(temp: &tempfile::TempDir) -> TimingHandle {
    // Both timestamps are intentionally identical.
    let stamp = "2026-03-11T00:00:00Z";
    let meta = TimingMeta {
        recorded_at_utc_rfc3339: stamp.to_string(),
        validation_time_utc_rfc3339: stamp.to_string(),
        tal_url: Some(String::from("mock:apnic.tal")),
        db_path: Some(temp.path().display().to_string()),
    };
    TimingHandle::new(meta)
}
/// Writes the timing report to `<temp>/timing.json` (top-20 entries) and
/// parses it back as generic JSON for assertions.
fn read_timing_json(temp: &tempfile::TempDir, timing: &TimingHandle) -> serde_json::Value {
    let path = temp.path().join("timing.json");
    timing.write_json(&path, 20).expect("write timing json");
    let bytes = std::fs::read(&path).expect("read timing json");
    serde_json::from_slice(&bytes).expect("parse timing json")
}
#[test]
fn root_handle_is_constructible_from_fixture_tal_and_ta() {
    // Load fixture TAL + TA certificate and run root CA discovery on them.
    let tal = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    let discovery =
        discover_root_ca_instance_from_tal_and_ta_der(&tal, &ta, None).expect("discover");
    let handle = root_handle_from_trust_anchor(
        &discovery.trust_anchor,
        "test-tal".to_string(),
        None,
        &discovery.ca_instance,
    );
    // The root handle sits at depth 0 and mirrors the discovered CA instance.
    assert_eq!(handle.depth, 0);
    assert_eq!(
        handle.manifest_rsync_uri,
        discovery.ca_instance.manifest_rsync_uri
    );
    assert_eq!(handle.rsync_base_uri, discovery.ca_instance.rsync_base_uri);
    assert!(
        handle.ca_certificate_der.len() > 100,
        "TA der should be non-empty"
    );
}
#[test]
fn run_tree_from_tal_url_entry_executes_and_records_failure_when_repo_empty() {
    let tal_url = "mock:apnic.tal";
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    // Only the TAL and the TA certificate (both its https and rsync URLs) are
    // mapped; all other URIs are unreachable, so the repository looks empty.
    let http = MapHttpFetcher {
        map: HashMap::from([
            (tal_url.to_string(), tal_bytes),
            (
                "https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
                ta_der.clone(),
            ),
            (
                "rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
                ta_der,
            ),
        ]),
    };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    // Force rsync-only sync so the empty rsync double controls the outcome.
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let config = TreeRunConfig {
        max_depth: Some(0),
        max_instances: Some(1),
    };
    let out = run_tree_from_tal_url_serial(
        &store,
        &policy,
        tal_url,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &config,
    )
    .expect("run tree");
    // The root instance is processed (not counted as failed); the empty repo
    // surfaces as a fetch/cache warning instead.
    assert_eq!(out.tree.instances_processed, 1);
    assert_eq!(out.tree.instances_failed, 0);
    let saw_fetch_warning = out.tree.warnings.iter().any(|w| {
        w.message.contains("manifest failed fetch")
            || w.message.contains("no latest validated result")
    });
    assert!(saw_fetch_warning, "expected failed-fetch warning");
}
#[test]
fn run_tree_from_tal_and_ta_der_entry_executes_and_records_failure_when_repo_empty() {
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    // TAL/TA bytes are handed in directly, so the HTTP double needs no
    // fixtures at all — every repository fetch simply fails.
    let http = MapHttpFetcher {
        map: HashMap::new(),
    };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    // Force rsync-only sync so the empty rsync double controls the outcome.
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let config = TreeRunConfig {
        max_depth: Some(0),
        max_instances: Some(1),
    };
    let out = run_tree_from_tal_and_ta_der_serial(
        &store,
        &policy,
        &tal_bytes,
        &ta_der,
        None,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &config,
    )
    .expect("run tree");
    // Processed but not failed: the empty repo shows up as a warning only.
    assert_eq!(out.tree.instances_processed, 1);
    assert_eq!(out.tree.instances_failed, 0);
    let saw_fetch_warning = out.tree.warnings.iter().any(|w| {
        w.message.contains("manifest failed fetch")
            || w.message.contains("no latest validated result")
    });
    assert!(saw_fetch_warning, "expected failed-fetch warning");
}
#[test]
fn run_tree_from_tal_url_audit_entry_collects_no_publication_points_when_repo_empty() {
    // NOTE(review): despite the fn name, the audit run records exactly one
    // placeholder publication point ("failed_fetch_no_cache") — the name
    // looks stale relative to the assertions below; confirm and rename.
    let tal_url = "mock:apnic.tal";
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    // Only the TAL and TA certificate URLs are reachable; everything else fails.
    let http = MapHttpFetcher {
        map: HashMap::from([
            (tal_url.to_string(), tal_bytes),
            (
                "https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
                ta_der.clone(),
            ),
            (
                "rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
                ta_der,
            ),
        ]),
    };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let config = TreeRunConfig {
        max_depth: Some(0),
        max_instances: Some(1),
    };
    let out = run_tree_from_tal_url_serial_audit(
        &store,
        &policy,
        tal_url,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &config,
    )
    .expect("run tree audit");
    assert_eq!(out.tree.instances_processed, 1);
    assert_eq!(out.tree.instances_failed, 0);
    // The single audited publication point marks the failed fetch.
    assert_eq!(out.publication_points.len(), 1);
    assert_eq!(out.publication_points[0].source, "failed_fetch_no_cache");
}
#[test]
fn run_tree_from_tal_and_ta_der_audit_entry_collects_no_publication_points_when_repo_empty() {
    // NOTE(review): as with the url-based sibling test, the name says "no
    // publication points" but one "failed_fetch_no_cache" entry is asserted;
    // the name looks stale — confirm and rename.
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    // No HTTP fixtures: TAL/TA are supplied directly and all fetches fail.
    let http = MapHttpFetcher {
        map: HashMap::new(),
    };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let config = TreeRunConfig {
        max_depth: Some(0),
        max_instances: Some(1),
    };
    let out = run_tree_from_tal_and_ta_der_serial_audit(
        &store,
        &policy,
        &tal_bytes,
        &ta_der,
        None,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &config,
    )
    .expect("run tree audit");
    assert_eq!(out.tree.instances_processed, 1);
    assert_eq!(out.tree.instances_failed, 0);
    // The single audited publication point marks the failed fetch.
    assert_eq!(out.publication_points.len(), 1);
    assert_eq!(out.publication_points[0].source, "failed_fetch_no_cache");
}
#[test]
fn run_tree_from_tal_url_audit_with_timing_records_phases_when_repo_empty() {
    let tal_url = "mock:apnic.tal";
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    // Only the TAL and TA certificate URLs are reachable; everything else fails.
    let http = MapHttpFetcher {
        map: HashMap::from([
            (tal_url.to_string(), tal_bytes),
            (
                "https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
                ta_der.clone(),
            ),
            (
                "rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
                ta_der,
            ),
        ]),
    };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let config = TreeRunConfig {
        max_depth: Some(0),
        max_instances: Some(1),
    };
    let timing = test_timing_handle(&temp);
    let out = run_tree_from_tal_url_serial_audit_with_timing(
        &store,
        &policy,
        tal_url,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &config,
        &timing,
    )
    .expect("run tree audit with timing");
    assert_eq!(out.tree.instances_processed, 1);
    assert_eq!(out.tree.instances_failed, 0);
    // Even a failed run must record exactly one bootstrap and one total phase.
    let report = read_timing_json(&temp, &timing);
    assert_eq!(report["phases"]["tal_bootstrap"]["count"].as_u64(), Some(1));
    assert_eq!(
        report["phases"]["tree_run_total"]["count"].as_u64(),
        Some(1)
    );
}
#[test]
fn run_tree_from_tal_and_ta_der_audit_with_timing_records_phases_when_repo_empty() {
    let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
        .expect("read apnic tal fixture");
    let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
    // No HTTP fixtures: TAL/TA are supplied directly and all fetches fail.
    let http = MapHttpFetcher {
        map: HashMap::new(),
    };
    let rsync = EmptyRsyncFetcher;
    let temp = tempfile::tempdir().expect("tempdir");
    let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
    let policy = rpki::policy::Policy {
        sync_preference: rpki::policy::SyncPreference::RsyncOnly,
        ..rpki::policy::Policy::default()
    };
    let config = TreeRunConfig {
        max_depth: Some(0),
        max_instances: Some(1),
    };
    let timing = test_timing_handle(&temp);
    let out = run_tree_from_tal_and_ta_der_serial_audit_with_timing(
        &store,
        &policy,
        &tal_bytes,
        &ta_der,
        None,
        &http,
        &rsync,
        time::OffsetDateTime::now_utc(),
        &config,
        &timing,
    )
    .expect("run tree audit with timing");
    assert_eq!(out.tree.instances_processed, 1);
    assert_eq!(out.tree.instances_failed, 0);
    // Even a failed run must record exactly one bootstrap and one total phase.
    let report = read_timing_json(&temp, &timing);
    assert_eq!(report["phases"]["tal_bootstrap"]["count"].as_u64(), Some(1));
    assert_eq!(
        report["phases"]["tree_run_total"]["count"].as_u64(),
        Some(1)
    );
}