20260510 增加strict策略模式并隔离多TA失败

This commit is contained in:
yuyr 2026-05-11 11:32:17 +08:00
parent 265b6f65d0
commit 51e483d924
12 changed files with 1194 additions and 96 deletions

View File

@ -16,7 +16,7 @@ use crate::fetch::rsync::LocalDirRsyncFetcher;
use crate::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
use crate::parallel::config::{ParallelPhase1Config, ParallelPhase2Config};
use crate::parallel::types::TalInputSpec;
use crate::policy::Policy;
use crate::policy::{Policy, StrictPolicy};
use crate::storage::RocksStore;
use crate::validation::run_tree_from_tal::{
RunTreeFromTalAuditOutput, run_tree_from_multiple_tals_parallel_phase2_audit,
@ -70,6 +70,7 @@ pub struct CliArgs {
pub raw_store_db: Option<PathBuf>,
pub repo_bytes_db: Option<PathBuf>,
pub policy_path: Option<PathBuf>,
pub strict_policy: Option<StrictPolicy>,
pub report_json_path: Option<PathBuf>,
pub report_json_compact: bool,
pub skip_report_build: bool,
@ -120,6 +121,7 @@ Options:
--raw-store-db <path> External raw-by-hash store DB path (optional)
--repo-bytes-db <path> External repo object bytes DB path (optional)
--policy <path> Policy TOML path (optional)
--strict [policies] Enable strict policies (default all; comma list: name,cms-der,signed-attrs; none disables)
--report-json <path> Write full audit report as JSON (optional)
--report-json-compact Write report JSON without pretty-printing (requires --report-json)
--skip-report-build Skip full audit report construction when --report-json is not requested
@ -195,6 +197,7 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let mut raw_store_db: Option<PathBuf> = None;
let mut repo_bytes_db: Option<PathBuf> = None;
let mut policy_path: Option<PathBuf> = None;
let mut strict_policy: Option<StrictPolicy> = None;
let mut report_json_path: Option<PathBuf> = None;
let mut report_json_compact: bool = false;
let mut skip_report_build: bool = false;
@ -372,6 +375,18 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let v = argv.get(i).ok_or("--policy requires a value")?;
policy_path = Some(PathBuf::from(v));
}
"--strict" => {
let next = argv.get(i + 1).map(String::as_str);
let spec = next.filter(|v| !v.starts_with("--"));
if spec.is_some() {
i += 1;
}
strict_policy = Some(StrictPolicy::parse_cli_spec(spec)?);
}
_ if arg.starts_with("--strict=") => {
let spec = arg.strip_prefix("--strict=").expect("prefix checked");
strict_policy = Some(StrictPolicy::parse_cli_spec(Some(spec))?);
}
"--report-json" => {
i += 1;
let v = argv.get(i).ok_or("--report-json requires a value")?;
@ -796,6 +811,7 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
raw_store_db,
repo_bytes_db,
policy_path,
strict_policy,
report_json_path,
report_json_compact,
skip_report_build,
@ -940,6 +956,7 @@ fn print_summary_from_shared(validation_time: time::OffsetDateTime, shared: &Pos
struct PostValidationShared {
discovery: crate::validation::from_tal::DiscoveredRootCaInstance,
discoveries: Arc<[crate::validation::from_tal::DiscoveredRootCaInstance]>,
successful_tal_inputs: Arc<[TalInputSpec]>,
instances_processed: usize,
instances_failed: usize,
tree_warnings: Arc<[crate::report::Warning]>,
@ -958,6 +975,7 @@ impl PostValidationShared {
let RunTreeFromTalAuditOutput {
discovery,
discoveries,
successful_tal_inputs,
tree,
publication_points,
downloads,
@ -977,6 +995,7 @@ impl PostValidationShared {
Self {
discovery,
discoveries: discoveries.into(),
successful_tal_inputs: successful_tal_inputs.into(),
instances_processed,
instances_failed,
tree_warnings: warnings.into(),
@ -1285,6 +1304,38 @@ fn resolve_cir_export_tal_uris(args: &CliArgs) -> Result<Vec<String>, String> {
Err("CIR export requires TAL URI source(s)".to_string())
}
/// Narrow the configured CIR TAL URI list down to the entries whose TAL
/// inputs actually produced a discovery, keeping the successful-input order.
///
/// The list is returned unchanged when there is no successful-input
/// bookkeeping, when it already matches the discovery count, or when it does
/// not line up one-to-one with the original `tal_inputs` (nothing to map by).
fn effective_cir_tal_uris_for_discoveries(
    args: &CliArgs,
    shared: &PostValidationShared,
    cir_tal_uris: Vec<String>,
) -> Result<Vec<String>, String> {
    // Short-circuit order matters only for cost, not behavior: each guard is
    // independent of the others.
    let passthrough = shared.successful_tal_inputs.is_empty()
        || cir_tal_uris.len() == shared.discoveries.len()
        || cir_tal_uris.len() != args.tal_inputs.len();
    if passthrough {
        return Ok(cir_tal_uris);
    }
    // For every surviving TAL input, look up its position in the original
    // input list and keep the URI at the same index; collect short-circuits
    // on the first missing input.
    shared
        .successful_tal_inputs
        .iter()
        .map(|kept| {
            args.tal_inputs
                .iter()
                .position(|candidate| candidate == kept)
                .map(|idx| cir_tal_uris[idx].clone())
                .ok_or_else(|| {
                    format!(
                        "successful TAL '{}' was not found in original TAL input list",
                        kept.tal_id
                    )
                })
        })
        .collect()
}
fn build_repo_sync_stats(
publication_points: &[crate::audit::PublicationPointAudit],
) -> AuditRepoSyncStats {
@ -1420,6 +1471,9 @@ pub fn run(argv: &[String]) -> Result<(), String> {
let args = parse_args(argv)?;
let mut policy = read_policy(args.policy_path.as_deref())?;
if let Some(strict_policy) = args.strict_policy {
policy.strict = strict_policy;
}
if args.disable_rrdp {
policy.sync_preference = crate::policy::SyncPreference::RsyncOnly;
}
@ -1796,7 +1850,11 @@ pub fn run(argv: &[String]) -> Result<(), String> {
let mut cir_write_cir_ms = None;
let mut cir_total_ms = None;
if args.cir_enabled {
let cir_tal_uris = resolve_cir_export_tal_uris(&args)?;
let cir_tal_uris = effective_cir_tal_uris_for_discoveries(
&args,
&shared,
resolve_cir_export_tal_uris(&args)?,
)?;
if cir_tal_uris.len() != shared.discoveries.len() {
return Err(format!(
"CIR export TAL URI count ({}) does not match discovery count ({})",
@ -2026,6 +2084,108 @@ mod tests {
assert!(args.report_json_compact);
}
#[test]
fn parse_accepts_strict_policy_list() {
    // `--strict name,cms-der` must enable exactly the listed policies and
    // leave the omitted one (signed_attrs) disabled.
    let argv: Vec<String> = [
        "rpki",
        "--db",
        "db",
        "--tal-url",
        "https://example.test/x.tal",
        "--strict",
        "name,cms-der",
    ]
    .iter()
    .map(|s| s.to_string())
    .collect();
    let args = parse_args(&argv).expect("parse args");
    let expected = StrictPolicy {
        name: true,
        cms_der: true,
        signed_attrs: false,
    };
    assert_eq!(args.strict_policy, Some(expected));
}
#[test]
// A bare `--strict` whose following token is another flag must consume no
// value and enable every strict policy (the documented "default all").
fn parse_accepts_strict_without_value_as_all() {
let argv = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--tal-url".to_string(),
"https://example.test/x.tal".to_string(),
"--strict".to_string(),
// The next token starts with "--", so the parser treats `--strict`
// as value-less rather than consuming "--report-json" as its spec.
"--report-json".to_string(),
"out/report.json".to_string(),
];
let args = parse_args(&argv).expect("parse args");
assert_eq!(args.strict_policy, Some(StrictPolicy::all()));
}
#[test]
fn parse_rejects_unknown_strict_policy() {
    // The `--strict=<spec>` form with an unrecognized policy name must fail
    // with an error that names the problem.
    let argv: Vec<String> = [
        "rpki",
        "--db",
        "db",
        "--tal-url",
        "https://example.test/x.tal",
        "--strict=unknown",
    ]
    .iter()
    .map(|s| s.to_string())
    .collect();
    let err = parse_args(&argv).expect_err("unknown strict policy should fail");
    assert!(err.contains("unknown strict policy"), "{err}");
}
#[test]
// Three TAL inputs are configured with three matching --cir-tal-uri values,
// but only inputs 0 and 2 "succeed"; the mapping must drop the URI whose
// input (apnic) produced no discovery, preserving order.
fn effective_cir_tal_uris_filters_skipped_multi_tal_inputs() {
let argv = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--tal-path".to_string(),
"afrinic.tal".to_string(),
"--ta-path".to_string(),
"afrinic.cer".to_string(),
"--tal-path".to_string(),
"apnic.tal".to_string(),
"--ta-path".to_string(),
"apnic.cer".to_string(),
"--tal-path".to_string(),
"arin.tal".to_string(),
"--ta-path".to_string(),
"arin.cer".to_string(),
"--cir-enable".to_string(),
"--cir-out".to_string(),
"out.cir".to_string(),
"--cir-tal-uri".to_string(),
"https://example.test/afrinic.cer".to_string(),
"--cir-tal-uri".to_string(),
"https://example.test/apnic.cer".to_string(),
"--cir-tal-uri".to_string(),
"https://example.test/arin.cer".to_string(),
];
let args = parse_args(&argv).expect("parse args");
let mut shared = synthetic_post_validation_shared();
// Two discoveries (< 3 URIs) so the mapping path is exercised rather than
// the length-match passthrough.
shared.discoveries = vec![shared.discovery.clone(), shared.discovery.clone()].into();
// Mark afrinic (index 0) and arin (index 2) as the successful inputs.
shared.successful_tal_inputs =
vec![args.tal_inputs[0].clone(), args.tal_inputs[2].clone()].into();
let effective = effective_cir_tal_uris_for_discoveries(
&args,
&shared,
resolve_cir_export_tal_uris(&args).expect("resolve cir tal uris"),
)
.expect("map effective cir tal uris");
assert_eq!(
effective,
vec![
"https://example.test/afrinic.cer".to_string(),
"https://example.test/arin.cer".to_string(),
]
);
}
#[test]
fn parse_rejects_report_json_compact_without_report_json() {
let argv = vec![
@ -3024,6 +3184,32 @@ mod tests {
policy.signed_object_failure_policy,
crate::policy::SignedObjectFailurePolicy::DropPublicationPoint
);
assert_eq!(policy.strict, StrictPolicy::default());
}
#[test]
fn read_policy_accepts_strict_table() {
    // A `[strict]` table in the policy TOML must populate `StrictPolicy`;
    // flags that are not mentioned keep their default (false).
    let dir = tempfile::tempdir().expect("tmpdir");
    let path = dir.path().join("policy.toml");
    let contents = r#"
[strict]
name = true
cms_der = true
"#;
    std::fs::write(&path, contents).expect("write policy");
    let policy = read_policy(Some(&path)).expect("parse policy");
    let expected = StrictPolicy {
        name: true,
        cms_der: true,
        signed_attrs: false,
    };
    assert_eq!(policy.strict, expected);
}
#[test]
@ -3099,6 +3285,7 @@ mod tests {
let out = crate::validation::run_tree_from_tal::RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points: vec![pp1, pp2, pp3],
downloads: Vec::new(),

View File

@ -2,7 +2,8 @@ use crate::data_model::common::{DerReader, der_take_tlv};
use crate::data_model::oid::OID_CT_ASPA;
use crate::data_model::rc::ResourceCertificate;
use crate::data_model::signed_object::{
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectParseError, SignedObjectValidateError,
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectDecodeError, SignedObjectParseError,
SignedObjectValidateError,
};
#[derive(Clone, Debug, PartialEq, Eq)]
@ -91,6 +92,15 @@ pub enum AspaProfileError {
ProvidersContainCustomer(u32),
}
impl From<SignedObjectDecodeError> for AspaProfileError {
fn from(value: SignedObjectDecodeError) -> Self {
match value {
SignedObjectDecodeError::Parse(e) => AspaProfileError::ProfileDecode(e.to_string()),
SignedObjectDecodeError::Validate(e) => AspaProfileError::SignedObject(e),
}
}
}
#[derive(Debug, thiserror::Error)]
pub enum AspaDecodeError {
#[error("{0}")]
@ -173,6 +183,17 @@ impl AspaObject {
Ok(Self::parse_der(der)?.validate_profile()?)
}
/// Decode an ASPA object, optionally enforcing strict CMS DER encoding
/// and the strict name profile on the embedded certificate, then apply
/// the ASPA-specific profile checks via `from_signed_object`.
pub fn decode_der_with_strict_options(
    der: &[u8],
    strict_cms_der: bool,
    strict_name: bool,
) -> Result<Self, AspaDecodeError> {
    let decoded =
        RpkiSignedObject::decode_der_with_strict_options(der, strict_cms_der, strict_name);
    // Route signed-object failures through the ASPA profile error type.
    let signed_object = decoded.map_err(AspaProfileError::from)?;
    Self::from_signed_object(signed_object)
}
pub fn from_signed_object(signed_object: RpkiSignedObject) -> Result<Self, AspaDecodeError> {
let econtent_type = signed_object
.signed_data

View File

@ -3,7 +3,8 @@ use crate::data_model::common::{BigUnsigned, UtcTime};
use crate::data_model::oid::{OID_CT_RPKI_MANIFEST, OID_SHA256};
use crate::data_model::rc::ResourceCertificate;
use crate::data_model::signed_object::{
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectParseError, SignedObjectValidateError,
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectDecodeError, SignedObjectParseError,
SignedObjectValidateError,
};
#[derive(Clone, Debug, PartialEq, Eq)]
@ -119,6 +120,15 @@ pub enum ManifestProfileError {
InvalidHashLength(usize),
}
impl From<SignedObjectDecodeError> for ManifestProfileError {
fn from(value: SignedObjectDecodeError) -> Self {
match value {
SignedObjectDecodeError::Parse(e) => ManifestProfileError::ProfileDecode(e.to_string()),
SignedObjectDecodeError::Validate(e) => ManifestProfileError::SignedObject(e),
}
}
}
#[derive(Debug, thiserror::Error)]
pub enum ManifestDecodeError {
#[error("{0}")]
@ -186,6 +196,17 @@ impl ManifestObject {
Ok(Self::parse_der(der)?.validate_profile()?)
}
/// Decode a manifest object, optionally enforcing strict CMS DER encoding
/// and the strict name profile on the embedded certificate, then apply the
/// manifest-specific profile checks via `from_signed_object`.
pub fn decode_der_with_strict_options(
    der: &[u8],
    strict_cms_der: bool,
    strict_name: bool,
) -> Result<Self, ManifestDecodeError> {
    let decoded =
        RpkiSignedObject::decode_der_with_strict_options(der, strict_cms_der, strict_name);
    // Route signed-object failures through the manifest profile error type.
    let signed_object = decoded.map_err(ManifestProfileError::from)?;
    Self::from_signed_object(signed_object)
}
pub fn from_signed_object(
signed_object: RpkiSignedObject,
) -> Result<Self, ManifestDecodeError> {

View File

@ -8,7 +8,7 @@ use x509_parser::prelude::{FromDer, X509Certificate, X509Extension, X509Version}
use serde::{Deserialize, Serialize};
use crate::data_model::common::{
Asn1TimeUtc, InvalidTimeEncodingError, UtcTime, X509NameDer, asn1_time_to_model,
Asn1TimeUtc, DerReader, InvalidTimeEncodingError, UtcTime, X509NameDer, asn1_time_to_model,
};
use crate::data_model::oid::{
OID_AD_CA_ISSUERS_RAW, OID_AD_CA_REPOSITORY, OID_AD_CA_REPOSITORY_RAW, OID_AD_RPKI_MANIFEST,
@ -389,6 +389,11 @@ pub enum ResourceCertificateProfileError {
#[error("invalid signature algorithm parameters (RFC 5280 §4.1.1.2)")]
InvalidSignatureAlgorithmParameters,
#[error(
"{role} Name strict validation failed: {detail} (RFC 6487 §4.4; RFC 5280 §4.1.2.4/§4.1.2.6)"
)]
StrictName { role: &'static str, detail: String },
#[error("duplicate extension: {0} (RFC 5280 §4.2; RFC 6487 §4.8)")]
DuplicateExtension(&'static str),
@ -562,17 +567,181 @@ impl ResourceCertificate {
Ok(())
}
/// Apply the strict RFC 6487 name checks to both names carried by this
/// certificate. The issuer is checked before the subject, so on failure the
/// reported role reflects the first offending name.
pub fn validate_strict_name_profile(&self) -> Result<(), ResourceCertificateProfileError> {
    let names = [
        (&self.tbs.issuer_name, "issuer"),
        (&self.tbs.subject_name, "subject"),
    ];
    for (name, role) in names {
        validate_strict_rpki_name(name, role)?;
    }
    Ok(())
}
/// Decode a resource certificate (`parse + validate`).
///
/// Convenience wrapper: structural parsing via `parse_der` followed by
/// profile validation; either failure is converted into
/// `ResourceCertificateDecodeError` by the `?` conversions.
pub fn decode_der(der: &[u8]) -> Result<Self, ResourceCertificateDecodeError> {
Ok(Self::parse_der(der)?.validate_profile()?)
}
/// Decode like `decode_der`, then additionally enforce the strict
/// RFC 6487 name profile on the decoded certificate before returning it.
pub fn decode_der_with_strict_name(der: &[u8]) -> Result<Self, ResourceCertificateDecodeError> {
    let certificate = Self::decode_der(der)?;
    certificate.validate_strict_name_profile()?;
    Ok(certificate)
}
/// Backwards-compatible helper (historical name).
///
/// Delegates to `decode_der`; kept so older call sites using the
/// historical `from_der` name continue to compile.
pub fn from_der(der: &[u8]) -> Result<Self, ResourceCertificateError> {
Self::decode_der(der)
}
}
/// Strictly validate an RPKI distinguished name against the RFC 6487 §4.4
/// profile: the Name must be a SEQUENCE of RDN SETs, each attribute a
/// two-element SEQUENCE of OID + value; commonName (2.5.4.3) must appear
/// exactly once and serialNumber (2.5.4.5) at most once, and both must be
/// encoded as PrintableString (tag 0x13). `role` ("issuer"/"subject") is
/// only used to label errors.
fn validate_strict_rpki_name(
name: &X509NameDer,
role: &'static str,
) -> Result<(), ResourceCertificateProfileError> {
// Outer Name is a SEQUENCE of RelativeDistinguishedNames.
let mut name_seq = DerReader::new(name.as_raw())
.take_sequence()
.map_err(|e| ResourceCertificateProfileError::StrictName { role, detail: e })?;
let mut common_name_count = 0usize;
let mut serial_number_count = 0usize;
while !name_seq.is_empty() {
// Each RDN is a SET (universal constructed tag 0x31).
let set_bytes = name_seq
.take_tag(0x31)
.map_err(|e| ResourceCertificateProfileError::StrictName { role, detail: e })?;
let mut rdn_set = DerReader::new(set_bytes);
if rdn_set.is_empty() {
return Err(ResourceCertificateProfileError::StrictName {
role,
detail: "RelativeDistinguishedName SET is empty".to_string(),
});
}
while !rdn_set.is_empty() {
// AttributeTypeAndValue ::= SEQUENCE { type OID, value ANY }
let mut attr = rdn_set
.take_sequence()
.map_err(|e| ResourceCertificateProfileError::StrictName { role, detail: e })?;
let oid = attr
.take_tag(0x06)
.map_err(|e| ResourceCertificateProfileError::StrictName { role, detail: e })?;
let (value_tag, _value) = attr
.take_any()
.map_err(|e| ResourceCertificateProfileError::StrictName { role, detail: e })?;
// Anything left after OID + value means more than two elements.
if !attr.is_empty() {
return Err(ResourceCertificateProfileError::StrictName {
role,
detail: "AttributeTypeAndValue must be SEQUENCE of 2".to_string(),
});
}
match oid {
// 2.5.4.3 commonName
&[0x55, 0x04, 0x03] => {
common_name_count += 1;
// 0x13 = PrintableString; other string types are rejected.
if value_tag != 0x13 {
return Err(ResourceCertificateProfileError::StrictName {
role,
detail: format!(
"commonName must be PrintableString, got tag 0x{value_tag:02X}"
),
});
}
}
// 2.5.4.5 serialNumber
&[0x55, 0x04, 0x05] => {
serial_number_count += 1;
if value_tag != 0x13 {
return Err(ResourceCertificateProfileError::StrictName {
role,
detail: format!(
"serialNumber must be PrintableString, got tag 0x{value_tag:02X}"
),
});
}
}
// Other attribute types are counted neither way and tolerated here.
_ => {}
}
}
}
// Cardinality checks per RFC 6487 §4.4: exactly one CN, at most one SN.
if common_name_count != 1 {
return Err(ResourceCertificateProfileError::StrictName {
role,
detail: format!("commonName must appear exactly once, got {common_name_count}"),
});
}
if serial_number_count > 1 {
return Err(ResourceCertificateProfileError::StrictName {
role,
detail: format!("serialNumber must appear at most once, got {serial_number_count}"),
});
}
Ok(())
}
#[cfg(test)]
mod strict_name_tests {
use super::*;
// Build a DER-encoded Name (SEQUENCE of single-attribute RDN SETs) from
// (oid bytes, value tag, value bytes) triples.
fn name_with_attrs(attrs: &[(&[u8], u8, &[u8])]) -> X509NameDer {
let mut rdns = Vec::new();
for (oid, tag, value) in attrs {
let mut attr = Vec::new();
attr.extend(der_tlv(0x06, oid));
attr.extend(der_tlv(*tag, value));
// AttributeTypeAndValue SEQUENCE, wrapped in an RDN SET.
let attr = der_tlv(0x30, &attr);
let rdn = der_tlv(0x31, &attr);
rdns.extend(rdn);
}
X509NameDer(der_tlv(0x30, &rdns))
}
// Encode one tag-length-value triple.
fn der_tlv(tag: u8, value: &[u8]) -> Vec<u8> {
let mut out = vec![tag];
encode_len(value.len(), &mut out);
out.extend_from_slice(value);
out
}
// DER length encoding: short form below 0x80, otherwise long form with a
// length-of-length prefix byte.
fn encode_len(len: usize, out: &mut Vec<u8>) {
if len < 0x80 {
out.push(len as u8);
return;
}
let mut bytes = Vec::new();
let mut value = len;
while value > 0 {
bytes.push((value & 0xFF) as u8);
value >>= 8;
}
bytes.reverse();
out.push(0x80 | bytes.len() as u8);
out.extend(bytes);
}
#[test]
// One PrintableString CN plus one PrintableString SN is the fully
// compliant shape and must pass.
fn strict_name_accepts_printable_common_name_and_serial_number() {
let name = name_with_attrs(&[
(&[0x55, 0x04, 0x03], 0x13, b"CN1"),
(&[0x55, 0x04, 0x05], 0x13, b"SN1"),
]);
validate_strict_rpki_name(&name, "subject").expect("strict name");
}
#[test]
// A UTF8String (tag 0x0C) commonName violates the PrintableString rule.
fn strict_name_rejects_utf8_common_name() {
let name = name_with_attrs(&[(&[0x55, 0x04, 0x03], 0x0C, b"CN1")]);
let err = validate_strict_rpki_name(&name, "subject").expect_err("strict name fails");
assert!(err.to_string().contains("PrintableString"), "{err}");
}
#[test]
// Two commonName attributes violate the exactly-once cardinality rule.
fn strict_name_rejects_duplicate_common_name() {
let name = name_with_attrs(&[
(&[0x55, 0x04, 0x03], 0x13, b"CN1"),
(&[0x55, 0x04, 0x03], 0x13, b"CN2"),
]);
let err = validate_strict_rpki_name(&name, "subject").expect_err("strict name fails");
assert!(err.to_string().contains("exactly once"), "{err}");
}
}
impl ResourceCertificateParsed {
pub fn validate_profile(self) -> Result<ResourceCertificate, ResourceCertificateProfileError> {
let version = match self.version {

View File

@ -2,7 +2,8 @@ use crate::data_model::common::{DerReader, der_take_tlv};
use crate::data_model::oid::OID_CT_ROUTE_ORIGIN_AUTHZ;
use crate::data_model::rc::{Afi as RcAfi, IpPrefix as RcIpPrefix, ResourceCertificate};
use crate::data_model::signed_object::{
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectParseError, SignedObjectValidateError,
RpkiSignedObject, RpkiSignedObjectParsed, SignedObjectDecodeError, SignedObjectParseError,
SignedObjectValidateError,
};
#[derive(Clone, Debug, PartialEq, Eq)]
@ -106,6 +107,15 @@ pub enum RoaProfileError {
},
}
impl From<SignedObjectDecodeError> for RoaProfileError {
fn from(value: SignedObjectDecodeError) -> Self {
match value {
SignedObjectDecodeError::Parse(e) => RoaProfileError::ProfileDecode(e.to_string()),
SignedObjectDecodeError::Validate(e) => RoaProfileError::SignedObject(e),
}
}
}
#[derive(Debug, thiserror::Error)]
pub enum RoaDecodeError {
#[error("{0}")]
@ -171,6 +181,17 @@ impl RoaObject {
Ok(Self::parse_der(der)?.validate_profile()?)
}
/// Decode a ROA object, optionally enforcing strict CMS DER encoding and
/// the strict name profile on the embedded certificate, then apply the
/// ROA-specific profile checks via `from_signed_object`.
pub fn decode_der_with_strict_options(
    der: &[u8],
    strict_cms_der: bool,
    strict_name: bool,
) -> Result<Self, RoaDecodeError> {
    let decoded =
        RpkiSignedObject::decode_der_with_strict_options(der, strict_cms_der, strict_name);
    // Route signed-object failures through the ROA profile error type.
    let signed_object = decoded.map_err(RoaProfileError::from)?;
    Self::from_signed_object(signed_object)
}
pub fn from_signed_object(signed_object: RpkiSignedObject) -> Result<Self, RoaDecodeError> {
let econtent_type = signed_object
.signed_data

View File

@ -9,10 +9,10 @@ use crate::data_model::oid::{
OID_SIGNED_DATA_RAW, OID_SUBJECT_INFO_ACCESS,
};
use crate::data_model::rc::{ResourceCertificate, SubjectInfoAccess};
use asn1_rs::{Any, Class, FromBer, Header, Tag};
use asn1_rs::{Any, Class, FromBer, FromDer as Asn1FromDer, Header, Tag};
use ring::digest;
use x509_parser::extensions::ParsedExtension;
use x509_parser::prelude::{FromDer, X509Certificate};
use x509_parser::prelude::X509Certificate;
use x509_parser::public_key::PublicKey;
use x509_parser::x509::SubjectPublicKeyInfo;
@ -321,7 +321,13 @@ impl RpkiSignedObject {
/// This performs encoding/structure parsing only. Profile constraints are enforced by
/// `RpkiSignedObjectParsed::validate_profile`.
pub fn parse_der(der: &[u8]) -> Result<RpkiSignedObjectParsed, SignedObjectParseError> {
parse_signed_object_content_info(der, der)
parse_signed_object_content_info(der, der, CmsParseMode::BerCompatible)
}
pub fn parse_der_strict_cms(
der: &[u8],
) -> Result<RpkiSignedObjectParsed, SignedObjectParseError> {
parse_signed_object_content_info(der, der, CmsParseMode::DerStrict)
}
/// Decode a DER-encoded RPKI Signed Object (CMS ContentInfo wrapping SignedData) and enforce
@ -331,6 +337,19 @@ impl RpkiSignedObject {
Ok(parsed.validate_profile()?)
}
pub fn decode_der_with_strict_options(
der: &[u8],
strict_cms_der: bool,
strict_name: bool,
) -> Result<Self, SignedObjectDecodeError> {
let parsed = if strict_cms_der {
Self::parse_der_strict_cms(der)?
} else {
Self::parse_der(der)?
};
Ok(parsed.validate_profile_with_strict_name(strict_name)?)
}
/// Scheme-A naming for signature verification.
pub fn verify(&self) -> Result<(), SignedObjectVerifyError> {
self.verify_signature()
@ -399,13 +418,20 @@ impl RpkiSignedObject {
}
}
/// Encoding tolerance used by the CMS cursor when reading TLVs.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum CmsParseMode {
/// Accept BER relaxations (e.g. constructed OCTET STRINGs) for
/// compatibility with non-canonical encoders.
BerCompatible,
/// Require canonical DER; BER-only constructs are rejected.
DerStrict,
}
/// Cursor over raw CMS bytes. `mode` selects BER-compatible or strict-DER
/// decoding for every TLV read through this reader and is propagated to
/// nested readers created from it (e.g. by `take_sequence`).
struct CmsReader<'a> {
buf: &'a [u8],
mode: CmsParseMode,
}
impl<'a> CmsReader<'a> {
fn new(buf: &'a [u8]) -> Self {
Self { buf }
fn new(buf: &'a [u8], mode: CmsParseMode) -> Self {
Self { buf, mode }
}
fn is_empty(&self) -> bool {
@ -417,19 +443,19 @@ impl<'a> CmsReader<'a> {
}
fn peek_tag(&self) -> Result<u8, String> {
let (_rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
let (_rem, any) = parse_any(self.buf, self.mode)?;
header_to_single_byte_tag(&any.header)
}
fn take_any(&mut self) -> Result<(u8, &'a [u8]), String> {
let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
let (rem, any) = parse_any(self.buf, self.mode)?;
let tag = header_to_single_byte_tag(&any.header)?;
self.buf = rem;
Ok((tag, any.data))
}
fn take_any_full(&mut self) -> Result<(u8, &'a [u8], &'a [u8]), String> {
let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
let (rem, any) = parse_any(self.buf, self.mode)?;
let consumed = self.buf.len() - rem.len();
let full = &self.buf[..consumed];
let tag = header_to_single_byte_tag(&any.header)?;
@ -454,16 +480,21 @@ impl<'a> CmsReader<'a> {
fn take_sequence(&mut self) -> Result<CmsReader<'a>, String> {
let value = self.take_tag(0x30)?;
Ok(CmsReader::new(value))
Ok(CmsReader::new(value, self.mode))
}
fn take_octet_string(&mut self) -> Result<Vec<u8>, String> {
let (rem, any) = Any::from_ber(self.buf).map_err(|e| format!("BER parse error: {e}"))?;
let (rem, any) = parse_any(self.buf, self.mode)?;
let tag = header_to_single_byte_tag(&any.header)?;
if self.mode == CmsParseMode::DerStrict && tag != 0x04 {
return Err(format!(
"unexpected tag in DER strict mode: got 0x{tag:02X}, expected 0x04"
));
}
if tag != 0x04 && tag != 0x24 {
return Err(format!("unexpected tag: got 0x{tag:02X}, expected 0x04"));
}
let octets = flatten_octet_string(any)?;
let octets = flatten_octet_string(any, self.mode)?;
self.buf = rem;
Ok(octets)
}
@ -475,7 +506,7 @@ impl<'a> CmsReader<'a> {
fn take_explicit(&mut self, expected_outer_tag: u8) -> Result<(u8, &'a [u8]), String> {
let inner_der = self.take_tag(expected_outer_tag)?;
let (tag, value, rem) = cms_take_tlv(inner_der)?;
let (tag, value, rem) = cms_take_tlv(inner_der, self.mode)?;
if !rem.is_empty() {
return Err("trailing bytes inside EXPLICIT value".into());
}
@ -484,7 +515,7 @@ impl<'a> CmsReader<'a> {
fn take_explicit_der(&mut self, expected_outer_tag: u8) -> Result<&'a [u8], String> {
let inner_der = self.take_tag(expected_outer_tag)?;
let (_tag, _value, rem) = cms_take_tlv(inner_der)?;
let (_tag, _value, rem) = cms_take_tlv(inner_der, self.mode)?;
if !rem.is_empty() {
return Err("trailing bytes inside EXPLICIT value".into());
}
@ -495,8 +526,9 @@ impl<'a> CmsReader<'a> {
fn parse_signed_object_content_info(
raw_der: &[u8],
parse_der: &[u8],
mode: CmsParseMode,
) -> Result<RpkiSignedObjectParsed, SignedObjectParseError> {
let mut r = CmsReader::new(parse_der);
let mut r = CmsReader::new(parse_der, mode);
let mut content_info_seq = r
.take_sequence()
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
@ -519,6 +551,17 @@ fn parse_signed_object_content_info(
})
}
/// Parse a single TLV from `input` under the requested tolerance: canonical
/// DER for `DerStrict`, relaxed BER otherwise. The error string names the
/// mode that rejected the input so callers can tell the two apart.
fn parse_any<'a>(input: &'a [u8], mode: CmsParseMode) -> Result<(&'a [u8], Any<'a>), String> {
    if mode == CmsParseMode::DerStrict {
        Any::from_der(input).map_err(|e| format!("DER parse error: {e}"))
    } else {
        Any::from_ber(input).map_err(|e| format!("BER parse error: {e}"))
    }
}
fn header_to_single_byte_tag(header: &Header<'_>) -> Result<u8, String> {
let tag_no = header.tag().0;
if tag_no > 30 {
@ -529,24 +572,27 @@ fn header_to_single_byte_tag(header: &Header<'_>) -> Result<u8, String> {
| tag_no as u8)
}
fn cms_take_tlv(input: &[u8]) -> Result<(u8, &[u8], &[u8]), String> {
let (rem, any) = Any::from_ber(input).map_err(|e| format!("BER parse error: {e}"))?;
/// Read one TLV from `input` under `mode`, returning its single-byte tag,
/// its value bytes, and the unconsumed remainder of `input`.
fn cms_take_tlv(input: &[u8], mode: CmsParseMode) -> Result<(u8, &[u8], &[u8]), String> {
    let (remainder, any) = parse_any(input, mode)?;
    header_to_single_byte_tag(&any.header).map(|tag| (tag, any.data, remainder))
}
fn flatten_octet_string(any: Any<'_>) -> Result<Vec<u8>, String> {
fn flatten_octet_string(any: Any<'_>, mode: CmsParseMode) -> Result<Vec<u8>, String> {
if any.class() != Class::Universal || any.tag() != Tag::OctetString {
return Err("expected OCTET STRING".into());
}
if !any.header.constructed() {
return Ok(any.data.to_vec());
}
if mode == CmsParseMode::DerStrict {
return Err("constructed OCTET STRING is not allowed in DER strict mode".into());
}
let mut out = Vec::new();
let mut input = any.data;
while !input.is_empty() {
let (rem, child) = Any::from_ber(input).map_err(|e| format!("BER parse error: {e}"))?;
out.extend(flatten_octet_string(child)?);
out.extend(flatten_octet_string(child, mode)?);
input = rem;
}
Ok(out)
@ -554,13 +600,20 @@ fn flatten_octet_string(any: Any<'_>) -> Result<Vec<u8>, String> {
impl RpkiSignedObjectParsed {
pub fn validate_profile(self) -> Result<RpkiSignedObject, SignedObjectValidateError> {
self.validate_profile_with_strict_name(false)
}
pub fn validate_profile_with_strict_name(
self,
strict_name: bool,
) -> Result<RpkiSignedObject, SignedObjectValidateError> {
if self.content_info_content_type != OID_SIGNED_DATA {
return Err(SignedObjectValidateError::InvalidContentInfoContentType(
self.content_info_content_type,
));
}
let signed_data = validate_signed_data_profile(self.signed_data)?;
let signed_data = validate_signed_data_profile(self.signed_data, strict_name)?;
Ok(RpkiSignedObject {
raw_der: self.raw_der,
@ -576,7 +629,7 @@ fn parse_signed_data_from_contentinfo_cursor(
let inner_der = seq.take_explicit_der(0xA0).map_err(|_e| {
SignedObjectParseError::Parse("ContentInfo.content must be [0] EXPLICIT".into())
})?;
let mut r = CmsReader::new(inner_der);
let mut r = CmsReader::new(inner_der, seq.mode);
let signed_data_seq = r
.take_sequence()
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
@ -598,7 +651,7 @@ fn parse_signed_data_cursor(
let digest_set_bytes = seq
.take_tag(0x31)
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
let mut digest_set = CmsReader::new(digest_set_bytes);
let mut digest_set = CmsReader::new(digest_set_bytes, seq.mode);
let mut digest_algorithms: Vec<AlgorithmIdentifierParsed> = Vec::new();
while !digest_set.is_empty() {
let alg = digest_set
@ -631,7 +684,7 @@ fn parse_signed_data_cursor(
let content = seq
.take_tag(0xA0)
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
certificates = Some(split_der_objects(content)?);
certificates = Some(split_der_objects(content, seq.mode)?);
}
0xA1 => {
crls_present = true;
@ -647,7 +700,7 @@ fn parse_signed_data_cursor(
let set_bytes = seq
.take_tag(0x31)
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
signer_infos = Some(parse_signer_infos_set_cursor(set_bytes)?);
signer_infos = Some(parse_signer_infos_set_cursor(set_bytes, seq.mode)?);
}
_ => {
return Err(SignedObjectParseError::Parse(
@ -689,7 +742,7 @@ fn parse_encapsulated_content_info_cursor(
"EncapsulatedContentInfo.eContent must be [0] EXPLICIT".into(),
)
})?;
let mut inner = CmsReader::new(inner_der);
let mut inner = CmsReader::new(inner_der, seq.mode);
let octets = inner
.take_octet_string()
.map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
@ -712,11 +765,14 @@ fn parse_encapsulated_content_info_cursor(
})
}
fn split_der_objects(mut input: &[u8]) -> Result<Vec<Vec<u8>>, SignedObjectParseError> {
fn split_der_objects(
mut input: &[u8],
mode: CmsParseMode,
) -> Result<Vec<Vec<u8>>, SignedObjectParseError> {
let mut out: Vec<Vec<u8>> = Vec::new();
while !input.is_empty() {
let (_tag, _value, rem) =
cms_take_tlv(input).map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
cms_take_tlv(input, mode).map_err(|e| SignedObjectParseError::Parse(e.to_string()))?;
let consumed = input.len() - rem.len();
out.push(input[..consumed].to_vec());
input = rem;
@ -726,8 +782,9 @@ fn split_der_objects(mut input: &[u8]) -> Result<Vec<Vec<u8>>, SignedObjectParse
fn parse_signer_infos_set_cursor(
set_bytes: &[u8],
mode: CmsParseMode,
) -> Result<Vec<SignerInfoParsed>, SignedObjectParseError> {
let mut set = CmsReader::new(set_bytes);
let mut set = CmsReader::new(set_bytes, mode);
let mut out: Vec<SignerInfoParsed> = Vec::new();
while !set.is_empty() {
let si = set
@ -738,7 +795,10 @@ fn parse_signer_infos_set_cursor(
Ok(out)
}
fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedObjectValidateError> {
fn validate_ee_certificate(
der: &[u8],
strict_name: bool,
) -> Result<ResourceEeCertificate, SignedObjectValidateError> {
let (rem, cert) = X509Certificate::from_der(der)
.map_err(|e| SignedObjectValidateError::EeCertificateParse(e.to_string()))?;
if !rem.is_empty() {
@ -762,6 +822,10 @@ fn validate_ee_certificate(der: &[u8]) -> Result<ResourceEeCertificate, SignedOb
};
}
};
if strict_name {
rc.validate_strict_name_profile()
.map_err(|e| SignedObjectValidateError::EeCertificateParse(e.to_string()))?;
}
let ski = rc
.tbs
@ -940,6 +1004,7 @@ fn parse_signer_info_cursor(
fn validate_signed_data_profile(
signed_data: SignedDataParsed,
strict_name: bool,
) -> Result<SignedDataProfiled, SignedObjectValidateError> {
if signed_data.version != 3 {
return Err(SignedObjectValidateError::InvalidSignedDataVersion(
@ -985,7 +1050,7 @@ fn validate_signed_data_profile(
certs.len(),
));
}
let ee = validate_ee_certificate(&certs[0])?;
let ee = validate_ee_certificate(&certs[0], strict_name)?;
if signed_data.signer_infos.len() != 1 {
return Err(SignedObjectValidateError::InvalidSignerInfosCount(
@ -1414,3 +1479,116 @@ fn strip_leading_zeros(bytes: &[u8]) -> &[u8] {
&bytes[idx..]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
// Take a real manifest fixture, rewrite its eContent OCTET STRING from
// primitive to constructed form *without changing the total length*, and
// verify the BER-compatible parser still accepts it while the strict DER
// parser rejects it.
fn strict_cms_der_rejects_constructed_octet_string_fixture() {
let der = std::fs::read(std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(
"tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/bW-_qXU9uNhGQz21NR2ansB8lr0.mft",
))
.expect("read manifest fixture");
let parsed = RpkiSignedObject::parse_der(&der).expect("parse fixture");
let econtent = parsed
.signed_data
.encap_content_info
.econtent
.clone()
.expect("fixture eContent");
// Sanity check: the cloned eContent matches the parsed one.
assert_eq!(
parsed.signed_data.encap_content_info.econtent.as_deref(),
Some(econtent.as_slice())
);
// Re-encode the eContent as a primitive OCTET STRING TLV so it can be
// located as a byte pattern inside the raw fixture.
let primitive_octets = der_tlv(0x04, &econtent);
let constructed_octets = same_size_constructed_octet_string(&primitive_octets, &econtent);
let mutated = replace_first_subslice(&der, &primitive_octets, &constructed_octets)
.expect("replace eContent OCTET STRING");
let compatible = RpkiSignedObject::parse_der(&mutated).expect("BER-compatible parse");
// The constructed form carries a (possibly truncated) prefix of the
// original content, so only a starts_with relation is asserted.
assert!(
econtent.starts_with(
compatible
.signed_data
.encap_content_info
.econtent
.as_deref()
.expect("compatible eContent")
)
);
let err = RpkiSignedObject::parse_der_strict_cms(&mutated)
.expect_err("DER strict rejects constructed OCTET STRING");
assert!(err.to_string().contains("DER"), "{err}");
}
// Replace the first occurrence of `from` in `input` with `to`; None when
// `from` is not present.
fn replace_first_subslice(input: &[u8], from: &[u8], to: &[u8]) -> Option<Vec<u8>> {
let pos = input
.windows(from.len())
.position(|candidate| candidate == from)?;
let mut out = Vec::with_capacity(input.len() - from.len() + to.len());
out.extend_from_slice(&input[..pos]);
out.extend_from_slice(to);
out.extend_from_slice(&input[pos + from.len()..]);
Some(out)
}
// Build a constructed OCTET STRING (tag 0x24) with exactly the same total
// encoded length as the given primitive TLV, so it can be substituted
// in place without shifting any surrounding lengths. The single child
// OCTET STRING is sized so header + child fills the outer value exactly.
fn same_size_constructed_octet_string(primitive: &[u8], content: &[u8]) -> Vec<u8> {
assert_eq!(primitive[0], 0x04);
let header_len = tlv_header_len(primitive);
let outer_value_len = primitive.len() - header_len;
// Find the largest child payload length whose full TLV (1 tag byte +
// length bytes + payload) fits the outer value exactly.
let child_len = (0..=outer_value_len)
.rev()
.find(|candidate| 1 + len_len(*candidate) + *candidate == outer_value_len)
.expect("find child length");
let mut out = primitive[..header_len].to_vec();
// 0x24 = OCTET STRING with the constructed bit set.
out[0] = 0x24;
out.extend(der_tlv(0x04, &content[..child_len]));
assert_eq!(out.len(), primitive.len());
out
}
// Number of header bytes (tag + length) at the start of a TLV.
fn tlv_header_len(tlv: &[u8]) -> usize {
if tlv[1] & 0x80 == 0 {
2
} else {
2 + (tlv[1] & 0x7F) as usize
}
}
// Number of bytes DER needs to encode a length of `len`.
fn len_len(len: usize) -> usize {
if len < 0x80 {
return 1;
}
let mut value = len;
let mut n = 0usize;
while value > 0 {
n += 1;
value >>= 8;
}
1 + n
}
// Encode one tag-length-value triple.
fn der_tlv(tag: u8, value: &[u8]) -> Vec<u8> {
let mut out = vec![tag];
encode_len(value.len(), &mut out);
out.extend_from_slice(value);
out
}
// DER length encoding: short form below 0x80, otherwise long form with a
// length-of-length prefix byte.
fn encode_len(len: usize, out: &mut Vec<u8>) {
if len < 0x80 {
out.push(len as u8);
return;
}
let mut bytes = Vec::new();
let mut value = len;
while value > 0 {
bytes.push((value & 0xFF) as u8);
value >>= 8;
}
bytes.reverse();
out.push(0x80 | bytes.len() as u8);
out.extend(bytes);
}
}

View File

@ -112,6 +112,14 @@ impl TaCertificate {
Ok(Self::parse_der(der)?.validate_profile()?)
}
pub fn decode_der_with_strict_name(der: &[u8]) -> Result<Self, TaCertificateDecodeError> {
let ta = Self::decode_der(der)?;
ta.rc_ca
.validate_strict_name_profile()
.map_err(TaCertificateProfileError::from)?;
Ok(ta)
}
/// Backwards-compatible helper (historical name).
pub fn from_der(der: &[u8]) -> Result<Self, TaCertificateError> {
Self::decode_der(der)
@ -264,6 +272,15 @@ impl TrustAnchor {
Ok(Self::bind(tal, ta_certificate, resolved_uri)?)
}
pub fn bind_der_with_strict_name(
tal: Tal,
ta_der: &[u8],
resolved_uri: Option<&Url>,
) -> Result<Self, TrustAnchorError> {
let ta_certificate = TaCertificate::decode_der_with_strict_name(ta_der)?;
Ok(Self::bind(tal, ta_certificate, resolved_uri)?)
}
pub fn bind(
tal: Tal,
ta_certificate: TaCertificate,

View File

@ -39,12 +39,67 @@ impl Default for SignedObjectFailurePolicy {
}
}
/// Opt-in strict validation toggles; the `Default` derive leaves every check
/// disabled (lenient behavior). `#[serde(default)]` lets a policy TOML omit
/// any of the flags.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct StrictPolicy {
    /// Strict X.509 Name profile validation (TA and resource certificates).
    pub name: bool,
    /// Strict DER (rather than BER-tolerant) CMS parsing of signed objects.
    pub cms_der: bool,
    /// Strict CMS signedAttrs checks — NOTE(review): enforcement site is not
    /// visible in this chunk; confirm where this flag is consumed.
    pub signed_attrs: bool,
}
impl StrictPolicy {
    /// All strict checks disabled (identical to the `Default` value).
    pub fn none() -> Self {
        Self::default()
    }

    /// All strict checks enabled.
    pub fn all() -> Self {
        Self {
            name: true,
            cms_der: true,
            signed_attrs: true,
        }
    }

    /// Parses the value of the `--strict` CLI flag.
    ///
    /// `None` (flag given without a value), an empty string, or `"all"` enable
    /// everything; `"none"` disables everything; otherwise a comma-separated
    /// list of policy names is applied left to right, so a later `all`/`none`
    /// resets earlier entries. Unknown or empty list items are an error.
    pub fn parse_cli_spec(spec: Option<&str>) -> Result<Self, String> {
        let spec = match spec {
            Some(value) => value.trim(),
            None => return Ok(Self::all()),
        };
        if spec.is_empty() || spec == "all" {
            return Ok(Self::all());
        }
        if spec == "none" {
            return Ok(Self::none());
        }
        let mut policy = Self::none();
        for piece in spec.split(',') {
            match piece.trim() {
                "name" => policy.name = true,
                "cms-der" | "cms_der" => policy.cms_der = true,
                "signed-attrs" | "signed_attrs" => policy.signed_attrs = true,
                "all" => policy = Self::all(),
                "none" => policy = Self::none(),
                "" => return Err("empty strict policy name".to_string()),
                other => {
                    return Err(format!(
                        "unknown strict policy: {other}; supported: name,cms-der,signed-attrs,all,none"
                    ));
                }
            }
        }
        Ok(policy)
    }
}
/// Top-level runtime policy, deserializable from the policy TOML file.
/// `#[serde(default)]` lets any omitted field fall back to its default.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default)]
pub struct Policy {
    // Sync transport preference.
    pub sync_preference: SyncPreference,
    // Behavior when fetching a CA's publication point fails.
    pub ca_failed_fetch_policy: CaFailedFetchPolicy,
    // Behavior when a signed object fails validation.
    pub signed_object_failure_policy: SignedObjectFailurePolicy,
    /// Opt-in strict validation toggles (the `[strict]` table in TOML).
    pub strict: StrictPolicy,
}
impl Default for Policy {
@ -53,6 +108,7 @@ impl Default for Policy {
sync_preference: SyncPreference::default(),
ca_failed_fetch_policy: CaFailedFetchPolicy::default(),
signed_object_failure_policy: SignedObjectFailurePolicy::default(),
strict: StrictPolicy::default(),
}
}
}
@ -68,3 +124,50 @@ impl Policy {
toml::from_str(s).map_err(|e| PolicyParseError::Toml(e.to_string()))
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn strict_policy_parses_cli_specs() {
        // A bare `--strict` (no value) enables every check.
        assert_eq!(
            StrictPolicy::parse_cli_spec(None).unwrap(),
            StrictPolicy::all()
        );
        // A comma list enables exactly the named checks.
        let partial = StrictPolicy::parse_cli_spec(Some("name,signed-attrs")).unwrap();
        assert_eq!(
            partial,
            StrictPolicy {
                name: true,
                cms_der: false,
                signed_attrs: true,
            }
        );
        // `none` disables everything; unknown names are rejected.
        assert_eq!(
            StrictPolicy::parse_cli_spec(Some("none")).unwrap(),
            StrictPolicy::none()
        );
        assert!(StrictPolicy::parse_cli_spec(Some("bogus")).is_err());
    }

    #[test]
    fn policy_toml_accepts_strict_table() {
        let toml_doc = r#"
[strict]
name = true
cms_der = true
signed_attrs = false
"#;
        let policy = Policy::from_toml_str(toml_doc).expect("parse policy");
        let expected = StrictPolicy {
            name: true,
            cms_der: true,
            signed_attrs: false,
        };
        assert_eq!(policy.strict, expected);
    }
}

View File

@ -57,10 +57,42 @@ pub fn discover_root_ca_instance_from_tal_url(
discover_root_ca_instance_from_tal(http_fetcher, tal, Some(tal_url.to_string()))
}
/// Fetches a TAL over the given fetcher, decodes it, and bootstraps its root
/// CA instance with strict X.509 Name validation enabled for the trust
/// anchor certificate.
pub fn discover_root_ca_instance_from_tal_url_with_strict_name(
    http_fetcher: &dyn Fetcher,
    tal_url: &str,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    let raw_tal = http_fetcher.fetch(tal_url).map_err(FromTalError::TalFetch)?;
    let decoded_tal = Tal::decode_bytes(&raw_tal)?;
    let source_url = Some(tal_url.to_string());
    discover_root_ca_instance_from_tal_with_strict_name(http_fetcher, decoded_tal, source_url)
}
/// Discovers the root CA instance described by an already-decoded TAL using
/// lenient (non-strict) trust anchor name validation.
pub fn discover_root_ca_instance_from_tal(
    http_fetcher: &dyn Fetcher,
    tal: Tal,
    tal_url: Option<String>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    // Delegates with strict_name = false (lenient variant).
    discover_root_ca_instance_from_tal_impl(http_fetcher, tal, tal_url, false)
}
/// Discovers the root CA instance described by an already-decoded TAL with
/// strict X.509 Name validation enabled for the trust anchor certificate.
pub fn discover_root_ca_instance_from_tal_with_strict_name(
    http_fetcher: &dyn Fetcher,
    tal: Tal,
    tal_url: Option<String>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    // Delegates with strict_name = true (strict variant).
    discover_root_ca_instance_from_tal_impl(http_fetcher, tal, tal_url, true)
}
fn discover_root_ca_instance_from_tal_impl(
http_fetcher: &dyn Fetcher,
tal: Tal,
tal_url: Option<String>,
strict_name: bool,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
if tal.ta_uris.is_empty() {
return Err(FromTalError::NoTaUris);
@ -76,13 +108,14 @@ pub fn discover_root_ca_instance_from_tal(
}
};
let trust_anchor = match TrustAnchor::bind_der(tal.clone(), &ta_der, Some(ta_uri)) {
Ok(ta) => ta,
Err(e) => {
last_err = Some(format!("bind {ta_uri} failed: {e}"));
continue;
}
};
let trust_anchor =
match bind_trust_anchor_der(tal.clone(), &ta_der, Some(ta_uri), strict_name) {
Ok(ta) => ta,
Err(e) => {
last_err = Some(format!("bind {ta_uri} failed: {e}"));
continue;
}
};
let ca_instance =
match ca_instance_uris_from_ca_certificate(&trust_anchor.ta_certificate.rc_ca) {
@ -110,6 +143,37 @@ pub fn discover_root_ca_instance_from_tal_with_fetchers(
rsync_fetcher: &dyn RsyncFetcher,
tal: Tal,
tal_url: Option<String>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
discover_root_ca_instance_from_tal_with_fetchers_impl(
http_fetcher,
rsync_fetcher,
tal,
tal_url,
false,
)
}
/// Like `discover_root_ca_instance_from_tal_with_fetchers`, but enables strict
/// X.509 Name validation when binding the trust anchor certificate.
pub fn discover_root_ca_instance_from_tal_with_fetchers_strict_name(
    http_fetcher: &dyn Fetcher,
    rsync_fetcher: &dyn RsyncFetcher,
    tal: Tal,
    tal_url: Option<String>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    // Delegates with strict_name = true.
    discover_root_ca_instance_from_tal_with_fetchers_impl(
        http_fetcher,
        rsync_fetcher,
        tal,
        tal_url,
        true,
    )
}
fn discover_root_ca_instance_from_tal_with_fetchers_impl(
http_fetcher: &dyn Fetcher,
rsync_fetcher: &dyn RsyncFetcher,
tal: Tal,
tal_url: Option<String>,
strict_name: bool,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
if tal.ta_uris.is_empty() {
return Err(FromTalError::NoTaUris);
@ -127,13 +191,14 @@ pub fn discover_root_ca_instance_from_tal_with_fetchers(
}
};
let trust_anchor = match TrustAnchor::bind_der(tal.clone(), &ta_der, Some(ta_uri)) {
Ok(ta) => ta,
Err(e) => {
last_err = Some(format!("bind {ta_uri} failed: {e}"));
continue;
}
};
let trust_anchor =
match bind_trust_anchor_der(tal.clone(), &ta_der, Some(ta_uri), strict_name) {
Ok(ta) => ta,
Err(e) => {
last_err = Some(format!("bind {ta_uri} failed: {e}"));
continue;
}
};
let ca_instance =
match ca_instance_uris_from_ca_certificate(&trust_anchor.ta_certificate.rc_ca) {
@ -168,6 +233,19 @@ fn fetch_ta_der(
}
}
/// Binds a trust anchor from raw TA certificate DER, optionally applying the
/// strict X.509 Name profile check first.
fn bind_trust_anchor_der(
    tal: Tal,
    ta_der: &[u8],
    resolved_uri: Option<&Url>,
    strict_name: bool,
) -> Result<TrustAnchor, TrustAnchorError> {
    match strict_name {
        true => TrustAnchor::bind_der_with_strict_name(tal, ta_der, resolved_uri),
        false => TrustAnchor::bind_der(tal, ta_der, resolved_uri),
    }
}
fn fetch_ta_der_via_rsync(
rsync_fetcher: &dyn RsyncFetcher,
ta_rsync_uri: &str,
@ -213,9 +291,26 @@ pub fn discover_root_ca_instance_from_tal_and_ta_der(
tal_bytes: &[u8],
ta_der: &[u8],
resolved_ta_uri: Option<&Url>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
discover_root_ca_instance_from_tal_and_ta_der_impl(tal_bytes, ta_der, resolved_ta_uri, false)
}
/// Strict-name counterpart of `discover_root_ca_instance_from_tal_and_ta_der`:
/// bootstraps from in-memory TAL bytes plus TA certificate DER with strict
/// X.509 Name validation enabled.
pub fn discover_root_ca_instance_from_tal_and_ta_der_with_strict_name(
    tal_bytes: &[u8],
    ta_der: &[u8],
    resolved_ta_uri: Option<&Url>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    // Delegates with strict_name = true.
    discover_root_ca_instance_from_tal_and_ta_der_impl(tal_bytes, ta_der, resolved_ta_uri, true)
}
fn discover_root_ca_instance_from_tal_and_ta_der_impl(
tal_bytes: &[u8],
ta_der: &[u8],
resolved_ta_uri: Option<&Url>,
strict_name: bool,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
let tal = Tal::decode_bytes(tal_bytes)?;
let trust_anchor = TrustAnchor::bind_der(tal, ta_der, resolved_ta_uri)?;
let trust_anchor = bind_trust_anchor_der(tal, ta_der, resolved_ta_uri, strict_name)?;
let ca_instance = ca_instance_uris_from_ca_certificate(&trust_anchor.ta_certificate.rc_ca)?;
Ok(DiscoveredRootCaInstance {
tal_url: None,

View File

@ -28,6 +28,17 @@ const RFC_CRLDP: &[RfcRef] = &[RfcRef("RFC 6487 §4.8.6")];
const RFC_CRLDP_AND_LOCKED_PACK: &[RfcRef] =
&[RfcRef("RFC 6487 §4.8.6"), RfcRef("RFC 9286 §4.2.1")];
/// Decodes a resource certificate, routing through the strict-name decoder
/// when the policy's `strict.name` flag is enabled.
fn decode_resource_certificate_with_policy(
    der: &[u8],
    policy: &Policy,
) -> Result<ResourceCertificate, crate::data_model::rc::ResourceCertificateDecodeError> {
    match policy.strict.name {
        true => ResourceCertificate::decode_der_with_strict_name(der),
        false => ResourceCertificate::decode_der(der),
    }
}
#[derive(Clone, Debug)]
pub(crate) struct VerifiedIssuerCrl {
crl: crate::data_model::crl::RpkixCrl,
@ -178,11 +189,15 @@ pub fn process_publication_point_for_issuer_with_options<P: PublicationPointData
let mut audit: Vec<ObjectAuditEntry> = Vec::new();
// Enforce that `manifest_bytes` is actually a manifest object.
let _manifest = ManifestObject::decode_der(manifest_bytes)
.expect("publication point snapshot manifest decodes");
let _manifest = ManifestObject::decode_der_with_strict_options(
manifest_bytes,
policy.strict.cms_der,
policy.strict.name,
)
.expect("publication point snapshot manifest decodes");
// Decode issuer CA once; if it fails we cannot validate ROA/ASPA EE certificates.
let issuer_ca = match ResourceCertificate::decode_der(issuer_ca_der) {
let issuer_ca = match decode_resource_certificate_with_policy(issuer_ca_der, policy) {
Ok(v) => v,
Err(e) => {
stats.publication_point_dropped = true;
@ -349,6 +364,8 @@ pub fn process_publication_point_for_issuer_with_options<P: PublicationPointData
validation_time,
timing,
collect_vcir_local_outputs,
policy.strict.cms_der,
policy.strict.name,
);
match result.outcome {
Ok(mut ok) => {
@ -457,6 +474,8 @@ pub fn process_publication_point_for_issuer_with_options<P: PublicationPointData
validation_time,
timing,
collect_vcir_local_outputs,
policy.strict.cms_der,
policy.strict.name,
) {
Ok((att, local_output)) => {
stats.aspa_ok += 1;
@ -739,6 +758,8 @@ pub(crate) struct OwnedRoaTask {
issuer_effective_as: Option<crate::data_model::rc::AsResourceSet>,
validation_time: time::OffsetDateTime,
collect_vcir_local_outputs: bool,
strict_cms_der: bool,
strict_name: bool,
pub(crate) submitted_at: Option<Instant>,
}
@ -844,6 +865,8 @@ fn validate_owned_roa_task(worker_index: usize, task: OwnedRoaTask) -> RoaTaskRe
task.validation_time,
None,
task.collect_vcir_local_outputs,
task.strict_cms_der,
task.strict_name,
)
.map(|(vrps, local_outputs)| RoaTaskOk {
vrps,
@ -881,6 +904,8 @@ pub(crate) struct ParallelObjectsStage {
issuer_effective_as: Option<crate::data_model::rc::AsResourceSet>,
validation_time: time::OffsetDateTime,
collect_vcir_local_outputs: bool,
strict_cms_der: bool,
strict_name: bool,
warnings: Vec<Warning>,
stats: ObjectsStats,
audit: Vec<ObjectAuditEntry>,
@ -907,6 +932,8 @@ impl ParallelObjectsStage {
issuer_effective_as: self.issuer_effective_as.clone(),
validation_time: self.validation_time,
collect_vcir_local_outputs: self.collect_vcir_local_outputs,
strict_cms_der: self.strict_cms_der,
strict_name: self.strict_name,
submitted_at: None,
})
.collect()
@ -928,6 +955,7 @@ impl ParallelObjectsStage {
pub(crate) fn prepare_publication_point_for_parallel_roa<P: PublicationPointData>(
publication_point_id: u64,
publication_point: &P,
policy: &Policy,
issuer_ca_der: &[u8],
issuer_ca_rsync_uri: Option<&str>,
issuer_effective_ip: Option<&crate::data_model::rc::IpResourceSet>,
@ -950,10 +978,14 @@ pub(crate) fn prepare_publication_point_for_parallel_roa<P: PublicationPointData
.count();
let mut audit: Vec<ObjectAuditEntry> = Vec::new();
let _manifest = ManifestObject::decode_der(manifest_bytes)
.expect("publication point snapshot manifest decodes");
let _manifest = ManifestObject::decode_der_with_strict_options(
manifest_bytes,
policy.strict.cms_der,
policy.strict.name,
)
.expect("publication point snapshot manifest decodes");
let issuer_ca = match ResourceCertificate::decode_der(issuer_ca_der) {
let issuer_ca = match decode_resource_certificate_with_policy(issuer_ca_der, policy) {
Ok(v) => v,
Err(e) => {
stats.publication_point_dropped = true;
@ -1109,6 +1141,8 @@ pub(crate) fn prepare_publication_point_for_parallel_roa<P: PublicationPointData
issuer_effective_as: issuer_effective_as.cloned(),
validation_time,
collect_vcir_local_outputs,
strict_cms_der: policy.strict.cms_der,
strict_name: policy.strict.name,
warnings,
stats,
audit,
@ -1193,6 +1227,8 @@ pub(crate) fn reduce_parallel_roa_stage(
stage.validation_time,
timing,
stage.collect_vcir_local_outputs,
stage.strict_cms_der,
stage.strict_name,
) {
Ok((att, local_output)) => {
stats.aspa_ok += 1;
@ -1254,6 +1290,7 @@ fn process_publication_point_for_issuer_parallel_roa_inner<P: PublicationPointDa
let stage = match prepare_publication_point_for_parallel_roa(
0,
publication_point,
_policy,
issuer_ca_der,
issuer_ca_rsync_uri,
issuer_effective_ip,
@ -1411,6 +1448,8 @@ pub(crate) fn validate_roa_task_serial(
validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>,
collect_vcir_local_outputs: bool,
strict_cms_der: bool,
strict_name: bool,
) -> RoaTaskResult {
let sha256_hex = sha256_hex_from_32(&task.file.sha256);
let outcome = process_roa_with_issuer(
@ -1427,6 +1466,8 @@ pub(crate) fn validate_roa_task_serial(
validation_time,
timing,
collect_vcir_local_outputs,
strict_cms_der,
strict_name,
)
.map(|(vrps, local_outputs)| RoaTaskOk {
vrps,
@ -1459,11 +1500,17 @@ fn process_roa_with_issuer(
validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>,
collect_vcir_local_outputs: bool,
strict_cms_der: bool,
strict_name: bool,
) -> Result<(Vec<Vrp>, Vec<VcirLocalOutput>), ObjectValidateError> {
let _decode = timing
.as_ref()
.map(|t| t.span_phase("objects_roa_decode_and_validate_total"));
let roa = RoaObject::decode_der(file.bytes().map_err(ObjectValidateError::BytesLoad)?)?;
let roa = RoaObject::decode_der_with_strict_options(
file.bytes().map_err(ObjectValidateError::BytesLoad)?,
strict_cms_der,
strict_name,
)?;
drop(_decode);
let _ee_profile = timing
@ -1575,11 +1622,17 @@ fn process_roa_with_issuer_parallel_cached(
validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>,
collect_vcir_local_outputs: bool,
strict_cms_der: bool,
strict_name: bool,
) -> Result<(Vec<Vrp>, Vec<VcirLocalOutput>), ObjectValidateError> {
let _decode = timing
.as_ref()
.map(|t| t.span_phase("objects_roa_decode_and_validate_total"));
let roa = RoaObject::decode_der(file.bytes().map_err(ObjectValidateError::BytesLoad)?)?;
let roa = RoaObject::decode_der_with_strict_options(
file.bytes().map_err(ObjectValidateError::BytesLoad)?,
strict_cms_der,
strict_name,
)?;
drop(_decode);
let _ee_profile = timing
@ -1697,11 +1750,17 @@ fn process_aspa_with_issuer(
validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>,
collect_vcir_local_outputs: bool,
strict_cms_der: bool,
strict_name: bool,
) -> Result<(AspaAttestation, Option<VcirLocalOutput>), ObjectValidateError> {
let _decode = timing
.as_ref()
.map(|t| t.span_phase("objects_aspa_decode_and_validate_total"));
let aspa = AspaObject::decode_der(file.bytes().map_err(ObjectValidateError::BytesLoad)?)?;
let aspa = AspaObject::decode_der_with_strict_options(
file.bytes().map_err(ObjectValidateError::BytesLoad)?,
strict_cms_der,
strict_name,
)?;
drop(_decode);
let _ee_profile = timing

View File

@ -24,7 +24,11 @@ use crate::replay::fetch_rsync::PayloadReplayRsyncFetcher;
use crate::sync::rrdp::Fetcher;
use crate::validation::from_tal::{
DiscoveredRootCaInstance, FromTalError, discover_root_ca_instance_from_tal_and_ta_der,
discover_root_ca_instance_from_tal_url, discover_root_ca_instance_from_tal_with_fetchers,
discover_root_ca_instance_from_tal_and_ta_der_with_strict_name,
discover_root_ca_instance_from_tal_url,
discover_root_ca_instance_from_tal_url_with_strict_name,
discover_root_ca_instance_from_tal_with_fetchers,
discover_root_ca_instance_from_tal_with_fetchers_strict_name,
};
use crate::validation::objects::ParallelRoaWorkerPool;
use crate::validation::tree::{
@ -86,6 +90,7 @@ pub struct RunTreeFromTalOutput {
pub struct RunTreeFromTalAuditOutput {
pub discovery: DiscoveredRootCaInstance,
pub discoveries: Vec<DiscoveredRootCaInstance>,
pub successful_tal_inputs: Vec<TalInputSpec>,
pub tree: TreeRunOutput,
pub publication_points: Vec<PublicationPointAudit>,
pub downloads: Vec<crate::audit::AuditDownloadEvent>,
@ -196,19 +201,48 @@ fn root_discovery_from_tal_input(
tal_input: &TalInputSpec,
http_fetcher: &dyn Fetcher,
rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
strict_name: bool,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
match &tal_input.source {
TalSource::Url(url) => discover_root_ca_instance_from_tal_url(http_fetcher, url),
TalSource::Url(url) => {
if strict_name {
discover_root_ca_instance_from_tal_url_with_strict_name(http_fetcher, url)
} else {
discover_root_ca_instance_from_tal_url(http_fetcher, url)
}
}
TalSource::DerBytes {
tal_bytes, ta_der, ..
} => discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, None),
} => {
if strict_name {
discover_root_ca_instance_from_tal_and_ta_der_with_strict_name(
tal_bytes, ta_der, None,
)
} else {
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, None)
}
}
TalSource::FilePath(path) => {
let tal_bytes = std::fs::read(path).map_err(|e| {
FromTalError::TalFetch(format!("read TAL file failed: {}: {e}", path.display()))
})?;
let tal = crate::data_model::tal::Tal::decode_bytes(&tal_bytes)
.map_err(FromTalError::from)?;
discover_root_ca_instance_from_tal_with_fetchers(http_fetcher, rsync_fetcher, tal, None)
if strict_name {
discover_root_ca_instance_from_tal_with_fetchers_strict_name(
http_fetcher,
rsync_fetcher,
tal,
None,
)
} else {
discover_root_ca_instance_from_tal_with_fetchers(
http_fetcher,
rsync_fetcher,
tal,
None,
)
}
}
TalSource::FilePathWithTa { tal_path, ta_path } => {
let tal_bytes = std::fs::read(tal_path).map_err(|e| {
@ -217,19 +251,76 @@ fn root_discovery_from_tal_input(
let ta_der = std::fs::read(ta_path).map_err(|e| {
FromTalError::TaFetch(format!("read TA file failed: {}: {e}", ta_path.display()))
})?;
discover_root_ca_instance_from_tal_and_ta_der(&tal_bytes, &ta_der, None)
if strict_name {
discover_root_ca_instance_from_tal_and_ta_der_with_strict_name(
&tal_bytes, &ta_der, None,
)
} else {
discover_root_ca_instance_from_tal_and_ta_der(&tal_bytes, &ta_der, None)
}
}
}
}
/// Dispatches TAL-URL root discovery to the strict-name or lenient variant
/// based on `policy.strict.name`.
fn discover_root_ca_instance_from_tal_url_with_policy(
    policy: &crate::policy::Policy,
    http_fetcher: &dyn Fetcher,
    tal_url: &str,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    match policy.strict.name {
        true => discover_root_ca_instance_from_tal_url_with_strict_name(http_fetcher, tal_url),
        false => discover_root_ca_instance_from_tal_url(http_fetcher, tal_url),
    }
}
/// Dispatches in-memory TAL + TA DER root discovery to the strict-name or
/// lenient variant based on `policy.strict.name`.
fn discover_root_ca_instance_from_tal_and_ta_der_with_policy(
    policy: &crate::policy::Policy,
    tal_bytes: &[u8],
    ta_der: &[u8],
    resolved_ta_uri: Option<&Url>,
) -> Result<DiscoveredRootCaInstance, FromTalError> {
    if !policy.strict.name {
        return discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri);
    }
    discover_root_ca_instance_from_tal_and_ta_der_with_strict_name(
        tal_bytes,
        ta_der,
        resolved_ta_uri,
    )
}
fn discover_multiple_roots_from_tal_inputs(
tal_inputs: &[TalInputSpec],
http_fetcher: &dyn Fetcher,
rsync_fetcher: &dyn crate::fetch::rsync::RsyncFetcher,
strict_name: bool,
) -> Result<Vec<TalRootDiscovery>, RunTreeFromTalError> {
let mut roots = Vec::with_capacity(tal_inputs.len());
for tal_input in tal_inputs {
let discovery = root_discovery_from_tal_input(tal_input, http_fetcher, rsync_fetcher)?;
let discovery = match root_discovery_from_tal_input(
tal_input,
http_fetcher,
rsync_fetcher,
strict_name,
) {
Ok(discovery) => discovery,
Err(error)
if should_isolate_multi_tal_strict_name_failure(
tal_inputs.len(),
strict_name,
&error,
) =>
{
eprintln!(
"warning: skipping TAL '{}' because strict name validation failed during trust anchor discovery: {error}",
tal_input.tal_id
);
continue;
}
Err(error) => return Err(error.into()),
};
let root_handle = root_handle_from_trust_anchor(
&discovery.trust_anchor,
tal_input.tal_id.clone(),
@ -242,9 +333,25 @@ fn discover_multiple_roots_from_tal_inputs(
root_handle,
});
}
if roots.is_empty() {
return Err(RunTreeFromTalError::Replay(
"multi-TAL root discovery returned no usable roots after strict name filtering"
.to_string(),
));
}
Ok(roots)
}
/// Whether a root-discovery failure should be isolated (the TAL skipped with a
/// warning) instead of aborting the whole run: only in multi-TAL runs with
/// strict name checking enabled, and only for strict-name failures.
///
/// NOTE(review): detection relies on matching the rendered error text
/// ("Name strict validation failed"); a typed error variant would be sturdier.
fn should_isolate_multi_tal_strict_name_failure(
    tal_input_count: usize,
    strict_name: bool,
    error: &FromTalError,
) -> bool {
    if !strict_name || tal_input_count <= 1 {
        return false;
    }
    error.to_string().contains("Name strict validation failed")
}
#[derive(Debug, thiserror::Error)]
pub enum RunTreeFromTalError {
#[error("{0}")]
@ -288,7 +395,8 @@ pub fn run_tree_from_tal_url_serial(
validation_time: time::OffsetDateTime,
config: &TreeRunConfig,
) -> Result<RunTreeFromTalOutput, RunTreeFromTalError> {
let discovery = discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?;
let discovery =
discover_root_ca_instance_from_tal_url_with_policy(policy, http_fetcher, tal_url)?;
let runner = make_live_runner(
store,
@ -325,7 +433,8 @@ pub fn run_tree_from_tal_url_serial_audit(
validation_time: time::OffsetDateTime,
config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let discovery = discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?;
let discovery =
discover_root_ca_instance_from_tal_url_with_policy(policy, http_fetcher, tal_url)?;
let download_log = DownloadLogHandle::new();
let runner = make_live_runner(
@ -359,6 +468,7 @@ pub fn run_tree_from_tal_url_serial_audit(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -379,7 +489,8 @@ pub fn run_tree_from_tal_url_serial_audit_with_timing(
timing: &TimingHandle,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let _tal = timing.span_phase("tal_bootstrap");
let discovery = discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?;
let discovery =
discover_root_ca_instance_from_tal_url_with_policy(policy, http_fetcher, tal_url)?;
drop(_tal);
let download_log = DownloadLogHandle::new();
@ -415,6 +526,7 @@ pub fn run_tree_from_tal_url_serial_audit_with_timing(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -489,6 +601,7 @@ where
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -523,7 +636,12 @@ where
"multi-TAL run requires at least one TAL input".to_string(),
));
}
let roots = discover_multiple_roots_from_tal_inputs(&tal_inputs, http_fetcher, rsync_fetcher)?;
let roots = discover_multiple_roots_from_tal_inputs(
&tal_inputs,
http_fetcher,
rsync_fetcher,
policy.strict.name,
)?;
let primary = roots.first().cloned().ok_or_else(|| {
RunTreeFromTalError::Replay("multi-TAL root discovery returned no roots".to_string())
})?;
@ -531,9 +649,13 @@ where
.iter()
.map(|item| item.discovery.clone())
.collect::<Vec<_>>();
let successful_tal_inputs = roots
.iter()
.map(|item| item.tal_input.clone())
.collect::<Vec<_>>();
let root_handles = roots
.into_iter()
.map(|item| item.root_handle)
.iter()
.map(|item| item.root_handle.clone())
.collect::<Vec<_>>();
let download_log = DownloadLogHandle::new();
@ -545,7 +667,7 @@ where
parallel_config,
None,
Some(download_log.clone()),
tal_inputs,
successful_tal_inputs.clone(),
)?;
let current_repo_index_for_output = current_repo_index.clone();
let runner = make_live_runner(
@ -583,6 +705,7 @@ where
Ok(RunTreeFromTalAuditOutput {
discovery: primary.discovery.clone(),
discoveries,
successful_tal_inputs,
tree,
publication_points,
downloads,
@ -610,7 +733,8 @@ where
H: Fetcher + Clone + 'static,
R: crate::fetch::rsync::RsyncFetcher + Clone + 'static,
{
let discovery = discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?;
let discovery =
discover_root_ca_instance_from_tal_url_with_policy(policy, http_fetcher, tal_url)?;
run_single_root_parallel_audit_inner(
store,
policy,
@ -643,8 +767,12 @@ where
H: Fetcher + Clone + 'static,
R: crate::fetch::rsync::RsyncFetcher + Clone + 'static,
{
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
let derived_tal_id = derive_tal_id(&discovery);
let tal_inputs = vec![TalInputSpec {
tal_id: derived_tal_id.clone(),
@ -718,7 +846,8 @@ where
H: Fetcher + Clone + 'static,
R: crate::fetch::rsync::RsyncFetcher + Clone + 'static,
{
let discovery = discover_root_ca_instance_from_tal_url(http_fetcher, tal_url)?;
let discovery =
discover_root_ca_instance_from_tal_url_with_policy(policy, http_fetcher, tal_url)?;
run_single_root_parallel_audit_inner(
store,
policy,
@ -752,8 +881,12 @@ where
H: Fetcher + Clone + 'static,
R: crate::fetch::rsync::RsyncFetcher + Clone + 'static,
{
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
let derived_tal_id = derive_tal_id(&discovery);
let tal_inputs = vec![TalInputSpec {
tal_id: derived_tal_id.clone(),
@ -823,8 +956,12 @@ pub fn run_tree_from_tal_and_ta_der_serial(
validation_time: time::OffsetDateTime,
config: &TreeRunConfig,
) -> Result<RunTreeFromTalOutput, RunTreeFromTalError> {
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
let runner = Rpkiv1PublicationPointRunner {
store,
@ -916,6 +1053,7 @@ pub fn run_tree_from_tal_bytes_serial_audit(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -987,6 +1125,7 @@ pub fn run_tree_from_tal_bytes_serial_audit_with_timing(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -1007,8 +1146,12 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit(
validation_time: time::OffsetDateTime,
config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
let download_log = DownloadLogHandle::new();
let runner = Rpkiv1PublicationPointRunner {
@ -1049,6 +1192,7 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -1071,8 +1215,12 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit_with_timing(
timing: &TimingHandle,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let _tal = timing.span_phase("tal_bootstrap");
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
drop(_tal);
let download_log = DownloadLogHandle::new();
@ -1115,6 +1263,7 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit_with_timing(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -1135,8 +1284,12 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial(
validation_time: time::OffsetDateTime,
config: &TreeRunConfig,
) -> Result<RunTreeFromTalOutput, RunTreeFromTalError> {
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(
payload_archive_root,
@ -1192,8 +1345,12 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
validation_time: time::OffsetDateTime,
config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(
payload_archive_root,
@ -1244,6 +1401,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -1266,8 +1424,12 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing(
timing: &TimingHandle,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let _tal = timing.span_phase("tal_bootstrap");
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
drop(_tal);
let replay_index = Arc::new(
ReplayArchiveIndex::load_allow_missing_rsync_modules(
@ -1320,6 +1482,7 @@ pub fn run_tree_from_tal_and_ta_der_payload_replay_serial_audit_with_timing(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -1536,6 +1699,7 @@ fn run_payload_delta_replay_audit_inner(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -1559,8 +1723,12 @@ pub fn run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
validation_time: time::OffsetDateTime,
config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
run_payload_delta_replay_audit_inner(
store,
policy,
@ -1592,8 +1760,12 @@ pub fn run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit_with_timin
timing: &TimingHandle,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let _tal = timing.span_phase("tal_bootstrap");
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
drop(_tal);
run_payload_delta_replay_audit_inner(
store,
@ -1681,6 +1853,7 @@ fn run_payload_delta_replay_step_audit_inner(
Ok(RunTreeFromTalAuditOutput {
discovery: discovery.clone(),
discoveries: vec![discovery],
successful_tal_inputs: Vec::new(),
tree,
publication_points,
downloads,
@ -1702,8 +1875,12 @@ pub fn run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit(
validation_time: time::OffsetDateTime,
config: &TreeRunConfig,
) -> Result<RunTreeFromTalAuditOutput, RunTreeFromTalError> {
let discovery =
discover_root_ca_instance_from_tal_and_ta_der(tal_bytes, ta_der, resolved_ta_uri)?;
let discovery = discover_root_ca_instance_from_tal_and_ta_der_with_policy(
policy,
tal_bytes,
ta_der,
resolved_ta_uri,
)?;
run_payload_delta_replay_step_audit_inner(
store,
policy,
@ -1790,6 +1967,7 @@ mod multi_tal_tests {
&tal_inputs,
&RejectingHttpFetcher,
&RejectingRsyncFetcher,
false,
)
.expect("discover roots");
@ -1803,6 +1981,54 @@ mod multi_tal_tests {
roots[1].root_handle.manifest_rsync_uri
);
}
#[test]
fn discover_multiple_roots_isolates_strict_name_failure() {
    // Strict-mode discovery over two TALs: the APNIC fixture trips strict
    // validation while the ARIN fixture is clean. The failing TA must be
    // isolated instead of aborting the whole multi-TAL run.
    let read_fixture = |path: &str, ctx: &str| std::fs::read(path).expect(ctx);
    let inputs = vec![
        TalInputSpec::from_ta_der(
            "https://example.test/apnic.tal",
            read_fixture("tests/fixtures/tal/apnic-rfc7730-https.tal", "read apnic tal"),
            read_fixture("tests/fixtures/ta/apnic-ta.cer", "read apnic ta"),
        ),
        TalInputSpec::from_ta_der(
            "https://example.test/arin.tal",
            read_fixture("tests/fixtures/tal/arin.tal", "read arin tal"),
            read_fixture("tests/fixtures/ta/arin-ta.cer", "read arin ta"),
        ),
    ];
    let roots = discover_multiple_roots_from_tal_inputs(
        &inputs,
        &RejectingHttpFetcher,
        &RejectingRsyncFetcher,
        true, // strict policies enabled
    )
    .expect("strict discovery should keep usable roots");
    // Only the ARIN root survives; the strict-failing APNIC root is dropped.
    assert_eq!(roots.len(), 1);
    assert_eq!(roots[0].tal_input.tal_id, "arin");
    assert_eq!(roots[0].root_handle.tal_id, "arin");
}
#[test]
fn discover_single_root_keeps_strict_name_failure_fatal() {
    // With a single TAL configured there is nothing left to validate once it
    // fails, so a strict Name failure must surface as a hard error rather
    // than being isolated away.
    let tal_bytes =
        std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal").expect("read apnic tal");
    let ta_bytes = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta");
    let inputs = vec![TalInputSpec::from_ta_der(
        "https://example.test/apnic.tal",
        tal_bytes,
        ta_bytes,
    )];
    let error = discover_multiple_roots_from_tal_inputs(
        &inputs,
        &RejectingHttpFetcher,
        &RejectingRsyncFetcher,
        true, // strict policies enabled
    )
    .expect_err("single-TAL strict failure should remain fatal");
    // The error message should carry the strict Name validation cause.
    assert!(error.to_string().contains("Name strict validation failed"));
}
}
#[cfg(test)]

View File

@ -858,6 +858,7 @@ fn stage_ready_publication_point(
match prepare_publication_point_for_parallel_roa(
ready.node.id,
&fresh_stage.fresh_point,
runner.policy,
&ready.node.handle.ca_certificate_der,
ready.node.handle.ca_certificate_rsync_uri.as_deref(),
ready.node.handle.effective_ip_resources.as_ref(),