将fetch pp cache改成使用vcir结构,跑通apnic全量同步

This commit is contained in:
yuyr 2026-03-13 14:45:41 +08:00
parent e3339533b8
commit cf764c35bb
53 changed files with 8931 additions and 2201 deletions

View File

@ -24,7 +24,7 @@ They are meant for **hands-on validation / acceptance runs**, not for CI.
- run meta JSON (includes durations + download_stats) - run meta JSON (includes durations + download_stats)
- short summary Markdown (includes durations + download_stats) - short summary Markdown (includes durations + download_stats)
- RocksDB key statistics (`db_stats --exact`) - RocksDB key statistics (`db_stats --exact`)
- RRDP repo state dump (`rrdp_state_dump`) - RRDP legacy session/serial dump (`rrdp_state_dump --view legacy-state`)
### `delta_sync.sh` ### `delta_sync.sh`
@ -33,7 +33,7 @@ They are meant for **hands-on validation / acceptance runs**, not for CI.
- Produces the same artifacts as `full_sync.sh` - Produces the same artifacts as `full_sync.sh`
- Additionally generates a Markdown **delta analysis** report by comparing: - Additionally generates a Markdown **delta analysis** report by comparing:
- base vs delta report JSON - base vs delta report JSON
- base vs delta `rrdp_state_dump` TSV - base vs delta `rrdp_state_dump --view legacy-state` TSV
- and includes a **duration comparison** (base vs delta) if the base meta JSON is available - and includes a **duration comparison** (base vs delta) if the base meta JSON is available
- delta meta JSON includes download_stats copied from delta report JSON - delta meta JSON includes download_stats copied from delta report JSON
@ -47,6 +47,9 @@ The `rpki` binary writes an audit report JSON with:
These are useful for diagnosing why a run is slow (e.g. RRDP snapshot vs delta vs rsync fallback). These are useful for diagnosing why a run is slow (e.g. RRDP snapshot vs delta vs rsync fallback).
The standalone `rrdp_state_dump` tool also supports `source`, `members`, `owners`, and `all` views.
The manual sync scripts intentionally call `--view legacy-state` so delta analysis keeps using a stable session/serial TSV format.
## Meta fields (meta.json) ## Meta fields (meta.json)
The scripts generate `*_meta.json` next to `*_report.json` and include: The scripts generate `*_meta.json` next to `*_report.json` and include:

View File

@ -16,7 +16,7 @@ set -euo pipefail
# - *_delta_report_*.json audit report # - *_delta_report_*.json audit report
# - *_delta_run_*.log stdout/stderr log (includes summary) # - *_delta_run_*.log stdout/stderr log (includes summary)
# - *_delta_db_stats_*.txt db_stats --exact output # - *_delta_db_stats_*.txt db_stats --exact output
# - *_delta_rrdp_state_*.tsv rrdp_state_dump output # - *_delta_rrdp_state_*.tsv rrdp_state_dump --view legacy-state output
# - *_delta_analysis_*.md base vs delta comparison report # - *_delta_analysis_*.md base vs delta comparison report
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
@ -113,10 +113,10 @@ cargo run --release --bin db_stats -- --db "$DELTA_DB_DIR" --exact 2>&1 | tee "$
db_stats_end_s="$(date +%s)" db_stats_end_s="$(date +%s)"
db_stats_duration_s="$((db_stats_end_s - db_stats_start_s))" db_stats_duration_s="$((db_stats_end_s - db_stats_start_s))"
echo "== rrdp_state_dump ==" >&2 echo "== rrdp_state_dump (legacy-state) ==" >&2
state_start_s="$(date +%s)" state_start_s="$(date +%s)"
cargo run --release --bin rrdp_state_dump -- --db "$BASE_DB_DIR" >"$BASE_RRDP_STATE_TSV" cargo run --release --bin rrdp_state_dump -- --db "$BASE_DB_DIR" --view legacy-state >"$BASE_RRDP_STATE_TSV"
cargo run --release --bin rrdp_state_dump -- --db "$DELTA_DB_DIR" >"$DELTA_RRDP_STATE_TSV" cargo run --release --bin rrdp_state_dump -- --db "$DELTA_DB_DIR" --view legacy-state >"$DELTA_RRDP_STATE_TSV"
state_end_s="$(date +%s)" state_end_s="$(date +%s)"
state_duration_s="$((state_end_s - state_start_s))" state_duration_s="$((state_end_s - state_start_s))"
@ -174,12 +174,18 @@ def load_optional_json(path_s: str):
return load_json(p) return load_json(p)
def parse_rrdp_state_tsv(p: Path): def parse_rrdp_state_tsv(p: Path):
# format: "<notify_uri>\t<serial>\t<session_id>" # format from `rrdp_state_dump --view legacy-state`:
# [legacy-state]
# notify_uri serial session_id
# <notify_uri> <serial> <session_id>
out = {} out = {}
for line in p.read_text(encoding="utf-8").splitlines(): for line in p.read_text(encoding="utf-8").splitlines():
if not line.strip(): line = line.strip()
if not line or line.startswith("["):
continue continue
parts = line.split("\t") if line == "notify_uri serial session_id":
continue
parts = line.split(" ")
if len(parts) != 3: if len(parts) != 3:
raise SystemExit(f"invalid rrdp_state_dump line in {p}: {line!r}") raise SystemExit(f"invalid rrdp_state_dump line in {p}: {line!r}")
uri, serial, session = parts uri, serial, session = parts
@ -339,7 +345,7 @@ delta_repo_sync_failed = count_repo_sync_failed(delta)
def cache_reason_counts(rep: dict) -> Counter: def cache_reason_counts(rep: dict) -> Counter:
c = Counter() c = Counter()
for pp in rep.get("publication_points", []): for pp in rep.get("publication_points", []):
if pp.get("source") != "fetch_cache_pp": if pp.get("source") != "vcir_current_instance":
continue continue
# Use warning messages as "reason". If missing, emit a fallback bucket. # Use warning messages as "reason". If missing, emit a fallback bucket.
ws = pp.get("warnings", []) ws = pp.get("warnings", [])
@ -378,11 +384,33 @@ added_v = delta_v - base_v
removed_v = base_v - delta_v removed_v = base_v - delta_v
def fmt_db_stats(db: dict) -> str: def fmt_db_stats(db: dict) -> str:
keys = ["raw_objects","rrdp_object_index","fetch_cache_pp","rrdp_state","total"] ordered = [
"mode",
"repository_view",
"raw_by_hash",
"vcir",
"audit_rule_index",
"rrdp_source",
"rrdp_source_member",
"rrdp_uri_owner",
"rrdp_state",
"raw_objects",
"rrdp_object_index",
"group_current_repository_view",
"group_current_validation_state",
"group_current_rrdp_state",
"group_legacy_compatibility",
"total",
"sst_files",
]
out = [] out = []
for k in keys: seen = set()
for k in ordered:
if k in db: if k in db:
out.append(f"- `{k}={db[k]}`") out.append(f"- `{k}={db[k]}`")
seen.add(k)
for k in sorted(set(db) - seen):
out.append(f"- `{k}={db[k]}`")
return "\n".join(out) if out else "_(missing db_stats keys)_" return "\n".join(out) if out else "_(missing db_stats keys)_"
lines = [] lines = []
@ -459,7 +487,7 @@ lines.append(f"- `updated_pp={updated_pp}`\n")
lines.append(f"- `unchanged_pp={unchanged_pp}`\n\n") lines.append(f"- `unchanged_pp={unchanged_pp}`\n\n")
lines.append("> 注:`new_pp/missing_pp/updated_pp` 会混入“遍历范围变化”的影响(例如 validation_time 不同、或 base 中存在更多失败 PP。\n\n") lines.append("> 注:`new_pp/missing_pp/updated_pp` 会混入“遍历范围变化”的影响(例如 validation_time 不同、或 base 中存在更多失败 PP。\n\n")
lines.append("## fail fetch / cache 使用情况\n\n") lines.append("## fail fetch / VCIR 当前实例缓存复用情况\n\n")
lines.append(f"- repo sync failed启发式warning contains 'repo sync failed'/'rrdp fetch failed'/'rsync fetch failed'\n") lines.append(f"- repo sync failed启发式warning contains 'repo sync failed'/'rrdp fetch failed'/'rsync fetch failed'\n")
lines.append(f" - base`{base_repo_sync_failed}`\n") lines.append(f" - base`{base_repo_sync_failed}`\n")
lines.append(f" - delta`{delta_repo_sync_failed}`\n\n") lines.append(f" - delta`{delta_repo_sync_failed}`\n\n")
@ -470,7 +498,7 @@ lines.append(f" - delta`{dict(delta_sources)}`\n\n")
def render_cache_reasons(title: str, c: Counter) -> str: def render_cache_reasons(title: str, c: Counter) -> str:
if not c: if not c:
return f"{title}`0`(未使用 fetch_cache_pp\n\n" return f"{title}`0`(未使用 VCIR 当前实例缓存复用\n\n"
lines = [] lines = []
total = sum(c.values()) total = sum(c.values())
lines.append(f"{title}`{total}`\n\n") lines.append(f"{title}`{total}`\n\n")
@ -480,8 +508,8 @@ def render_cache_reasons(title: str, c: Counter) -> str:
lines.append("\n") lines.append("\n")
return "".join(lines) return "".join(lines)
lines.append(render_cache_reasons("- base `source=fetch_cache_pp`", base_cache_reasons)) lines.append(render_cache_reasons("- base `source=vcir_current_instance`", base_cache_reasons))
lines.append(render_cache_reasons("- delta `source=fetch_cache_pp`", delta_cache_reasons)) lines.append(render_cache_reasons("- delta `source=vcir_current_instance`", delta_cache_reasons))
lines.append("## 文件变更统计(按对象类型)\n\n") lines.append("## 文件变更统计(按对象类型)\n\n")
lines.append("按 `ObjectAuditEntry.sha256_hex` 对比(同一 rsync URI 前后 hash 变化记为 `~changed`\n\n") lines.append("按 `ObjectAuditEntry.sha256_hex` 对比(同一 rsync URI 前后 hash 变化记为 `~changed`\n\n")

View File

@ -8,7 +8,7 @@ set -euo pipefail
# - *_report_*.json audit report # - *_report_*.json audit report
# - *_run_*.log stdout/stderr log (includes summary) # - *_run_*.log stdout/stderr log (includes summary)
# - *_db_stats_*.txt db_stats --exact output # - *_db_stats_*.txt db_stats --exact output
# - *_rrdp_state_*.tsv rrdp_state_dump output # - *_rrdp_state_*.tsv rrdp_state_dump --view legacy-state output
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR" cd "$ROOT_DIR"
@ -71,9 +71,9 @@ cargo run --release --bin db_stats -- --db "$DB_DIR" --exact 2>&1 | tee "$DB_STA
db_stats_end_s="$(date +%s)" db_stats_end_s="$(date +%s)"
db_stats_duration_s="$((db_stats_end_s - db_stats_start_s))" db_stats_duration_s="$((db_stats_end_s - db_stats_start_s))"
echo "== rrdp_state_dump ==" >&2 echo "== rrdp_state_dump (legacy-state) ==" >&2
state_start_s="$(date +%s)" state_start_s="$(date +%s)"
cargo run --release --bin rrdp_state_dump -- --db "$DB_DIR" >"$RRDP_STATE_TSV" cargo run --release --bin rrdp_state_dump -- --db "$DB_DIR" --view legacy-state >"$RRDP_STATE_TSV"
state_end_s="$(date +%s)" state_end_s="$(date +%s)"
state_duration_s="$((state_end_s - state_start_s))" state_duration_s="$((state_end_s - state_start_s))"

View File

@ -39,14 +39,17 @@ impl DownloadLogHandle {
} }
.to_string(); .to_string();
let st = out.by_kind.entry(kind_key).or_insert_with(|| AuditDownloadKindStats { let st = out
ok_total: 0, .by_kind
fail_total: 0, .entry(kind_key)
duration_ms_total: 0, .or_insert_with(|| AuditDownloadKindStats {
bytes_total: None, ok_total: 0,
objects_count_total: None, fail_total: 0,
objects_bytes_total: None, duration_ms_total: 0,
}); bytes_total: None,
objects_count_total: None,
objects_bytes_total: None,
});
if e.success { if e.success {
st.ok_total = st.ok_total.saturating_add(1); st.ok_total = st.ok_total.saturating_add(1);
} else { } else {
@ -165,4 +168,3 @@ fn duration_to_ms(d: Duration) -> u64 {
let ms = d.as_millis(); let ms = d.as_millis();
ms.min(u128::from(u64::MAX)) as u64 ms.min(u128::from(u64::MAX)) as u64
} }

581
src/audit_trace.rs Normal file
View File

@ -0,0 +1,581 @@
use crate::storage::{
AuditRuleIndexEntry, AuditRuleKind, RawByHashEntry, RocksStore, ValidatedCaInstanceResult,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirLocalOutput,
VcirOutputType,
};
use serde::Serialize;
use std::collections::HashSet;
/// Errors that can occur while tracing an audit rule or a VCIR chain.
#[derive(Debug, thiserror::Error)]
pub enum AuditTraceError {
    /// Underlying storage failure; converted automatically via `#[from]`.
    #[error("storage error: {0}")]
    Storage(#[from] crate::storage::StorageError),
    /// The audit rule index referenced a manifest whose VCIR is not in the store.
    #[error("audit rule index points to missing VCIR: {manifest_rsync_uri}")]
    MissingVcir { manifest_rsync_uri: String },
    /// The rule index referenced an output the leaf VCIR does not contain.
    #[error(
        "audit rule index points to missing local output: rule_hash={rule_hash}, output_id={output_id}, manifest={manifest_rsync_uri}"
    )]
    MissingLocalOutput {
        rule_hash: String,
        output_id: String,
        manifest_rsync_uri: String,
    },
    /// Walking `parent_manifest_rsync_uri` links revisited a manifest URI.
    #[error("detected VCIR parent cycle at {manifest_rsync_uri}")]
    ParentCycle { manifest_rsync_uri: String },
}
/// Serializable reference to raw object bytes stored by SHA-256 hash.
///
/// `raw_present` records whether the bytes were actually found; the
/// remaining metadata fields are populated only when the entry exists.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditTraceRawRef {
    /// Hex-encoded SHA-256 of the referenced object.
    pub sha256_hex: String,
    /// True when the raw entry was found in the store.
    pub raw_present: bool,
    /// URIs the object is known from; omitted from JSON when empty.
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub origin_uris: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub object_type: Option<String>,
    /// Length of the stored raw bytes, when present.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub byte_len: Option<usize>,
}
/// One artifact attached to a VCIR chain node, with its resolved raw reference.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditTraceArtifact {
    pub artifact_role: VcirArtifactRole,
    pub artifact_kind: VcirArtifactKind,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub uri: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub object_type: Option<String>,
    pub validation_status: VcirArtifactValidationStatus,
    /// Raw-bytes lookup result for the artifact's SHA-256.
    pub raw: AuditTraceRawRef,
}
/// One CA instance in a leaf-to-root trace chain.
///
/// A flattened, serialization-friendly projection of a stored VCIR:
/// identity fields are copied through, while local outputs and children
/// are summarized as counts and artifacts carry resolved raw references.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditTraceChainNode {
    pub manifest_rsync_uri: String,
    /// `None` marks the root of the chain.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parent_manifest_rsync_uri: Option<String>,
    pub tal_id: String,
    pub ca_subject_name: String,
    pub ca_ski: String,
    pub issuer_ski: String,
    pub current_manifest_rsync_uri: String,
    pub current_crl_rsync_uri: String,
    pub last_successful_validation_time_rfc3339_utc: String,
    /// Number of local outputs (VRPs/ASPAs/etc.) held by this instance.
    pub local_output_count: usize,
    /// Number of accepted child CA entries.
    pub child_count: usize,
    pub related_artifacts: Vec<AuditTraceArtifact>,
}
/// The local output resolved for a traced rule, copied from the leaf VCIR.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditTraceResolvedOutput {
    pub output_id: String,
    pub output_type: VcirOutputType,
    pub rule_hash: String,
    pub source_object_uri: String,
    pub source_object_type: String,
    pub source_object_hash: String,
    pub source_ee_cert_hash: String,
    pub item_effective_until_rfc3339_utc: String,
    /// Output payload, pre-serialized as a JSON string.
    pub payload_json: String,
    pub validation_path_hint: Vec<String>,
}
/// Full evidence trace for one audit rule: the index entry, the resolved
/// output, raw references for the source object and its EE cert, and the
/// VCIR chain walked from the leaf CA up to the root.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditRuleTrace {
    pub rule: AuditRuleIndexEntry,
    pub resolved_output: AuditTraceResolvedOutput,
    pub source_object_raw: AuditTraceRawRef,
    pub source_ee_cert_raw: AuditTraceRawRef,
    /// Chain nodes ordered leaf first, root last.
    pub chain_leaf_to_root: Vec<AuditTraceChainNode>,
}
/// Resolves an audit-rule index entry into a full evidence trace.
///
/// Looks up the `(kind, rule_hash)` index entry, locates the matching
/// local output inside the leaf VCIR, and walks the parent chain to the
/// root. Returns `Ok(None)` when no index entry exists for the rule.
///
/// # Errors
/// `MissingVcir` when the index points at an unstored VCIR,
/// `MissingLocalOutput` when the referenced output is absent from the
/// leaf VCIR, plus any storage or cycle error from the chain walk.
pub fn trace_rule_to_root(
    store: &RocksStore,
    kind: AuditRuleKind,
    rule_hash: &str,
) -> Result<Option<AuditRuleTrace>, AuditTraceError> {
    let rule = match store.get_audit_rule_index_entry(kind, rule_hash)? {
        Some(entry) => entry,
        None => return Ok(None),
    };
    let leaf_vcir = store.get_vcir(&rule.manifest_rsync_uri)?.ok_or_else(|| {
        AuditTraceError::MissingVcir {
            manifest_rsync_uri: rule.manifest_rsync_uri.clone(),
        }
    })?;
    // Prefer an exact (output_id, rule_hash) match; fall back to the first
    // output that carries the same rule_hash.
    let exact_match = leaf_vcir
        .local_outputs
        .iter()
        .find(|output| output.output_id == rule.output_id && output.rule_hash == rule.rule_hash);
    let rule_hash_match = || {
        leaf_vcir
            .local_outputs
            .iter()
            .find(|output| output.rule_hash == rule.rule_hash)
    };
    let local_output = match exact_match.or_else(rule_hash_match).cloned() {
        Some(output) => output,
        None => {
            return Err(AuditTraceError::MissingLocalOutput {
                rule_hash: rule.rule_hash.clone(),
                output_id: rule.output_id.clone(),
                manifest_rsync_uri: rule.manifest_rsync_uri.clone(),
            });
        }
    };
    let chain = trace_vcir_chain_to_root(store, &leaf_vcir.manifest_rsync_uri)?
        .expect("leaf VCIR already loaded must exist");
    Ok(Some(AuditRuleTrace {
        rule,
        resolved_output: resolved_output_from_local(&local_output),
        source_object_raw: resolve_raw_ref(store, &local_output.source_object_hash)?,
        source_ee_cert_raw: resolve_raw_ref(store, &local_output.source_ee_cert_hash)?,
        chain_leaf_to_root: chain,
    }))
}
/// Walks `parent_manifest_rsync_uri` links from `manifest_rsync_uri` to the
/// root, producing one chain node per CA instance (leaf first).
///
/// Returns `Ok(None)` when the starting manifest has no stored VCIR.
///
/// # Errors
/// `ParentCycle` when a manifest URI repeats along the parent links, and
/// `MissingVcir` when a referenced parent VCIR is absent from the store.
pub fn trace_vcir_chain_to_root(
    store: &RocksStore,
    manifest_rsync_uri: &str,
) -> Result<Option<Vec<AuditTraceChainNode>>, AuditTraceError> {
    let mut node = match store.get_vcir(manifest_rsync_uri)? {
        Some(vcir) => vcir,
        None => return Ok(None),
    };
    let mut visited = HashSet::new();
    let mut chain_leaf_to_root = Vec::new();
    loop {
        // A repeated manifest URI means the parent links form a cycle.
        if !visited.insert(node.manifest_rsync_uri.clone()) {
            return Err(AuditTraceError::ParentCycle {
                manifest_rsync_uri: node.manifest_rsync_uri,
            });
        }
        // Capture the parent link before `node` is borrowed for rendering.
        let next_parent = node.parent_manifest_rsync_uri.clone();
        chain_leaf_to_root.push(trace_chain_node(store, &node)?);
        match next_parent {
            None => break,
            Some(parent_uri) => match store.get_vcir(&parent_uri)? {
                Some(parent_vcir) => node = parent_vcir,
                None => {
                    return Err(AuditTraceError::MissingVcir {
                        manifest_rsync_uri: parent_uri,
                    });
                }
            },
        }
    }
    Ok(Some(chain_leaf_to_root))
}
/// Renders one stored VCIR as a trace chain node, resolving the raw
/// reference for every related artifact.
///
/// # Errors
/// Propagates storage errors from the raw-bytes lookups.
fn trace_chain_node(
    store: &RocksStore,
    vcir: &ValidatedCaInstanceResult,
) -> Result<AuditTraceChainNode, AuditTraceError> {
    let related_artifacts = vcir
        .related_artifacts
        .iter()
        .map(|artifact| {
            Ok(AuditTraceArtifact {
                artifact_role: artifact.artifact_role,
                artifact_kind: artifact.artifact_kind,
                uri: artifact.uri.clone(),
                object_type: artifact.object_type.clone(),
                validation_status: artifact.validation_status,
                // The only fallible step: look up the raw bytes by hash.
                raw: resolve_raw_ref(store, &artifact.sha256)?,
            })
        })
        .collect::<Result<Vec<_>, AuditTraceError>>()?;
    let last_validated = vcir.last_successful_validation_time.rfc3339_utc.clone();
    Ok(AuditTraceChainNode {
        manifest_rsync_uri: vcir.manifest_rsync_uri.clone(),
        parent_manifest_rsync_uri: vcir.parent_manifest_rsync_uri.clone(),
        tal_id: vcir.tal_id.clone(),
        ca_subject_name: vcir.ca_subject_name.clone(),
        ca_ski: vcir.ca_ski.clone(),
        issuer_ski: vcir.issuer_ski.clone(),
        current_manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(),
        current_crl_rsync_uri: vcir.current_crl_rsync_uri.clone(),
        last_successful_validation_time_rfc3339_utc: last_validated,
        local_output_count: vcir.local_outputs.len(),
        child_count: vcir.child_entries.len(),
        related_artifacts,
    })
}
/// Converts a stored VCIR local output into its serializable trace form.
/// Pure field-by-field projection; timestamps are flattened to their
/// RFC 3339 UTC string representation.
fn resolved_output_from_local(local: &VcirLocalOutput) -> AuditTraceResolvedOutput {
    AuditTraceResolvedOutput {
        output_id: local.output_id.clone(),
        output_type: local.output_type,
        rule_hash: local.rule_hash.clone(),
        source_object_uri: local.source_object_uri.clone(),
        source_object_type: local.source_object_type.clone(),
        source_object_hash: local.source_object_hash.clone(),
        source_ee_cert_hash: local.source_ee_cert_hash.clone(),
        item_effective_until_rfc3339_utc: local.item_effective_until.rfc3339_utc.clone(),
        payload_json: local.payload_json.clone(),
        validation_path_hint: local.validation_path_hint.clone(),
    }
}
fn resolve_raw_ref(
store: &RocksStore,
sha256_hex: &str,
) -> Result<AuditTraceRawRef, AuditTraceError> {
let raw = store.get_raw_by_hash_entry(sha256_hex)?;
Ok(raw_ref_from_entry(sha256_hex, raw.as_ref()))
}
/// Builds an `AuditTraceRawRef` from an optional stored entry: metadata is
/// copied through when the entry exists, otherwise an absent-marker
/// reference is returned.
fn raw_ref_from_entry(sha256_hex: &str, entry: Option<&RawByHashEntry>) -> AuditTraceRawRef {
    let sha256_hex = sha256_hex.to_string();
    if let Some(found) = entry {
        AuditTraceRawRef {
            sha256_hex,
            raw_present: true,
            origin_uris: found.origin_uris.clone(),
            object_type: found.object_type.clone(),
            byte_len: Some(found.bytes.len()),
        }
    } else {
        AuditTraceRawRef {
            sha256_hex,
            raw_present: false,
            origin_uris: Vec::new(),
            object_type: None,
            byte_len: None,
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::audit::sha256_hex;
use crate::storage::{
PackTime, ValidatedManifestMeta, VcirAuditSummary, VcirChildEntry, VcirInstanceGate,
VcirRelatedArtifact, VcirSummary,
};
    /// Builds a fully-populated VCIR fixture for `manifest_rsync_uri`,
    /// optionally linked to a parent manifest and carrying at most one
    /// local output plus the supplied related artifacts.
    fn sample_vcir(
        manifest_rsync_uri: &str,
        parent_manifest_rsync_uri: Option<&str>,
        tal_id: &str,
        local_output: Option<VcirLocalOutput>,
        related_artifacts: Vec<VcirRelatedArtifact>,
    ) -> ValidatedCaInstanceResult {
        let now = time::OffsetDateTime::now_utc();
        // Common "valid for one more hour" horizon used by all gate fields.
        let next = PackTime::from_utc_offset_datetime(now + time::Duration::hours(1));
        // Option -> Vec: zero or one local output.
        let local_outputs: Vec<VcirLocalOutput> = local_output.into_iter().collect();
        ValidatedCaInstanceResult {
            manifest_rsync_uri: manifest_rsync_uri.to_string(),
            parent_manifest_rsync_uri: parent_manifest_rsync_uri.map(str::to_string),
            tal_id: tal_id.to_string(),
            ca_subject_name: format!("CN={manifest_rsync_uri}"),
            ca_ski: "11".repeat(20),
            issuer_ski: "22".repeat(20),
            last_successful_validation_time: PackTime::from_utc_offset_datetime(now),
            current_manifest_rsync_uri: manifest_rsync_uri.to_string(),
            current_crl_rsync_uri: manifest_rsync_uri.replace(".mft", ".crl"),
            validated_manifest_meta: ValidatedManifestMeta {
                validated_manifest_number: vec![1],
                validated_manifest_this_update: PackTime::from_utc_offset_datetime(now),
                validated_manifest_next_update: next.clone(),
            },
            instance_gate: VcirInstanceGate {
                manifest_next_update: next.clone(),
                current_crl_next_update: next.clone(),
                self_ca_not_after: PackTime::from_utc_offset_datetime(
                    now + time::Duration::hours(2),
                ),
                instance_effective_until: next,
            },
            // Single synthetic child entry (child_count below is fixed at 1).
            child_entries: vec![VcirChildEntry {
                child_manifest_rsync_uri: "rsync://example.test/child/child.mft".to_string(),
                child_cert_rsync_uri: "rsync://example.test/parent/child.cer".to_string(),
                child_cert_hash: sha256_hex(b"child-cert"),
                child_ski: "33".repeat(20),
                child_rsync_base_uri: "rsync://example.test/child/".to_string(),
                child_publication_point_rsync_uri: "rsync://example.test/child/".to_string(),
                child_rrdp_notification_uri: Some(
                    "https://example.test/child/notify.xml".to_string(),
                ),
                child_effective_ip_resources: None,
                child_effective_as_resources: None,
                accepted_at_validation_time: PackTime::from_utc_offset_datetime(now),
            }],
            // Summary counts are derived from the inputs so they stay consistent.
            summary: VcirSummary {
                local_vrp_count: local_outputs
                    .iter()
                    .filter(|output| output.output_type == VcirOutputType::Vrp)
                    .count() as u32,
                local_aspa_count: local_outputs
                    .iter()
                    .filter(|output| output.output_type == VcirOutputType::Aspa)
                    .count() as u32,
                child_count: 1,
                accepted_object_count: related_artifacts.len() as u32,
                rejected_object_count: 0,
            },
            local_outputs,
            related_artifacts,
            audit_summary: VcirAuditSummary {
                failed_fetch_eligible: true,
                last_failed_fetch_reason: None,
                warning_count: 0,
                audit_flags: Vec::new(),
            },
        }
    }
    /// Builds a single VRP-type local output fixture whose validation path
    /// hint starts at `manifest_rsync_uri`.
    fn sample_local_output(manifest_rsync_uri: &str) -> VcirLocalOutput {
        let now = time::OffsetDateTime::now_utc();
        VcirLocalOutput {
            output_id: sha256_hex(b"vrp-output"),
            output_type: VcirOutputType::Vrp,
            item_effective_until: PackTime::from_utc_offset_datetime(
                now + time::Duration::minutes(30),
            ),
            source_object_uri: "rsync://example.test/leaf/a.roa".to_string(),
            source_object_type: "roa".to_string(),
            source_object_hash: sha256_hex(b"roa-raw"),
            source_ee_cert_hash: sha256_hex(b"roa-ee"),
            payload_json:
                serde_json::json!({"asn": 64496, "prefix": "203.0.113.0/24", "max_length": 24})
                    .to_string(),
            rule_hash: sha256_hex(b"roa-rule"),
            // Hint: manifest -> source object URI -> source object hash.
            validation_path_hint: vec![
                manifest_rsync_uri.to_string(),
                "rsync://example.test/leaf/a.roa".to_string(),
                sha256_hex(b"roa-raw"),
            ],
        }
    }
    /// Builds the standard artifact set for a CA instance: its manifest,
    /// the derived CRL, and one signed ROA carrying `roa_hash`.
    fn sample_artifacts(manifest_rsync_uri: &str, roa_hash: &str) -> Vec<VcirRelatedArtifact> {
        vec![
            VcirRelatedArtifact {
                artifact_role: VcirArtifactRole::Manifest,
                artifact_kind: VcirArtifactKind::Mft,
                uri: Some(manifest_rsync_uri.to_string()),
                sha256: sha256_hex(manifest_rsync_uri.as_bytes()),
                object_type: Some("mft".to_string()),
                validation_status: VcirArtifactValidationStatus::Accepted,
            },
            VcirRelatedArtifact {
                artifact_role: VcirArtifactRole::CurrentCrl,
                artifact_kind: VcirArtifactKind::Crl,
                uri: Some(manifest_rsync_uri.replace(".mft", ".crl")),
                sha256: sha256_hex(format!("{}-crl", manifest_rsync_uri).as_bytes()),
                object_type: Some("crl".to_string()),
                validation_status: VcirArtifactValidationStatus::Accepted,
            },
            VcirRelatedArtifact {
                artifact_role: VcirArtifactRole::SignedObject,
                artifact_kind: VcirArtifactKind::Roa,
                uri: Some("rsync://example.test/leaf/a.roa".to_string()),
                sha256: roa_hash.to_string(),
                object_type: Some("roa".to_string()),
                validation_status: VcirArtifactValidationStatus::Accepted,
            },
        ]
    }
    /// Stores `bytes` as a raw-by-hash entry tagged with `uri` and
    /// `object_type`, panicking on storage failure (test helper).
    fn put_raw(store: &RocksStore, bytes: &[u8], uri: &str, object_type: &str) {
        let mut entry = RawByHashEntry::from_bytes(sha256_hex(bytes), bytes.to_vec());
        entry.origin_uris.push(uri.to_string());
        entry.object_type = Some(object_type.to_string());
        entry.encoding = Some("der".to_string());
        store
            .put_raw_by_hash_entry(&entry)
            .expect("put raw evidence");
    }
#[test]
fn trace_rule_to_root_returns_leaf_to_root_chain_and_evidence_refs() {
let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
let root_manifest = "rsync://example.test/root/root.mft";
let leaf_manifest = "rsync://example.test/leaf/leaf.mft";
let local = sample_local_output(leaf_manifest);
let leaf_vcir = sample_vcir(
leaf_manifest,
Some(root_manifest),
"test-tal",
Some(local.clone()),
sample_artifacts(leaf_manifest, &local.source_object_hash),
);
let root_vcir = sample_vcir(
root_manifest,
None,
"test-tal",
None,
sample_artifacts(root_manifest, &sha256_hex(b"root-object")),
);
store.put_vcir(&leaf_vcir).expect("put leaf vcir");
store.put_vcir(&root_vcir).expect("put root vcir");
let rule_entry = AuditRuleIndexEntry {
kind: AuditRuleKind::Roa,
rule_hash: local.rule_hash.clone(),
manifest_rsync_uri: leaf_manifest.to_string(),
source_object_uri: local.source_object_uri.clone(),
source_object_hash: local.source_object_hash.clone(),
output_id: local.output_id.clone(),
item_effective_until: local.item_effective_until.clone(),
};
store
.put_audit_rule_index_entry(&rule_entry)
.expect("put rule index");
put_raw(&store, leaf_manifest.as_bytes(), leaf_manifest, "mft");
put_raw(
&store,
format!("{}-crl", leaf_manifest).as_bytes(),
&leaf_manifest.replace(".mft", ".crl"),
"crl",
);
put_raw(&store, b"roa-raw", &local.source_object_uri, "roa");
put_raw(&store, b"roa-ee", "rsync://example.test/leaf/a.ee", "cer");
put_raw(&store, root_manifest.as_bytes(), root_manifest, "mft");
put_raw(
&store,
format!("{}-crl", root_manifest).as_bytes(),
&root_manifest.replace(".mft", ".crl"),
"crl",
);
let trace = trace_rule_to_root(&store, AuditRuleKind::Roa, &local.rule_hash)
.expect("trace rule")
.expect("trace exists");
assert_eq!(trace.rule, rule_entry);
assert_eq!(trace.resolved_output.output_id, local.output_id);
assert_eq!(trace.chain_leaf_to_root.len(), 2);
assert_eq!(
trace.chain_leaf_to_root[0].manifest_rsync_uri,
leaf_manifest
);
assert_eq!(
trace.chain_leaf_to_root[1].manifest_rsync_uri,
root_manifest
);
assert_eq!(
trace.chain_leaf_to_root[0]
.parent_manifest_rsync_uri
.as_deref(),
Some(root_manifest)
);
assert!(trace.source_object_raw.raw_present);
assert!(trace.source_ee_cert_raw.raw_present);
assert!(
trace.chain_leaf_to_root[0]
.related_artifacts
.iter()
.any(|artifact| {
artifact.uri.as_deref() == Some(leaf_manifest) && artifact.raw.raw_present
})
);
}
#[test]
fn trace_rule_to_root_returns_none_for_missing_rule_index() {
let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
assert!(
trace_rule_to_root(&store, AuditRuleKind::Roa, &sha256_hex(b"missing"))
.expect("missing trace ok")
.is_none()
);
}
#[test]
fn trace_rule_to_root_errors_when_index_points_to_missing_vcir() {
let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
let rule_hash = sha256_hex(b"missing-vcir-rule");
store
.put_audit_rule_index_entry(&AuditRuleIndexEntry {
kind: AuditRuleKind::Roa,
rule_hash: rule_hash.clone(),
manifest_rsync_uri: "rsync://example.test/missing.mft".to_string(),
source_object_uri: "rsync://example.test/missing.roa".to_string(),
source_object_hash: sha256_hex(b"missing-source"),
output_id: sha256_hex(b"missing-output"),
item_effective_until: PackTime::from_utc_offset_datetime(
time::OffsetDateTime::now_utc() + time::Duration::minutes(1),
),
})
.expect("put rule index");
let err = trace_rule_to_root(&store, AuditRuleKind::Roa, &rule_hash).unwrap_err();
assert!(matches!(
err,
AuditTraceError::MissingVcir { manifest_rsync_uri }
if manifest_rsync_uri == "rsync://example.test/missing.mft"
));
}
#[test]
fn trace_rule_to_root_errors_when_vcir_local_output_is_missing() {
let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
let manifest = "rsync://example.test/leaf/leaf.mft";
let vcir = sample_vcir(
manifest,
None,
"test-tal",
None,
sample_artifacts(manifest, &sha256_hex(b"leaf-object")),
);
store.put_vcir(&vcir).expect("put vcir");
let rule_hash = sha256_hex(b"missing-output-rule");
store
.put_audit_rule_index_entry(&AuditRuleIndexEntry {
kind: AuditRuleKind::Roa,
rule_hash: rule_hash.clone(),
manifest_rsync_uri: manifest.to_string(),
source_object_uri: "rsync://example.test/leaf/a.roa".to_string(),
source_object_hash: sha256_hex(b"leaf-object"),
output_id: sha256_hex(b"missing-output"),
item_effective_until: PackTime::from_utc_offset_datetime(
time::OffsetDateTime::now_utc() + time::Duration::minutes(1),
),
})
.expect("put rule index");
let err = trace_rule_to_root(&store, AuditRuleKind::Roa, &rule_hash).unwrap_err();
assert!(matches!(err, AuditTraceError::MissingLocalOutput { .. }));
}
#[test]
fn trace_vcir_chain_to_root_detects_parent_cycle() {
let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
let a_manifest = "rsync://example.test/a.mft";
let b_manifest = "rsync://example.test/b.mft";
let a_vcir = sample_vcir(
a_manifest,
Some(b_manifest),
"test-tal",
None,
sample_artifacts(a_manifest, &sha256_hex(b"a-object")),
);
let b_vcir = sample_vcir(
b_manifest,
Some(a_manifest),
"test-tal",
None,
sample_artifacts(b_manifest, &sha256_hex(b"b-object")),
);
store.put_vcir(&a_vcir).expect("put a");
store.put_vcir(&b_vcir).expect("put b");
let err = trace_vcir_chain_to_root(&store, a_manifest).unwrap_err();
assert!(matches!(
err,
AuditTraceError::ParentCycle { manifest_rsync_uri }
if manifest_rsync_uri == a_manifest
));
}
}

View File

@ -1,21 +1,36 @@
use std::collections::BTreeMap;
use std::path::PathBuf; use std::path::PathBuf;
use rocksdb::{ColumnFamilyDescriptor, DB, IteratorMode, Options}; use rocksdb::{DB, IteratorMode, Options};
use rpki::storage::{
ALL_COLUMN_FAMILY_NAMES, CF_AUDIT_RULE_INDEX, CF_RAW_BY_HASH, CF_RAW_OBJECTS,
CF_REPOSITORY_VIEW, CF_RRDP_OBJECT_INDEX, CF_RRDP_SOURCE, CF_RRDP_SOURCE_MEMBER, CF_RRDP_STATE,
CF_RRDP_URI_OWNER, CF_VCIR, column_family_descriptors,
};
const CF_RAW_OBJECTS: &str = "raw_objects"; #[derive(Clone, Copy, Debug, PartialEq, Eq)]
const CF_FETCH_CACHE_PP: &str = "fetch_cache_pp"; enum DbStatsMode {
const CF_RRDP_STATE: &str = "rrdp_state"; Estimate,
const CF_RRDP_OBJECT_INDEX: &str = "rrdp_object_index"; Exact,
}
fn enable_blobdb_if_supported(opts: &mut Options) { #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
// Keep this in sync with `rpki::storage`: enum CfGroup {
// blob files are CF-level options; readers should open CFs with blob enabled too. CurrentRepositoryView,
#[allow(dead_code)] CurrentValidationState,
fn _set(opts: &mut Options) { CurrentRrdpState,
opts.set_enable_blob_files(true); LegacyCompatibility,
opts.set_min_blob_size(1024); }
impl CfGroup {
fn as_str(self) -> &'static str {
match self {
Self::CurrentRepositoryView => "current_repository_view",
Self::CurrentValidationState => "current_validation_state",
Self::CurrentRrdpState => "current_rrdp_state",
Self::LegacyCompatibility => "legacy_compatibility",
}
} }
_set(opts);
} }
fn usage() -> String { fn usage() -> String {
@ -29,21 +44,16 @@ Options:
--db <path> RocksDB directory --db <path> RocksDB directory
--exact Iterate to count keys (slower; default uses RocksDB estimates) --exact Iterate to count keys (slower; default uses RocksDB estimates)
--help Show this help --help Show this help
Output groups:
- current_repository_view: repository_view + raw_by_hash
- current_validation_state: vcir + audit_rule_index
- current_rrdp_state: rrdp_state + rrdp_source + rrdp_source_member + rrdp_uri_owner
- legacy_compatibility: raw_objects + rrdp_object_index
" "
) )
} }
fn cf_descriptors() -> Vec<ColumnFamilyDescriptor> {
let mut cf_opts = Options::default();
enable_blobdb_if_supported(&mut cf_opts);
vec![
ColumnFamilyDescriptor::new(CF_RAW_OBJECTS, cf_opts.clone()),
ColumnFamilyDescriptor::new(CF_FETCH_CACHE_PP, cf_opts.clone()),
ColumnFamilyDescriptor::new(CF_RRDP_STATE, cf_opts.clone()),
ColumnFamilyDescriptor::new(CF_RRDP_OBJECT_INDEX, cf_opts),
]
}
fn estimate_keys(db: &DB, cf_name: &str) -> Result<Option<u64>, Box<dyn std::error::Error>> { fn estimate_keys(db: &DB, cf_name: &str) -> Result<Option<u64>, Box<dyn std::error::Error>> {
let cf = db let cf = db
.cf_handle(cf_name) .cf_handle(cf_name)
@ -64,6 +74,35 @@ fn exact_keys(db: &DB, cf_name: &str) -> Result<u64, Box<dyn std::error::Error>>
Ok(count) Ok(count)
} }
fn cf_group(cf_name: &str) -> CfGroup {
match cf_name {
CF_REPOSITORY_VIEW | CF_RAW_BY_HASH => CfGroup::CurrentRepositoryView,
CF_VCIR | CF_AUDIT_RULE_INDEX => CfGroup::CurrentValidationState,
CF_RRDP_STATE | CF_RRDP_SOURCE | CF_RRDP_SOURCE_MEMBER | CF_RRDP_URI_OWNER => {
CfGroup::CurrentRrdpState
}
CF_RAW_OBJECTS | CF_RRDP_OBJECT_INDEX => CfGroup::LegacyCompatibility,
_ => CfGroup::LegacyCompatibility,
}
}
fn summarize_counts<'a>(
counts: impl IntoIterator<Item = (&'a str, u64)>,
) -> BTreeMap<CfGroup, u64> {
let mut grouped = BTreeMap::new();
for (cf_name, count) in counts {
*grouped.entry(cf_group(cf_name)).or_insert(0) += count;
}
grouped
}
fn mode_label(mode: DbStatsMode) -> &'static str {
match mode {
DbStatsMode::Estimate => "estimate",
DbStatsMode::Exact => "exact",
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> { fn main() -> Result<(), Box<dyn std::error::Error>> {
let argv: Vec<String> = std::env::args().collect(); let argv: Vec<String> = std::env::args().collect();
if argv.iter().any(|a| a == "--help" || a == "-h") { if argv.iter().any(|a| a == "--help" || a == "-h") {
@ -72,7 +111,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
} }
let mut db_path: Option<PathBuf> = None; let mut db_path: Option<PathBuf> = None;
let mut exact = false; let mut mode = DbStatsMode::Estimate;
let mut i = 1usize; let mut i = 1usize;
while i < argv.len() { while i < argv.len() {
match argv[i].as_str() { match argv[i].as_str() {
@ -81,7 +120,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
let v = argv.get(i).ok_or("--db requires a value")?; let v = argv.get(i).ok_or("--db requires a value")?;
db_path = Some(PathBuf::from(v)); db_path = Some(PathBuf::from(v));
} }
"--exact" => exact = true, "--exact" => mode = DbStatsMode::Exact,
other => return Err(format!("unknown argument: {other}\n\n{}", usage()).into()), other => return Err(format!("unknown argument: {other}\n\n{}", usage()).into()),
} }
i += 1; i += 1;
@ -93,33 +132,75 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
opts.create_if_missing(false); opts.create_if_missing(false);
opts.create_missing_column_families(false); opts.create_missing_column_families(false);
let db = DB::open_cf_descriptors(&opts, &db_path, cf_descriptors())?; let db = DB::open_cf_descriptors(&opts, &db_path, column_family_descriptors())?;
let cf_names = [
CF_RAW_OBJECTS,
CF_FETCH_CACHE_PP,
CF_RRDP_STATE,
CF_RRDP_OBJECT_INDEX,
];
println!("db={}", db_path.display()); println!("db={}", db_path.display());
println!("mode={}", if exact { "exact" } else { "estimate" }); println!("mode={}", mode_label(mode));
let mut per_cf = Vec::with_capacity(ALL_COLUMN_FAMILY_NAMES.len());
let mut total: u64 = 0; let mut total: u64 = 0;
for name in cf_names { for &name in ALL_COLUMN_FAMILY_NAMES {
let n = if exact { let n = match mode {
exact_keys(&db, name)? DbStatsMode::Exact => exact_keys(&db, name)?,
} else { DbStatsMode::Estimate => estimate_keys(&db, name)?.unwrap_or(0),
estimate_keys(&db, name)?.unwrap_or(0)
}; };
total = total.saturating_add(n); total = total.saturating_add(n);
per_cf.push((name, n));
println!("{name}={n}"); println!("{name}={n}");
} }
println!("total={total}"); println!("total={total}");
// Also print # of SST files (useful sanity signal). for (group, count) in summarize_counts(per_cf.iter().copied()) {
println!("group_{}={count}", group.as_str());
}
let live = db.live_files()?; let live = db.live_files()?;
println!("sst_files={}", live.len()); println!("sst_files={}", live.len());
Ok(()) Ok(())
} }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn cf_group_classifies_current_and_legacy_keyspaces() {
assert_eq!(cf_group(CF_REPOSITORY_VIEW), CfGroup::CurrentRepositoryView);
assert_eq!(cf_group(CF_RAW_BY_HASH), CfGroup::CurrentRepositoryView);
assert_eq!(cf_group(CF_VCIR), CfGroup::CurrentValidationState);
assert_eq!(
cf_group(CF_AUDIT_RULE_INDEX),
CfGroup::CurrentValidationState
);
assert_eq!(cf_group(CF_RRDP_SOURCE), CfGroup::CurrentRrdpState);
assert_eq!(cf_group(CF_RRDP_URI_OWNER), CfGroup::CurrentRrdpState);
assert_eq!(cf_group(CF_RAW_OBJECTS), CfGroup::LegacyCompatibility);
}
#[test]
fn summarize_counts_accumulates_by_group() {
let grouped = summarize_counts([
(CF_REPOSITORY_VIEW, 5),
(CF_RAW_BY_HASH, 7),
(CF_VCIR, 11),
(CF_AUDIT_RULE_INDEX, 13),
(CF_RRDP_STATE, 17),
(CF_RRDP_SOURCE_MEMBER, 19),
(CF_RRDP_OBJECT_INDEX, 29),
]);
assert_eq!(grouped.get(&CfGroup::CurrentRepositoryView), Some(&12));
assert_eq!(grouped.get(&CfGroup::CurrentValidationState), Some(&24));
assert_eq!(grouped.get(&CfGroup::CurrentRrdpState), Some(&36));
assert_eq!(grouped.get(&CfGroup::LegacyCompatibility), Some(&29));
}
#[test]
fn usage_mentions_grouped_output_and_exact_mode() {
let text = usage();
assert!(text.contains("--exact"), "{text}");
assert!(text.contains("current_validation_state"), "{text}");
assert!(text.contains("legacy_compatibility"), "{text}");
}
}

View File

@ -1,16 +1,40 @@
use std::path::PathBuf; use std::path::PathBuf;
use rocksdb::{ColumnFamilyDescriptor, DB, IteratorMode, Options}; use rocksdb::{DB, IteratorMode, Options};
use rpki::storage::{
CF_RRDP_SOURCE, CF_RRDP_SOURCE_MEMBER, CF_RRDP_STATE, CF_RRDP_URI_OWNER,
RrdpSourceMemberRecord, RrdpSourceRecord, RrdpUriOwnerRecord, column_family_descriptors,
};
use rpki::sync::rrdp::RrdpState;
fn enable_blobdb_if_supported(opts: &mut Options) { #[derive(Clone, Copy, Debug, PartialEq, Eq)]
// Keep this in sync with `rpki::storage`: enum DumpView {
// blob files are CF-level options; readers should open CFs with blob enabled too. LegacyState,
#[allow(dead_code)] Source,
fn _set(opts: &mut Options) { Members,
opts.set_enable_blob_files(true); Owners,
opts.set_min_blob_size(1024); All,
}
impl DumpView {
fn parse(value: &str) -> Result<Self, String> {
match value {
"legacy-state" => Ok(Self::LegacyState),
"source" => Ok(Self::Source),
"members" => Ok(Self::Members),
"owners" => Ok(Self::Owners),
"all" => Ok(Self::All),
other => Err(format!(
"invalid --view: {other} (expected one of: legacy-state, source, members, owners, all)"
)),
}
} }
_set(opts); }
#[derive(Debug)]
struct DumpArgs {
db_path: PathBuf,
view: DumpView,
} }
fn usage() -> String { fn usage() -> String {
@ -18,23 +42,23 @@ fn usage() -> String {
format!( format!(
"\ "\
Usage: Usage:
{bin} --db <path> {bin} --db <path> [--view <legacy-state|source|members|owners|all>]
Options: Options:
--db <path> RocksDB directory --db <path> RocksDB directory
--view <name> Dump one RRDP view; default is all
--help Show this help --help Show this help
" "
) )
} }
fn main() -> Result<(), Box<dyn std::error::Error>> { fn parse_args(argv: &[String]) -> Result<DumpArgs, String> {
let argv: Vec<String> = std::env::args().collect();
if argv.iter().any(|a| a == "--help" || a == "-h") { if argv.iter().any(|a| a == "--help" || a == "-h") {
print!("{}", usage()); return Err(usage());
return Ok(());
} }
let mut db_path: Option<PathBuf> = None; let mut db_path: Option<PathBuf> = None;
let mut view = DumpView::All;
let mut i = 1usize; let mut i = 1usize;
while i < argv.len() { while i < argv.len() {
match argv[i].as_str() { match argv[i].as_str() {
@ -43,43 +67,280 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
let v = argv.get(i).ok_or("--db requires a value")?; let v = argv.get(i).ok_or("--db requires a value")?;
db_path = Some(PathBuf::from(v)); db_path = Some(PathBuf::from(v));
} }
other => return Err(format!("unknown argument: {other}\n\n{}", usage()).into()), "--view" => {
i += 1;
let v = argv.get(i).ok_or("--view requires a value")?;
view = DumpView::parse(v)?;
}
other => return Err(format!("unknown argument: {other}\n\n{}", usage())),
} }
i += 1; i += 1;
} }
let db_path = db_path.ok_or_else(|| format!("--db is required\n\n{}", usage()))?; Ok(DumpArgs {
db_path: db_path.ok_or_else(|| format!("--db is required\n\n{}", usage()))?,
view,
})
}
fn open_db(path: &std::path::Path) -> Result<DB, Box<dyn std::error::Error>> {
let mut opts = Options::default(); let mut opts = Options::default();
opts.create_if_missing(false); opts.create_if_missing(false);
opts.create_missing_column_families(false); opts.create_missing_column_families(false);
Ok(DB::open_cf_descriptors(
&opts,
path,
column_family_descriptors(),
)?)
}
// Open only the column families we need. fn collect_legacy_state(db: &DB) -> Result<Vec<(String, RrdpState)>, Box<dyn std::error::Error>> {
let mut cf_opts = Options::default();
enable_blobdb_if_supported(&mut cf_opts);
let cfs = vec![
ColumnFamilyDescriptor::new("raw_objects", cf_opts.clone()),
ColumnFamilyDescriptor::new("fetch_cache_pp", cf_opts.clone()),
ColumnFamilyDescriptor::new("rrdp_state", cf_opts.clone()),
ColumnFamilyDescriptor::new("rrdp_object_index", cf_opts),
];
let db = DB::open_cf_descriptors(&opts, &db_path, cfs)?;
let cf = db let cf = db
.cf_handle("rrdp_state") .cf_handle(CF_RRDP_STATE)
.ok_or("missing column family: rrdp_state")?; .ok_or("missing column family: rrdp_state")?;
let mut out = Vec::new();
let mut out: Vec<(String, u64, String)> = Vec::new();
for res in db.iterator_cf(cf, IteratorMode::Start) { for res in db.iterator_cf(cf, IteratorMode::Start) {
let (k, v) = res?; let (k, v) = res?;
let k = String::from_utf8_lossy(&k).to_string(); let notify_uri = String::from_utf8_lossy(&k).to_string();
let st = rpki::sync::rrdp::RrdpState::decode(&v) let state = RrdpState::decode(&v)
.map_err(|e| format!("decode rrdp_state failed for {k}: {e}"))?; .map_err(|e| format!("decode rrdp_state failed for {notify_uri}: {e}"))?;
out.push((k, st.serial, st.session_id)); out.push((notify_uri, state));
} }
out.sort_by(|a, b| a.0.cmp(&b.0)); out.sort_by(|a, b| a.0.cmp(&b.0));
for (k, serial, session) in out { Ok(out)
println!("{k}\t{serial}\t{session}"); }
fn collect_source_records(db: &DB) -> Result<Vec<RrdpSourceRecord>, Box<dyn std::error::Error>> {
let cf = db
.cf_handle(CF_RRDP_SOURCE)
.ok_or("missing column family: rrdp_source")?;
let mut out = Vec::new();
for res in db.iterator_cf(cf, IteratorMode::Start) {
let (_k, v) = res?;
let record: RrdpSourceRecord = serde_cbor::from_slice(&v)?;
record.validate_internal()?;
out.push(record);
}
out.sort_by(|a, b| a.notify_uri.cmp(&b.notify_uri));
Ok(out)
}
fn collect_source_member_records(
db: &DB,
) -> Result<Vec<RrdpSourceMemberRecord>, Box<dyn std::error::Error>> {
let cf = db
.cf_handle(CF_RRDP_SOURCE_MEMBER)
.ok_or("missing column family: rrdp_source_member")?;
let mut out = Vec::new();
for res in db.iterator_cf(cf, IteratorMode::Start) {
let (_k, v) = res?;
let record: RrdpSourceMemberRecord = serde_cbor::from_slice(&v)?;
record.validate_internal()?;
out.push(record);
}
out.sort_by(|a, b| {
a.notify_uri
.cmp(&b.notify_uri)
.then(a.rsync_uri.cmp(&b.rsync_uri))
});
Ok(out)
}
fn collect_uri_owner_records(
db: &DB,
) -> Result<Vec<RrdpUriOwnerRecord>, Box<dyn std::error::Error>> {
let cf = db
.cf_handle(CF_RRDP_URI_OWNER)
.ok_or("missing column family: rrdp_uri_owner")?;
let mut out = Vec::new();
for res in db.iterator_cf(cf, IteratorMode::Start) {
let (_k, v) = res?;
let record: RrdpUriOwnerRecord = serde_cbor::from_slice(&v)?;
record.validate_internal()?;
out.push(record);
}
out.sort_by(|a, b| a.rsync_uri.cmp(&b.rsync_uri));
Ok(out)
}
fn print_legacy_state(entries: &[(String, RrdpState)]) {
println!("[legacy-state]");
println!("notify_uri\tserial\tsession_id");
for (notify_uri, state) in entries {
println!("{notify_uri}\t{}\t{}", state.serial, state.session_id);
}
}
fn print_source_records(entries: &[RrdpSourceRecord]) {
println!("[source]");
println!("notify_uri\tlast_serial\tlast_session_id\tsync_state\tlast_snapshot_uri\tlast_error");
for record in entries {
println!(
"{}\t{}\t{}\t{:?}\t{}\t{}",
record.notify_uri,
record
.last_serial
.map(|v| v.to_string())
.unwrap_or_else(|| "-".to_string()),
record.last_session_id.as_deref().unwrap_or("-"),
record.sync_state,
record.last_snapshot_uri.as_deref().unwrap_or("-"),
record.last_error.as_deref().unwrap_or("-"),
);
}
}
fn print_source_member_records(entries: &[RrdpSourceMemberRecord]) {
println!("[members]");
println!("notify_uri\trsync_uri\tpresent\thash\tsession_id\tserial");
for record in entries {
println!(
"{}\t{}\t{}\t{}\t{}\t{}",
record.notify_uri,
record.rsync_uri,
record.present,
record.current_hash.as_deref().unwrap_or("-"),
record.last_confirmed_session_id,
record.last_confirmed_serial,
);
}
}
fn print_uri_owner_records(entries: &[RrdpUriOwnerRecord]) {
println!("[owners]");
println!("rsync_uri\tnotify_uri\towner_state\thash\tsession_id\tserial");
for record in entries {
println!(
"{}\t{}\t{:?}\t{}\t{}\t{}",
record.rsync_uri,
record.notify_uri,
record.owner_state,
record.current_hash.as_deref().unwrap_or("-"),
record.last_confirmed_session_id,
record.last_confirmed_serial,
);
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let argv: Vec<String> = std::env::args().collect();
let args = parse_args(&argv).map_err(|e| -> Box<dyn std::error::Error> { e.into() })?;
let db = open_db(&args.db_path)?;
match args.view {
DumpView::LegacyState => print_legacy_state(&collect_legacy_state(&db)?),
DumpView::Source => print_source_records(&collect_source_records(&db)?),
DumpView::Members => print_source_member_records(&collect_source_member_records(&db)?),
DumpView::Owners => print_uri_owner_records(&collect_uri_owner_records(&db)?),
DumpView::All => {
print_legacy_state(&collect_legacy_state(&db)?);
print_source_records(&collect_source_records(&db)?);
print_source_member_records(&collect_source_member_records(&db)?);
print_uri_owner_records(&collect_uri_owner_records(&db)?);
}
} }
Ok(()) Ok(())
} }
#[cfg(test)]
mod tests {
use super::*;
use rpki::storage::{PackTime, RocksStore, RrdpSourceSyncState, RrdpUriOwnerState};
#[test]
fn parse_args_accepts_view_and_db() {
let args = parse_args(&[
"rrdp_state_dump".to_string(),
"--db".to_string(),
"db".to_string(),
"--view".to_string(),
"owners".to_string(),
])
.expect("parse args");
assert_eq!(args.db_path, PathBuf::from("db"));
assert_eq!(args.view, DumpView::Owners);
}
#[test]
fn parse_args_rejects_invalid_view() {
let err = parse_args(&[
"rrdp_state_dump".to_string(),
"--db".to_string(),
"db".to_string(),
"--view".to_string(),
"nope".to_string(),
])
.unwrap_err();
assert!(err.contains("invalid --view"), "{err}");
}
#[test]
fn collect_rrdp_views_reads_legacy_and_current_records() {
let dir = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(dir.path()).expect("open store");
let legacy_state = RrdpState {
session_id: "session-1".to_string(),
serial: 42,
};
store
.put_rrdp_state(
"https://example.test/notify.xml",
&legacy_state.encode().expect("encode legacy state"),
)
.expect("put legacy state");
store
.put_rrdp_source_record(&RrdpSourceRecord {
notify_uri: "https://example.test/notify.xml".to_string(),
last_session_id: Some("session-1".to_string()),
last_serial: Some(42),
first_seen_at: PackTime::from_utc_offset_datetime(time::OffsetDateTime::now_utc()),
last_seen_at: PackTime::from_utc_offset_datetime(time::OffsetDateTime::now_utc()),
last_sync_at: None,
sync_state: RrdpSourceSyncState::DeltaReady,
last_snapshot_uri: Some("https://example.test/snapshot.xml".to_string()),
last_snapshot_hash: None,
last_error: None,
})
.expect("put source record");
store
.put_rrdp_source_member_record(&RrdpSourceMemberRecord {
notify_uri: "https://example.test/notify.xml".to_string(),
rsync_uri: "rsync://example.test/repo/a.roa".to_string(),
current_hash: Some("11".repeat(32)),
object_type: Some("roa".to_string()),
present: true,
last_confirmed_session_id: "session-1".to_string(),
last_confirmed_serial: 42,
last_changed_at: PackTime::from_utc_offset_datetime(time::OffsetDateTime::now_utc()),
})
.expect("put member record");
store
.put_rrdp_uri_owner_record(&RrdpUriOwnerRecord {
rsync_uri: "rsync://example.test/repo/a.roa".to_string(),
notify_uri: "https://example.test/notify.xml".to_string(),
current_hash: Some("11".repeat(32)),
last_confirmed_session_id: "session-1".to_string(),
last_confirmed_serial: 42,
last_changed_at: PackTime::from_utc_offset_datetime(time::OffsetDateTime::now_utc()),
owner_state: RrdpUriOwnerState::Active,
})
.expect("put owner record");
drop(store);
let db = open_db(dir.path()).expect("open db");
let legacy = collect_legacy_state(&db).expect("legacy dump");
let sources = collect_source_records(&db).expect("source dump");
let members = collect_source_member_records(&db).expect("members dump");
let owners = collect_uri_owner_records(&db).expect("owners dump");
assert_eq!(legacy.len(), 1);
assert_eq!(legacy[0].0, "https://example.test/notify.xml");
assert_eq!(legacy[0].1.serial, 42);
assert_eq!(sources.len(), 1);
assert_eq!(sources[0].sync_state, RrdpSourceSyncState::DeltaReady);
assert_eq!(members.len(), 1);
assert_eq!(members[0].rsync_uri, "rsync://example.test/repo/a.roa");
assert_eq!(owners.len(), 1);
assert_eq!(owners[0].owner_state, RrdpUriOwnerState::Active);
}
}

View File

@ -37,7 +37,6 @@ pub struct CliArgs {
pub max_instances: Option<usize>, pub max_instances: Option<usize>,
pub validation_time: Option<time::OffsetDateTime>, pub validation_time: Option<time::OffsetDateTime>,
pub revalidate_only: bool,
pub analyze: bool, pub analyze: bool,
pub profile_cpu: bool, pub profile_cpu: bool,
} }
@ -66,7 +65,6 @@ Options:
--max-depth <n> Max CA instance depth (0 = root only) --max-depth <n> Max CA instance depth (0 = root only)
--max-instances <n> Max number of CA instances to process --max-instances <n> Max number of CA instances to process
--validation-time <rfc3339> Validation time in RFC3339 (default: now UTC) --validation-time <rfc3339> Validation time in RFC3339 (default: now UTC)
--revalidate-only Skip RRDP/rsync sync; re-validate from existing DB cache
--analyze Write timing analysis JSON under target/live/analyze/<timestamp>/ --analyze Write timing analysis JSON under target/live/analyze/<timestamp>/
--profile-cpu (Requires build feature 'profile') Write CPU flamegraph under analyze dir --profile-cpu (Requires build feature 'profile') Write CPU flamegraph under analyze dir
@ -91,7 +89,6 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
let mut max_depth: Option<usize> = None; let mut max_depth: Option<usize> = None;
let mut max_instances: Option<usize> = None; let mut max_instances: Option<usize> = None;
let mut validation_time: Option<time::OffsetDateTime> = None; let mut validation_time: Option<time::OffsetDateTime> = None;
let mut revalidate_only: bool = false;
let mut analyze: bool = false; let mut analyze: bool = false;
let mut profile_cpu: bool = false; let mut profile_cpu: bool = false;
@ -178,9 +175,6 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
.map_err(|e| format!("invalid --validation-time (RFC3339 expected): {e}"))?; .map_err(|e| format!("invalid --validation-time (RFC3339 expected): {e}"))?;
validation_time = Some(t); validation_time = Some(t);
} }
"--revalidate-only" => {
revalidate_only = true;
}
"--analyze" => { "--analyze" => {
analyze = true; analyze = true;
} }
@ -222,7 +216,6 @@ pub fn parse_args(argv: &[String]) -> Result<CliArgs, String> {
max_depth, max_depth,
max_instances, max_instances,
validation_time, validation_time,
revalidate_only,
analyze, analyze,
profile_cpu, profile_cpu,
}) })
@ -356,7 +349,6 @@ pub fn run(argv: &[String]) -> Result<(), String> {
let config = TreeRunConfig { let config = TreeRunConfig {
max_depth: args.max_depth, max_depth: args.max_depth,
max_instances: args.max_instances, max_instances: args.max_instances,
revalidate_only: args.revalidate_only,
}; };
use time::format_description::well_known::Rfc3339; use time::format_description::well_known::Rfc3339;
@ -804,6 +796,20 @@ mod tests {
assert!(args.validation_time.is_some()); assert!(args.validation_time.is_some());
} }
#[test]
fn parse_rejects_removed_revalidate_only_flag() {
let argv = vec![
"rpki".to_string(),
"--db".to_string(),
"db".to_string(),
"--tal-url".to_string(),
"https://example.test/x.tal".to_string(),
"--revalidate-only".to_string(),
];
let err = parse_args(&argv).unwrap_err();
assert!(err.contains("unknown argument: --revalidate-only"), "{err}");
}
#[test] #[test]
fn read_policy_accepts_valid_toml() { fn read_policy_accepts_valid_toml() {
let dir = tempfile::tempdir().expect("tmpdir"); let dir = tempfile::tempdir().expect("tmpdir");

View File

@ -5,6 +5,8 @@ use x509_parser::asn1_rs::{Class as Asn1Class, Tag as Asn1Tag};
use x509_parser::extensions::ParsedExtension; use x509_parser::extensions::ParsedExtension;
use x509_parser::prelude::{FromDer, X509Certificate, X509Extension, X509Version}; use x509_parser::prelude::{FromDer, X509Certificate, X509Extension, X509Version};
use serde::{Deserialize, Serialize};
use crate::data_model::common::{ use crate::data_model::common::{
Asn1TimeUtc, InvalidTimeEncodingError, UtcTime, X509NameDer, asn1_time_to_model, Asn1TimeUtc, InvalidTimeEncodingError, UtcTime, X509NameDer, asn1_time_to_model,
}; };
@ -183,7 +185,7 @@ pub struct AccessDescription {
pub access_location: String, pub access_location: String,
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub enum Afi { pub enum Afi {
Ipv4, Ipv4,
Ipv6, Ipv6,
@ -205,7 +207,7 @@ impl Afi {
} }
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct IpResourceSet { pub struct IpResourceSet {
pub families: Vec<IpAddressFamily>, pub families: Vec<IpAddressFamily>,
} }
@ -240,7 +242,7 @@ pub enum IpResourceSetDecodeError {
InvalidEncoding, InvalidEncoding,
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct IpAddressFamily { pub struct IpAddressFamily {
pub afi: Afi, pub afi: Afi,
pub choice: IpAddressChoice, pub choice: IpAddressChoice,
@ -261,32 +263,32 @@ impl IpAddressFamily {
} }
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IpAddressChoice { pub enum IpAddressChoice {
Inherit, Inherit,
AddressesOrRanges(Vec<IpAddressOrRange>), AddressesOrRanges(Vec<IpAddressOrRange>),
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IpAddressOrRange { pub enum IpAddressOrRange {
Prefix(IpPrefix), Prefix(IpPrefix),
Range(IpAddressRange), Range(IpAddressRange),
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct IpAddressRange { pub struct IpAddressRange {
pub min: Vec<u8>, pub min: Vec<u8>,
pub max: Vec<u8>, pub max: Vec<u8>,
} }
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct IpPrefix { pub struct IpPrefix {
pub afi: Afi, pub afi: Afi,
pub prefix_len: u16, pub prefix_len: u16,
pub addr: Vec<u8>, pub addr: Vec<u8>,
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct AsResourceSet { pub struct AsResourceSet {
pub asnum: Option<AsIdentifierChoice>, pub asnum: Option<AsIdentifierChoice>,
pub rdi: Option<AsIdentifierChoice>, pub rdi: Option<AsIdentifierChoice>,
@ -330,7 +332,7 @@ pub enum AsResourceSetDecodeError {
InvalidEncoding, InvalidEncoding,
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum AsIdentifierChoice { pub enum AsIdentifierChoice {
Inherit, Inherit,
AsIdsOrRanges(Vec<AsIdOrRange>), AsIdsOrRanges(Vec<AsIdOrRange>),
@ -347,7 +349,7 @@ impl AsIdentifierChoice {
} }
} }
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum AsIdOrRange { pub enum AsIdOrRange {
Id(u32), Id(u32),
Range { min: u32, max: u32 }, Range { min: u32, max: u32 },

View File

@ -52,12 +52,8 @@ impl SystemRsyncFetcher {
return Ok(None); return Ok(None);
}; };
std::fs::create_dir_all(root).map_err(|e| { std::fs::create_dir_all(root)
format!( .map_err(|e| format!("create rsync mirror root failed: {}: {e}", root.display()))?;
"create rsync mirror root failed: {}: {e}",
root.display()
)
})?;
let hash = hex::encode(sha2::Sha256::digest(normalized_rsync_base_uri.as_bytes())); let hash = hex::encode(sha2::Sha256::digest(normalized_rsync_base_uri.as_bytes()));
let dir = root.join(hash); let dir = root.join(hash);
@ -107,7 +103,8 @@ impl RsyncFetcher for SystemRsyncFetcher {
.mirror_dst_dir(&base) .mirror_dst_dir(&base)
.map_err(|e| RsyncFetchError::Fetch(e.to_string()))? .map_err(|e| RsyncFetchError::Fetch(e.to_string()))?
{ {
self.run_rsync(&base, &dst).map_err(RsyncFetchError::Fetch)?; self.run_rsync(&base, &dst)
.map_err(RsyncFetchError::Fetch)?;
let mut out = Vec::new(); let mut out = Vec::new();
walk_dir_collect(&dst, &dst, &base, &mut out).map_err(RsyncFetchError::Fetch)?; walk_dir_collect(&dst, &dst, &base, &mut out).map_err(RsyncFetchError::Fetch)?;
return Ok(out); return Ok(out);
@ -249,4 +246,74 @@ mod tests {
assert!(e.contains("rsync failed:"), "{e}"); assert!(e.contains("rsync failed:"), "{e}");
assert!(e.contains("status="), "{e}"); assert!(e.contains("status="), "{e}");
} }
#[test]
fn mirror_dst_dir_reports_root_creation_error() {
let temp = tempfile::tempdir().expect("tempdir");
let root_file = temp.path().join("mirror-root-file");
std::fs::write(&root_file, b"not a directory").expect("write root file");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("rsync"),
timeout: Duration::from_secs(1),
extra_args: Vec::new(),
mirror_root: Some(root_file.clone()),
});
let err = fetcher
.mirror_dst_dir("rsync://example.net/repo/")
.expect_err("file mirror root must fail");
assert!(err.contains("create rsync mirror root failed"), "{err}");
assert!(err.contains(&root_file.display().to_string()), "{err}");
}
#[cfg(unix)]
#[test]
fn mirror_dst_dir_reports_directory_creation_error_inside_root() {
use std::os::unix::fs::PermissionsExt;
let temp = tempfile::tempdir().expect("tempdir");
let root = temp.path().join("mirror");
std::fs::create_dir_all(&root).expect("mkdir root");
let mut perms = std::fs::metadata(&root).expect("metadata").permissions();
perms.set_mode(0o555);
std::fs::set_permissions(&root, perms).expect("chmod root readonly");
let fetcher = SystemRsyncFetcher::new(SystemRsyncConfig {
rsync_bin: PathBuf::from("rsync"),
timeout: Duration::from_secs(1),
extra_args: Vec::new(),
mirror_root: Some(root.clone()),
});
let err = fetcher
.mirror_dst_dir("rsync://example.net/repo/")
.expect_err("readonly mirror root must fail");
assert!(
err.contains("create rsync mirror directory failed"),
"{err}"
);
let mut perms = std::fs::metadata(&root).expect("metadata").permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&root, perms).expect("restore perms");
}
#[cfg(unix)]
#[test]
fn walk_dir_collect_ignores_non_file_entries() {
use std::os::unix::net::UnixListener;
let temp = tempfile::tempdir().expect("tempdir");
let root = temp.path();
std::fs::write(root.join("a.cer"), b"x").expect("write file");
let socket_path = root.join("skip.sock");
let _listener = UnixListener::bind(&socket_path).expect("bind socket");
let mut out: Vec<(String, Vec<u8>)> = Vec::new();
walk_dir_collect(root, root, "rsync://example.net/repo/", &mut out).expect("walk");
assert_eq!(out.len(), 1);
assert_eq!(out[0].0, "rsync://example.net/repo/a.cer");
}
} }

View File

@ -7,6 +7,8 @@ pub mod audit;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod audit_downloads; pub mod audit_downloads;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod audit_trace;
#[cfg(feature = "full")]
pub mod cli; pub mod cli;
#[cfg(feature = "full")] #[cfg(feature = "full")]
pub mod fetch; pub mod fetch;

View File

@ -16,13 +16,13 @@ impl Default for SyncPreference {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")] #[serde(rename_all = "snake_case")]
pub enum CaFailedFetchPolicy { pub enum CaFailedFetchPolicy {
UseFetchCachePp, ReuseCurrentInstanceVcir,
StopAllOutput, StopAllOutput,
} }
impl Default for CaFailedFetchPolicy { impl Default for CaFailedFetchPolicy {
fn default() -> Self { fn default() -> Self {
Self::UseFetchCachePp Self::ReuseCurrentInstanceVcir
} }
} }

File diff suppressed because it is too large Load Diff

View File

@ -1,2 +1,3 @@
pub mod repo; pub mod repo;
pub mod rrdp; pub mod rrdp;
pub(crate) mod store_projection;

View File

@ -1,12 +1,16 @@
use crate::analysis::timing::TimingHandle; use crate::analysis::timing::TimingHandle;
use crate::audit::AuditDownloadKind;
use crate::audit_downloads::DownloadLogHandle; use crate::audit_downloads::DownloadLogHandle;
use crate::audit::{AuditDownloadKind};
use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher}; use crate::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use crate::policy::{Policy, SyncPreference}; use crate::policy::{Policy, SyncPreference};
use crate::report::{RfcRef, Warning}; use crate::report::{RfcRef, Warning};
use crate::storage::RocksStore; use crate::storage::RocksStore;
use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log; use crate::sync::rrdp::sync_from_notification_with_timing_and_download_log;
use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError}; use crate::sync::rrdp::{Fetcher as HttpFetcher, RrdpSyncError};
use crate::sync::store_projection::{
put_repository_view_present, put_repository_view_withdrawn, upsert_raw_by_hash_evidence,
};
use std::collections::HashSet;
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
@ -143,9 +147,8 @@ fn try_rrdp_sync(
let _total = timing let _total = timing
.as_ref() .as_ref()
.map(|t| t.span_phase("rrdp_fetch_notification_total")); .map(|t| t.span_phase("rrdp_fetch_notification_total"));
let mut dl_span = download_log.map(|dl| { let mut dl_span = download_log
dl.span_download(AuditDownloadKind::RrdpNotification, notification_uri) .map(|dl| dl.span_download(AuditDownloadKind::RrdpNotification, notification_uri));
});
match http_fetcher.fetch(notification_uri) { match http_fetcher.fetch(notification_uri) {
Ok(v) => { Ok(v) => {
if let Some(t) = timing.as_ref() { if let Some(t) = timing.as_ref() {
@ -307,12 +310,43 @@ fn rsync_sync_into_raw_objects(
t.record_count("rsync_objects_bytes_total", bytes_total); t.record_count("rsync_objects_bytes_total", bytes_total);
} }
drop(_p); drop(_p);
let existing_view = store
.list_repository_view_entries_with_prefix(rsync_base_uri)
.map_err(|e| RepoSyncError::Storage(e.to_string()))?;
let new_set: HashSet<&str> = objects.iter().map(|(uri, _)| uri.as_str()).collect();
let _w = timing let _w = timing
.as_ref() .as_ref()
.map(|t| t.span_phase("rsync_write_raw_objects_total")); .map(|t| t.span_phase("rsync_write_raw_objects_total"));
store store
.put_raw_batch(objects) .put_raw_batch(objects.clone())
.map_err(|e| RepoSyncError::Storage(e.to_string())) .map_err(|e| RepoSyncError::Storage(e.to_string()))?;
drop(_w);
let _proj = timing
.as_ref()
.map(|t| t.span_phase("rsync_write_repository_view_total"));
for entry in existing_view {
if !new_set.contains(entry.rsync_uri.as_str()) {
put_repository_view_withdrawn(
store,
rsync_base_uri,
&entry.rsync_uri,
entry.current_hash,
)
.map_err(RepoSyncError::Storage)?;
}
}
for (uri, bytes) in &objects {
let current_hash =
upsert_raw_by_hash_evidence(store, uri, bytes).map_err(RepoSyncError::Storage)?;
put_repository_view_present(store, rsync_base_uri, uri, &current_hash)
.map_err(RepoSyncError::Storage)?;
}
Ok(objects.len())
} }
#[cfg(test)] #[cfg(test)]
@ -458,6 +492,29 @@ mod tests {
Some(b"cer".to_vec()) Some(b"cer".to_vec())
); );
let view = store
.get_repository_view_entry("rsync://example.test/repo/a.mft")
.expect("get repository view")
.expect("repository view entry present");
assert_eq!(
view.current_hash.as_deref(),
Some(hex::encode(sha2::Sha256::digest(b"mft")).as_str())
);
assert_eq!(
view.repository_source.as_deref(),
Some("rsync://example.test/repo/")
);
let raw = store
.get_raw_by_hash_entry(hex::encode(sha2::Sha256::digest(b"roa")).as_str())
.expect("get raw_by_hash")
.expect("raw_by_hash entry present");
assert!(
raw.origin_uris
.iter()
.any(|uri| uri == "rsync://example.test/repo/sub/b.roa")
);
let timing_path = temp.path().join("timing.json"); let timing_path = temp.path().join("timing.json");
timing.write_json(&timing_path, 5).expect("write json"); timing.write_json(&timing_path, 5).expect("write json");
let v: serde_json::Value = let v: serde_json::Value =
@ -478,6 +535,71 @@ mod tests {
); );
} }
#[test]
fn rsync_second_sync_marks_missing_repository_view_entries_withdrawn() {
    // Scenario: sync an rsync publication point twice against a local
    // directory. Between syncs, one object is deleted and one is added.
    // The repository-view projection must reflect both changes: the deleted
    // object becomes Withdrawn (still attributed to its source), the new
    // object becomes Present.
    let temp = tempfile::tempdir().expect("tempdir");
    let repo_dir = temp.path().join("repo");
    std::fs::create_dir_all(repo_dir.join("sub")).expect("mkdir");
    // Initial repository contents: one manifest at the root, one ROA in a
    // subdirectory (exercises recursive directory listing).
    std::fs::write(repo_dir.join("a.mft"), b"mft-v1").expect("write a");
    std::fs::write(repo_dir.join("sub").join("b.roa"), b"roa-v1").expect("write b");
    let store_dir = temp.path().join("db");
    let store = RocksStore::open(&store_dir).expect("open rocksdb");
    // Force the rsync path so the test never touches RRDP.
    let policy = Policy {
        sync_preference: SyncPreference::RsyncOnly,
        ..Policy::default()
    };
    let http = DummyHttpFetcher;
    let rsync = LocalDirRsyncFetcher::new(&repo_dir);
    // First sync: both objects should be ingested as Present.
    sync_publication_point(
        &store,
        &policy,
        None,
        "rsync://example.test/repo/",
        &http,
        &rsync,
        None,
        None,
    )
    .expect("first sync ok");
    // Mutate the repository: remove b.roa, add c.crl.
    std::fs::remove_file(repo_dir.join("sub").join("b.roa")).expect("remove b");
    std::fs::write(repo_dir.join("c.crl"), b"crl-v2").expect("write c");
    // Second sync picks up the delta between directory listings.
    sync_publication_point(
        &store,
        &policy,
        None,
        "rsync://example.test/repo/",
        &http,
        &rsync,
        None,
        None,
    )
    .expect("second sync ok");
    // The removed object must be marked Withdrawn, not deleted outright,
    // and must keep its repository_source attribution.
    let withdrawn = store
        .get_repository_view_entry("rsync://example.test/repo/sub/b.roa")
        .expect("get withdrawn repo view")
        .expect("withdrawn entry exists");
    assert_eq!(
        withdrawn.state,
        crate::storage::RepositoryViewState::Withdrawn
    );
    assert_eq!(
        withdrawn.repository_source.as_deref(),
        Some("rsync://example.test/repo/")
    );
    // The newly added object must show up as Present.
    let added = store
        .get_repository_view_entry("rsync://example.test/repo/c.crl")
        .expect("get added repo view")
        .expect("added entry exists");
    assert_eq!(added.state, crate::storage::RepositoryViewState::Present);
}
#[test] #[test]
fn rrdp_retry_succeeds_without_rsync_when_notification_fetch_is_transient() { fn rrdp_retry_succeeds_without_rsync_when_notification_fetch_is_transient() {
let temp = tempfile::tempdir().expect("tempdir"); let temp = tempfile::tempdir().expect("tempdir");
@ -602,7 +724,9 @@ mod tests {
Some(1) Some(1)
); );
assert_eq!( assert_eq!(
counts.get("repo_sync_rrdp_ok_total").and_then(|v| v.as_u64()), counts
.get("repo_sync_rrdp_ok_total")
.and_then(|v| v.as_u64()),
Some(1) Some(1)
); );
} }
@ -681,7 +805,11 @@ mod tests {
); );
let events = download_log.snapshot_events(); let events = download_log.snapshot_events();
assert_eq!(events.len(), 3, "expected notification + snapshot + rsync fallback"); assert_eq!(
events.len(),
3,
"expected notification + snapshot + rsync fallback"
);
assert_eq!(events[0].kind, AuditDownloadKind::RrdpNotification); assert_eq!(events[0].kind, AuditDownloadKind::RrdpNotification);
assert!(events[0].success); assert!(events[0].success);
assert_eq!(events[1].kind, AuditDownloadKind::RrdpSnapshot); assert_eq!(events[1].kind, AuditDownloadKind::RrdpSnapshot);

View File

@ -1,8 +1,13 @@
use crate::analysis::timing::TimingHandle; use crate::analysis::timing::TimingHandle;
use crate::audit::AuditDownloadKind; use crate::audit::AuditDownloadKind;
use crate::audit_downloads::DownloadLogHandle; use crate::audit_downloads::DownloadLogHandle;
use crate::storage::RocksStore; use crate::storage::{RocksStore, RrdpDeltaOp, RrdpSourceSyncState};
use crate::storage::RrdpDeltaOp; use crate::sync::store_projection::{
compute_sha256_hex, current_rrdp_owner_is, ensure_rrdp_uri_can_be_owned_by,
put_repository_view_present, put_repository_view_withdrawn, put_rrdp_source_member_present,
put_rrdp_source_member_withdrawn, put_rrdp_uri_owner_active, put_rrdp_uri_owner_withdrawn,
update_rrdp_source_record_on_success, upsert_raw_by_hash_evidence,
};
use base64::Engine; use base64::Engine;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sha2::Digest; use sha2::Digest;
@ -490,7 +495,9 @@ fn sync_from_notification_snapshot_inner(
let _parse_step = timing let _parse_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "parse_notification_snapshot")); .map(|t| t.span_rrdp_repo_step(notification_uri, "parse_notification_snapshot"));
let _parse_total = timing.as_ref().map(|t| t.span_phase("rrdp_parse_notification_total")); let _parse_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_parse_notification_total"));
let notif = parse_notification_snapshot(notification_xml)?; let notif = parse_notification_snapshot(notification_xml)?;
drop(_parse_step); drop(_parse_step);
drop(_parse_total); drop(_parse_total);
@ -498,7 +505,9 @@ fn sync_from_notification_snapshot_inner(
let _fetch_step = timing let _fetch_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_snapshot")); .map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_snapshot"));
let _fetch_total = timing.as_ref().map(|t| t.span_phase("rrdp_fetch_snapshot_total")); let _fetch_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_fetch_snapshot_total"));
let mut dl_span = download_log let mut dl_span = download_log
.map(|dl| dl.span_download(AuditDownloadKind::RrdpSnapshot, &notif.snapshot_uri)); .map(|dl| dl.span_download(AuditDownloadKind::RrdpSnapshot, &notif.snapshot_uri));
let snapshot_xml = match fetcher.fetch(&notif.snapshot_uri) { let snapshot_xml = match fetcher.fetch(&notif.snapshot_uri) {
@ -529,7 +538,9 @@ fn sync_from_notification_snapshot_inner(
let _hash_step = timing let _hash_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "hash_snapshot")); .map(|t| t.span_rrdp_repo_step(notification_uri, "hash_snapshot"));
let _hash_total = timing.as_ref().map(|t| t.span_phase("rrdp_hash_snapshot_total")); let _hash_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_hash_snapshot_total"));
let computed = sha2::Sha256::digest(&snapshot_xml); let computed = sha2::Sha256::digest(&snapshot_xml);
if computed.as_slice() != notif.snapshot_hash_sha256.as_slice() { if computed.as_slice() != notif.snapshot_hash_sha256.as_slice() {
return Err(RrdpError::SnapshotHashMismatch.into()); return Err(RrdpError::SnapshotHashMismatch.into());
@ -540,7 +551,9 @@ fn sync_from_notification_snapshot_inner(
let _apply_step = timing let _apply_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "apply_snapshot")); .map(|t| t.span_rrdp_repo_step(notification_uri, "apply_snapshot"));
let _apply_total = timing.as_ref().map(|t| t.span_phase("rrdp_apply_snapshot_total")); let _apply_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_apply_snapshot_total"));
let published = apply_snapshot( let published = apply_snapshot(
store, store,
notification_uri, notification_uri,
@ -557,7 +570,9 @@ fn sync_from_notification_snapshot_inner(
let _write_state_step = timing let _write_state_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "write_state")); .map(|t| t.span_rrdp_repo_step(notification_uri, "write_state"));
let _write_state_total = timing.as_ref().map(|t| t.span_phase("rrdp_write_state_total")); let _write_state_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_write_state_total"));
let state = RrdpState { let state = RrdpState {
session_id: notif.session_id.to_string(), session_id: notif.session_id.to_string(),
serial: notif.serial, serial: notif.serial,
@ -566,6 +581,16 @@ fn sync_from_notification_snapshot_inner(
store store
.put_rrdp_state(notification_uri, &bytes) .put_rrdp_state(notification_uri, &bytes)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?; .map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
update_rrdp_source_record_on_success(
store,
notification_uri,
notif.session_id.to_string().as_str(),
notif.serial,
RrdpSourceSyncState::SnapshotOnly,
Some(&notif.snapshot_uri),
Some(&hex::encode(notif.snapshot_hash_sha256)),
)
.map_err(RrdpSyncError::Storage)?;
drop(_write_state_step); drop(_write_state_step);
drop(_write_state_total); drop(_write_state_total);
@ -578,7 +603,14 @@ pub fn sync_from_notification(
notification_xml: &[u8], notification_xml: &[u8],
fetcher: &dyn Fetcher, fetcher: &dyn Fetcher,
) -> RrdpSyncResult<usize> { ) -> RrdpSyncResult<usize> {
sync_from_notification_inner(store, notification_uri, notification_xml, fetcher, None, None) sync_from_notification_inner(
store,
notification_uri,
notification_xml,
fetcher,
None,
None,
)
} }
pub fn sync_from_notification_with_timing( pub fn sync_from_notification_with_timing(
@ -588,7 +620,14 @@ pub fn sync_from_notification_with_timing(
fetcher: &dyn Fetcher, fetcher: &dyn Fetcher,
timing: Option<&TimingHandle>, timing: Option<&TimingHandle>,
) -> RrdpSyncResult<usize> { ) -> RrdpSyncResult<usize> {
sync_from_notification_inner(store, notification_uri, notification_xml, fetcher, timing, None) sync_from_notification_inner(
store,
notification_uri,
notification_xml,
fetcher,
timing,
None,
)
} }
pub fn sync_from_notification_with_timing_and_download_log( pub fn sync_from_notification_with_timing_and_download_log(
@ -620,7 +659,9 @@ fn sync_from_notification_inner(
let _parse_step = timing let _parse_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "parse_notification")); .map(|t| t.span_rrdp_repo_step(notification_uri, "parse_notification"));
let _parse_total = timing.as_ref().map(|t| t.span_phase("rrdp_parse_notification_total")); let _parse_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_parse_notification_total"));
let notif = parse_notification(notification_xml)?; let notif = parse_notification(notification_xml)?;
drop(_parse_step); drop(_parse_step);
drop(_parse_total); drop(_parse_total);
@ -634,7 +675,9 @@ fn sync_from_notification_inner(
let _read_state_step = timing let _read_state_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "read_state")); .map(|t| t.span_rrdp_repo_step(notification_uri, "read_state"));
let _read_state_total = timing.as_ref().map(|t| t.span_phase("rrdp_read_state_total")); let _read_state_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_read_state_total"));
let state = store let state = store
.get_rrdp_state(notification_uri) .get_rrdp_state(notification_uri)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))? .map_err(|e| RrdpSyncError::Storage(e.to_string()))?
@ -676,8 +719,9 @@ fn sync_from_notification_inner(
let _fetch_d_step = timing let _fetch_d_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_deltas")); .map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_deltas"));
let _fetch_d_total = let _fetch_d_total = timing
timing.as_ref().map(|t| t.span_phase("rrdp_fetch_deltas_total")); .as_ref()
.map(|t| t.span_phase("rrdp_fetch_deltas_total"));
let mut fetched: Vec<(u64, [u8; 32], Vec<u8>)> = let mut fetched: Vec<(u64, [u8; 32], Vec<u8>)> =
Vec::with_capacity((want_last - want_first + 1) as usize); Vec::with_capacity((want_last - want_first + 1) as usize);
let mut fetch_ok = true; let mut fetch_ok = true;
@ -727,8 +771,9 @@ fn sync_from_notification_inner(
let _apply_d_step = timing let _apply_d_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "apply_deltas")); .map(|t| t.span_rrdp_repo_step(notification_uri, "apply_deltas"));
let _apply_d_total = let _apply_d_total = timing
timing.as_ref().map(|t| t.span_phase("rrdp_apply_deltas_total")); .as_ref()
.map(|t| t.span_phase("rrdp_apply_deltas_total"));
let mut applied_total = 0usize; let mut applied_total = 0usize;
let mut ok = true; let mut ok = true;
for (serial, expected_hash, bytes) in &fetched { for (serial, expected_hash, bytes) in &fetched {
@ -754,8 +799,9 @@ fn sync_from_notification_inner(
let _write_state_step = timing let _write_state_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "write_state")); .map(|t| t.span_rrdp_repo_step(notification_uri, "write_state"));
let _write_state_total = let _write_state_total = timing
timing.as_ref().map(|t| t.span_phase("rrdp_write_state_total")); .as_ref()
.map(|t| t.span_phase("rrdp_write_state_total"));
let new_state = RrdpState { let new_state = RrdpState {
session_id: notif.session_id.to_string(), session_id: notif.session_id.to_string(),
serial: notif.serial, serial: notif.serial,
@ -764,6 +810,16 @@ fn sync_from_notification_inner(
store store
.put_rrdp_state(notification_uri, &bytes) .put_rrdp_state(notification_uri, &bytes)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?; .map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
update_rrdp_source_record_on_success(
store,
notification_uri,
notif.session_id.to_string().as_str(),
notif.serial,
RrdpSourceSyncState::DeltaReady,
Some(&notif.snapshot_uri),
Some(&hex::encode(notif.snapshot_hash_sha256)),
)
.map_err(RrdpSyncError::Storage)?;
drop(_write_state_step); drop(_write_state_step);
drop(_write_state_total); drop(_write_state_total);
if let Some(t) = timing.as_ref() { if let Some(t) = timing.as_ref() {
@ -780,7 +836,9 @@ fn sync_from_notification_inner(
let _fetch_step = timing let _fetch_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_snapshot")); .map(|t| t.span_rrdp_repo_step(notification_uri, "fetch_snapshot"));
let _fetch_total = timing.as_ref().map(|t| t.span_phase("rrdp_fetch_snapshot_total")); let _fetch_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_fetch_snapshot_total"));
let mut dl_span = download_log let mut dl_span = download_log
.map(|dl| dl.span_download(AuditDownloadKind::RrdpSnapshot, &notif.snapshot_uri)); .map(|dl| dl.span_download(AuditDownloadKind::RrdpSnapshot, &notif.snapshot_uri));
let snapshot_xml = match fetcher.fetch(&notif.snapshot_uri) { let snapshot_xml = match fetcher.fetch(&notif.snapshot_uri) {
@ -811,7 +869,9 @@ fn sync_from_notification_inner(
let _hash_step = timing let _hash_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "hash_snapshot")); .map(|t| t.span_rrdp_repo_step(notification_uri, "hash_snapshot"));
let _hash_total = timing.as_ref().map(|t| t.span_phase("rrdp_hash_snapshot_total")); let _hash_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_hash_snapshot_total"));
let computed = sha2::Sha256::digest(&snapshot_xml); let computed = sha2::Sha256::digest(&snapshot_xml);
if computed.as_slice() != notif.snapshot_hash_sha256.as_slice() { if computed.as_slice() != notif.snapshot_hash_sha256.as_slice() {
return Err(RrdpError::SnapshotHashMismatch.into()); return Err(RrdpError::SnapshotHashMismatch.into());
@ -822,7 +882,9 @@ fn sync_from_notification_inner(
let _apply_step = timing let _apply_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "apply_snapshot")); .map(|t| t.span_rrdp_repo_step(notification_uri, "apply_snapshot"));
let _apply_total = timing.as_ref().map(|t| t.span_phase("rrdp_apply_snapshot_total")); let _apply_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_apply_snapshot_total"));
let published = apply_snapshot( let published = apply_snapshot(
store, store,
notification_uri, notification_uri,
@ -839,7 +901,9 @@ fn sync_from_notification_inner(
let _write_state_step = timing let _write_state_step = timing
.as_ref() .as_ref()
.map(|t| t.span_rrdp_repo_step(notification_uri, "write_state")); .map(|t| t.span_rrdp_repo_step(notification_uri, "write_state"));
let _write_state_total = timing.as_ref().map(|t| t.span_phase("rrdp_write_state_total")); let _write_state_total = timing
.as_ref()
.map(|t| t.span_phase("rrdp_write_state_total"));
let new_state = RrdpState { let new_state = RrdpState {
session_id: notif.session_id.to_string(), session_id: notif.session_id.to_string(),
serial: notif.serial, serial: notif.serial,
@ -848,6 +912,16 @@ fn sync_from_notification_inner(
store store
.put_rrdp_state(notification_uri, &bytes) .put_rrdp_state(notification_uri, &bytes)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?; .map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
update_rrdp_source_record_on_success(
store,
notification_uri,
notif.session_id.to_string().as_str(),
notif.serial,
RrdpSourceSyncState::SnapshotOnly,
Some(&notif.snapshot_uri),
Some(&hex::encode(notif.snapshot_hash_sha256)),
)
.map_err(RrdpSyncError::Storage)?;
drop(_write_state_step); drop(_write_state_step);
drop(_write_state_total); drop(_write_state_total);
@ -883,7 +957,20 @@ fn apply_delta(
.into()); .into());
} }
// Deferred projection effect for a single RRDP delta element. Effects are
// collected alongside the raw-store `RrdpDeltaOp`s while validating the delta,
// then replayed after the raw batch is applied to keep the derived views
// (repository_view, source members, URI owners) in step with raw storage.
enum DeltaProjectionEffect {
    // A publish element: write/overwrite this object's bytes.
    Upsert {
        rsync_uri: String,
        bytes: Vec<u8>,
    },
    // A withdraw element: the object existed with `previous_hash` (verified
    // against the delta's declared hash before this effect is recorded).
    Delete {
        rsync_uri: String,
        previous_hash: String,
    },
}
let session_id = expected_session_id.to_string();
let mut ops: Vec<RrdpDeltaOp> = Vec::with_capacity(delta.elements.len()); let mut ops: Vec<RrdpDeltaOp> = Vec::with_capacity(delta.elements.len());
let mut projection: Vec<DeltaProjectionEffect> = Vec::with_capacity(delta.elements.len());
for e in delta.elements { for e in delta.elements {
match e { match e {
DeltaElement::Publish { DeltaElement::Publish {
@ -897,6 +984,8 @@ fn apply_delta(
if !is_member { if !is_member {
return Err(RrdpError::DeltaTargetNotFromRepository { rsync_uri: uri }.into()); return Err(RrdpError::DeltaTargetNotFromRepository { rsync_uri: uri }.into());
} }
ensure_rrdp_uri_can_be_owned_by(store, notification_uri, uri.as_str())
.map_err(RrdpSyncError::Storage)?;
let old_bytes = store let old_bytes = store
.get_raw(uri.as_str()) .get_raw(uri.as_str())
.map_err(|e| RrdpSyncError::Storage(e.to_string()))? .map_err(|e| RrdpSyncError::Storage(e.to_string()))?
@ -909,6 +998,10 @@ fn apply_delta(
} }
ops.push(RrdpDeltaOp::Upsert { ops.push(RrdpDeltaOp::Upsert {
rsync_uri: uri.clone(),
bytes: bytes.clone(),
});
projection.push(DeltaProjectionEffect::Upsert {
rsync_uri: uri, rsync_uri: uri,
bytes, bytes,
}); });
@ -926,7 +1019,13 @@ fn apply_delta(
RrdpError::DeltaPublishWithoutHashForExisting { rsync_uri: uri }.into(), RrdpError::DeltaPublishWithoutHashForExisting { rsync_uri: uri }.into(),
); );
} }
ensure_rrdp_uri_can_be_owned_by(store, notification_uri, uri.as_str())
.map_err(RrdpSyncError::Storage)?;
ops.push(RrdpDeltaOp::Upsert { ops.push(RrdpDeltaOp::Upsert {
rsync_uri: uri.clone(),
bytes: bytes.clone(),
});
projection.push(DeltaProjectionEffect::Upsert {
rsync_uri: uri, rsync_uri: uri,
bytes, bytes,
}); });
@ -938,6 +1037,8 @@ fn apply_delta(
if !is_member { if !is_member {
return Err(RrdpError::DeltaTargetNotFromRepository { rsync_uri: uri }.into()); return Err(RrdpError::DeltaTargetNotFromRepository { rsync_uri: uri }.into());
} }
ensure_rrdp_uri_can_be_owned_by(store, notification_uri, uri.as_str())
.map_err(RrdpSyncError::Storage)?;
let old_bytes = store let old_bytes = store
.get_raw(uri.as_str()) .get_raw(uri.as_str())
.map_err(|e| RrdpSyncError::Storage(e.to_string()))? .map_err(|e| RrdpSyncError::Storage(e.to_string()))?
@ -948,14 +1049,86 @@ fn apply_delta(
if old_computed.as_slice() != hash_sha256.as_slice() { if old_computed.as_slice() != hash_sha256.as_slice() {
return Err(RrdpError::DeltaTargetHashMismatch { rsync_uri: uri }.into()); return Err(RrdpError::DeltaTargetHashMismatch { rsync_uri: uri }.into());
} }
ops.push(RrdpDeltaOp::Delete { rsync_uri: uri }); let previous_hash = hex::encode(old_computed);
ops.push(RrdpDeltaOp::Delete {
rsync_uri: uri.clone(),
});
projection.push(DeltaProjectionEffect::Delete {
rsync_uri: uri,
previous_hash,
});
} }
} }
} }
store store
.apply_rrdp_delta(notification_uri, ops.as_slice()) .apply_rrdp_delta(notification_uri, ops.as_slice())
.map_err(|e| RrdpSyncError::Storage(e.to_string())) .map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
for effect in projection {
match effect {
DeltaProjectionEffect::Upsert { rsync_uri, bytes } => {
let current_hash = upsert_raw_by_hash_evidence(store, &rsync_uri, &bytes)
.map_err(RrdpSyncError::Storage)?;
put_repository_view_present(store, notification_uri, &rsync_uri, &current_hash)
.map_err(RrdpSyncError::Storage)?;
put_rrdp_source_member_present(
store,
notification_uri,
&session_id,
expected_serial,
&rsync_uri,
&current_hash,
)
.map_err(RrdpSyncError::Storage)?;
put_rrdp_uri_owner_active(
store,
notification_uri,
&session_id,
expected_serial,
&rsync_uri,
&current_hash,
)
.map_err(RrdpSyncError::Storage)?;
}
DeltaProjectionEffect::Delete {
rsync_uri,
previous_hash,
} => {
put_rrdp_source_member_withdrawn(
store,
notification_uri,
&session_id,
expected_serial,
&rsync_uri,
Some(previous_hash.clone()),
)
.map_err(RrdpSyncError::Storage)?;
if current_rrdp_owner_is(store, notification_uri, &rsync_uri)
.map_err(RrdpSyncError::Storage)?
{
put_repository_view_withdrawn(
store,
notification_uri,
&rsync_uri,
Some(previous_hash.clone()),
)
.map_err(RrdpSyncError::Storage)?;
put_rrdp_uri_owner_withdrawn(
store,
notification_uri,
&session_id,
expected_serial,
&rsync_uri,
Some(previous_hash),
)
.map_err(RrdpSyncError::Storage)?;
}
}
}
}
Ok(ops.len())
} }
fn apply_snapshot( fn apply_snapshot(
@ -1004,12 +1177,92 @@ fn apply_snapshot(
.decode(content_b64.as_bytes()) .decode(content_b64.as_bytes())
.map_err(|e| RrdpError::PublishBase64(e.to_string()))?; .map_err(|e| RrdpError::PublishBase64(e.to_string()))?;
ensure_rrdp_uri_can_be_owned_by(store, notification_uri, uri)
.map_err(RrdpSyncError::Storage)?;
published.push((uri.to_string(), bytes)); published.push((uri.to_string(), bytes));
} }
let previous_members: Vec<String> = store
.rrdp_object_index_iter(notification_uri)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?
.collect();
let new_set: std::collections::HashSet<&str> =
published.iter().map(|(uri, _)| uri.as_str()).collect();
let mut withdrawn: Vec<(String, Option<String>)> = Vec::new();
for old_uri in &previous_members {
if new_set.contains(old_uri.as_str()) {
continue;
}
let previous_hash = store
.get_repository_view_entry(old_uri)
.map_err(|e| RrdpSyncError::Storage(e.to_string()))?
.and_then(|entry| entry.current_hash)
.or_else(|| {
store
.get_raw(old_uri)
.ok()
.flatten()
.map(|bytes| compute_sha256_hex(&bytes))
});
withdrawn.push((old_uri.clone(), previous_hash));
}
store store
.apply_rrdp_snapshot(notification_uri, published.as_slice()) .apply_rrdp_snapshot(notification_uri, published.as_slice())
.map_err(|e| RrdpSyncError::Storage(e.to_string())) .map_err(|e| RrdpSyncError::Storage(e.to_string()))?;
let session_id = expected_session_id.to_string();
for (uri, bytes) in &published {
let current_hash =
upsert_raw_by_hash_evidence(store, uri, bytes).map_err(RrdpSyncError::Storage)?;
put_repository_view_present(store, notification_uri, uri, &current_hash)
.map_err(RrdpSyncError::Storage)?;
put_rrdp_source_member_present(
store,
notification_uri,
&session_id,
expected_serial,
uri,
&current_hash,
)
.map_err(RrdpSyncError::Storage)?;
put_rrdp_uri_owner_active(
store,
notification_uri,
&session_id,
expected_serial,
uri,
&current_hash,
)
.map_err(RrdpSyncError::Storage)?;
}
for (uri, previous_hash) in withdrawn {
put_rrdp_source_member_withdrawn(
store,
notification_uri,
&session_id,
expected_serial,
&uri,
previous_hash.clone(),
)
.map_err(RrdpSyncError::Storage)?;
if current_rrdp_owner_is(store, notification_uri, &uri).map_err(RrdpSyncError::Storage)? {
put_repository_view_withdrawn(store, notification_uri, &uri, previous_hash.clone())
.map_err(RrdpSyncError::Storage)?;
put_rrdp_uri_owner_withdrawn(
store,
notification_uri,
&session_id,
expected_serial,
&uri,
previous_hash,
)
.map_err(RrdpSyncError::Storage)?;
}
}
Ok(published.len())
} }
fn parse_rrdp_xml(xml: &[u8]) -> Result<roxmltree::Document<'_>, RrdpError> { fn parse_rrdp_xml(xml: &[u8]) -> Result<roxmltree::Document<'_>, RrdpError> {
@ -1477,6 +1730,34 @@ mod tests {
.expect("contains"), .expect("contains"),
"c added to rrdp repo index" "c added to rrdp repo index"
); );
let a_view = store
.get_repository_view_entry("rsync://example.net/repo/a.mft")
.expect("get a view")
.expect("a view exists");
assert_eq!(a_view.state, crate::storage::RepositoryViewState::Withdrawn);
let b_view = store
.get_repository_view_entry("rsync://example.net/repo/b.roa")
.expect("get b view")
.expect("b view exists");
assert_eq!(b_view.state, crate::storage::RepositoryViewState::Present);
assert_eq!(
b_view.current_hash.as_deref(),
Some(hex::encode(sha2::Sha256::digest(b"b2")).as_str())
);
let c_owner = store
.get_rrdp_uri_owner_record("rsync://example.net/repo/c.crl")
.expect("get c owner")
.expect("c owner exists");
assert_eq!(
c_owner.owner_state,
crate::storage::RrdpUriOwnerState::Active
);
let a_member = store
.get_rrdp_source_member_record(notif_uri, "rsync://example.net/repo/a.mft")
.expect("get a member")
.expect("a member exists");
assert!(!a_member.present);
} }
#[test] #[test]
@ -1695,6 +1976,41 @@ mod tests {
)); ));
} }
#[test]
fn sync_from_notification_snapshot_rejects_cross_source_owner_conflict() {
    // Two distinct RRDP sources (different notification URIs and session ids)
    // both try to publish the same rsync URI. The first source becomes the
    // active owner; the second source's snapshot must be rejected with an
    // owner-conflict storage error instead of silently overwriting.
    let tmp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(tmp.path()).expect("open rocksdb");
    let sid_a = "550e8400-e29b-41d4-a716-446655440000";
    let sid_b = "550e8400-e29b-41d4-a716-446655440001";
    // The contested rsync URI, published by both sources below.
    let uri = "rsync://example.net/repo/a.mft";
    let notif_a_uri = "https://example.net/a/notification.xml";
    let snapshot_a_uri = "https://example.net/a/snapshot.xml";
    let snapshot_a = snapshot_xml(sid_a, 1, &[(uri, b"a1")]);
    let snapshot_a_hash = hex::encode(sha2::Sha256::digest(&snapshot_a));
    let notif_a = notification_xml(sid_a, 1, snapshot_a_uri, &snapshot_a_hash);
    let fetcher_a = MapFetcher {
        map: HashMap::from([(snapshot_a_uri.to_string(), snapshot_a)]),
    };
    // Source A syncs first and takes active ownership of `uri`.
    sync_from_notification_snapshot(&store, notif_a_uri, &notif_a, &fetcher_a)
        .expect("seed source a");
    let notif_b_uri = "https://example.net/b/notification.xml";
    let snapshot_b_uri = "https://example.net/b/snapshot.xml";
    let snapshot_b = snapshot_xml(sid_b, 1, &[(uri, b"b1")]);
    let snapshot_b_hash = hex::encode(sha2::Sha256::digest(&snapshot_b));
    let notif_b = notification_xml(sid_b, 1, snapshot_b_uri, &snapshot_b_hash);
    let fetcher_b = MapFetcher {
        map: HashMap::from([(snapshot_b_uri.to_string(), snapshot_b)]),
    };
    // Source B's attempt to publish the same URI must fail as a Storage error
    // whose message names the ownership conflict.
    let err = sync_from_notification_snapshot(&store, notif_b_uri, &notif_b, &fetcher_b)
        .expect_err("cross-source overwrite must fail");
    assert!(matches!(err, RrdpSyncError::Storage(_)));
    assert!(err.to_string().contains("owner conflict"), "{err}");
}
#[test] #[test]
fn sync_from_notification_snapshot_applies_snapshot_and_stores_state() { fn sync_from_notification_snapshot_applies_snapshot_and_stores_state() {
let tmp = tempfile::tempdir().expect("tempdir"); let tmp = tempfile::tempdir().expect("tempdir");
@ -1743,6 +2059,46 @@ mod tests {
let state = RrdpState::decode(&state_bytes).expect("decode state"); let state = RrdpState::decode(&state_bytes).expect("decode state");
assert_eq!(state.session_id, sid); assert_eq!(state.session_id, sid);
assert_eq!(state.serial, serial); assert_eq!(state.serial, serial);
let source = store
.get_rrdp_source_record(notif_uri)
.expect("get rrdp source")
.expect("rrdp source exists");
assert_eq!(source.last_session_id.as_deref(), Some(sid));
assert_eq!(source.last_serial, Some(serial));
assert_eq!(
source.sync_state,
crate::storage::RrdpSourceSyncState::SnapshotOnly
);
let view = store
.get_repository_view_entry("rsync://example.net/repo/a.mft")
.expect("get repository view")
.expect("repository view exists");
assert_eq!(view.state, crate::storage::RepositoryViewState::Present);
assert_eq!(view.repository_source.as_deref(), Some(notif_uri));
let raw = store
.get_raw_by_hash_entry(hex::encode(sha2::Sha256::digest(b"mft-bytes")).as_str())
.expect("get raw_by_hash")
.expect("raw_by_hash exists");
assert!(
raw.origin_uris
.iter()
.any(|uri| uri == "rsync://example.net/repo/a.mft")
);
let member = store
.get_rrdp_source_member_record(notif_uri, "rsync://example.net/repo/a.mft")
.expect("get member")
.expect("member exists");
assert!(member.present);
let owner = store
.get_rrdp_uri_owner_record("rsync://example.net/repo/a.mft")
.expect("get owner")
.expect("owner exists");
assert_eq!(owner.notify_uri, notif_uri);
assert_eq!(owner.owner_state, crate::storage::RrdpUriOwnerState::Active);
} }
#[test] #[test]

View File

@ -0,0 +1,262 @@
use std::collections::BTreeSet;
use crate::storage::{
PackTime, RawByHashEntry, RepositoryViewEntry, RepositoryViewState, RocksStore,
RrdpSourceMemberRecord, RrdpSourceRecord, RrdpSourceSyncState, RrdpUriOwnerRecord,
RrdpUriOwnerState,
};
use sha2::Digest;
/// Guess an RPKI object type from the final `.`-separated suffix of a URI.
///
/// Returns the lowercased suffix when it is one of the known RPKI object
/// extensions (`cer`, `crl`, `mft`, `roa`, `asa`, `gbr`, `tal`, `xml`),
/// otherwise `None`.
pub fn infer_object_type_from_uri(uri: &str) -> Option<String> {
    const KNOWN_TYPES: [&str; 8] = ["cer", "crl", "mft", "roa", "asa", "gbr", "tal", "xml"];
    let suffix = uri.rsplit('.').next()?.to_ascii_lowercase();
    if KNOWN_TYPES.contains(&suffix.as_str()) {
        Some(suffix)
    } else {
        None
    }
}
/// Store `bytes` in the hash-indexed raw table and register `rsync_uri` as an
/// origin for that content.
///
/// Returns the content's sha256 hex digest. Fails with an error string when an
/// existing entry under the same digest carries different bytes (a collision)
/// or when the underlying store reports an error.
pub fn upsert_raw_by_hash_evidence(
    store: &RocksStore,
    rsync_uri: &str,
    bytes: &[u8],
) -> Result<String, String> {
    let digest_hex = compute_sha256_hex(bytes);
    let existing = store
        .get_raw_by_hash_entry(&digest_hex)
        .map_err(|e| e.to_string())?;
    let mut entry = existing
        .unwrap_or_else(|| RawByHashEntry::from_bytes(digest_hex.clone(), bytes.to_vec()));
    // Sanity check: identical digest must mean identical bytes.
    if entry.bytes != bytes {
        return Err(format!(
            "raw_by_hash collision for {rsync_uri}: same sha256 maps to different bytes"
        ));
    }
    // Merge the new origin URI in, keeping the list deduplicated and sorted.
    let mut origin_set: BTreeSet<String> = entry.origin_uris.into_iter().collect();
    origin_set.insert(rsync_uri.to_string());
    entry.origin_uris = origin_set.into_iter().collect();
    // First URI that yields a recognizable extension fixes the object type.
    if entry.object_type.is_none() {
        entry.object_type = infer_object_type_from_uri(rsync_uri);
    }
    store
        .put_raw_by_hash_entry(&entry)
        .map_err(|e| e.to_string())?;
    Ok(digest_hex)
}
/// Record `rsync_uri` as currently published (Present) in the repository view,
/// attributed to `repository_source` and carrying the given content hash.
pub fn put_repository_view_present(
    store: &RocksStore,
    repository_source: &str,
    rsync_uri: &str,
    current_hash: &str,
) -> Result<(), String> {
    store
        .put_repository_view_entry(&RepositoryViewEntry {
            state: RepositoryViewState::Present,
            rsync_uri: rsync_uri.to_string(),
            current_hash: Some(current_hash.to_string()),
            repository_source: Some(repository_source.to_string()),
            object_type: infer_object_type_from_uri(rsync_uri),
        })
        .map_err(|e| e.to_string())
}
/// Record `rsync_uri` as Withdrawn in the repository view, attributed to
/// `repository_source` and carrying the caller-supplied last-known hash
/// (if any).
pub fn put_repository_view_withdrawn(
    store: &RocksStore,
    repository_source: &str,
    rsync_uri: &str,
    current_hash: Option<String>,
) -> Result<(), String> {
    store
        .put_repository_view_entry(&RepositoryViewEntry {
            state: RepositoryViewState::Withdrawn,
            rsync_uri: rsync_uri.to_string(),
            current_hash,
            repository_source: Some(repository_source.to_string()),
            object_type: infer_object_type_from_uri(rsync_uri),
        })
        .map_err(|e| e.to_string())
}
/// Reject a write to `rsync_uri` when it is actively owned by a different
/// RRDP notification source.
///
/// Accepts the write when the URI is unowned, when its ownership is not
/// Active, or when the active owner is `notification_uri` itself.
pub fn ensure_rrdp_uri_can_be_owned_by(
    store: &RocksStore,
    notification_uri: &str,
    rsync_uri: &str,
) -> Result<(), String> {
    let owner = store
        .get_rrdp_uri_owner_record(rsync_uri)
        .map_err(|e| e.to_string())?;
    match owner {
        Some(existing)
            if existing.owner_state == RrdpUriOwnerState::Active
                && existing.notify_uri != notification_uri =>
        {
            Err(format!(
                "RRDP source owner conflict for {rsync_uri}: current owner {} but incoming source {}",
                existing.notify_uri, notification_uri
            ))
        }
        _ => Ok(()),
    }
}
/// Returns `true` iff `notification_uri` is the *active* owner of `rsync_uri`.
/// A missing owner record or a withdrawn ownership yields `false`.
pub fn current_rrdp_owner_is(
    store: &RocksStore,
    notification_uri: &str,
    rsync_uri: &str,
) -> Result<bool, String> {
    let record = store
        .get_rrdp_uri_owner_record(rsync_uri)
        .map_err(|e| e.to_string())?;
    Ok(record.map_or(false, |owner| {
        owner.owner_state == RrdpUriOwnerState::Active && owner.notify_uri == notification_uri
    }))
}
/// Record that `rsync_uri` is currently published by the RRDP source
/// `notification_uri`, as last confirmed at (`session_id`, `serial`).
pub fn put_rrdp_source_member_present(
    store: &RocksStore,
    notification_uri: &str,
    session_id: &str,
    serial: u64,
    rsync_uri: &str,
    current_hash: &str,
) -> Result<(), String> {
    store
        .put_rrdp_source_member_record(&RrdpSourceMemberRecord {
            present: true,
            notify_uri: notification_uri.to_string(),
            rsync_uri: rsync_uri.to_string(),
            current_hash: Some(current_hash.to_string()),
            object_type: infer_object_type_from_uri(rsync_uri),
            last_confirmed_session_id: session_id.to_string(),
            last_confirmed_serial: serial,
            last_changed_at: now_pack_time(),
        })
        .map_err(|e| e.to_string())
}
/// Record that `rsync_uri` is no longer published by the RRDP source
/// `notification_uri`, as last confirmed at (`session_id`, `serial`).
/// `current_hash` carries the last-known content hash when available.
pub fn put_rrdp_source_member_withdrawn(
    store: &RocksStore,
    notification_uri: &str,
    session_id: &str,
    serial: u64,
    rsync_uri: &str,
    current_hash: Option<String>,
) -> Result<(), String> {
    store
        .put_rrdp_source_member_record(&RrdpSourceMemberRecord {
            present: false,
            notify_uri: notification_uri.to_string(),
            rsync_uri: rsync_uri.to_string(),
            current_hash,
            object_type: infer_object_type_from_uri(rsync_uri),
            last_confirmed_session_id: session_id.to_string(),
            last_confirmed_serial: serial,
            last_changed_at: now_pack_time(),
        })
        .map_err(|e| e.to_string())
}
/// Mark `notification_uri` as the Active owner of `rsync_uri`, recording the
/// content hash and the (`session_id`, `serial`) at which ownership was
/// confirmed.
pub fn put_rrdp_uri_owner_active(
    store: &RocksStore,
    notification_uri: &str,
    session_id: &str,
    serial: u64,
    rsync_uri: &str,
    current_hash: &str,
) -> Result<(), String> {
    store
        .put_rrdp_uri_owner_record(&RrdpUriOwnerRecord {
            owner_state: RrdpUriOwnerState::Active,
            rsync_uri: rsync_uri.to_string(),
            notify_uri: notification_uri.to_string(),
            current_hash: Some(current_hash.to_string()),
            last_confirmed_session_id: session_id.to_string(),
            last_confirmed_serial: serial,
            last_changed_at: now_pack_time(),
        })
        .map_err(|e| e.to_string())
}
/// Marks the RRDP ownership of `rsync_uri` by `notification_uri` as withdrawn,
/// confirmed at (`session_id`, `serial`). The last known hash may be kept.
pub fn put_rrdp_uri_owner_withdrawn(
    store: &RocksStore,
    notification_uri: &str,
    session_id: &str,
    serial: u64,
    rsync_uri: &str,
    current_hash: Option<String>,
) -> Result<(), String> {
    let owner = RrdpUriOwnerRecord {
        rsync_uri: rsync_uri.to_string(),
        notify_uri: notification_uri.to_string(),
        current_hash,
        last_confirmed_session_id: session_id.to_string(),
        last_confirmed_serial: serial,
        last_changed_at: now_pack_time(),
        owner_state: RrdpUriOwnerState::Withdrawn,
    };
    match store.put_rrdp_uri_owner_record(&owner) {
        Ok(()) => Ok(()),
        Err(e) => Err(e.to_string()),
    }
}
/// Updates (or creates) the per-source RRDP record after a successful sync:
/// advances session/serial, bumps `last_seen_at`/`last_sync_at`, stores the
/// snapshot URI/hash when provided, and clears `last_error`.
/// `first_seen_at` is preserved for an existing record.
pub fn update_rrdp_source_record_on_success(
    store: &RocksStore,
    notification_uri: &str,
    session_id: &str,
    serial: u64,
    sync_state: RrdpSourceSyncState,
    last_snapshot_uri: Option<&str>,
    last_snapshot_hash_hex: Option<&str>,
) -> Result<(), String> {
    let now = now_pack_time();
    // Load the existing record, or seed a fresh one anchored at `now`.
    let mut record = store
        .get_rrdp_source_record(notification_uri)
        .map_err(|e| e.to_string())?
        .unwrap_or_else(|| RrdpSourceRecord {
            notify_uri: notification_uri.to_string(),
            last_session_id: None,
            last_serial: None,
            first_seen_at: now.clone(),
            last_seen_at: now.clone(),
            last_sync_at: None,
            sync_state,
            last_snapshot_uri: None,
            last_snapshot_hash: None,
            last_error: None,
        });
    record.last_session_id = Some(session_id.to_string());
    record.last_serial = Some(serial);
    record.last_seen_at = now.clone();
    record.last_sync_at = Some(now);
    record.sync_state = sync_state;
    record.last_snapshot_uri = last_snapshot_uri.map(str::to_string);
    record.last_snapshot_hash = last_snapshot_hash_hex.map(str::to_string);
    // A successful sync always clears any previously recorded error.
    record.last_error = None;
    store
        .put_rrdp_source_record(&record)
        .map_err(|e| e.to_string())
}
/// Lowercase hex encoding of the SHA-256 digest of `bytes`.
pub fn compute_sha256_hex(bytes: &[u8]) -> String {
    let digest = sha2::Sha256::digest(bytes);
    hex::encode(digest)
}
/// Current UTC wall-clock time, converted into the store's `PackTime` type.
pub fn now_pack_time() -> PackTime {
    let utc_now = time::OffsetDateTime::now_utc();
    PackTime::from_utc_offset_datetime(utc_now)
}

File diff suppressed because it is too large Load Diff

View File

@ -4,6 +4,7 @@ pub mod cert_path;
pub mod from_tal; pub mod from_tal;
pub mod manifest; pub mod manifest;
pub mod objects; pub mod objects;
pub mod publication_point;
pub mod run; pub mod run;
pub mod run_tree_from_tal; pub mod run_tree_from_tal;
pub mod tree; pub mod tree;

View File

@ -10,8 +10,10 @@ use crate::data_model::roa::{IpPrefix, RoaAfi, RoaDecodeError, RoaObject, RoaVal
use crate::data_model::signed_object::SignedObjectVerifyError; use crate::data_model::signed_object::SignedObjectVerifyError;
use crate::policy::{Policy, SignedObjectFailurePolicy}; use crate::policy::{Policy, SignedObjectFailurePolicy};
use crate::report::{RfcRef, Warning}; use crate::report::{RfcRef, Warning};
use crate::storage::{FetchCachePpPack, PackFile}; use crate::storage::PackFile;
use crate::validation::cert_path::{CertPathError, validate_ee_cert_path_with_prevalidated_issuer}; use crate::validation::cert_path::{CertPathError, validate_ee_cert_path_with_prevalidated_issuer};
use crate::validation::manifest::PublicationPointData;
use crate::validation::publication_point::PublicationPointSnapshot;
use x509_parser::prelude::FromDer; use x509_parser::prelude::FromDer;
use x509_parser::x509::SubjectPublicKeyInfo; use x509_parser::x509::SubjectPublicKeyInfo;
@ -81,10 +83,10 @@ pub struct ObjectsStats {
pub publication_point_dropped: bool, pub publication_point_dropped: bool,
} }
/// Process objects from a fetch_cache_pp publication point pack using a known issuer CA certificate /// Process objects from a publication point snapshot using a known issuer CA certificate
/// and its effective resources (resolved via the resource-path, RFC 6487 §7.2). /// and its effective resources (resolved via the resource-path, RFC 6487 §7.2).
pub fn process_fetch_cache_pp_pack_for_issuer( pub fn process_publication_point_for_issuer<P: PublicationPointData>(
pack: &FetchCachePpPack, publication_point: &P,
policy: &Policy, policy: &Policy,
issuer_ca_der: &[u8], issuer_ca_der: &[u8],
issuer_ca_rsync_uri: Option<&str>, issuer_ca_rsync_uri: Option<&str>,
@ -93,23 +95,24 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
validation_time: time::OffsetDateTime, validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>, timing: Option<&TimingHandle>,
) -> ObjectsOutput { ) -> ObjectsOutput {
let manifest_rsync_uri = publication_point.manifest_rsync_uri();
let manifest_bytes = publication_point.manifest_bytes();
let locked_files = publication_point.files();
let mut warnings: Vec<Warning> = Vec::new(); let mut warnings: Vec<Warning> = Vec::new();
let mut stats = ObjectsStats::default(); let mut stats = ObjectsStats::default();
stats.roa_total = pack stats.roa_total = locked_files
.files
.iter() .iter()
.filter(|f| f.rsync_uri.ends_with(".roa")) .filter(|f| f.rsync_uri.ends_with(".roa"))
.count(); .count();
stats.aspa_total = pack stats.aspa_total = locked_files
.files
.iter() .iter()
.filter(|f| f.rsync_uri.ends_with(".asa")) .filter(|f| f.rsync_uri.ends_with(".asa"))
.count(); .count();
let mut audit: Vec<ObjectAuditEntry> = Vec::new(); let mut audit: Vec<ObjectAuditEntry> = Vec::new();
// Enforce that `manifest_bytes` is actually a manifest object. // Enforce that `manifest_bytes` is actually a manifest object.
let _manifest = let _manifest = ManifestObject::decode_der(manifest_bytes)
ManifestObject::decode_der(&pack.manifest_bytes).expect("fetch_cache_pp manifest decodes"); .expect("publication point snapshot manifest decodes");
// Decode issuer CA once; if it fails we cannot validate ROA/ASPA EE certificates. // Decode issuer CA once; if it fails we cannot validate ROA/ASPA EE certificates.
let issuer_ca = match ResourceCertificate::decode_der(issuer_ca_der) { let issuer_ca = match ResourceCertificate::decode_der(issuer_ca_der) {
@ -121,9 +124,9 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
"dropping publication point: issuer CA decode failed: {e}" "dropping publication point: issuer CA decode failed: {e}"
)) ))
.with_rfc_refs(&[RfcRef("RFC 6487 §7.2"), RfcRef("RFC 5280 §6.1")]) .with_rfc_refs(&[RfcRef("RFC 6487 §7.2"), RfcRef("RFC 5280 §6.1")])
.with_context(&pack.manifest_rsync_uri), .with_context(manifest_rsync_uri),
); );
for f in &pack.files { for f in locked_files {
if f.rsync_uri.ends_with(".roa") { if f.rsync_uri.ends_with(".roa") {
audit.push(ObjectAuditEntry { audit.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(), rsync_uri: f.rsync_uri.clone(),
@ -163,7 +166,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
rem.len() rem.len()
)) ))
.with_rfc_refs(&[RfcRef("RFC 5280 §4.1.2.7")]) .with_rfc_refs(&[RfcRef("RFC 5280 §4.1.2.7")])
.with_context(&pack.manifest_rsync_uri), .with_context(manifest_rsync_uri),
); );
return ObjectsOutput { return ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -180,7 +183,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
"dropping publication point: issuer SPKI parse failed: {e}" "dropping publication point: issuer SPKI parse failed: {e}"
)) ))
.with_rfc_refs(&[RfcRef("RFC 5280 §4.1.2.7")]) .with_rfc_refs(&[RfcRef("RFC 5280 §4.1.2.7")])
.with_context(&pack.manifest_rsync_uri), .with_context(manifest_rsync_uri),
); );
return ObjectsOutput { return ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -192,8 +195,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
} }
}; };
let mut crl_cache: std::collections::HashMap<String, CachedIssuerCrl> = pack let mut crl_cache: std::collections::HashMap<String, CachedIssuerCrl> = locked_files
.files
.iter() .iter()
.filter(|f| f.rsync_uri.ends_with(".crl")) .filter(|f| f.rsync_uri.ends_with(".crl"))
.map(|f| { .map(|f| {
@ -207,23 +209,26 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
let issuer_resources_index = let issuer_resources_index =
build_issuer_resources_index(issuer_effective_ip, issuer_effective_as); build_issuer_resources_index(issuer_effective_ip, issuer_effective_as);
// If the pack has signed objects but no CRLs at all, we cannot validate any embedded EE // If the snapshot has signed objects but no CRLs at all, we cannot validate any embedded EE
// certificate paths deterministically (EE CRLDP must reference an rsync URI in the pack). // certificate paths deterministically (EE CRLDP must reference an rsync URI in the snapshot).
if crl_cache.is_empty() && (stats.roa_total > 0 || stats.aspa_total > 0) { if crl_cache.is_empty() && (stats.roa_total > 0 || stats.aspa_total > 0) {
stats.publication_point_dropped = true; stats.publication_point_dropped = true;
warnings.push( warnings.push(
Warning::new("dropping publication point: no CRL files in fetch_cache_pp") Warning::new("dropping publication point: no CRL files in validated publication point")
.with_rfc_refs(&[RfcRef("RFC 6487 §4.8.6"), RfcRef("RFC 9286 §7")]) .with_rfc_refs(&[RfcRef("RFC 6487 §4.8.6"), RfcRef("RFC 9286 §7")])
.with_context(&pack.manifest_rsync_uri), .with_context(manifest_rsync_uri),
); );
for f in &pack.files { for f in locked_files {
if f.rsync_uri.ends_with(".roa") { if f.rsync_uri.ends_with(".roa") {
audit.push(ObjectAuditEntry { audit.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(), rsync_uri: f.rsync_uri.clone(),
sha256_hex: sha256_hex_from_32(&f.sha256), sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Roa, kind: AuditObjectKind::Roa,
result: AuditObjectResult::Skipped, result: AuditObjectResult::Skipped,
detail: Some("skipped due to missing CRL files in fetch_cache_pp".to_string()), detail: Some(
"skipped due to missing CRL files in validated publication point"
.to_string(),
),
}); });
} else if f.rsync_uri.ends_with(".asa") { } else if f.rsync_uri.ends_with(".asa") {
audit.push(ObjectAuditEntry { audit.push(ObjectAuditEntry {
@ -231,7 +236,10 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
sha256_hex: sha256_hex_from_32(&f.sha256), sha256_hex: sha256_hex_from_32(&f.sha256),
kind: AuditObjectKind::Aspa, kind: AuditObjectKind::Aspa,
result: AuditObjectResult::Skipped, result: AuditObjectResult::Skipped,
detail: Some("skipped due to missing CRL files in fetch_cache_pp".to_string()), detail: Some(
"skipped due to missing CRL files in validated publication point"
.to_string(),
),
}); });
} }
} }
@ -247,7 +255,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
let mut vrps: Vec<Vrp> = Vec::new(); let mut vrps: Vec<Vrp> = Vec::new();
let mut aspas: Vec<AspaAttestation> = Vec::new(); let mut aspas: Vec<AspaAttestation> = Vec::new();
for (idx, file) in pack.files.iter().enumerate() { for (idx, file) in locked_files.iter().enumerate() {
if file.rsync_uri.ends_with(".roa") { if file.rsync_uri.ends_with(".roa") {
let _t = timing.as_ref().map(|t| t.span_phase("objects_roa_total")); let _t = timing.as_ref().map(|t| t.span_phase("objects_roa_total"));
match process_roa_with_issuer( match process_roa_with_issuer(
@ -300,7 +308,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
result: AuditObjectResult::Error, result: AuditObjectResult::Error,
detail: Some(e.to_string()), detail: Some(e.to_string()),
}); });
for f in pack.files.iter().skip(idx + 1) { for f in locked_files.iter().skip(idx + 1) {
if f.rsync_uri.ends_with(".roa") { if f.rsync_uri.ends_with(".roa") {
audit.push(ObjectAuditEntry { audit.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(), rsync_uri: f.rsync_uri.clone(),
@ -333,7 +341,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
file.rsync_uri file.rsync_uri
)) ))
.with_rfc_refs(&refs) .with_rfc_refs(&refs)
.with_context(&pack.manifest_rsync_uri), .with_context(manifest_rsync_uri),
); );
return ObjectsOutput { return ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -397,7 +405,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
result: AuditObjectResult::Error, result: AuditObjectResult::Error,
detail: Some(e.to_string()), detail: Some(e.to_string()),
}); });
for f in pack.files.iter().skip(idx + 1) { for f in locked_files.iter().skip(idx + 1) {
if f.rsync_uri.ends_with(".roa") { if f.rsync_uri.ends_with(".roa") {
audit.push(ObjectAuditEntry { audit.push(ObjectAuditEntry {
rsync_uri: f.rsync_uri.clone(), rsync_uri: f.rsync_uri.clone(),
@ -430,7 +438,7 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
file.rsync_uri file.rsync_uri
)) ))
.with_rfc_refs(&refs) .with_rfc_refs(&refs)
.with_context(&pack.manifest_rsync_uri), .with_context(manifest_rsync_uri),
); );
return ObjectsOutput { return ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -453,6 +461,28 @@ pub fn process_fetch_cache_pp_pack_for_issuer(
audit, audit,
} }
} }
/// Compatibility wrapper that processes a publication point snapshot.
pub fn process_publication_point_snapshot_for_issuer(
pack: &PublicationPointSnapshot,
policy: &Policy,
issuer_ca_der: &[u8],
issuer_ca_rsync_uri: Option<&str>,
issuer_effective_ip: Option<&crate::data_model::rc::IpResourceSet>,
issuer_effective_as: Option<&crate::data_model::rc::AsResourceSet>,
validation_time: time::OffsetDateTime,
timing: Option<&TimingHandle>,
) -> ObjectsOutput {
process_publication_point_for_issuer(
pack,
policy,
issuer_ca_der,
issuer_ca_rsync_uri,
issuer_effective_ip,
issuer_effective_as,
validation_time,
timing,
)
}
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
enum ObjectValidateError { enum ObjectValidateError {
@ -480,12 +510,12 @@ enum ObjectValidateError {
MissingCrlDpUris, MissingCrlDpUris,
#[error( #[error(
"no CRL available in fetch_cache_pp (cannot validate certificates) (RFC 9286 §7; RFC 6487 §4.8.6)" "no CRL available in publication point snapshot (cannot validate certificates) (RFC 9286 §7; RFC 6487 §4.8.6)"
)] )]
MissingCrlInPack, MissingCrlInPack,
#[error( #[error(
"CRL referenced by CRLDistributionPoints not found in fetch_cache_pp: {0} (RFC 6487 §4.8.6; RFC 9286 §4.2.1)" "CRL referenced by CRLDistributionPoints not found in publication point snapshot: {0} (RFC 6487 §4.8.6; RFC 9286 §4.2.1)"
)] )]
CrlNotFound(String), CrlNotFound(String),
@ -1283,7 +1313,7 @@ mod tests {
} }
#[test] #[test]
fn choose_crl_for_certificate_reports_missing_crl_in_pack() { fn choose_crl_for_certificate_reports_missing_crl_in_snapshot() {
let roa_der = let roa_der =
fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa"); fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
let roa = RoaObject::decode_der(&roa_der).expect("decode roa"); let roa = RoaObject::decode_der(&roa_der).expect("decode roa");
@ -1335,7 +1365,7 @@ mod tests {
} }
#[test] #[test]
fn choose_crl_for_certificate_reports_not_found_when_crldp_does_not_match_pack() { fn choose_crl_for_certificate_reports_not_found_when_crldp_does_not_match_snapshot() {
let roa_der = let roa_der =
fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa"); fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
let roa = RoaObject::decode_der(&roa_der).expect("decode roa"); let roa = RoaObject::decode_der(&roa_der).expect("decode roa");
@ -1421,4 +1451,225 @@ mod tests {
let err = validate_ee_resources_subset(ee, Some(&issuer_ip), None, &idx).unwrap_err(); let err = validate_ee_resources_subset(ee, Some(&issuer_ip), None, &idx).unwrap_err();
assert!(matches!(err, ObjectValidateError::EeResourcesNotSubset)); assert!(matches!(err, ObjectValidateError::EeResourcesNotSubset));
} }
// Each CRL-selection failure mode maps to its own set of extra RFC references
// for the emitted warning; MissingCrlInPack gets none extra (its Display text
// already carries the RFC citations).
#[test]
fn extra_rfc_refs_for_crl_selection_distinguishes_crl_errors() {
    assert_eq!(
        extra_rfc_refs_for_crl_selection(&ObjectValidateError::MissingCrlDpUris),
        RFC_CRLDP
    );
    assert_eq!(
        extra_rfc_refs_for_crl_selection(&ObjectValidateError::CrlNotFound(
            "rsync://example.test/x.crl".to_string(),
        )),
        RFC_CRLDP_AND_LOCKED_PACK
    );
    assert!(
        extra_rfc_refs_for_crl_selection(&ObjectValidateError::MissingCrlInPack).is_empty()
    );
}
// AS-resource subset helpers: strict (choice-based) and indexed (merged
// interval) variants must agree on success, None/Inherit handling, and
// out-of-range failures.
#[test]
fn as_subset_helpers_cover_success_and_failure_paths() {
    // Child resources: AS5 plus AS7..9; parent covers AS1..10.
    let child = AsIdentifierChoice::AsIdsOrRanges(vec![
        AsIdOrRange::Id(5),
        AsIdOrRange::Range { min: 7, max: 9 },
    ]);
    let parent =
        AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Range { min: 1, max: 10 }]);
    let parent_intervals = [(1, 10)];
    // None child is trivially a subset; None parent never covers a child.
    assert!(as_choice_subset(None, Some(&parent)));
    assert!(!as_choice_subset(Some(&child), None));
    // Inherit on either side is rejected by the strict helper.
    assert!(!as_choice_subset(
        Some(&AsIdentifierChoice::Inherit),
        Some(&parent)
    ));
    assert!(!as_choice_subset(
        Some(&child),
        Some(&AsIdentifierChoice::Inherit)
    ));
    assert!(as_choice_subset(Some(&child), Some(&parent)));
    // Indexed variant mirrors the same rules against merged intervals.
    assert!(as_choice_subset_indexed(None, Some(&parent_intervals)));
    assert!(!as_choice_subset_indexed(Some(&child), None));
    assert!(!as_choice_subset_indexed(
        Some(&AsIdentifierChoice::Inherit),
        Some(&parent_intervals),
    ));
    assert!(as_choice_subset_indexed(
        Some(&child),
        Some(&parent_intervals)
    ));
    // AS11..12 falls outside the parent's 1..10 interval.
    assert!(!as_choice_subset_indexed(
        Some(&AsIdentifierChoice::AsIdsOrRanges(vec![
            AsIdOrRange::Range { min: 11, max: 12 }
        ])),
        Some(&parent_intervals),
    ));
    // Full AsResourceSet checks exercise both asnum and rdi components.
    let child_set = AsResourceSet {
        asnum: Some(child.clone()),
        rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![AsIdOrRange::Id(42)])),
    };
    let parent_set = AsResourceSet {
        asnum: Some(parent.clone()),
        rdi: Some(AsIdentifierChoice::AsIdsOrRanges(vec![
            AsIdOrRange::Range { min: 40, max: 50 },
        ])),
    };
    assert!(as_resources_is_subset(&child_set, &parent_set));
    assert!(as_resources_is_subset_indexed(
        &child_set,
        &parent_set,
        &IssuerResourcesIndex {
            asnum: Some(vec![(1, 10)]),
            rdi: Some(vec![(40, 50)]),
            ..IssuerResourcesIndex::default()
        },
    ));
}
// IP-resource subset helpers: the strict interval conversion must reject
// Inherit, and the indexed subset check must agree with the plain one.
#[test]
fn ip_subset_helpers_cover_strict_and_indexed_paths() {
    // Parent: IPv4 10.0.0.0/8 plus 192.0.2.0-192.0.2.255; IPv6 inherits.
    let parent = IpResourceSet {
        families: vec![
            IpAddressFamily {
                afi: Afi::Ipv4,
                choice: IpAddressChoice::AddressesOrRanges(vec![
                    IpAddressOrRange::Prefix(IpPrefix {
                        afi: Afi::Ipv4,
                        prefix_len: 8,
                        addr: vec![10, 0, 0, 0],
                    }),
                    IpAddressOrRange::Range(IpAddressRange {
                        min: vec![192, 0, 2, 0],
                        max: vec![192, 0, 2, 255],
                    }),
                ]),
            },
            IpAddressFamily {
                afi: Afi::Ipv6,
                choice: IpAddressChoice::Inherit,
            },
        ],
    };
    // Child: IPv4 10.1.0.0/16 plus 192.0.2.10-192.0.2.20 — inside the parent.
    let child = IpResourceSet {
        families: vec![IpAddressFamily {
            afi: Afi::Ipv4,
            choice: IpAddressChoice::AddressesOrRanges(vec![
                IpAddressOrRange::Prefix(IpPrefix {
                    afi: Afi::Ipv4,
                    prefix_len: 16,
                    addr: vec![10, 1, 0, 0],
                }),
                IpAddressOrRange::Range(IpAddressRange {
                    min: vec![192, 0, 2, 10],
                    max: vec![192, 0, 2, 20],
                }),
            ]),
        }],
    };
    // Inherit-only set: legal for the lenient path, an error for the strict one.
    let strict_bad = IpResourceSet {
        families: vec![IpAddressFamily {
            afi: Afi::Ipv4,
            choice: IpAddressChoice::Inherit,
        }],
    };
    assert!(ip_resources_is_subset(&child, &parent));
    assert!(ip_resources_to_merged_intervals(&parent).contains_key(&AfiKey::V4));
    assert!(ip_resources_to_merged_intervals_strict(&child).is_ok());
    assert!(ip_resources_to_merged_intervals_strict(&strict_bad).is_err());
    // Build the issuer index: v4 intervals present, v6 absent (Inherit),
    // asnum present, rdi absent (Inherit).
    let idx = build_issuer_resources_index(
        Some(&parent),
        Some(&AsResourceSet {
            asnum: Some(AsIdentifierChoice::AsIdsOrRanges(vec![
                AsIdOrRange::Range {
                    min: 64500,
                    max: 64510,
                },
            ])),
            rdi: Some(AsIdentifierChoice::Inherit),
        }),
    );
    assert!(idx.ip_v4.is_some());
    assert!(idx.ip_v6.is_none());
    assert!(idx.asnum.is_some());
    assert!(idx.rdi.is_none());
    assert!(ip_resources_is_subset_indexed(&child, &parent, &idx));
    assert!(!ip_resources_is_subset_indexed(&strict_bad, &parent, &idx));
}
// Byte-interval coverage helpers plus prefix/byte arithmetic edge cases.
#[test]
fn interval_and_byte_helpers_cover_edge_cases() {
    // Parent interval 0.0.0.0 - 0.0.0.10 (as raw address bytes).
    let parent = vec![(vec![0, 0, 0, 0], vec![0, 0, 0, 10])];
    assert!(interval_is_covered(&parent, &[0, 0, 0, 1], &[0, 0, 0, 2]));
    assert!(!interval_is_covered(
        &parent,
        &[0, 0, 0, 11],
        &[0, 0, 0, 12]
    ));
    assert!(intervals_are_covered(
        &parent,
        &[(vec![0, 0, 0, 1], vec![0, 0, 0, 2])]
    ));
    // Straddling the parent's upper bound must fail.
    assert!(!intervals_are_covered(
        &parent,
        &[(vec![0, 0, 0, 9], vec![0, 0, 0, 11])],
    ));
    // A /24 expands to its full host range regardless of host bits in `addr`.
    let prefix = RcIpPrefix {
        afi: Afi::Ipv4,
        prefix_len: 24,
        addr: vec![203, 0, 113, 7],
    };
    assert_eq!(
        prefix_to_range(&prefix),
        (vec![203, 0, 113, 0], vec![203, 0, 113, 255])
    );
    // Byte-string increment carries across octet boundaries.
    assert_eq!(increment_bytes(&[0, 0, 0, 255]), vec![0, 0, 1, 0]);
    assert!(bytes_is_next(&[0, 0, 1, 0], &[0, 0, 0, 255]));
    // Different lengths can never be adjacent.
    assert!(!bytes_is_next(&[1, 2], &[1]));
}
// Interval merging: empty input, merging of sorted adjacent/overlapping
// ranges, and AS-choice-to-interval conversion with a non-adjacent gap.
#[test]
fn merged_interval_helpers_cover_empty_and_break_paths() {
    // Empty input stays empty.
    let mut empty: Vec<(Vec<u8>, Vec<u8>)> = Vec::new();
    merge_ip_intervals_in_place(&mut empty);
    assert!(empty.is_empty());
    // Three touching/adjacent ranges collapse to one after sorting by start.
    let mut v = vec![
        (vec![0, 0, 0, 20], vec![0, 0, 0, 30]),
        (vec![0, 0, 0, 0], vec![0, 0, 0, 10]),
        (vec![0, 0, 0, 11], vec![0, 0, 0, 19]),
    ];
    v.sort_by(|(a, _), (b, _)| a.cmp(b));
    merge_ip_intervals_in_place(&mut v);
    assert_eq!(v, vec![(vec![0, 0, 0, 0], vec![0, 0, 0, 30])]);
    // AS1 merges with 2..3; the 7..9 range stays separate (gap at 4..6).
    assert_eq!(
        as_choice_to_merged_intervals(&AsIdentifierChoice::AsIdsOrRanges(vec![
            AsIdOrRange::Id(1),
            AsIdOrRange::Range { min: 2, max: 3 },
            AsIdOrRange::Range { min: 7, max: 9 },
        ])),
        vec![(1, 3), (7, 9)]
    );
    assert!(as_interval_is_covered(&[(1, 3), (7, 9)], 2, 3));
    assert!(!as_interval_is_covered(&[(7, 9)], 2, 3));
}
// ROA output helpers: every VRP derived from a ROA carries the ROA's ASN,
// and AFI enum values render to the expected strings.
#[test]
fn roa_output_helpers_cover_vrps_and_afi_strings() {
    let roa_der =
        fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
    let roa = RoaObject::decode_der(&roa_der).expect("decode roa");
    let vrps = roa_to_vrps(&roa);
    assert!(!vrps.is_empty());
    assert!(vrps.iter().all(|vrp| vrp.asn == roa.roa.as_id));
    assert_eq!(roa_afi_to_string(RoaAfi::Ipv4), "ipv4");
    assert_eq!(roa_afi_to_string(RoaAfi::Ipv6), "ipv6");
}
} }

View File

@ -0,0 +1,18 @@
use crate::storage::{PackFile, PackTime};
/// In-memory snapshot of a validated publication point: the manifest object
/// plus the set of files it locks.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct PublicationPointSnapshot {
    // Snapshot format version; see `FORMAT_VERSION_V1`.
    pub format_version: u32,
    // rsync URI of the manifest object this snapshot was built from.
    pub manifest_rsync_uri: String,
    // rsync URI of the publication point itself.
    pub publication_point_rsync_uri: String,
    // Manifest number — big-endian byte encoding presumed from the name; TODO confirm with writer.
    pub manifest_number_be: Vec<u8>,
    // Manifest validity window (thisUpdate / nextUpdate).
    pub this_update: PackTime,
    pub next_update: PackTime,
    // When this snapshot was verified/built.
    pub verified_at: PackTime,
    // Raw DER bytes of the manifest (must decode as a manifest object).
    pub manifest_bytes: Vec<u8>,
    // Files belonging to this publication point (URI + sha256 per `PackFile`).
    pub files: Vec<PackFile>,
}
impl PublicationPointSnapshot {
    /// Current value written into `format_version` for newly built snapshots.
    pub const FORMAT_VERSION_V1: u32 = 1;
}

View File

@ -1,34 +1,35 @@
use crate::data_model::rc::{AsResourceSet, IpResourceSet}; use crate::data_model::rc::{AsResourceSet, IpResourceSet};
use crate::fetch::rsync::RsyncFetcher; use crate::fetch::rsync::RsyncFetcher;
use crate::policy::Policy; use crate::policy::Policy;
use crate::storage::{FetchCachePpKey, RocksStore}; use crate::report::Warning;
use crate::sync::repo::{RepoSyncResult, sync_publication_point}; use crate::storage::RocksStore;
use crate::sync::rrdp::Fetcher as HttpFetcher; use crate::sync::rrdp::Fetcher as HttpFetcher;
use crate::validation::manifest::{PublicationPointResult, process_manifest_publication_point}; use crate::validation::manifest::PublicationPointSource;
use crate::validation::objects::{ObjectsOutput, process_fetch_cache_pp_pack_for_issuer}; use crate::validation::objects::ObjectsOutput;
use crate::validation::tree::{CaInstanceHandle, PublicationPointRunner};
use crate::validation::tree_runner::Rpkiv1PublicationPointRunner;
use std::collections::HashMap;
use std::sync::Mutex;
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct RunOutput { pub struct RunOutput {
pub repo_sync: RepoSyncResult, pub publication_point_source: PublicationPointSource,
pub publication_point: PublicationPointResult, pub publication_point_warnings: Vec<Warning>,
pub objects: ObjectsOutput, pub objects: ObjectsOutput,
} }
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
pub enum RunError { pub enum RunError {
#[error("repo sync failed: {0}")] #[error("publication point runner failed: {0}")]
RepoSync(#[from] crate::sync::repo::RepoSyncError), Runner(String),
#[error("manifest processing failed: {0}")]
Manifest(#[from] crate::validation::manifest::ManifestProcessError),
} }
/// v1 serial offline-friendly end-to-end execution for a single publication point. /// Offline-friendly end-to-end execution for a single publication point.
/// ///
/// This orchestrates: /// This reuses the same fresh-first runtime path as the tree runner:
/// 1) repo sync (RRDP or rsync fallback) into `raw_objects` /// 1) repository sync (RRDP or rsync fallback) into the current repository view
/// 2) manifest RP processing into a fetch_cache_pp pack (`fetch_cache_pp:<manifest-rsync-uri>`) /// 2) manifest processing into a validated publication point result
/// 3) signed object processing (ROA/ASPA) from the fetch_cache_pp pack /// 3) signed object processing (ROA/ASPA) and latest-VCIR persistence
pub fn run_publication_point_once( pub fn run_publication_point_once(
store: &RocksStore, store: &RocksStore,
policy: &Policy, policy: &Policy,
@ -44,49 +45,41 @@ pub fn run_publication_point_once(
issuer_effective_as: Option<&AsResourceSet>, issuer_effective_as: Option<&AsResourceSet>,
validation_time: time::OffsetDateTime, validation_time: time::OffsetDateTime,
) -> Result<RunOutput, RunError> { ) -> Result<RunOutput, RunError> {
let repo_sync = sync_publication_point( let handle = CaInstanceHandle {
depth: 0,
tal_id: "single-publication-point".to_string(),
parent_manifest_rsync_uri: None,
ca_certificate_der: issuer_ca_der.to_vec(),
ca_certificate_rsync_uri: issuer_ca_rsync_uri.map(str::to_string),
effective_ip_resources: issuer_effective_ip.cloned(),
effective_as_resources: issuer_effective_as.cloned(),
rsync_base_uri: rsync_base_uri.to_string(),
manifest_rsync_uri: manifest_rsync_uri.to_string(),
publication_point_rsync_uri: publication_point_rsync_uri.to_string(),
rrdp_notification_uri: rrdp_notification_uri.map(str::to_string),
};
let runner = Rpkiv1PublicationPointRunner {
store, store,
policy, policy,
rrdp_notification_uri,
rsync_base_uri,
http_fetcher, http_fetcher,
rsync_fetcher, rsync_fetcher,
None,
None,
)?;
let publication_point = process_manifest_publication_point(
store,
policy,
manifest_rsync_uri,
publication_point_rsync_uri,
issuer_ca_der,
issuer_ca_rsync_uri,
validation_time, validation_time,
)?; timing: None,
download_log: None,
rrdp_dedup: false,
rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: false,
rsync_repo_cache: Mutex::new(HashMap::new()),
};
let objects = process_fetch_cache_pp_pack_for_issuer( let result = runner
&publication_point.pack, .run_publication_point(&handle)
policy, .map_err(RunError::Runner)?;
issuer_ca_der,
issuer_ca_rsync_uri,
issuer_effective_ip,
issuer_effective_as,
validation_time,
None,
);
Ok(RunOutput { Ok(RunOutput {
repo_sync, publication_point_source: result.source,
publication_point, publication_point_warnings: result.warnings,
objects, objects: result.objects,
}) })
} }
pub fn fetch_cache_pp_exists(store: &RocksStore, manifest_rsync_uri: &str) -> Result<bool, String> {
let key = FetchCachePpKey::from_manifest_rsync_uri(manifest_rsync_uri);
store
.get_fetch_cache_pp(&key)
.map(|v| v.is_some())
.map_err(|e| e.to_string())
}

View File

@ -17,6 +17,44 @@ use crate::validation::tree_runner::Rpkiv1PublicationPointRunner;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Mutex; use std::sync::Mutex;
fn tal_id_from_url_like(s: &str) -> Option<String> {
let url = Url::parse(s).ok()?;
if let Some(last) = url
.path_segments()
.and_then(|segments| segments.filter(|seg| !seg.is_empty()).next_back())
{
let stem = last.rsplit_once('.').map(|(stem, _)| stem).unwrap_or(last);
let trimmed = stem.trim();
if !trimmed.is_empty() {
return Some(trimmed.to_string());
}
}
url.host_str().map(|host| host.to_string())
}
fn derive_tal_id(discovery: &DiscoveredRootCaInstance) -> String {
discovery
.tal_url
.as_deref()
.and_then(tal_id_from_url_like)
.or_else(|| {
discovery
.trust_anchor
.resolved_ta_uri
.as_ref()
.and_then(|uri| tal_id_from_url_like(uri.as_str()))
})
.or_else(|| {
discovery
.trust_anchor
.tal
.ta_uris
.first()
.and_then(|uri| tal_id_from_url_like(uri.as_str()))
})
.unwrap_or_else(|| "unknown-tal".to_string())
}
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct RunTreeFromTalOutput { pub struct RunTreeFromTalOutput {
pub discovery: DiscoveredRootCaInstance, pub discovery: DiscoveredRootCaInstance,
@ -43,12 +81,15 @@ pub enum RunTreeFromTalError {
pub fn root_handle_from_trust_anchor( pub fn root_handle_from_trust_anchor(
trust_anchor: &TrustAnchor, trust_anchor: &TrustAnchor,
tal_id: String,
ca_certificate_rsync_uri: Option<String>, ca_certificate_rsync_uri: Option<String>,
ca_instance: &crate::validation::ca_instance::CaInstanceUris, ca_instance: &crate::validation::ca_instance::CaInstanceUris,
) -> CaInstanceHandle { ) -> CaInstanceHandle {
let ta_rc = trust_anchor.ta_certificate.rc_ca.clone(); let ta_rc = trust_anchor.ta_certificate.rc_ca.clone();
CaInstanceHandle { CaInstanceHandle {
depth: 0, depth: 0,
tal_id,
parent_manifest_rsync_uri: None,
ca_certificate_der: trust_anchor.ta_certificate.raw_der.clone(), ca_certificate_der: trust_anchor.ta_certificate.raw_der.clone(),
ca_certificate_rsync_uri, ca_certificate_rsync_uri,
effective_ip_resources: ta_rc.tbs.extensions.ip_resources.clone(), effective_ip_resources: ta_rc.tbs.extensions.ip_resources.clone(),
@ -79,14 +120,18 @@ pub fn run_tree_from_tal_url_serial(
validation_time, validation_time,
timing: None, timing: None,
download_log: None, download_log: None,
revalidate_only: config.revalidate_only,
rrdp_dedup: true, rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()), rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true, rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()), rsync_repo_cache: Mutex::new(HashMap::new()),
}; };
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance); let root = root_handle_from_trust_anchor(
&discovery.trust_anchor,
derive_tal_id(&discovery),
None,
&discovery.ca_instance,
);
let tree = run_tree_serial(root, &runner, config)?; let tree = run_tree_serial(root, &runner, config)?;
Ok(RunTreeFromTalOutput { discovery, tree }) Ok(RunTreeFromTalOutput { discovery, tree })
@ -112,14 +157,18 @@ pub fn run_tree_from_tal_url_serial_audit(
validation_time, validation_time,
timing: None, timing: None,
download_log: Some(download_log.clone()), download_log: Some(download_log.clone()),
revalidate_only: config.revalidate_only,
rrdp_dedup: true, rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()), rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true, rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()), rsync_repo_cache: Mutex::new(HashMap::new()),
}; };
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance); let root = root_handle_from_trust_anchor(
&discovery.trust_anchor,
derive_tal_id(&discovery),
None,
&discovery.ca_instance,
);
let TreeRunAuditOutput { let TreeRunAuditOutput {
tree, tree,
publication_points, publication_points,
@ -159,14 +208,18 @@ pub fn run_tree_from_tal_url_serial_audit_with_timing(
validation_time, validation_time,
timing: Some(timing.clone()), timing: Some(timing.clone()),
download_log: Some(download_log.clone()), download_log: Some(download_log.clone()),
revalidate_only: config.revalidate_only,
rrdp_dedup: true, rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()), rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true, rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()), rsync_repo_cache: Mutex::new(HashMap::new()),
}; };
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance); let root = root_handle_from_trust_anchor(
&discovery.trust_anchor,
derive_tal_id(&discovery),
None,
&discovery.ca_instance,
);
let _tree = timing.span_phase("tree_run_total"); let _tree = timing.span_phase("tree_run_total");
let TreeRunAuditOutput { let TreeRunAuditOutput {
tree, tree,
@ -206,14 +259,18 @@ pub fn run_tree_from_tal_and_ta_der_serial(
validation_time, validation_time,
timing: None, timing: None,
download_log: None, download_log: None,
revalidate_only: config.revalidate_only,
rrdp_dedup: true, rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()), rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true, rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()), rsync_repo_cache: Mutex::new(HashMap::new()),
}; };
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance); let root = root_handle_from_trust_anchor(
&discovery.trust_anchor,
derive_tal_id(&discovery),
None,
&discovery.ca_instance,
);
let tree = run_tree_serial(root, &runner, config)?; let tree = run_tree_serial(root, &runner, config)?;
Ok(RunTreeFromTalOutput { discovery, tree }) Ok(RunTreeFromTalOutput { discovery, tree })
@ -242,14 +299,18 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit(
validation_time, validation_time,
timing: None, timing: None,
download_log: Some(download_log.clone()), download_log: Some(download_log.clone()),
revalidate_only: config.revalidate_only,
rrdp_dedup: true, rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()), rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true, rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()), rsync_repo_cache: Mutex::new(HashMap::new()),
}; };
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance); let root = root_handle_from_trust_anchor(
&discovery.trust_anchor,
derive_tal_id(&discovery),
None,
&discovery.ca_instance,
);
let TreeRunAuditOutput { let TreeRunAuditOutput {
tree, tree,
publication_points, publication_points,
@ -292,14 +353,18 @@ pub fn run_tree_from_tal_and_ta_der_serial_audit_with_timing(
validation_time, validation_time,
timing: Some(timing.clone()), timing: Some(timing.clone()),
download_log: Some(download_log.clone()), download_log: Some(download_log.clone()),
revalidate_only: config.revalidate_only,
rrdp_dedup: true, rrdp_dedup: true,
rrdp_repo_cache: Mutex::new(HashMap::new()), rrdp_repo_cache: Mutex::new(HashMap::new()),
rsync_dedup: true, rsync_dedup: true,
rsync_repo_cache: Mutex::new(HashMap::new()), rsync_repo_cache: Mutex::new(HashMap::new()),
}; };
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance); let root = root_handle_from_trust_anchor(
&discovery.trust_anchor,
derive_tal_id(&discovery),
None,
&discovery.ca_instance,
);
let _tree = timing.span_phase("tree_run_total"); let _tree = timing.span_phase("tree_run_total");
let TreeRunAuditOutput { let TreeRunAuditOutput {
tree, tree,

View File

@ -1,10 +1,10 @@
use crate::audit::DiscoveredFrom; use crate::audit::DiscoveredFrom;
use crate::audit::PublicationPointAudit; use crate::audit::PublicationPointAudit;
use crate::data_model::rc::{AsResourceSet, IpResourceSet}; use crate::data_model::rc::{AsResourceSet, IpResourceSet};
use crate::report::{RfcRef, Warning}; use crate::report::Warning;
use crate::storage::FetchCachePpPack;
use crate::validation::manifest::PublicationPointSource; use crate::validation::manifest::PublicationPointSource;
use crate::validation::objects::{AspaAttestation, ObjectsOutput, Vrp}; use crate::validation::objects::{AspaAttestation, ObjectsOutput, Vrp};
use crate::validation::publication_point::PublicationPointSnapshot;
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct TreeRunConfig { pub struct TreeRunConfig {
@ -12,10 +12,6 @@ pub struct TreeRunConfig {
pub max_depth: Option<usize>, pub max_depth: Option<usize>,
/// Max number of CA instances to process. /// Max number of CA instances to process.
pub max_instances: Option<usize>, pub max_instances: Option<usize>,
/// Skip RRDP/rsync fetch and re-validate from existing `fetch_cache_pp` packs in the DB.
///
/// This is primarily intended for profiling/analysis runs to remove network noise.
pub revalidate_only: bool,
} }
impl Default for TreeRunConfig { impl Default for TreeRunConfig {
@ -23,7 +19,6 @@ impl Default for TreeRunConfig {
Self { Self {
max_depth: None, max_depth: None,
max_instances: None, max_instances: None,
revalidate_only: false,
} }
} }
} }
@ -31,6 +26,8 @@ impl Default for TreeRunConfig {
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct CaInstanceHandle { pub struct CaInstanceHandle {
pub depth: usize, pub depth: usize,
pub tal_id: String,
pub parent_manifest_rsync_uri: Option<String>,
/// DER bytes of the CA certificate for this CA instance. /// DER bytes of the CA certificate for this CA instance.
pub ca_certificate_der: Vec<u8>, pub ca_certificate_der: Vec<u8>,
/// rsync URI of this CA certificate object (where it is published). /// rsync URI of this CA certificate object (where it is published).
@ -59,15 +56,15 @@ impl CaInstanceHandle {
#[derive(Clone, Debug, PartialEq, Eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub struct PublicationPointRunResult { pub struct PublicationPointRunResult {
pub source: PublicationPointSource, pub source: PublicationPointSource,
pub pack: FetchCachePpPack, pub snapshot: Option<PublicationPointSnapshot>,
pub warnings: Vec<Warning>, pub warnings: Vec<Warning>,
pub objects: ObjectsOutput, pub objects: ObjectsOutput,
pub audit: PublicationPointAudit, pub audit: PublicationPointAudit,
/// Candidate child CA instances discovered from this publication point. /// Candidate child CA instances to enqueue after this publication point completes.
/// ///
/// RFC 9286 §6.6 restriction is enforced by the tree engine: if this /// - For `Fresh`, these are discovered from the current validated publication point.
/// publication point used fetch_cache_pp due to failed fetch, children MUST NOT /// - For `VcirCurrentInstance`, these are restored from the current instance VCIR and then
/// be enqueued/processed in this run. /// processed fresh-first when their own turn arrives.
pub discovered_children: Vec<DiscoveredChildCaInstance>, pub discovered_children: Vec<DiscoveredChildCaInstance>,
} }
@ -187,37 +184,25 @@ pub fn run_tree_serial_audit(
audit.discovered_from = node.discovered_from.clone(); audit.discovered_from = node.discovered_from.clone();
publication_points.push(audit); publication_points.push(audit);
let enqueue_children = let mut children = res.discovered_children;
res.source == PublicationPointSource::Fresh || config.revalidate_only; children.sort_by(|a, b| {
if !enqueue_children && !res.discovered_children.is_empty() { a.handle
warnings.push( .manifest_rsync_uri
Warning::new("skipping child CA discovery due to failed fetch cache use") .cmp(&b.handle.manifest_rsync_uri)
.with_rfc_refs(&[RfcRef("RFC 9286 §6.6")]) .then_with(|| {
.with_context(&ca.manifest_rsync_uri), a.discovered_from
); .child_ca_certificate_rsync_uri
} .cmp(&b.discovered_from.child_ca_certificate_rsync_uri)
})
if enqueue_children { });
let mut children = res.discovered_children; for child in children {
children.sort_by(|a, b| { queue.push_back(QueuedCaInstance {
a.handle id: next_id,
.manifest_rsync_uri handle: child.handle.with_depth(ca.depth + 1),
.cmp(&b.handle.manifest_rsync_uri) parent_id: Some(node.id),
.then_with(|| { discovered_from: Some(child.discovered_from),
a.discovered_from
.child_ca_certificate_rsync_uri
.cmp(&b.discovered_from.child_ca_certificate_rsync_uri)
})
}); });
for child in children { next_id += 1;
queue.push_back(QueuedCaInstance {
id: next_id,
handle: child.handle.with_depth(ca.depth + 1),
parent_id: Some(node.id),
discovered_from: Some(child.discovered_from),
});
next_id += 1;
}
} }
} }

File diff suppressed because it is too large Load Diff

View File

@ -6,8 +6,8 @@ use rpki::data_model::rc::{
}; };
use rpki::data_model::roa::RoaObject; use rpki::data_model::roa::RoaObject;
use rpki::storage::PackFile;
use rpki::storage::RocksStore; use rpki::storage::RocksStore;
use rpki::storage::pack::PackFile;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::time::Instant; use std::time::Instant;

View File

@ -6,7 +6,7 @@ use std::time::{Duration, Instant};
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig}; use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use rpki::fetch::rsync::{RsyncFetchError, RsyncFetcher}; use rpki::fetch::rsync::{RsyncFetchError, RsyncFetcher};
use rpki::policy::{CaFailedFetchPolicy, Policy, SyncPreference}; use rpki::policy::{CaFailedFetchPolicy, Policy, SyncPreference};
use rpki::storage::{FetchCachePpKey, RocksStore}; use rpki::storage::RocksStore;
use rpki::sync::repo::{RepoSyncSource, sync_publication_point}; use rpki::sync::repo::{RepoSyncSource, sync_publication_point};
use rpki::sync::rrdp::{Fetcher, parse_notification, sync_from_notification}; use rpki::sync::rrdp::{Fetcher, parse_notification, sync_from_notification};
use rpki::validation::from_tal::discover_root_ca_instance_from_tal_url; use rpki::validation::from_tal::discover_root_ca_instance_from_tal_url;
@ -80,13 +80,13 @@ impl Fetcher for CountingDenyUriFetcher {
fn live_policy() -> Policy { fn live_policy() -> Policy {
let mut p = Policy::default(); let mut p = Policy::default();
p.sync_preference = SyncPreference::RrdpThenRsync; p.sync_preference = SyncPreference::RrdpThenRsync;
p.ca_failed_fetch_policy = CaFailedFetchPolicy::UseFetchCachePp; p.ca_failed_fetch_policy = CaFailedFetchPolicy::ReuseCurrentInstanceVcir;
p p
} }
#[test] #[test]
#[ignore = "live network: APNIC RRDP snapshot bootstrap into persistent RocksDB"] #[ignore = "live network: APNIC RRDP snapshot bootstrap into persistent RocksDB"]
fn apnic_live_bootstrap_snapshot_and_fetch_cache_pp_pack_to_persistent_db() { fn apnic_live_bootstrap_snapshot_and_persist_root_vcir_to_persistent_db() {
let http = live_http_fetcher(); let http = live_http_fetcher();
let rsync = AlwaysFailRsyncFetcher; let rsync = AlwaysFailRsyncFetcher;
@ -120,8 +120,8 @@ fn apnic_live_bootstrap_snapshot_and_fetch_cache_pp_pack_to_persistent_db() {
assert_eq!(sync.source, RepoSyncSource::Rrdp); assert_eq!(sync.source, RepoSyncSource::Rrdp);
// Build + persist a fetch_cache_pp pack for the root publication point so later runs can // Build the root publication point and persist the latest VCIR so later runs can
// validate behavior under failed fetch conditions (RFC 9286 §6.6). // validate current-instance failed-fetch reuse behavior (RFC 9286 §6.6).
let ta_der = discovery.trust_anchor.ta_certificate.raw_der; let ta_der = discovery.trust_anchor.ta_certificate.raw_der;
let pp = process_manifest_publication_point( let pp = process_manifest_publication_point(
&store, &store,
@ -136,9 +136,10 @@ fn apnic_live_bootstrap_snapshot_and_fetch_cache_pp_pack_to_persistent_db() {
assert_eq!(pp.source, PublicationPointSource::Fresh); assert_eq!(pp.source, PublicationPointSource::Fresh);
let key = FetchCachePpKey::from_manifest_rsync_uri(&ca_instance.manifest_rsync_uri); let cached = store
let cached = store.get_fetch_cache_pp(&key).expect("get fetch_cache_pp"); .get_vcir(&ca_instance.manifest_rsync_uri)
assert!(cached.is_some(), "expected fetch_cache_pp to be stored"); .expect("get vcir");
assert!(cached.is_some(), "expected VCIR to be stored");
eprintln!( eprintln!(
"OK: bootstrap complete; persistent db at: {}", "OK: bootstrap complete; persistent db at: {}",
@ -262,15 +263,15 @@ fn apnic_live_delta_only_from_persistent_db() {
} }
#[test] #[test]
#[ignore = "offline/synthetic: after bootstrap, force repo sync failure and assert fetch_cache_pp is used (RFC 9286 §6.6)"] #[ignore = "offline/synthetic: after bootstrap, force repo sync failure and assert current-instance VCIR is reused (RFC 9286 §6.6)"]
fn apnic_root_repo_sync_failure_uses_fetch_cache_pp_pack() { fn apnic_root_repo_sync_failure_reuses_current_instance_vcir() {
let http = live_http_fetcher(); let http = live_http_fetcher();
let db_dir = persistent_db_dir(); let db_dir = persistent_db_dir();
let store = RocksStore::open(&db_dir).expect("open rocksdb (must have been bootstrapped)"); let store = RocksStore::open(&db_dir).expect("open rocksdb (must have been bootstrapped)");
let mut policy = live_policy(); let mut policy = live_policy();
policy.sync_preference = SyncPreference::RrdpThenRsync; policy.sync_preference = SyncPreference::RrdpThenRsync;
policy.ca_failed_fetch_policy = CaFailedFetchPolicy::UseFetchCachePp; policy.ca_failed_fetch_policy = CaFailedFetchPolicy::ReuseCurrentInstanceVcir;
let validation_time = time::OffsetDateTime::now_utc(); let validation_time = time::OffsetDateTime::now_utc();
@ -278,12 +279,13 @@ fn apnic_root_repo_sync_failure_uses_fetch_cache_pp_pack() {
.expect("discover root CA instance from APNIC TAL"); .expect("discover root CA instance from APNIC TAL");
let ca_instance = discovery.ca_instance; let ca_instance = discovery.ca_instance;
// Ensure cache exists (created by bootstrap). // Ensure current-instance VCIR exists (created by bootstrap).
let key = FetchCachePpKey::from_manifest_rsync_uri(&ca_instance.manifest_rsync_uri); let cached = store
let cached = store.get_fetch_cache_pp(&key).expect("get fetch_cache_pp"); .get_vcir(&ca_instance.manifest_rsync_uri)
.expect("get vcir");
assert!( assert!(
cached.is_some(), cached.is_some(),
"missing fetch_cache_pp; run bootstrap test first. db_dir={}", "missing VCIR; run bootstrap test first. db_dir={}",
db_dir.display() db_dir.display()
); );
@ -301,13 +303,13 @@ fn apnic_root_repo_sync_failure_uses_fetch_cache_pp_pack() {
false, false,
Some("synthetic repo sync failure"), Some("synthetic repo sync failure"),
) )
.expect("must fall back to fetch_cache_pp"); .expect("must reuse current-instance VCIR");
assert_eq!(pp.source, PublicationPointSource::FetchCachePp); assert_eq!(pp.source, PublicationPointSource::VcirCurrentInstance);
assert!( assert!(
pp.warnings pp.warnings.iter().any(|w| w
.iter() .message
.any(|w| w.message.contains("using fetch_cache_pp")), .contains("using latest validated result for current CA instance")),
"expected cache-use warning" "expected current-instance VCIR reuse warning"
); );
} }

View File

@ -63,37 +63,39 @@ impl LiveStats {
rpki::validation::manifest::PublicationPointSource::Fresh => { rpki::validation::manifest::PublicationPointSource::Fresh => {
self.publication_points_fresh += 1 self.publication_points_fresh += 1
} }
rpki::validation::manifest::PublicationPointSource::FetchCachePp => { rpki::validation::manifest::PublicationPointSource::VcirCurrentInstance => {
self.publication_points_cached += 1 self.publication_points_cached += 1
} }
rpki::validation::manifest::PublicationPointSource::FailedFetchNoCache => {}
} }
// Include manifest object URI itself. if let Some(pack) = res.snapshot.as_ref() {
self.pack_uris_total += 1;
self.pack_file_uris_unique
.insert(res.pack.manifest_rsync_uri.clone());
*self
.pack_uris_by_ext_total
.entry(ext_of_uri(&res.pack.manifest_rsync_uri))
.or_insert(0) += 1;
for f in &res.pack.files {
self.pack_uris_total += 1; self.pack_uris_total += 1;
self.pack_file_uris_unique.insert(f.rsync_uri.clone()); self.pack_file_uris_unique
.insert(pack.manifest_rsync_uri.clone());
*self *self
.pack_uris_by_ext_total .pack_uris_by_ext_total
.entry(ext_of_uri(&f.rsync_uri)) .entry(ext_of_uri(&pack.manifest_rsync_uri))
.or_insert(0) += 1; .or_insert(0) += 1;
if f.rsync_uri.ends_with(".crl") { for f in &pack.files {
self.crl_total += 1; self.pack_uris_total += 1;
if RpkixCrl::decode_der(&f.bytes).is_ok() { self.pack_file_uris_unique.insert(f.rsync_uri.clone());
self.crl_decode_ok += 1; *self
} .pack_uris_by_ext_total
} .entry(ext_of_uri(&f.rsync_uri))
.or_insert(0) += 1;
if f.rsync_uri.ends_with(".cer") { if f.rsync_uri.ends_with(".crl") {
self.child_ca_cert_candidates_total += 1; self.crl_total += 1;
if RpkixCrl::decode_der(&f.bytes).is_ok() {
self.crl_decode_ok += 1;
}
}
if f.rsync_uri.ends_with(".cer") {
self.child_ca_cert_candidates_total += 1;
}
} }
} }
@ -168,7 +170,6 @@ fn apnic_tree_full_stats_serial() {
validation_time, validation_time,
timing: None, timing: None,
download_log: None, download_log: None,
revalidate_only: false,
rrdp_dedup: true, rrdp_dedup: true,
rrdp_repo_cache: std::sync::Mutex::new(std::collections::HashMap::new()), rrdp_repo_cache: std::sync::Mutex::new(std::collections::HashMap::new()),
rsync_dedup: true, rsync_dedup: true,
@ -181,8 +182,12 @@ fn apnic_tree_full_stats_serial() {
stats: &stats, stats: &stats,
}; };
let root: CaInstanceHandle = let root: CaInstanceHandle = root_handle_from_trust_anchor(
root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance); &discovery.trust_anchor,
"test-tal".to_string(),
None,
&discovery.ca_instance,
);
let max_depth = std::env::var("RPKI_APNIC_MAX_DEPTH") let max_depth = std::env::var("RPKI_APNIC_MAX_DEPTH")
.ok() .ok()
@ -197,7 +202,6 @@ fn apnic_tree_full_stats_serial() {
&TreeRunConfig { &TreeRunConfig {
max_depth, max_depth,
max_instances, max_instances,
revalidate_only: false,
}, },
) )
.expect("run tree"); .expect("run tree");
@ -213,11 +217,6 @@ fn apnic_tree_full_stats_serial() {
} }
} }
let fetch_cache_pp_total = store
.fetch_cache_pp_iter_all()
.expect("fetch_cache_pp_iter_all")
.count();
println!("APNIC Stage2 full-tree serial stats"); println!("APNIC Stage2 full-tree serial stats");
println!("tal_url={APNIC_TAL_URL}"); println!("tal_url={APNIC_TAL_URL}");
println!( println!(
@ -246,7 +245,6 @@ fn apnic_tree_full_stats_serial() {
stats.pack_file_uris_unique.len() stats.pack_file_uris_unique.len()
); );
println!("pack_uris_by_ext_total={:?}", stats.pack_uris_by_ext_total); println!("pack_uris_by_ext_total={:?}", stats.pack_uris_by_ext_total);
println!("fetch_cache_pp_total={fetch_cache_pp_total}");
println!(); println!();
println!( println!(
"crl_total={} crl_decode_ok={}", "crl_total={} crl_decode_ok={}",
@ -273,7 +271,6 @@ fn apnic_tree_full_stats_serial() {
"rocksdb_raw_objects_total={} raw_by_ext={:?}", "rocksdb_raw_objects_total={} raw_by_ext={:?}",
raw_total, raw_by_ext raw_total, raw_by_ext
); );
println!("rocksdb_fetch_cache_pp_total={fetch_cache_pp_total}");
// Loose sanity assertions (avoid flakiness due to repository churn). // Loose sanity assertions (avoid flakiness due to repository churn).
// //

View File

@ -36,7 +36,6 @@ fn apnic_tree_depth1_processes_more_than_root() {
&TreeRunConfig { &TreeRunConfig {
max_depth: Some(1), max_depth: Some(1),
max_instances: Some(2), max_instances: Some(2),
revalidate_only: false,
}, },
) )
.expect("run tree from tal"); .expect("run tree from tal");
@ -75,7 +74,6 @@ fn apnic_tree_root_only_processes_root_with_long_timeouts() {
&TreeRunConfig { &TreeRunConfig {
max_depth: Some(0), max_depth: Some(0),
max_instances: Some(1), max_instances: Some(1),
revalidate_only: false,
}, },
) )
.expect("run APNIC root-only"); .expect("run APNIC root-only");

View File

@ -467,3 +467,337 @@ fn validate_subordinate_ca_rejects_tampered_crl_signature() {
.unwrap_err(); .unwrap_err();
assert!(matches!(err, CaPathError::CrlVerify(_))); assert!(matches!(err, CaPathError::CrlVerify(_)));
} }
/// `validate_subordinate_ca_cert` must reject a CA path in which either
/// endpoint is not actually a CA certificate: an EE certificate offered as
/// the child fails with `CaPathError::ChildNotCa`, and an EE certificate
/// offered as the issuer fails with `CaPathError::IssuerNotCa`.
#[test]
fn validate_subordinate_ca_rejects_non_ca_child_and_non_ca_issuer() {
    // Well-formed issuer-CA / child-CA / CRL triple to pair with the EE cert.
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16
",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");

    // Extract a real EE certificate from a ROA fixture's embedded SignedData.
    let ee_child_der =
        std::fs::read("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa")
            .expect("read roa fixture");
    let roa = rpki::data_model::roa::RoaObject::decode_der(&ee_child_der).expect("decode roa");
    let ee_der = roa.signed_object.signed_data.certificates[0]
        .raw_der
        .clone();

    // Case 1: EE certificate in the child slot -> ChildNotCa.
    let err = validate_subordinate_ca_cert(
        &ee_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ChildNotCa), "{err}");

    // Case 2: EE certificate in the issuer slot -> IssuerNotCa.
    let err = validate_subordinate_ca_cert(
        &generated.child_ca_der,
        &ee_der,
        &generated.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::IssuerNotCa), "{err}");
}
/// Two further rejection paths of `validate_subordinate_ca_cert`:
/// - an unrelated issuer certificate (subject does not match the child's
///   issuer name) fails with `CaPathError::IssuerSubjectMismatch`;
/// - a child generated without any sbgp resource extensions fails with
///   `CaPathError::ResourcesMissing`.
#[test]
fn validate_subordinate_ca_rejects_mismatched_issuer_subject_and_missing_resources() {
    // Chain whose child carries both IP and AS resource extensions.
    let with_resources = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16
sbgp-autonomousSysNum = critical, AS:64496
",
        false,
    );
    let issuer =
        ResourceCertificate::decode_der(&with_resources.issuer_ca_der).expect("decode issuer");

    // An unrelated real-world CA cert used as the "wrong" issuer.
    let wrong_issuer_der = std::fs::read(
        "tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/R-lVU1XGsAeqzV1Fv0HjOD6ZFkE.cer",
    )
    .expect("read wrong issuer fixture");
    let wrong_issuer =
        ResourceCertificate::decode_der(&wrong_issuer_der).expect("decode wrong issuer");

    // Case 1: wrong issuer -> IssuerSubjectMismatch.
    let err = validate_subordinate_ca_cert(
        &with_resources.child_ca_der,
        &wrong_issuer_der,
        &with_resources.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        wrong_issuer.tbs.extensions.ip_resources.as_ref(),
        wrong_issuer.tbs.extensions.as_resources.as_ref(),
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(
        matches!(err, CaPathError::IssuerSubjectMismatch { .. }),
        "{err}"
    );

    // Case 2: chain generated without sbgp extensions -> ResourcesMissing.
    let no_resources = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign
",
        false,
    );
    // NOTE(review): the parent resource sets below come from the *with_resources*
    // issuer, not the no_resources chain's own issuer — presumably irrelevant
    // because the child itself has no resource extensions; confirm intent.
    let err = validate_subordinate_ca_cert(
        &no_resources.child_ca_der,
        &no_resources.issuer_ca_der,
        &no_resources.issuer_crl_der,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ResourcesMissing), "{err}");
}
/// Happy path and revocation path of
/// `validate_subordinate_ca_cert_with_prevalidated_issuer`:
/// - with an empty revocation set the child validates and the result carries
///   both effective IP and AS resource sets;
/// - with the child's own serial number in the revoked set, validation fails
///   with `CaPathError::ChildRevoked`.
#[test]
fn validate_subordinate_ca_with_prevalidated_issuer_covers_success_and_error_paths() {
    use rpki::data_model::common::BigUnsigned;
    use rpki::data_model::crl::RpkixCrl;
    use rpki::validation::ca_path::validate_subordinate_ca_cert_with_prevalidated_issuer;
    use std::collections::HashSet;
    use x509_parser::prelude::FromDer;
    use x509_parser::x509::SubjectPublicKeyInfo;

    // Chain whose child carries both IP and AS resource extensions.
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16
sbgp-autonomousSysNum = critical, AS:64496
",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let child = ResourceCertificate::decode_der(&generated.child_ca_der).expect("decode child");
    let issuer_crl = RpkixCrl::decode_der(&generated.issuer_crl_der).expect("decode crl");

    // Pre-parse the issuer SPKI, as the "prevalidated issuer" entry point expects.
    let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info)
        .expect("parse issuer spki");
    assert!(rem.is_empty());

    let now = time::OffsetDateTime::now_utc();

    // Success: empty revocation set -> validated child with effective resources.
    let validated = validate_subordinate_ca_cert_with_prevalidated_issuer(
        &generated.child_ca_der,
        child.clone(),
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .expect("validate subordinate with prevalidated issuer");
    assert!(validated.effective_ip_resources.is_some());
    assert!(validated.effective_as_resources.is_some());

    // Failure: the child's serial is in the revoked set -> ChildRevoked.
    let mut revoked = HashSet::new();
    revoked.insert(BigUnsigned::from_biguint(&child.tbs.serial_number).bytes_be);
    let err = validate_subordinate_ca_cert_with_prevalidated_issuer(
        &generated.child_ca_der,
        child,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &revoked,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ChildRevoked), "{err}");
}
/// Four rejection paths of
/// `validate_subordinate_ca_cert_with_prevalidated_issuer`:
/// - an EE certificate in the issuer slot -> `CaPathError::IssuerNotCa`;
/// - an EE certificate in the child slot -> `CaPathError::ChildNotCa`;
/// - a validation instant just before the child's notBefore ->
///   `CaPathError::CertificateNotValidAtTime`;
/// - a validation instant just after the CRL's nextUpdate ->
///   `CaPathError::CrlNotValidAtTime`.
#[test]
fn validate_subordinate_ca_with_prevalidated_issuer_rejects_non_ca_inputs_and_invalid_times() {
    use rpki::data_model::crl::RpkixCrl;
    use rpki::validation::ca_path::validate_subordinate_ca_cert_with_prevalidated_issuer;
    use std::collections::HashSet;
    use x509_parser::prelude::FromDer;
    use x509_parser::x509::SubjectPublicKeyInfo;

    // Well-formed chain with IP and AS resource extensions on the child.
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16
sbgp-autonomousSysNum = critical, AS:64496
",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let child = ResourceCertificate::decode_der(&generated.child_ca_der).expect("decode child");
    let issuer_crl = RpkixCrl::decode_der(&generated.issuer_crl_der).expect("decode crl");
    let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info)
        .expect("parse issuer spki");
    assert!(rem.is_empty());

    // Extract a real EE certificate (and its SPKI) from a ROA fixture.
    let roa_der =
        std::fs::read("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa")
            .expect("read roa fixture");
    let roa = rpki::data_model::roa::RoaObject::decode_der(&roa_der).expect("decode roa");
    let ee_rc =
        ResourceCertificate::decode_der(&roa.signed_object.signed_data.certificates[0].raw_der)
            .expect("decode ee rc");
    let (rem, ee_spki) =
        SubjectPublicKeyInfo::from_der(&ee_rc.tbs.subject_public_key_info).expect("parse ee spki");
    assert!(rem.is_empty());

    // Case 1: EE certificate as the prevalidated issuer -> IssuerNotCa.
    let err = validate_subordinate_ca_cert_with_prevalidated_issuer(
        &generated.child_ca_der,
        child.clone(),
        &ee_rc,
        &ee_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::IssuerNotCa), "{err}");

    // Case 2: EE certificate as the decoded child -> ChildNotCa.
    let err = validate_subordinate_ca_cert_with_prevalidated_issuer(
        &generated.child_ca_der,
        ee_rc,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ChildNotCa), "{err}");

    // Case 3: validation time 1s before child notBefore -> CertificateNotValidAtTime.
    let err = validate_subordinate_ca_cert_with_prevalidated_issuer(
        &generated.child_ca_der,
        child.clone(),
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        child.tbs.validity_not_before - time::Duration::seconds(1),
    )
    .unwrap_err();
    assert!(
        matches!(err, CaPathError::CertificateNotValidAtTime),
        "{err}"
    );

    // Case 4: validation time 1s after CRL nextUpdate -> CrlNotValidAtTime.
    let err = validate_subordinate_ca_cert_with_prevalidated_issuer(
        &generated.child_ca_der,
        child,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        issuer_crl.next_update.utc + time::Duration::seconds(1),
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::CrlNotValidAtTime), "{err}");
}
/// Remaining rejection paths of
/// `validate_subordinate_ca_cert_with_prevalidated_issuer`:
/// - a child whose decoded issuer name is tampered to its own subject name
///   fails with `CaPathError::IssuerSubjectMismatch`;
/// - a chain generated without sbgp resource extensions fails with
///   `CaPathError::ResourcesMissing`.
#[test]
fn validate_subordinate_ca_with_prevalidated_issuer_rejects_mismatch_and_missing_resources() {
    use rpki::data_model::crl::RpkixCrl;
    use rpki::validation::ca_path::validate_subordinate_ca_cert_with_prevalidated_issuer;
    use std::collections::HashSet;
    use x509_parser::prelude::FromDer;
    use x509_parser::x509::SubjectPublicKeyInfo;

    // Well-formed chain with IP and AS resource extensions on the child.
    let generated = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/16
sbgp-autonomousSysNum = critical, AS:64496
",
        false,
    );
    let issuer = ResourceCertificate::decode_der(&generated.issuer_ca_der).expect("decode issuer");
    let child = ResourceCertificate::decode_der(&generated.child_ca_der).expect("decode child");
    let issuer_crl = RpkixCrl::decode_der(&generated.issuer_crl_der).expect("decode crl");
    let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info)
        .expect("parse issuer spki");
    assert!(rem.is_empty());

    // Case 1: overwrite the decoded child's issuer name with its own subject
    // so it no longer matches the real issuer's subject -> IssuerSubjectMismatch.
    let mut mismatched_child = child.clone();
    mismatched_child.tbs.issuer_name = mismatched_child.tbs.subject_name.clone();
    let err = validate_subordinate_ca_cert_with_prevalidated_issuer(
        &generated.child_ca_der,
        mismatched_child,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        issuer.tbs.extensions.ip_resources.as_ref(),
        issuer.tbs.extensions.as_resources.as_ref(),
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(
        matches!(err, CaPathError::IssuerSubjectMismatch { .. }),
        "{err}"
    );

    // Case 2: chain generated without sbgp extensions -> ResourcesMissing.
    let no_resources = generate_chain_and_crl(
        "keyUsage = critical, keyCertSign, cRLSign
",
        false,
    );
    let no_resources_child = ResourceCertificate::decode_der(&no_resources.child_ca_der)
        .expect("decode no resources child");
    let no_resources_issuer = ResourceCertificate::decode_der(&no_resources.issuer_ca_der)
        .expect("decode no resources issuer");
    let no_resources_crl =
        RpkixCrl::decode_der(&no_resources.issuer_crl_der).expect("decode no resources crl");
    let (rem, no_resources_spki) =
        SubjectPublicKeyInfo::from_der(&no_resources_issuer.tbs.subject_public_key_info)
            .expect("parse no resources issuer spki");
    assert!(rem.is_empty());
    let err = validate_subordinate_ca_cert_with_prevalidated_issuer(
        &no_resources.child_ca_der,
        no_resources_child,
        &no_resources_issuer,
        &no_resources_spki,
        &no_resources_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        "rsync://example.test/repo/issuer/issuer.crl",
        no_resources_issuer.tbs.extensions.ip_resources.as_ref(),
        no_resources_issuer.tbs.extensions.as_resources.as_ref(),
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    assert!(matches!(err, CaPathError::ResourcesMissing), "{err}");
}

View File

@ -244,3 +244,250 @@ fn ee_key_usage_wrong_bits_is_rejected() {
.unwrap_err(); .unwrap_err();
assert!(matches!(err, CertPathError::KeyUsageInvalidBits), "{err}"); assert!(matches!(err, CertPathError::KeyUsageInvalidBits), "{err}");
} }
/// Happy path and revocation path of
/// `validate_ee_cert_path_with_prevalidated_issuer`:
/// - with an empty revocation set the EE certificate validates;
/// - with the EE's serial number in the revoked set, validation fails with
///   `CertPathError::EeRevoked`.
#[test]
fn validate_ee_cert_path_with_prevalidated_issuer_covers_success_and_error_paths() {
    use rpki::data_model::common::BigUnsigned;
    use rpki::data_model::crl::RpkixCrl;
    use rpki::data_model::rc::ResourceCertificate;
    use rpki::validation::cert_path::validate_ee_cert_path_with_prevalidated_issuer;
    use std::collections::HashSet;
    use x509_parser::prelude::FromDer;
    use x509_parser::x509::SubjectPublicKeyInfo;

    // Issuer CA + EE + CRL generated with EE keyUsage = digitalSignature.
    let g = generate_issuer_ca_ee_and_crl(
        "keyUsage = critical, digitalSignature
",
    );
    let issuer = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer");
    let ee = ResourceCertificate::decode_der(&g.ee_der).expect("decode ee");
    let issuer_crl = RpkixCrl::decode_der(&g.issuer_crl_der).expect("decode crl");

    // Pre-parse the issuer SPKI, as the "prevalidated issuer" entry point expects.
    let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info)
        .expect("parse issuer spki");
    assert!(rem.is_empty());

    let now = time::OffsetDateTime::now_utc();

    // Success: empty revocation set.
    validate_ee_cert_path_with_prevalidated_issuer(
        &g.ee_der,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        Some("rsync://example.test/repo/issuer/issuer.crl"),
        now,
    )
    .expect("prevalidated ee path ok");

    // Failure: the EE's serial is in the revoked set -> EeRevoked.
    let mut revoked = HashSet::new();
    revoked.insert(BigUnsigned::from_biguint(&ee.tbs.serial_number).bytes_be);
    let err = validate_ee_cert_path_with_prevalidated_issuer(
        &g.ee_der,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &revoked,
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        Some("rsync://example.test/repo/issuer/issuer.crl"),
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::EeRevoked), "{err}");
}
/// Endpoint-role checks of `validate_ee_cert_path_with_prevalidated_issuer`:
/// - a CA certificate presented as the EE fails with `CertPathError::EeNotEe`;
/// - an EE certificate presented as the prevalidated issuer fails with
///   `CertPathError::IssuerNotCa`.
#[test]
fn validate_ee_cert_path_with_prevalidated_issuer_rejects_non_ee_and_non_ca_issuer() {
    use rpki::data_model::crl::RpkixCrl;
    use rpki::data_model::rc::ResourceCertificate;
    use rpki::validation::cert_path::validate_ee_cert_path_with_prevalidated_issuer;
    use std::collections::HashSet;
    use x509_parser::prelude::FromDer;
    use x509_parser::x509::SubjectPublicKeyInfo;

    // Issuer CA + EE + CRL generated with EE keyUsage = digitalSignature.
    let g = generate_issuer_ca_ee_and_crl(
        "keyUsage = critical, digitalSignature
",
    );
    let issuer = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer");
    let ee = ResourceCertificate::decode_der(&g.ee_der).expect("decode ee");
    let issuer_crl = RpkixCrl::decode_der(&g.issuer_crl_der).expect("decode crl");
    let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info)
        .expect("parse issuer spki");
    assert!(rem.is_empty());

    let now = time::OffsetDateTime::now_utc();

    // Case 1: the issuer CA's DER in the EE slot -> EeNotEe.
    let err = validate_ee_cert_path_with_prevalidated_issuer(
        &g.issuer_ca_der,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        None,
        None,
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::EeNotEe), "{err}");

    // Case 2: the decoded EE (with its own SPKI) in the issuer slot -> IssuerNotCa.
    let (rem, bad_spki) =
        SubjectPublicKeyInfo::from_der(&ee.tbs.subject_public_key_info).expect("parse ee spki");
    assert!(rem.is_empty());
    let err = validate_ee_cert_path_with_prevalidated_issuer(
        &g.ee_der,
        &ee,
        &bad_spki,
        &issuer_crl,
        &HashSet::new(),
        None,
        None,
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::IssuerNotCa), "{err}");
}
#[test]
fn validate_ee_cert_path_with_prevalidated_issuer_rejects_mismatched_issuer_subject() {
    // Purpose: the prevalidated-issuer EE path check must reject an issuer
    // certificate whose subject name does not match the EE's issuer name.
    use rpki::data_model::crl::RpkixCrl;
    use rpki::data_model::rc::ResourceCertificate;
    use rpki::validation::cert_path::validate_ee_cert_path_with_prevalidated_issuer;
    use std::collections::HashSet;
    use x509_parser::prelude::FromDer;
    use x509_parser::x509::SubjectPublicKeyInfo;
    // Only the generated EE and its CRL are used; the matching issuer is
    // deliberately replaced below.
    let g = generate_issuer_ca_ee_and_crl(
        "keyUsage = critical, digitalSignature
",
    );
    // An unrelated CA certificate fixture stands in as the (wrong) issuer.
    let wrong_issuer_der = std::fs::read(
        "tests/fixtures/repository/ca.rg.net/rpki/RGnet-OU/R-lVU1XGsAeqzV1Fv0HjOD6ZFkE.cer",
    )
    .expect("read wrong issuer fixture");
    let wrong_issuer =
        ResourceCertificate::decode_der(&wrong_issuer_der).expect("decode wrong issuer");
    let issuer_crl = RpkixCrl::decode_der(&g.issuer_crl_der).expect("decode crl");
    let (rem, wrong_spki) =
        SubjectPublicKeyInfo::from_der(&wrong_issuer.tbs.subject_public_key_info)
            .expect("parse wrong issuer spki");
    // SPKI must parse with no trailing bytes left over.
    assert!(rem.is_empty());
    let err = validate_ee_cert_path_with_prevalidated_issuer(
        &g.ee_der,
        &wrong_issuer,
        &wrong_spki,
        &issuer_crl,
        &HashSet::new(),
        None,
        None,
        time::OffsetDateTime::now_utc(),
    )
    .unwrap_err();
    // The failure must be the specific subject-mismatch variant, not a
    // generic chaining error.
    assert!(
        matches!(err, CertPathError::IssuerSubjectMismatch { .. }),
        "{err}"
    );
}
#[test]
fn validate_ee_cert_path_rejects_non_ee_and_non_ca_issuer() {
    use rpki::data_model::roa::RoaObject;

    // Fresh issuer CA / EE / CRL trio generated on the fly.
    let generated = generate_issuer_ca_ee_and_crl("keyUsage = critical, digitalSignature\n");

    // Pull a genuine EE certificate out of a ROA fixture so it can pose as an issuer.
    let roa_bytes =
        std::fs::read("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa")
            .expect("read roa fixture");
    let roa = RoaObject::decode_der(&roa_bytes).expect("decode roa");
    let ee_from_roa = roa.signed_object.signed_data.certificates[0]
        .raw_der
        .clone();

    let now = time::OffsetDateTime::now_utc();

    // A CA certificate offered in the EE slot must be rejected as EeNotEe.
    let err = validate_ee_cert_path(
        &generated.issuer_ca_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        None,
        None,
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::EeNotEe), "{err}");

    // An EE certificate offered in the issuer slot must be rejected as IssuerNotCa.
    let err = validate_ee_cert_path(
        &generated.ee_der,
        &ee_from_roa,
        &generated.issuer_crl_der,
        None,
        None,
        now,
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::IssuerNotCa), "{err}");
}
#[test]
fn validate_ee_cert_path_rejects_stale_crl() {
    use rpki::data_model::crl::RpkixCrl;

    let generated = generate_issuer_ca_ee_and_crl("keyUsage = critical, digitalSignature\n");

    // Decode the CRL only to learn its nextUpdate timestamp.
    let crl = RpkixCrl::decode_der(&generated.issuer_crl_der).expect("decode crl");
    // One second past nextUpdate the CRL is stale, so path validation must fail.
    let stale_time = crl.next_update.utc + time::Duration::seconds(1);

    let err = validate_ee_cert_path(
        &generated.ee_der,
        &generated.issuer_ca_der,
        &generated.issuer_crl_der,
        None,
        None,
        stale_time,
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::CrlNotValidAtTime), "{err}");
}
#[test]
fn validate_ee_cert_path_with_prevalidated_issuer_rejects_invalid_times() {
    // Purpose: the prevalidated-issuer EE path check must reject validation
    // times outside the EE's validity window and times past the CRL's
    // nextUpdate.
    use rpki::data_model::crl::RpkixCrl;
    use rpki::data_model::rc::ResourceCertificate;
    use rpki::validation::cert_path::validate_ee_cert_path_with_prevalidated_issuer;
    use std::collections::HashSet;
    use x509_parser::prelude::FromDer;
    use x509_parser::x509::SubjectPublicKeyInfo;
    let g = generate_issuer_ca_ee_and_crl(
        "keyUsage = critical, digitalSignature
",
    );
    let issuer = ResourceCertificate::decode_der(&g.issuer_ca_der).expect("decode issuer");
    let ee = ResourceCertificate::decode_der(&g.ee_der).expect("decode ee");
    let issuer_crl = RpkixCrl::decode_der(&g.issuer_crl_der).expect("decode crl");
    let (rem, issuer_spki) = SubjectPublicKeyInfo::from_der(&issuer.tbs.subject_public_key_info)
        .expect("parse issuer spki");
    // SPKI must parse with no trailing bytes.
    assert!(rem.is_empty());
    // Case 1: one second before the EE's notBefore — certificate not yet valid.
    let err = validate_ee_cert_path_with_prevalidated_issuer(
        &g.ee_der,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        Some("rsync://example.test/repo/issuer/issuer.crl"),
        ee.tbs.validity_not_before - time::Duration::seconds(1),
    )
    .unwrap_err();
    assert!(
        matches!(err, CertPathError::CertificateNotValidAtTime),
        "{err}"
    );
    // Case 2: one second past the CRL's nextUpdate — CRL is stale.
    let err = validate_ee_cert_path_with_prevalidated_issuer(
        &g.ee_der,
        &issuer,
        &issuer_spki,
        &issuer_crl,
        &HashSet::new(),
        Some("rsync://example.test/repo/issuer/issuer.cer"),
        Some("rsync://example.test/repo/issuer/issuer.crl"),
        issuer_crl.next_update.utc + time::Duration::seconds(1),
    )
    .unwrap_err();
    assert!(matches!(err, CertPathError::CrlNotValidAtTime), "{err}");
}

View File

@ -1,8 +1,9 @@
use rpki::audit::PublicationPointAudit; use rpki::audit::PublicationPointAudit;
use rpki::policy::{Policy, SignedObjectFailurePolicy}; use rpki::policy::{Policy, SignedObjectFailurePolicy};
use rpki::storage::{FetchCachePpPack, PackFile, PackTime}; use rpki::storage::{PackFile, PackTime};
use rpki::validation::manifest::PublicationPointSource; use rpki::validation::manifest::PublicationPointSource;
use rpki::validation::objects::process_fetch_cache_pp_pack_for_issuer; use rpki::validation::objects::process_publication_point_snapshot_for_issuer;
use rpki::validation::publication_point::PublicationPointSnapshot;
use rpki::validation::tree::{ use rpki::validation::tree::{
CaInstanceHandle, PublicationPointRunResult, PublicationPointRunner, TreeRunConfig, CaInstanceHandle, PublicationPointRunResult, PublicationPointRunner, TreeRunConfig,
run_tree_serial_audit, run_tree_serial_audit,
@ -13,12 +14,12 @@ fn fixture_bytes(path: &str) -> Vec<u8> {
.unwrap_or_else(|e| panic!("read fixture {path}: {e}")) .unwrap_or_else(|e| panic!("read fixture {path}: {e}"))
} }
fn dummy_pack(files: Vec<PackFile>) -> FetchCachePpPack { fn dummy_snapshot(files: Vec<PackFile>) -> PublicationPointSnapshot {
let now = time::OffsetDateTime::now_utc(); let now = time::OffsetDateTime::now_utc();
let manifest_rsync_uri = let manifest_rsync_uri =
"rsync://rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft"; "rsync://rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft";
FetchCachePpPack { PublicationPointSnapshot {
format_version: FetchCachePpPack::FORMAT_VERSION_V1, format_version: PublicationPointSnapshot::FORMAT_VERSION_V1,
manifest_rsync_uri: manifest_rsync_uri.to_string(), manifest_rsync_uri: manifest_rsync_uri.to_string(),
publication_point_rsync_uri: "rsync://rpki.cernet.net/repo/cernet/0/".to_string(), publication_point_rsync_uri: "rsync://rpki.cernet.net/repo/cernet/0/".to_string(),
manifest_number_be: vec![1], manifest_number_be: vec![1],
@ -34,7 +35,7 @@ fn dummy_pack(files: Vec<PackFile>) -> FetchCachePpPack {
struct SinglePackRunner { struct SinglePackRunner {
policy: Policy, policy: Policy,
pack: FetchCachePpPack, snapshot: PublicationPointSnapshot,
} }
impl PublicationPointRunner for SinglePackRunner { impl PublicationPointRunner for SinglePackRunner {
@ -42,8 +43,8 @@ impl PublicationPointRunner for SinglePackRunner {
&self, &self,
ca: &CaInstanceHandle, ca: &CaInstanceHandle,
) -> Result<PublicationPointRunResult, String> { ) -> Result<PublicationPointRunResult, String> {
let objects = process_fetch_cache_pp_pack_for_issuer( let objects = process_publication_point_snapshot_for_issuer(
&self.pack, &self.snapshot,
&self.policy, &self.policy,
&ca.ca_certificate_der, &ca.ca_certificate_der,
ca.ca_certificate_rsync_uri.as_deref(), ca.ca_certificate_rsync_uri.as_deref(),
@ -55,7 +56,7 @@ impl PublicationPointRunner for SinglePackRunner {
Ok(PublicationPointRunResult { Ok(PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: self.pack.clone(), snapshot: Some(self.snapshot.clone()),
warnings: Vec::new(), warnings: Vec::new(),
objects, objects,
audit: PublicationPointAudit::default(), audit: PublicationPointAudit::default(),
@ -70,7 +71,7 @@ fn crl_mismatch_drops_publication_point_and_cites_rfc_sections() {
fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa"); fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
// Include at least one CRL file but with a URI that does NOT match the EE certificate's CRLDP. // Include at least one CRL file but with a URI that does NOT match the EE certificate's CRLDP.
let pack = dummy_pack(vec![ let pack = dummy_snapshot(vec![
PackFile::from_bytes_compute_sha256("rsync://example.test/repo/not-it.crl", vec![0x01]), PackFile::from_bytes_compute_sha256("rsync://example.test/repo/not-it.crl", vec![0x01]),
PackFile::from_bytes_compute_sha256("rsync://example.test/repo/a.roa", roa_bytes), PackFile::from_bytes_compute_sha256("rsync://example.test/repo/a.roa", roa_bytes),
]); ]);
@ -78,10 +79,15 @@ fn crl_mismatch_drops_publication_point_and_cites_rfc_sections() {
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropPublicationPoint; policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropPublicationPoint;
let runner = SinglePackRunner { policy, pack }; let runner = SinglePackRunner {
policy,
snapshot: pack,
};
let root = CaInstanceHandle { let root = CaInstanceHandle {
depth: 0, depth: 0,
tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None,
// Use a real, parseable CA certificate DER so objects processing can reach CRL selection. // Use a real, parseable CA certificate DER so objects processing can reach CRL selection.
// The test only asserts CRLDP/locked-pack error handling, not signature chaining. // The test only asserts CRLDP/locked-pack error handling, not signature chaining.
ca_certificate_der: fixture_bytes("tests/fixtures/ta/apnic-ta.cer"), ca_certificate_der: fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
@ -102,7 +108,6 @@ fn crl_mismatch_drops_publication_point_and_cites_rfc_sections() {
&TreeRunConfig { &TreeRunConfig {
max_depth: Some(0), max_depth: Some(0),
max_instances: Some(1), max_instances: Some(1),
revalidate_only: false,
}, },
) )
.expect("run tree audit"); .expect("run tree audit");

View File

@ -1,280 +0,0 @@
use std::path::Path;
use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{FetchCachePpKey, FetchCachePpPack, RocksStore};
use rpki::validation::manifest::{
PublicationPointSource, process_manifest_publication_point,
process_manifest_publication_point_after_repo_sync,
};
/// Read the DER bytes of the APNIC issuer CA certificate fixture from disk.
fn issuer_ca_fixture() -> Vec<u8> {
    let path = "tests/fixtures/repository/rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer";
    std::fs::read(path).expect("read issuer ca fixture")
}
/// The rsync URI under which the issuer CA fixture is published.
fn issuer_ca_rsync_uri() -> &'static str {
    const URI: &str =
        "rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer";
    URI
}
/// Map a fixture path `tests/fixtures/repository/<host>/<rest>` to the
/// rsync URI `rsync://<host>/<rest>`.
///
/// Panics if `path` does not live under `tests/fixtures/repository`.
fn fixture_to_rsync_uri(path: &Path) -> String {
    let rel = path
        .strip_prefix("tests/fixtures/repository")
        .expect("path under tests/fixtures/repository");
    // First component of the relative path is the repository host name;
    // everything after it is the object path on that host.
    let mut components = rel.components();
    let host_component = components.next().expect("host component");
    let host = host_component.as_os_str().to_string_lossy();
    let rest = components.as_path().to_string_lossy();
    let mut uri = String::with_capacity("rsync://".len() + host.len() + 1 + rest.len());
    uri.push_str("rsync://");
    uri.push_str(&host);
    uri.push('/');
    uri.push_str(&rest);
    uri
}
/// Like `fixture_to_rsync_uri`, but guarantees a trailing slash so the
/// result can be used as a publication-point (directory) URI prefix.
fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
    let uri = fixture_to_rsync_uri(dir);
    if uri.ends_with('/') {
        uri
    } else {
        format!("{uri}/")
    }
}
/// Load the CERNET manifest fixture and return its on-disk path, its raw DER
/// bytes, and the decoded manifest object.
fn load_cernet_manifest_fixture() -> (std::path::PathBuf, Vec<u8>, ManifestObject) {
    let manifest_path = std::path::PathBuf::from(
        "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
    );
    let der = std::fs::read(&manifest_path).expect("read manifest fixture");
    let decoded = ManifestObject::decode_der(&der).expect("decode manifest fixture");
    (manifest_path, der, decoded)
}
/// Seed the raw-object store with the manifest and every file its fileList
/// references, keyed by rsync URI, so a fresh publication-point run can
/// resolve all of them.
///
/// Panics if the manifest fileList cannot be parsed or a referenced fixture
/// file is missing next to the manifest on disk.
fn store_raw_publication_point_files(
    store: &RocksStore,
    manifest_path: &Path,
    manifest_rsync_uri: &str,
    manifest_bytes: &[u8],
    manifest: &ManifestObject,
    publication_point_rsync_uri: &str,
) {
    store
        .put_raw(manifest_rsync_uri, manifest_bytes)
        .expect("store manifest raw");
    let entries = manifest
        .manifest
        .parse_files()
        .expect("parse validated manifest fileList");
    for entry in &entries {
        // Each listed file is expected to sit beside the manifest fixture.
        let file_path = manifest_path
            .parent()
            .unwrap()
            .join(entry.file_name.as_str());
        let bytes = std::fs::read(&file_path)
            .unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
        // Objects are addressed as `<publication point URI><file name>`.
        let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
        store.put_raw(&rsync_uri, &bytes).expect("store file raw");
    }
}
// Scenario: a cached fetch_cache_pp pack that drops one file listed by the
// manifest must be rejected on revalidation (RFC 9286 §6.4 completeness).
#[test]
fn cached_pack_revalidation_rejects_missing_file_referenced_by_manifest() {
    let (manifest_path, manifest_bytes, manifest) = load_cernet_manifest_fixture();
    // Validate just inside the manifest's thisUpdate window.
    let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
    let manifest_rsync_uri = fixture_to_rsync_uri(&manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store_raw_publication_point_files(
        &store,
        &manifest_path,
        &manifest_rsync_uri,
        &manifest_bytes,
        &manifest,
        &publication_point_rsync_uri,
    );
    let mut policy = Policy::default();
    policy.ca_failed_fetch_policy = CaFailedFetchPolicy::UseFetchCachePp;
    let issuer_ca_der = issuer_ca_fixture();
    // First (fresh) run succeeds and populates the fetch_cache_pp pack.
    let _fresh = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        &issuer_ca_der,
        Some(issuer_ca_rsync_uri()),
        validation_time,
    )
    .expect("fresh run stores fetch_cache_pp");
    let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri);
    let cached_bytes = store
        .get_fetch_cache_pp(&key)
        .expect("get fetch_cache_pp")
        .expect("fetch_cache_pp exists");
    let mut pack = FetchCachePpPack::decode(&cached_bytes).expect("decode pack");
    // Remove one file from the pack: pack stays internally consistent, but no longer satisfies
    // RFC 9286 §6.4 when revalidated against the manifest fileList.
    pack.files.pop().expect("non-empty pack");
    let bytes = pack.encode().expect("encode pack");
    store
        .put_fetch_cache_pp(&key, &bytes)
        .expect("overwrite fetch_cache_pp");
    // Force cache path: remove raw manifest so fresh processing fails at §6.2.
    store
        .delete_raw(&manifest_rsync_uri)
        .expect("delete raw manifest");
    // Second run must fall back to the cache and reject the incomplete pack.
    let err = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        &issuer_ca_der,
        Some(issuer_ca_rsync_uri()),
        validation_time,
    )
    .expect_err("cache pack missing file must be rejected");
    let msg = err.to_string();
    assert!(msg.contains("cached fetch_cache_pp missing file"), "{msg}");
    assert!(msg.contains("RFC 9286 §6.4"), "{msg}");
}
// Scenario: a cached pack whose file bytes were mutated (with its own sha256
// field recomputed so the pack is self-consistent) must still be rejected,
// because the hash no longer matches the manifest fileList (RFC 9286 §6.5).
#[test]
fn cached_pack_revalidation_rejects_hash_mismatch_against_manifest_filelist() {
    let (manifest_path, manifest_bytes, manifest) = load_cernet_manifest_fixture();
    // Validate just inside the manifest's thisUpdate window.
    let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
    let manifest_rsync_uri = fixture_to_rsync_uri(&manifest_path);
    let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
    let temp = tempfile::tempdir().expect("tempdir");
    let store = RocksStore::open(temp.path()).expect("open rocksdb");
    store_raw_publication_point_files(
        &store,
        &manifest_path,
        &manifest_rsync_uri,
        &manifest_bytes,
        &manifest,
        &publication_point_rsync_uri,
    );
    let mut policy = Policy::default();
    policy.ca_failed_fetch_policy = CaFailedFetchPolicy::UseFetchCachePp;
    let issuer_ca_der = issuer_ca_fixture();
    // First (fresh) run succeeds and populates the fetch_cache_pp pack.
    let _fresh = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        &issuer_ca_der,
        Some(issuer_ca_rsync_uri()),
        validation_time,
    )
    .expect("fresh run stores fetch_cache_pp");
    let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri);
    let cached_bytes = store
        .get_fetch_cache_pp(&key)
        .expect("get fetch_cache_pp")
        .expect("fetch_cache_pp exists");
    let mut pack = FetchCachePpPack::decode(&cached_bytes).expect("decode pack");
    // Mutate one file but keep pack internally consistent by recomputing its sha256 field.
    let victim = pack.files.first_mut().expect("non-empty pack");
    victim.bytes[0] ^= 0xFF;
    victim.sha256 = victim.compute_sha256();
    let bytes = pack.encode().expect("encode pack");
    store
        .put_fetch_cache_pp(&key, &bytes)
        .expect("overwrite fetch_cache_pp");
    // Force cache path.
    store
        .delete_raw(&manifest_rsync_uri)
        .expect("delete raw manifest");
    // Second run must reject the pack on hash mismatch against the manifest.
    let err = process_manifest_publication_point(
        &store,
        &policy,
        &manifest_rsync_uri,
        &publication_point_rsync_uri,
        &issuer_ca_der,
        Some(issuer_ca_rsync_uri()),
        validation_time,
    )
    .expect_err("cache pack hash mismatch must be rejected");
    let msg = err.to_string();
    assert!(
        msg.contains("cached fetch_cache_pp file hash mismatch"),
        "{msg}"
    );
    assert!(msg.contains("RFC 9286 §6.5"), "{msg}");
}
#[test]
fn repo_sync_failure_forces_fetch_cache_pp_even_if_raw_objects_are_present() {
let (manifest_path, manifest_bytes, manifest) = load_cernet_manifest_fixture();
let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
let manifest_rsync_uri = fixture_to_rsync_uri(&manifest_path);
let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
let temp = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(temp.path()).expect("open rocksdb");
store_raw_publication_point_files(
&store,
&manifest_path,
&manifest_rsync_uri,
&manifest_bytes,
&manifest,
&publication_point_rsync_uri,
);
let mut policy = Policy::default();
policy.ca_failed_fetch_policy = CaFailedFetchPolicy::UseFetchCachePp;
let issuer_ca_der = issuer_ca_fixture();
// First run: fresh processing stores fetch_cache_pp.
let _fresh = process_manifest_publication_point(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
)
.expect("fresh run stores fetch_cache_pp");
// Second run: simulate repo sync failure. Even though raw_objects still contain everything
// needed for a fresh pack, failed fetch semantics require using cached objects only.
let res = process_manifest_publication_point_after_repo_sync(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
false,
Some("synthetic repo sync failure"),
)
.expect("must fall back to fetch_cache_pp");
assert_eq!(res.source, PublicationPointSource::FetchCachePp);
assert!(
res.warnings
.iter()
.any(|w| w.message.contains("using fetch_cache_pp")),
"expected fetch_cache_pp warning"
);
}

View File

@ -10,6 +10,7 @@ use rpki::validation::from_tal::{
discover_root_ca_instance_from_tal_and_ta_der, discover_root_ca_instance_from_tal_url, discover_root_ca_instance_from_tal_and_ta_der, discover_root_ca_instance_from_tal_url,
run_root_from_tal_url_once, run_root_from_tal_url_once,
}; };
use rpki::validation::manifest::PublicationPointSource;
use url::Url; use url::Url;
struct MapFetcher { struct MapFetcher {
@ -42,6 +43,26 @@ impl RsyncFetcher for EmptyRsync {
} }
} }
fn openssl_available() -> bool {
std::process::Command::new("openssl")
.arg("version")
.output()
.map(|o| o.status.success())
.unwrap_or(false)
}
fn run(cmd: &mut std::process::Command) {
let out = cmd.output().expect("run command");
if !out.status.success() {
panic!(
"command failed: {:?}\nstdout={}\nstderr={}",
cmd,
String::from_utf8_lossy(&out.stdout),
String::from_utf8_lossy(&out.stderr)
);
}
}
fn apnic_tal_bytes() -> Vec<u8> { fn apnic_tal_bytes() -> Vec<u8> {
std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal").expect("read apnic TAL fixture") std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal").expect("read apnic TAL fixture")
} }
@ -145,7 +166,7 @@ fn discover_root_from_tal_url_errors_when_tal_fetch_fails() {
} }
#[test] #[test]
fn run_root_from_tal_url_once_propagates_run_error_when_repo_is_empty() { fn run_root_from_tal_url_once_returns_failed_fetch_no_cache_when_repo_is_empty() {
let tal_bytes = apnic_tal_bytes(); let tal_bytes = apnic_tal_bytes();
let tal = Tal::decode_bytes(&tal_bytes).expect("decode TAL"); let tal = Tal::decode_bytes(&tal_bytes).expect("decode TAL");
let ta_uri = tal.ta_uris[0].as_str().to_string(); let ta_uri = tal.ta_uris[0].as_str().to_string();
@ -161,7 +182,7 @@ fn run_root_from_tal_url_once_propagates_run_error_when_repo_is_empty() {
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.sync_preference = SyncPreference::RsyncOnly; policy.sync_preference = SyncPreference::RsyncOnly;
let err = run_root_from_tal_url_once( let out = run_root_from_tal_url_once(
&store, &store,
&policy, &policy,
"https://example.test/apnic.tal", "https://example.test/apnic.tal",
@ -169,7 +190,105 @@ fn run_root_from_tal_url_once_propagates_run_error_when_repo_is_empty() {
&EmptyRsync, &EmptyRsync,
time::OffsetDateTime::now_utc(), time::OffsetDateTime::now_utc(),
) )
.unwrap_err(); .expect("run should return failed-fetch-no-cache output");
assert!(matches!(err, FromTalError::Run(_))); assert_eq!(
out.run.publication_point_source,
PublicationPointSource::FailedFetchNoCache
);
assert!(out.run.objects.vrps.is_empty());
assert!(out.run.objects.aspas.is_empty());
assert!(
out.run
.publication_point_warnings
.iter()
.any(|warning| warning.message.contains("no latest validated result"))
);
}
#[test]
fn discover_root_records_ca_instance_discovery_failure_when_ta_lacks_sia() {
use base64::Engine;
assert!(openssl_available(), "openssl is required for this test");
let temp = tempfile::tempdir().expect("tempdir");
let dir = temp.path();
std::fs::write(
dir.join("openssl.cnf"),
r#"
[ req ]
prompt = no
distinguished_name = dn
x509_extensions = v3_ta
[ dn ]
CN = Test TA Without SIA
[ v3_ta ]
basicConstraints = critical,CA:true
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
certificatePolicies = critical, 1.3.6.1.5.5.7.14.2
sbgp-ipAddrBlock = critical, IPv4:10.0.0.0/8
sbgp-autonomousSysNum = critical, AS:64496-64511
"#,
)
.expect("write openssl cnf");
run(std::process::Command::new("openssl")
.arg("genrsa")
.arg("-out")
.arg(dir.join("ta.key"))
.arg("2048"));
run(std::process::Command::new("openssl")
.arg("req")
.arg("-new")
.arg("-x509")
.arg("-sha256")
.arg("-days")
.arg("365")
.arg("-key")
.arg(dir.join("ta.key"))
.arg("-config")
.arg(dir.join("openssl.cnf"))
.arg("-extensions")
.arg("v3_ta")
.arg("-out")
.arg(dir.join("ta.pem")));
run(std::process::Command::new("openssl")
.arg("x509")
.arg("-in")
.arg(dir.join("ta.pem"))
.arg("-outform")
.arg("DER")
.arg("-out")
.arg(dir.join("ta.cer")));
run(std::process::Command::new("sh").arg("-c").arg(format!(
"openssl x509 -in {} -pubkey -noout | openssl pkey -pubin -outform DER > {}",
dir.join("ta.pem").display(),
dir.join("spki.der").display(),
)));
let ta_uri = "https://example.test/no-sia-ta.cer";
let tal_text = format!(
"{ta_uri}\n\n{}\n",
base64::engine::general_purpose::STANDARD
.encode(std::fs::read(dir.join("spki.der")).expect("read spki der"))
);
let tal = Tal::decode_bytes(tal_text.as_bytes()).expect("decode generated tal");
let mut map = HashMap::new();
map.insert(
ta_uri.to_string(),
std::fs::read(dir.join("ta.cer")).expect("read ta der"),
);
let fetcher = MapFetcher::new(map);
let err = discover_root_ca_instance_from_tal(&fetcher, tal, None).unwrap_err();
assert!(matches!(err, FromTalError::TaFetch(_)), "{err}");
assert!(
err.to_string().contains("CA instance discovery failed"),
"{err}"
);
} }

View File

@ -1,8 +1,5 @@
use std::path::Path;
use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy}; use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{FetchCachePpKey, FetchCachePpPack, RocksStore}; use rpki::storage::RocksStore;
use rpki::validation::manifest::process_manifest_publication_point; use rpki::validation::manifest::process_manifest_publication_point;
fn issuer_ca_fixture() -> Vec<u8> { fn issuer_ca_fixture() -> Vec<u8> {
@ -16,35 +13,13 @@ fn issuer_ca_rsync_uri() -> &'static str {
"rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer" "rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer"
} }
fn fixture_to_rsync_uri(path: &Path) -> String {
let rel = path
.strip_prefix("tests/fixtures/repository")
.expect("path under tests/fixtures/repository");
let mut it = rel.components();
let host = it
.next()
.expect("host component")
.as_os_str()
.to_string_lossy();
let rest = it.as_path().to_string_lossy();
format!("rsync://{host}/{rest}")
}
fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
let mut s = fixture_to_rsync_uri(dir);
if !s.ends_with('/') {
s.push('/');
}
s
}
#[test] #[test]
fn cache_is_not_used_when_missing_and_fresh_manifest_is_missing() { fn cache_is_not_used_when_missing_and_fresh_manifest_is_missing() {
let temp = tempfile::tempdir().expect("tempdir"); let temp = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(temp.path()).expect("open rocksdb"); let store = RocksStore::open(temp.path()).expect("open rocksdb");
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.ca_failed_fetch_policy = CaFailedFetchPolicy::UseFetchCachePp; policy.ca_failed_fetch_policy = CaFailedFetchPolicy::ReuseCurrentInstanceVcir;
let issuer_ca_der = issuer_ca_fixture(); let issuer_ca_der = issuer_ca_fixture();
let err = process_manifest_publication_point( let err = process_manifest_publication_point(
@ -56,89 +31,15 @@ fn cache_is_not_used_when_missing_and_fresh_manifest_is_missing() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
time::OffsetDateTime::from_unix_timestamp(0).unwrap(), time::OffsetDateTime::from_unix_timestamp(0).unwrap(),
) )
.expect_err("no raw and no fetch_cache_pp should fail"); .expect_err("no raw and no current-instance VCIR should fail");
assert!(err.to_string().contains("fetch_cache_pp entry missing"));
}
#[test]
fn cache_pack_publication_point_mismatch_is_rejected() {
let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
);
let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
let temp = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(temp.path()).expect("open rocksdb");
store
.put_raw(&manifest_rsync_uri, &manifest_bytes)
.expect("store manifest");
let entries = manifest
.manifest
.parse_files()
.expect("parse validated manifest fileList");
for entry in &entries {
let file_path = manifest_path
.parent()
.unwrap()
.join(entry.file_name.as_str());
let bytes = std::fs::read(&file_path)
.unwrap_or_else(|_| panic!("read fixture file referenced by manifest: {file_path:?}"));
let rsync_uri = format!("{publication_point_rsync_uri}{}", entry.file_name);
store.put_raw(&rsync_uri, &bytes).expect("store file");
}
let policy = Policy::default();
let issuer_ca_der = issuer_ca_fixture();
let _ = process_manifest_publication_point(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
)
.expect("first run stores fetch_cache_pp pack");
// Corrupt the cached pack by changing the publication point.
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri);
let bytes = store
.get_fetch_cache_pp(&key)
.expect("get fetch_cache_pp")
.expect("fetch_cache_pp exists");
let mut pack = FetchCachePpPack::decode(&bytes).expect("decode pack");
pack.publication_point_rsync_uri = "rsync://evil.invalid/repo/".to_string();
let bytes = pack.encode().expect("re-encode pack");
store
.put_fetch_cache_pp(&key, &bytes)
.expect("overwrite fetch_cache_pp");
// Remove raw manifest to force cache path.
store
.delete_raw(&manifest_rsync_uri)
.expect("delete raw manifest");
let err = process_manifest_publication_point(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
)
.expect_err("cache pack mismatch should fail");
let msg = err.to_string();
assert!( assert!(
err.to_string() msg.contains("no reusable current-instance validated result is available"),
.contains("publication_point_rsync_uri does not match expected") "{msg}"
);
assert!(
msg.contains("latest current-instance VCIR missing"),
"{msg}"
); );
} }

View File

@ -1,8 +1,14 @@
use std::path::Path; use std::path::Path;
use sha2::Digest;
use rpki::data_model::manifest::ManifestObject; use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy}; use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{FetchCachePpKey, FetchCachePpPack, RocksStore}; use rpki::storage::{
PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
};
use rpki::validation::manifest::{PublicationPointSource, process_manifest_publication_point}; use rpki::validation::manifest::{PublicationPointSource, process_manifest_publication_point};
fn issuer_ca_fixture() -> Vec<u8> { fn issuer_ca_fixture() -> Vec<u8> {
@ -38,8 +44,78 @@ fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
s s
} }
fn store_validated_manifest_baseline(
store: &RocksStore,
manifest_rsync_uri: &str,
manifest_bytes: &[u8],
manifest_number_be: Vec<u8>,
this_update: time::OffsetDateTime,
next_update: time::OffsetDateTime,
) {
let manifest_sha256 = hex::encode(sha2::Sha256::digest(manifest_bytes));
let mut manifest_raw =
RawByHashEntry::from_bytes(manifest_sha256.clone(), manifest_bytes.to_vec());
manifest_raw
.origin_uris
.push(manifest_rsync_uri.to_string());
manifest_raw.object_type = Some("mft".to_string());
manifest_raw.encoding = Some("der".to_string());
store
.put_raw_by_hash_entry(&manifest_raw)
.expect("store VCIR manifest raw_by_hash");
let vcir = ValidatedCaInstanceResult {
manifest_rsync_uri: manifest_rsync_uri.to_string(),
parent_manifest_rsync_uri: None,
tal_id: "test-tal".to_string(),
ca_subject_name: "CN=test".to_string(),
ca_ski: "aa".to_string(),
issuer_ski: "aa".to_string(),
last_successful_validation_time: PackTime::from_utc_offset_datetime(this_update),
current_manifest_rsync_uri: manifest_rsync_uri.to_string(),
current_crl_rsync_uri: format!("{manifest_rsync_uri}.crl"),
validated_manifest_meta: ValidatedManifestMeta {
validated_manifest_number: manifest_number_be,
validated_manifest_this_update: PackTime::from_utc_offset_datetime(this_update),
validated_manifest_next_update: PackTime::from_utc_offset_datetime(next_update),
},
instance_gate: VcirInstanceGate {
manifest_next_update: PackTime::from_utc_offset_datetime(next_update),
current_crl_next_update: PackTime::from_utc_offset_datetime(next_update),
self_ca_not_after: PackTime::from_utc_offset_datetime(next_update),
instance_effective_until: PackTime::from_utc_offset_datetime(next_update),
},
child_entries: Vec::new(),
local_outputs: Vec::new(),
related_artifacts: vec![VcirRelatedArtifact {
artifact_role: VcirArtifactRole::Manifest,
artifact_kind: VcirArtifactKind::Mft,
uri: Some(manifest_rsync_uri.to_string()),
sha256: manifest_sha256,
object_type: Some("mft".to_string()),
validation_status: VcirArtifactValidationStatus::Accepted,
}],
summary: VcirSummary {
local_vrp_count: 0,
local_aspa_count: 0,
child_count: 0,
accepted_object_count: 1,
rejected_object_count: 0,
},
audit_summary: VcirAuditSummary {
failed_fetch_eligible: true,
last_failed_fetch_reason: None,
warning_count: 0,
audit_flags: Vec::new(),
},
};
store
.put_vcir(&vcir)
.expect("store validated manifest baseline");
}
#[test] #[test]
fn manifest_success_writes_fetch_cache_pp_pack() { fn manifest_success_returns_validated_publication_point_data() {
let manifest_path = Path::new( let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -87,21 +163,15 @@ fn manifest_success_writes_fetch_cache_pp_pack() {
assert_eq!(out.source, PublicationPointSource::Fresh); assert_eq!(out.source, PublicationPointSource::Fresh);
assert!(out.warnings.is_empty()); assert!(out.warnings.is_empty());
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri); assert_eq!(out.snapshot.manifest_rsync_uri, manifest_rsync_uri);
let stored = store
.get_fetch_cache_pp(&key)
.expect("get fetch_cache_pp")
.expect("fetch_cache_pp pack exists");
let decoded = FetchCachePpPack::decode(&stored).expect("decode stored pack");
assert_eq!(decoded.manifest_rsync_uri, manifest_rsync_uri);
assert_eq!( assert_eq!(
decoded.publication_point_rsync_uri, out.snapshot.publication_point_rsync_uri,
publication_point_rsync_uri publication_point_rsync_uri
); );
} }
#[test] #[test]
fn manifest_hash_mismatch_falls_back_to_fetch_cache_pp_when_enabled() { fn manifest_hash_mismatch_reuses_current_instance_vcir_when_enabled() {
let manifest_path = Path::new( let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -144,15 +214,17 @@ fn manifest_hash_mismatch_falls_back_to_fetch_cache_pp_when_enabled() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
validation_time, validation_time,
) )
.expect("first run stores fetch_cache_pp pack"); .expect("first run returns validated publication point");
assert_eq!(first.source, PublicationPointSource::Fresh); assert_eq!(first.source, PublicationPointSource::Fresh);
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri); store_validated_manifest_baseline(
let cached_bytes = store &store,
.get_fetch_cache_pp(&key) &manifest_rsync_uri,
.expect("get fetch_cache_pp") &manifest_bytes,
.expect("fetch_cache_pp pack exists"); manifest.manifest.manifest_number.bytes_be.clone(),
let cached_pack = FetchCachePpPack::decode(&cached_bytes).expect("decode cached"); manifest.manifest.this_update,
manifest.manifest.next_update,
);
let entries = manifest let entries = manifest
.manifest .manifest
@ -176,10 +248,14 @@ fn manifest_hash_mismatch_falls_back_to_fetch_cache_pp_when_enabled() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
validation_time, validation_time,
) )
.expect("second run falls back to fetch_cache_pp"); .expect("second run reuses current-instance VCIR");
assert_eq!(second.source, PublicationPointSource::FetchCachePp); assert_eq!(second.source, PublicationPointSource::VcirCurrentInstance);
assert!(!second.warnings.is_empty()); assert!(
assert_eq!(second.pack, cached_pack); second.warnings.iter().any(|w| w
.message
.contains("using latest validated result for current CA instance")),
"expected current-instance VCIR reuse warning"
);
} }
#[test] #[test]
@ -216,7 +292,7 @@ fn manifest_failed_fetch_stop_all_output() {
} }
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.ca_failed_fetch_policy = CaFailedFetchPolicy::UseFetchCachePp; policy.ca_failed_fetch_policy = CaFailedFetchPolicy::ReuseCurrentInstanceVcir;
let issuer_ca_der = issuer_ca_fixture(); let issuer_ca_der = issuer_ca_fixture();
let _ = process_manifest_publication_point( let _ = process_manifest_publication_point(
&store, &store,
@ -227,7 +303,7 @@ fn manifest_failed_fetch_stop_all_output() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
validation_time, validation_time,
) )
.expect("first run stores fetch_cache_pp pack"); .expect("first run returns validated publication point");
let entries = manifest let entries = manifest
.manifest .manifest
@ -252,13 +328,13 @@ fn manifest_failed_fetch_stop_all_output() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
validation_time, validation_time,
) )
.expect_err("stop_all_output should not use fetch_cache_pp"); .expect_err("stop_all_output should not reuse current-instance VCIR");
let msg = err.to_string(); let msg = err.to_string();
assert!(msg.contains("cache use is disabled")); assert!(msg.contains("cache use is disabled"));
} }
#[test] #[test]
fn manifest_fallback_pack_is_revalidated_and_rejected_if_stale() { fn manifest_failed_fetch_rejects_stale_current_instance_vcir() {
let manifest_path = Path::new( let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -302,7 +378,16 @@ fn manifest_fallback_pack_is_revalidated_and_rejected_if_stale() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
ok_time, ok_time,
) )
.expect("first run stores fetch_cache_pp pack"); .expect("first run returns validated publication point");
store_validated_manifest_baseline(
&store,
&manifest_rsync_uri,
&manifest_bytes,
manifest.manifest.manifest_number.bytes_be.clone(),
manifest.manifest.this_update,
manifest.manifest.next_update,
);
store store
.delete_raw(&manifest_rsync_uri) .delete_raw(&manifest_rsync_uri)
@ -317,9 +402,9 @@ fn manifest_fallback_pack_is_revalidated_and_rejected_if_stale() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
stale_time, stale_time,
) )
.expect_err("stale validation_time must reject fetch_cache_pp pack"); .expect_err("stale validation_time must reject current-instance VCIR reuse");
let msg = err.to_string(); let msg = err.to_string();
assert!(msg.contains("not valid at validation_time")); assert!(msg.contains("instance_gate expired"), "{msg}");
} }
#[test] #[test]
@ -369,9 +454,18 @@ fn manifest_revalidation_with_unchanged_manifest_is_fresh() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
t1, t1,
) )
.expect("first run builds and stores fetch_cache_pp pack"); .expect("first run returns validated publication point");
assert_eq!(first.source, PublicationPointSource::Fresh); assert_eq!(first.source, PublicationPointSource::Fresh);
store_validated_manifest_baseline(
&store,
&manifest_rsync_uri,
&manifest_bytes,
first.snapshot.manifest_number_be.clone(),
manifest.manifest.this_update,
manifest.manifest.next_update,
);
let second = process_manifest_publication_point( let second = process_manifest_publication_point(
&store, &store,
&policy, &policy,
@ -384,16 +478,19 @@ fn manifest_revalidation_with_unchanged_manifest_is_fresh() {
.expect("second run should accept revalidation of the same manifest"); .expect("second run should accept revalidation of the same manifest");
assert_eq!(second.source, PublicationPointSource::Fresh); assert_eq!(second.source, PublicationPointSource::Fresh);
assert!(second.warnings.is_empty()); assert!(second.warnings.is_empty());
assert_eq!(second.pack.manifest_bytes, first.pack.manifest_bytes);
assert_eq!( assert_eq!(
second.pack.manifest_number_be, second.snapshot.manifest_bytes,
first.pack.manifest_number_be first.snapshot.manifest_bytes
); );
assert_eq!(second.pack.files, first.pack.files); assert_eq!(
second.snapshot.manifest_number_be,
first.snapshot.manifest_number_be
);
assert_eq!(second.snapshot.files, first.snapshot.files);
} }
#[test] #[test]
fn manifest_rollback_is_treated_as_failed_fetch_and_uses_fetch_cache_pp() { fn manifest_rollback_is_treated_as_failed_fetch_and_reuses_current_instance_vcir() {
let manifest_path = Path::new( let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -439,16 +536,11 @@ fn manifest_rollback_is_treated_as_failed_fetch_and_uses_fetch_cache_pp() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
t1, t1,
) )
.expect("first run builds and stores fetch_cache_pp pack"); .expect("first run returns validated publication point");
assert_eq!(first.source, PublicationPointSource::Fresh); assert_eq!(first.source, PublicationPointSource::Fresh);
// Simulate a previously validated manifest with a higher manifestNumber (rollback detection). // Simulate a previously validated manifest with a higher manifestNumber (rollback detection).
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri); let mut bumped = first.snapshot.clone();
let stored = store
.get_fetch_cache_pp(&key)
.expect("get fetch_cache_pp")
.expect("fetch_cache_pp pack exists");
let mut bumped = FetchCachePpPack::decode(&stored).expect("decode stored pack");
// Deterministically bump the cached manifestNumber to be strictly greater than the current one. // Deterministically bump the cached manifestNumber to be strictly greater than the current one.
for i in (0..bumped.manifest_number_be.len()).rev() { for i in (0..bumped.manifest_number_be.len()).rev() {
let (v, carry) = bumped.manifest_number_be[i].overflowing_add(1); let (v, carry) = bumped.manifest_number_be[i].overflowing_add(1);
@ -461,10 +553,14 @@ fn manifest_rollback_is_treated_as_failed_fetch_and_uses_fetch_cache_pp() {
break; break;
} }
} }
let bumped_bytes = bumped.encode().expect("encode bumped pack"); store_validated_manifest_baseline(
store &store,
.put_fetch_cache_pp(&key, &bumped_bytes) &manifest_rsync_uri,
.expect("store bumped pack"); &manifest_bytes,
bumped.manifest_number_be.clone(),
manifest.manifest.this_update,
manifest.manifest.next_update,
);
let second = process_manifest_publication_point( let second = process_manifest_publication_point(
&store, &store,
@ -475,9 +571,8 @@ fn manifest_rollback_is_treated_as_failed_fetch_and_uses_fetch_cache_pp() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
t2, t2,
) )
.expect("second run should treat rollback as failed fetch and use cache"); .expect("second run should treat rollback as failed fetch and reuse current-instance VCIR");
assert_eq!(second.source, PublicationPointSource::FetchCachePp); assert_eq!(second.source, PublicationPointSource::VcirCurrentInstance);
assert_eq!(second.pack, bumped);
assert!( assert!(
second second
.warnings .warnings

View File

@ -90,7 +90,7 @@ fn manifest_outside_publication_point_yields_no_usable_cache() {
) )
.unwrap_err(); .unwrap_err();
// With no cached pack available for this wrong publication point, we get NoUsableCache. // With no reusable current-instance snapshot/VCIR for this wrong publication point, we get NoUsableCache.
assert!( assert!(
matches!(err, ManifestProcessError::NoUsableCache { .. }), matches!(err, ManifestProcessError::NoUsableCache { .. }),
"{err}" "{err}"
@ -98,7 +98,7 @@ fn manifest_outside_publication_point_yields_no_usable_cache() {
} }
#[test] #[test]
fn manifest_outside_publication_point_detects_cached_pack_pp_mismatch() { fn manifest_outside_publication_point_detects_current_instance_snapshot_pp_mismatch() {
let manifest_path = Path::new( let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -133,7 +133,7 @@ fn manifest_outside_publication_point_detects_cached_pack_pp_mismatch() {
let policy = Policy::default(); let policy = Policy::default();
let issuer_ca_der = issuer_ca_fixture_der(); let issuer_ca_der = issuer_ca_fixture_der();
// First run creates and stores fetch_cache_pp pack (Fresh). // First run validates a fresh publication point snapshot and stores the latest VCIR.
let first = process_manifest_publication_point( let first = process_manifest_publication_point(
&store, &store,
&policy, &policy,
@ -147,7 +147,7 @@ fn manifest_outside_publication_point_detects_cached_pack_pp_mismatch() {
assert_eq!(first.source, PublicationPointSource::Fresh); assert_eq!(first.source, PublicationPointSource::Fresh);
// Second run with wrong publication point: fresh fails outside PP; cache load also fails // Second run with wrong publication point: fresh fails outside PP; cache load also fails
// because the cached pack's publication_point_rsync_uri doesn't match the expected one. // because the reusable current-instance snapshot carries a different publication_point_rsync_uri.
let wrong_pp = "rsync://example.test/not-the-pp/"; let wrong_pp = "rsync://example.test/not-the-pp/";
let err = process_manifest_publication_point( let err = process_manifest_publication_point(
&store, &store,

View File

@ -1,8 +1,14 @@
use std::path::Path; use std::path::Path;
use sha2::Digest;
use rpki::data_model::manifest::ManifestObject; use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy}; use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{FetchCachePpKey, FetchCachePpPack, RocksStore}; use rpki::storage::{
PackTime, RawByHashEntry, RocksStore, ValidatedCaInstanceResult, ValidatedManifestMeta,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirAuditSummary,
VcirInstanceGate, VcirRelatedArtifact, VcirSummary,
};
use rpki::validation::manifest::{ use rpki::validation::manifest::{
ManifestProcessError, PublicationPointSource, process_manifest_publication_point, ManifestProcessError, PublicationPointSource, process_manifest_publication_point,
process_manifest_publication_point_after_repo_sync, process_manifest_publication_point_after_repo_sync,
@ -41,6 +47,76 @@ fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
s s
} }
fn store_validated_manifest_baseline(
store: &RocksStore,
manifest_rsync_uri: &str,
manifest_bytes: &[u8],
manifest_number_be: Vec<u8>,
this_update: time::OffsetDateTime,
next_update: time::OffsetDateTime,
) {
let manifest_sha256 = hex::encode(sha2::Sha256::digest(manifest_bytes));
let mut manifest_raw =
RawByHashEntry::from_bytes(manifest_sha256.clone(), manifest_bytes.to_vec());
manifest_raw
.origin_uris
.push(manifest_rsync_uri.to_string());
manifest_raw.object_type = Some("mft".to_string());
manifest_raw.encoding = Some("der".to_string());
store
.put_raw_by_hash_entry(&manifest_raw)
.expect("store VCIR manifest raw_by_hash");
let vcir = ValidatedCaInstanceResult {
manifest_rsync_uri: manifest_rsync_uri.to_string(),
parent_manifest_rsync_uri: None,
tal_id: "test-tal".to_string(),
ca_subject_name: "CN=test".to_string(),
ca_ski: "aa".to_string(),
issuer_ski: "aa".to_string(),
last_successful_validation_time: PackTime::from_utc_offset_datetime(this_update),
current_manifest_rsync_uri: manifest_rsync_uri.to_string(),
current_crl_rsync_uri: format!("{manifest_rsync_uri}.crl"),
validated_manifest_meta: ValidatedManifestMeta {
validated_manifest_number: manifest_number_be,
validated_manifest_this_update: PackTime::from_utc_offset_datetime(this_update),
validated_manifest_next_update: PackTime::from_utc_offset_datetime(next_update),
},
instance_gate: VcirInstanceGate {
manifest_next_update: PackTime::from_utc_offset_datetime(next_update),
current_crl_next_update: PackTime::from_utc_offset_datetime(next_update),
self_ca_not_after: PackTime::from_utc_offset_datetime(next_update),
instance_effective_until: PackTime::from_utc_offset_datetime(next_update),
},
child_entries: Vec::new(),
local_outputs: Vec::new(),
related_artifacts: vec![VcirRelatedArtifact {
artifact_role: VcirArtifactRole::Manifest,
artifact_kind: VcirArtifactKind::Mft,
uri: Some(manifest_rsync_uri.to_string()),
sha256: manifest_sha256,
object_type: Some("mft".to_string()),
validation_status: VcirArtifactValidationStatus::Accepted,
}],
summary: VcirSummary {
local_vrp_count: 0,
local_aspa_count: 0,
child_count: 0,
accepted_object_count: 1,
rejected_object_count: 0,
},
audit_summary: VcirAuditSummary {
failed_fetch_eligible: true,
last_failed_fetch_reason: None,
warning_count: 0,
audit_flags: Vec::new(),
},
};
store
.put_vcir(&vcir)
.expect("store validated manifest baseline");
}
fn store_manifest_and_locked_files( fn store_manifest_and_locked_files(
store: &RocksStore, store: &RocksStore,
manifest_path: &Path, manifest_path: &Path,
@ -70,7 +146,7 @@ fn store_manifest_and_locked_files(
} }
#[test] #[test]
fn repo_sync_failed_can_fall_back_to_fetch_cache_pp_when_present() { fn repo_sync_failed_can_reuse_current_instance_vcir_when_present() {
let manifest_path = Path::new( let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -94,7 +170,7 @@ fn repo_sync_failed_can_fall_back_to_fetch_cache_pp_when_present() {
let issuer_ca_der = issuer_ca_fixture_der(); let issuer_ca_der = issuer_ca_fixture_der();
// First run: build and store a valid fetch_cache_pp pack (Fresh). // First run: build a fresh publication point result, then seed a current-instance VCIR baseline.
let policy = Policy::default(); let policy = Policy::default();
let first = process_manifest_publication_point( let first = process_manifest_publication_point(
&store, &store,
@ -108,7 +184,16 @@ fn repo_sync_failed_can_fall_back_to_fetch_cache_pp_when_present() {
.expect("first run ok"); .expect("first run ok");
assert_eq!(first.source, PublicationPointSource::Fresh); assert_eq!(first.source, PublicationPointSource::Fresh);
// Second run: simulate RRDP/rsync repo sync failure and ensure we still accept the cached pack. store_validated_manifest_baseline(
&store,
&manifest_rsync_uri,
&manifest_bytes,
manifest.manifest.manifest_number.bytes_be.clone(),
manifest.manifest.this_update,
manifest.manifest.next_update,
);
// Seed a latest current-instance VCIR baseline, then simulate RRDP/rsync repo sync failure.
let second = process_manifest_publication_point_after_repo_sync( let second = process_manifest_publication_point_after_repo_sync(
&store, &store,
&policy, &policy,
@ -120,83 +205,13 @@ fn repo_sync_failed_can_fall_back_to_fetch_cache_pp_when_present() {
false, false,
Some("repo sync failed in test"), Some("repo sync failed in test"),
) )
.expect("repo sync failure should fall back to fetch_cache_pp"); .expect("repo sync failure should reuse current-instance VCIR");
assert_eq!(second.source, PublicationPointSource::FetchCachePp); assert_eq!(second.source, PublicationPointSource::VcirCurrentInstance);
assert_eq!(second.pack, first.pack);
assert!(!second.warnings.is_empty());
}
#[test]
fn cached_pack_manifest_rsync_uri_mismatch_is_rejected_as_invalid_pack() {
let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
);
let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
let temp = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(temp.path()).expect("open rocksdb");
store_manifest_and_locked_files(
&store,
manifest_path,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&manifest,
&manifest_bytes,
);
let issuer_ca_der = issuer_ca_fixture_der();
let policy = Policy::default();
// First run stores a valid pack.
let _ = process_manifest_publication_point(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
)
.expect("first run stores pack");
// Corrupt cached pack metadata: manifest_rsync_uri doesn't match the key.
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri);
let cached_bytes = store
.get_fetch_cache_pp(&key)
.expect("get fetch_cache_pp")
.expect("fetch_cache_pp exists");
let mut pack = FetchCachePpPack::decode(&cached_bytes).expect("decode pack");
pack.manifest_rsync_uri = "rsync://example.test/wrong.mft".to_string();
store
.put_fetch_cache_pp(&key, &pack.encode().expect("encode pack"))
.expect("store corrupted pack");
// Force fresh failure and trigger cache load.
let err = process_manifest_publication_point_after_repo_sync(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
false,
Some("repo sync failed in test"),
)
.unwrap_err();
assert!( assert!(
matches!(err, ManifestProcessError::NoUsableCache { .. }), second.warnings.iter().any(|w| w
"{err}" .message
); .contains("using latest validated result for current CA instance")),
assert!( "expected current-instance VCIR reuse warning"
err.to_string()
.contains("cached pack manifest_rsync_uri does not match key"),
"unexpected error: {err}"
); );
} }
@ -269,7 +284,7 @@ fn manifest_missing_locked_file_is_treated_as_failed_fetch() {
} }
#[test] #[test]
fn manifest_number_increases_but_this_update_not_increasing_is_failed_fetch() { fn manifest_number_increases_but_this_update_not_increasing_reuses_current_instance_vcir() {
let manifest_path = Path::new( let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -294,7 +309,7 @@ fn manifest_number_increases_but_this_update_not_increasing_is_failed_fetch() {
let issuer_ca_der = issuer_ca_fixture_der(); let issuer_ca_der = issuer_ca_fixture_der();
let policy = Policy::default(); let policy = Policy::default();
// Build and store a valid pack first. // Build and store a valid publication point snapshot first.
let _ = process_manifest_publication_point( let _ = process_manifest_publication_point(
&store, &store,
&policy, &policy,
@ -304,23 +319,18 @@ fn manifest_number_increases_but_this_update_not_increasing_is_failed_fetch() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
validation_time, validation_time,
) )
.expect("first run stores pack"); .expect("first run returns validated publication point");
// Replace the cached pack with an "older" manifestNumber but a newer thisUpdate to trigger // Seed VCIR with an older manifestNumber but a newer thisUpdate to trigger
// RFC 9286 §4.2.1 thisUpdate monotonicity failure on the fresh path. // RFC 9286 §4.2.1 thisUpdate monotonicity failure on the fresh path.
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri); store_validated_manifest_baseline(
let cached_bytes = store &store,
.get_fetch_cache_pp(&key) &manifest_rsync_uri,
.expect("get fetch_cache_pp") &manifest_bytes,
.expect("fetch_cache_pp exists"); vec![0],
let mut old_pack = FetchCachePpPack::decode(&cached_bytes).expect("decode pack");
old_pack.manifest_number_be = vec![0];
old_pack.this_update = rpki::storage::PackTime::from_utc_offset_datetime(
manifest.manifest.this_update + time::Duration::hours(24), manifest.manifest.this_update + time::Duration::hours(24),
manifest.manifest.next_update,
); );
store
.put_fetch_cache_pp(&key, &old_pack.encode().expect("encode pack"))
.expect("store adjusted pack");
let out = process_manifest_publication_point( let out = process_manifest_publication_point(
&store, &store,
@ -331,8 +341,8 @@ fn manifest_number_increases_but_this_update_not_increasing_is_failed_fetch() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
validation_time, validation_time,
) )
.expect("should fall back to fetch_cache_pp"); .expect("should reuse current-instance VCIR");
assert_eq!(out.source, PublicationPointSource::FetchCachePp); assert_eq!(out.source, PublicationPointSource::VcirCurrentInstance);
assert!( assert!(
out.warnings out.warnings
.iter() .iter()
@ -368,7 +378,7 @@ fn manifest_number_equal_but_bytes_differ_is_rejected_without_cache() {
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.ca_failed_fetch_policy = CaFailedFetchPolicy::StopAllOutput; policy.ca_failed_fetch_policy = CaFailedFetchPolicy::StopAllOutput;
// Store a cached pack that has the same manifestNumber but different manifest bytes. // Store a cached snapshot that has the same manifestNumber but different manifest bytes.
let _ = process_manifest_publication_point( let _ = process_manifest_publication_point(
&store, &store,
&policy, &policy,
@ -378,18 +388,18 @@ fn manifest_number_equal_but_bytes_differ_is_rejected_without_cache() {
Some(issuer_ca_rsync_uri()), Some(issuer_ca_rsync_uri()),
validation_time, validation_time,
) )
.expect("first run stores pack"); .expect("first run returns validated publication point");
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri); let mut mutated_manifest_bytes = manifest_bytes.clone();
let cached_bytes = store mutated_manifest_bytes[0] ^= 0xFF;
.get_fetch_cache_pp(&key) store_validated_manifest_baseline(
.expect("get fetch_cache_pp") &store,
.expect("fetch_cache_pp exists"); &manifest_rsync_uri,
let mut pack = FetchCachePpPack::decode(&cached_bytes).expect("decode pack"); &mutated_manifest_bytes,
pack.manifest_bytes[0] ^= 0xFF; manifest.manifest.manifest_number.bytes_be.clone(),
store manifest.manifest.this_update,
.put_fetch_cache_pp(&key, &pack.encode().expect("encode pack")) manifest.manifest.next_update,
.expect("store adjusted pack"); );
let err = process_manifest_publication_point( let err = process_manifest_publication_point(
&store, &store,
@ -455,154 +465,3 @@ fn manifest_embedded_ee_cert_path_validation_fails_with_wrong_issuer_ca() {
"unexpected error: {err}" "unexpected error: {err}"
); );
} }
#[test]
fn cached_pack_missing_file_is_rejected_during_revalidation() {
let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
);
let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
let temp = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(temp.path()).expect("open rocksdb");
store_manifest_and_locked_files(
&store,
manifest_path,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&manifest,
&manifest_bytes,
);
let issuer_ca_der = issuer_ca_fixture_der();
let policy = Policy::default();
// Store a valid pack first.
let _ = process_manifest_publication_point(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
)
.expect("first run stores pack");
// Corrupt cached pack by removing one referenced file.
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri);
let cached_bytes = store
.get_fetch_cache_pp(&key)
.expect("get fetch_cache_pp")
.expect("fetch_cache_pp exists");
let mut pack = FetchCachePpPack::decode(&cached_bytes).expect("decode pack");
assert!(!pack.files.is_empty(), "fixture should lock some files");
pack.files.pop();
store
.put_fetch_cache_pp(&key, &pack.encode().expect("encode pack"))
.expect("store corrupted pack");
// Force the fresh path to fail and trigger cache revalidation.
let err = process_manifest_publication_point_after_repo_sync(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
false,
Some("repo sync failed in test"),
)
.unwrap_err();
assert!(
matches!(err, ManifestProcessError::NoUsableCache { .. }),
"{err}"
);
assert!(
err.to_string()
.contains("cached fetch_cache_pp missing file"),
"unexpected error: {err}"
);
}
#[test]
fn cached_pack_hash_mismatch_is_rejected_during_revalidation() {
let manifest_path = Path::new(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
);
let manifest_bytes = std::fs::read(manifest_path).expect("read manifest fixture");
let manifest = ManifestObject::decode_der(&manifest_bytes).expect("decode manifest fixture");
let validation_time = manifest.manifest.this_update + time::Duration::seconds(1);
let manifest_rsync_uri = fixture_to_rsync_uri(manifest_path);
let publication_point_rsync_uri = fixture_dir_to_rsync_uri(manifest_path.parent().unwrap());
let temp = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(temp.path()).expect("open rocksdb");
store_manifest_and_locked_files(
&store,
manifest_path,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&manifest,
&manifest_bytes,
);
let issuer_ca_der = issuer_ca_fixture_der();
let policy = Policy::default();
// Store a valid pack first.
let _ = process_manifest_publication_point(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
)
.expect("first run stores pack");
// Corrupt cached pack by changing one file's bytes+sha256 so internal validation passes,
// but the manifest fileList binding check fails (RFC 9286 §6.5).
let key = FetchCachePpKey::from_manifest_rsync_uri(&manifest_rsync_uri);
let cached_bytes = store
.get_fetch_cache_pp(&key)
.expect("get fetch_cache_pp")
.expect("fetch_cache_pp exists");
let mut pack = FetchCachePpPack::decode(&cached_bytes).expect("decode pack");
let victim = pack.files.first_mut().expect("non-empty file list");
victim.bytes[0] ^= 0xFF;
victim.sha256 = victim.compute_sha256();
store
.put_fetch_cache_pp(&key, &pack.encode().expect("encode pack"))
.expect("store corrupted pack");
let err = process_manifest_publication_point_after_repo_sync(
&store,
&policy,
&manifest_rsync_uri,
&publication_point_rsync_uri,
&issuer_ca_der,
Some(issuer_ca_rsync_uri()),
validation_time,
false,
Some("repo sync failed in test"),
)
.unwrap_err();
assert!(
matches!(err, ManifestProcessError::NoUsableCache { .. }),
"{err}"
);
assert!(
err.to_string()
.contains("cached fetch_cache_pp file hash mismatch"),
"unexpected error: {err}"
);
}

View File

@ -2,7 +2,7 @@ use std::path::Path;
use rpki::data_model::manifest::ManifestObject; use rpki::data_model::manifest::ManifestObject;
use rpki::policy::{CaFailedFetchPolicy, Policy}; use rpki::policy::{CaFailedFetchPolicy, Policy};
use rpki::storage::{FetchCachePpKey, RocksStore}; use rpki::storage::RocksStore;
use rpki::validation::manifest::process_manifest_publication_point; use rpki::validation::manifest::process_manifest_publication_point;
fn issuer_ca_fixture() -> Vec<u8> { fn issuer_ca_fixture() -> Vec<u8> {
@ -70,12 +70,11 @@ fn manifest_outside_publication_point_is_failed_fetch_rfc9286_section6_1() {
let msg = err.to_string(); let msg = err.to_string();
assert!(msg.contains("RFC 9286 §6.1"), "{msg}"); assert!(msg.contains("RFC 9286 §6.1"), "{msg}");
let key = FetchCachePpKey::from_manifest_rsync_uri(manifest_rsync_uri);
assert!( assert!(
store store
.get_fetch_cache_pp(&key) .get_vcir(manifest_rsync_uri)
.expect("get fetch_cache_pp") .expect("get vcir")
.is_none(), .is_none(),
"must not write fetch_cache_pp on failed fetch" "must not write VCIR on failed fetch"
); );
} }

View File

@ -6,7 +6,7 @@ use rpki::data_model::rc::ResourceCertificate;
use rpki::policy::{Policy, SignedObjectFailurePolicy}; use rpki::policy::{Policy, SignedObjectFailurePolicy};
use rpki::storage::{PackFile, RocksStore}; use rpki::storage::{PackFile, RocksStore};
use rpki::validation::manifest::process_manifest_publication_point; use rpki::validation::manifest::process_manifest_publication_point;
use rpki::validation::objects::process_fetch_cache_pp_pack_for_issuer; use rpki::validation::objects::process_publication_point_snapshot_for_issuer;
fn fixture_to_rsync_uri(path: &Path) -> String { fn fixture_to_rsync_uri(path: &Path) -> String {
let rel = path let rel = path
@ -31,7 +31,7 @@ fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
} }
fn build_cernet_pack_and_validation_time() -> ( fn build_cernet_pack_and_validation_time() -> (
rpki::storage::FetchCachePpPack, rpki::validation::publication_point::PublicationPointSnapshot,
time::OffsetDateTime, time::OffsetDateTime,
Vec<u8>, Vec<u8>,
ResourceCertificate, ResourceCertificate,
@ -85,11 +85,11 @@ fn build_cernet_pack_and_validation_time() -> (
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer CA cert"); let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer CA cert");
let crl_file = out let crl_file = out
.pack .snapshot
.files .files
.iter() .iter()
.find(|f| f.rsync_uri.ends_with(".crl")) .find(|f| f.rsync_uri.ends_with(".crl"))
.expect("crl present in pack"); .expect("crl present in snapshot");
let crl = RpkixCrl::decode_der(&crl_file.bytes).expect("decode crl"); let crl = RpkixCrl::decode_der(&crl_file.bytes).expect("decode crl");
let mut t = manifest.manifest.this_update; let mut t = manifest.manifest.this_update;
@ -101,7 +101,7 @@ fn build_cernet_pack_and_validation_time() -> (
} }
t += time::Duration::seconds(1); t += time::Duration::seconds(1);
(out.pack, t, issuer_ca_der, issuer_ca) (out.snapshot, t, issuer_ca_der, issuer_ca)
} }
#[test] #[test]
@ -113,7 +113,7 @@ fn missing_crl_causes_roas_to_be_dropped_under_drop_object_policy() {
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropObject; policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropObject;
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&issuer_ca_der, &issuer_ca_der,
@ -139,7 +139,7 @@ fn wrong_issuer_ca_cert_causes_roas_to_be_dropped_under_drop_object_policy() {
// Use an unrelated trust anchor certificate as the issuer to force EE cert path validation to fail. // Use an unrelated trust anchor certificate as the issuer to force EE cert path validation to fail.
let wrong_issuer_ca_der = let wrong_issuer_ca_der =
std::fs::read("tests/fixtures/ta/arin-ta.cer").expect("read wrong issuer ca"); std::fs::read("tests/fixtures/ta/arin-ta.cer").expect("read wrong issuer ca");
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&wrong_issuer_ca_der, &wrong_issuer_ca_der,
@ -167,7 +167,7 @@ fn invalid_aspa_object_is_reported_as_warning_under_drop_object_policy() {
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropObject; policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropObject;
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&issuer_ca_der, &issuer_ca_der,

View File

@ -6,7 +6,7 @@ use rpki::data_model::rc::ResourceCertificate;
use rpki::policy::{Policy, SignedObjectFailurePolicy}; use rpki::policy::{Policy, SignedObjectFailurePolicy};
use rpki::storage::{PackFile, RocksStore}; use rpki::storage::{PackFile, RocksStore};
use rpki::validation::manifest::process_manifest_publication_point; use rpki::validation::manifest::process_manifest_publication_point;
use rpki::validation::objects::process_fetch_cache_pp_pack_for_issuer; use rpki::validation::objects::process_publication_point_snapshot_for_issuer;
fn fixture_to_rsync_uri(path: &Path) -> String { fn fixture_to_rsync_uri(path: &Path) -> String {
let rel = path let rel = path
@ -31,7 +31,7 @@ fn fixture_dir_to_rsync_uri(dir: &Path) -> String {
} }
fn build_cernet_pack_and_validation_time() -> ( fn build_cernet_pack_and_validation_time() -> (
rpki::storage::FetchCachePpPack, rpki::validation::publication_point::PublicationPointSnapshot,
time::OffsetDateTime, time::OffsetDateTime,
Vec<u8>, Vec<u8>,
ResourceCertificate, ResourceCertificate,
@ -85,11 +85,11 @@ fn build_cernet_pack_and_validation_time() -> (
let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer CA cert"); let issuer_ca = ResourceCertificate::decode_der(&issuer_ca_der).expect("decode issuer CA cert");
let crl_file = out let crl_file = out
.pack .snapshot
.files .files
.iter() .iter()
.find(|f| f.rsync_uri.ends_with(".crl")) .find(|f| f.rsync_uri.ends_with(".crl"))
.expect("crl present in pack"); .expect("crl present in snapshot");
let crl = RpkixCrl::decode_der(&crl_file.bytes).expect("decode crl"); let crl = RpkixCrl::decode_der(&crl_file.bytes).expect("decode crl");
// Choose a validation_time that is within: // Choose a validation_time that is within:
@ -106,7 +106,7 @@ fn build_cernet_pack_and_validation_time() -> (
} }
t += time::Duration::seconds(1); t += time::Duration::seconds(1);
(out.pack, t, issuer_ca_der, issuer_ca) (out.snapshot, t, issuer_ca_der, issuer_ca)
} }
#[test] #[test]
@ -119,13 +119,13 @@ fn drop_object_policy_drops_only_failing_object() {
.iter() .iter()
.find(|f| f.rsync_uri.ends_with("AS4538.roa")) .find(|f| f.rsync_uri.ends_with("AS4538.roa"))
.map(|f| f.rsync_uri.clone()) .map(|f| f.rsync_uri.clone())
.expect("AS4538.roa present in pack"); .expect("AS4538.roa present in snapshot");
let tamper_idx = pack let tamper_idx = pack
.files .files
.iter() .iter()
.position(|f| f.rsync_uri.ends_with(".roa") && f.rsync_uri != valid_roa_uri) .position(|f| f.rsync_uri.ends_with(".roa") && f.rsync_uri != valid_roa_uri)
.expect("another ROA present in pack"); .expect("another ROA present in snapshot");
let victim_uri = pack.files[tamper_idx].rsync_uri.clone(); let victim_uri = pack.files[tamper_idx].rsync_uri.clone();
let mut tampered = pack.files[tamper_idx].bytes.clone(); let mut tampered = pack.files[tamper_idx].bytes.clone();
@ -136,7 +136,7 @@ fn drop_object_policy_drops_only_failing_object() {
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropObject; policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropObject;
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&issuer_ca_der, &issuer_ca_der,
@ -168,7 +168,7 @@ fn drop_publication_point_policy_drops_the_publication_point() {
.files .files
.iter() .iter()
.position(|f| f.rsync_uri.ends_with(".roa")) .position(|f| f.rsync_uri.ends_with(".roa"))
.expect("a ROA present in pack"); .expect("a ROA present in snapshot");
let victim_uri = pack.files[tamper_idx].rsync_uri.clone(); let victim_uri = pack.files[tamper_idx].rsync_uri.clone();
let mut tampered = pack.files[tamper_idx].bytes.clone(); let mut tampered = pack.files[tamper_idx].bytes.clone();
@ -179,7 +179,7 @@ fn drop_publication_point_policy_drops_the_publication_point() {
let mut policy = Policy::default(); let mut policy = Policy::default();
policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropPublicationPoint; policy.signed_object_failure_policy = SignedObjectFailurePolicy::DropPublicationPoint;
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&issuer_ca_der, &issuer_ca_der,

View File

@ -1,10 +1,11 @@
use rpki::fetch::rsync::LocalDirRsyncFetcher; use rpki::fetch::rsync::LocalDirRsyncFetcher;
use rpki::policy::{Policy, SignedObjectFailurePolicy, SyncPreference}; use rpki::policy::{Policy, SignedObjectFailurePolicy, SyncPreference};
use rpki::storage::{FetchCachePpPack, PackFile, PackTime, RocksStore}; use rpki::storage::{PackFile, PackTime, RocksStore};
use rpki::sync::repo::sync_publication_point; use rpki::sync::repo::sync_publication_point;
use rpki::sync::rrdp::Fetcher; use rpki::sync::rrdp::Fetcher;
use rpki::validation::manifest::process_manifest_publication_point; use rpki::validation::manifest::process_manifest_publication_point;
use rpki::validation::objects::process_fetch_cache_pp_pack_for_issuer; use rpki::validation::objects::process_publication_point_snapshot_for_issuer;
use rpki::validation::publication_point::PublicationPointSnapshot;
struct NoopHttpFetcher; struct NoopHttpFetcher;
impl Fetcher for NoopHttpFetcher { impl Fetcher for NoopHttpFetcher {
@ -50,16 +51,16 @@ fn issuer_ca_rsync_uri() -> &'static str {
"rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer" "rsync://rpki.apnic.net/repository/B527EF581D6611E2BB468F7C72FD1FF2/BfycW4hQb3wNP4YsiJW-1n6fjro.cer"
} }
fn minimal_pack( fn minimal_snapshot(
manifest_rsync_uri: &str, manifest_rsync_uri: &str,
publication_point_rsync_uri: &str, publication_point_rsync_uri: &str,
manifest_bytes: Vec<u8>, manifest_bytes: Vec<u8>,
files: Vec<PackFile>, files: Vec<PackFile>,
validation_time: time::OffsetDateTime, validation_time: time::OffsetDateTime,
) -> FetchCachePpPack { ) -> PublicationPointSnapshot {
// Keep times consistent enough to pass internal pack validation. // Keep times consistent enough for a valid snapshot.
FetchCachePpPack { PublicationPointSnapshot {
format_version: FetchCachePpPack::FORMAT_VERSION_V1, format_version: PublicationPointSnapshot::FORMAT_VERSION_V1,
manifest_rsync_uri: manifest_rsync_uri.to_string(), manifest_rsync_uri: manifest_rsync_uri.to_string(),
publication_point_rsync_uri: publication_point_rsync_uri.to_string(), publication_point_rsync_uri: publication_point_rsync_uri.to_string(),
manifest_number_be: vec![1], manifest_number_be: vec![1],
@ -71,12 +72,12 @@ fn minimal_pack(
} }
} }
fn build_fetch_cache_pp_from_local_rsync_fixture( fn build_publication_point_snapshot_from_local_rsync_fixture(
dir: &std::path::Path, dir: &std::path::Path,
rsync_base_uri: &str, rsync_base_uri: &str,
manifest_rsync_uri: &str, manifest_rsync_uri: &str,
validation_time: time::OffsetDateTime, validation_time: time::OffsetDateTime,
) -> rpki::storage::FetchCachePpPack { ) -> rpki::validation::publication_point::PublicationPointSnapshot {
let store_dir = tempfile::tempdir().expect("store dir"); let store_dir = tempfile::tempdir().expect("store dir");
let store = RocksStore::open(store_dir.path()).expect("open rocksdb"); let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
let policy = Policy { let policy = Policy {
@ -107,16 +108,16 @@ fn build_fetch_cache_pp_from_local_rsync_fixture(
) )
.expect("process manifest"); .expect("process manifest");
pp.pack pp.snapshot
} }
#[test] #[test]
fn process_pack_for_issuer_extracts_vrps_from_real_cernet_fixture() { fn process_snapshot_for_issuer_extracts_vrps_from_real_cernet_fixture() {
let (dir, rsync_base_uri, manifest_file) = cernet_fixture(); let (dir, rsync_base_uri, manifest_file) = cernet_fixture();
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}"); let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file); let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file);
let pack = build_fetch_cache_pp_from_local_rsync_fixture( let pack = build_publication_point_snapshot_from_local_rsync_fixture(
&dir, &dir,
&rsync_base_uri, &rsync_base_uri,
&manifest_rsync_uri, &manifest_rsync_uri,
@ -128,7 +129,7 @@ fn process_pack_for_issuer_extracts_vrps_from_real_cernet_fixture() {
.expect("decode issuer ca"); .expect("decode issuer ca");
let policy = Policy::default(); let policy = Policy::default();
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&issuer_ca_der, &issuer_ca_der,
@ -153,7 +154,7 @@ fn signed_object_failure_policy_drop_object_drops_only_bad_object() {
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}"); let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file); let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file);
let mut pack = build_fetch_cache_pp_from_local_rsync_fixture( let mut pack = build_publication_point_snapshot_from_local_rsync_fixture(
&dir, &dir,
&rsync_base_uri, &rsync_base_uri,
&manifest_rsync_uri, &manifest_rsync_uri,
@ -171,7 +172,7 @@ fn signed_object_failure_policy_drop_object_drops_only_bad_object() {
.files .files
.iter() .iter()
.position(|f| f.rsync_uri.ends_with(".roa")) .position(|f| f.rsync_uri.ends_with(".roa"))
.expect("pack contains roa"); .expect("snapshot contains roa");
let bad_uri = pack.files[bad_idx].rsync_uri.clone(); let bad_uri = pack.files[bad_idx].rsync_uri.clone();
pack.files[bad_idx] = PackFile::from_bytes_compute_sha256(bad_uri, vec![0u8]); pack.files[bad_idx] = PackFile::from_bytes_compute_sha256(bad_uri, vec![0u8]);
@ -183,7 +184,7 @@ fn signed_object_failure_policy_drop_object_drops_only_bad_object() {
signed_object_failure_policy: SignedObjectFailurePolicy::DropObject, signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&issuer_ca_der, &issuer_ca_der,
@ -216,7 +217,7 @@ fn signed_object_failure_policy_drop_publication_point_drops_all_output() {
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}"); let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file); let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file);
let mut pack = build_fetch_cache_pp_from_local_rsync_fixture( let mut pack = build_publication_point_snapshot_from_local_rsync_fixture(
&dir, &dir,
&rsync_base_uri, &rsync_base_uri,
&manifest_rsync_uri, &manifest_rsync_uri,
@ -234,7 +235,7 @@ fn signed_object_failure_policy_drop_publication_point_drops_all_output() {
.files .files
.iter() .iter()
.position(|f| f.rsync_uri.ends_with(".roa")) .position(|f| f.rsync_uri.ends_with(".roa"))
.expect("pack contains roa"); .expect("snapshot contains roa");
let bad_uri = pack.files[bad_idx].rsync_uri.clone(); let bad_uri = pack.files[bad_idx].rsync_uri.clone();
pack.files[bad_idx] = PackFile::from_bytes_compute_sha256(bad_uri, vec![0u8]); pack.files[bad_idx] = PackFile::from_bytes_compute_sha256(bad_uri, vec![0u8]);
@ -246,7 +247,7 @@ fn signed_object_failure_policy_drop_publication_point_drops_all_output() {
signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint, signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint,
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&issuer_ca_der, &issuer_ca_der,
@ -283,13 +284,13 @@ fn signed_object_failure_policy_drop_publication_point_drops_all_output() {
} }
#[test] #[test]
fn process_pack_for_issuer_without_crl_drops_publication_point() { fn process_snapshot_for_issuer_without_crl_drops_publication_point() {
let (dir, rsync_base_uri, manifest_file) = cernet_fixture(); let (dir, rsync_base_uri, manifest_file) = cernet_fixture();
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}"); let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file); let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file);
let manifest_bytes = std::fs::read(dir.join(&manifest_file)).expect("read mft"); let manifest_bytes = std::fs::read(dir.join(&manifest_file)).expect("read mft");
let pack = minimal_pack( let pack = minimal_snapshot(
&manifest_rsync_uri, &manifest_rsync_uri,
&rsync_base_uri, &rsync_base_uri,
manifest_bytes, manifest_bytes,
@ -301,7 +302,7 @@ fn process_pack_for_issuer_without_crl_drops_publication_point() {
); );
let policy = Policy::default(); let policy = Policy::default();
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&[], &[],
@ -318,7 +319,7 @@ fn process_pack_for_issuer_without_crl_drops_publication_point() {
} }
#[test] #[test]
fn process_pack_for_issuer_handles_invalid_aspa_bytes() { fn process_snapshot_for_issuer_handles_invalid_aspa_bytes() {
let (dir, rsync_base_uri, manifest_file) = cernet_fixture(); let (dir, rsync_base_uri, manifest_file) = cernet_fixture();
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}"); let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file); let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file);
@ -326,7 +327,7 @@ fn process_pack_for_issuer_handles_invalid_aspa_bytes() {
let crl_bytes = let crl_bytes =
std::fs::read(dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl")).expect("read crl"); std::fs::read(dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl")).expect("read crl");
let pack = minimal_pack( let pack = minimal_snapshot(
&manifest_rsync_uri, &manifest_rsync_uri,
&rsync_base_uri, &rsync_base_uri,
manifest_bytes, manifest_bytes,
@ -344,7 +345,7 @@ fn process_pack_for_issuer_handles_invalid_aspa_bytes() {
signed_object_failure_policy: SignedObjectFailurePolicy::DropObject, signed_object_failure_policy: SignedObjectFailurePolicy::DropObject,
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&[], &[],
@ -359,7 +360,7 @@ fn process_pack_for_issuer_handles_invalid_aspa_bytes() {
} }
#[test] #[test]
fn process_pack_for_issuer_drop_publication_point_on_invalid_aspa_bytes() { fn process_snapshot_for_issuer_drop_publication_point_on_invalid_aspa_bytes() {
let (dir, rsync_base_uri, manifest_file) = cernet_fixture(); let (dir, rsync_base_uri, manifest_file) = cernet_fixture();
let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}"); let manifest_rsync_uri = format!("{rsync_base_uri}{manifest_file}");
let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file); let validation_time = validation_time_from_manifest_fixture(&dir, &manifest_file);
@ -367,7 +368,7 @@ fn process_pack_for_issuer_drop_publication_point_on_invalid_aspa_bytes() {
let crl_bytes = let crl_bytes =
std::fs::read(dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl")).expect("read crl"); std::fs::read(dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl")).expect("read crl");
let pack = minimal_pack( let pack = minimal_snapshot(
&manifest_rsync_uri, &manifest_rsync_uri,
&rsync_base_uri, &rsync_base_uri,
manifest_bytes, manifest_bytes,
@ -387,7 +388,7 @@ fn process_pack_for_issuer_drop_publication_point_on_invalid_aspa_bytes() {
signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint, signed_object_failure_policy: SignedObjectFailurePolicy::DropPublicationPoint,
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&[], &[],

View File

@ -1,16 +1,17 @@
use rpki::policy::{Policy, SignedObjectFailurePolicy}; use rpki::policy::{Policy, SignedObjectFailurePolicy};
use rpki::storage::{FetchCachePpPack, PackFile, PackTime}; use rpki::storage::{PackFile, PackTime};
use rpki::validation::objects::process_fetch_cache_pp_pack_for_issuer; use rpki::validation::objects::process_publication_point_snapshot_for_issuer;
use rpki::validation::publication_point::PublicationPointSnapshot;
fn fixture_bytes(path: &str) -> Vec<u8> { fn fixture_bytes(path: &str) -> Vec<u8> {
std::fs::read(std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(path)) std::fs::read(std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(path))
.unwrap_or_else(|e| panic!("read fixture {path}: {e}")) .unwrap_or_else(|e| panic!("read fixture {path}: {e}"))
} }
fn dummy_pack(manifest_bytes: Vec<u8>, files: Vec<PackFile>) -> FetchCachePpPack { fn dummy_snapshot(manifest_bytes: Vec<u8>, files: Vec<PackFile>) -> PublicationPointSnapshot {
let now = time::OffsetDateTime::now_utc(); let now = time::OffsetDateTime::now_utc();
FetchCachePpPack { PublicationPointSnapshot {
format_version: FetchCachePpPack::FORMAT_VERSION_V1, format_version: PublicationPointSnapshot::FORMAT_VERSION_V1,
manifest_rsync_uri: "rsync://example.test/repo/pp/manifest.mft".to_string(), manifest_rsync_uri: "rsync://example.test/repo/pp/manifest.mft".to_string(),
publication_point_rsync_uri: "rsync://example.test/repo/pp/".to_string(), publication_point_rsync_uri: "rsync://example.test/repo/pp/".to_string(),
manifest_number_be: vec![1], manifest_number_be: vec![1],
@ -23,7 +24,7 @@ fn dummy_pack(manifest_bytes: Vec<u8>, files: Vec<PackFile>) -> FetchCachePpPack
} }
#[test] #[test]
fn process_pack_drop_object_on_wrong_issuer_ca_for_roa() { fn process_snapshot_drop_object_on_wrong_issuer_ca_for_roa() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -43,7 +44,7 @@ fn process_pack_drop_object_on_wrong_issuer_ca_for_roa() {
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl",
); );
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256(ee_crldp, crl_bytes), PackFile::from_bytes_compute_sha256(ee_crldp, crl_bytes),
@ -60,7 +61,7 @@ fn process_pack_drop_object_on_wrong_issuer_ca_for_roa() {
}; };
let wrong_issuer_ca_der = fixture_bytes("tests/fixtures/ta/arin-ta.cer"); let wrong_issuer_ca_der = fixture_bytes("tests/fixtures/ta/arin-ta.cer");
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&wrong_issuer_ca_der, &wrong_issuer_ca_der,
@ -78,7 +79,7 @@ fn process_pack_drop_object_on_wrong_issuer_ca_for_roa() {
} }
#[test] #[test]
fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_roa_skips_rest() { fn process_snapshot_drop_publication_point_on_wrong_issuer_ca_for_roa_skips_rest() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -101,7 +102,7 @@ fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_roa_skips_rest() {
"tests/fixtures/repository/chloe.sobornost.net/rpki/RIPE-nljobsnijders/5m80fwYws_3FiFD7JiQjAqZ1RYQ.asa", "tests/fixtures/repository/chloe.sobornost.net/rpki/RIPE-nljobsnijders/5m80fwYws_3FiFD7JiQjAqZ1RYQ.asa",
); );
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256(ee_crldp, crl_bytes), PackFile::from_bytes_compute_sha256(ee_crldp, crl_bytes),
@ -123,7 +124,7 @@ fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_roa_skips_rest() {
}; };
let wrong_issuer_ca_der = fixture_bytes("tests/fixtures/ta/arin-ta.cer"); let wrong_issuer_ca_der = fixture_bytes("tests/fixtures/ta/arin-ta.cer");
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&wrong_issuer_ca_der, &wrong_issuer_ca_der,
@ -138,7 +139,7 @@ fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_roa_skips_rest() {
} }
#[test] #[test]
fn process_pack_drop_object_on_wrong_issuer_ca_for_aspa() { fn process_snapshot_drop_object_on_wrong_issuer_ca_for_aspa() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -159,7 +160,7 @@ fn process_pack_drop_object_on_wrong_issuer_ca_for_aspa() {
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl",
); );
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256(ee_crldp, crl_bytes), PackFile::from_bytes_compute_sha256(ee_crldp, crl_bytes),
@ -173,7 +174,7 @@ fn process_pack_drop_object_on_wrong_issuer_ca_for_aspa() {
}; };
let wrong_issuer_ca_der = fixture_bytes("tests/fixtures/ta/arin-ta.cer"); let wrong_issuer_ca_der = fixture_bytes("tests/fixtures/ta/arin-ta.cer");
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&wrong_issuer_ca_der, &wrong_issuer_ca_der,
@ -191,7 +192,7 @@ fn process_pack_drop_object_on_wrong_issuer_ca_for_aspa() {
} }
#[test] #[test]
fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_aspa_skips_rest() { fn process_snapshot_drop_publication_point_on_wrong_issuer_ca_for_aspa_skips_rest() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -214,7 +215,7 @@ fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_aspa_skips_rest()
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.crl",
); );
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256(ee_crldp, crl_bytes), PackFile::from_bytes_compute_sha256(ee_crldp, crl_bytes),
@ -229,7 +230,7 @@ fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_aspa_skips_rest()
}; };
let wrong_issuer_ca_der = fixture_bytes("tests/fixtures/ta/arin-ta.cer"); let wrong_issuer_ca_der = fixture_bytes("tests/fixtures/ta/arin-ta.cer");
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&wrong_issuer_ca_der, &wrong_issuer_ca_der,
@ -243,7 +244,7 @@ fn process_pack_drop_publication_point_on_wrong_issuer_ca_for_aspa_skips_rest()
} }
#[test] #[test]
fn process_pack_for_issuer_marks_objects_skipped_when_missing_issuer_crl() { fn process_snapshot_for_issuer_marks_objects_skipped_when_missing_issuer_crl() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -253,7 +254,7 @@ fn process_pack_for_issuer_marks_objects_skipped_when_missing_issuer_crl() {
"tests/fixtures/repository/chloe.sobornost.net/rpki/RIPE-nljobsnijders/5m80fwYws_3FiFD7JiQjAqZ1RYQ.asa", "tests/fixtures/repository/chloe.sobornost.net/rpki/RIPE-nljobsnijders/5m80fwYws_3FiFD7JiQjAqZ1RYQ.asa",
); );
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/a.roa", roa_bytes), PackFile::from_bytes_compute_sha256("rsync://example.test/repo/pp/a.roa", roa_bytes),
@ -266,7 +267,7 @@ fn process_pack_for_issuer_marks_objects_skipped_when_missing_issuer_crl() {
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"), &fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
@ -287,12 +288,12 @@ fn process_pack_for_issuer_marks_objects_skipped_when_missing_issuer_crl() {
} }
#[test] #[test]
fn process_pack_for_issuer_drop_object_records_errors_and_continues() { fn process_snapshot_for_issuer_drop_object_records_errors_and_continues() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256( PackFile::from_bytes_compute_sha256(
@ -309,7 +310,7 @@ fn process_pack_for_issuer_drop_object_records_errors_and_continues() {
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"), &fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
@ -328,12 +329,12 @@ fn process_pack_for_issuer_drop_object_records_errors_and_continues() {
} }
#[test] #[test]
fn process_pack_for_issuer_drop_publication_point_records_skips_for_rest() { fn process_snapshot_for_issuer_drop_publication_point_records_skips_for_rest() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256( PackFile::from_bytes_compute_sha256(
@ -351,7 +352,7 @@ fn process_pack_for_issuer_drop_publication_point_records_skips_for_rest() {
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"), &fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
@ -370,7 +371,7 @@ fn process_pack_for_issuer_drop_publication_point_records_skips_for_rest() {
} }
#[test] #[test]
fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_roa() { fn process_snapshot_for_issuer_selects_crl_by_ee_crldp_uri_roa() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -389,7 +390,7 @@ fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_roa() {
// Provide a CRL file with the *exact* rsync URI referenced by the embedded EE certificate. // Provide a CRL file with the *exact* rsync URI referenced by the embedded EE certificate.
// Bytes need not be valid for this test: we just want to cover deterministic selection. // Bytes need not be valid for this test: we just want to cover deterministic selection.
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256(ee_crldp, vec![0x01]), PackFile::from_bytes_compute_sha256(ee_crldp, vec![0x01]),
@ -402,7 +403,7 @@ fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_roa() {
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"), &fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
@ -420,7 +421,7 @@ fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_roa() {
} }
#[test] #[test]
fn process_pack_for_issuer_rejects_roa_when_crldp_crl_missing() { fn process_snapshot_for_issuer_rejects_roa_when_crldp_crl_missing() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -428,7 +429,7 @@ fn process_pack_for_issuer_rejects_roa_when_crldp_crl_missing() {
fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa"); fixture_bytes("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/AS4538.roa");
// Pack has a CRL, but its URI does not match the embedded EE certificate CRLDP. // Pack has a CRL, but its URI does not match the embedded EE certificate CRLDP.
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256( PackFile::from_bytes_compute_sha256(
@ -444,7 +445,7 @@ fn process_pack_for_issuer_rejects_roa_when_crldp_crl_missing() {
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"), &fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),
@ -469,7 +470,7 @@ fn process_pack_for_issuer_rejects_roa_when_crldp_crl_missing() {
} }
#[test] #[test]
fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_aspa() { fn process_snapshot_for_issuer_selects_crl_by_ee_crldp_uri_aspa() {
let manifest_bytes = fixture_bytes( let manifest_bytes = fixture_bytes(
"tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft", "tests/fixtures/repository/rpki.cernet.net/repo/cernet/0/05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft",
); );
@ -487,7 +488,7 @@ fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_aspa() {
.as_str() .as_str()
.to_string(); .to_string();
let pack = dummy_pack( let pack = dummy_snapshot(
manifest_bytes, manifest_bytes,
vec![ vec![
PackFile::from_bytes_compute_sha256(ee_crldp, vec![0x01]), PackFile::from_bytes_compute_sha256(ee_crldp, vec![0x01]),
@ -500,7 +501,7 @@ fn process_pack_for_issuer_selects_crl_by_ee_crldp_uri_aspa() {
..Policy::default() ..Policy::default()
}; };
let out = process_fetch_cache_pp_pack_for_issuer( let out = process_publication_point_snapshot_for_issuer(
&pack, &pack,
&policy, &policy,
&fixture_bytes("tests/fixtures/ta/apnic-ta.cer"), &fixture_bytes("tests/fixtures/ta/apnic-ta.cer"),

View File

@ -6,7 +6,7 @@ fn policy_defaults_are_correct() {
assert_eq!(p.sync_preference, SyncPreference::RrdpThenRsync); assert_eq!(p.sync_preference, SyncPreference::RrdpThenRsync);
assert_eq!( assert_eq!(
p.ca_failed_fetch_policy, p.ca_failed_fetch_policy,
CaFailedFetchPolicy::UseFetchCachePp CaFailedFetchPolicy::ReuseCurrentInstanceVcir
); );
assert_eq!( assert_eq!(
p.signed_object_failure_policy, p.signed_object_failure_policy,
@ -30,6 +30,17 @@ signed_object_failure_policy = "drop_publication_point"
); );
} }
#[test]
fn policy_toml_parsing_accepts_reuse_current_instance_vcir() {
let toml = r#"ca_failed_fetch_policy = "reuse_current_instance_vcir"
"#;
let p = Policy::from_toml_str(toml).expect("parse TOML policy");
assert_eq!(
p.ca_failed_fetch_policy,
CaFailedFetchPolicy::ReuseCurrentInstanceVcir
);
}
#[test] #[test]
fn policy_toml_parsing_uses_defaults_when_missing() { fn policy_toml_parsing_uses_defaults_when_missing() {
let p = Policy::from_toml_str("").expect("parse empty TOML policy"); let p = Policy::from_toml_str("").expect("parse empty TOML policy");

View File

@ -7,7 +7,8 @@ use rpki::fetch::rsync::LocalDirRsyncFetcher;
use rpki::policy::{Policy, SyncPreference}; use rpki::policy::{Policy, SyncPreference};
use rpki::storage::RocksStore; use rpki::storage::RocksStore;
use rpki::sync::rrdp::Fetcher; use rpki::sync::rrdp::Fetcher;
use rpki::validation::run::{fetch_cache_pp_exists, run_publication_point_once}; use rpki::validation::manifest::PublicationPointSource;
use rpki::validation::run::run_publication_point_once;
fn fixture_to_rsync_uri(path: &Path) -> String { fn fixture_to_rsync_uri(path: &Path) -> String {
let rel = path let rel = path
@ -40,7 +41,7 @@ impl Fetcher for NeverHttpFetcher {
} }
#[test] #[test]
fn e2e_offline_uses_rsync_then_writes_fetch_cache_pp_then_outputs_vrps() { fn e2e_offline_uses_rsync_then_writes_latest_vcir_then_outputs_vrps() {
let fixture_dir = Path::new("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0"); let fixture_dir = Path::new("tests/fixtures/repository/rpki.cernet.net/repo/cernet/0");
let rsync_base_uri = "rsync://rpki.cernet.net/repo/cernet/0/"; let rsync_base_uri = "rsync://rpki.cernet.net/repo/cernet/0/";
let manifest_path = fixture_dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft"); let manifest_path = fixture_dir.join("05FC9C5B88506F7C0D3F862C8895BED67E9F8EBA.mft");
@ -80,14 +81,6 @@ fn e2e_offline_uses_rsync_then_writes_fetch_cache_pp_then_outputs_vrps() {
let temp = tempfile::tempdir().expect("tempdir"); let temp = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(temp.path()).expect("open rocksdb"); let store = RocksStore::open(temp.path()).expect("open rocksdb");
let expected_files = std::fs::read_dir(fixture_dir)
.expect("read fixture dir")
.filter_map(|e| e.ok())
.filter_map(|e| e.metadata().ok().map(|m| (e, m)))
.filter(|(_e, m)| m.is_file())
.count();
assert!(expected_files >= 3, "fixture dir seems incomplete");
let out = run_publication_point_once( let out = run_publication_point_once(
&store, &store,
&policy, &policy,
@ -105,9 +98,14 @@ fn e2e_offline_uses_rsync_then_writes_fetch_cache_pp_then_outputs_vrps() {
) )
.expect("run publication point once"); .expect("run publication point once");
assert!(fetch_cache_pp_exists(&store, &manifest_rsync_uri).expect("exists check")); assert!(
assert_eq!(out.repo_sync.objects_written, expected_files); store
.get_vcir(&manifest_rsync_uri)
.expect("get vcir")
.is_some()
);
assert_eq!(out.publication_point_source, PublicationPointSource::Fresh);
assert!(out.publication_point_warnings.is_empty());
assert!( assert!(
out.objects.vrps.iter().any(|v| v.asn == 4538), out.objects.vrps.iter().any(|v| v.asn == 4538),
"expected VRPs for AS4538" "expected VRPs for AS4538"

View File

@ -1,8 +1,10 @@
use rpki::analysis::timing::{TimingHandle, TimingMeta};
use rpki::validation::from_tal::discover_root_ca_instance_from_tal_and_ta_der; use rpki::validation::from_tal::discover_root_ca_instance_from_tal_and_ta_der;
use rpki::validation::run_tree_from_tal::root_handle_from_trust_anchor; use rpki::validation::run_tree_from_tal::root_handle_from_trust_anchor;
use rpki::validation::run_tree_from_tal::{ use rpki::validation::run_tree_from_tal::{
run_tree_from_tal_and_ta_der_serial, run_tree_from_tal_and_ta_der_serial_audit, run_tree_from_tal_and_ta_der_serial, run_tree_from_tal_and_ta_der_serial_audit,
run_tree_from_tal_url_serial, run_tree_from_tal_url_serial_audit, run_tree_from_tal_and_ta_der_serial_audit_with_timing, run_tree_from_tal_url_serial,
run_tree_from_tal_url_serial_audit, run_tree_from_tal_url_serial_audit_with_timing,
}; };
use rpki::validation::tree::TreeRunConfig; use rpki::validation::tree::TreeRunConfig;
@ -31,6 +33,22 @@ impl rpki::fetch::rsync::RsyncFetcher for EmptyRsyncFetcher {
} }
} }
fn test_timing_handle(temp: &tempfile::TempDir) -> TimingHandle {
TimingHandle::new(TimingMeta {
recorded_at_utc_rfc3339: "2026-03-11T00:00:00Z".to_string(),
validation_time_utc_rfc3339: "2026-03-11T00:00:00Z".to_string(),
tal_url: Some("mock:apnic.tal".to_string()),
db_path: Some(temp.path().display().to_string()),
})
}
fn read_timing_json(temp: &tempfile::TempDir, timing: &TimingHandle) -> serde_json::Value {
let path = temp.path().join("timing.json");
timing.write_json(&path, 20).expect("write timing json");
serde_json::from_slice(&std::fs::read(&path).expect("read timing json"))
.expect("parse timing json")
}
#[test] #[test]
fn root_handle_is_constructible_from_fixture_tal_and_ta() { fn root_handle_is_constructible_from_fixture_tal_and_ta() {
let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal") let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
@ -40,7 +58,12 @@ fn root_handle_is_constructible_from_fixture_tal_and_ta() {
let discovery = let discovery =
discover_root_ca_instance_from_tal_and_ta_der(&tal_bytes, &ta_der, None).expect("discover"); discover_root_ca_instance_from_tal_and_ta_der(&tal_bytes, &ta_der, None).expect("discover");
let root = root_handle_from_trust_anchor(&discovery.trust_anchor, None, &discovery.ca_instance); let root = root_handle_from_trust_anchor(
&discovery.trust_anchor,
"test-tal".to_string(),
None,
&discovery.ca_instance,
);
assert_eq!(root.depth, 0); assert_eq!(root.depth, 0);
assert_eq!( assert_eq!(
@ -91,18 +114,18 @@ fn run_tree_from_tal_url_entry_executes_and_records_failure_when_repo_empty() {
&TreeRunConfig { &TreeRunConfig {
max_depth: Some(0), max_depth: Some(0),
max_instances: Some(1), max_instances: Some(1),
revalidate_only: false,
}, },
) )
.expect("run tree"); .expect("run tree");
assert_eq!(out.tree.instances_processed, 0); assert_eq!(out.tree.instances_processed, 1);
assert_eq!(out.tree.instances_failed, 1); assert_eq!(out.tree.instances_failed, 0);
assert!( assert!(
out.tree out.tree
.warnings .warnings
.iter() .iter()
.any(|w| w.message.contains("publication point failed")), .any(|w| w.message.contains("manifest failed fetch")
"expected failure warning" || w.message.contains("no latest validated result")),
"expected failed-fetch warning"
); );
} }
@ -135,18 +158,18 @@ fn run_tree_from_tal_and_ta_der_entry_executes_and_records_failure_when_repo_emp
&TreeRunConfig { &TreeRunConfig {
max_depth: Some(0), max_depth: Some(0),
max_instances: Some(1), max_instances: Some(1),
revalidate_only: false,
}, },
) )
.expect("run tree"); .expect("run tree");
assert_eq!(out.tree.instances_processed, 0); assert_eq!(out.tree.instances_processed, 1);
assert_eq!(out.tree.instances_failed, 1); assert_eq!(out.tree.instances_failed, 0);
assert!( assert!(
out.tree out.tree
.warnings .warnings
.iter() .iter()
.any(|w| w.message.contains("publication point failed")), .any(|w| w.message.contains("manifest failed fetch")
"expected failure warning" || w.message.contains("no latest validated result")),
"expected failed-fetch warning"
); );
} }
@ -187,14 +210,14 @@ fn run_tree_from_tal_url_audit_entry_collects_no_publication_points_when_repo_em
&TreeRunConfig { &TreeRunConfig {
max_depth: Some(0), max_depth: Some(0),
max_instances: Some(1), max_instances: Some(1),
revalidate_only: false,
}, },
) )
.expect("run tree audit"); .expect("run tree audit");
assert_eq!(out.tree.instances_processed, 0); assert_eq!(out.tree.instances_processed, 1);
assert_eq!(out.tree.instances_failed, 1); assert_eq!(out.tree.instances_failed, 0);
assert!(out.publication_points.is_empty()); assert_eq!(out.publication_points.len(), 1);
assert_eq!(out.publication_points[0].source, "failed_fetch_no_cache");
} }
#[test] #[test]
@ -227,12 +250,113 @@ fn run_tree_from_tal_and_ta_der_audit_entry_collects_no_publication_points_when_
&TreeRunConfig { &TreeRunConfig {
max_depth: Some(0), max_depth: Some(0),
max_instances: Some(1), max_instances: Some(1),
revalidate_only: false,
}, },
) )
.expect("run tree audit"); .expect("run tree audit");
assert_eq!(out.tree.instances_processed, 0); assert_eq!(out.tree.instances_processed, 1);
assert_eq!(out.tree.instances_failed, 1); assert_eq!(out.tree.instances_failed, 0);
assert!(out.publication_points.is_empty()); assert_eq!(out.publication_points.len(), 1);
assert_eq!(out.publication_points[0].source, "failed_fetch_no_cache");
}
#[test]
fn run_tree_from_tal_url_audit_with_timing_records_phases_when_repo_empty() {
let tal_url = "mock:apnic.tal";
let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
.expect("read apnic tal fixture");
let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
let mut map = HashMap::new();
map.insert(tal_url.to_string(), tal_bytes);
map.insert(
"https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
ta_der.clone(),
);
map.insert(
"rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer".to_string(),
ta_der.clone(),
);
let http = MapHttpFetcher { map };
let rsync = EmptyRsyncFetcher;
let temp = tempfile::tempdir().expect("tempdir");
let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
let policy = rpki::policy::Policy {
sync_preference: rpki::policy::SyncPreference::RsyncOnly,
..rpki::policy::Policy::default()
};
let timing = test_timing_handle(&temp);
let out = run_tree_from_tal_url_serial_audit_with_timing(
&store,
&policy,
tal_url,
&http,
&rsync,
time::OffsetDateTime::now_utc(),
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
},
&timing,
)
.expect("run tree audit with timing");
assert_eq!(out.tree.instances_processed, 1);
assert_eq!(out.tree.instances_failed, 0);
let report = read_timing_json(&temp, &timing);
assert_eq!(report["phases"]["tal_bootstrap"]["count"].as_u64(), Some(1));
assert_eq!(
report["phases"]["tree_run_total"]["count"].as_u64(),
Some(1)
);
}
#[test]
fn run_tree_from_tal_and_ta_der_audit_with_timing_records_phases_when_repo_empty() {
let tal_bytes = std::fs::read("tests/fixtures/tal/apnic-rfc7730-https.tal")
.expect("read apnic tal fixture");
let ta_der = std::fs::read("tests/fixtures/ta/apnic-ta.cer").expect("read apnic ta fixture");
let http = MapHttpFetcher {
map: HashMap::new(),
};
let rsync = EmptyRsyncFetcher;
let temp = tempfile::tempdir().expect("tempdir");
let store = rpki::storage::RocksStore::open(temp.path()).expect("open rocksdb");
let policy = rpki::policy::Policy {
sync_preference: rpki::policy::SyncPreference::RsyncOnly,
..rpki::policy::Policy::default()
};
let timing = test_timing_handle(&temp);
let out = run_tree_from_tal_and_ta_der_serial_audit_with_timing(
&store,
&policy,
&tal_bytes,
&ta_der,
None,
&http,
&rsync,
time::OffsetDateTime::now_utc(),
&TreeRunConfig {
max_depth: Some(0),
max_instances: Some(1),
},
&timing,
)
.expect("run tree audit with timing");
assert_eq!(out.tree.instances_processed, 1);
assert_eq!(out.tree.instances_failed, 0);
let report = read_timing_json(&temp, &timing);
assert_eq!(report["phases"]["tal_bootstrap"]["count"].as_u64(), Some(1));
assert_eq!(
report["phases"]["tree_run_total"]["count"].as_u64(),
Some(1)
);
} }

View File

@ -1,7 +1,7 @@
use rpki::storage::{FetchCachePpKey, RocksStore}; use rpki::storage::RocksStore;
#[test] #[test]
fn storage_iter_all_lists_raw_and_fetch_cache_pp_entries() { fn storage_iter_all_lists_raw_entries() {
let temp = tempfile::tempdir().expect("tempdir"); let temp = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(temp.path()).expect("open rocksdb"); let store = RocksStore::open(temp.path()).expect("open rocksdb");
@ -12,11 +12,6 @@ fn storage_iter_all_lists_raw_and_fetch_cache_pp_entries() {
.put_raw("rsync://example.test/repo/b.roa", b"b") .put_raw("rsync://example.test/repo/b.roa", b"b")
.expect("put_raw b"); .expect("put_raw b");
let key = FetchCachePpKey::from_manifest_rsync_uri("rsync://example.test/repo/m.mft");
store
.put_fetch_cache_pp(&key, b"x")
.expect("put_fetch_cache_pp");
let raw_keys = store let raw_keys = store
.raw_iter_all() .raw_iter_all()
.expect("raw_iter_all") .expect("raw_iter_all")
@ -25,11 +20,4 @@ fn storage_iter_all_lists_raw_and_fetch_cache_pp_entries() {
assert_eq!(raw_keys.len(), 2); assert_eq!(raw_keys.len(), 2);
assert!(raw_keys.contains(&"rsync://example.test/repo/a.cer".to_string())); assert!(raw_keys.contains(&"rsync://example.test/repo/a.cer".to_string()));
assert!(raw_keys.contains(&"rsync://example.test/repo/b.roa".to_string())); assert!(raw_keys.contains(&"rsync://example.test/repo/b.roa".to_string()));
let keys = store
.fetch_cache_pp_iter_all()
.expect("fetch_cache_pp_iter_all")
.map(|(k, _v)| String::from_utf8(k.to_vec()).expect("utf8 key"))
.collect::<Vec<_>>();
assert_eq!(keys, vec![key.as_str().to_string()]);
} }

View File

@ -1,6 +1,6 @@
use rocksdb::WriteBatch; use rocksdb::WriteBatch;
use rpki::storage::{FetchCachePpKey, RocksStore}; use rpki::storage::RocksStore;
#[test] #[test]
fn storage_delete_rrdp_state_works() { fn storage_delete_rrdp_state_works() {
@ -57,15 +57,6 @@ fn storage_raw_iter_prefix_filters_by_prefix() {
} }
} }
#[test]
fn storage_fetch_cache_pp_key_format_is_stable() {
let k = FetchCachePpKey::from_manifest_rsync_uri("rsync://example.net/repo/manifest.mft");
assert_eq!(
k.as_str(),
"fetch_cache_pp:rsync://example.net/repo/manifest.mft"
);
}
#[test] #[test]
fn storage_write_batch_accepts_empty_batch() { fn storage_write_batch_accepts_empty_batch() {
let temp = tempfile::tempdir().expect("tempdir"); let temp = tempfile::tempdir().expect("tempdir");

View File

@ -1,6 +1,6 @@
use std::path::Path; use std::path::Path;
use rpki::storage::{FetchCachePpKey, RocksStore}; use rpki::storage::RocksStore;
#[test] #[test]
fn storage_opens_and_creates_column_families() { fn storage_opens_and_creates_column_families() {
@ -24,26 +24,6 @@ fn raw_objects_roundtrip_by_rsync_uri() {
assert!(got.is_none()); assert!(got.is_none());
} }
#[test]
fn fetch_cache_pp_roundtrip_by_manifest_uri() {
let dir = tempfile::tempdir().expect("tempdir");
let store = RocksStore::open(dir.path()).expect("open rocksdb");
let manifest_uri = "rsync://example.invalid/repo/manifest.mft";
let key = FetchCachePpKey::from_manifest_rsync_uri(manifest_uri);
assert_eq!(
key.as_str(),
"fetch_cache_pp:rsync://example.invalid/repo/manifest.mft"
);
let bytes = b"pack";
store
.put_fetch_cache_pp(&key, bytes)
.expect("put fetch_cache_pp");
let got = store.get_fetch_cache_pp(&key).expect("get fetch_cache_pp");
assert_eq!(got.as_deref(), Some(bytes.as_slice()));
}
#[test] #[test]
fn rrdp_state_roundtrip_by_notification_uri() { fn rrdp_state_roundtrip_by_notification_uri() {
let dir = tempfile::tempdir().expect("tempdir"); let dir = tempfile::tempdir().expect("tempdir");

View File

@ -2,17 +2,18 @@ use std::collections::HashMap;
use rpki::audit::{DiscoveredFrom, PublicationPointAudit}; use rpki::audit::{DiscoveredFrom, PublicationPointAudit};
use rpki::report::Warning; use rpki::report::Warning;
use rpki::storage::{FetchCachePpPack, PackTime}; use rpki::storage::PackTime;
use rpki::validation::manifest::PublicationPointSource; use rpki::validation::manifest::PublicationPointSource;
use rpki::validation::objects::{ObjectsOutput, ObjectsStats}; use rpki::validation::objects::{ObjectsOutput, ObjectsStats};
use rpki::validation::publication_point::PublicationPointSnapshot;
use rpki::validation::tree::{ use rpki::validation::tree::{
CaInstanceHandle, DiscoveredChildCaInstance, PublicationPointRunResult, PublicationPointRunner, CaInstanceHandle, DiscoveredChildCaInstance, PublicationPointRunResult, PublicationPointRunner,
TreeRunConfig, run_tree_serial, TreeRunConfig, run_tree_serial,
}; };
fn empty_pack(manifest_uri: &str, pp_uri: &str) -> FetchCachePpPack { fn empty_snapshot(manifest_uri: &str, pp_uri: &str) -> PublicationPointSnapshot {
FetchCachePpPack { PublicationPointSnapshot {
format_version: FetchCachePpPack::FORMAT_VERSION_V1, format_version: PublicationPointSnapshot::FORMAT_VERSION_V1,
publication_point_rsync_uri: pp_uri.to_string(), publication_point_rsync_uri: pp_uri.to_string(),
manifest_rsync_uri: manifest_uri.to_string(), manifest_rsync_uri: manifest_uri.to_string(),
manifest_number_be: vec![1], manifest_number_be: vec![1],
@ -33,6 +34,8 @@ fn empty_pack(manifest_uri: &str, pp_uri: &str) -> FetchCachePpPack {
fn ca_handle(manifest_uri: &str) -> CaInstanceHandle { fn ca_handle(manifest_uri: &str) -> CaInstanceHandle {
CaInstanceHandle { CaInstanceHandle {
depth: 0, depth: 0,
tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None,
ca_certificate_der: Vec::new(), ca_certificate_der: Vec::new(),
ca_certificate_rsync_uri: None, ca_certificate_rsync_uri: None,
effective_ip_resources: None, effective_ip_resources: None,
@ -104,7 +107,7 @@ fn tree_continues_when_a_publication_point_fails() {
root_manifest, root_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: empty_pack(root_manifest, "rsync://example.test/repo/"), snapshot: Some(empty_snapshot(root_manifest, "rsync://example.test/repo/")),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -125,7 +128,10 @@ fn tree_continues_when_a_publication_point_fails() {
ok_child_manifest, ok_child_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: empty_pack(ok_child_manifest, "rsync://example.test/repo/ok-child/"), snapshot: Some(empty_snapshot(
ok_child_manifest,
"rsync://example.test/repo/ok-child/",
)),
warnings: vec![Warning::new("ok child warning")], warnings: vec![Warning::new("ok child warning")],
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),

View File

@ -2,9 +2,10 @@ use std::collections::HashMap;
use rpki::audit::{DiscoveredFrom, PublicationPointAudit}; use rpki::audit::{DiscoveredFrom, PublicationPointAudit};
use rpki::report::Warning; use rpki::report::Warning;
use rpki::storage::{FetchCachePpPack, PackFile, PackTime}; use rpki::storage::{PackFile, PackTime};
use rpki::validation::manifest::PublicationPointSource; use rpki::validation::manifest::PublicationPointSource;
use rpki::validation::objects::{ObjectsOutput, ObjectsStats}; use rpki::validation::objects::{ObjectsOutput, ObjectsStats};
use rpki::validation::publication_point::PublicationPointSnapshot;
use rpki::validation::tree::{ use rpki::validation::tree::{
CaInstanceHandle, DiscoveredChildCaInstance, PublicationPointRunResult, PublicationPointRunner, CaInstanceHandle, DiscoveredChildCaInstance, PublicationPointRunResult, PublicationPointRunner,
TreeRunConfig, run_tree_serial, run_tree_serial_audit, TreeRunConfig, run_tree_serial, run_tree_serial_audit,
@ -43,8 +44,8 @@ impl PublicationPointRunner for MockRunner {
} }
} }
fn empty_pack(manifest_uri: &str, pp_uri: &str) -> FetchCachePpPack { fn empty_snapshot(manifest_uri: &str, pp_uri: &str) -> PublicationPointSnapshot {
FetchCachePpPack { PublicationPointSnapshot {
format_version: 1, format_version: 1,
publication_point_rsync_uri: pp_uri.to_string(), publication_point_rsync_uri: pp_uri.to_string(),
manifest_rsync_uri: manifest_uri.to_string(), manifest_rsync_uri: manifest_uri.to_string(),
@ -66,6 +67,8 @@ fn empty_pack(manifest_uri: &str, pp_uri: &str) -> FetchCachePpPack {
fn ca_handle(manifest_uri: &str) -> CaInstanceHandle { fn ca_handle(manifest_uri: &str) -> CaInstanceHandle {
CaInstanceHandle { CaInstanceHandle {
depth: 0, depth: 0,
tal_id: "test-tal".to_string(),
parent_manifest_rsync_uri: None,
ca_certificate_der: Vec::new(), ca_certificate_der: Vec::new(),
ca_certificate_rsync_uri: None, ca_certificate_rsync_uri: None,
effective_ip_resources: None, effective_ip_resources: None,
@ -97,7 +100,7 @@ fn discovered_child(
} }
#[test] #[test]
fn tree_enqueues_children_only_for_fresh_publication_points() { fn tree_enqueues_children_for_fresh_and_current_instance_vcir_results() {
let root_manifest = "rsync://example.test/repo/root.mft"; let root_manifest = "rsync://example.test/repo/root.mft";
let child1_manifest = "rsync://example.test/repo/child1.mft"; let child1_manifest = "rsync://example.test/repo/child1.mft";
let child2_manifest = "rsync://example.test/repo/child2.mft"; let child2_manifest = "rsync://example.test/repo/child2.mft";
@ -114,7 +117,7 @@ fn tree_enqueues_children_only_for_fresh_publication_points() {
root_manifest, root_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: empty_pack(root_manifest, "rsync://example.test/repo/"), snapshot: Some(empty_snapshot(root_manifest, "rsync://example.test/repo/")),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -130,8 +133,11 @@ fn tree_enqueues_children_only_for_fresh_publication_points() {
.with( .with(
child1_manifest, child1_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::FetchCachePp, source: PublicationPointSource::VcirCurrentInstance,
pack: empty_pack(child1_manifest, "rsync://example.test/repo/child1/"), snapshot: Some(empty_snapshot(
child1_manifest,
"rsync://example.test/repo/child1/",
)),
warnings: vec![Warning::new("child1 warning")], warnings: vec![Warning::new("child1 warning")],
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -148,7 +154,30 @@ fn tree_enqueues_children_only_for_fresh_publication_points() {
child2_manifest, child2_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: empty_pack(child2_manifest, "rsync://example.test/repo/child2/"), snapshot: Some(empty_snapshot(
child2_manifest,
"rsync://example.test/repo/child2/",
)),
warnings: Vec::new(),
objects: ObjectsOutput {
vrps: Vec::new(),
aspas: Vec::new(),
warnings: Vec::new(),
stats: ObjectsStats::default(),
audit: Vec::new(),
},
audit: PublicationPointAudit::default(),
discovered_children: Vec::new(),
},
)
.with(
grandchild_manifest,
PublicationPointRunResult {
source: PublicationPointSource::Fresh,
snapshot: Some(empty_snapshot(
grandchild_manifest,
"rsync://example.test/repo/grandchild/",
)),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -165,14 +194,20 @@ fn tree_enqueues_children_only_for_fresh_publication_points() {
let out = run_tree_serial(ca_handle(root_manifest), &runner, &TreeRunConfig::default()) let out = run_tree_serial(ca_handle(root_manifest), &runner, &TreeRunConfig::default())
.expect("run tree"); .expect("run tree");
// root + child1 + child2. grandchild must NOT be processed because child1 used cache. // root + child1 + child2 + grandchild. child1 uses current-instance VCIR, but its child
assert_eq!(out.instances_processed, 3); // entry is still restored and processed fresh-first later in the traversal.
assert_eq!(out.instances_processed, 4);
assert_eq!(out.instances_failed, 0); assert_eq!(out.instances_failed, 0);
let called = runner.called(); let called = runner.called();
assert_eq!( assert_eq!(
called, called,
vec![root_manifest, child1_manifest, child2_manifest] vec![
root_manifest,
child1_manifest,
child2_manifest,
grandchild_manifest
]
); );
assert!( assert!(
@ -184,8 +219,8 @@ fn tree_enqueues_children_only_for_fresh_publication_points() {
assert!( assert!(
out.warnings out.warnings
.iter() .iter()
.any(|w| w.message.contains("skipping child CA discovery")), .all(|w| !w.message.contains("skipping child CA discovery")),
"expected RFC 9286 §6.6 enforcement warning" "did not expect old skip-child warning"
); );
} }
@ -199,7 +234,7 @@ fn tree_respects_max_depth_and_max_instances() {
root_manifest, root_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: empty_pack(root_manifest, "rsync://example.test/repo/"), snapshot: Some(empty_snapshot(root_manifest, "rsync://example.test/repo/")),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -216,7 +251,10 @@ fn tree_respects_max_depth_and_max_instances() {
child_manifest, child_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: empty_pack(child_manifest, "rsync://example.test/repo/child/"), snapshot: Some(empty_snapshot(
child_manifest,
"rsync://example.test/repo/child/",
)),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -236,7 +274,6 @@ fn tree_respects_max_depth_and_max_instances() {
&TreeRunConfig { &TreeRunConfig {
max_depth: Some(0), max_depth: Some(0),
max_instances: None, max_instances: None,
revalidate_only: false,
}, },
) )
.expect("run tree depth-limited"); .expect("run tree depth-limited");
@ -249,7 +286,6 @@ fn tree_respects_max_depth_and_max_instances() {
&TreeRunConfig { &TreeRunConfig {
max_depth: None, max_depth: None,
max_instances: Some(1), max_instances: Some(1),
revalidate_only: false,
}, },
) )
.expect("run tree instance-limited"); .expect("run tree instance-limited");
@ -267,7 +303,7 @@ fn tree_audit_includes_parent_and_discovered_from_for_non_root_nodes() {
root_manifest, root_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: empty_pack(root_manifest, "rsync://example.test/repo/"), snapshot: Some(empty_snapshot(root_manifest, "rsync://example.test/repo/")),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -284,7 +320,10 @@ fn tree_audit_includes_parent_and_discovered_from_for_non_root_nodes() {
child_manifest, child_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::Fresh, source: PublicationPointSource::Fresh,
pack: empty_pack(child_manifest, "rsync://example.test/repo/child/"), snapshot: Some(empty_snapshot(
child_manifest,
"rsync://example.test/repo/child/",
)),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -320,16 +359,23 @@ fn tree_audit_includes_parent_and_discovered_from_for_non_root_nodes() {
} }
#[test] #[test]
fn tree_revalidate_only_enqueues_children_from_fetch_cache_pp() { fn tree_prefers_lexicographically_first_discovery_when_duplicate_manifest_is_queued() {
let root_manifest = "rsync://example.test/repo/root.mft"; let root_manifest = "rsync://example.test/repo/root.mft";
let child_manifest = "rsync://example.test/repo/child.mft"; let duplicate_manifest = "rsync://example.test/repo/child.mft";
let mut first = discovered_child(root_manifest, duplicate_manifest);
first.discovered_from.child_ca_certificate_rsync_uri =
"rsync://example.test/repo/z-child.cer".to_string();
let mut second = discovered_child(root_manifest, duplicate_manifest);
second.discovered_from.child_ca_certificate_rsync_uri =
"rsync://example.test/repo/a-child.cer".to_string();
let runner = MockRunner::default() let runner = MockRunner::default()
.with( .with(
root_manifest, root_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::FetchCachePp, source: PublicationPointSource::Fresh,
pack: empty_pack(root_manifest, "rsync://example.test/repo/"), snapshot: Some(empty_snapshot(root_manifest, "rsync://example.test/repo/")),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -339,14 +385,17 @@ fn tree_revalidate_only_enqueues_children_from_fetch_cache_pp() {
audit: Vec::new(), audit: Vec::new(),
}, },
audit: PublicationPointAudit::default(), audit: PublicationPointAudit::default(),
discovered_children: vec![discovered_child(root_manifest, child_manifest)], discovered_children: vec![first, second],
}, },
) )
.with( .with(
child_manifest, duplicate_manifest,
PublicationPointRunResult { PublicationPointRunResult {
source: PublicationPointSource::FetchCachePp, source: PublicationPointSource::Fresh,
pack: empty_pack(child_manifest, "rsync://example.test/repo/child/"), snapshot: Some(empty_snapshot(
duplicate_manifest,
"rsync://example.test/repo/child/",
)),
warnings: Vec::new(), warnings: Vec::new(),
objects: ObjectsOutput { objects: ObjectsOutput {
vrps: Vec::new(), vrps: Vec::new(),
@ -360,27 +409,21 @@ fn tree_revalidate_only_enqueues_children_from_fetch_cache_pp() {
}, },
); );
let out = run_tree_serial( let out = run_tree_serial_audit(ca_handle(root_manifest), &runner, &TreeRunConfig::default())
ca_handle(root_manifest), .expect("run tree audit");
&runner,
&TreeRunConfig {
max_depth: None,
max_instances: None,
revalidate_only: false,
},
)
.expect("run tree");
assert_eq!(out.instances_processed, 1);
let out = run_tree_serial( assert_eq!(out.tree.instances_processed, 2);
ca_handle(root_manifest), assert_eq!(
&runner, runner.called(),
&TreeRunConfig { vec![root_manifest.to_string(), duplicate_manifest.to_string()]
max_depth: None, );
max_instances: None, let child_audit = &out.publication_points[1];
revalidate_only: true, assert_eq!(
}, child_audit
) .discovered_from
.expect("run tree"); .as_ref()
assert_eq!(out.instances_processed, 2); .expect("child discovered_from")
.child_ca_certificate_rsync_uri,
"rsync://example.test/repo/a-child.cer"
);
} }

View File

@ -1,69 +0,0 @@
use rpki::storage::{FetchCachePpPack, PackFile, PackTime};
fn sample_pack() -> FetchCachePpPack {
let this_update =
PackTime::from_utc_offset_datetime(time::OffsetDateTime::from_unix_timestamp(0).unwrap());
let next_update = PackTime::from_utc_offset_datetime(
time::OffsetDateTime::from_unix_timestamp(3600).unwrap(),
);
let verified_at =
PackTime::from_utc_offset_datetime(time::OffsetDateTime::from_unix_timestamp(10).unwrap());
let file1 = PackFile::from_bytes_compute_sha256(
"rsync://example.net/repo/CA/1.crl",
b"crl-bytes".to_vec(),
);
let file2 = PackFile::from_bytes_compute_sha256(
"rsync://example.net/repo/CA/2.cer",
b"cer-bytes".to_vec(),
);
FetchCachePpPack {
format_version: FetchCachePpPack::FORMAT_VERSION_V1,
manifest_rsync_uri: "rsync://example.net/repo/CA/manifest.mft".to_string(),
publication_point_rsync_uri: "rsync://example.net/repo/CA/".to_string(),
manifest_number_be: vec![1],
this_update,
next_update,
verified_at,
manifest_bytes: b"manifest-bytes".to_vec(),
files: vec![file1, file2],
}
}
/// Encoding a valid pack and decoding the resulting bytes must yield an
/// equal value (lossless roundtrip).
#[test]
fn pack_encode_decode_roundtrip() {
    let original = sample_pack();
    let encoded = original.encode().expect("encode");
    let roundtripped = FetchCachePpPack::decode(&encoded).expect("decode");
    assert_eq!(roundtripped, original);
}
/// A pack whose manifest bytes were emptied still encodes, but must be
/// rejected when decoded.
#[test]
fn pack_rejects_missing_manifest() {
    let mut broken = sample_pack();
    broken.manifest_bytes = Vec::new();
    let encoded = broken.encode().expect("encode");
    let decoded = FetchCachePpPack::decode(&encoded);
    assert!(decoded.is_err());
}
/// Two file entries sharing the same rsync URI must make decode fail.
#[test]
fn pack_rejects_duplicate_rsync_uri_entries() {
    let mut pack = sample_pack();
    let duplicate =
        PackFile::from_bytes_compute_sha256("rsync://example.net/repo/CA/1.crl", b"other".to_vec());
    pack.files.push(duplicate);
    let encoded = pack.encode().expect("encode");
    assert!(FetchCachePpPack::decode(&encoded).is_err());
}
/// this_update / next_update survive a roundtrip as parseable timestamps,
/// and the fixture's next_update is strictly later than its this_update.
#[test]
fn pack_includes_this_update_next_update() {
    let encoded = sample_pack().encode().expect("encode");
    let decoded = FetchCachePpPack::decode(&encoded).expect("decode");
    let parsed_this = decoded.this_update.parse().expect("parse this_update");
    let parsed_next = decoded.next_update.parse().expect("parse next_update");
    assert!(parsed_next > parsed_this);
}

View File

@ -1,129 +0,0 @@
use rpki::storage::{FetchCachePpPack, PackFile, PackTime};
fn base_pack() -> FetchCachePpPack {
let this_update =
PackTime::from_utc_offset_datetime(time::OffsetDateTime::from_unix_timestamp(0).unwrap());
let next_update = PackTime::from_utc_offset_datetime(
time::OffsetDateTime::from_unix_timestamp(3600).unwrap(),
);
let verified_at =
PackTime::from_utc_offset_datetime(time::OffsetDateTime::from_unix_timestamp(10).unwrap());
let file =
PackFile::from_bytes_compute_sha256("rsync://example.net/repo/obj.cer", b"x".to_vec());
FetchCachePpPack {
format_version: FetchCachePpPack::FORMAT_VERSION_V1,
manifest_rsync_uri: "rsync://example.net/repo/manifest.mft".to_string(),
publication_point_rsync_uri: "rsync://example.net/repo/".to_string(),
manifest_number_be: vec![1],
this_update,
next_update,
verified_at,
manifest_bytes: b"manifest".to_vec(),
files: vec![file],
}
}
/// An unknown format_version must fail decode with a descriptive error.
#[test]
fn pack_rejects_unsupported_format_version() {
    let mut pack = base_pack();
    pack.format_version = 999;
    let encoded = pack.encode().expect("encode");
    let message = FetchCachePpPack::decode(&encoded).unwrap_err().to_string();
    assert!(message.contains("unsupported pack format_version"));
}
/// An empty manifest rsync URI must make decode fail.
#[test]
fn pack_rejects_missing_manifest_rsync_uri() {
    let mut pack = base_pack();
    pack.manifest_rsync_uri = String::new();
    let encoded = pack.encode().expect("encode");
    assert!(FetchCachePpPack::decode(&encoded).is_err());
}
/// An empty publication-point rsync URI must make decode fail.
#[test]
fn pack_rejects_missing_publication_point_rsync_uri() {
    let mut pack = base_pack();
    pack.publication_point_rsync_uri = String::new();
    let encoded = pack.encode().expect("encode");
    assert!(FetchCachePpPack::decode(&encoded).is_err());
}
/// An empty manifest number must be reported as a missing required field.
#[test]
fn pack_rejects_missing_manifest_number() {
    let mut pack = base_pack();
    pack.manifest_number_be = Vec::new();
    let encoded = pack.encode().expect("encode");
    let err = FetchCachePpPack::decode(&encoded).unwrap_err();
    assert!(err.to_string().contains("missing required field"));
}
/// Manifest numbers longer than 20 octets must be rejected at decode time.
#[test]
fn pack_rejects_manifest_number_too_long() {
    let mut pack = base_pack();
    // One octet over the 20-octet limit.
    pack.manifest_number_be = [1u8; 21].to_vec();
    let encoded = pack.encode().expect("encode");
    let err = FetchCachePpPack::decode(&encoded).unwrap_err();
    assert!(err.to_string().contains("at most 20 octets"));
}
/// A manifest number with a leading zero octet must be rejected.
#[test]
fn pack_rejects_manifest_number_with_leading_zeros() {
    let mut pack = base_pack();
    pack.manifest_number_be = [0u8, 1u8].to_vec();
    let encoded = pack.encode().expect("encode");
    let err = FetchCachePpPack::decode(&encoded).unwrap_err();
    assert!(err.to_string().contains("leading zeros"));
}
/// A this_update value that is not a parseable timestamp must make decode fail.
#[test]
fn pack_rejects_invalid_time_fields() {
    let mut pack = base_pack();
    pack.this_update = PackTime {
        rfc3339_utc: String::from("not-a-time"),
    };
    let encoded = pack.encode().expect("encode");
    assert!(FetchCachePpPack::decode(&encoded).is_err());
}
/// A file entry with zero-length bytes must make decode fail, even when the
/// entry carries a hash.
#[test]
fn pack_rejects_empty_file_bytes() {
    let mut pack = base_pack();
    // Non-zero digest so only the empty body can trigger the rejection.
    let mut digest = [0u8; 32];
    digest[0] = 1;
    pack.files = vec![PackFile::new(
        "rsync://example.net/repo/empty.cer",
        Vec::new(),
        digest,
    )];
    let encoded = pack.encode().expect("encode");
    assert!(FetchCachePpPack::decode(&encoded).is_err());
}
/// A file whose stored sha256 does not match its bytes must be rejected
/// with a hash-mismatch error.
#[test]
fn pack_rejects_file_hash_mismatch() {
    let mut pack = base_pack();
    pack.files = vec![PackFile::new(
        "rsync://example.net/repo/bad.cer",
        b"abc".to_vec(),
        [0u8; 32],
    )];
    let encoded = pack.encode().expect("encode");
    let message = FetchCachePpPack::decode(&encoded).unwrap_err().to_string();
    assert!(message.contains("file hash mismatch"));
}
/// A file entry with an empty rsync URI must be reported as a missing
/// required field.
#[test]
fn pack_rejects_missing_file_rsync_uri() {
    let mut pack = base_pack();
    pack.files = vec![PackFile::from_bytes_compute_sha256("", b"x".to_vec())];
    let encoded = pack.encode().expect("encode");
    let err = FetchCachePpPack::decode(&encoded).unwrap_err();
    assert!(err.to_string().contains("missing required field"));
}