// rpki/src/bin/replay_bundle_refresh_sequence_outputs.rs
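//! Refreshes the replayed outputs of a RIR bundle: replays the base state and
//! every delta step from the archived captures, regenerates the CCR, VRP, and
//! VAP artifacts, and repairs the capture locks so the bundle remains
//! self-replayable. Expects `bundle.json`, `verification.json`, `tal.tal`,
//! and `ta.cer` under `--rir-dir`.
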
use std::collections::{BTreeMap, BTreeSet};
use std::fs;
use std::path::{Path, PathBuf};
use rpki::bundle::{
build_vap_compare_rows, build_vrp_compare_rows, sha256_hex, write_json, write_vap_csv,
write_vrp_csv,
};
use rpki::ccr::{build_ccr_from_run, decode_content_info, verify_content_info, write_ccr_file};
use rpki::policy::Policy;
use rpki::replay::archive::canonical_rsync_module;
use rpki::storage::RocksStore;
use rpki::validation::run_tree_from_tal::{
run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit,
run_tree_from_tal_and_ta_der_payload_replay_serial_audit,
};
use rpki::validation::tree::TreeRunConfig;
use serde::{Deserialize, Serialize};
use time::format_description::well_known::Rfc3339;
fn usage() -> &'static str {
"Usage: replay_bundle_refresh_sequence_outputs --rir-dir <path> [--keep-db]"
}
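/// Parsed command-line options; `--keep-db` preserves the scratch RocksDB
/// under `.tmp-refresh` after a successful run.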
#[derive(Default)]
struct Args {
rir_dir: Option<PathBuf>,
keep_db: bool,
}
fn parse_args() -> Result<Args, String> {
let mut args = Args::default();
let argv: Vec<String> = std::env::args().skip(1).collect();
let mut i = 0;
while i < argv.len() {
match argv[i].as_str() {
"--rir-dir" => {
i += 1;
args.rir_dir = Some(PathBuf::from(
argv.get(i).ok_or("--rir-dir requires a value")?,
));
}
"--keep-db" => {
args.keep_db = true;
}
"--help" | "-h" => {
return Err(usage().to_string());
}
other => return Err(format!("unknown argument: {other}\n{}", usage())),
}
i += 1;
}
if args.rir_dir.is_none() {
return Err(format!("--rir-dir is required\n{}", usage()));
}
Ok(args)
}
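/// Serde mirror of the v2 `bundle.json` schema: identifies the bundle and
/// locates the base-state and delta-sequence artifacts via paths relative to
/// the RIR directory.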
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct RirBundleMetadataV2Serde {
schema_version: String,
bundle_producer: String,
rir: String,
tal_sha256: String,
ta_cert_sha256: String,
has_any_aspa: bool,
has_any_router_key: bool,
base: BaseBundleStateMetadataV2Serde,
delta_sequence: DeltaSequenceMetadataV2Serde,
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct BaseBundleStateMetadataV2Serde {
validation_time: String,
ccr_sha256: String,
vrp_count: usize,
vap_count: usize,
relative_archive_path: String,
relative_locks_path: String,
relative_ccr_path: String,
relative_vrps_path: String,
relative_vaps_path: String,
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct DeltaSequenceMetadataV2Serde {
configured_delta_count: usize,
configured_interval_seconds: u64,
steps: Vec<DeltaStepMetadataV2Serde>,
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct DeltaStepMetadataV2Serde {
index: usize,
id: String,
relative_path: String,
base_ref: String,
validation_time: String,
delta_ccr_sha256: String,
vrp_count: usize,
vap_count: usize,
relative_archive_path: String,
relative_transition_locks_path: String,
relative_target_locks_path: String,
relative_ccr_path: String,
relative_vrps_path: String,
relative_vaps_path: String,
has_aspa: bool,
has_router_key: bool,
}
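/// Serde mirror of `verification.json`: free-form JSON sections for the base
/// state, each delta step, and the overall summary.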
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
struct VerificationV2 {
base: serde_json::Value,
steps: Vec<serde_json::Value>,
summary: serde_json::Value,
}
fn parse_time(value: &str) -> Result<time::OffsetDateTime, String> {
time::OffsetDateTime::parse(value, &Rfc3339)
.map_err(|e| format!("invalid RFC3339 time `{value}`: {e}"))
}
fn path_join(root: &Path, relative: &str) -> PathBuf {
root.join(relative)
}
fn is_failed_fetch_source(source: &str) -> bool {
source == "failed_fetch_no_cache"
}
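// Shared JSON lock/transition values, factored out of the literals that were
// repeated verbatim in the repair functions below.

/// Lock value for a repository fetched over plain rsync.
fn rsync_lock_value() -> serde_json::Value {
    serde_json::json!({
        "transport": "rsync",
        "session": null,
        "serial": null,
    })
}

/// Transition record for a delta step that falls back from RRDP to rsync.
fn fallback_rsync_transition() -> serde_json::Value {
    serde_json::json!({
        "kind": "fallback-rsync",
        "base": rsync_lock_value(),
        "target": rsync_lock_value(),
        "delta_count": 0,
        "deltas": [],
    })
}

/// Lock value for an RRDP notification URI: the store's last session/serial
/// when a complete snapshot record exists, otherwise the rsync fallback.
fn rrdp_lock_value_from_store(
    store: &RocksStore,
    notify_uri: &str,
) -> Result<serde_json::Value, String> {
    let record = store
        .get_rrdp_source_record(notify_uri)
        .map_err(|e| format!("read rrdp source record failed for {notify_uri}: {e}"))?;
    Ok(match record {
        Some(record)
            if record.last_session_id.is_some()
                && record.last_serial.is_some()
                && record.last_snapshot_uri.is_some()
                && record.last_snapshot_hash.is_some() =>
        {
            serde_json::json!({
                "transport": "rrdp",
                "session": record.last_session_id,
                "serial": record.last_serial,
            })
        }
        _ => rsync_lock_value(),
    })
}

/// Collect the bytes of every object currently `Present` in the store's
/// repository view under the given rsync module prefix.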
fn current_module_objects_from_store(
store: &RocksStore,
module_uri: &str,
) -> Result<BTreeMap<String, Vec<u8>>, String> {
let entries = store
.list_repository_view_entries_with_prefix(module_uri)
.map_err(|e| format!("list repository view failed for {module_uri}: {e}"))?;
let mut out = BTreeMap::new();
for entry in entries {
if entry.state != rpki::storage::RepositoryViewState::Present {
continue;
}
let bytes = store
.load_current_object_bytes_by_uri(&entry.rsync_uri)
.map_err(|e| format!("load current object failed for {}: {e}", entry.rsync_uri))?
.ok_or_else(|| format!("current object missing for {}", entry.rsync_uri))?;
out.insert(entry.rsync_uri, bytes);
}
Ok(out)
}
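/// Bucket directory for an rsync module within a capture:
/// `<capture_root>/rsync/modules/<sha256(module_uri)>`.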
fn rsync_bucket_dir(capture_root: &Path, module_uri: &str) -> PathBuf {
capture_root
.join("rsync")
.join("modules")
.join(sha256_hex(module_uri.as_bytes()))
}
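/// Rebuild the on-disk rsync tree for a module from the given object bytes,
/// replacing any existing tree, and return the URIs that were written.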
fn materialize_rsync_module_from_store(
capture_root: &Path,
module_uri: &str,
objects: &BTreeMap<String, Vec<u8>>,
) -> Result<Vec<String>, String> {
let bucket_dir = rsync_bucket_dir(capture_root, module_uri);
let tree_root = bucket_dir.join("tree");
if tree_root.exists() {
fs::remove_dir_all(&tree_root)
.map_err(|e| format!("remove old rsync tree failed: {}: {e}", tree_root.display()))?;
}
let relative_root = module_uri
.strip_prefix("rsync://")
.ok_or_else(|| format!("invalid rsync module uri: {module_uri}"))?
.trim_end_matches('/');
fs::create_dir_all(tree_root.join(relative_root)).map_err(|e| {
format!(
"create rsync tree root failed: {}: {e}",
tree_root.join(relative_root).display()
)
})?;
for (uri, bytes) in objects {
let rel = uri
.strip_prefix(module_uri)
.ok_or_else(|| format!("object uri {uri} does not belong to module {module_uri}"))?;
let path = tree_root.join(relative_root).join(rel);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).map_err(|e| {
format!(
"create rsync object parent failed: {}: {e}",
parent.display()
)
})?;
}
fs::write(&path, bytes)
.map_err(|e| format!("write rsync object failed: {}: {e}", path.display()))?;
}
Ok(objects.keys().cloned().collect())
}
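/// Recursively copy a directory tree; only regular files and directories are
/// handled, so symlinks are skipped.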
fn copy_dir_all(src: &Path, dst: &Path) -> Result<(), String> {
fs::create_dir_all(dst)
.map_err(|e| format!("create directory failed: {}: {e}", dst.display()))?;
for entry in
fs::read_dir(src).map_err(|e| format!("read directory failed: {}: {e}", src.display()))?
{
let entry = entry.map_err(|e| format!("read entry failed: {}: {e}", src.display()))?;
let file_type = entry
.file_type()
.map_err(|e| format!("read file type failed: {}: {e}", entry.path().display()))?;
let target = dst.join(entry.file_name());
if file_type.is_dir() {
copy_dir_all(&entry.path(), &target)?;
} else if file_type.is_file() {
fs::copy(entry.path(), &target).map_err(|e| {
format!(
"copy file failed: {} -> {}: {e}",
entry.path().display(),
target.display()
)
})?;
}
}
Ok(())
}
fn load_json(path: &Path) -> Result<serde_json::Value, String> {
serde_json::from_slice(
&fs::read(path).map_err(|e| format!("read json failed: {}: {e}", path.display()))?,
)
.map_err(|e| format!("parse json failed: {}: {e}", path.display()))
}
fn write_json_value(path: &Path, value: &serde_json::Value) -> Result<(), String> {
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)
.map_err(|e| format!("create json parent failed: {}: {e}", parent.display()))?;
}
fs::write(
path,
serde_json::to_vec_pretty(value).map_err(|e| format!("serialize json failed: {e}"))?,
)
.map_err(|e| format!("write json failed: {}: {e}", path.display()))
}
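/// Resolve a base capture root (`<archive_root>/v1/captures/<capture>`) from
/// the `capture` field of a locks file.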
fn base_capture_root_from_locks(archive_root: &Path, locks_path: &Path) -> Result<PathBuf, String> {
let value = load_json(locks_path)?;
let capture = value
.get("capture")
.and_then(|v| v.as_str())
.ok_or_else(|| format!("missing capture in {}", locks_path.display()))?;
Ok(archive_root.join("v1").join("captures").join(capture))
}
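/// Return the canonical rsync module URI for a publication point that must be
/// pinned as an rsync module: points without an RRDP notification URI, or
/// whose last repository sync used rsync. Failed fetches and RRDP-backed
/// points yield `None`.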
fn keep_rsync_module(pp: &rpki::audit::PublicationPointAudit) -> Result<Option<String>, String> {
if is_failed_fetch_source(&pp.source) {
return Ok(None);
}
let module_uri = canonical_rsync_module(&pp.rsync_base_uri).map_err(|e| {
format!(
"canonicalize rsync module failed for {}: {e}",
pp.rsync_base_uri
)
})?;
if pp.rrdp_notification_uri.is_none() || pp.repo_sync_source.as_deref() == Some("rsync") {
return Ok(Some(module_uri));
}
Ok(None)
}
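/// Rewrite the base capture's locks: refresh every RRDP lock from the store's
/// last fetched state, re-materialize the rsync modules the replay actually
/// used, drop stale module buckets, and record the resulting repo and module
/// counts in the verification document.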
fn repair_base_inputs(
archive_root: &Path,
locks_path: &Path,
publication_points: &[rpki::audit::PublicationPointAudit],
store: &RocksStore,
verification: &mut VerificationV2,
) -> Result<(), String> {
let capture_root = base_capture_root_from_locks(archive_root, locks_path)?;
let mut locks = load_json(locks_path)?;
let candidate_modules: BTreeSet<String> = publication_points
.iter()
.filter_map(|pp| keep_rsync_module(pp).transpose())
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.collect();
let old_modules: Vec<String> = locks
.get("rsync")
.and_then(|v| v.as_object())
.map(|m| m.keys().cloned().collect())
.unwrap_or_default();
if let Some(rrdp_obj) = locks.get_mut("rrdp").and_then(|v| v.as_object_mut()) {
for pp in publication_points {
let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() else {
continue;
};
let lock_value = rrdp_lock_value_from_store(store, notify_uri)?;
rrdp_obj.insert(notify_uri.to_string(), lock_value);
}
let repos_dir = capture_root.join("rrdp").join("repos");
if repos_dir.exists() {
for entry in fs::read_dir(&repos_dir)
.map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))?
{
let entry = entry.map_err(|e| {
format!("read rrdp repo entry failed: {}: {e}", repos_dir.display())
})?;
let meta = entry.path().join("meta.json");
if !meta.exists() {
continue;
}
let meta_value = load_json(&meta)?;
let notify_uri = match meta_value.get("rpkiNotify").and_then(|v| v.as_str()) {
Some(value) => value.to_string(),
None => continue,
};
if rrdp_obj.contains_key(&notify_uri) {
continue;
}
let lock_value = rrdp_lock_value_from_store(store, &notify_uri)?;
rrdp_obj.insert(notify_uri, lock_value);
}
}
}
let mut final_modules = serde_json::Map::new();
for module_uri in candidate_modules {
let objects = current_module_objects_from_store(store, &module_uri)?;
if objects.is_empty() {
continue;
}
let _files = materialize_rsync_module_from_store(&capture_root, &module_uri, &objects)?;
final_modules.insert(
module_uri,
serde_json::json!({
"transport": "rsync"
}),
);
}
for module_uri in old_modules {
if !final_modules.contains_key(&module_uri) {
let bucket_dir = rsync_bucket_dir(&capture_root, &module_uri);
let _ = fs::remove_dir_all(bucket_dir);
}
}
if let Some(rsync_value) = locks.get_mut("rsync") {
*rsync_value = serde_json::Value::Object(final_modules.clone());
}
write_json_value(locks_path, &locks)?;
verification.base["capture"]["rrdpRepoCount"] = serde_json::Value::from(
locks
.get("rrdp")
.and_then(|v| v.as_object())
.map(|m| m.len())
.unwrap_or(0),
);
verification.base["capture"]["rsyncModuleCount"] = serde_json::Value::from(final_modules.len());
Ok(())
}
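/// Rewrite a step's target locks the same way, but force an rsync lock for
/// any repository that the previous locks did not fetch over RRDP.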
fn repair_target_locks(
locks_path: &Path,
previous_locks_path: &Path,
publication_points: &[rpki::audit::PublicationPointAudit],
store: &RocksStore,
) -> Result<(), String> {
let mut locks = load_json(locks_path)?;
let previous_locks = load_json(previous_locks_path)?;
if let Some(rrdp_obj) = locks.get_mut("rrdp").and_then(|v| v.as_object_mut()) {
for pp in publication_points {
let Some(notify_uri) = pp.rrdp_notification_uri.as_deref() else {
continue;
};
let mut lock_value = rrdp_lock_value_from_store(store, notify_uri)?;
let previous_transport = previous_locks
.get("rrdp")
.and_then(|v| v.get(notify_uri))
.and_then(|v| v.get("transport"))
.and_then(|v| v.as_str());
if previous_transport != Some("rrdp") {
lock_value = rsync_lock_value();
}
rrdp_obj.insert(notify_uri.to_string(), lock_value);
}
}
let candidate_modules: BTreeSet<String> = publication_points
.iter()
.filter_map(|pp| keep_rsync_module(pp).transpose())
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.collect();
let mut final_modules = serde_json::Map::new();
for module_uri in candidate_modules {
let objects = current_module_objects_from_store(store, &module_uri)?;
if objects.is_empty() {
continue;
}
final_modules.insert(
module_uri,
serde_json::json!({
"transport": "rsync"
}),
);
}
if let Some(rsync_value) = locks.get_mut("rsync") {
*rsync_value = serde_json::Value::Object(final_modules);
}
write_json_value(locks_path, &locks)
}
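/// Repair a delta step's transition locks and capture contents: backfill RRDP
/// transitions from each repo's `transition.json`, force `fallback-rsync` for
/// repos the previous locks did not fetch over RRDP, re-materialize the rsync
/// module overlays, and copy unchanged RRDP sessions over from the base
/// capture so the step replays on its own.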
fn repair_delta_step_inputs(
step_dir: &Path,
base_archive_root: &Path,
base_locks_path: &Path,
previous_locks_path: &Path,
publication_points: &[rpki::audit::PublicationPointAudit],
store: &RocksStore,
step_verification: &mut serde_json::Value,
) -> Result<(), String> {
let locks_path = step_dir.join("locks-delta.json");
let mut locks = load_json(&locks_path)?;
let previous_locks = load_json(previous_locks_path)?;
let capture = locks
.get("capture")
.and_then(|v| v.as_str())
.ok_or_else(|| format!("missing capture in {}", locks_path.display()))?
.to_string();
let capture_root = step_dir
.join("payload-delta-archive")
.join("v1")
.join("captures")
.join(capture);
if let Some(rrdp_obj) = locks.get_mut("rrdp").and_then(|v| v.as_object_mut()) {
let repos_dir = capture_root.join("rrdp").join("repos");
if repos_dir.exists() {
for entry in fs::read_dir(&repos_dir)
.map_err(|e| format!("scan rrdp repo dir failed: {}: {e}", repos_dir.display()))?
{
let entry = entry.map_err(|e| {
format!("read rrdp repo entry failed: {}: {e}", repos_dir.display())
})?;
let meta = entry.path().join("meta.json");
if !meta.exists() {
continue;
}
let meta_value = load_json(&meta)?;
let notify_uri = match meta_value.get("rpkiNotify").and_then(|v| v.as_str()) {
Some(value) => value.to_string(),
None => continue,
};
if rrdp_obj.contains_key(&notify_uri) {
continue;
}
let transition_path = entry.path().join("transition.json");
let lock_value = if transition_path.exists() {
let transition = load_json(&transition_path)?;
serde_json::json!({
    "kind": transition
        .get("kind")
        .cloned()
        .unwrap_or_else(|| serde_json::Value::String("fallback-rsync".to_string())),
    "base": transition.get("base").cloned().unwrap_or_else(rsync_lock_value),
    "target": transition.get("target").cloned().unwrap_or_else(rsync_lock_value),
    "delta_count": transition
        .get("delta_count")
        .cloned()
        .unwrap_or(serde_json::Value::from(0)),
    "deltas": transition
        .get("deltas")
        .cloned()
        .unwrap_or(serde_json::Value::Array(vec![])),
})
} else {
    fallback_rsync_transition()
};
rrdp_obj.insert(notify_uri, lock_value);
}
}
for (notify_uri, entry) in rrdp_obj.iter_mut() {
let previous_transport = previous_locks
.get("rrdp")
.and_then(|v| v.get(notify_uri))
.and_then(|v| v.get("transport"))
.and_then(|v| v.as_str());
if previous_transport != Some("rrdp") {
let fallback = fallback_rsync_transition();
*entry = fallback.clone();
let bucket_dir = capture_root
.join("rrdp")
.join("repos")
.join(sha256_hex(notify_uri.as_bytes()));
if bucket_dir.exists() {
write_json(&bucket_dir.join("transition.json"), &fallback)?;
}
}
}
}
let candidate_modules: BTreeSet<String> = publication_points
.iter()
.filter_map(|pp| keep_rsync_module(pp).transpose())
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.collect();
let old_modules: Vec<String> = locks
.get("rsync")
.and_then(|v| v.as_object())
.map(|m| m.keys().cloned().collect())
.unwrap_or_default();
let mut final_modules = serde_json::Map::new();
for module_uri in candidate_modules {
let objects = current_module_objects_from_store(store, &module_uri)?;
if objects.is_empty() {
continue;
}
let files = materialize_rsync_module_from_store(&capture_root, &module_uri, &objects)?;
let bucket_dir = rsync_bucket_dir(&capture_root, &module_uri);
write_json(
&bucket_dir.join("files.json"),
&serde_json::json!({
"version": 1,
"module": module_uri,
"fileCount": files.len(),
"files": files,
}),
)?;
final_modules.insert(
module_uri,
serde_json::json!({
"file_count": objects.len(),
"overlay_only": true
}),
);
}
for module_uri in old_modules {
if !final_modules.contains_key(&module_uri) {
let bucket_dir = rsync_bucket_dir(&capture_root, &module_uri);
let _ = fs::remove_dir_all(bucket_dir);
}
}
if let Some(rsync_value) = locks.get_mut("rsync") {
*rsync_value = serde_json::Value::Object(final_modules.clone());
}
let base_capture_root = base_capture_root_from_locks(base_archive_root, base_locks_path)?;
if let Some(rrdp_obj) = locks.get("rrdp").and_then(|v| v.as_object()) {
for (notify_uri, entry) in rrdp_obj {
let kind = entry.get("kind").and_then(|v| v.as_str()).unwrap_or("");
if kind != "unchanged" {
continue;
}
let session = entry
.get("target")
.and_then(|v| v.get("session"))
.and_then(|v| v.as_str())
.or_else(|| {
entry
.get("base")
.and_then(|v| v.get("session"))
.and_then(|v| v.as_str())
});
let Some(session) = session else { continue };
let bucket_hash = sha256_hex(notify_uri.as_bytes());
let bucket_dir = capture_root.join("rrdp").join("repos").join(&bucket_hash);
let session_dir = bucket_dir.join(session);
if session_dir.exists() {
continue;
}
let base_bucket_dir = base_capture_root
.join("rrdp")
.join("repos")
.join(&bucket_hash);
let base_session_dir = base_bucket_dir.join(session);
if !base_session_dir.exists() {
continue;
}
fs::create_dir_all(&bucket_dir).map_err(|e| {
format!(
"create delta rrdp repo dir failed: {}: {e}",
bucket_dir.display()
)
})?;
let base_meta = base_bucket_dir.join("meta.json");
if !bucket_dir.join("meta.json").exists() && base_meta.exists() {
fs::copy(&base_meta, bucket_dir.join("meta.json")).map_err(|e| {
format!(
"copy base repo meta failed: {} -> {}: {e}",
base_meta.display(),
bucket_dir.join("meta.json").display()
)
})?;
}
copy_dir_all(&base_session_dir, &session_dir)?;
}
}
write_json_value(&locks_path, &locks)?;
step_verification["capture"]["rrdpRepoCount"] = serde_json::Value::from(
locks
.get("rrdp")
.and_then(|v| v.as_object())
.map(|m| m.len())
.unwrap_or(0),
);
step_verification["capture"]["rsyncModuleCount"] = serde_json::Value::from(final_modules.len());
Ok(())
}
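/// Re-point a delta step at the actual previous locks file: recompute
/// `baseLocksSha256` from its bytes, write it into both `locks-delta.json`
/// and the capture's `base.json`, and downgrade RRDP transitions to
/// `fallback-rsync` where the previous locks were not on RRDP.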
fn rewrite_delta_base_hash(step_dir: &Path, previous_locks_path: &Path) -> Result<(), String> {
let previous_locks_bytes = fs::read(previous_locks_path).map_err(|e| {
format!(
"read previous locks failed for delta base hash rewrite: {}: {e}",
previous_locks_path.display()
)
})?;
let previous_locks_sha256 = sha256_hex(&previous_locks_bytes);
let locks_path = step_dir.join("locks-delta.json");
let mut locks = load_json(&locks_path)?;
let previous_locks = serde_json::from_slice::<serde_json::Value>(&previous_locks_bytes)
.map_err(|e| {
format!(
"parse previous locks failed: {}: {e}",
previous_locks_path.display()
)
})?;
locks["baseLocksSha256"] = serde_json::Value::String(previous_locks_sha256.clone());
let capture = locks
.get("capture")
.and_then(|v| v.as_str())
.map(|s| s.to_string())
.ok_or_else(|| format!("missing capture in {}", locks_path.display()))?;
write_json_value(&locks_path, &locks)?;
let base_meta_path = step_dir
.join("payload-delta-archive")
.join("v1")
.join("captures")
.join(&capture)
.join("base.json");
let mut base_meta = load_json(&base_meta_path)?;
base_meta["baseLocksSha256"] = serde_json::Value::String(previous_locks_sha256);
write_json_value(&base_meta_path, &base_meta)?;
if let Some(rrdp_obj) = locks.get_mut("rrdp").and_then(|v| v.as_object_mut()) {
for (notify_uri, entry) in rrdp_obj.iter_mut() {
let previous_transport = previous_locks
.get("rrdp")
.and_then(|v| v.get(notify_uri))
.and_then(|v| v.get("transport"))
.and_then(|v| v.as_str());
if previous_transport != Some("rrdp") {
let fallback = fallback_rsync_transition();
*entry = fallback.clone();
let bucket_dir = step_dir
.join("payload-delta-archive")
.join("v1")
.join("captures")
.join(&capture)
.join("rrdp")
.join("repos")
.join(sha256_hex(notify_uri.as_bytes()));
if bucket_dir.exists() {
write_json(&bucket_dir.join("transition.json"), &fallback)?;
}
}
}
}
write_json_value(&locks_path, &locks)?;
Ok(())
}
fn main() {
if let Err(err) = real_main() {
eprintln!("{err}");
std::process::exit(1);
}
}
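/// Replay the base state, rebuild its CCR/VRP/VAP outputs, repair its capture
/// inputs, then do the same for each delta step while chaining locks, and
/// finally rewrite `bundle.json` and `verification.json` with the refreshed
/// hashes and counts.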
fn real_main() -> Result<(), String> {
let args = parse_args()?;
let rir_dir = args.rir_dir.unwrap();
let bundle_json_path = rir_dir.join("bundle.json");
let verification_path = rir_dir.join("verification.json");
let mut bundle: RirBundleMetadataV2Serde =
serde_json::from_slice(&fs::read(&bundle_json_path).map_err(|e| {
format!(
"read bundle.json failed: {}: {e}",
bundle_json_path.display()
)
})?)
.map_err(|e| {
format!(
"parse bundle.json failed: {}: {e}",
bundle_json_path.display()
)
})?;
let mut verification: VerificationV2 =
serde_json::from_slice(&fs::read(&verification_path).map_err(|e| {
format!(
"read verification.json failed: {}: {e}",
verification_path.display()
)
})?)
.map_err(|e| {
format!(
"parse verification.json failed: {}: {e}",
verification_path.display()
)
})?;
let tal_bytes = fs::read(rir_dir.join("tal.tal"))
.map_err(|e| format!("read tal.tal failed: {}: {e}", rir_dir.display()))?;
let ta_bytes = fs::read(rir_dir.join("ta.cer"))
.map_err(|e| format!("read ta.cer failed: {}: {e}", rir_dir.display()))?;
let tmp_root = rir_dir.parent().unwrap_or(&rir_dir).join(".tmp-refresh");
let work_db = tmp_root.join(format!("{}-work-db", bundle.rir));
if work_db.exists() {
fs::remove_dir_all(&work_db)
.map_err(|e| format!("remove old refresh db failed: {}: {e}", work_db.display()))?;
}
if let Some(parent) = work_db.parent() {
fs::create_dir_all(parent)
.map_err(|e| format!("create refresh db parent failed: {}: {e}", parent.display()))?;
}
let store =
RocksStore::open(&work_db).map_err(|e| format!("open refresh rocksdb failed: {e}"))?;
let base_archive = path_join(&rir_dir, &bundle.base.relative_archive_path);
let base_locks = path_join(&rir_dir, &bundle.base.relative_locks_path);
let base_ccr = path_join(&rir_dir, &bundle.base.relative_ccr_path);
let base_vrps = path_join(&rir_dir, &bundle.base.relative_vrps_path);
let base_vaps = path_join(&rir_dir, &bundle.base.relative_vaps_path);
let base_validation_time = parse_time(&bundle.base.validation_time)?;
let base_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&store,
&Policy::default(),
&tal_bytes,
&ta_bytes,
None,
&base_archive,
&base_locks,
base_validation_time,
&TreeRunConfig {
max_depth: None,
max_instances: None,
},
)
.map_err(|e| format!("base replay failed: {e}"))?;
let base_ccr_content = build_ccr_from_run(
&store,
&[base_out.discovery.trust_anchor.clone()],
&base_out.tree.vrps,
&base_out.tree.aspas,
&base_out.tree.router_keys,
base_validation_time,
)
.map_err(|e| format!("build base ccr failed: {e}"))?;
write_ccr_file(&base_ccr, &base_ccr_content)
.map_err(|e| format!("write base ccr failed: {}: {e}", base_ccr.display()))?;
let base_ccr_bytes = fs::read(&base_ccr)
.map_err(|e| format!("read base ccr failed: {}: {e}", base_ccr.display()))?;
let base_decoded =
decode_content_info(&base_ccr_bytes).map_err(|e| format!("decode base ccr failed: {e}"))?;
let base_verify =
verify_content_info(&base_decoded).map_err(|e| format!("verify base ccr failed: {e}"))?;
let base_vrp_rows = build_vrp_compare_rows(&base_out.tree.vrps, &bundle.rir);
let base_vap_rows = build_vap_compare_rows(&base_out.tree.aspas, &bundle.rir);
write_vrp_csv(&base_vrps, &base_vrp_rows)?;
write_vap_csv(&base_vaps, &base_vap_rows)?;
bundle.base.ccr_sha256 = sha256_hex(&base_ccr_bytes);
bundle.base.vrp_count = base_vrp_rows.len();
bundle.base.vap_count = base_vap_rows.len();
verification.base["ccr"]["sha256"] = serde_json::Value::String(bundle.base.ccr_sha256.clone());
verification.base["ccr"]["stateHashesOk"] =
serde_json::Value::Bool(base_verify.state_hashes_ok);
verification.base["ccr"]["manifestInstances"] =
serde_json::Value::from(base_verify.manifest_instances);
verification.base["ccr"]["roaVrpCount"] = serde_json::Value::from(base_vrp_rows.len());
verification.base["ccr"]["aspaPayloadSets"] = serde_json::Value::from(base_vap_rows.len());
verification.base["ccr"]["routerKeyCount"] =
serde_json::Value::from(base_verify.router_key_count);
verification.base["compareViews"]["baseVrpCount"] =
serde_json::Value::from(base_vrp_rows.len());
verification.base["compareViews"]["baseVapCount"] =
serde_json::Value::from(base_vap_rows.len());
verification.base["capture"]["selfReplayOk"] = serde_json::Value::Bool(true);
repair_base_inputs(
&base_archive,
&base_locks,
&base_out.publication_points,
&store,
&mut verification,
)?;
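// Walk the delta sequence in order: each step replays against the previous
// step's (repaired) target locks, starting from the repaired base locks.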
let mut previous_locks_path = base_locks.clone();
let mut any_aspa = !base_vap_rows.is_empty();
let mut all_steps_self_replay_ok = true;
for (idx, step) in bundle.delta_sequence.steps.iter_mut().enumerate() {
let step_dir = path_join(&rir_dir, &step.relative_path);
rewrite_delta_base_hash(&step_dir, &previous_locks_path)?;
let delta_archive = path_join(&rir_dir, &step.relative_archive_path);
let delta_locks = path_join(&rir_dir, &step.relative_transition_locks_path);
let delta_ccr = path_join(&rir_dir, &step.relative_ccr_path);
let delta_vrps = path_join(&rir_dir, &step.relative_vrps_path);
let delta_vaps = path_join(&rir_dir, &step.relative_vaps_path);
let target_locks = path_join(&rir_dir, &step.relative_target_locks_path);
let delta_validation_time = parse_time(&step.validation_time)?;
let delta_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit(
&store,
&Policy::default(),
&tal_bytes,
&ta_bytes,
None,
&delta_archive,
&previous_locks_path,
&delta_locks,
delta_validation_time,
&TreeRunConfig {
max_depth: None,
max_instances: None,
},
)
.map_err(|e| format!("delta step {} replay failed: {e}", step.id))?;
let delta_ccr_content = build_ccr_from_run(
&store,
&[delta_out.discovery.trust_anchor.clone()],
&delta_out.tree.vrps,
&delta_out.tree.aspas,
&delta_out.tree.router_keys,
delta_validation_time,
)
.map_err(|e| format!("build delta ccr failed for {}: {e}", step.id))?;
write_ccr_file(&delta_ccr, &delta_ccr_content)
.map_err(|e| format!("write delta ccr failed: {}: {e}", delta_ccr.display()))?;
let delta_ccr_bytes = fs::read(&delta_ccr)
.map_err(|e| format!("read delta ccr failed: {}: {e}", delta_ccr.display()))?;
let delta_decoded = decode_content_info(&delta_ccr_bytes)
.map_err(|e| format!("decode delta ccr failed for {}: {e}", step.id))?;
let delta_verify = verify_content_info(&delta_decoded)
.map_err(|e| format!("verify delta ccr failed for {}: {e}", step.id))?;
let delta_vrp_rows = build_vrp_compare_rows(&delta_out.tree.vrps, &bundle.rir);
let delta_vap_rows = build_vap_compare_rows(&delta_out.tree.aspas, &bundle.rir);
write_vrp_csv(&delta_vrps, &delta_vrp_rows)?;
write_vap_csv(&delta_vaps, &delta_vap_rows)?;
step.delta_ccr_sha256 = sha256_hex(&delta_ccr_bytes);
step.vrp_count = delta_vrp_rows.len();
step.vap_count = delta_vap_rows.len();
step.has_aspa = !delta_vap_rows.is_empty();
any_aspa |= step.has_aspa;
if let Some(step_verification) = verification.steps.get_mut(idx) {
step_verification["ccr"]["sha256"] =
serde_json::Value::String(step.delta_ccr_sha256.clone());
step_verification["ccr"]["stateHashesOk"] =
serde_json::Value::Bool(delta_verify.state_hashes_ok);
step_verification["ccr"]["manifestInstances"] =
serde_json::Value::from(delta_verify.manifest_instances);
step_verification["ccr"]["roaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len());
step_verification["ccr"]["aspaPayloadSets"] =
serde_json::Value::from(delta_vap_rows.len());
step_verification["ccr"]["routerKeyCount"] =
serde_json::Value::from(delta_verify.router_key_count);
step_verification["compareViews"]["vrpCount"] =
serde_json::Value::from(delta_vrp_rows.len());
step_verification["compareViews"]["vapCount"] =
serde_json::Value::from(delta_vap_rows.len());
step_verification["selfReplayOk"] = serde_json::Value::Bool(true);
}
let step_verification_path =
path_join(&rir_dir, &step.relative_path).join("verification.json");
let mut step_verification_json: serde_json::Value =
serde_json::from_slice(&fs::read(&step_verification_path).map_err(|e| {
format!(
"read step verification failed: {}: {e}",
step_verification_path.display()
)
})?)
.map_err(|e| {
format!(
"parse step verification failed: {}: {e}",
step_verification_path.display()
)
})?;
step_verification_json["ccr"]["sha256"] =
serde_json::Value::String(step.delta_ccr_sha256.clone());
step_verification_json["ccr"]["stateHashesOk"] =
serde_json::Value::Bool(delta_verify.state_hashes_ok);
step_verification_json["ccr"]["manifestInstances"] =
serde_json::Value::from(delta_verify.manifest_instances);
step_verification_json["ccr"]["roaVrpCount"] =
serde_json::Value::from(delta_vrp_rows.len());
step_verification_json["ccr"]["aspaPayloadSets"] =
serde_json::Value::from(delta_vap_rows.len());
step_verification_json["ccr"]["routerKeyCount"] =
serde_json::Value::from(delta_verify.router_key_count);
step_verification_json["compareViews"]["vrpCount"] =
serde_json::Value::from(delta_vrp_rows.len());
step_verification_json["compareViews"]["vapCount"] =
serde_json::Value::from(delta_vap_rows.len());
step_verification_json["selfReplayOk"] = serde_json::Value::Bool(true);
repair_delta_step_inputs(
&step_dir,
&base_archive,
&base_locks,
&previous_locks_path,
&delta_out.publication_points,
&store,
&mut step_verification_json,
)?;
write_json(&step_verification_path, &step_verification_json)?;
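// A replay failure above aborts via `?`, so reaching this point means the
// step self-replayed; the flag therefore stays true across the sequence.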
all_steps_self_replay_ok &= true;
repair_target_locks(
&target_locks,
&previous_locks_path,
&delta_out.publication_points,
&store,
)?;
previous_locks_path = target_locks;
}
bundle.has_any_aspa = any_aspa;
verification.summary["baseSelfReplayOk"] = serde_json::Value::Bool(true);
verification.summary["allStepsSelfReplayOk"] =
serde_json::Value::Bool(all_steps_self_replay_ok);
write_json(&bundle_json_path, &bundle)?;
write_json(&verification_path, &verification)?;
if !args.keep_db && work_db.exists() {
fs::remove_dir_all(&work_db)
.map_err(|e| format!("remove refresh db failed: {}: {e}", work_db.display()))?;
if tmp_root.exists()
&& fs::read_dir(&tmp_root)
.map_err(|e| format!("read_dir failed: {}: {e}", tmp_root.display()))?
.next()
.is_none()
{
let _ = fs::remove_dir(&tmp_root);
}
}
println!("{}", rir_dir.display());
Ok(())
}