Compare commits

..

30 Commits

Author SHA1 Message Date
38421b1ae7 20260415_2 支持多个RIR并行混合执行,APNIC+ARIN运行1 snapshot + 1 delta,对比rpki-client,snapshot比rpki-client慢4分钟(398vs137),输出未收敛,delta时输出收敛(348vs181),评估应该是正确性没有问题,下一步进一步优化性能 2026-04-16 11:33:52 +08:00
585c41b83b 20260413_2 并行化架构优化第一阶段,apnic 串行98s->并行74s,传输层任务并行,同repo内发布点串行 2026-04-15 09:53:11 +08:00
af1c2c7f88 20260413 增加定时周期任务与rpki-client对比,发现rpki-client rsync降权问题,改到tmp目录执行,执行两轮step发现输入基本对齐 2026-04-13 16:30:36 +08:00
e45830d79f 20260411 apply snapahot内存优化,采用流式写文件和分块处理降低运行内存需求 2026-04-11 14:45:08 +08:00
77fc2f1a41 20260410 完成五个rir 基于cir的三方回放,raw by hash 独立db,发现内存占用大,连续大rir 录制发生oom 2026-04-11 11:24:32 +08:00
e083fe4daa 20260408_2 增加CIR sequence,未验证drop analysis,遇到问题是static pool保存太慢,拖慢整体录制,待解决 2026-04-09 16:08:11 +08:00
c9ef5aaf4c 20260407 & 20260408 基于cir 三方replay对齐,并且materialize 使用hard link优化 2026-04-08 16:27:46 +08:00
34fb9657f1 20260401 live recorder 扩展到1+N个delta,完成5个RIR录制,以及三方replay,结果三方均对齐vrps和vaps 2026-04-03 16:44:27 +08:00
6edc420ce2 20260330 完成live bundle录制,远程录制,以及与routinator/rpki-client replay对比 2026-03-31 17:34:32 +08:00
cd0ba15286 20260326 完成数据库model迁移;20260327 增加一键replay脚本 2026-03-27 11:24:34 +08:00
fe8b89d829 20260324_2 增加ccr & router key support 2026-03-26 11:52:06 +08:00
d6d44669b4 20260324 完成和routinator对齐snapshot delta replay correctness一致 2026-03-24 10:10:04 +08:00
557a69cbd2 20260316迭代 增加delta replay以及multi-rir
replay 对比,五个RIR 输出vrp与routinator一致
2026-03-16 22:54:48 +08:00
73d8ebb5c1 增加 payload replay for snapshot,20260313 迭代 2026-03-15 22:49:06 +08:00
cf764c35bb 将fetch pp cache改成使用vcir结构,跑通apnic全量同步 2026-03-13 14:45:41 +08:00
e3339533b8 增加delta设计草图 2026-03-11 10:03:15 +08:00
afc50364f8 全量同步测试增加download过程审计输出 2026-03-06 11:52:59 +08:00
6276d13814 手动执行全量同步 2026-03-04 11:12:53 +08:00
0f3d65254e 5 TAL test pass 2026-02-28 10:10:03 +08:00
13516c4f73 add delta sync and fail fetch process 2026-02-27 18:02:01 +08:00
68cbd3c500 优化数据对象decode + profile validate;benchmark对比routinator geomean 0.8 2026-02-26 17:01:51 +08:00
1cc3351bef manifest decode & profile validate optimization 2026-02-25 11:16:02 +08:00
2a6a963ecd fetch cache pp imporved 2026-02-11 10:07:24 +08:00
6e135b9d7a run tree from tal pass 2026-02-10 12:09:59 +08:00
afc31c02ab 串行验证通过 2026-02-09 19:35:54 +08:00
7be865d7f1 优化时间表示 2026-02-06 15:30:26 +08:00
a58e507f92 重构error code 2026-02-04 17:02:17 +08:00
cc9f3f21de 增加rc,完成所有模型的解析,优化error code 的RFC引用 2026-02-03 16:50:52 +08:00
56ae2ca4fc 增加aspa对象解析 2026-02-02 15:42:30 +08:00
bcd4829486 add signed object, manifest impl. add coverage script 2026-02-02 15:42:01 +08:00
396 changed files with 116819 additions and 1404 deletions

1
.gitignore vendored
View File

@ -1,2 +1,3 @@
target/
Cargo.lock
perf.*

View File

@ -3,13 +3,34 @@ name = "rpki"
version = "0.1.0"
edition = "2024"
[features]
default = ["full"]
# Full build used by the main RP implementation (includes RocksDB-backed storage).
full = ["dep:rocksdb"]
profile = ["dep:pprof", "dep:flate2"]
[dependencies]
der-parser = "10.0.0"
asn1-rs = "0.7.1"
der-parser = { version = "10.0.0", features = ["serialize"] }
hex = "0.4.3"
base64 = "0.22.1"
sha2 = "0.10.8"
thiserror = "2.0.18"
time = "0.3.45"
ring = "0.17.14"
x509-parser = { version = "0.18.0", features = ["verify"] }
url = "2.5.8"
asn1-rs = "0.7.1"
asn1-rs-derive = "0.6.0"
asn1 = "0.23.0"
serde = { version = "1.0.218", features = ["derive"] }
serde_json = "1.0.140"
toml = "0.8.20"
rocksdb = { version = "0.22.0", optional = true, default-features = false, features = ["lz4"] }
serde_cbor = "0.11.2"
roxmltree = "0.20.0"
quick-xml = "0.37.2"
uuid = { version = "1.7.0", features = ["v4"] }
reqwest = { version = "0.12.12", default-features = false, features = ["blocking", "rustls-tls"] }
pprof = { version = "0.14.1", optional = true, features = ["flamegraph", "prost-codec"] }
flate2 = { version = "1.0.35", optional = true }
tempfile = "3.16.0"
[dev-dependencies]

View File

@ -9,3 +9,20 @@ cargo test
cargo test -- --nocapture
```
# 覆盖率cargo-llvm-cov
安装工具:
```
rustup component add llvm-tools-preview
cargo install cargo-llvm-cov --locked
```
统计行覆盖率并要求 >=90%
```
./scripts/coverage.sh
# 或
cargo llvm-cov --fail-under-lines 90
```

View File

@ -0,0 +1,8 @@
[package]
name = "ours-manifest-bench"
version = "0.1.0"
edition = "2024"
[dependencies]
rpki = { path = "../..", default-features = false }

View File

@ -0,0 +1,145 @@
use rpki::data_model::manifest::ManifestObject;
use std::hint::black_box;
use std::path::PathBuf;
use std::time::Instant;
/// CLI options for the manifest decode micro-benchmark.
#[derive(Debug, Clone)]
struct Config {
    /// Fixture name resolved under `../../tests/benchmark/selected_der/` (exclusive with `manifest_path`).
    sample: Option<String>,
    /// Explicit path to a `.mft` fixture (exclusive with `sample`).
    manifest_path: Option<PathBuf>,
    /// Timed decode iterations per round.
    iterations: u64,
    /// Untimed warmup iterations run before each timed round.
    warmup_iterations: u64,
    /// Number of timed rounds; results are averaged across rounds.
    repeats: u32,
}
/// Print CLI usage to stderr and terminate the process with exit code 2.
fn usage_and_exit() -> ! {
    let usage = "Usage:\n ours-manifest-bench (--sample <name> | --manifest <path>) [--iterations N] [--warmup-iterations N] [--repeats N]\n\nExamples:\n cargo run --release -- --sample small-01 --iterations 20000 --warmup-iterations 2000 --repeats 3\n cargo run --release -- --manifest ../../tests/benchmark/selected_der/small-01.mft";
    eprintln!("{usage}");
    std::process::exit(2);
}
/// Parse CLI flags into a `Config`.
///
/// Exactly one of `--sample` / `--manifest` must be given; any missing value,
/// unparsable number, or unknown flag prints usage and exits (`usage_and_exit`).
fn parse_args() -> Config {
    // Defaults used when the corresponding flag is absent.
    let mut sample: Option<String> = None;
    let mut manifest_path: Option<PathBuf> = None;
    let mut iterations: u64 = 20_000;
    let mut warmup_iterations: u64 = 2_000;
    let mut repeats: u32 = 3;
    let mut args = std::env::args().skip(1);
    while let Some(arg) = args.next() {
        match arg.as_str() {
            "--sample" => sample = Some(args.next().unwrap_or_else(|| usage_and_exit())),
            "--manifest" => {
                manifest_path = Some(PathBuf::from(args.next().unwrap_or_else(|| usage_and_exit())))
            }
            "--iterations" => {
                iterations = args
                    .next()
                    .unwrap_or_else(|| usage_and_exit())
                    .parse()
                    .unwrap_or_else(|_| usage_and_exit())
            }
            "--warmup-iterations" => {
                warmup_iterations = args
                    .next()
                    .unwrap_or_else(|| usage_and_exit())
                    .parse()
                    .unwrap_or_else(|_| usage_and_exit())
            }
            "--repeats" => {
                repeats = args
                    .next()
                    .unwrap_or_else(|| usage_and_exit())
                    .parse()
                    .unwrap_or_else(|_| usage_and_exit())
            }
            "-h" | "--help" => usage_and_exit(),
            _ => usage_and_exit(),
        }
    }
    // `--sample` and `--manifest` are mutually exclusive and one is required.
    if sample.is_none() && manifest_path.is_none() {
        usage_and_exit();
    }
    if sample.is_some() && manifest_path.is_some() {
        usage_and_exit();
    }
    Config {
        sample,
        manifest_path,
        iterations,
        warmup_iterations,
        repeats,
    }
}
/// Map a sample name to its fixture path under the shared test-data directory.
/// Assumes current working directory is `rpki/benchmark/ours_manifest_bench`.
fn derive_manifest_path(sample: &str) -> PathBuf {
    let mut path = PathBuf::from("../../tests/benchmark/selected_der");
    path.push(format!("{sample}.mft"));
    path
}
/// Entry point: load one manifest fixture, benchmark `ManifestObject::decode_der`
/// over `repeats` rounds, and print the averaged result as a Markdown table row.
fn main() {
    let cfg = parse_args();
    // Resolve the fixture path: explicit --manifest wins, otherwise derive from
    // --sample. parse_args guarantees exactly one is set, so the unwrap is safe.
    let manifest_path = cfg
        .manifest_path
        .clone()
        .unwrap_or_else(|| derive_manifest_path(cfg.sample.as_deref().unwrap()));
    let bytes = std::fs::read(&manifest_path).unwrap_or_else(|e| {
        eprintln!("read manifest fixture failed: {e}; path={}", manifest_path.display());
        std::process::exit(1);
    });
    // Decode once up-front to fail fast and to report the manifest's file count.
    let decoded_once = ManifestObject::decode_der(&bytes).unwrap_or_else(|e| {
        eprintln!("decode failed: {e}; path={}", manifest_path.display());
        std::process::exit(1);
    });
    let file_count = decoded_once.manifest.file_count();
    let mut round_ns_per_op: Vec<f64> = Vec::with_capacity(cfg.repeats as usize);
    let mut round_ops_per_s: Vec<f64> = Vec::with_capacity(cfg.repeats as usize);
    for _round in 0..cfg.repeats {
        // Warmup runs before *every* timed round, not just the first.
        for _ in 0..cfg.warmup_iterations {
            let obj = ManifestObject::decode_der(black_box(&bytes)).expect("warmup decode");
            black_box(obj);
        }
        // Timed section: black_box keeps the optimizer from eliding the decode.
        let start = Instant::now();
        for _ in 0..cfg.iterations {
            let obj = ManifestObject::decode_der(black_box(&bytes)).expect("timed decode");
            black_box(obj);
        }
        let elapsed = start.elapsed();
        let ns_per_op = (elapsed.as_secs_f64() * 1e9) / (cfg.iterations as f64);
        let ops_per_s = (cfg.iterations as f64) / elapsed.as_secs_f64();
        round_ns_per_op.push(ns_per_op);
        round_ops_per_s.push(ops_per_s);
    }
    // Arithmetic mean across rounds.
    let avg_ns_per_op = round_ns_per_op.iter().sum::<f64>() / (round_ns_per_op.len() as f64);
    let avg_ops_per_s = round_ops_per_s.iter().sum::<f64>() / (round_ops_per_s.len() as f64);
    // Display name: prefer --sample, otherwise the file name without ".mft".
    let sample_name = cfg.sample.clone().unwrap_or_else(|| {
        manifest_path
            .file_name()
            .map(|s| s.to_string_lossy().to_string())
            .unwrap_or_else(|| manifest_path.display().to_string())
    });
    let sample_name = sample_name
        .strip_suffix(".mft")
        .unwrap_or(&sample_name)
        .to_string();
    println!("fixture: {}", manifest_path.display());
    println!();
    println!("| sample | avg ns/op | ops/s | file count |");
    println!("|---|---:|---:|---:|");
    println!(
        "| {} | {:.2} | {:.2} | {} |",
        sample_name, avg_ns_per_op, avg_ops_per_s, file_count
    );
}

View File

@ -0,0 +1,8 @@
[package]
name = "routinator-object-bench"
version = "0.1.0"
edition = "2024"
publish = false
[dependencies]
rpki = { version = "=0.19.1", features = ["repository"] }

View File

@ -0,0 +1,552 @@
use rpki::repository::cert::Cert;
use rpki::repository::crl::Crl;
use rpki::repository::manifest::Manifest;
use rpki::repository::roa::Roa;
use rpki::repository::aspa::Aspa;
use rpki::repository::resources::{AsResources, IpResources};
use std::hint::black_box;
use std::path::{Path, PathBuf};
use std::time::Instant;
/// RPKI object categories covered by the benchmark fixtures.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum ObjType {
    Cer,
    Crl,
    Manifest,
    Roa,
    Aspa,
}

impl ObjType {
    /// All variants, in report/sort order.
    const ALL: [Self; 5] = [Self::Cer, Self::Crl, Self::Manifest, Self::Roa, Self::Aspa];

    /// Parse a `--type` CLI value.
    fn parse(s: &str) -> Result<Self, String> {
        Self::ALL
            .into_iter()
            .find(|t| t.as_str() == s)
            .ok_or_else(|| "type must be one of: cer, crl, manifest, roa, aspa".into())
    }

    /// Canonical type name (also the per-type fixture subdirectory).
    fn as_str(self) -> &'static str {
        self.names().0
    }

    /// Fixture file extension for this type.
    fn ext(self) -> &'static str {
        self.names().1
    }

    /// (name, extension) pair; manifests and ASPAs use abbreviated extensions.
    fn names(self) -> (&'static str, &'static str) {
        match self {
            Self::Cer => ("cer", "cer"),
            Self::Crl => ("crl", "crl"),
            Self::Manifest => ("manifest", "mft"),
            Self::Roa => ("roa", "roa"),
            Self::Aspa => ("aspa", "asa"),
        }
    }
}
/// One benchmark fixture discovered under the samples directory.
#[derive(Clone, Debug)]
struct Sample {
    /// Object category, derived from the per-type subdirectory it was found in.
    obj_type: ObjType,
    /// File stem (e.g. "p50"), used for filtering and reporting.
    name: String,
    /// Path to the fixture file.
    path: PathBuf,
}
/// CLI options for the Routinator-baseline object decode benchmark.
#[derive(Clone, Debug)]
struct Config {
    /// Fixtures root directory (one subdirectory per object type).
    dir: PathBuf,
    /// Restrict the run to a single object type.
    type_filter: Option<ObjType>,
    /// Restrict the run to a single sample name (e.g. "p50").
    sample_filter: Option<String>,
    /// Fixed iterations per round; when None, iterations are chosen adaptively.
    fixed_iters: Option<u64>,
    /// Untimed warmup iterations per sample.
    warmup_iters: u64,
    /// Timed rounds per sample.
    rounds: u64,
    /// Adaptive mode: minimum wall time a round must take.
    min_round_ms: u64,
    /// Adaptive mode: upper bound on the iteration count.
    max_adaptive_iters: u64,
    /// Strict DER decoding where the decode API takes a strictness flag.
    strict: bool,
    /// Also run certificate inspect_ca/inspect_ee where applicable.
    cert_inspect: bool,
    /// Optional CSV report path.
    out_csv: Option<PathBuf>,
    /// Optional Markdown report path.
    out_md: Option<PathBuf>,
}
/// Print an optional error message followed by the full usage text to stderr,
/// then terminate the process with exit code 2.
fn usage_and_exit(err: Option<&str>) -> ! {
    if let Some(err) = err {
        eprintln!("error: {err}");
        eprintln!();
    }
    eprintln!(
        "Usage:\n\
        cargo run --release --manifest-path rpki/benchmark/routinator_object_bench/Cargo.toml -- [OPTIONS]\n\
        \n\
        Options:\n\
        --dir <PATH> Fixtures root dir (default: ../../tests/benchmark/selected_der_v2)\n\
        --type <cer|crl|manifest|roa|aspa> Filter by type\n\
        --sample <NAME> Filter by sample name (e.g. p50)\n\
        --iters <N> Fixed iterations per round (optional; otherwise adaptive)\n\
        --warmup-iters <N> Warmup iterations (default: 50)\n\
        --rounds <N> Rounds (default: 5)\n\
        --min-round-ms <MS> Adaptive: minimum round time (default: 200)\n\
        --max-iters <N> Adaptive: maximum iters (default: 1_000_000)\n\
        --strict <true|false> Strict DER where applicable (default: true)\n\
        --cert-inspect Also run Cert::inspect_ca/inspect_ee where applicable (default: false)\n\
        --out-csv <PATH> Write CSV output\n\
        --out-md <PATH> Write Markdown output\n\
        "
    );
    std::process::exit(2);
}
/// Parse a boolean CLI value; exits with a usage error on anything unrecognized.
fn parse_bool(s: &str, name: &str) -> bool {
    if matches!(s, "1" | "true" | "TRUE" | "yes" | "YES") {
        return true;
    }
    if matches!(s, "0" | "false" | "FALSE" | "no" | "NO") {
        return false;
    }
    usage_and_exit(Some(&format!("{name} must be true/false")))
}
/// Parse an unsigned integer CLI value; exits with a usage error on failure.
fn parse_u64(s: &str, name: &str) -> u64 {
    match s.parse::<u64>() {
        Ok(value) => value,
        Err(_) => usage_and_exit(Some(&format!("{name} must be an integer"))),
    }
}
/// Default fixtures root, resolved relative to this crate's manifest directory.
fn default_samples_dir() -> PathBuf {
    let mut dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    dir.push("../../tests/benchmark/selected_der_v2");
    dir
}
/// Parse CLI flags into a `Config`, exiting with a usage error on any
/// malformed input. All numeric knobs must be > 0 (validated after parsing).
fn parse_args() -> Config {
    // Defaults (mirrored in the usage text).
    let mut dir: PathBuf = default_samples_dir();
    let mut type_filter: Option<ObjType> = None;
    let mut sample_filter: Option<String> = None;
    let mut fixed_iters: Option<u64> = None;
    let mut warmup_iters: u64 = 50;
    let mut rounds: u64 = 5;
    let mut min_round_ms: u64 = 200;
    let mut max_adaptive_iters: u64 = 1_000_000;
    let mut strict: bool = true;
    let mut cert_inspect: bool = false;
    let mut out_csv: Option<PathBuf> = None;
    let mut out_md: Option<PathBuf> = None;
    let mut args = std::env::args().skip(1);
    while let Some(arg) = args.next() {
        match arg.as_str() {
            "--dir" => dir = PathBuf::from(args.next().unwrap_or_else(|| usage_and_exit(None))),
            "--type" => {
                type_filter = Some(ObjType::parse(
                    &args.next().unwrap_or_else(|| usage_and_exit(None)),
                )
                .unwrap_or_else(|e| usage_and_exit(Some(&e))))
            }
            "--sample" => {
                sample_filter = Some(args.next().unwrap_or_else(|| usage_and_exit(None)))
            }
            "--iters" => {
                fixed_iters = Some(parse_u64(
                    &args.next().unwrap_or_else(|| usage_and_exit(None)),
                    "--iters",
                ))
            }
            "--warmup-iters" => {
                warmup_iters = parse_u64(
                    &args.next().unwrap_or_else(|| usage_and_exit(None)),
                    "--warmup-iters",
                )
            }
            "--rounds" => {
                rounds = parse_u64(&args.next().unwrap_or_else(|| usage_and_exit(None)), "--rounds")
            }
            "--min-round-ms" => {
                min_round_ms = parse_u64(
                    &args.next().unwrap_or_else(|| usage_and_exit(None)),
                    "--min-round-ms",
                )
            }
            "--max-iters" => {
                max_adaptive_iters = parse_u64(
                    &args.next().unwrap_or_else(|| usage_and_exit(None)),
                    "--max-iters",
                )
            }
            "--strict" => {
                strict = parse_bool(
                    &args.next().unwrap_or_else(|| usage_and_exit(None)),
                    "--strict",
                )
            }
            // Flag without a value; presence enables the extra inspect pass.
            "--cert-inspect" => cert_inspect = true,
            "--out-csv" => out_csv = Some(PathBuf::from(args.next().unwrap_or_else(|| usage_and_exit(None)))),
            "--out-md" => out_md = Some(PathBuf::from(args.next().unwrap_or_else(|| usage_and_exit(None)))),
            "-h" | "--help" => usage_and_exit(None),
            _ => usage_and_exit(Some(&format!("unknown argument: {arg}"))),
        }
    }
    // Reject zero values: every knob drives a loop count or a time budget.
    if warmup_iters == 0 {
        usage_and_exit(Some("--warmup-iters must be > 0"));
    }
    if rounds == 0 {
        usage_and_exit(Some("--rounds must be > 0"));
    }
    if min_round_ms == 0 {
        usage_and_exit(Some("--min-round-ms must be > 0"));
    }
    if max_adaptive_iters == 0 {
        usage_and_exit(Some("--max-iters must be > 0"));
    }
    if let Some(n) = fixed_iters {
        if n == 0 {
            usage_and_exit(Some("--iters must be > 0"));
        }
    }
    Config {
        dir,
        type_filter,
        sample_filter,
        fixed_iters,
        warmup_iters,
        rounds,
        min_round_ms,
        max_adaptive_iters,
        strict,
        cert_inspect,
        out_csv,
        out_md,
    }
}
fn read_samples(root: &Path) -> Vec<Sample> {
let mut out = Vec::new();
for obj_type in [
ObjType::Cer,
ObjType::Crl,
ObjType::Manifest,
ObjType::Roa,
ObjType::Aspa,
] {
let dir = root.join(obj_type.as_str());
let rd = match std::fs::read_dir(&dir) {
Ok(rd) => rd,
Err(_) => continue,
};
for ent in rd.flatten() {
let path = ent.path();
if path.extension().and_then(|s| s.to_str()) != Some(obj_type.ext()) {
continue;
}
let name = path
.file_stem()
.and_then(|s| s.to_str())
.unwrap_or("unknown")
.to_string();
out.push(Sample { obj_type, name, path });
}
}
out.sort_by(|a, b| a.obj_type.cmp(&b.obj_type).then_with(|| a.name.cmp(&b.name)));
out
}
/// Double the iteration count until one timed batch of `op` takes at least
/// `min_round_ms`, capping at `max_iters`. Returns the chosen count.
/// Note that calibration itself executes `op` repeatedly.
fn choose_iters_adaptive<F: FnMut()>(mut op: F, min_round_ms: u64, max_iters: u64) -> u64 {
    let target_secs = (min_round_ms as f64) / 1e3;
    let mut iters: u64 = 1;
    loop {
        let t0 = Instant::now();
        for _ in 0..iters {
            op();
        }
        let took = t0.elapsed().as_secs_f64();
        // Stop once the batch is long enough or the cap is reached.
        if took >= target_secs || iters >= max_iters {
            break iters;
        }
        iters = iters.saturating_mul(2).min(max_iters);
    }
}
/// Complexity proxy for IP resources: number of address blocks, 1 for an
/// "inherit" marker, 0 when block conversion fails.
fn count_ip(res: &IpResources) -> u64 {
    if res.is_inherited() {
        1
    } else {
        match res.to_blocks() {
            Ok(blocks) => blocks.iter().count() as u64,
            Err(_) => 0,
        }
    }
}
/// Complexity proxy for AS resources: number of AS blocks, 1 for an
/// "inherit" marker, 0 when block conversion fails.
fn count_as(res: &AsResources) -> u64 {
    if res.is_inherited() {
        1
    } else {
        match res.to_blocks() {
            Ok(blocks) => blocks.iter().count() as u64,
            Err(_) => 0,
        }
    }
}
/// Decode `bytes` once (untimed) and return a per-type "complexity" proxy for
/// the report: cer -> v4+v6+AS resource block count, crl -> revoked-cert count,
/// manifest -> file-entry count, roa -> content-entry count, aspa -> provider
/// AS set length. Panics (via expect) if the fixture fails to decode/inspect.
fn complexity(obj_type: ObjType, bytes: &[u8], strict: bool, cert_inspect: bool) -> u64 {
    match obj_type {
        ObjType::Cer => {
            let cert = Cert::decode(bytes).expect("decode cert");
            // Optional inspect pass, mirroring what decode_profile times.
            if cert_inspect {
                if cert.is_ca() {
                    cert.inspect_ca(strict).expect("inspect ca");
                } else {
                    cert.inspect_ee(strict).expect("inspect ee");
                }
            }
            count_ip(cert.v4_resources())
                .saturating_add(count_ip(cert.v6_resources()))
                .saturating_add(count_as(cert.as_resources()))
        }
        ObjType::Crl => {
            let crl = Crl::decode(bytes).expect("decode crl");
            crl.revoked_certs().iter().count() as u64
        }
        ObjType::Manifest => {
            let mft = Manifest::decode(bytes, strict).expect("decode manifest");
            if cert_inspect {
                mft.cert().inspect_ee(strict).expect("inspect ee");
            }
            mft.content().len() as u64
        }
        ObjType::Roa => {
            let roa = Roa::decode(bytes, strict).expect("decode roa");
            if cert_inspect {
                roa.cert().inspect_ee(strict).expect("inspect ee");
            }
            roa.content().iter().count() as u64
        }
        ObjType::Aspa => {
            let asa = Aspa::decode(bytes, strict).expect("decode aspa");
            if cert_inspect {
                asa.cert().inspect_ee(strict).expect("inspect ee");
            }
            asa.content().provider_as_set().len() as u64
        }
    }
}
/// The timed operation: decode one object and optionally inspect its
/// certificate. `black_box` on input and output keeps the optimizer from
/// eliding the decode work.
fn decode_profile(obj_type: ObjType, bytes: &[u8], strict: bool, cert_inspect: bool) {
    match obj_type {
        ObjType::Cer => {
            let cert = Cert::decode(black_box(bytes)).expect("decode cert");
            if cert_inspect {
                if cert.is_ca() {
                    cert.inspect_ca(strict).expect("inspect ca");
                } else {
                    cert.inspect_ee(strict).expect("inspect ee");
                }
            }
            black_box(cert);
        }
        ObjType::Crl => {
            // No cert_inspect branch for CRLs in this benchmark.
            let crl = Crl::decode(black_box(bytes)).expect("decode crl");
            black_box(crl);
        }
        ObjType::Manifest => {
            let mft = Manifest::decode(black_box(bytes), strict).expect("decode manifest");
            if cert_inspect {
                mft.cert().inspect_ee(strict).expect("inspect ee");
            }
            black_box(mft);
        }
        ObjType::Roa => {
            let roa = Roa::decode(black_box(bytes), strict).expect("decode roa");
            if cert_inspect {
                roa.cert().inspect_ee(strict).expect("inspect ee");
            }
            black_box(roa);
        }
        ObjType::Aspa => {
            let asa = Aspa::decode(black_box(bytes), strict).expect("decode aspa");
            if cert_inspect {
                asa.cert().inspect_ee(strict).expect("inspect ee");
            }
            black_box(asa);
        }
    }
}
/// One row of the benchmark report (printed and/or written to MD/CSV).
#[derive(Clone, Debug)]
struct ResultRow {
    /// Object type name (cer/crl/manifest/roa/aspa).
    obj_type: String,
    /// Sample name (fixture file stem).
    sample: String,
    /// Fixture size on disk.
    size_bytes: usize,
    /// Per-type complexity proxy computed by `complexity`.
    complexity: u64,
    /// Mean nanoseconds per decode across rounds.
    avg_ns_per_op: f64,
    /// Derived throughput (1e9 / avg_ns_per_op).
    ops_per_sec: f64,
}
/// Render result rows as a Markdown document: a title heading plus one table.
fn render_markdown(title: &str, rows: &[ResultRow]) -> String {
    let mut md = format!("# {title}\n\n");
    md.push_str("| type | sample | size_bytes | complexity | avg ns/op | ops/s |\n");
    md.push_str("|---|---|---:|---:|---:|---:|\n");
    for row in rows {
        md.push_str(&format!(
            "| {} | {} | {} | {} | {:.2} | {:.2} |\n",
            row.obj_type, row.sample, row.size_bytes, row.complexity, row.avg_ns_per_op, row.ops_per_sec
        ));
    }
    md
}
/// Render result rows as CSV. The sample field is always quoted, with embedded
/// double quotes doubled per RFC 4180.
fn render_csv(rows: &[ResultRow]) -> String {
    let mut csv = String::from("type,sample,size_bytes,complexity,avg_ns_per_op,ops_per_sec\n");
    for row in rows {
        let escaped = row.sample.replace('"', "\"\"");
        csv.push_str(&format!(
            "{},\"{}\",{},{},{:.6},{:.6}\n",
            row.obj_type, escaped, row.size_bytes, row.complexity, row.avg_ns_per_op, row.ops_per_sec
        ));
    }
    csv
}
/// Ensure the parent directory of `path` exists; panics on I/O failure.
/// A path with no parent is a no-op.
fn create_parent_dirs(path: &Path) {
    let Some(parent) = path.parent() else {
        return;
    };
    if let Err(e) = std::fs::create_dir_all(parent) {
        panic!("create_dir_all {}: {e}", parent.display());
    }
}
/// Write `content` to `path`, creating parent directories first.
/// Panics on I/O failure.
fn write_text_file(path: &Path, content: &str) {
    create_parent_dirs(path);
    if let Err(e) = std::fs::write(path, content) {
        panic!("write {}: {e}", path.display());
    }
}
/// Entry point: discover fixtures, apply filters, benchmark decode (plus
/// optional inspect) per sample, print a Markdown table to stdout, and
/// optionally write MD/CSV report files.
fn main() {
    let cfg = parse_args();
    let mut samples = read_samples(&cfg.dir);
    if samples.is_empty() {
        usage_and_exit(Some(&format!(
            "no samples found under: {}",
            cfg.dir.display()
        )));
    }
    // Apply --type / --sample filters; an empty result is a usage error.
    if let Some(t) = cfg.type_filter {
        samples.retain(|s| s.obj_type == t);
        if samples.is_empty() {
            usage_and_exit(Some(&format!("no sample matched --type {}", t.as_str())));
        }
    }
    if let Some(filter) = cfg.sample_filter.as_deref() {
        samples.retain(|s| s.name == filter);
        if samples.is_empty() {
            usage_and_exit(Some(&format!("no sample matched --sample {filter}")));
        }
    }
    // Header: echo the effective configuration for reproducibility.
    println!("# Routinator baseline (rpki crate) decode benchmark (selected_der_v2)");
    println!();
    println!("- dir: {}", cfg.dir.display());
    println!("- strict: {}", cfg.strict);
    println!("- cert_inspect: {}", cfg.cert_inspect);
    if let Some(t) = cfg.type_filter {
        println!("- type: {}", t.as_str());
    }
    if let Some(s) = cfg.sample_filter.as_deref() {
        println!("- sample: {}", s);
    }
    if let Some(n) = cfg.fixed_iters {
        println!("- iters: {} (fixed)", n);
    } else {
        println!(
            "- warmup: {} iters, rounds: {}, min_round: {}ms (adaptive iters, max {})",
            cfg.warmup_iters, cfg.rounds, cfg.min_round_ms, cfg.max_adaptive_iters
        );
    }
    if let Some(p) = cfg.out_csv.as_ref() {
        println!("- out_csv: {}", p.display());
    }
    if let Some(p) = cfg.out_md.as_ref() {
        println!("- out_md: {}", p.display());
    }
    println!();
    println!("| type | sample | size_bytes | complexity | avg ns/op | ops/s |");
    println!("|---|---|---:|---:|---:|---:|");
    let mut rows: Vec<ResultRow> = Vec::with_capacity(samples.len());
    for sample in &samples {
        let bytes = std::fs::read(&sample.path)
            .unwrap_or_else(|e| panic!("read {}: {e}", sample.path.display()));
        let size_bytes = bytes.len();
        // Untimed decode to compute the complexity proxy (also validates the fixture).
        let complexity = complexity(sample.obj_type, bytes.as_slice(), cfg.strict, cfg.cert_inspect);
        // Warmup runs once per sample, before all timed rounds.
        for _ in 0..cfg.warmup_iters {
            decode_profile(sample.obj_type, bytes.as_slice(), cfg.strict, cfg.cert_inspect);
        }
        let mut per_round_ns_per_op = Vec::with_capacity(cfg.rounds as usize);
        for _round in 0..cfg.rounds {
            // Use the fixed iteration count if given; otherwise re-calibrate each round.
            let iters = if let Some(n) = cfg.fixed_iters {
                n
            } else {
                choose_iters_adaptive(
                    || decode_profile(sample.obj_type, bytes.as_slice(), cfg.strict, cfg.cert_inspect),
                    cfg.min_round_ms,
                    cfg.max_adaptive_iters,
                )
            };
            let start = Instant::now();
            for _ in 0..iters {
                decode_profile(sample.obj_type, bytes.as_slice(), cfg.strict, cfg.cert_inspect);
            }
            let elapsed = start.elapsed();
            let total_ns = elapsed.as_secs_f64() * 1e9;
            per_round_ns_per_op.push(total_ns / (iters as f64));
        }
        // Arithmetic mean of per-round ns/op; throughput derived from the mean.
        let avg_ns = per_round_ns_per_op.iter().sum::<f64>() / (per_round_ns_per_op.len() as f64);
        let ops_per_sec = 1e9_f64 / avg_ns;
        println!(
            "| {} | {} | {} | {} | {:.2} | {:.2} |",
            sample.obj_type.as_str(),
            sample.name,
            size_bytes,
            complexity,
            avg_ns,
            ops_per_sec
        );
        rows.push(ResultRow {
            obj_type: sample.obj_type.as_str().to_string(),
            sample: sample.name.clone(),
            size_bytes,
            complexity,
            avg_ns_per_op: avg_ns,
            ops_per_sec,
        });
    }
    // Optional file outputs mirror the table printed above.
    if let Some(path) = cfg.out_md.as_ref() {
        let md = render_markdown(
            "Routinator baseline (rpki crate) decode+inspect (selected_der_v2)",
            &rows,
        );
        write_text_file(path, &md);
        eprintln!("Wrote {}", path.display());
    }
    if let Some(path) = cfg.out_csv.as_ref() {
        let csv = render_csv(&rows);
        write_text_file(path, &csv);
        eprintln!("Wrote {}", path.display());
    }
}

2734
model.txt Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,70 @@
# RPKI Benchmarks (Stage2, selected_der_v2)
This directory contains a reproducible, one-click benchmark to measure **decode + profile validate**
performance for all supported object types and compare **OURS** against the **Routinator baseline**
(`rpki` crate `=0.19.1` with `repository` feature).
## What it measures
Dataset:
- Fixtures: `rpki/tests/benchmark/selected_der_v2/`
- Objects: `cer`, `crl`, `manifest` (`.mft`), `roa`, `aspa` (`.asa`)
- Samples: 10 quantiles per type (`min/p01/p10/p25/p50/p75/p90/p95/p99/max`) → 50 files total
Metrics:
- **decode+validate**: `decode_der` (parse + profile validate) for each object file
- **landing** (OURS only): `PackFile::from_bytes_compute_sha256` + CBOR encode + `RocksDB put_raw`
- **compare**: ratio `ours_ns/op ÷ rout_ns/op` for decode+validate
## Default benchmark settings
Both OURS and Routinator baseline use the same run settings:
- warmup: `10` iterations
- rounds: `3`
- adaptive loop target: `min_round_ms=200` (with an internal max of `1_000_000` iters)
- strict DER: `true` (baseline)
- cert inspect: `false` (baseline)
You can override the settings via environment variables in the runner script:
- `BENCH_WARMUP_ITERS` (default `10`)
- `BENCH_ROUNDS` (default `3`)
- `BENCH_MIN_ROUND_MS` (default `200`)
## One-click run (OURS + Routinator compare)
From the `rpki/` crate directory:
```bash
./scripts/benchmark/run_stage2_selected_der_v2_release.sh
```
Outputs are written under:
- `rpki/target/bench/`
- OURS decode+validate: `stage2_selected_der_v2_decode_release_<TS>.{md,csv}`
- OURS landing: `stage2_selected_der_v2_landing_release_<TS>.{md,csv}`
- Routinator: `stage2_selected_der_v2_routinator_decode_release_<TS>.{md,csv}`
- Compare: `stage2_selected_der_v2_compare_ours_vs_routinator_decode_release_<TS>.{md,csv}`
- Summary: `stage2_selected_der_v2_compare_summary_<TS>.md`
### Why decode and landing are separated
The underlying benchmark can run in `BENCH_MODE=both`, but the **landing** part writes to RocksDB
and may trigger background work (e.g., compactions) that can **skew subsequent decode timings**.
For a fair OURS-vs-Routinator comparison, the runner script:
- runs `BENCH_MODE=decode_validate` for comparison, and
- runs `BENCH_MODE=landing` separately for landing-only numbers.
## Notes
- The Routinator baseline benchmark is implemented in-repo under:
- `rpki/benchmark/routinator_object_bench/`
- It pins `rpki = "=0.19.1"` in its `Cargo.toml`.
- This benchmark is implemented as an `#[ignore]` integration test:
- `rpki/tests/bench_stage2_decode_profile_selected_der_v2.rs`
- The runner script invokes it with `cargo test --release ... -- --ignored --nocapture`.

View File

@ -0,0 +1,123 @@
#!/usr/bin/env bash
set -euo pipefail
# Stage2 (selected_der_v2) decode+profile validate benchmark.
# Runs:
# 1) OURS decode+validate benchmark and writes MD/CSV.
# 2) OURS landing benchmark and writes MD/CSV.
# 3) Routinator baseline decode benchmark (rpki crate =0.19.1).
# 4) Produces a joined compare CSV/MD and a short geomean summary.
# Resolve the crate root (two levels above this script) and run from there so
# relative fixture paths inside the test binaries resolve.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
OUT_DIR="$ROOT_DIR/target/bench"
mkdir -p "$OUT_DIR"
# UTC timestamp shared by every output file of this run.
TS="$(date -u +%Y%m%dT%H%M%SZ)"
# Run settings, overridable via BENCH_* environment variables.
WARMUP_ITERS="${BENCH_WARMUP_ITERS:-10}"
ROUNDS="${BENCH_ROUNDS:-3}"
MIN_ROUND_MS="${BENCH_MIN_ROUND_MS:-200}"
# Timestamped output paths so repeated runs never clobber earlier results.
OURS_MD="$OUT_DIR/stage2_selected_der_v2_decode_release_${TS}.md"
OURS_CSV="$OUT_DIR/stage2_selected_der_v2_decode_release_${TS}.csv"
OURS_LANDING_MD="$OUT_DIR/stage2_selected_der_v2_landing_release_${TS}.md"
OURS_LANDING_CSV="$OUT_DIR/stage2_selected_der_v2_landing_release_${TS}.csv"
ROUT_MD="$OUT_DIR/stage2_selected_der_v2_routinator_decode_release_${TS}.md"
ROUT_CSV="$OUT_DIR/stage2_selected_der_v2_routinator_decode_release_${TS}.csv"
COMPARE_MD="$OUT_DIR/stage2_selected_der_v2_compare_ours_vs_routinator_decode_release_${TS}.md"
COMPARE_CSV="$OUT_DIR/stage2_selected_der_v2_compare_ours_vs_routinator_decode_release_${TS}.csv"
SUMMARY_MD="$OUT_DIR/stage2_selected_der_v2_compare_summary_${TS}.md"
# Decode-only and landing-only runs are kept separate so RocksDB background
# work from landing cannot skew decode timings (see the benchmark README).
echo "[1/4] OURS: decode+validate benchmark (release)..." >&2
BENCH_MODE="decode_validate" \
BENCH_WARMUP_ITERS="$WARMUP_ITERS" \
BENCH_ROUNDS="$ROUNDS" \
BENCH_MIN_ROUND_MS="$MIN_ROUND_MS" \
BENCH_OUT_MD="$OURS_MD" \
BENCH_OUT_CSV="$OURS_CSV" \
cargo test --release --test bench_stage2_decode_profile_selected_der_v2 -- --ignored --nocapture >/dev/null
echo "[2/4] OURS: landing benchmark (release)..." >&2
BENCH_MODE="landing" \
BENCH_WARMUP_ITERS="$WARMUP_ITERS" \
BENCH_ROUNDS="$ROUNDS" \
BENCH_MIN_ROUND_MS="$MIN_ROUND_MS" \
BENCH_OUT_MD_LANDING="$OURS_LANDING_MD" \
BENCH_OUT_CSV_LANDING="$OURS_LANDING_CSV" \
cargo test --release --test bench_stage2_decode_profile_selected_der_v2 -- --ignored --nocapture >/dev/null
# NOTE(review): stage2_perf_compare_m4.sh is expected to run the Routinator
# baseline and produce the ROUT_*/COMPARE_* files from these env vars —
# confirm it honors all of them.
echo "[3/4] Routinator baseline + compare join..." >&2
OURS_CSV="$OURS_CSV" \
ROUT_CSV="$ROUT_CSV" \
ROUT_MD="$ROUT_MD" \
COMPARE_CSV="$COMPARE_CSV" \
COMPARE_MD="$COMPARE_MD" \
WARMUP_ITERS="$WARMUP_ITERS" \
ROUNDS="$ROUNDS" \
MIN_ROUND_MS="$MIN_ROUND_MS" \
scripts/stage2_perf_compare_m4.sh >/dev/null
echo "[4/4] Summary (geomean ratios)..." >&2
# Inline Python: read the compare CSV and emit a per-type geomean summary table.
python3 - "$COMPARE_CSV" "$SUMMARY_MD" <<'PY'
import csv
import math
import sys
from pathlib import Path
from datetime import datetime, timezone
in_csv = Path(sys.argv[1])
out_md = Path(sys.argv[2])
rows = list(csv.DictReader(in_csv.open(newline="")))
# Group ours/routinator ratios by object type.
ratios = {}
for r in rows:
    ratios.setdefault(r["type"], []).append(float(r["ratio_ours_over_rout"]))
def geomean(vals):
    return math.exp(sum(math.log(v) for v in vals) / len(vals))
def p50(vals):
    # Median of a sorted copy; averages the middle pair for even counts.
    v = sorted(vals)
    n = len(v)
    if n % 2 == 1:
        return v[n // 2]
    return (v[n // 2 - 1] + v[n // 2]) / 2.0
all_vals = [float(r["ratio_ours_over_rout"]) for r in rows]
# "all" row first, then one row per type.
types = ["all"] + sorted(ratios.keys())
now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
lines = []
lines.append("# Stage2 selected_der_v2 compare summary (release)\n\n")
lines.append(f"- recorded_at_utc: `{now}`\n")
lines.append(f"- inputs_csv: `{in_csv}`\n\n")
lines.append("| type | n | min | p50 | geomean | max | >1 count |\n")
lines.append("|---|---:|---:|---:|---:|---:|---:|\n")
for t in types:
    vals = all_vals if t == "all" else ratios[t]
    vals_sorted = sorted(vals)
    lines.append(
        f"| {t} | {len(vals_sorted)} | {vals_sorted[0]:.4f} | {p50(vals_sorted):.4f} | "
        f"{geomean(vals_sorted):.4f} | {vals_sorted[-1]:.4f} | {sum(1 for v in vals_sorted if v>1.0)} |\n"
    )
out_md.write_text("".join(lines), encoding="utf-8")
print(out_md)
PY
echo "Done." >&2
echo "- OURS decode MD: $OURS_MD" >&2
echo "- OURS decode CSV: $OURS_CSV" >&2
echo "- OURS landing MD: $OURS_LANDING_MD" >&2
echo "- OURS landing CSV: $OURS_LANDING_CSV" >&2
echo "- Routinator: $ROUT_MD" >&2
echo "- Compare MD: $COMPARE_MD" >&2
echo "- Compare CSV: $COMPARE_CSV" >&2
echo "- Summary MD: $SUMMARY_MD" >&2

56
scripts/cir/README.md Normal file
View File

@ -0,0 +1,56 @@
# CIR Scripts
## `cir-rsync-wrapper`
一个用于 CIR 黑盒 replay 的 rsync wrapper。
### 环境变量
- `REAL_RSYNC_BIN`
- 真实 rsync 二进制路径
- 默认优先 `/usr/bin/rsync`
- `CIR_MIRROR_ROOT`
- 本地镜像树根目录
- 当命令行中出现 `rsync://...` source 时必需
### 语义
- 仅改写 `rsync://host/path` 类型参数
- 其它参数原样透传给真实 rsync
- 改写目标:
- `rsync://example.net/repo/a.roa`
- →
- `<CIR_MIRROR_ROOT>/example.net/repo/a.roa`
### 兼容目标
- Routinator `--rsync-command`
- `rpki-client -e rsync_prog`
## 其它脚本
- `run_cir_replay_ours.sh`
- `run_cir_replay_routinator.sh`
- `run_cir_replay_rpki_client.sh`
- `run_cir_replay_matrix.sh`
## `cir-local-link-sync.py`
`CIR_LOCAL_LINK_MODE=1` 且 wrapper 检测到 source 已经被改写为本地 mirror 路径时,
wrapper 不再调用真实 `rsync`,而是调用这个 helper 完成:
- `hardlink` 优先的本地树同步
- 失败时回退到 copy
- 支持 `--delete`
`run_cir_replay_matrix.sh` 会顺序执行:
- `ours`
- Routinator
- `rpki-client`
并汇总生成:
- `summary.json`
- `summary.md`
- `detail.md`

View File

@ -0,0 +1,136 @@
#!/usr/bin/env python3
import argparse
import errno
import os
import shutil
from pathlib import Path
def _same_inode(src: Path, dst: Path) -> bool:
try:
src_stat = src.stat()
dst_stat = dst.stat()
except FileNotFoundError:
return False
return (src_stat.st_dev, src_stat.st_ino) == (dst_stat.st_dev, dst_stat.st_ino)
def _remove_path(path: Path) -> None:
if not path.exists() and not path.is_symlink():
return
if path.is_dir() and not path.is_symlink():
shutil.rmtree(path)
else:
path.unlink()
def _prune_empty_dirs(root: Path) -> None:
if not root.exists():
return
for path in sorted((p for p in root.rglob("*") if p.is_dir()), key=lambda p: len(p.parts), reverse=True):
try:
path.rmdir()
except OSError:
pass
def _link_or_copy(src: Path, dst: Path) -> str:
    """Materialize `src` at `dst`: keep an existing hardlink, else try os.link,
    falling back to a metadata-preserving copy.

    Returns one of "reused", "linked", "copied".
    """
    dst.parent.mkdir(parents=True, exist_ok=True)
    if dst.exists() or dst.is_symlink():
        if _same_inode(src, dst):
            return "reused"
        _remove_path(dst)
    try:
        os.link(src, dst)
    except OSError as err:
        # Cross-device, permission, link-limit, and unsupported-FS errors
        # degrade to a copy; anything else is a real failure.
        if err.errno in (errno.EXDEV, errno.EPERM, errno.EMLINK, errno.ENOTSUP, errno.EACCES):
            shutil.copy2(src, dst)
            return "copied"
        raise
    return "linked"
def _file_map(src_arg: str, dest_arg: str) -> tuple[Path, dict[str, Path]]:
    """Build a transfer plan as (dest_root, {dest-relative posix path: source file}).

    Mirrors rsync source semantics: a directory source with a trailing
    separator syncs its *contents*; without one the directory itself is
    nested under the destination as <dest>/<dirname>/...

    Raises FileNotFoundError when the source does not exist.
    """
    # Strip any trailing separator so Path.name works; the original argument
    # string is still consulted below to detect the trailing-slash case.
    src = Path(src_arg.rstrip(os.sep))
    if not src.exists():
        raise FileNotFoundError(src)
    mapping: dict[str, Path] = {}
    if src.is_dir():
        # Trailing separator on the raw argument means "copy contents only".
        copy_contents = src_arg.endswith(os.sep)
        if copy_contents:
            root = src
            for path in root.rglob("*"):
                if path.is_file():
                    mapping[path.relative_to(root).as_posix()] = path
        else:
            root = src
            base = src.name
            for path in root.rglob("*"):
                if path.is_file():
                    # Nest everything under the source directory's own name.
                    rel = Path(base) / path.relative_to(root)
                    mapping[rel.as_posix()] = path
    else:
        # Single-file source.
        dest_path = Path(dest_arg)
        if dest_arg.endswith(os.sep) or dest_path.is_dir():
            mapping[src.name] = src
        else:
            # NOTE(review): the returned root below is Path(dest_arg) even in
            # this file->file branch, and the caller joins the mapping key onto
            # it — which looks like it produces "<dest_arg>/<name>" instead of
            # "<dest_arg>". Confirm whether file->file destinations are ever
            # exercised by the wrapper before relying on this path.
            mapping[dest_path.name] = src
    return Path(dest_arg), mapping
def sync_local_tree(src_arg: str, dst_arg: str, delete: bool) -> dict[str, int]:
    """Mirror src into dst, hard-linking files where possible.

    When delete is true, files under the destination root that are not part
    of the plan are removed first and empty directories are pruned.
    Returns per-outcome counters: files / linked / copied / reused / deleted.
    """
    dst_root, mapping = _file_map(src_arg, dst_arg)
    dst_root.mkdir(parents=True, exist_ok=True)
    deleted = 0
    if delete and dst_root.exists():
        keep = {dst_root / rel for rel in mapping}
        # Deepest entries first so files vanish before their directories.
        stale = sorted(dst_root.rglob("*"), key=lambda p: len(p.parts), reverse=True)
        for entry in stale:
            if entry.is_dir() or entry in keep:
                continue
            _remove_path(entry)
            deleted += 1
        _prune_empty_dirs(dst_root)
    counters = {"linked": 0, "copied": 0, "reused": 0}
    for rel, source in mapping.items():
        counters[_link_or_copy(source, dst_root / rel)] += 1
    return {
        "files": len(mapping),
        "linked": counters["linked"],
        "copied": counters["copied"],
        "reused": counters["reused"],
        "deleted": deleted,
    }
def main() -> int:
    """CLI entry point: hardlink-sync SOURCE into DEST and print a summary."""
    parser = argparse.ArgumentParser(
        description="Sync a local CIR mirror tree using hardlinks when possible."
    )
    parser.add_argument("--delete", action="store_true", help="Delete target files not present in source")
    parser.add_argument("source")
    parser.add_argument("dest")
    opts = parser.parse_args()
    stats = sync_local_tree(opts.source, opts.dest, opts.delete)
    line = (
        "local-link-sync files={files} linked={linked} copied={copied} "
        "reused={reused} deleted={deleted}"
    ).format(**stats)
    print(line)
    return 0
if __name__ == "__main__":
    raise SystemExit(main())

127
scripts/cir/cir-rsync-wrapper Executable file
View File

@ -0,0 +1,127 @@
#!/usr/bin/env python3
import os
import shutil
import sys
from pathlib import Path
from urllib.parse import urlparse
def real_rsync_bin() -> str:
    """Locate the real rsync executable.

    Resolution order: the REAL_RSYNC_BIN environment variable, then
    /usr/bin/rsync, then whatever PATH lookup finds.  Exits with an error
    when none of those yields a binary.
    """
    override = os.environ.get("REAL_RSYNC_BIN")
    if override:
        return override
    system_default = "/usr/bin/rsync"
    if Path(system_default).exists():
        return system_default
    discovered = shutil.which("rsync")
    if discovered:
        return discovered
    raise SystemExit("cir-rsync-wrapper: REAL_RSYNC_BIN is not set and rsync was not found")
def rewrite_arg(arg: str, mirror_root: str | None) -> str:
if not arg.startswith("rsync://"):
return arg
if not mirror_root:
raise SystemExit(
"cir-rsync-wrapper: CIR_MIRROR_ROOT is required when an rsync:// source is present"
)
parsed = urlparse(arg)
if parsed.scheme != "rsync" or not parsed.hostname:
raise SystemExit(f"cir-rsync-wrapper: invalid rsync URI: {arg}")
path = parsed.path.lstrip("/")
local = Path(mirror_root).resolve() / parsed.hostname
if path:
local = local / path
local_str = str(local)
if local.exists() and local.is_dir() and not local_str.endswith("/"):
local_str += "/"
elif arg.endswith("/") and not local_str.endswith("/"):
local_str += "/"
return local_str
def filter_args(args: list[str]) -> list[str]:
    """Rewrite rsync:// arguments to local mirror paths.

    When any rsync:// argument is present, network-only options
    (--address and --contimeout, in both separate-value and =-value forms)
    are dropped since they are meaningless for a local transfer.
    """
    mirror_root = os.environ.get("CIR_MIRROR_ROOT")
    has_remote = any(a.startswith("rsync://") for a in args)
    result: list[str] = []
    idx = 0
    while idx < len(args):
        current = args[idx]
        if has_remote:
            if current in ("--address", "--contimeout"):
                idx += 2  # skip the option and its separate value
                continue
            if current.startswith("--address=") or current.startswith("--contimeout="):
                idx += 1
                continue
        result.append(rewrite_arg(current, mirror_root))
        idx += 1
    return result
def local_link_mode_enabled() -> bool:
    # Truthy spellings of CIR_LOCAL_LINK_MODE that switch on hardlink sync.
    flag = os.environ.get("CIR_LOCAL_LINK_MODE", "").lower()
    return flag in ("1", "true", "yes", "on")
def extract_source_and_dest(args: list[str]) -> tuple[str, str]:
    """Return the (source, dest) positionals from an rsync argument list.

    Options known to take a separate value are skipped together with that
    value; any other dash-prefixed token (including --opt=value forms) is
    skipped on its own.  The last two positionals are source and dest; fewer
    than two positionals is an error.
    """
    value_options = {
        "--timeout",
        "--min-size",
        "--max-size",
        "--include",
        "--exclude",
        "--compare-dest",
    }
    operands: list[str] = []
    idx = 0
    while idx < len(args):
        token = args[idx]
        if token in value_options:
            idx += 2
        elif token.startswith("-"):
            # Covers bare flags as well as --opt=value spellings.
            idx += 1
        else:
            operands.append(token)
            idx += 1
    if len(operands) < 2:
        raise SystemExit("cir-rsync-wrapper: expected source and destination arguments")
    return operands[-2], operands[-1]
def maybe_exec_local_link_sync(args: list[str], rewritten_any: bool) -> None:
    """Replace this process with the hardlink-sync helper when enabled.

    Only acts when an rsync:// source was rewritten and CIR_LOCAL_LINK_MODE
    is on; otherwise returns so the caller can exec the real rsync.
    """
    if not (rewritten_any and local_link_mode_enabled()):
        return
    source, dest = extract_source_and_dest(args)
    if source.startswith("rsync://"):
        raise SystemExit("cir-rsync-wrapper: expected rewritten local source for CIR_LOCAL_LINK_MODE")
    helper = Path(__file__).with_name("cir-local-link-sync.py")
    argv = [sys.executable, str(helper)]
    if "--delete" in args:
        argv.append("--delete")
    argv.append(source)
    argv.append(dest)
    # Never returns on success: the wrapper becomes the helper process.
    os.execv(sys.executable, argv)
def main() -> int:
    """Rewrite argv, then hand off to the link-sync helper or real rsync."""
    args = sys.argv[1:]
    rewritten_any = any(a.startswith("rsync://") for a in args)
    rewritten = filter_args(args)
    maybe_exec_local_link_sync(rewritten, rewritten_any)
    rsync_bin = real_rsync_bin()
    # exec replaces this process; the return below only runs if exec fails
    # to happen at all (it raises OSError on failure, so 127 is defensive).
    os.execv(rsync_bin, [rsync_bin, *rewritten])
    return 127
if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copy a recorded CIR sequence directory from a remote host to a local path.
# Thin wrapper around `rsync -a` over ssh; the trailing slashes on both
# endpoints copy the directory *contents* (no extra nesting under LOCAL_PATH).
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/fetch_cir_sequence_from_remote.sh \
--ssh-target <user@host> \
--remote-path <path> \
--local-path <path>
EOF
}
SSH_TARGET=""
REMOTE_PATH=""
LOCAL_PATH=""
# Two-token option parsing: every flag consumes its following value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --ssh-target) SSH_TARGET="$2"; shift 2 ;;
    --remote-path) REMOTE_PATH="$2"; shift 2 ;;
    --local-path) LOCAL_PATH="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
# All three arguments are required.
[[ -n "$SSH_TARGET" && -n "$REMOTE_PATH" && -n "$LOCAL_PATH" ]] || { usage >&2; exit 2; }
# Ensure the parent of LOCAL_PATH exists; rsync creates LOCAL_PATH itself.
mkdir -p "$(dirname "$LOCAL_PATH")"
rsync -a "$SSH_TARGET:$REMOTE_PATH/" "$LOCAL_PATH/"
echo "done: $LOCAL_PATH"

50
scripts/cir/json_to_vaps_csv.py Executable file
View File

@ -0,0 +1,50 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import csv
import json
from pathlib import Path
def normalize_asn(value: str | int) -> str:
text = str(value).strip().upper()
if text.startswith("AS"):
text = text[2:]
return f"AS{int(text)}"
def main() -> int:
    """Convert an ASPA JSON document into a deterministically sorted CSV.

    Reads --input (expects an "aspas" array of objects with "customer",
    "providers" and "ta" fields), dedupes and numerically sorts each
    provider set, sorts rows by customer ASN, and writes --csv-out.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True, type=Path)
    parser.add_argument("--csv-out", required=True, type=Path)
    opts = parser.parse_args()
    document = json.loads(opts.input.read_text(encoding="utf-8"))
    rows: list[tuple[str, str, str]] = []
    for aspa in document.get("aspas", []):
        unique_providers = {normalize_asn(item) for item in aspa.get("providers", [])}
        ordered = sorted(unique_providers, key=lambda asn: int(asn[2:]))
        customer = normalize_asn(aspa["customer"])
        trust_anchor = str(aspa.get("ta", "")).strip().lower()
        rows.append((customer, ";".join(ordered), trust_anchor))
    # Numeric sort on the customer ASN keeps output stable across runs.
    rows.sort(key=lambda row: (int(row[0][2:]), row[1], row[2]))
    opts.csv_out.parent.mkdir(parents=True, exist_ok=True)
    with opts.csv_out.open("w", encoding="utf-8", newline="") as fh:
        writer = csv.writer(fh)
        writer.writerow(["Customer ASN", "Providers", "Trust Anchor"])
        writer.writerows(rows)
    print(opts.csv_out)
    return 0
if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,84 @@
#!/usr/bin/env bash
# Run the CIR drop-report binary over every step of a recorded sequence.
# Reads <sequence-root>/sequence.json, writes drop/<stepId>/drop.{json,md}
# per step, and aggregates counts into <sequence-root>/drop-summary.json.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_drop_sequence.sh \
--sequence-root <path> \
[--drop-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SEQUENCE_ROOT=""
# Default to the release build of the drop-report tool unless overridden
# via --drop-bin or the DROP_BIN environment variable.
DROP_BIN="${DROP_BIN:-$ROOT_DIR/target/release/cir_drop_report}"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --sequence-root) SEQUENCE_ROOT="$2"; shift 2 ;;
    --drop-bin) DROP_BIN="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
[[ -n "$SEQUENCE_ROOT" ]] || { usage >&2; exit 2; }
# Embedded Python does the actual work: validate that exactly one object
# backend (static tree or raw-store db) is configured, run the drop binary
# once per step, and collect per-step counts into drop-summary.json.
python3 - <<'PY' "$SEQUENCE_ROOT" "$DROP_BIN"
import json
import subprocess
import sys
from pathlib import Path
sequence_root = Path(sys.argv[1]).resolve()
drop_bin = sys.argv[2]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
    raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
summaries = []
for step in sequence["steps"]:
    step_id = step["stepId"]
    out_dir = sequence_root / "drop" / step_id
    out_dir.mkdir(parents=True, exist_ok=True)
    cmd = [
        drop_bin,
        "--cir",
        str(sequence_root / step["cirPath"]),
        "--ccr",
        str(sequence_root / step["ccrPath"]),
        "--report-json",
        str(sequence_root / step["reportPath"]),
        "--json-out",
        str(out_dir / "drop.json"),
        "--md-out",
        str(out_dir / "drop.md"),
    ]
    if static_root is not None:
        cmd.extend(["--static-root", str(static_root)])
    else:
        cmd.extend(["--raw-store-db", str(raw_store_db)])
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if proc.returncode != 0:
        raise SystemExit(
            f"drop report failed for {step_id}: stdout={proc.stdout} stderr={proc.stderr}"
        )
    result = json.loads((out_dir / "drop.json").read_text(encoding="utf-8"))
    summaries.append(
        {
            "stepId": step_id,
            "droppedVrpCount": result["summary"]["droppedVrpCount"],
            "droppedObjectCount": result["summary"]["droppedObjectCount"],
            "reportPath": str(out_dir / "drop.json"),
        }
    )
summary = {"version": 1, "steps": summaries}
(sequence_root / "drop-summary.json").write_text(json.dumps(summary, indent=2), encoding="utf-8")
PY
echo "done: $SEQUENCE_ROOT"

View File

@ -0,0 +1,173 @@
#!/usr/bin/env bash
# Record one full CIR plus one delta CIR from payload-replay archives.
# Runs the rpki binary twice (full snapshot, then base+delta) against a
# shared --cir-static-root, then writes <out-dir>/summary.json describing
# the resulting pair of steps.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_record_full_delta.sh \
--out-dir <path> \
--tal-path <path> \
--ta-path <path> \
--cir-tal-uri <url> \
--payload-replay-archive <path> \
--payload-replay-locks <path> \
--payload-base-archive <path> \
--payload-base-locks <path> \
--payload-delta-archive <path> \
--payload-delta-locks <path> \
[--base-validation-time <rfc3339>] \
[--delta-validation-time <rfc3339>] \
[--max-depth <n>] \
[--max-instances <n>] \
[--rpki-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
OUT_DIR=""
TAL_PATH=""
TA_PATH=""
CIR_TAL_URI=""
PAYLOAD_REPLAY_ARCHIVE=""
PAYLOAD_REPLAY_LOCKS=""
PAYLOAD_BASE_ARCHIVE=""
PAYLOAD_BASE_LOCKS=""
PAYLOAD_DELTA_ARCHIVE=""
PAYLOAD_DELTA_LOCKS=""
BASE_VALIDATION_TIME=""
DELTA_VALIDATION_TIME=""
MAX_DEPTH=0
MAX_INSTANCES=1
RPKI_BIN="${RPKI_BIN:-$ROOT_DIR/target/release/rpki}"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --out-dir) OUT_DIR="$2"; shift 2 ;;
    --tal-path) TAL_PATH="$2"; shift 2 ;;
    --ta-path) TA_PATH="$2"; shift 2 ;;
    --cir-tal-uri) CIR_TAL_URI="$2"; shift 2 ;;
    --payload-replay-archive) PAYLOAD_REPLAY_ARCHIVE="$2"; shift 2 ;;
    --payload-replay-locks) PAYLOAD_REPLAY_LOCKS="$2"; shift 2 ;;
    --payload-base-archive) PAYLOAD_BASE_ARCHIVE="$2"; shift 2 ;;
    --payload-base-locks) PAYLOAD_BASE_LOCKS="$2"; shift 2 ;;
    --payload-delta-archive) PAYLOAD_DELTA_ARCHIVE="$2"; shift 2 ;;
    --payload-delta-locks) PAYLOAD_DELTA_LOCKS="$2"; shift 2 ;;
    --base-validation-time) BASE_VALIDATION_TIME="$2"; shift 2 ;;
    --delta-validation-time) DELTA_VALIDATION_TIME="$2"; shift 2 ;;
    --max-depth) MAX_DEPTH="$2"; shift 2 ;;
    --max-instances) MAX_INSTANCES="$2"; shift 2 ;;
    --rpki-bin) RPKI_BIN="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
# Every archive/locks input is mandatory.
[[ -n "$OUT_DIR" && -n "$TAL_PATH" && -n "$TA_PATH" && -n "$CIR_TAL_URI" && -n "$PAYLOAD_REPLAY_ARCHIVE" && -n "$PAYLOAD_REPLAY_LOCKS" && -n "$PAYLOAD_BASE_ARCHIVE" && -n "$PAYLOAD_BASE_LOCKS" && -n "$PAYLOAD_DELTA_ARCHIVE" && -n "$PAYLOAD_DELTA_LOCKS" ]] || {
  usage >&2
  exit 2
}
# Build the validator on demand when the release binary is missing.
if [[ ! -x "$RPKI_BIN" ]]; then
  (
    cd "$ROOT_DIR"
    cargo build --release --bin rpki
  )
fi
# Read the validationTime field from a locks JSON when the matching
# --*-validation-time flag was not supplied.
resolve_validation_time() {
  local path="$1"
  python3 - <<'PY' "$path"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['validationTime'])
PY
}
if [[ -z "$BASE_VALIDATION_TIME" ]]; then
  BASE_VALIDATION_TIME="$(resolve_validation_time "$PAYLOAD_REPLAY_LOCKS")"
fi
if [[ -z "$DELTA_VALIDATION_TIME" ]]; then
  DELTA_VALIDATION_TIME="$(resolve_validation_time "$PAYLOAD_DELTA_LOCKS")"
fi
# Fresh output tree; the static root is shared by both recording runs.
rm -rf "$OUT_DIR"
mkdir -p "$OUT_DIR/full" "$OUT_DIR/delta-001" "$OUT_DIR/static"
FULL_DB="$OUT_DIR/full/db"
DELTA_DB="$OUT_DIR/delta-001/db"
# Run 1: full snapshot replay, recording input.cir + result.ccr.
"$RPKI_BIN" \
--db "$FULL_DB" \
--tal-path "$TAL_PATH" \
--ta-path "$TA_PATH" \
--payload-replay-archive "$PAYLOAD_REPLAY_ARCHIVE" \
--payload-replay-locks "$PAYLOAD_REPLAY_LOCKS" \
--validation-time "$BASE_VALIDATION_TIME" \
--max-depth "$MAX_DEPTH" \
--max-instances "$MAX_INSTANCES" \
--ccr-out "$OUT_DIR/full/result.ccr" \
--report-json "$OUT_DIR/full/report.json" \
--cir-enable \
--cir-out "$OUT_DIR/full/input.cir" \
--cir-static-root "$OUT_DIR/static" \
--cir-tal-uri "$CIR_TAL_URI" \
>"$OUT_DIR/full/run.stdout.log" 2>"$OUT_DIR/full/run.stderr.log"
# Run 2: base + delta replay over the same static root.
"$RPKI_BIN" \
--db "$DELTA_DB" \
--tal-path "$TAL_PATH" \
--ta-path "$TA_PATH" \
--payload-base-archive "$PAYLOAD_BASE_ARCHIVE" \
--payload-base-locks "$PAYLOAD_BASE_LOCKS" \
--payload-delta-archive "$PAYLOAD_DELTA_ARCHIVE" \
--payload-delta-locks "$PAYLOAD_DELTA_LOCKS" \
--payload-base-validation-time "$BASE_VALIDATION_TIME" \
--validation-time "$DELTA_VALIDATION_TIME" \
--max-depth "$MAX_DEPTH" \
--max-instances "$MAX_INSTANCES" \
--ccr-out "$OUT_DIR/delta-001/result.ccr" \
--report-json "$OUT_DIR/delta-001/report.json" \
--cir-enable \
--cir-out "$OUT_DIR/delta-001/input.cir" \
--cir-static-root "$OUT_DIR/static" \
--cir-tal-uri "$CIR_TAL_URI" \
>"$OUT_DIR/delta-001/run.stdout.log" 2>"$OUT_DIR/delta-001/run.stderr.log"
# Write summary.json describing the recorded full/delta pair.
python3 - <<'PY' "$OUT_DIR" "$BASE_VALIDATION_TIME" "$DELTA_VALIDATION_TIME"
import json
import os
import sys
from pathlib import Path
out = Path(sys.argv[1])
base_validation_time = sys.argv[2]
delta_validation_time = sys.argv[3]
static_files = sum(1 for _ in (out / "static").rglob("*") if _.is_file())
summary = {
    "version": 1,
    "kind": "cir_pair",
    "baseValidationTime": base_validation_time,
    "deltaValidationTime": delta_validation_time,
    "staticRoot": "static",
    "steps": [
        {
            "kind": "full",
            "cirPath": "full/input.cir",
            "ccrPath": "full/result.ccr",
            "reportPath": "full/report.json",
        },
        {
            "kind": "delta",
            "cirPath": "delta-001/input.cir",
            "ccrPath": "delta-001/result.ccr",
            "reportPath": "delta-001/report.json",
            "previous": "full",
        },
    ],
    "staticFileCount": static_files,
}
(out / "summary.json").write_text(json.dumps(summary, indent=2), encoding="utf-8")
PY
echo "done: $OUT_DIR"

View File

@ -0,0 +1,129 @@
#!/usr/bin/env bash
# Drive the single-RIR offline CIR recorder once per requested RIR and
# aggregate the per-RIR step/static-file counts into summary.json/.md.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_record_sequence_multi_rir_offline.sh \
[--bundle-root <path>] \
[--rir <afrinic,apnic,arin,lacnic,ripe>] \
[--delta-count <n>] \
[--full-repo] \
[--out-root <path>] \
[--rpki-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
CASE_INFO="$ROOT_DIR/scripts/payload_replay/multi_rir_case_info.py"
SINGLE_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_record_sequence_offline.sh"
# NOTE(review): machine-specific default; only valid on the original
# author's host — pass --bundle-root everywhere else.
BUNDLE_ROOT="/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3"
RIRS="afrinic,apnic,arin,lacnic,ripe"
DELTA_COUNT=2
FULL_REPO=0
OUT_ROOT="$ROOT_DIR/target/replay/cir_sequence_multi_rir_offline_$(date -u +%Y%m%dT%H%M%SZ)"
RPKI_BIN="${RPKI_BIN:-$ROOT_DIR/target/release/rpki}"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --bundle-root) BUNDLE_ROOT="$2"; shift 2 ;;
    --rir) RIRS="$2"; shift 2 ;;
    --delta-count) DELTA_COUNT="$2"; shift 2 ;;
    --full-repo) FULL_REPO=1; shift 1 ;;
    --out-root) OUT_ROOT="$2"; shift 2 ;;
    --rpki-bin) RPKI_BIN="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
mkdir -p "$OUT_ROOT"
SUMMARY_JSON="$OUT_ROOT/summary.json"
SUMMARY_MD="$OUT_ROOT/summary.md"
IFS=',' read -r -a RIR_ITEMS <<< "$RIRS"
for rir in "${RIR_ITEMS[@]}"; do
  # Resolve per-RIR inputs (tal/ta/archives/locks) from the bundle metadata
  # helper, then pluck each field out of the returned JSON blob.
  CASE_JSON="$(python3 "$CASE_INFO" --bundle-root "$BUNDLE_ROOT" --repo-root "$ROOT_DIR" --rir "$rir")"
  TAL_PATH="$(python3 - <<'PY' "$CASE_JSON"
import json,sys
print(json.loads(sys.argv[1])['tal_path'])
PY
)"
  TA_PATH="$(python3 - <<'PY' "$CASE_JSON"
import json,sys
print(json.loads(sys.argv[1])['ta_path'])
PY
)"
  BASE_ARCHIVE="$(python3 - <<'PY' "$CASE_JSON"
import json,sys
print(json.loads(sys.argv[1])['base_archive'])
PY
)"
  BASE_LOCKS="$(python3 - <<'PY' "$CASE_JSON"
import json,sys
print(json.loads(sys.argv[1])['base_locks'])
PY
)"
  DELTA_ARCHIVE="$(python3 - <<'PY' "$CASE_JSON"
import json,sys
print(json.loads(sys.argv[1])['delta_archive'])
PY
)"
  DELTA_LOCKS="$(python3 - <<'PY' "$CASE_JSON"
import json,sys
print(json.loads(sys.argv[1])['delta_locks'])
PY
)"
  OUT_DIR="$OUT_ROOT/$rir"
  # Assemble and invoke the single-RIR recorder for this RIR.
  args=(
    "$SINGLE_SCRIPT"
    --out-dir "$OUT_DIR" \
    --tal-path "$TAL_PATH" \
    --ta-path "$TA_PATH" \
    --cir-tal-uri "https://example.test/$rir.tal" \
    --payload-replay-archive "$BASE_ARCHIVE" \
    --payload-replay-locks "$BASE_LOCKS" \
    --payload-base-archive "$BASE_ARCHIVE" \
    --payload-base-locks "$BASE_LOCKS" \
    --payload-delta-archive "$DELTA_ARCHIVE" \
    --payload-delta-locks "$DELTA_LOCKS" \
    --delta-count "$DELTA_COUNT" \
    --rpki-bin "$RPKI_BIN"
  )
  if [[ "$FULL_REPO" -ne 1 ]]; then
    args+=(--max-depth 0 --max-instances 1)
  else
    args+=(--full-repo)
  fi
  "${args[@]}"
done
# Aggregate each RIR's sequence.json / summary.json into a combined
# JSON + Markdown report at the output root.
python3 - <<'PY' "$OUT_ROOT" "$RIRS" "$SUMMARY_JSON" "$SUMMARY_MD"
import json, sys
from pathlib import Path
out_root = Path(sys.argv[1])
rirs = [item for item in sys.argv[2].split(',') if item]
summary_json = Path(sys.argv[3])
summary_md = Path(sys.argv[4])
items = []
for rir in rirs:
    root = out_root / rir
    seq = json.loads((root / "sequence.json").read_text(encoding="utf-8"))
    summ = json.loads((root / "summary.json").read_text(encoding="utf-8"))
    items.append({
        "rir": rir,
        "root": str(root),
        "stepCount": len(seq["steps"]),
        "staticFileCount": summ["staticFileCount"],
    })
summary = {"version": 1, "rirs": items}
summary_json.write_text(json.dumps(summary, indent=2), encoding="utf-8")
lines = ["# Multi-RIR Offline CIR Sequence Summary", ""]
for item in items:
    lines.append(f"- `{item['rir']}`: `stepCount={item['stepCount']}` `staticFileCount={item['staticFileCount']}` `root={item['root']}`")
summary_md.write_text("\n".join(lines) + "\n", encoding="utf-8")
PY
echo "done: $OUT_ROOT"

View File

@ -0,0 +1,206 @@
#!/usr/bin/env bash
# Record a CIR sequence (one full step plus N identical delta steps) from
# offline payload-replay archives.  All steps share one static root; the
# script emits sequence.json (step graph) and summary.json (counts).
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_record_sequence_offline.sh \
--out-dir <path> \
--tal-path <path> \
--ta-path <path> \
--cir-tal-uri <url> \
--payload-replay-archive <path> \
--payload-replay-locks <path> \
--payload-base-archive <path> \
--payload-base-locks <path> \
--payload-delta-archive <path> \
--payload-delta-locks <path> \
[--delta-count <n>] \
[--base-validation-time <rfc3339>] \
[--delta-validation-time <rfc3339>] \
[--full-repo] \
[--max-depth <n>] \
[--max-instances <n>] \
[--rpki-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
OUT_DIR=""
TAL_PATH=""
TA_PATH=""
CIR_TAL_URI=""
PAYLOAD_REPLAY_ARCHIVE=""
PAYLOAD_REPLAY_LOCKS=""
PAYLOAD_BASE_ARCHIVE=""
PAYLOAD_BASE_LOCKS=""
PAYLOAD_DELTA_ARCHIVE=""
PAYLOAD_DELTA_LOCKS=""
BASE_VALIDATION_TIME=""
DELTA_VALIDATION_TIME=""
DELTA_COUNT=2
FULL_REPO=0
MAX_DEPTH=0
MAX_INSTANCES=1
RPKI_BIN="${RPKI_BIN:-$ROOT_DIR/target/release/rpki}"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --out-dir) OUT_DIR="$2"; shift 2 ;;
    --tal-path) TAL_PATH="$2"; shift 2 ;;
    --ta-path) TA_PATH="$2"; shift 2 ;;
    --cir-tal-uri) CIR_TAL_URI="$2"; shift 2 ;;
    --payload-replay-archive) PAYLOAD_REPLAY_ARCHIVE="$2"; shift 2 ;;
    --payload-replay-locks) PAYLOAD_REPLAY_LOCKS="$2"; shift 2 ;;
    --payload-base-archive) PAYLOAD_BASE_ARCHIVE="$2"; shift 2 ;;
    --payload-base-locks) PAYLOAD_BASE_LOCKS="$2"; shift 2 ;;
    --payload-delta-archive) PAYLOAD_DELTA_ARCHIVE="$2"; shift 2 ;;
    --payload-delta-locks) PAYLOAD_DELTA_LOCKS="$2"; shift 2 ;;
    --base-validation-time) BASE_VALIDATION_TIME="$2"; shift 2 ;;
    --delta-validation-time) DELTA_VALIDATION_TIME="$2"; shift 2 ;;
    --delta-count) DELTA_COUNT="$2"; shift 2 ;;
    --full-repo) FULL_REPO=1; shift 1 ;;
    --max-depth) MAX_DEPTH="$2"; shift 2 ;;
    --max-instances) MAX_INSTANCES="$2"; shift 2 ;;
    --rpki-bin) RPKI_BIN="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
# Every archive/locks input is mandatory.
[[ -n "$OUT_DIR" && -n "$TAL_PATH" && -n "$TA_PATH" && -n "$CIR_TAL_URI" && -n "$PAYLOAD_REPLAY_ARCHIVE" && -n "$PAYLOAD_REPLAY_LOCKS" && -n "$PAYLOAD_BASE_ARCHIVE" && -n "$PAYLOAD_BASE_LOCKS" && -n "$PAYLOAD_DELTA_ARCHIVE" && -n "$PAYLOAD_DELTA_LOCKS" ]] || {
  usage >&2
  exit 2
}
# Build the validator on demand when the release binary is missing.
if [[ ! -x "$RPKI_BIN" ]]; then
  (
    cd "$ROOT_DIR"
    cargo build --release --bin rpki
  )
fi
# Read validationTime from a locks JSON when not given on the command line.
resolve_validation_time() {
  local path="$1"
  python3 - <<'PY' "$path"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['validationTime'])
PY
}
if [[ -z "$BASE_VALIDATION_TIME" ]]; then
  BASE_VALIDATION_TIME="$(resolve_validation_time "$PAYLOAD_REPLAY_LOCKS")"
fi
if [[ -z "$DELTA_VALIDATION_TIME" ]]; then
  DELTA_VALIDATION_TIME="$(resolve_validation_time "$PAYLOAD_DELTA_LOCKS")"
fi
# Fresh output tree; the static root is shared by every step.
rm -rf "$OUT_DIR"
mkdir -p "$OUT_DIR/static" "$OUT_DIR/full"
# Run the rpki binary once for a single step.  Common CIR/output flags are
# fixed here; the caller appends the payload flags specific to the step.
run_step() {
  local kind="$1"
  local step_dir="$2"
  local db_dir="$3"
  shift 3
  mkdir -p "$step_dir"
  local -a cmd=(
    "$RPKI_BIN"
    --db "$db_dir" \
    --tal-path "$TAL_PATH" \
    --ta-path "$TA_PATH" \
    --ccr-out "$step_dir/result.ccr" \
    --report-json "$step_dir/report.json" \
    --cir-enable \
    --cir-out "$step_dir/input.cir" \
    --cir-static-root "$OUT_DIR/static" \
    --cir-tal-uri "$CIR_TAL_URI"
  )
  # Unless --full-repo was given, restrict traversal depth/instances.
  if [[ "$FULL_REPO" -ne 1 ]]; then
    cmd+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES")
  fi
  cmd+=("$@")
  "${cmd[@]}" >"$step_dir/run.stdout.log" 2>"$step_dir/run.stderr.log"
}
# Step 1: full snapshot replay.
run_step \
full \
"$OUT_DIR/full" \
"$OUT_DIR/full/db" \
--payload-replay-archive "$PAYLOAD_REPLAY_ARCHIVE" \
--payload-replay-locks "$PAYLOAD_REPLAY_LOCKS" \
--validation-time "$BASE_VALIDATION_TIME"
# Steps 2..N+1: delta replays, each a fresh db over the same base+delta
# archives (the inputs are identical for every delta step).
for idx in $(seq 1 "$DELTA_COUNT"); do
  step_id="$(printf 'delta-%03d' "$idx")"
  run_step \
  delta \
  "$OUT_DIR/$step_id" \
  "$OUT_DIR/$step_id/db" \
  --payload-base-archive "$PAYLOAD_BASE_ARCHIVE" \
  --payload-base-locks "$PAYLOAD_BASE_LOCKS" \
  --payload-delta-archive "$PAYLOAD_DELTA_ARCHIVE" \
  --payload-delta-locks "$PAYLOAD_DELTA_LOCKS" \
  --payload-base-validation-time "$BASE_VALIDATION_TIME" \
  --validation-time "$DELTA_VALIDATION_TIME"
done
# Emit sequence.json (linked step list) and summary.json (counts).
python3 - <<'PY' "$OUT_DIR" "$BASE_VALIDATION_TIME" "$DELTA_VALIDATION_TIME" "$DELTA_COUNT"
import json
import sys
from pathlib import Path
out = Path(sys.argv[1])
base_validation_time = sys.argv[2]
delta_validation_time = sys.argv[3]
delta_count = int(sys.argv[4])
steps = [
    {
        "stepId": "full",
        "kind": "full",
        "validationTime": base_validation_time,
        "cirPath": "full/input.cir",
        "ccrPath": "full/result.ccr",
        "reportPath": "full/report.json",
        "previousStepId": None,
    }
]
previous = "full"
for idx in range(1, delta_count + 1):
    step_id = f"delta-{idx:03d}"
    steps.append(
        {
            "stepId": step_id,
            "kind": "delta",
            "validationTime": delta_validation_time,
            "cirPath": f"{step_id}/input.cir",
            "ccrPath": f"{step_id}/result.ccr",
            "reportPath": f"{step_id}/report.json",
            "previousStepId": previous,
        }
    )
    previous = step_id
summary = {
    "version": 1,
    "kind": "cir_sequence_offline",
    "staticRoot": "static",
    "steps": steps,
}
(out / "sequence.json").write_text(json.dumps(summary, indent=2), encoding="utf-8")
(out / "summary.json").write_text(
    json.dumps(
        {
            "version": 1,
            "stepCount": len(steps),
            "staticFileCount": sum(1 for p in (out / "static").rglob("*") if p.is_file()),
        },
        indent=2,
    ),
    encoding="utf-8",
)
PY
echo "done: $OUT_DIR"

View File

@ -0,0 +1,244 @@
#!/usr/bin/env bash
# Record a live CIR sequence (1 full + N delta steps) for one RIR on a
# remote host: sync the repo and prebuilt binary over, then run the rpki
# binary remotely via an inline bash script, collecting timing and a
# sequence.json / summary.json per run.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_record_sequence_remote.sh \
--rir <name> \
--remote-root <path> \
[--ssh-target <user@host>] \
[--out-subdir <path>] \
[--delta-count <n>] \
[--sleep-secs <n>] \
[--full-repo] \
[--max-depth <n>] \
[--max-instances <n>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# NOTE(review): hard-coded default remote host; override with --ssh-target
# or the SSH_TARGET environment variable.
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
RIR=""
REMOTE_ROOT=""
OUT_SUBDIR=""
DELTA_COUNT=2
SLEEP_SECS=30
FULL_REPO=0
MAX_DEPTH=0
MAX_INSTANCES=1
while [[ $# -gt 0 ]]; do
  case "$1" in
    --rir) RIR="$2"; shift 2 ;;
    --remote-root) REMOTE_ROOT="$2"; shift 2 ;;
    --ssh-target) SSH_TARGET="$2"; shift 2 ;;
    --out-subdir) OUT_SUBDIR="$2"; shift 2 ;;
    --delta-count) DELTA_COUNT="$2"; shift 2 ;;
    --sleep-secs) SLEEP_SECS="$2"; shift 2 ;;
    --full-repo) FULL_REPO=1; shift 1 ;;
    --max-depth) MAX_DEPTH="$2"; shift 2 ;;
    --max-instances) MAX_INSTANCES="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
[[ -n "$RIR" && -n "$REMOTE_ROOT" ]] || { usage >&2; exit 2; }
# Map the RIR name to its in-repo TAL and trust-anchor fixture paths.
case "$RIR" in
afrinic) TAL_REL="tests/fixtures/tal/afrinic.tal"; TA_REL="tests/fixtures/ta/afrinic-ta.cer" ;;
apnic) TAL_REL="tests/fixtures/tal/apnic-rfc7730-https.tal"; TA_REL="tests/fixtures/ta/apnic-ta.cer" ;;
arin) TAL_REL="tests/fixtures/tal/arin.tal"; TA_REL="tests/fixtures/ta/arin-ta.cer" ;;
lacnic) TAL_REL="tests/fixtures/tal/lacnic.tal"; TA_REL="tests/fixtures/ta/lacnic-ta.cer" ;;
ripe) TAL_REL="tests/fixtures/tal/ripe-ncc.tal"; TA_REL="tests/fixtures/ta/ripe-ncc-ta.cer" ;;
*) echo "unsupported rir: $RIR" >&2; exit 2 ;;
esac
# Sync working tree (minus build artifacts and VCS metadata) plus the
# locally built release binary to the remote host.
rsync -a --delete \
--exclude target \
--exclude .git \
"$ROOT_DIR/" "$SSH_TARGET:$REMOTE_ROOT/"
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT/target/release'"
rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_ROOT/target/release/"
# Everything below the heredoc runs on the remote host; parameters are
# passed through the ssh environment rather than interpolated.
ssh "$SSH_TARGET" \
RIR="$RIR" \
REMOTE_ROOT="$REMOTE_ROOT" \
OUT_SUBDIR="$OUT_SUBDIR" \
DELTA_COUNT="$DELTA_COUNT" \
SLEEP_SECS="$SLEEP_SECS" \
FULL_REPO="$FULL_REPO" \
MAX_DEPTH="$MAX_DEPTH" \
MAX_INSTANCES="$MAX_INSTANCES" \
TAL_REL="$TAL_REL" \
TA_REL="$TA_REL" \
'bash -s' <<'EOS'
set -euo pipefail
cd "$REMOTE_ROOT"
if [[ -n "${OUT_SUBDIR}" ]]; then
OUT="${OUT_SUBDIR}"
else
OUT="target/replay/cir_sequence_remote_${RIR}_$(date -u +%Y%m%dT%H%M%SZ)"
fi
mkdir -p "$OUT"
DB="$OUT/work-db"
RAW_STORE_DB="$OUT/raw-store.db"
# Step rows are accumulated as TSV and turned into sequence.json later.
ROWS="$OUT/.sequence_rows.tsv"
: > "$ROWS"
# Persist wall-clock timing for one step as a small JSON file.
write_step_timing() {
local path="$1"
local start_ms="$2"
local end_ms="$3"
local started_at="$4"
local finished_at="$5"
python3 - <<'PY' "$path" "$start_ms" "$end_ms" "$started_at" "$finished_at"
import json, sys
path, start_ms, end_ms, started_at, finished_at = sys.argv[1:]
start_ms = int(start_ms)
end_ms = int(end_ms)
with open(path, "w", encoding="utf-8") as fh:
    json.dump(
        {
            "durationMs": end_ms - start_ms,
            "startedAt": started_at,
            "finishedAt": finished_at,
        },
        fh,
        indent=2,
    )
PY
}
# Run one live recording step; artifact names are prefixed with the
# step's UTC start timestamp so successive runs never collide.
run_step() {
local step_id="$1"
local kind="$2"
local previous_step_id="$3"
shift 3
local started_at_iso started_at_ms finished_at_iso finished_at_ms prefix
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
prefix="${started_at_iso}-test"
local cir_out="$OUT/${prefix}.cir"
local ccr_out="$OUT/${prefix}.ccr"
local report_out="$OUT/${prefix}.report.json"
local timing_out="$OUT/${prefix}.timing.json"
local stdout_out="$OUT/${prefix}.stdout.log"
local stderr_out="$OUT/${prefix}.stderr.log"
local -a cmd=(
target/release/rpki
--db "$DB"
--raw-store-db "$RAW_STORE_DB"
--tal-path "$TAL_REL"
--ta-path "$TA_REL"
--ccr-out "$ccr_out"
--report-json "$report_out"
--cir-enable
--cir-out "$cir_out"
--cir-tal-uri "https://example.test/${RIR}.tal"
)
if [[ "$FULL_REPO" -ne 1 ]]; then
cmd+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES")
fi
cmd+=("$@")
env RPKI_PROGRESS_LOG=1 "${cmd[@]}" >"$stdout_out" 2>"$stderr_out"
finished_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
finished_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
write_step_timing "$timing_out" "$started_at_ms" "$finished_at_ms" "$started_at_iso" "$finished_at_iso"
local validation_time
validation_time="$(python3 - <<'PY' "$report_out"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['meta']['validation_time_rfc3339_utc'])
PY
)"
printf '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' \
"$step_id" \
"$kind" \
"$validation_time" \
"$(basename "$cir_out")" \
"$(basename "$ccr_out")" \
"$(basename "$report_out")" \
"$(basename "$timing_out")" \
"$(basename "$stdout_out")" \
"$(basename "$stderr_out")" >> "$ROWS"
}
run_step "full" "full" ""
prev="full"
for idx in $(seq 1 "$DELTA_COUNT"); do
sleep "$SLEEP_SECS"
step="$(printf 'delta-%03d' "$idx")"
run_step "$step" "delta" "$prev"
prev="$step"
done
# Turn the TSV rows into sequence.json and a timing summary.json.
python3 - <<'PY' "$OUT" "$ROWS" "$RIR"
import json, sys
from pathlib import Path
out = Path(sys.argv[1])
rows = Path(sys.argv[2]).read_text(encoding='utf-8').splitlines()
rir = sys.argv[3]
steps = []
for idx, row in enumerate(rows):
    step_id, kind, validation_time, cir_name, ccr_name, report_name, timing_name, stdout_name, stderr_name = row.split('\t')
    steps.append({
        "stepId": step_id,
        "kind": kind,
        "validationTime": validation_time,
        "cirPath": cir_name,
        "ccrPath": ccr_name,
        "reportPath": report_name,
        "timingPath": timing_name,
        "stdoutLogPath": stdout_name,
        "stderrLogPath": stderr_name,
        "artifactPrefix": cir_name[:-4],  # strip .cir
        "previousStepId": None if idx == 0 else steps[idx - 1]["stepId"],
    })
(out / "sequence.json").write_text(
    json.dumps({"version": 1, "rawStoreDbPath": "raw-store.db", "steps": steps}, indent=2),
    encoding="utf-8",
)
summary = {
    "version": 1,
    "rir": rir,
    "stepCount": len(steps),
    "steps": [],
}
for step in steps:
    timing = json.loads((out / step["timingPath"]).read_text(encoding="utf-8"))
    summary["steps"].append({
        "stepId": step["stepId"],
        "kind": step["kind"],
        "validationTime": step["validationTime"],
        "artifactPrefix": step["artifactPrefix"],
        **timing,
    })
(out / "summary.json").write_text(json.dumps(summary, indent=2), encoding="utf-8")
PY
rm -f "$ROWS"
echo "$OUT"
EOS

View File

@ -0,0 +1,72 @@
#!/usr/bin/env bash
# Run the single-RIR remote recorder once per requested RIR, all under one
# timestamped remote output root; prints that root when finished.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_record_sequence_remote_multi_rir.sh \
--remote-root <path> \
[--rir <afrinic,apnic,arin,lacnic,ripe>] \
[--ssh-target <user@host>] \
[--out-subdir-root <path>] \
[--delta-count <n>] \
[--sleep-secs <n>] \
[--full-repo] \
[--max-depth <n>] \
[--max-instances <n>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# NOTE(review): hard-coded default remote host; override with --ssh-target
# or the SSH_TARGET environment variable.
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT=""
RIRS="afrinic,apnic,arin,lacnic,ripe"
OUT_SUBDIR_ROOT=""
DELTA_COUNT=2
SLEEP_SECS=30
FULL_REPO=0
MAX_DEPTH=0
MAX_INSTANCES=1
SINGLE="$ROOT_DIR/scripts/cir/run_cir_record_sequence_remote.sh"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --remote-root) REMOTE_ROOT="$2"; shift 2 ;;
    --rir) RIRS="$2"; shift 2 ;;
    --ssh-target) SSH_TARGET="$2"; shift 2 ;;
    --out-subdir-root) OUT_SUBDIR_ROOT="$2"; shift 2 ;;
    --delta-count) DELTA_COUNT="$2"; shift 2 ;;
    --sleep-secs) SLEEP_SECS="$2"; shift 2 ;;
    --full-repo) FULL_REPO=1; shift 1 ;;
    --max-depth) MAX_DEPTH="$2"; shift 2 ;;
    --max-instances) MAX_INSTANCES="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
[[ -n "$REMOTE_ROOT" ]] || { usage >&2; exit 2; }
# Default the shared output root to a UTC-timestamped directory so
# repeated invocations never collide.
if [[ -z "$OUT_SUBDIR_ROOT" ]]; then
OUT_SUBDIR_ROOT="target/replay/cir_sequence_remote_multi_rir_$(date -u +%Y%m%dT%H%M%SZ)"
fi
IFS=',' read -r -a ITEMS <<< "$RIRS"
for rir in "${ITEMS[@]}"; do
  # Each RIR records into its own subdirectory of the shared root.
  args=(
    "$SINGLE"
    --rir "$rir" \
    --remote-root "$REMOTE_ROOT" \
    --ssh-target "$SSH_TARGET" \
    --out-subdir "$OUT_SUBDIR_ROOT/$rir" \
    --delta-count "$DELTA_COUNT" \
    --sleep-secs "$SLEEP_SECS" \
  )
  if [[ "$FULL_REPO" -eq 1 ]]; then
    args+=(--full-repo)
  else
    args+=(--max-depth "$MAX_DEPTH" --max-instances "$MAX_INSTANCES")
  fi
  "${args[@]}"
done
echo "$OUT_SUBDIR_ROOT"

View File

@ -0,0 +1,118 @@
#!/usr/bin/env bash
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_record_sequence_ta_only_multi_rir.sh \
[--rir <afrinic,apnic,arin,lacnic,ripe>] \
[--delta-count <n>] \
[--out-root <path>] \
[--rpki-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
HELPER_BIN="${HELPER_BIN:-$ROOT_DIR/target/release/cir_ta_only_fixture}"
MATERIALIZE_BIN="${MATERIALIZE_BIN:-$ROOT_DIR/target/release/cir_materialize}"
EXTRACT_BIN="${EXTRACT_BIN:-$ROOT_DIR/target/release/cir_extract_inputs}"
WRAPPER="$ROOT_DIR/scripts/cir/cir-rsync-wrapper"
RIRS="afrinic,apnic,arin,lacnic,ripe"
DELTA_COUNT=2
OUT_ROOT="$ROOT_DIR/target/replay/cir_sequence_multi_rir_ta_only_$(date -u +%Y%m%dT%H%M%SZ)"
RPKI_BIN="${RPKI_BIN:-$ROOT_DIR/target/release/rpki}"
while [[ $# -gt 0 ]]; do
case "$1" in
--rir) RIRS="$2"; shift 2 ;;
--delta-count) DELTA_COUNT="$2"; shift 2 ;;
--out-root) OUT_ROOT="$2"; shift 2 ;;
--rpki-bin) RPKI_BIN="$2"; shift 2 ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage; exit 2 ;;
esac
done
if [[ ! -x "$HELPER_BIN" ]]; then
(
cd "$ROOT_DIR"
cargo build --release --bin cir_ta_only_fixture --bin rpki --bin cir_materialize --bin cir_extract_inputs
)
fi
case_paths() {
case "$1" in
afrinic) echo "tests/fixtures/tal/afrinic.tal tests/fixtures/ta/afrinic-ta.cer" ;;
apnic) echo "tests/fixtures/tal/apnic-rfc7730-https.tal tests/fixtures/ta/apnic-ta.cer" ;;
arin) echo "tests/fixtures/tal/arin.tal tests/fixtures/ta/arin-ta.cer" ;;
lacnic) echo "tests/fixtures/tal/lacnic.tal tests/fixtures/ta/lacnic-ta.cer" ;;
ripe) echo "tests/fixtures/tal/ripe-ncc.tal tests/fixtures/ta/ripe-ncc-ta.cer" ;;
*) return 1 ;;
esac
}
mkdir -p "$OUT_ROOT"
# Split the comma-separated RIR list into an array.
IFS=',' read -r -a ITEMS <<< "$RIRS"
for rir in "${ITEMS[@]}"; do
# case_paths prints "<tal> <ta>"; read splits it into the two variables.
# An unknown RIR makes read see no input and abort the script via `set -e`.
read -r tal_rel ta_rel < <(case_paths "$rir")
rir_root="$OUT_ROOT/$rir"
mkdir -p "$rir_root/full" "$rir_root/static"
# Record the TA-only fixture: produces the full-step CIR plus the static
# object pool shared by every step of this RIR's sequence.
"$HELPER_BIN" \
--tal-path "$ROOT_DIR/$tal_rel" \
--ta-path "$ROOT_DIR/$ta_rel" \
--tal-uri "https://example.test/$rir.tal" \
--validation-time "2026-04-09T00:00:00Z" \
--cir-out "$rir_root/full/input.cir" \
--static-root "$rir_root/static"
# Extract TAL files/metadata and materialize the rsync-servable mirror
# that the replay below reads through the wrapper.
"$EXTRACT_BIN" --cir "$rir_root/full/input.cir" --tals-dir "$rir_root/.tmp/tals" --meta-json "$rir_root/.tmp/meta.json"
"$MATERIALIZE_BIN" --cir "$rir_root/full/input.cir" --static-root "$rir_root/static" --mirror-root "$rir_root/.tmp/mirror"
# Single trust anchor per RIR: only the first TAL from the recording is used.
FIRST_TAL="$(python3 - <<'PY' "$rir_root/.tmp/meta.json"
import json,sys
print(json.load(open(sys.argv[1]))["talFiles"][0]["path"])
PY
)"
# Replay environment: serve objects from the materialized mirror through
# the rsync wrapper, hard-linking instead of copying where possible.
export CIR_MIRROR_ROOT="$rir_root/.tmp/mirror"
export REAL_RSYNC_BIN=/usr/bin/rsync
export CIR_LOCAL_LINK_MODE=1
"$RPKI_BIN" \
--db "$rir_root/full/db" \
--tal-path "$FIRST_TAL" \
--disable-rrdp \
--rsync-command "$WRAPPER" \
--validation-time "2026-04-09T00:00:00Z" \
--ccr-out "$rir_root/full/result.ccr" \
--report-json "$rir_root/full/report.json" >/dev/null 2>&1
# Delta steps are currently placeholders: each is a verbatim copy of the
# full step, kept so the sequence layout and replay drivers can already
# iterate over "full + N deltas".
for idx in $(seq 1 "$DELTA_COUNT"); do
step="$(printf 'delta-%03d' "$idx")"
mkdir -p "$rir_root/$step"
cp "$rir_root/full/input.cir" "$rir_root/$step/input.cir"
cp "$rir_root/full/result.ccr" "$rir_root/$step/result.ccr"
cp "$rir_root/full/report.json" "$rir_root/$step/report.json"
done
# Write the per-RIR sequence.json (full step plus chained delta steps,
# each pointing at its predecessor) and a minimal per-RIR summary.json.
python3 - <<'PY' "$rir_root" "$DELTA_COUNT"
import json, sys
from pathlib import Path

root = Path(sys.argv[1]); delta_count = int(sys.argv[2])
steps = [{"stepId":"full","kind":"full","validationTime":"2026-04-09T00:00:00Z","cirPath":"full/input.cir","ccrPath":"full/result.ccr","reportPath":"full/report.json","previousStepId":None}]
prev = "full"
for i in range(1, delta_count + 1):
    step = f"delta-{i:03d}"
    steps.append({"stepId":step,"kind":"delta","validationTime":"2026-04-09T00:00:00Z","cirPath":f"{step}/input.cir","ccrPath":f"{step}/result.ccr","reportPath":f"{step}/report.json","previousStepId":prev})
    prev = step
(root/"sequence.json").write_text(json.dumps({"version":1,"staticRoot":"static","steps":steps}, indent=2), encoding="utf-8")
(root/"summary.json").write_text(json.dumps({"version":1,"stepCount":len(steps)}, indent=2), encoding="utf-8")
PY
done
# Top-level summary across all recorded RIRs.
python3 - <<'PY' "$OUT_ROOT" "$RIRS"
import json, sys
from pathlib import Path

root = Path(sys.argv[1]); rirs = [x for x in sys.argv[2].split(',') if x]
items=[]
for rir in rirs:
    seq=json.loads((root/rir/'sequence.json').read_text())
    items.append({"rir":rir,"stepCount":len(seq['steps'])})
(root/'summary.json').write_text(json.dumps({"version":1,"rirs":items}, indent=2), encoding='utf-8')
PY
echo "done: $OUT_ROOT"

View File

@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Mirror the working tree plus prebuilt release binaries to a remote host,
# then launch the multi-RIR TA-only record sequence there over SSH.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_record_sequence_ta_only_remote_multi_rir.sh \
--remote-root <path> \
[--ssh-target <user@host>] \
[--rir <afrinic,apnic,arin,lacnic,ripe>] \
[--delta-count <n>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"   # default lab host; override with --ssh-target
REMOTE_ROOT=""
RIRS="afrinic,apnic,arin,lacnic,ripe"
DELTA_COUNT=2
while [[ $# -gt 0 ]]; do
    case "$1" in
        --remote-root) REMOTE_ROOT="$2"; shift 2 ;;
        --ssh-target) SSH_TARGET="$2"; shift 2 ;;
        --rir) RIRS="$2"; shift 2 ;;
        --delta-count) DELTA_COUNT="$2"; shift 2 ;;
        -h|--help) usage; exit 0 ;;
        *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
    esac
done
[[ -n "$REMOTE_ROOT" ]] || { usage >&2; exit 2; }
# Fail fast with a clear message if a required binary has not been built
# locally; otherwise the rsync below fails with a confusing error.
for bin in rpki cir_ta_only_fixture cir_materialize cir_extract_inputs; do
    [[ -x "$ROOT_DIR/target/release/$bin" ]] || {
        echo "missing local binary: $ROOT_DIR/target/release/$bin (run: cargo build --release)" >&2
        exit 1
    }
done
# Sync sources (without build artifacts and git history) to the remote root.
rsync -a --delete \
    --exclude target \
    --exclude .git \
    "$ROOT_DIR/" "$SSH_TARGET:$REMOTE_ROOT/"
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT/target/release'"
# Ship the prebuilt binaries so the remote side does not need to compile.
# NOTE(review): assumes local and remote have compatible OS/arch — confirm.
for bin in rpki cir_ta_only_fixture cir_materialize cir_extract_inputs; do
    rsync -a "$ROOT_DIR/target/release/$bin" "$SSH_TARGET:$REMOTE_ROOT/target/release/"
done
# Run the recorder remotely under a timestamped output directory.
# NOTE(review): REMOTE_ROOT/RIRS/DELTA_COUNT are interpolated into the remote
# command line; values containing spaces or quotes are not supported.
ssh "$SSH_TARGET" "bash -lc '
set -euo pipefail
cd $REMOTE_ROOT
OUT=target/replay/cir_sequence_remote_ta_only_\$(date -u +%Y%m%dT%H%M%SZ)
./scripts/cir/run_cir_record_sequence_ta_only_multi_rir.sh --rir $RIRS --delta-count $DELTA_COUNT --out-root \"\$OUT\"
echo \"\$OUT\"
'"

View File

@ -0,0 +1,286 @@
#!/usr/bin/env bash
# Run one CIR replay through all three participants (ours, routinator,
# rpki-client), time each run, and summarize VRP/VAP agreement against the
# reference CCR.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_replay_matrix.sh \
--cir <path> \
--static-root <path> \
--out-dir <path> \
--reference-ccr <path> \
--rpki-client-build-dir <path> \
[--keep-db] \
[--rpki-bin <path>] \
[--routinator-root <path>] \
[--routinator-bin <path>] \
[--real-rsync-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Required arguments (validated after parsing).
CIR=""
STATIC_ROOT=""
OUT_DIR=""
REFERENCE_CCR=""
RPKI_CLIENT_BUILD_DIR=""
KEEP_DB=0
# Participant binary locations; environment variables provide defaults.
RPKI_BIN="${RPKI_BIN:-$ROOT_DIR/target/release/rpki}"
ROUTINATOR_ROOT="${ROUTINATOR_ROOT:-/home/yuyr/dev/rust_playground/routinator}"
ROUTINATOR_BIN="${ROUTINATOR_BIN:-$ROUTINATOR_ROOT/target/debug/routinator}"
REAL_RSYNC_BIN="${REAL_RSYNC_BIN:-/usr/bin/rsync}"
# Per-participant replay driver scripts.
OURS_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_ours.sh"
ROUTINATOR_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_routinator.sh"
RPKI_CLIENT_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_rpki_client.sh"
while [[ $# -gt 0 ]]; do
case "$1" in
--cir) CIR="$2"; shift 2 ;;
--static-root) STATIC_ROOT="$2"; shift 2 ;;
--out-dir) OUT_DIR="$2"; shift 2 ;;
--reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
--rpki-client-build-dir) RPKI_CLIENT_BUILD_DIR="$2"; shift 2 ;;
--keep-db) KEEP_DB=1; shift ;;
--rpki-bin) RPKI_BIN="$2"; shift 2 ;;
--routinator-root) ROUTINATOR_ROOT="$2"; shift 2 ;;
--routinator-bin) ROUTINATOR_BIN="$2"; shift 2 ;;
--real-rsync-bin) REAL_RSYNC_BIN="$2"; shift 2 ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage; exit 2 ;;
esac
done
# All five required arguments must be present.
[[ -n "$CIR" && -n "$STATIC_ROOT" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" && -n "$RPKI_CLIENT_BUILD_DIR" ]] || {
usage >&2
exit 2
}
mkdir -p "$OUT_DIR"
# run_with_timing SUMMARY_PATH TIMING_PATH CMD...
# Runs CMD, measures its wall-clock duration, and writes TIMING_PATH as JSON:
#   {"exitCode": <int>, "durationMs": <int>, "compare": <SUMMARY_PATH contents or null>}
# Returns CMD's exit status.
run_with_timing() {
    local summary_path="$1"
    local timing_path="$2"
    shift 2
    # Remove any stale summary left over from a previous run, so a command
    # that fails before writing its summary does not get the old result
    # silently attached as "compare".
    rm -f "$summary_path"
    local start end status
    start="$(python3 - <<'PY'
import time
print(time.perf_counter_ns())
PY
)"
    # Capture failure without tripping `set -e`.
    if "$@"; then
        status=0
    else
        status=$?
    fi
    end="$(python3 - <<'PY'
import time
print(time.perf_counter_ns())
PY
)"
    # Merge duration, exit code, and (if present) the compare summary.
    python3 - <<'PY' "$summary_path" "$timing_path" "$status" "$start" "$end"
import json, sys

summary_path, timing_path, status, start, end = sys.argv[1:]
duration_ms = max(0, (int(end) - int(start)) // 1_000_000)
data = {"exitCode": int(status), "durationMs": duration_ms}
try:
    with open(summary_path, "r", encoding="utf-8") as f:
        data["compare"] = json.load(f)
except FileNotFoundError:
    data["compare"] = None
with open(timing_path, "w", encoding="utf-8") as f:
    json.dump(data, f, indent=2)
PY
    return "$status"
}
# Per-participant output directories.
OURS_OUT="$OUT_DIR/ours"
ROUTINATOR_OUT="$OUT_DIR/routinator"
RPKI_CLIENT_OUT="$OUT_DIR/rpki-client"
mkdir -p "$OURS_OUT" "$ROUTINATOR_OUT" "$RPKI_CLIENT_OUT"
# Assemble each participant's replay command; all three consume the same
# CIR/static pool and the same reference CCR.
ours_cmd=(
"$OURS_SCRIPT"
--cir "$CIR"
--static-root "$STATIC_ROOT"
--out-dir "$OURS_OUT"
--reference-ccr "$REFERENCE_CCR"
--rpki-bin "$RPKI_BIN"
--real-rsync-bin "$REAL_RSYNC_BIN"
)
routinator_cmd=(
"$ROUTINATOR_SCRIPT"
--cir "$CIR"
--static-root "$STATIC_ROOT"
--out-dir "$ROUTINATOR_OUT"
--reference-ccr "$REFERENCE_CCR"
--routinator-root "$ROUTINATOR_ROOT"
--routinator-bin "$ROUTINATOR_BIN"
--real-rsync-bin "$REAL_RSYNC_BIN"
)
rpki_client_cmd=(
"$RPKI_CLIENT_SCRIPT"
--cir "$CIR"
--static-root "$STATIC_ROOT"
--out-dir "$RPKI_CLIENT_OUT"
--reference-ccr "$REFERENCE_CCR"
--build-dir "$RPKI_CLIENT_BUILD_DIR"
--real-rsync-bin "$REAL_RSYNC_BIN"
)
# Propagate --keep-db (preserve temp dirs/DBs for debugging) to all drivers.
if [[ "$KEEP_DB" -eq 1 ]]; then
ours_cmd+=(--keep-db)
routinator_cmd+=(--keep-db)
rpki_client_cmd+=(--keep-db)
fi
# Run all three, recording failures without aborting (`set -e` is active),
# so the summary below always covers every participant.
ours_status=0
routinator_status=0
rpki_client_status=0
if run_with_timing "$OURS_OUT/compare-summary.json" "$OURS_OUT/timing.json" "${ours_cmd[@]}"; then
:
else
ours_status=$?
fi
if run_with_timing "$ROUTINATOR_OUT/compare-summary.json" "$ROUTINATOR_OUT/timing.json" "${routinator_cmd[@]}"; then
:
else
routinator_status=$?
fi
if run_with_timing "$RPKI_CLIENT_OUT/compare-summary.json" "$RPKI_CLIENT_OUT/timing.json" "${rpki_client_cmd[@]}"; then
:
else
rpki_client_status=$?
fi
SUMMARY_JSON="$OUT_DIR/summary.json"
SUMMARY_MD="$OUT_DIR/summary.md"
DETAIL_MD="$OUT_DIR/detail.md"
# Aggregate timing + compare results into machine-readable JSON and two
# Markdown reports (summary table + per-participant detail).
python3 - <<'PY' \
"$CIR" \
"$STATIC_ROOT" \
"$REFERENCE_CCR" \
"$OURS_OUT" \
"$ROUTINATOR_OUT" \
"$RPKI_CLIENT_OUT" \
"$SUMMARY_JSON" \
"$SUMMARY_MD" \
"$DETAIL_MD"
import json
import sys
from pathlib import Path

cir_path, static_root, reference_ccr, ours_out, routinator_out, rpki_client_out, summary_json, summary_md, detail_md = sys.argv[1:]
participants = []
all_match = True
for name, out_dir in [
    ("ours", ours_out),
    ("routinator", routinator_out),
    ("rpki-client", rpki_client_out),
]:
    out = Path(out_dir)
    timing = json.loads((out / "timing.json").read_text(encoding="utf-8"))
    # "compare" is None when the driver failed before writing its summary.
    compare = timing.get("compare") or {}
    vrps = compare.get("vrps") or {}
    vaps = compare.get("vaps") or {}
    participant = {
        "name": name,
        "outDir": str(out),
        "tmpRoot": str(out / ".tmp"),
        "mirrorPath": str(out / ".tmp" / "mirror"),
        "timingPath": str(out / "timing.json"),
        "summaryPath": str(out / "compare-summary.json"),
        "exitCode": timing["exitCode"],
        "durationMs": timing["durationMs"],
        "vrps": vrps,
        "vaps": vaps,
        # A participant "matches" only if it exited 0 AND both views agree.
        "match": bool(vrps.get("match")) and bool(vaps.get("match")) and timing["exitCode"] == 0,
        "logPaths": [str(path) for path in sorted(out.glob("*.log"))],
    }
    participants.append(participant)
    all_match = all_match and participant["match"]
summary = {
    "cirPath": cir_path,
    "staticRoot": static_root,
    "referenceCcr": reference_ccr,
    "participants": participants,
    "allMatch": all_match,
}
Path(summary_json).write_text(json.dumps(summary, indent=2), encoding="utf-8")
# Markdown summary: one table row per participant.
lines = [
    "# CIR Replay Matrix Summary",
    "",
    f"- `cir`: `{cir_path}`",
    f"- `static_root`: `{static_root}`",
    f"- `reference_ccr`: `{reference_ccr}`",
    f"- `all_match`: `{all_match}`",
    "",
    "| Participant | Exit | Duration (ms) | VRP actual/ref | VRP match | VAP actual/ref | VAP match | Log |",
    "| --- | ---: | ---: | --- | --- | --- | --- | --- |",
]
for participant in participants:
    vrps = participant["vrps"] or {}
    vaps = participant["vaps"] or {}
    log_path = participant["logPaths"][0] if participant["logPaths"] else ""
    lines.append(
        "| {name} | {exit_code} | {duration_ms} | {vrp_actual}/{vrp_ref} | {vrp_match} | {vap_actual}/{vap_ref} | {vap_match} | `{log_path}` |".format(
            name=participant["name"],
            exit_code=participant["exitCode"],
            duration_ms=participant["durationMs"],
            vrp_actual=vrps.get("actual", "-"),
            vrp_ref=vrps.get("reference", "-"),
            vrp_match=vrps.get("match", False),
            vap_actual=vaps.get("actual", "-"),
            vap_ref=vaps.get("reference", "-"),
            vap_match=vaps.get("match", False),
            log_path=log_path,
        )
    )
Path(summary_md).write_text("\n".join(lines) + "\n", encoding="utf-8")
# Markdown detail: full paths plus up-to-20 example diffs per direction
# (as recorded by each driver's compare summary).
detail_lines = [
    "# CIR Replay Matrix Detail",
    "",
]
for participant in participants:
    vrps = participant["vrps"] or {}
    vaps = participant["vaps"] or {}
    detail_lines.extend([
        f"## {participant['name']}",
        f"- `exit_code`: `{participant['exitCode']}`",
        f"- `duration_ms`: `{participant['durationMs']}`",
        f"- `out_dir`: `{participant['outDir']}`",
        f"- `tmp_root`: `{participant['tmpRoot']}`",
        f"- `mirror_path`: `{participant['mirrorPath']}`",
        f"- `summary_path`: `{participant['summaryPath']}`",
        f"- `timing_path`: `{participant['timingPath']}`",
        f"- `log_paths`: `{', '.join(participant['logPaths'])}`",
        f"- `vrps`: `actual={vrps.get('actual', '-')}` `reference={vrps.get('reference', '-')}` `match={vrps.get('match', False)}`",
        f"- `vaps`: `actual={vaps.get('actual', '-')}` `reference={vaps.get('reference', '-')}` `match={vaps.get('match', False)}`",
        f"- `vrps.only_in_actual`: `{vrps.get('only_in_actual', [])}`",
        f"- `vrps.only_in_reference`: `{vrps.get('only_in_reference', [])}`",
        f"- `vaps.only_in_actual`: `{vaps.get('only_in_actual', [])}`",
        f"- `vaps.only_in_reference`: `{vaps.get('only_in_reference', [])}`",
        "",
    ])
Path(detail_md).write_text("\n".join(detail_lines), encoding="utf-8")
PY
# Exit non-zero if any participant failed...
if [[ "$ours_status" -ne 0 || "$routinator_status" -ne 0 || "$rpki_client_status" -ne 0 ]]; then
exit 1
fi
# ...or if any participant's output diverged from the reference.
all_match="$(python3 - <<'PY' "$SUMMARY_JSON"
import json,sys
print("true" if json.load(open(sys.argv[1]))["allMatch"] else "false")
PY
)"
if [[ "$all_match" != "true" ]]; then
exit 1
fi
echo "done: $OUT_DIR"

View File

@ -0,0 +1,162 @@
#!/usr/bin/env bash
# Replay a recorded CIR through our `rpki` binary and compare the resulting
# CCR (projected to VRP/VAP CSV views) against a reference CCR.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_replay_ours.sh \
--cir <path> \
[--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \
--reference-ccr <path> \
[--keep-db] \
[--rpki-bin <path>] \
[--real-rsync-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Required arguments.
CIR=""
STATIC_ROOT=""
RAW_STORE_DB=""
OUT_DIR=""
REFERENCE_CCR=""
KEEP_DB=0
# Tool locations (built on demand further below).
RPKI_BIN="$ROOT_DIR/target/release/rpki"
CIR_MATERIALIZE_BIN="$ROOT_DIR/target/release/cir_materialize"
CIR_EXTRACT_INPUTS_BIN="$ROOT_DIR/target/release/cir_extract_inputs"
CCR_TO_COMPARE_VIEWS_BIN="$ROOT_DIR/target/release/ccr_to_compare_views"
REAL_RSYNC_BIN="${REAL_RSYNC_BIN:-/usr/bin/rsync}"
WRAPPER="$ROOT_DIR/scripts/cir/cir-rsync-wrapper"
while [[ $# -gt 0 ]]; do
    case "$1" in
        --cir) CIR="$2"; shift 2 ;;
        --static-root) STATIC_ROOT="$2"; shift 2 ;;
        --raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
        --out-dir) OUT_DIR="$2"; shift 2 ;;
        --reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
        --keep-db) KEEP_DB=1; shift ;;
        --rpki-bin) RPKI_BIN="$2"; shift 2 ;;
        --real-rsync-bin) REAL_RSYNC_BIN="$2"; shift 2 ;;
        -h|--help) usage; exit 0 ;;
        *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
    esac
done
[[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || {
    usage >&2
    exit 2
}
# Exactly one object-store backend (static pool XOR raw-store DB) must be
# selected.
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR"
# Build any missing helper binaries in one cargo invocation.
if [[ ! -x "$RPKI_BIN" || ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then
(
cd "$ROOT_DIR"
cargo build --release --bin rpki --bin cir_materialize --bin cir_extract_inputs --bin ccr_to_compare_views
)
fi
# Scratch layout (removed at the end unless --keep-db).
TMP_ROOT="$OUT_DIR/.tmp"
TALS_DIR="$TMP_ROOT/tals"
META_JSON="$TMP_ROOT/meta.json"
MIRROR_ROOT="$TMP_ROOT/mirror"
DB_DIR="$TMP_ROOT/work-db"
# Outputs that survive in $OUT_DIR.
ACTUAL_CCR="$OUT_DIR/actual.ccr"
ACTUAL_REPORT="$OUT_DIR/report.json"
ACTUAL_VRPS="$OUT_DIR/actual-vrps.csv"
ACTUAL_VAPS="$OUT_DIR/actual-vaps.csv"
REF_VRPS="$OUT_DIR/reference-vrps.csv"
REF_VAPS="$OUT_DIR/reference-vaps.csv"
COMPARE_JSON="$OUT_DIR/compare-summary.json"
RUN_LOG="$OUT_DIR/run.log"
rm -rf "$TMP_ROOT"
mkdir -p "$TMP_ROOT"
# Pull TAL files and metadata (validation time, TAL list) out of the CIR.
"$CIR_EXTRACT_INPUTS_BIN" --cir "$CIR" --tals-dir "$TALS_DIR" --meta-json "$META_JSON"
# Materialize the recorded objects into an rsync-servable mirror, backed by
# either the static pool or the raw-store DB (exactly one was selected).
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db)
fi
"${materialize_cmd[@]}"
# Replay must evaluate at the recorded capture instant.
VALIDATION_TIME="$(python3 - <<'PY' "$META_JSON"
import json,sys
print(json.load(open(sys.argv[1]))["validationTime"])
PY
)"
# Single-TA replay: only the first TAL from the recording is used.
FIRST_TAL="$(python3 - <<'PY' "$META_JSON"
import json,sys
print(json.load(open(sys.argv[1]))["talFiles"][0]["path"])
PY
)"
# The rsync wrapper resolves requests inside this mirror; absolute path so
# it stays valid regardless of the validator's working directory.
export CIR_MIRROR_ROOT="$(python3 - <<'PY' "$MIRROR_ROOT"
from pathlib import Path
import sys
print(Path(sys.argv[1]).resolve())
PY
)"
export REAL_RSYNC_BIN="$REAL_RSYNC_BIN"
export CIR_LOCAL_LINK_MODE=1
# Run our validator (rsync only; RRDP disabled), producing the actual CCR.
"$RPKI_BIN" \
--db "$DB_DIR" \
--tal-path "$FIRST_TAL" \
--disable-rrdp \
--rsync-command "$WRAPPER" \
--validation-time "$VALIDATION_TIME" \
--ccr-out "$ACTUAL_CCR" \
--report-json "$ACTUAL_REPORT" \
>"$RUN_LOG" 2>&1
# Project both CCRs into comparable VRP/VAP CSV views.
"$CCR_TO_COMPARE_VIEWS_BIN" --ccr "$ACTUAL_CCR" --vrps-out "$ACTUAL_VRPS" --vaps-out "$ACTUAL_VAPS" --trust-anchor unknown
"$CCR_TO_COMPARE_VIEWS_BIN" --ccr "$REFERENCE_CCR" --vrps-out "$REF_VRPS" --vaps-out "$REF_VAPS" --trust-anchor unknown
# Set-compare the views; record at most 20 example diffs per direction.
python3 - <<'PY' "$ACTUAL_VRPS" "$REF_VRPS" "$ACTUAL_VAPS" "$REF_VAPS" "$COMPARE_JSON"
import csv, json, sys

def rows(path):
    # Drop the CSV header; compare data rows as tuples.
    with open(path, newline="") as f:
        return list(csv.reader(f))[1:]

actual_vrps = {tuple(r) for r in rows(sys.argv[1])}
ref_vrps = {tuple(r) for r in rows(sys.argv[2])}
actual_vaps = {tuple(r) for r in rows(sys.argv[3])}
ref_vaps = {tuple(r) for r in rows(sys.argv[4])}
summary = {
    "vrps": {
        "actual": len(actual_vrps),
        "reference": len(ref_vrps),
        "only_in_actual": sorted(actual_vrps - ref_vrps)[:20],
        "only_in_reference": sorted(ref_vrps - actual_vrps)[:20],
        "match": actual_vrps == ref_vrps,
    },
    "vaps": {
        "actual": len(actual_vaps),
        "reference": len(ref_vaps),
        "only_in_actual": sorted(actual_vaps - ref_vaps)[:20],
        "only_in_reference": sorted(ref_vaps - actual_vaps)[:20],
        "match": actual_vaps == ref_vaps,
    }
}
with open(sys.argv[5], "w") as f:
    json.dump(summary, f, indent=2)
PY
# Clean scratch state unless the caller asked to keep it for debugging.
if [[ "$KEEP_DB" -ne 1 ]]; then
rm -rf "$TMP_ROOT"
fi
echo "done: $OUT_DIR"

View File

@ -0,0 +1,221 @@
#!/usr/bin/env bash
# Replay a recorded CIR through routinator (clock pinned via libfaketime)
# and compare its VRP/VAP output against a reference CCR.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_replay_routinator.sh \
--cir <path> \
[--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \
--reference-ccr <path> \
[--keep-db] \
[--routinator-root <path>] \
[--routinator-bin <path>] \
[--real-rsync-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# NOTE(review): RPKI_DEV_ROOT is never referenced below — candidate for removal.
RPKI_DEV_ROOT="${RPKI_DEV_ROOT:-$ROOT_DIR}"
# Required arguments.
CIR=""
STATIC_ROOT=""
RAW_STORE_DB=""
OUT_DIR=""
REFERENCE_CCR=""
KEEP_DB=0
# Routinator and helper tool locations (env-overridable).
# NOTE(review): ROUTINATOR_BIN's default is computed from ROUTINATOR_ROOT at
# this point, so a later --routinator-root alone does not move the binary —
# callers should pass --routinator-bin as well (the matrix script does).
ROUTINATOR_ROOT="${ROUTINATOR_ROOT:-/home/yuyr/dev/rust_playground/routinator}"
ROUTINATOR_BIN="${ROUTINATOR_BIN:-$ROUTINATOR_ROOT/target/debug/routinator}"
REAL_RSYNC_BIN="${REAL_RSYNC_BIN:-/usr/bin/rsync}"
CIR_MATERIALIZE_BIN="$ROOT_DIR/target/release/cir_materialize"
CIR_EXTRACT_INPUTS_BIN="$ROOT_DIR/target/release/cir_extract_inputs"
CCR_TO_COMPARE_VIEWS_BIN="$ROOT_DIR/target/release/ccr_to_compare_views"
WRAPPER="$ROOT_DIR/scripts/cir/cir-rsync-wrapper"
JSON_TO_VAPS="$ROOT_DIR/scripts/cir/json_to_vaps_csv.py"
# libfaketime pins routinator's clock to the recorded validation time.
FAKETIME_LIB="${FAKETIME_LIB:-$ROOT_DIR/target/tools/faketime_pkg/extracted/libfaketime/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1}"
while [[ $# -gt 0 ]]; do
    case "$1" in
        --cir) CIR="$2"; shift 2 ;;
        --static-root) STATIC_ROOT="$2"; shift 2 ;;
        --raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
        --out-dir) OUT_DIR="$2"; shift 2 ;;
        --reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
        --keep-db) KEEP_DB=1; shift ;;
        --routinator-root) ROUTINATOR_ROOT="$2"; shift 2 ;;
        --routinator-bin) ROUTINATOR_BIN="$2"; shift 2 ;;
        --real-rsync-bin) REAL_RSYNC_BIN="$2"; shift 2 ;;
        -h|--help) usage; exit 0 ;;
        *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
    esac
done
[[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" ]] || {
    usage >&2
    exit 2
}
# Exactly one object-store backend must be selected.
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR"
# Build helper binaries on demand.
if [[ ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then
    (
        cd "$ROOT_DIR"
        cargo build --release --bin cir_materialize --bin cir_extract_inputs --bin ccr_to_compare_views
    )
fi
# Scratch layout (removed at the end unless --keep-db).
TMP_ROOT="$OUT_DIR/.tmp"
TALS_DIR="$TMP_ROOT/tals"
META_JSON="$TMP_ROOT/meta.json"
MIRROR_ROOT="$TMP_ROOT/mirror"
WORK_REPO="$TMP_ROOT/repository"
# Persistent outputs in $OUT_DIR.
RUN_LOG="$OUT_DIR/routinator.log"
ACTUAL_VRPS="$OUT_DIR/actual-vrps.csv"
ACTUAL_VAPS_JSON="$OUT_DIR/actual-vaps.json"
ACTUAL_VAPS="$OUT_DIR/actual-vaps.csv"
REF_VRPS="$OUT_DIR/reference-vrps.csv"
REF_VAPS="$OUT_DIR/reference-vaps.csv"
SUMMARY_JSON="$OUT_DIR/compare-summary.json"
rm -rf "$TMP_ROOT"
mkdir -p "$TMP_ROOT"
"$CIR_EXTRACT_INPUTS_BIN" --cir "$CIR" --tals-dir "$TALS_DIR" --meta-json "$META_JSON"
# Rewrite each extracted TAL for rsync-only replay: keep only the rsync://
# URIs, then the base64 key block that follows the first blank line.
python3 - <<'PY' "$TALS_DIR"
from pathlib import Path
import sys

for tal in Path(sys.argv[1]).glob("*.tal"):
    lines = tal.read_text(encoding="utf-8").splitlines()
    rsync_uris = [line for line in lines if line.startswith("rsync://")]
    base64_lines = []
    seen_sep = False
    for line in lines:
        if seen_sep:
            if line.strip():
                base64_lines.append(line)
        elif line.strip() == "":
            seen_sep = True
    # NOTE(review): assumes every TAL has at least one rsync URI and a blank
    # separator line before the key block — confirm for all recorded TALs.
    tal.write_text("\n".join(rsync_uris) + "\n\n" + "\n".join(base64_lines) + "\n", encoding="utf-8")
PY
# Materialize the recorded objects into the rsync-servable mirror.
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db)
fi
"${materialize_cmd[@]}"
# Recorded validation time and first (only) TAL from the CIR metadata.
VALIDATION_TIME="$(python3 - <<'PY' "$META_JSON"
import json,sys
print(json.load(open(sys.argv[1]))["validationTime"])
PY
)"
FIRST_TAL="$(python3 - <<'PY' "$META_JSON"
import json,sys
print(json.load(open(sys.argv[1]))["talFiles"][0]["path"])
PY
)"
# Trust-anchor name (TAL basename) labels the reference views below.
TA_NAME="$(basename "$FIRST_TAL" .tal)"
# Routinator is run under libfaketime, so the recorded instant must be
# converted to a fixed UNIX epoch.
FAKE_EPOCH="$(python3 - <<'PY' "$VALIDATION_TIME"
from datetime import datetime, timezone
import sys

dt = datetime.fromisoformat(sys.argv[1].replace("Z", "+00:00")).astimezone(timezone.utc)
print(int(dt.timestamp()))
PY
)"
# Replay environment for the rsync wrapper (absolute mirror path).
export CIR_MIRROR_ROOT="$(python3 - <<'PY' "$MIRROR_ROOT"
from pathlib import Path
import sys

print(Path(sys.argv[1]).resolve())
PY
)"
export REAL_RSYNC_BIN="$REAL_RSYNC_BIN"
export CIR_LOCAL_LINK_MODE=1
# Pass 1: populate routinator's repository cache from the mirror. `|| true`
# keeps the script going if routinator exits non-zero on partial failures.
env \
LD_PRELOAD="$FAKETIME_LIB" \
FAKETIME_FMT=%s \
FAKETIME="$FAKE_EPOCH" \
FAKETIME_DONT_FAKE_MONOTONIC=1 \
"$ROUTINATOR_BIN" \
--repository-dir "$WORK_REPO" \
--disable-rrdp \
--rsync-command "$WRAPPER" \
--no-rir-tals \
--extra-tals-dir "$TALS_DIR" \
--enable-aspa \
update --complete >"$RUN_LOG" 2>&1 || true
# Pass 2: emit VRPs as CSV from the already-updated cache (--noupdate).
env \
LD_PRELOAD="$FAKETIME_LIB" \
FAKETIME_FMT=%s \
FAKETIME="$FAKE_EPOCH" \
FAKETIME_DONT_FAKE_MONOTONIC=1 \
"$ROUTINATOR_BIN" \
--repository-dir "$WORK_REPO" \
--disable-rrdp \
--rsync-command "$WRAPPER" \
--no-rir-tals \
--extra-tals-dir "$TALS_DIR" \
--enable-aspa \
vrps --noupdate -o "$ACTUAL_VRPS" >>"$RUN_LOG" 2>&1
# Pass 3: emit the JSON view, from which the VAP (ASPA) CSV is extracted.
env \
LD_PRELOAD="$FAKETIME_LIB" \
FAKETIME_FMT=%s \
FAKETIME="$FAKE_EPOCH" \
FAKETIME_DONT_FAKE_MONOTONIC=1 \
"$ROUTINATOR_BIN" \
--repository-dir "$WORK_REPO" \
--disable-rrdp \
--rsync-command "$WRAPPER" \
--no-rir-tals \
--extra-tals-dir "$TALS_DIR" \
--enable-aspa \
vrps --noupdate --format json -o "$ACTUAL_VAPS_JSON" >>"$RUN_LOG" 2>&1
python3 "$JSON_TO_VAPS" --input "$ACTUAL_VAPS_JSON" --csv-out "$ACTUAL_VAPS"
# Project the reference CCR into the same CSV views for comparison.
"$CCR_TO_COMPARE_VIEWS_BIN" --ccr "$REFERENCE_CCR" --vrps-out "$REF_VRPS" --vaps-out "$REF_VAPS" --trust-anchor "$TA_NAME"
# Set-compare actual vs reference; record up to 20 example diffs per side.
python3 - <<'PY' "$ACTUAL_VRPS" "$REF_VRPS" "$ACTUAL_VAPS" "$REF_VAPS" "$SUMMARY_JSON"
import csv, json, sys

def rows(path):
    # Drop the CSV header; compare data rows as tuples.
    with open(path, newline="") as f:
        return list(csv.reader(f))[1:]

actual_vrps = {tuple(r) for r in rows(sys.argv[1])}
ref_vrps = {tuple(r) for r in rows(sys.argv[2])}
actual_vaps = {tuple(r) for r in rows(sys.argv[3])}
ref_vaps = {tuple(r) for r in rows(sys.argv[4])}
summary = {
    "vrps": {
        "actual": len(actual_vrps),
        "reference": len(ref_vrps),
        "match": actual_vrps == ref_vrps,
        "only_in_actual": sorted(actual_vrps - ref_vrps)[:20],
        "only_in_reference": sorted(ref_vrps - actual_vrps)[:20],
    },
    "vaps": {
        "actual": len(actual_vaps),
        "reference": len(ref_vaps),
        "match": actual_vaps == ref_vaps,
        "only_in_actual": sorted(actual_vaps - ref_vaps)[:20],
        "only_in_reference": sorted(ref_vaps - actual_vaps)[:20],
    }
}
with open(sys.argv[5], "w") as f:
    json.dump(summary, f, indent=2)
PY
# Clean scratch state unless the caller asked to keep it.
if [[ "$KEEP_DB" -ne 1 ]]; then
rm -rf "$TMP_ROOT"
fi
echo "done: $OUT_DIR"

View File

@ -0,0 +1,195 @@
#!/usr/bin/env bash
# Replay a recorded CIR through OpenBSD rpki-client and compare its output
# CCR (projected to VRP/VAP CSV views) against a reference CCR.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_replay_rpki_client.sh \
--cir <path> \
[--static-root <path> | --raw-store-db <path>] \
--out-dir <path> \
--reference-ccr <path> \
--build-dir <path> \
[--keep-db] \
[--real-rsync-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Required arguments.
CIR=""
STATIC_ROOT=""
RAW_STORE_DB=""
OUT_DIR=""
REFERENCE_CCR=""
BUILD_DIR=""
KEEP_DB=0
# Helper tool locations (built on demand below).
REAL_RSYNC_BIN="${REAL_RSYNC_BIN:-/usr/bin/rsync}"
CIR_MATERIALIZE_BIN="$ROOT_DIR/target/release/cir_materialize"
CIR_EXTRACT_INPUTS_BIN="$ROOT_DIR/target/release/cir_extract_inputs"
CCR_TO_COMPARE_VIEWS_BIN="$ROOT_DIR/target/release/ccr_to_compare_views"
WRAPPER="$ROOT_DIR/scripts/cir/cir-rsync-wrapper"
while [[ $# -gt 0 ]]; do
    case "$1" in
        --cir) CIR="$2"; shift 2 ;;
        --static-root) STATIC_ROOT="$2"; shift 2 ;;
        --raw-store-db) RAW_STORE_DB="$2"; shift 2 ;;
        --out-dir) OUT_DIR="$2"; shift 2 ;;
        --reference-ccr) REFERENCE_CCR="$2"; shift 2 ;;
        --build-dir) BUILD_DIR="$2"; shift 2 ;;
        --keep-db) KEEP_DB=1; shift ;;
        --real-rsync-bin) REAL_RSYNC_BIN="$2"; shift 2 ;;
        -h|--help) usage; exit 0 ;;
        *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
    esac
done
[[ -n "$CIR" && -n "$OUT_DIR" && -n "$REFERENCE_CCR" && -n "$BUILD_DIR" ]] || {
    usage >&2
    exit 2
}
# Exactly one object-store backend must be selected.
backend_count=0
[[ -n "$STATIC_ROOT" ]] && backend_count=$((backend_count+1))
[[ -n "$RAW_STORE_DB" ]] && backend_count=$((backend_count+1))
[[ "$backend_count" -eq 1 ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR"
# Build helper binaries on demand.
if [[ ! -x "$CIR_MATERIALIZE_BIN" || ! -x "$CIR_EXTRACT_INPUTS_BIN" || ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then
    (
        cd "$ROOT_DIR"
        cargo build --release --bin cir_materialize --bin cir_extract_inputs --bin ccr_to_compare_views
    )
fi
# Scratch layout (removed at the end unless --keep-db).
TMP_ROOT="$OUT_DIR/.tmp"
TALS_DIR="$TMP_ROOT/tals"
META_JSON="$TMP_ROOT/meta.json"
MIRROR_ROOT="$TMP_ROOT/mirror"
CACHE_DIR="$TMP_ROOT/cache"
OUT_CCR_DIR="$TMP_ROOT/out"
# Persistent outputs in $OUT_DIR.
RUN_LOG="$OUT_DIR/rpki-client.log"
ACTUAL_VRPS="$OUT_DIR/actual-vrps.csv"
ACTUAL_VAPS="$OUT_DIR/actual-vaps.csv"
ACTUAL_VAPS_META="$OUT_DIR/actual-vaps-meta.json"
ACTUAL_VRPS_META="$OUT_DIR/actual-vrps-meta.json"
REF_VRPS="$OUT_DIR/reference-vrps.csv"
REF_VAPS="$OUT_DIR/reference-vaps.csv"
SUMMARY_JSON="$OUT_DIR/compare-summary.json"
rm -rf "$TMP_ROOT"
mkdir -p "$TMP_ROOT"
"$CIR_EXTRACT_INPUTS_BIN" --cir "$CIR" --tals-dir "$TALS_DIR" --meta-json "$META_JSON"
# Rewrite each extracted TAL for rsync-only replay: keep only the rsync://
# URIs, then the base64 key block that follows the first blank line.
python3 - <<'PY' "$TALS_DIR"
from pathlib import Path
import sys

for tal in Path(sys.argv[1]).glob("*.tal"):
    lines = tal.read_text(encoding="utf-8").splitlines()
    rsync_uris = [line for line in lines if line.startswith("rsync://")]
    base64_lines = []
    seen_sep = False
    for line in lines:
        if seen_sep:
            if line.strip():
                base64_lines.append(line)
        elif line.strip() == "":
            seen_sep = True
    # NOTE(review): assumes every TAL has at least one rsync URI and a blank
    # separator line before the key block — confirm for all recorded TALs.
    tal.write_text("\n".join(rsync_uris) + "\n\n" + "\n".join(base64_lines) + "\n", encoding="utf-8")
PY
# Materialize the recorded objects into the rsync-servable mirror.
materialize_cmd=("$CIR_MATERIALIZE_BIN" --cir "$CIR" --mirror-root "$MIRROR_ROOT")
if [[ -n "$STATIC_ROOT" ]]; then
materialize_cmd+=(--static-root "$STATIC_ROOT")
else
materialize_cmd+=(--raw-store-db "$RAW_STORE_DB")
fi
if [[ "$KEEP_DB" -eq 1 ]]; then
materialize_cmd+=(--keep-db)
fi
"${materialize_cmd[@]}"
# rpki-client takes the evaluation time as a UNIX epoch (-P below).
VALIDATION_EPOCH="$(python3 - <<'PY' "$META_JSON"
from datetime import datetime, timezone
import json, sys

vt = json.load(open(sys.argv[1]))["validationTime"]
dt = datetime.fromisoformat(vt.replace("Z", "+00:00")).astimezone(timezone.utc)
print(int(dt.timestamp()))
PY
)"
# Single trust anchor: first TAL from the recording; its basename labels
# the views.
FIRST_TAL="$(python3 - <<'PY' "$META_JSON"
import json,sys
print(json.load(open(sys.argv[1]))["talFiles"][0]["path"])
PY
)"
TA_NAME="$(basename "$FIRST_TAL" .tal)"
# Replay environment for the rsync wrapper (absolute mirror path).
export CIR_MIRROR_ROOT="$(python3 - <<'PY' "$MIRROR_ROOT"
from pathlib import Path
import sys

print(Path(sys.argv[1]).resolve())
PY
)"
export REAL_RSYNC_BIN="$REAL_RSYNC_BIN"
export CIR_LOCAL_LINK_MODE=1
mkdir -p "$CACHE_DIR" "$OUT_CCR_DIR"
# Run rpki-client: -R rsync-only, -e custom rsync program (our wrapper),
# -P fixed evaluation epoch, -t TAL, -d cache dir, last arg = output dir.
"$BUILD_DIR/src/rpki-client" \
-R \
-e "$WRAPPER" \
-P "$VALIDATION_EPOCH" \
-t "$FIRST_TAL" \
-d "$CACHE_DIR" \
"$OUT_CCR_DIR" >"$RUN_LOG" 2>&1
# Project the produced CCR into comparable VRP/VAP CSV views.
"$CCR_TO_COMPARE_VIEWS_BIN" \
--ccr "$OUT_CCR_DIR/rpki.ccr" \
--vrps-out "$ACTUAL_VRPS" \
--vaps-out "$ACTUAL_VAPS" \
--trust-anchor "$TA_NAME"
# Record plain row counts alongside the CSVs (header excluded).
python3 - <<'PY' "$ACTUAL_VRPS" "$ACTUAL_VAPS" "$ACTUAL_VRPS_META" "$ACTUAL_VAPS_META"
import csv, json, sys

def count_rows(path):
    with open(path, newline="") as f:
        rows = list(csv.reader(f))
    return max(len(rows) - 1, 0)

json.dump({"count": count_rows(sys.argv[1])}, open(sys.argv[3], "w"), indent=2)
json.dump({"count": count_rows(sys.argv[2])}, open(sys.argv[4], "w"), indent=2)
PY
# Project the reference CCR the same way, then set-compare both views,
# recording up to 20 example diffs per direction.
"$CCR_TO_COMPARE_VIEWS_BIN" --ccr "$REFERENCE_CCR" --vrps-out "$REF_VRPS" --vaps-out "$REF_VAPS" --trust-anchor "$TA_NAME"
python3 - <<'PY' "$ACTUAL_VRPS" "$REF_VRPS" "$ACTUAL_VAPS" "$REF_VAPS" "$SUMMARY_JSON"
import csv, json, sys

def rows(path):
    # Drop the CSV header; compare data rows as tuples.
    with open(path, newline="") as f:
        return list(csv.reader(f))[1:]

actual_vrps = {tuple(r) for r in rows(sys.argv[1])}
ref_vrps = {tuple(r) for r in rows(sys.argv[2])}
actual_vaps = {tuple(r) for r in rows(sys.argv[3])}
ref_vaps = {tuple(r) for r in rows(sys.argv[4])}
summary = {
    "vrps": {
        "actual": len(actual_vrps),
        "reference": len(ref_vrps),
        "match": actual_vrps == ref_vrps,
        "only_in_actual": sorted(actual_vrps - ref_vrps)[:20],
        "only_in_reference": sorted(ref_vrps - actual_vrps)[:20],
    },
    "vaps": {
        "actual": len(actual_vaps),
        "reference": len(ref_vaps),
        "match": actual_vaps == ref_vaps,
        "only_in_actual": sorted(actual_vaps - ref_vaps)[:20],
        "only_in_reference": sorted(ref_vaps - actual_vaps)[:20],
    }
}
with open(sys.argv[5], "w") as f:
    json.dump(summary, f, indent=2)
PY
# Clean scratch state unless the caller asked to keep it.
if [[ "$KEEP_DB" -ne 1 ]]; then
rm -rf "$TMP_ROOT"
fi
echo "done: $OUT_DIR"

View File

@ -0,0 +1,149 @@
#!/usr/bin/env bash
# Replay every step of a recorded CIR sequence through our `rpki` binary
# (via run_cir_replay_ours.sh) and summarize per-step comparison results.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/cir/run_cir_replay_sequence_ours.sh \
--sequence-root <path> \
[--rpki-bin <path>] \
[--real-rsync-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SEQUENCE_ROOT=""
RPKI_BIN="${RPKI_BIN:-$ROOT_DIR/target/release/rpki}"
REAL_RSYNC_BIN="${REAL_RSYNC_BIN:-/usr/bin/rsync}"
# Per-step driver reused for each sequence entry.
STEP_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_ours.sh"
while [[ $# -gt 0 ]]; do
case "$1" in
--sequence-root) SEQUENCE_ROOT="$2"; shift 2 ;;
--rpki-bin) RPKI_BIN="$2"; shift 2 ;;
--real-rsync-bin) REAL_RSYNC_BIN="$2"; shift 2 ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage; exit 2 ;;
esac
done
[[ -n "$SEQUENCE_ROOT" ]] || { usage >&2; exit 2; }
# Canonicalize so all paths written into the summaries are absolute.
SEQUENCE_ROOT="$(python3 - <<'PY' "$SEQUENCE_ROOT"
from pathlib import Path
import sys

print(Path(sys.argv[1]).resolve())
PY
)"
SUMMARY_JSON="$SEQUENCE_ROOT/sequence-summary.json"
SUMMARY_MD="$SEQUENCE_ROOT/sequence-summary.md"
DETAIL_JSON="$SEQUENCE_ROOT/sequence-detail.json"
# Drive all steps from Python: read sequence.json, run the step script per
# step, collect compare results, then emit JSON + Markdown summaries.
python3 - <<'PY' "$SEQUENCE_ROOT" "$SUMMARY_JSON" "$SUMMARY_MD" "$DETAIL_JSON" "$STEP_SCRIPT" "$RPKI_BIN" "$REAL_RSYNC_BIN"
import json
import subprocess
import sys
from pathlib import Path

sequence_root = Path(sys.argv[1])
summary_json = Path(sys.argv[2])
summary_md = Path(sys.argv[3])
detail_json = Path(sys.argv[4])
step_script = Path(sys.argv[5])
rpki_bin = sys.argv[6]
real_rsync_bin = sys.argv[7]
sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
# The sequence must point at exactly one object-store backend.
static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
backend_count = sum(x is not None for x in (static_root, raw_store_db))
if backend_count != 1:
    raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")
steps = sequence["steps"]
results = []
all_match = True
for step in steps:
    step_id = step["stepId"]
    out_dir = sequence_root / "replay-ours" / step_id
    out_dir.parent.mkdir(parents=True, exist_ok=True)
    cmd = [
        str(step_script),
        "--cir",
        str(sequence_root / step["cirPath"]),
        "--out-dir",
        str(out_dir),
        "--reference-ccr",
        str(sequence_root / step["ccrPath"]),
        "--rpki-bin",
        rpki_bin,
        "--real-rsync-bin",
        real_rsync_bin,
    ]
    if static_root is not None:
        cmd.extend(["--static-root", str(static_root)])
    else:
        cmd.extend(["--raw-store-db", str(raw_store_db)])
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if proc.returncode != 0:
        # Abort on the first failing step; include captured output for triage.
        raise SystemExit(
            f"ours sequence replay failed for {step_id}: stdout={proc.stdout} stderr={proc.stderr}"
        )
    compare = json.loads((out_dir / "compare-summary.json").read_text(encoding="utf-8"))
    # timing.json is optional. NOTE(review): run_cir_replay_ours.sh does not
    # currently write it, so "timing" is usually empty here.
    timing = json.loads((out_dir / "timing.json").read_text(encoding="utf-8")) if (out_dir / "timing.json").exists() else {}
    record = {
        "stepId": step_id,
        "kind": step["kind"],
        "validationTime": step["validationTime"],
        "outDir": str(out_dir),
        "comparePath": str(out_dir / "compare-summary.json"),
        "timingPath": str(out_dir / "timing.json"),
        "compare": compare,
        "timing": timing,
        "match": bool(compare["vrps"]["match"]) and bool(compare["vaps"]["match"]),
    }
    all_match = all_match and record["match"]
    results.append(record)
summary = {
    "version": 1,
    "participant": "ours",
    "sequenceRoot": str(sequence_root),
    "stepCount": len(results),
    "allMatch": all_match,
    "steps": results,
}
summary_json.write_text(json.dumps(summary, indent=2), encoding="utf-8")
detail_json.write_text(json.dumps(results, indent=2), encoding="utf-8")
# Markdown summary: one table row per step.
lines = [
    "# Ours CIR Sequence Replay Summary",
    "",
    f"- `sequence_root`: `{sequence_root}`",
    f"- `step_count`: `{len(results)}`",
    f"- `all_match`: `{all_match}`",
    "",
    "| Step | Kind | VRP actual/ref | VRP match | VAP actual/ref | VAP match | Duration (ms) |",
    "| --- | --- | --- | --- | --- | --- | ---: |",
]
for item in results:
    compare = item["compare"]
    timing = item.get("timing") or {}
    lines.append(
        "| {step} | {kind} | {va}/{vr} | {vm} | {aa}/{ar} | {am} | {dur} |".format(
            step=item["stepId"],
            kind=item["kind"],
            va=compare["vrps"]["actual"],
            vr=compare["vrps"]["reference"],
            vm=compare["vrps"]["match"],
            aa=compare["vaps"]["actual"],
            ar=compare["vaps"]["reference"],
            am=compare["vaps"]["match"],
            dur=timing.get("durationMs", "-"),
        )
    )
summary_md.write_text("\n".join(lines) + "\n", encoding="utf-8")
PY
echo "done: $SEQUENCE_ROOT"

View File

@ -0,0 +1,132 @@
#!/usr/bin/env bash
set -euo pipefail
# Replay every step of a recorded CIR sequence through routinator and aggregate
# the per-step compare results into sequence-summary-routinator.{json,md}.
#
# The markdown summary includes a per-step VRP/VAP table (same layout as the
# "ours" sequence summary) so a mismatching step can be spotted without
# opening the JSON.
usage() {
  cat <<'EOF'
Usage:
  ./scripts/cir/run_cir_replay_sequence_routinator.sh \
    --sequence-root <path> \
    [--routinator-root <path>] \
    [--routinator-bin <path>] \
    [--real-rsync-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SEQUENCE_ROOT=""
ROUTINATOR_ROOT="${ROUTINATOR_ROOT:-/home/yuyr/dev/rust_playground/routinator}"
ROUTINATOR_BIN="${ROUTINATOR_BIN:-$ROUTINATOR_ROOT/target/debug/routinator}"
REAL_RSYNC_BIN="${REAL_RSYNC_BIN:-/usr/bin/rsync}"
STEP_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_routinator.sh"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --sequence-root) SEQUENCE_ROOT="$2"; shift 2 ;;
    --routinator-root) ROUTINATOR_ROOT="$2"; shift 2 ;;
    --routinator-bin) ROUTINATOR_BIN="$2"; shift 2 ;;
    --real-rsync-bin) REAL_RSYNC_BIN="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
[[ -n "$SEQUENCE_ROOT" ]] || { usage >&2; exit 2; }
# Canonicalize so every path recorded in the summary is absolute.
SEQUENCE_ROOT="$(python3 - <<'PY' "$SEQUENCE_ROOT"
from pathlib import Path
import sys

print(Path(sys.argv[1]).resolve())
PY
)"
SUMMARY_JSON="$SEQUENCE_ROOT/sequence-summary-routinator.json"
SUMMARY_MD="$SEQUENCE_ROOT/sequence-summary-routinator.md"
python3 - <<'PY' "$SEQUENCE_ROOT" "$SUMMARY_JSON" "$SUMMARY_MD" "$STEP_SCRIPT" "$ROUTINATOR_ROOT" "$ROUTINATOR_BIN" "$REAL_RSYNC_BIN"
import json
import subprocess
import sys
from pathlib import Path

sequence_root = Path(sys.argv[1])
summary_json = Path(sys.argv[2])
summary_md = Path(sys.argv[3])
step_script = Path(sys.argv[4])
routinator_root = sys.argv[5]
routinator_bin = sys.argv[6]
real_rsync_bin = sys.argv[7]

sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
# A sequence stores its raw objects either in a static file tree or in a
# raw-store database; exactly one backend must be configured.
static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
if sum(x is not None for x in (static_root, raw_store_db)) != 1:
    raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")

results = []
all_match = True
for step in sequence["steps"]:
    step_id = step["stepId"]
    out_dir = sequence_root / "replay-routinator" / step_id
    out_dir.parent.mkdir(parents=True, exist_ok=True)
    cmd = [
        str(step_script),
        "--cir", str(sequence_root / step["cirPath"]),
        "--out-dir", str(out_dir),
        "--reference-ccr", str(sequence_root / step["ccrPath"]),
        "--routinator-root", routinator_root,
        "--routinator-bin", routinator_bin,
        "--real-rsync-bin", real_rsync_bin,
    ]
    if static_root is not None:
        cmd.extend(["--static-root", str(static_root)])
    else:
        cmd.extend(["--raw-store-db", str(raw_store_db)])
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if proc.returncode != 0:
        raise SystemExit(
            f"routinator sequence replay failed for {step_id}: stdout={proc.stdout} stderr={proc.stderr}"
        )
    # The step script writes compare-summary.json with vrps/vaps actual vs
    # reference counts and per-kind match flags.
    compare = json.loads((out_dir / "compare-summary.json").read_text(encoding="utf-8"))
    match = bool(compare["vrps"]["match"]) and bool(compare["vaps"]["match"])
    all_match = all_match and match
    results.append(
        {
            "stepId": step_id,
            "kind": step["kind"],
            "validationTime": step["validationTime"],
            "outDir": str(out_dir),
            "comparePath": str(out_dir / "compare-summary.json"),
            "match": match,
            "compare": compare,
        }
    )

summary = {
    "version": 1,
    "participant": "routinator",
    "sequenceRoot": str(sequence_root),
    "stepCount": len(results),
    "allMatch": all_match,
    "steps": results,
}
summary_json.write_text(json.dumps(summary, indent=2), encoding="utf-8")

# Markdown report: headline stats plus one table row per step, mirroring the
# layout of the "ours" sequence summary.
lines = [
    "# Routinator CIR Sequence Replay Summary",
    "",
    f"- `sequence_root`: `{sequence_root}`",
    f"- `step_count`: `{len(results)}`",
    f"- `all_match`: `{all_match}`",
    "",
    "| Step | Kind | VRP actual/ref | VRP match | VAP actual/ref | VAP match |",
    "| --- | --- | --- | --- | --- | --- |",
]
for item in results:
    compare = item["compare"]
    lines.append(
        "| {step} | {kind} | {va}/{vr} | {vm} | {aa}/{ar} | {am} |".format(
            step=item["stepId"],
            kind=item["kind"],
            va=compare["vrps"]["actual"],
            vr=compare["vrps"]["reference"],
            vm=compare["vrps"]["match"],
            aa=compare["vaps"]["actual"],
            ar=compare["vaps"]["reference"],
            am=compare["vaps"]["match"],
        )
    )
summary_md.write_text("\n".join(lines) + "\n", encoding="utf-8")
PY
echo "done: $SEQUENCE_ROOT"

View File

@ -0,0 +1,126 @@
#!/usr/bin/env bash
set -euo pipefail
# Replay every step of a recorded CIR sequence through rpki-client and
# aggregate the per-step compare results into
# sequence-summary-rpki-client.{json,md}.
#
# The markdown summary includes a per-step VRP/VAP table (same layout as the
# "ours" sequence summary) so a mismatching step can be spotted without
# opening the JSON.
usage() {
  cat <<'EOF'
Usage:
  ./scripts/cir/run_cir_replay_sequence_rpki_client.sh \
    --sequence-root <path> \
    --build-dir <path> \
    [--real-rsync-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SEQUENCE_ROOT=""
BUILD_DIR=""
REAL_RSYNC_BIN="${REAL_RSYNC_BIN:-/usr/bin/rsync}"
STEP_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_rpki_client.sh"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --sequence-root) SEQUENCE_ROOT="$2"; shift 2 ;;
    --build-dir) BUILD_DIR="$2"; shift 2 ;;
    --real-rsync-bin) REAL_RSYNC_BIN="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
[[ -n "$SEQUENCE_ROOT" && -n "$BUILD_DIR" ]] || { usage >&2; exit 2; }
# Canonicalize so every path recorded in the summary is absolute.
SEQUENCE_ROOT="$(python3 - <<'PY' "$SEQUENCE_ROOT"
from pathlib import Path
import sys

print(Path(sys.argv[1]).resolve())
PY
)"
SUMMARY_JSON="$SEQUENCE_ROOT/sequence-summary-rpki-client.json"
SUMMARY_MD="$SEQUENCE_ROOT/sequence-summary-rpki-client.md"
python3 - <<'PY' "$SEQUENCE_ROOT" "$SUMMARY_JSON" "$SUMMARY_MD" "$STEP_SCRIPT" "$BUILD_DIR" "$REAL_RSYNC_BIN"
import json
import subprocess
import sys
from pathlib import Path

sequence_root = Path(sys.argv[1])
summary_json = Path(sys.argv[2])
summary_md = Path(sys.argv[3])
step_script = Path(sys.argv[4])
build_dir = sys.argv[5]
real_rsync_bin = sys.argv[6]

sequence = json.loads((sequence_root / "sequence.json").read_text(encoding="utf-8"))
# A sequence stores its raw objects either in a static file tree or in a
# raw-store database; exactly one backend must be configured.
static_root = sequence_root / sequence["staticRoot"] if "staticRoot" in sequence else None
raw_store_db = sequence_root / sequence["rawStoreDbPath"] if "rawStoreDbPath" in sequence else None
if sum(x is not None for x in (static_root, raw_store_db)) != 1:
    raise SystemExit("sequence must set exactly one of staticRoot or rawStoreDbPath")

results = []
all_match = True
for step in sequence["steps"]:
    step_id = step["stepId"]
    out_dir = sequence_root / "replay-rpki-client" / step_id
    out_dir.parent.mkdir(parents=True, exist_ok=True)
    cmd = [
        str(step_script),
        "--cir", str(sequence_root / step["cirPath"]),
        "--out-dir", str(out_dir),
        "--reference-ccr", str(sequence_root / step["ccrPath"]),
        "--build-dir", build_dir,
        "--real-rsync-bin", real_rsync_bin,
    ]
    if static_root is not None:
        cmd.extend(["--static-root", str(static_root)])
    else:
        cmd.extend(["--raw-store-db", str(raw_store_db)])
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if proc.returncode != 0:
        raise SystemExit(
            f"rpki-client sequence replay failed for {step_id}: stdout={proc.stdout} stderr={proc.stderr}"
        )
    # The step script writes compare-summary.json with vrps/vaps actual vs
    # reference counts and per-kind match flags.
    compare = json.loads((out_dir / "compare-summary.json").read_text(encoding="utf-8"))
    match = bool(compare["vrps"]["match"]) and bool(compare["vaps"]["match"])
    all_match = all_match and match
    results.append(
        {
            "stepId": step_id,
            "kind": step["kind"],
            "validationTime": step["validationTime"],
            "outDir": str(out_dir),
            "comparePath": str(out_dir / "compare-summary.json"),
            "match": match,
            "compare": compare,
        }
    )

summary = {
    "version": 1,
    "participant": "rpki-client",
    "sequenceRoot": str(sequence_root),
    "stepCount": len(results),
    "allMatch": all_match,
    "steps": results,
}
summary_json.write_text(json.dumps(summary, indent=2), encoding="utf-8")

# Markdown report: headline stats plus one table row per step, mirroring the
# layout of the "ours" sequence summary.
lines = [
    "# rpki-client CIR Sequence Replay Summary",
    "",
    f"- `sequence_root`: `{sequence_root}`",
    f"- `step_count`: `{len(results)}`",
    f"- `all_match`: `{all_match}`",
    "",
    "| Step | Kind | VRP actual/ref | VRP match | VAP actual/ref | VAP match |",
    "| --- | --- | --- | --- | --- | --- |",
]
for item in results:
    compare = item["compare"]
    lines.append(
        "| {step} | {kind} | {va}/{vr} | {vm} | {aa}/{ar} | {am} |".format(
            step=item["stepId"],
            kind=item["kind"],
            va=compare["vrps"]["actual"],
            vr=compare["vrps"]["reference"],
            vm=compare["vrps"]["match"],
            aa=compare["vaps"]["actual"],
            ar=compare["vaps"]["reference"],
            am=compare["vaps"]["match"],
        )
    )
summary_md.write_text("\n".join(lines) + "\n", encoding="utf-8")
PY
echo "done: $SEQUENCE_ROOT"

View File

@ -0,0 +1,132 @@
#!/usr/bin/env bash
set -euo pipefail
# Run the full CIR sequence replay matrix over multiple RIRs: for each RIR,
# replay the recorded sequence through ours / routinator / rpki-client, run
# the drop analysis, then aggregate all per-RIR summaries into
# $ROOT/final-summary.{json,md}.
usage() {
  cat <<'EOF'
Usage:
  ./scripts/cir/run_cir_sequence_matrix_multi_rir.sh \
    --root <path> \
    [--rir <afrinic,apnic,arin,lacnic,ripe>] \
    [--rpki-bin <path>] \
    [--routinator-root <path>] \
    [--routinator-bin <path>] \
    [--rpki-client-build-dir <path>] \
    [--drop-bin <path>]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
ROOT=""
RIRS="afrinic,apnic,arin,lacnic,ripe"
RPKI_BIN="${RPKI_BIN:-$ROOT_DIR/target/release/rpki}"
ROUTINATOR_ROOT="${ROUTINATOR_ROOT:-/home/yuyr/dev/rust_playground/routinator}"
ROUTINATOR_BIN="${ROUTINATOR_BIN:-$ROUTINATOR_ROOT/target/debug/routinator}"
RPKI_CLIENT_BUILD_DIR="${RPKI_CLIENT_BUILD_DIR:-/home/yuyr/dev/rpki-client-9.7/build-m5}"
DROP_BIN="${DROP_BIN:-$ROOT_DIR/target/release/cir_drop_report}"
OURS_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_sequence_ours.sh"
ROUTINATOR_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_sequence_routinator.sh"
RPKIC_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_replay_sequence_rpki_client.sh"
DROP_SCRIPT="$ROOT_DIR/scripts/cir/run_cir_drop_sequence.sh"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --root) ROOT="$2"; shift 2 ;;
    --rir) RIRS="$2"; shift 2 ;;
    --rpki-bin) RPKI_BIN="$2"; shift 2 ;;
    --routinator-root) ROUTINATOR_ROOT="$2"; shift 2 ;;
    --routinator-bin) ROUTINATOR_BIN="$2"; shift 2 ;;
    --rpki-client-build-dir) RPKI_CLIENT_BUILD_DIR="$2"; shift 2 ;;
    --drop-bin) DROP_BIN="$2"; shift 2 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
[[ -n "$ROOT" ]] || { usage >&2; exit 2; }
SUMMARY_JSON="$ROOT/final-summary.json"
SUMMARY_MD="$ROOT/final-summary.md"
IFS=',' read -r -a ITEMS <<< "$RIRS"
# Each participant script writes its own summary under $ROOT/<rir>; the
# aggregation step below only reads those files.
for rir in "${ITEMS[@]}"; do
  seq_root="$ROOT/$rir"
  "$OURS_SCRIPT" --sequence-root "$seq_root" --rpki-bin "$RPKI_BIN"
  "$ROUTINATOR_SCRIPT" --sequence-root "$seq_root" --routinator-root "$ROUTINATOR_ROOT" --routinator-bin "$ROUTINATOR_BIN"
  "$RPKIC_SCRIPT" --sequence-root "$seq_root" --build-dir "$RPKI_CLIENT_BUILD_DIR"
  "$DROP_SCRIPT" --sequence-root "$seq_root" --drop-bin "$DROP_BIN"
done
python3 - <<'PY' "$ROOT" "$RIRS" "$SUMMARY_JSON" "$SUMMARY_MD"
import json
import sys
from collections import Counter
from pathlib import Path

root = Path(sys.argv[1]).resolve()
rirs = [item for item in sys.argv[2].split(',') if item]
summary_json = Path(sys.argv[3])
summary_md = Path(sys.argv[4])

items = []
total_steps = 0
total_dropped_vrps = 0
total_dropped_objects = 0
reason_counter = Counter()
for rir in rirs:
    seq_root = root / rir
    ours = json.loads((seq_root / "sequence-summary.json").read_text(encoding="utf-8"))
    routinator = json.loads((seq_root / "sequence-summary-routinator.json").read_text(encoding="utf-8"))
    rpki_client = json.loads((seq_root / "sequence-summary-rpki-client.json").read_text(encoding="utf-8"))
    drop = json.loads((seq_root / "drop-summary.json").read_text(encoding="utf-8"))
    step_count = len(ours["steps"])
    total_steps += step_count
    # Sum dropped VRP/object counts over every step-level drop report and
    # fold the per-reason counts into the global counter.
    rir_dropped_vrps = 0
    rir_dropped_objects = 0
    for step in drop["steps"]:
        detail = json.loads(Path(step["reportPath"]).read_text(encoding="utf-8"))
        step_summary = detail.get("summary", {})
        rir_dropped_vrps += int(step_summary.get("droppedVrpCount", 0))
        rir_dropped_objects += int(step_summary.get("droppedObjectCount", 0))
        for reason, count in step_summary.get("droppedByReason", {}).items():
            reason_counter[reason] += int(count)
    total_dropped_vrps += rir_dropped_vrps
    total_dropped_objects += rir_dropped_objects
    items.append({
        "rir": rir,
        "stepCount": step_count,
        "oursAllMatch": ours["allMatch"],
        "routinatorAllMatch": routinator["allMatch"],
        "rpkiClientAllMatch": rpki_client["allMatch"],
        "dropSummary": drop["steps"],
        "droppedVrpCount": rir_dropped_vrps,
        "droppedObjectCount": rir_dropped_objects,
    })

summary = {
    "version": 1,
    "totalStepCount": total_steps,
    "totalDroppedVrpCount": total_dropped_vrps,
    "totalDroppedObjectCount": total_dropped_objects,
    "topReasons": [{"reason": reason, "count": count} for reason, count in reason_counter.most_common(10)],
    "rirs": items,
}
summary_json.write_text(json.dumps(summary, indent=2), encoding="utf-8")

lines = ["# Multi-RIR CIR Sequence Matrix Summary", ""]
lines.append(f"- `total_step_count`: `{total_steps}`")
lines.append(f"- `total_dropped_vrps`: `{total_dropped_vrps}`")
lines.append(f"- `total_dropped_objects`: `{total_dropped_objects}`")
lines.append("")
if reason_counter:
    lines.append("## Top Drop Reasons")
    lines.append("")
    for reason, count in reason_counter.most_common(10):
        lines.append(f"- `{reason}`: `{count}`")
    lines.append("")
for item in items:
    lines.append(
        f"- `{item['rir']}`: `steps={item['stepCount']}` `ours={item['oursAllMatch']}` `routinator={item['routinatorAllMatch']}` `rpki-client={item['rpkiClientAllMatch']}` `drop_vrps={item['droppedVrpCount']}` `drop_objects={item['droppedObjectCount']}`"
    )
summary_md.write_text("\n".join(lines) + "\n", encoding="utf-8")
PY
echo "done: $ROOT"

82
scripts/coverage.sh Executable file
View File

@ -0,0 +1,82 @@
#!/usr/bin/env bash
set -euo pipefail
# Coverage driver: run the test suite exactly once under cargo-llvm-cov, then
# render both a CLI summary (with a line-coverage gate) and an HTML report
# from the same profile data, keeping colored output via a pseudo-TTY.
# Requires:
# rustup component add llvm-tools-preview
# cargo install cargo-llvm-cov --locked
# Temp files capture the raw (colored) output of each phase; cleaned on exit.
run_out="$(mktemp)"
text_out="$(mktemp)"
html_out="$(mktemp)"
cleanup() {
rm -f "$run_out" "$text_out" "$html_out"
}
trap cleanup EXIT
# Files excluded from the coverage gate: CLI binaries, capture/replay tooling,
# and modules exercised only by live runs.
IGNORE_REGEX='src/bin/replay_bundle_capture\.rs|src/bin/replay_bundle_capture_delta\.rs|src/bin/replay_bundle_capture_sequence\.rs|src/bin/replay_bundle_record\.rs|src/bin/replay_bundle_refresh_sequence_outputs\.rs|src/bin/measure_sequence_replay\.rs|src/bin/repository_view_stats\.rs|src/bin/trace_arin_missing_vrps\.rs|src/bin/db_stats\.rs|src/bin/rrdp_state_dump\.rs|src/bin/ccr_dump\.rs|src/bin/ccr_verify\.rs|src/bin/ccr_to_routinator_csv\.rs|src/bin/ccr_to_compare_views\.rs|src/bin/cir_materialize\.rs|src/bin/cir_extract_inputs\.rs|src/bin/cir_drop_report\.rs|src/bin/cir_ta_only_fixture\.rs|src/bundle/live_capture\.rs|src/bundle/record_io\.rs|src/bundle/compare_view\.rs|src/progress_log\.rs|src/cli\.rs|src/validation/run_tree_from_tal\.rs|src/validation/from_tal\.rs|src/sync/store_projection\.rs|src/cir/materialize\.rs'
# Preserve colored output even though we post-process output by running under a pseudo-TTY.
# We run tests only once, then generate both CLI text + HTML reports without rerunning tests.
# Disable errexit so all three phases run and each phase's status is recorded;
# the combined exit status is decided at the bottom of the script.
set +e
cargo llvm-cov clean --workspace >/dev/null 2>&1
# 1) Run tests once to collect coverage data (no report).
script -q -e -c "CARGO_TERM_COLOR=always cargo llvm-cov --no-report" "$run_out" >/dev/null 2>&1
run_status="$?"
# 2) CLI summary report + fail-under gate (no test rerun).
script -q -e -c "CARGO_TERM_COLOR=always cargo llvm-cov report --fail-under-lines 90 --ignore-filename-regex '$IGNORE_REGEX'" "$text_out" >/dev/null 2>&1
text_status="$?"
# 3) HTML report (no test rerun).
script -q -e -c "CARGO_TERM_COLOR=always cargo llvm-cov report --html --ignore-filename-regex '$IGNORE_REGEX'" "$html_out" >/dev/null 2>&1
html_status="$?"
set -e
# Drop the `script(1)` start/done banner lines and carriage returns that the
# pseudo-TTY capture adds.
strip_script_noise() {
tr -d '\r' | sed '/^Script \(started\|done\) on /d'
}
# Strip ANSI escape sequences so the `test result:` parser below sees plain text.
strip_ansi_for_parse() {
awk '
{
line = $0
gsub(/\033\[[0-9;]*[A-Za-z]/, "", line) # CSI escapes
gsub(/\033\([A-Za-z]/, "", line) # charset escapes (e.g., ESC(B)
gsub(/\r/, "", line)
print line
}
'
}
# Replay each phase's (still colored) output for the user.
cat "$run_out" | strip_script_noise
cat "$text_out" | strip_script_noise
cat "$html_out" | strip_script_noise
# Aggregate the per-suite `test result:` lines into one overall test summary.
# NOTE(review): the 3-argument match() is a GNU awk extension — confirm gawk
# is the awk on target machines.
cat "$run_out" | strip_ansi_for_parse | awk '
BEGIN {
passed=0; failed=0; ignored=0; measured=0; filtered=0;
}
/^test result: / {
if (match($0, /([0-9]+) passed; ([0-9]+) failed; ([0-9]+) ignored; ([0-9]+) measured; ([0-9]+) filtered out;/, m)) {
passed += m[1]; failed += m[2]; ignored += m[3]; measured += m[4]; filtered += m[5];
}
}
END {
executed = passed + failed;
total = passed + failed + ignored + measured;
printf("\nTEST SUMMARY (all suites): passed=%d failed=%d ignored=%d measured=%d filtered_out=%d executed=%d total=%d\n",
passed, failed, ignored, measured, filtered, executed, total);
}
'
echo
echo "HTML report: target/llvm-cov/html/index.html"
# Exit nonzero if any phase failed: start from the coverage-gate status, then
# let a test-run failure and finally an HTML-report failure override it.
status="$text_status"
if [ "$run_status" -ne 0 ]; then status="$run_status"; fi
if [ "$html_status" -ne 0 ]; then status="$html_status"; fi
exit "$status"

View File

@ -0,0 +1,195 @@
#!/usr/bin/env bash
set -euo pipefail
# M2: Run per-sample decode+profile benchmark (Ours vs Routinator) on selected_der fixtures.
#
# Outputs:
# - specs/develop/20260224/data/m2_manifest_decode_profile_compare.csv
# - specs/develop/20260224/data/m2_raw.log
#
# Note: This script assumes Routinator benchmark repo exists at:
# /home/yuyr/dev/rust_playground/routinator/benchmark
#
# It also assumes fixtures exist under:
# rpki/tests/benchmark/selected_der/*.mft
# routinator/benchmark/fixtures/selected_der/*.mft
#
# Uses only standard tools (grep/awk/tail) so it runs on hosts without ripgrep.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
RPKI_DIR="$ROOT_DIR"
OURS_BENCH_DIR="$RPKI_DIR/benchmark/ours_manifest_bench"
ROUT_BENCH_DIR="${ROUT_BENCH_DIR:-/home/yuyr/dev/rust_playground/routinator/benchmark}"
ROUT_BIN="$ROUT_BENCH_DIR/target/release/routinator-manifest-benchmark"
DATE_TAG="${DATE_TAG:-20260224}"
OUT_DIR="$RPKI_DIR/../specs/develop/${DATE_TAG}/data"
OUT_CSV="${OUT_CSV:-$OUT_DIR/m2_manifest_decode_profile_compare.csv}"
OUT_RAW="${OUT_RAW:-$OUT_DIR/m2_raw.log}"
REPEATS="${REPEATS:-3}"
# Iterations / warmups (kept moderate for interactive iteration).
ITER_SMALL="${ITER_SMALL:-20000}"
ITER_MEDIUM="${ITER_MEDIUM:-20000}"
ITER_LARGE="${ITER_LARGE:-20000}"
ITER_XLARGE="${ITER_XLARGE:-2000}"
WARM_SMALL="${WARM_SMALL:-2000}"
WARM_MEDIUM="${WARM_MEDIUM:-2000}"
WARM_LARGE="${WARM_LARGE:-2000}"
WARM_XLARGE="${WARM_XLARGE:-200}"
SAMPLES=(
  small-01
  small-02
  medium-01
  medium-02
  large-01
  large-02
  xlarge-01
  xlarge-02
)
mkdir -p "$OUT_DIR"
: > "$OUT_RAW"
echo "sample,bucket,manifest_file_count,ours_avg_ns_per_op,ours_ops_per_s,rout_avg_ns_per_op,rout_ops_per_s,ratio_ours_over_rout,iterations,repeats,warmup" > "$OUT_CSV"
echo "[1/3] Build ours benchmark (release)..." | tee -a "$OUT_RAW"
(cd "$OURS_BENCH_DIR" && cargo build --release -q)
OURS_BIN="$OURS_BENCH_DIR/target/release/ours-manifest-bench"
echo "[2/3] Build routinator benchmark (release)..." | tee -a "$OUT_RAW"
(cd "$ROUT_BENCH_DIR" && cargo build --release -q)
# Optional CPU pinning: set TASKSET_CPU to pin both binaries to one core.
taskset_prefix=""
if command -v taskset >/dev/null 2>&1; then
  if [[ -n "${TASKSET_CPU:-}" ]]; then
    taskset_prefix="taskset -c ${TASKSET_CPU}"
  fi
fi
# Map a sample name to its size bucket (drives iteration/warmup counts).
bucket_for() {
  local s="$1"
  case "$s" in
    small-*) echo "small" ;;
    medium-*) echo "medium" ;;
    large-*) echo "large" ;;
    xlarge-*) echo "xlarge" ;;
    *) echo "unknown" ;;
  esac
}
iters_for() {
  local b="$1"
  case "$b" in
    small) echo "$ITER_SMALL" ;;
    medium) echo "$ITER_MEDIUM" ;;
    large) echo "$ITER_LARGE" ;;
    xlarge) echo "$ITER_XLARGE" ;;
    *) echo "$ITER_MEDIUM" ;;
  esac
}
warm_for() {
  local b="$1"
  case "$b" in
    small) echo "$WARM_SMALL" ;;
    medium) echo "$WARM_MEDIUM" ;;
    large) echo "$WARM_LARGE" ;;
    xlarge) echo "$WARM_XLARGE" ;;
    *) echo "$WARM_MEDIUM" ;;
  esac
}
# Run our benchmark for one sample; prints "avg_ns,ops_per_s,file_count".
run_ours() {
  local sample="$1"
  local iters="$2"
  local warm="$3"
  local ours_fixture="$RPKI_DIR/tests/benchmark/selected_der/${sample}.mft"
  if [[ ! -f "$ours_fixture" ]]; then
    echo "ours fixture not found: $ours_fixture" >&2
    exit 1
  fi
  echo "### ours $sample" >> "$OUT_RAW"
  local out
  out=$($taskset_prefix "$OURS_BIN" --manifest "$ours_fixture" --iterations "$iters" --warmup-iterations "$warm" --repeats "$REPEATS")
  echo "$out" >> "$OUT_RAW"
  local line
  line=$(echo "$out" | grep -E "^\\| ${sample} \\|" | tail -n 1)
  if [[ -z "${line:-}" ]]; then
    echo "failed to parse ours output for $sample" >&2
    exit 1
  fi
  # Expected final row: | sample | avg ns/op | ops/s | file count |
  local avg ops cnt
  avg=$(echo "$line" | awk -F'|' '{gsub(/^ +| +$/,"",$3); print $3}')
  ops=$(echo "$line" | awk -F'|' '{gsub(/^ +| +$/,"",$4); print $4}')
  cnt=$(echo "$line" | awk -F'|' '{gsub(/^ +| +$/,"",$5); print $5}')
  echo "$avg,$ops,$cnt"
}
# Run the routinator benchmark for one sample; prints "avg_ns,ops_per_s,file_count".
run_rout() {
  local sample="$1"
  local iters="$2"
  local warm="$3"
  local rout_fixture="$ROUT_BENCH_DIR/fixtures/selected_der/${sample}.mft"
  if [[ ! -f "$rout_fixture" ]]; then
    echo "routinator fixture not found: $rout_fixture" >&2
    exit 1
  fi
  echo "### routinator $sample" >> "$OUT_RAW"
  local out
  out=$(
    $taskset_prefix "$ROUT_BIN" \
      --target decode_only \
      --manifest "$rout_fixture" \
      --issuer "$ROUT_BENCH_DIR/fixtures/ta.cer" \
      --iterations "$iters" \
      --repeats "$REPEATS" \
      --warmup-iterations "$warm" \
      --strict false
  )
  echo "$out" >> "$OUT_RAW"
  local avg_line cnt_line
  avg_line=$(echo "$out" | grep "^ avg:")
  cnt_line=$(echo "$out" | grep "^ manifest_file_count:")
  local avg_ns ops_s cnt
  avg_ns=$(echo "$avg_line" | awk '{print $2}')
  ops_s=$(echo "$avg_line" | awk '{gsub(/[()]/,"",$4); print $4}')
  cnt=$(echo "$cnt_line" | awk '{print $2}')
  echo "$avg_ns,$ops_s,$cnt"
}
echo "[3/3] Run per-sample benchmarks..." | tee -a "$OUT_RAW"
for s in "${SAMPLES[@]}"; do
  b=$(bucket_for "$s")
  it=$(iters_for "$b")
  warm=$(warm_for "$b")
  IFS=, read -r ours_avg ours_ops ours_cnt < <(run_ours "$s" "$it" "$warm")
  IFS=, read -r rout_avg rout_ops rout_cnt < <(run_rout "$s" "$it" "$warm")
  # Both binaries must have decoded the same manifest; warn loudly if not.
  if [[ "$ours_cnt" != "$rout_cnt" ]]; then
    echo "WARNING: file count differs for $s (ours=$ours_cnt rout=$rout_cnt)" | tee -a "$OUT_RAW"
  fi
  # ratio > 1 means ours is slower than routinator for this sample.
  ratio=$(python3 - <<PY
o=float("$ours_avg")
r=float("$rout_avg")
print(f"{(o/r):.4f}" if r != 0 else "inf")
PY
)
  echo "$s,$b,$ours_cnt,$ours_avg,$ours_ops,$rout_avg,$rout_ops,$ratio,$it,$REPEATS,$warm" >> "$OUT_CSV"
  echo >> "$OUT_RAW"
done
echo "Done."
echo "- CSV: $OUT_CSV"
echo "- Raw: $OUT_RAW"

View File

@ -0,0 +1,142 @@
#!/usr/bin/env bash
set -euo pipefail
# M3: Generate flamegraphs + top hotspots for Manifest decode+profile (Ours vs Routinator).
#
# Outputs under:
# specs/develop/20260224/flamegraph/
# specs/develop/20260224/hotspots/
# specs/develop/20260224/perf/
#
# Notes:
# - On WSL2, /usr/bin/perf is often a wrapper that fails. This script uses a real perf binary
# from /usr/lib/linux-tools/*/perf (if present).
# - Ours profiling uses perf + flamegraph --perfdata to avoid rebuilding the whole crate graph
# with RocksDB.
ROOT_REPO="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
RPKI_DIR="$ROOT_REPO/rpki"
DATE_TAG="${DATE_TAG:-20260224}"
OUT_BASE="$ROOT_REPO/specs/develop/${DATE_TAG}"
OUT_FLAME="$OUT_BASE/flamegraph"
OUT_HOT="$OUT_BASE/hotspots"
OUT_PERF="$OUT_BASE/perf"
# RUN_TAG disambiguates repeated profiling runs in the output filenames.
RUN_TAG="${RUN_TAG:-p2}"
OURS_BENCH_DIR="$RPKI_DIR/benchmark/ours_manifest_bench"
OURS_BIN="$OURS_BENCH_DIR/target/release/ours-manifest-bench"
ROUT_BENCH_DIR="${ROUT_BENCH_DIR:-/home/yuyr/dev/rust_playground/routinator/benchmark}"
ROUT_BIN="$ROUT_BENCH_DIR/target/release/routinator-manifest-benchmark"
ROUT_ISSUER="$ROUT_BENCH_DIR/fixtures/ta.cer"
# Sampling frequency for perf record (Hz).
PROFILE_HZ="${PROFILE_HZ:-99}"
mkdir -p "$OUT_FLAME" "$OUT_HOT" "$OUT_PERF"
# Detect the WSL2 perf wrapper (prints a kernel warning instead of running)
# and fall back to a real perf binary shipped with linux-tools.
PERF_WRAPPER_OUT="$(perf --version 2>&1 || true)"
PERF_REAL=""
if echo "${PERF_WRAPPER_OUT}" | grep -q "WARNING: perf not found for kernel"; then
PERF_REAL="$(ls -1 /usr/lib/linux-tools/*/perf 2>/dev/null | head -n 1 || true)"
else
PERF_REAL="$(command -v perf || true)"
fi
if [[ -z "${PERF_REAL}" ]]; then
echo "ERROR: usable perf binary not found (wrapper detected and no /usr/lib/linux-tools/*/perf)." >&2
exit 2
fi
# Install a tiny shim named `perf` at the front of PATH so downstream tools
# (e.g. flamegraph) invoke the real perf binary we resolved above.
SHIM_DIR="$RPKI_DIR/target/bench/tools"
mkdir -p "$SHIM_DIR"
cat > "$SHIM_DIR/perf" <<EOF
#!/usr/bin/env bash
exec "${PERF_REAL}" "\$@"
EOF
chmod +x "$SHIM_DIR/perf"
export PATH="$SHIM_DIR:$PATH"
echo "Using perf: $PERF_REAL"
echo "[1/3] Build ours benchmark with frame pointers..."
# Frame pointers make perf's call-graph sampling reliable for flamegraphs.
(cd "$OURS_BENCH_DIR" && RUSTFLAGS="-C force-frame-pointers=yes" cargo build --release -q)
echo "[2/3] Build routinator benchmark (release)..."
(cd "$ROUT_BENCH_DIR" && cargo build --release -q)
# Pin to CPU 0 when taskset is available to reduce sampling noise.
taskset_prefix=""
if command -v taskset >/dev/null 2>&1; then
taskset_prefix="taskset -c 0"
fi
# Profile our benchmark binary for one sample: perf record -> flamegraph SVG
# -> top-hotspot TSV (percent<TAB>symbol, >=0.5% only).
profile_ours() {
local sample="$1"
local iters="$2"
local warm="$3"
local fixture="$RPKI_DIR/tests/benchmark/selected_der/${sample}.mft"
if [[ ! -f "$fixture" ]]; then
echo "ERROR: ours fixture not found: $fixture" >&2
exit 1
fi
local perfdata="$OUT_PERF/ours_${sample}_${RUN_TAG}.perf.data"
local svg="$OUT_FLAME/ours_${sample}_${RUN_TAG}.svg"
local tsv="$OUT_HOT/ours_${sample}_${RUN_TAG}.tsv"
echo "== ours $sample (iters=$iters warmup=$warm hz=$PROFILE_HZ)"
$taskset_prefix perf record -o "$perfdata" -F "$PROFILE_HZ" -g -- \
"$OURS_BIN" --manifest "$fixture" --iterations "$iters" --warmup-iterations "$warm" --repeats 1 >/dev/null
flamegraph --perfdata "$perfdata" --output "$svg" --title "ours ${sample} ManifestObject::decode_der" --deterministic >/dev/null
perf report -i "$perfdata" --stdio --no-children --sort symbol --percent-limit 0.5 \
| awk '/^[[:space:]]*[0-9.]+%/ {pct=$1; sub(/%/,"",pct); $1=""; sub(/^[[:space:]]+/,""); print pct "\t" $0}' \
> "$tsv"
}
# Profile the routinator benchmark for one sample; it produces its own
# flamegraph SVG and hotspot TSV via its --flamegraph/--hotspots flags.
profile_routinator() {
local sample="$1"
local iters="$2"
local warm="$3"
local fixture="$ROUT_BENCH_DIR/fixtures/selected_der/${sample}.mft"
if [[ ! -f "$fixture" ]]; then
echo "ERROR: routinator fixture not found: $fixture" >&2
exit 1
fi
local svg="$OUT_FLAME/routinator_${sample}_${RUN_TAG}.svg"
local tsv="$OUT_HOT/routinator_${sample}_${RUN_TAG}.tsv"
echo "== routinator $sample (iters=$iters warmup=$warm hz=$PROFILE_HZ)"
$taskset_prefix "$ROUT_BIN" \
--target decode_only \
--manifest "$fixture" \
--issuer "$ROUT_ISSUER" \
--iterations "$iters" \
--repeats 1 \
--warmup-iterations "$warm" \
--strict false \
--profile-hz "$PROFILE_HZ" \
--flamegraph "$svg" \
--hotspots "$tsv" \
>/dev/null
}
echo "[3/3] Profile samples..."
# Choose iterations so each capture runs ~10-20s serially.
profile_ours small-01 200000 0
profile_routinator small-01 200000 0
profile_ours large-02 50000 0
profile_routinator large-02 50000 0
profile_ours xlarge-02 5000 0
profile_routinator xlarge-02 5000 0
echo "Done."
echo "- Flamegraphs: $OUT_FLAME/"
echo "- Hotspots: $OUT_HOT/"
echo "- Perf data: $OUT_PERF/"

View File

@ -0,0 +1,97 @@
# Manual RRDP sync (APNIC-focused)
This directory contains **manual, command-line** scripts to reproduce the workflow described in:
- `specs/develop/20260226/apnic_rrdp_delta_analysis_after_manifest_revalidation_fix_20260227T022606Z.md`
They are meant for **hands-on validation / acceptance runs**, not for CI.
## Prerequisites
- Rust toolchain (`cargo`)
- `rsync` available on PATH (for rsync fallback/objects)
- Network access (RRDP over HTTPS)
## What the scripts do
### `full_sync.sh`
- Creates a fresh RocksDB directory
- Runs a **full serial** validation from a TAL URL (default: APNIC RFC7730 TAL)
- Writes:
- run log
- audit report JSON
- run meta JSON (includes durations + download_stats)
- short summary Markdown (includes durations + download_stats)
- RocksDB key statistics (`db_stats --exact`)
- RRDP legacy session/serial dump (`rrdp_state_dump --view legacy-state`)
### `delta_sync.sh`
- Copies an existing “baseline snapshot DB” to a new DB directory (so the baseline is not modified)
- Runs another validation against the copied DB (RRDP will prefer **delta** when available)
- Produces the same artifacts as `full_sync.sh`
- Additionally generates a Markdown **delta analysis** report by comparing:
- base vs delta report JSON
- base vs delta `rrdp_state_dump --view legacy-state` TSV
- and includes a **duration comparison** (base vs delta) if the base meta JSON is available
- delta meta JSON includes download_stats copied from delta report JSON
## Audit report fields (report.json)
The `rpki` binary writes an audit report JSON with:
- `format_version: 2`
- `downloads`: per-download RRDP/rsync events (URI, timestamps, duration, ok/fail, error, bytes, objects stats)
- `download_stats`: aggregate counters (by kind)
These are useful for diagnosing why a run is slow (e.g. RRDP snapshot vs delta vs rsync fallback).
The standalone `rrdp_state_dump` tool also supports `source`, `members`, `owners`, and `all` views.
The manual sync scripts intentionally call `--view legacy-state` so delta analysis keeps using a stable session/serial TSV format.
## Meta fields (meta.json)
The scripts generate `*_meta.json` next to `*_report.json` and include:
- `durations_secs`: wall-clock duration breakdown for the script steps
- `download_stats`: copied from `report_json.download_stats`
## Usage
Run from `rpki/`:
```bash
./scripts/manual_sync/full_sync.sh
```
After you have a baseline run, run delta against it:
```bash
./scripts/manual_sync/delta_sync.sh target/live/manual_sync/apnic_full_db_YYYYMMDDTHHMMSSZ \
target/live/manual_sync/apnic_full_report_YYYYMMDDTHHMMSSZ.json
```
If the baseline was produced by `full_sync.sh`, the delta script will auto-discover the base meta JSON
next to the base report (by replacing `_report.json` with `_meta.json`) and include base durations in
the delta analysis report.
## Configuration (env vars)
Both scripts accept overrides via env vars:
- `TAL_URL` (default: APNIC TAL URL)
- `HTTP_TIMEOUT_SECS` (default: 1800)
- `RSYNC_TIMEOUT_SECS` (default: 1800)
- `RSYNC_MIRROR_ROOT` (default: disabled; when set, passes `--rsync-mirror-root` to `rpki`)
- `VALIDATION_TIME` (RFC3339; default: now UTC)
- `OUT_DIR` (default: `rpki/target/live/manual_sync`)
- `RUN_NAME` (default: auto timestamped)
Example:
```bash
TAL_URL="https://tal.apnic.net/tal-archive/apnic-rfc7730-https.tal" \
HTTP_TIMEOUT_SECS=1800 RSYNC_TIMEOUT_SECS=1800 \
./scripts/manual_sync/full_sync.sh
```

543
scripts/manual_sync/delta_sync.sh Executable file
View File

@ -0,0 +1,543 @@
#!/usr/bin/env bash
set -euo pipefail
# Delta sync + validation starting from a baseline snapshot DB.
#
# This script:
# 1) Copies BASE_DB_DIR -> DELTA_DB_DIR (so baseline is not modified)
# 2) Runs rpki validation again (RRDP will prefer delta if available)
# 3) Writes artifacts + a markdown delta analysis report
#
# Usage:
# ./scripts/manual_sync/delta_sync.sh <base_db_dir> <base_report_json>
#
# Outputs under OUT_DIR (default: target/live/manual_sync):
# - *_delta_db_* copied RocksDB directory
# - *_delta_report_*.json audit report
# - *_delta_run_*.log stdout/stderr log (includes summary)
# - *_delta_db_stats_*.txt db_stats --exact output
# - *_delta_rrdp_state_*.tsv rrdp_state_dump --view legacy-state output
# - *_delta_analysis_*.md base vs delta comparison report
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
BASE_DB_DIR="${1:-}"
BASE_REPORT_JSON="${2:-}"
if [[ -z "${BASE_DB_DIR}" || -z "${BASE_REPORT_JSON}" ]]; then
echo "Usage: $0 <base_db_dir> <base_report_json>" >&2
exit 2
fi
if [[ ! -d "${BASE_DB_DIR}" ]]; then
echo "ERROR: base_db_dir is not a directory: ${BASE_DB_DIR}" >&2
exit 2
fi
if [[ ! -f "${BASE_REPORT_JSON}" ]]; then
echo "ERROR: base_report_json does not exist: ${BASE_REPORT_JSON}" >&2
exit 2
fi
TAL_URL="${TAL_URL:-https://tal.apnic.net/tal-archive/apnic-rfc7730-https.tal}"
HTTP_TIMEOUT_SECS="${HTTP_TIMEOUT_SECS:-1800}"
RSYNC_TIMEOUT_SECS="${RSYNC_TIMEOUT_SECS:-1800}"
RSYNC_MIRROR_ROOT="${RSYNC_MIRROR_ROOT:-}"
VALIDATION_TIME="${VALIDATION_TIME:-}"
OUT_DIR="${OUT_DIR:-$ROOT_DIR/target/live/manual_sync}"
mkdir -p "$OUT_DIR"
TS="$(date -u +%Y%m%dT%H%M%SZ)"
RUN_NAME="${RUN_NAME:-apnic_delta_${TS}}"
DELTA_DB_DIR="${DELTA_DB_DIR:-$OUT_DIR/${RUN_NAME}_db}"
DELTA_REPORT_JSON="${DELTA_REPORT_JSON:-$OUT_DIR/${RUN_NAME}_report.json}"
DELTA_RUN_LOG="${DELTA_RUN_LOG:-$OUT_DIR/${RUN_NAME}_run.log}"
BASE_DB_STATS_TXT="${BASE_DB_STATS_TXT:-$OUT_DIR/${RUN_NAME}_base_db_stats.txt}"
DELTA_DB_STATS_TXT="${DELTA_DB_STATS_TXT:-$OUT_DIR/${RUN_NAME}_delta_db_stats.txt}"
BASE_RRDP_STATE_TSV="${BASE_RRDP_STATE_TSV:-$OUT_DIR/${RUN_NAME}_base_rrdp_state.tsv}"
DELTA_RRDP_STATE_TSV="${DELTA_RRDP_STATE_TSV:-$OUT_DIR/${RUN_NAME}_delta_rrdp_state.tsv}"
DELTA_ANALYSIS_MD="${DELTA_ANALYSIS_MD:-$OUT_DIR/${RUN_NAME}_delta_analysis.md}"
DELTA_META_JSON="${DELTA_META_JSON:-$OUT_DIR/${RUN_NAME}_meta.json}"
# Best-effort base meta discovery (produced by `full_sync.sh`).
BASE_META_JSON="${BASE_META_JSON:-}"
if [[ -z "${BASE_META_JSON}" ]]; then
guess="${BASE_REPORT_JSON%_report.json}_meta.json"
if [[ -f "${guess}" ]]; then
BASE_META_JSON="${guess}"
fi
fi
echo "== rpki manual delta sync ==" >&2
echo "tal_url=$TAL_URL" >&2
echo "base_db=$BASE_DB_DIR" >&2
echo "base_report=$BASE_REPORT_JSON" >&2
echo "delta_db=$DELTA_DB_DIR" >&2
echo "delta_report=$DELTA_REPORT_JSON" >&2
echo "== copying base DB (baseline is not modified) ==" >&2
cp -a "$BASE_DB_DIR" "$DELTA_DB_DIR"
script_start_s="$(date +%s)"
run_start_s="$(date +%s)"
cmd=(cargo run --release --bin rpki -- \
--db "$DELTA_DB_DIR" \
--tal-url "$TAL_URL" \
--http-timeout-secs "$HTTP_TIMEOUT_SECS" \
--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS" \
--report-json "$DELTA_REPORT_JSON")
if [[ -n "${RSYNC_MIRROR_ROOT}" ]]; then
cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
fi
if [[ -n "${VALIDATION_TIME}" ]]; then
cmd+=(--validation-time "$VALIDATION_TIME")
fi
(
echo "# command:"
printf '%q ' "${cmd[@]}"
echo
echo
"${cmd[@]}"
) 2>&1 | tee "$DELTA_RUN_LOG" >/dev/null
run_end_s="$(date +%s)"
run_duration_s="$((run_end_s - run_start_s))"
echo "== db_stats (exact) ==" >&2
db_stats_start_s="$(date +%s)"
cargo run --release --bin db_stats -- --db "$BASE_DB_DIR" --exact 2>&1 | tee "$BASE_DB_STATS_TXT" >/dev/null
cargo run --release --bin db_stats -- --db "$DELTA_DB_DIR" --exact 2>&1 | tee "$DELTA_DB_STATS_TXT" >/dev/null
db_stats_end_s="$(date +%s)"
db_stats_duration_s="$((db_stats_end_s - db_stats_start_s))"
echo "== rrdp_state_dump (legacy-state) ==" >&2
state_start_s="$(date +%s)"
cargo run --release --bin rrdp_state_dump -- --db "$BASE_DB_DIR" --view legacy-state >"$BASE_RRDP_STATE_TSV"
cargo run --release --bin rrdp_state_dump -- --db "$DELTA_DB_DIR" --view legacy-state >"$DELTA_RRDP_STATE_TSV"
state_end_s="$(date +%s)"
state_duration_s="$((state_end_s - state_start_s))"
script_end_s="$(date +%s)"
total_duration_s="$((script_end_s - script_start_s))"
echo "== delta analysis report ==" >&2
TAL_URL="$TAL_URL" \
BASE_DB_DIR="$BASE_DB_DIR" \
DELTA_DB_DIR="$DELTA_DB_DIR" \
DELTA_RUN_LOG="$DELTA_RUN_LOG" \
VALIDATION_TIME_ARG="$VALIDATION_TIME" \
HTTP_TIMEOUT_SECS="$HTTP_TIMEOUT_SECS" \
RSYNC_TIMEOUT_SECS="$RSYNC_TIMEOUT_SECS" \
RUN_DURATION_S="$run_duration_s" \
DB_STATS_DURATION_S="$db_stats_duration_s" \
STATE_DURATION_S="$state_duration_s" \
TOTAL_DURATION_S="$total_duration_s" \
python3 - "$BASE_REPORT_JSON" "$DELTA_REPORT_JSON" "$BASE_RRDP_STATE_TSV" "$DELTA_RRDP_STATE_TSV" \
"$BASE_DB_STATS_TXT" "$DELTA_DB_STATS_TXT" "$BASE_META_JSON" "$DELTA_META_JSON" "$DELTA_ANALYSIS_MD" <<'PY'
# --- delta analysis report generator (runs as a bash heredoc script) ---
# argv: base/delta report JSON, base/delta rrdp-state TSV, base/delta db_stats
# text, base meta JSON (optional), delta meta JSON output, analysis MD output.
import json
import os
import sys
from collections import Counter, defaultdict
from datetime import datetime, timezone
from pathlib import Path
base_report_path = Path(sys.argv[1])
delta_report_path = Path(sys.argv[2])
base_state_path = Path(sys.argv[3])
delta_state_path = Path(sys.argv[4])
base_db_stats_path = Path(sys.argv[5])
delta_db_stats_path = Path(sys.argv[6])
# Kept as a plain string: the base meta file is optional and may not exist.
base_meta_path_s = sys.argv[7]
delta_meta_path = Path(sys.argv[8])
out_md_path = Path(sys.argv[9])
def load_json(p: Path):
    """Read and parse a JSON file.

    Robustness: some earlier writers accidentally appended the two literal
    characters backslash+n to the file; if plain parsing fails, strip that
    suffix and retry before propagating the error.
    """
    text = p.read_text(encoding="utf-8")
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        trimmed = text.strip()
        if trimmed.endswith("\\n"):
            trimmed = trimmed[:-2].rstrip()
        return json.loads(trimmed)
def load_optional_json(path_s: str):
    """Load JSON from `path_s` when it names an existing file; otherwise None."""
    if not path_s:
        return None
    candidate = Path(path_s)
    return load_json(candidate) if candidate.exists() else None
def parse_rrdp_state_tsv(p: Path):
    """Parse `rrdp_state_dump --view legacy-state` output into a dict.

    Expected layout:
        [legacy-state]
        notify_uri serial session_id
        <notify_uri> <serial> <session_id>

    Returns {notify_uri: (serial:int, session_id:str)}.
    Raises SystemExit on malformed data lines.
    """
    out = {}
    for line in p.read_text(encoding="utf-8").splitlines():
        line = line.strip()
        if not line or line.startswith("["):
            continue
        # Split on any whitespace run so both space- and tab-separated dumps
        # parse (the artifact is named *.tsv; the original single-space split
        # rejected tab-separated lines).
        parts = line.split()
        if parts == ["notify_uri", "serial", "session_id"]:
            continue  # column header
        if len(parts) != 3:
            raise SystemExit(f"invalid rrdp_state_dump line in {p}: {line!r}")
        uri, serial, session = parts
        out[uri] = (int(serial), session)
    return out
def parse_db_stats(p: Path):
    """Parse db_stats `key=value` lines; purely numeric values become ints."""
    stats = {}
    for raw in p.read_text(encoding="utf-8").splitlines():
        if "=" not in raw:
            continue  # headers / blank lines carry no pair
        key, _, value = raw.partition("=")
        key = key.strip()
        value = value.strip()
        stats[key] = int(value) if value.isdigit() else value
    return stats
def warnings_total(rep: dict) -> int:
    """Count warnings across the tree plus every publication point."""
    total = len(rep["tree"]["warnings"])
    for pp in rep["publication_points"]:
        total += len(pp["warnings"])
    return total
def report_summary(rep: dict) -> dict:
    """Condense an audit report into the headline metrics for the overview table."""
    pps = rep["publication_points"]
    notify_uris = {
        pp.get("rrdp_notification_uri")
        for pp in pps
        if pp.get("rrdp_notification_uri")
    }
    summary = {
        "validation_time": rep["meta"]["validation_time_rfc3339_utc"],
        "publication_points_processed": rep["tree"]["instances_processed"],
        "publication_points_failed": rep["tree"]["instances_failed"],
        "rrdp_repos_unique": len(notify_uris),
        "vrps": len(rep["vrps"]),
        "aspas": len(rep["aspas"]),
        "audit_publication_points": len(pps),
        "warnings_total": warnings_total(rep),
    }
    return summary
def count_repo_sync_failed(rep: dict) -> int:
    """Heuristically count repo-sync failure warnings.

    There is no structured counter in the audit report yet, so this matches
    conservatively on a few known message fragments to avoid false positives.
    """
    needles = ("repo sync failed", "rrdp fetch failed", "rsync fetch failed")

    def hit(msg: str) -> bool:
        lowered = msg.lower()
        return any(n in lowered for n in needles)

    tree_hits = sum(1 for w in rep["tree"]["warnings"] if hit(w.get("message", "")))
    pp_hits = sum(
        1
        for pp in rep["publication_points"]
        for w in pp.get("warnings", [])
        if hit(w.get("message", ""))
    )
    return tree_hits + pp_hits
def pp_manifest_sha(pp: dict) -> str:
    """Return the manifest's sha256 for a publication point ("" when absent).

    The audit format records the manifest as a synthetic object entry whose
    sha256 covers manifest_bytes; the first matching entry wins.
    """
    return next(
        (o["sha256_hex"] for o in pp["objects"] if o["kind"] == "manifest"),
        "",
    )
def pp_objects_by_uri(rep: dict):
    """Map rsync URI -> (sha256_hex, kind) across all publication points.

    A URI appearing in multiple points keeps the last occurrence, matching
    plain dict insertion semantics.
    """
    return {
        o["rsync_uri"]: (o["sha256_hex"], o["kind"])
        for pp in rep["publication_points"]
        for o in pp["objects"]
    }
def vrp_set(rep: dict):
    """Deduplicated set of (asn, prefix, max_length) triples from the report."""
    triples = set()
    for v in rep["vrps"]:
        triples.add((v["asn"], v["prefix"], v["max_length"]))
    return triples
def rfc_refs_str(w: dict) -> str:
    """Join a warning's RFC references with ", "; empty string when none."""
    refs = w.get("rfc_refs")
    if not refs:
        return ""
    return ", ".join(refs)
# Load both audit reports and the derived artifacts produced earlier.
base = load_json(base_report_path)
delta = load_json(delta_report_path)
base_sum = report_summary(base)
delta_sum = report_summary(delta)
base_db = parse_db_stats(base_db_stats_path)
delta_db = parse_db_stats(delta_db_stats_path)
base_state = parse_rrdp_state_tsv(base_state_path)
delta_state = parse_rrdp_state_tsv(delta_state_path)
base_meta = load_optional_json(base_meta_path_s)
download_stats = delta.get("download_stats") or {}
# Persist a machine-readable record of this delta run: inputs, timings (from
# the environment set by the wrapping shell script), and download stats.
delta_meta = {
    "recorded_at_utc": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
    "tal_url": os.environ["TAL_URL"],
    "base_db_dir": os.environ["BASE_DB_DIR"],
    "delta_db_dir": os.environ["DELTA_DB_DIR"],
    "base_report_json": str(base_report_path),
    "delta_report_json": str(delta_report_path),
    "delta_run_log": os.environ["DELTA_RUN_LOG"],
    "validation_time_arg": os.environ.get("VALIDATION_TIME_ARG",""),
    "http_timeout_secs": int(os.environ["HTTP_TIMEOUT_SECS"]),
    "rsync_timeout_secs": int(os.environ["RSYNC_TIMEOUT_SECS"]),
    "durations_secs": {
        "rpki_run": int(os.environ["RUN_DURATION_S"]),
        "db_stats_exact": int(os.environ["DB_STATS_DURATION_S"]),
        "rrdp_state_dump": int(os.environ["STATE_DURATION_S"]),
        "total_script": int(os.environ["TOTAL_DURATION_S"]),
    },
    "download_stats": download_stats,
}
delta_meta_path.write_text(json.dumps(delta_meta, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
# RRDP state changes
serial_changed = 0
session_changed = 0
serial_deltas = []
# Compare per-repo (serial, session) between the two state dumps; repos
# present on only one side are skipped here.
for uri, (old_serial, old_sess) in base_state.items():
    if uri not in delta_state:
        continue
    new_serial, new_sess = delta_state[uri]
    if new_serial != old_serial:
        serial_changed += 1
        serial_deltas.append((uri, old_serial, new_serial, new_serial - old_serial))
    if new_sess != old_sess:
        session_changed += 1
serial_deltas.sort(key=lambda x: x[3], reverse=True)
# Publication point diffs
base_pp = {pp["manifest_rsync_uri"]: pp for pp in base["publication_points"]}
delta_pp = {pp["manifest_rsync_uri"]: pp for pp in delta["publication_points"]}
base_keys = set(base_pp.keys())
delta_keys = set(delta_pp.keys())
new_pp = sorted(delta_keys - base_keys)
missing_pp = sorted(base_keys - delta_keys)
updated_pp = 0
unchanged_pp = 0
# A shared PP counts as "updated" when its manifest hash changed between runs.
for k in sorted(base_keys & delta_keys):
    if pp_manifest_sha(base_pp[k]) != pp_manifest_sha(delta_pp[k]):
        updated_pp += 1
    else:
        unchanged_pp += 1
# Cache usage + repo sync failure hints
def source_counts(rep: dict) -> Counter:
    """Tally publication points by their `source` field ("" when missing)."""
    return Counter(pp.get("source", "") for pp in rep["publication_points"])
# Per-run source tallies plus heuristic sync-failure counts for both runs.
base_sources = source_counts(base)
delta_sources = source_counts(delta)
base_repo_sync_failed = count_repo_sync_failed(base)
delta_repo_sync_failed = count_repo_sync_failed(delta)
def cache_reason_counts(rep: dict) -> Counter:
    """Aggregate why publication points fell back to the VCIR current-instance cache.

    Warning messages serve as the "reason" key; points with no warnings land
    in a fallback bucket so they stay visible in the report.
    """
    reasons = Counter()
    for pp in rep.get("publication_points", []):
        if pp.get("source") != "vcir_current_instance":
            continue
        warnings = pp.get("warnings", [])
        if not warnings:
            reasons["(no warnings recorded)"] += 1
        else:
            for w in warnings:
                reasons[w.get("message", "").strip() or "(empty warning message)"] += 1
    return reasons
base_cache_reasons = cache_reason_counts(base)
delta_cache_reasons = cache_reason_counts(delta)
# Object change stats (by rsync URI, sha256)
base_obj = pp_objects_by_uri(base)
delta_obj = pp_objects_by_uri(delta)
kind_stats = {k: {"added": 0, "changed": 0, "removed": 0} for k in ["manifest","crl","certificate","roa","aspa","other"]}
all_uris = set(base_obj.keys()) | set(delta_obj.keys())
for uri in all_uris:
    b = base_obj.get(uri)
    d = delta_obj.get(uri)
    # FIX: bucket kinds outside the preset list under "other" — previously an
    # unexpected kind string raised KeyError and aborted report generation.
    if b is None and d is not None:
        kind = d[1] if d[1] in kind_stats else "other"
        kind_stats[kind]["added"] += 1
    elif b is not None and d is None:
        kind = b[1] if b[1] in kind_stats else "other"
        kind_stats[kind]["removed"] += 1
    elif b[0] != d[0]:
        kind = d[1] if d[1] in kind_stats else "other"
        kind_stats[kind]["changed"] += 1
# VRP diff
base_v = vrp_set(base)
delta_v = vrp_set(delta)
added_v = delta_v - base_v
removed_v = base_v - delta_v
def fmt_db_stats(db: dict) -> str:
    """Render db_stats key/value pairs as Markdown bullets.

    Known keys come first in a fixed, meaningful order; any remaining keys
    follow alphabetically so nothing is silently dropped.
    """
    preferred = [
        "mode",
        "repository_view",
        "raw_by_hash",
        "vcir",
        "audit_rule_index",
        "rrdp_source",
        "rrdp_source_member",
        "rrdp_uri_owner",
        "rrdp_state",
        "raw_objects",
        "rrdp_object_index",
        "group_current_repository_view",
        "group_current_validation_state",
        "group_current_rrdp_state",
        "group_legacy_compatibility",
        "total",
        "sst_files",
    ]
    known = set(preferred)
    keys = [k for k in preferred if k in db]
    keys.extend(sorted(k for k in db if k not in known))
    bullets = [f"- `{k}={db[k]}`" for k in keys]
    return "\n".join(bullets) if bullets else "_(missing db_stats keys)_"
# Start assembling the Markdown analysis report.
lines = []
lines.append("# APNIC RRDP 增量同步验收manual_sync\n\n")
lines.append(f"时间戳:`{now}`UTC\n\n")
lines.append("## 复现信息\n\n")
lines.append(f"- base_report`{base_report_path}`\n")
lines.append(f"- delta_report`{delta_report_path}`\n")
lines.append(f"- base_db_stats`{base_db_stats_path}`\n")
lines.append(f"- delta_db_stats`{delta_db_stats_path}`\n")
lines.append(f"- base_rrdp_state`{base_state_path}`\n")
lines.append(f"- delta_rrdp_state`{delta_state_path}`\n\n")
lines.append("## 运行结果概览\n\n")
lines.append("| metric | base | delta |\n")
lines.append("|---|---:|---:|\n")
# Side-by-side headline metrics from report_summary() for both runs.
for k in [
    "validation_time",
    "publication_points_processed",
    "publication_points_failed",
    "rrdp_repos_unique",
    "vrps",
    "aspas",
    "audit_publication_points",
    "warnings_total",
]:
    lines.append(f"| {k} | {base_sum[k]} | {delta_sum[k]} |\n")
lines.append("\n")
def dur(meta: dict | None, key: str):
if not meta:
return None
return (meta.get("durations_secs") or {}).get(key)
# Duration comparison; the base side is "unknown" when no base meta exists.
base_rpki_run = dur(base_meta, "rpki_run")
delta_rpki_run = delta_meta["durations_secs"]["rpki_run"]
base_total = dur(base_meta, "total_script")
delta_total = delta_meta["durations_secs"]["total_script"]
lines.append("## 持续时间seconds\n\n")
lines.append("| step | base | delta |\n")
lines.append("|---|---:|---:|\n")
lines.append(f"| rpki_run | {base_rpki_run if base_rpki_run is not None else 'unknown'} | {delta_rpki_run} |\n")
lines.append(f"| total_script | {base_total if base_total is not None else 'unknown'} | {delta_total} |\n")
lines.append("\n")
# Only hint at a missing baseline when a meta path was actually supplied.
if base_meta is None and base_meta_path_s:
    lines.append(f"> 注:未能读取 base meta`{base_meta_path_s}`(文件不存在或不可读)。建议用 `full_sync.sh` 生成 baseline 以获得 base 时长对比。\n\n")
lines.append("RocksDB KV`db_stats --exact`\n\n")
lines.append("### 基线base\n\n")
lines.append(fmt_db_stats(base_db) + "\n\n")
lines.append("### 增量delta\n\n")
lines.append(fmt_db_stats(delta_db) + "\n\n")
lines.append("## RRDP 增量是否发生(基于 `rrdp_state` 变化)\n\n")
lines.append(f"- repo_total(base)={len(base_state)}\n")
lines.append(f"- repo_total(delta)={len(delta_state)}\n")
lines.append(f"- serial_changed={serial_changed}\n")
lines.append(f"- session_changed={session_changed}\n\n")
# Top serial growth; serial_deltas is already sorted descending by diff.
if serial_deltas:
    lines.append("serial 增长最大的 10 个 RRDP repoold -> new\n\n")
    for uri, old, new, diff in serial_deltas[:10]:
        lines.append(f"- `{uri}``{old} -> {new}`+{diff}\n")
    lines.append("\n")
lines.append("## 发布点Publication Point变化统计\n\n")
lines.append("以 `manifest_rsync_uri` 作为发布点 key对比 base vs delta\n\n")
lines.append(f"- base PP`{len(base_keys)}`\n")
lines.append(f"- delta PP`{len(delta_keys)}`\n")
lines.append(f"- `new_pp={len(new_pp)}`\n")
lines.append(f"- `missing_pp={len(missing_pp)}`\n")
lines.append(f"- `updated_pp={updated_pp}`\n")
lines.append(f"- `unchanged_pp={unchanged_pp}`\n\n")
lines.append("> 注:`new_pp/missing_pp/updated_pp` 会混入“遍历范围变化”的影响(例如 validation_time 不同、或 base 中存在更多失败 PP。\n\n")
lines.append("## fail fetch / VCIR 当前实例缓存复用情况\n\n")
lines.append(f"- repo sync failed启发式warning contains 'repo sync failed'/'rrdp fetch failed'/'rsync fetch failed'\n")
lines.append(f" - base`{base_repo_sync_failed}`\n")
lines.append(f" - delta`{delta_repo_sync_failed}`\n\n")
lines.append("- source 计数(按 `PublicationPointAudit.source`\n\n")
lines.append(f" - base`{dict(base_sources)}`\n")
lines.append(f" - delta`{dict(delta_sources)}`\n\n")
def render_cache_reasons(title: str, c: Counter) -> str:
    """Render one cache-reuse Counter as a Markdown fragment (top 10 reasons)."""
    if not c:
        return f"{title}`0`(未使用 VCIR 当前实例缓存复用)\n\n"
    parts = [f"{title}`{sum(c.values())}`\n\n"]
    parts.append("Top reasons按 warning message 聚合,可能一条 PP 有多条 warning\n\n")
    parts.extend(f"- `{n}` × {msg}\n" for msg, n in c.most_common(10))
    parts.append("\n")
    return "".join(parts)
lines.append(render_cache_reasons("- base `source=vcir_current_instance`", base_cache_reasons))
lines.append(render_cache_reasons("- delta `source=vcir_current_instance`", delta_cache_reasons))
lines.append("## 文件变更统计(按对象类型)\n\n")
lines.append("按 `ObjectAuditEntry.sha256_hex` 对比(同一 rsync URI 前后 hash 变化记为 `~changed`\n\n")
lines.append("| kind | added | changed | removed |\n")
lines.append("|---|---:|---:|---:|\n")
# Per-kind add/change/remove table from the object diff computed earlier.
for kind in ["manifest","crl","certificate","roa","aspa","other"]:
    st = kind_stats[kind]
    lines.append(f"| {kind} | {st['added']} | {st['changed']} | {st['removed']} |\n")
lines.append("\n")
lines.append("## VRP 影响(去重后集合 diff\n\n")
lines.append("以 `(asn, prefix, max_length)` 为 key\n\n")
lines.append(f"- base unique VRP`{len(base_v)}`\n")
lines.append(f"- delta unique VRP`{len(delta_v)}`\n")
lines.append(f"- `added={len(added_v)}`\n")
lines.append(f"- `removed={len(removed_v)}`\n")
lines.append(f"- `net={len(added_v) - len(removed_v)}`\n\n")
# Write the final Markdown and echo its path for the calling shell script.
out_md_path.write_text("".join(lines), encoding="utf-8")
print(out_md_path)
PY
# Final artifact listing for the operator (stderr, so stdout stays clean).
echo "== done ==" >&2
echo "artifacts:" >&2
echo "- delta db: $DELTA_DB_DIR" >&2
echo "- delta report: $DELTA_REPORT_JSON" >&2
echo "- delta run log: $DELTA_RUN_LOG" >&2
echo "- delta meta json: $DELTA_META_JSON" >&2
echo "- analysis md: $DELTA_ANALYSIS_MD" >&2
echo "- base state tsv: $BASE_RRDP_STATE_TSV" >&2
echo "- delta state tsv: $DELTA_RRDP_STATE_TSV" >&2

189
scripts/manual_sync/full_sync.sh Executable file
View File

@ -0,0 +1,189 @@
#!/usr/bin/env bash
set -euo pipefail
# Full sync + validation from a TAL URL (default: APNIC).
#
# Produces artifacts under OUT_DIR (default: target/live/manual_sync):
# - *_db_* RocksDB directory
# - *_report_*.json audit report
# - *_run_*.log stdout/stderr log (includes summary)
# - *_db_stats_*.txt db_stats --exact output
# - *_rrdp_state_*.tsv rrdp_state_dump --view legacy-state output
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Inputs (all overridable via the environment).
TAL_URL="${TAL_URL:-https://tal.apnic.net/tal-archive/apnic-rfc7730-https.tal}"
HTTP_TIMEOUT_SECS="${HTTP_TIMEOUT_SECS:-1800}"
RSYNC_TIMEOUT_SECS="${RSYNC_TIMEOUT_SECS:-1800}"
RSYNC_MIRROR_ROOT="${RSYNC_MIRROR_ROOT:-}"
VALIDATION_TIME="${VALIDATION_TIME:-}"
OUT_DIR="${OUT_DIR:-$ROOT_DIR/target/live/manual_sync}"
mkdir -p "$OUT_DIR"
# Every artifact of one run shares the same timestamped RUN_NAME prefix.
TS="$(date -u +%Y%m%dT%H%M%SZ)"
RUN_NAME="${RUN_NAME:-apnic_full_${TS}}"
DB_DIR="${DB_DIR:-$OUT_DIR/${RUN_NAME}_db}"
REPORT_JSON="${REPORT_JSON:-$OUT_DIR/${RUN_NAME}_report.json}"
RUN_LOG="${RUN_LOG:-$OUT_DIR/${RUN_NAME}_run.log}"
DB_STATS_TXT="${DB_STATS_TXT:-$OUT_DIR/${RUN_NAME}_db_stats.txt}"
RRDP_STATE_TSV="${RRDP_STATE_TSV:-$OUT_DIR/${RUN_NAME}_rrdp_state.tsv}"
RUN_META_JSON="${RUN_META_JSON:-$OUT_DIR/${RUN_NAME}_meta.json}"
SUMMARY_MD="${SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_summary.md}"
echo "== rpki manual full sync ==" >&2
echo "tal_url=$TAL_URL" >&2
echo "db=$DB_DIR" >&2
echo "report_json=$REPORT_JSON" >&2
echo "out_dir=$OUT_DIR" >&2
# Build the rpki command; optional flags are appended only when their
# corresponding env vars are non-empty.
cmd=(cargo run --release --bin rpki -- \
  --db "$DB_DIR" \
  --tal-url "$TAL_URL" \
  --http-timeout-secs "$HTTP_TIMEOUT_SECS" \
  --rsync-timeout-secs "$RSYNC_TIMEOUT_SECS" \
  --report-json "$REPORT_JSON")
if [[ -n "${RSYNC_MIRROR_ROOT}" ]]; then
  cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
fi
if [[ -n "${VALIDATION_TIME}" ]]; then
  cmd+=(--validation-time "$VALIDATION_TIME")
fi
script_start_s="$(date +%s)"
run_start_s="$(date +%s)"
# Run the sync; record the exact command line plus all output in the log.
(
  echo "# command:"
  printf '%q ' "${cmd[@]}"
  echo
  echo
  "${cmd[@]}"
) 2>&1 | tee "$RUN_LOG" >/dev/null
run_end_s="$(date +%s)"
run_duration_s="$((run_end_s - run_start_s))"
echo "== db_stats (exact) ==" >&2
db_stats_start_s="$(date +%s)"
cargo run --release --bin db_stats -- --db "$DB_DIR" --exact 2>&1 | tee "$DB_STATS_TXT" >/dev/null
db_stats_end_s="$(date +%s)"
db_stats_duration_s="$((db_stats_end_s - db_stats_start_s))"
echo "== rrdp_state_dump (legacy-state) ==" >&2
state_start_s="$(date +%s)"
cargo run --release --bin rrdp_state_dump -- --db "$DB_DIR" --view legacy-state >"$RRDP_STATE_TSV"
state_end_s="$(date +%s)"
state_duration_s="$((state_end_s - state_start_s))"
script_end_s="$(date +%s)"
total_duration_s="$((script_end_s - script_start_s))"
echo "== write run meta + summary ==" >&2
# Hand run context to the embedded Python generator via environment variables.
TAL_URL="$TAL_URL" \
DB_DIR="$DB_DIR" \
REPORT_JSON="$REPORT_JSON" \
RUN_LOG="$RUN_LOG" \
HTTP_TIMEOUT_SECS="$HTTP_TIMEOUT_SECS" \
RSYNC_TIMEOUT_SECS="$RSYNC_TIMEOUT_SECS" \
VALIDATION_TIME_ARG="$VALIDATION_TIME" \
RUN_DURATION_S="$run_duration_s" \
DB_STATS_DURATION_S="$db_stats_duration_s" \
STATE_DURATION_S="$state_duration_s" \
TOTAL_DURATION_S="$total_duration_s" \
python3 - "$REPORT_JSON" "$RUN_META_JSON" "$SUMMARY_MD" <<'PY'
# --- run meta + summary generator (runs as a bash heredoc script) ---
# argv: report JSON (input), meta JSON (output), summary Markdown (output).
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
report_path = Path(sys.argv[1])
meta_path = Path(sys.argv[2])
summary_path = Path(sys.argv[3])
rep = json.loads(report_path.read_text(encoding="utf-8"))
now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
download_stats = rep.get("download_stats") or {}
# Machine-readable record of this run: inputs, timings (from the env set by
# the wrapping shell script), and headline counts from the report.
meta = {
    "recorded_at_utc": now,
    "tal_url": os.environ["TAL_URL"],
    "db_dir": os.environ["DB_DIR"],
    "report_json": os.environ["REPORT_JSON"],
    "run_log": os.environ["RUN_LOG"],
    "validation_time_rfc3339_utc": rep["meta"]["validation_time_rfc3339_utc"],
    "http_timeout_secs": int(os.environ["HTTP_TIMEOUT_SECS"]),
    "rsync_timeout_secs": int(os.environ["RSYNC_TIMEOUT_SECS"]),
    "validation_time_arg": os.environ.get("VALIDATION_TIME_ARG",""),
    "durations_secs": {
        "rpki_run": int(os.environ["RUN_DURATION_S"]),
        "db_stats_exact": int(os.environ["DB_STATS_DURATION_S"]),
        "rrdp_state_dump": int(os.environ["STATE_DURATION_S"]),
        "total_script": int(os.environ["TOTAL_DURATION_S"]),
    },
    "counts": {
        "publication_points_processed": rep["tree"]["instances_processed"],
        "publication_points_failed": rep["tree"]["instances_failed"],
        "vrps": len(rep["vrps"]),
        "aspas": len(rep["aspas"]),
        "audit_publication_points": len(rep["publication_points"]),
    },
    "download_stats": download_stats,
}
meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
# Human-readable Markdown summary of the run.
# FIX: these strings previously used doubled backslashes ("\\n"), so
# summary.md contained the literal characters backslash+n instead of real
# newlines (the sibling delta script even works around that in load_json).
# Use real newline escapes so the Markdown actually has line breaks.
lines = []
lines.append("# Manual full sync summary\n\n")
lines.append(f"- recorded_at_utc: `{now}`\n")
lines.append(f"- tal_url: `{meta['tal_url']}`\n")
lines.append(f"- db: `{meta['db_dir']}`\n")
lines.append(f"- report_json: `{meta['report_json']}`\n")
lines.append(f"- validation_time: `{meta['validation_time_rfc3339_utc']}`\n\n")
lines.append("## Results\n\n")
lines.append("| metric | value |\n")
lines.append("|---|---:|\n")
for k in ["publication_points_processed","publication_points_failed","vrps","aspas","audit_publication_points"]:
    lines.append(f"| {k} | {meta['counts'][k]} |\n")
lines.append("\n")
lines.append("## Durations (seconds)\n\n")
lines.append("| step | seconds |\n")
lines.append("|---|---:|\n")
for k,v in meta["durations_secs"].items():
    lines.append(f"| {k} | {v} |\n")
lines.append("\n")
lines.append("## Download Stats\n\n")
lines.append("- raw events: `report_json.downloads`\n")
lines.append("- aggregated: `report_json.download_stats` (copied into meta.json)\n\n")
def fmt_u(v):
    # Optional numeric stats render as an empty table cell when missing.
    if v is None:
        return ""
    return str(v)
by_kind = download_stats.get("by_kind") or {}
lines.append("| kind | ok | fail | duration_ms_total | bytes_total | objects_count_total | objects_bytes_total |\n")
lines.append("|---|---:|---:|---:|---:|---:|---:|\n")
for kind in sorted(by_kind.keys()):
    st = by_kind[kind] or {}
    lines.append(
        f"| {kind} | {st.get('ok_total',0)} | {st.get('fail_total',0)} | {st.get('duration_ms_total',0)} | {fmt_u(st.get('bytes_total'))} | {fmt_u(st.get('objects_count_total'))} | {fmt_u(st.get('objects_bytes_total'))} |\n"
    )
lines.append("\n")
summary_path.write_text("".join(lines), encoding="utf-8")
print(summary_path)
PY
# Final artifact listing for the operator (stderr, so stdout stays clean).
echo "== done ==" >&2
echo "artifacts:" >&2
echo "- db: $DB_DIR" >&2
echo "- report: $REPORT_JSON" >&2
echo "- run log: $RUN_LOG" >&2
echo "- db stats: $DB_STATS_TXT" >&2
echo "- rrdp state: $RRDP_STATE_TSV" >&2
echo "- meta json: $RUN_META_JSON" >&2
echo "- summary md: $SUMMARY_MD" >&2

View File

@ -0,0 +1,268 @@
# Payload Replay Scripts
本目录提供基于本地 payload archive 的手工 replay 入口。
## `multi_rir_case_info.py`
用于从 multi-RIR bundle 中解析指定 `rir` 的输入路径、对照 CSV、fixture、以及 Routinator replay timing 基线。
示例:
```bash
python3 scripts/payload_replay/multi_rir_case_info.py \
--bundle-root ../../rpki/target/live/20260316-112341-multi-final3 \
--rir afrinic
```
也支持输出 shell 环境变量:
```bash
python3 scripts/payload_replay/multi_rir_case_info.py \
--bundle-root ../../rpki/target/live/20260316-112341-multi-final3 \
--rir afrinic \
--format env
```
## `run_multi_rir_replay_case.sh`
统一的 multi-RIR 入口。给定 `rir` 和模式后,它会自动选择该 RIR 的:
- snapshot/base replay 输入
- delta replay 输入
- 对照 CSV
- TAL / TA fixture
- trust anchor 名称
用法:
```bash
./scripts/payload_replay/run_multi_rir_replay_case.sh <rir> [describe|snapshot|delta|both]
```
示例:
```bash
./scripts/payload_replay/run_multi_rir_replay_case.sh afrinic describe
./scripts/payload_replay/run_multi_rir_replay_case.sh lacnic snapshot
./scripts/payload_replay/run_multi_rir_replay_case.sh arin delta
./scripts/payload_replay/run_multi_rir_replay_case.sh ripe both
```
脚本会自动:
- 从 multi-RIR bundle 中选择指定 RIR 的 snapshot/base 与 delta 输入
- 读取该 RIR 的 Routinator `base-replay` / `delta-replay` timing 基线
- 优先使用 `base-locks.json.validationTime` 与 `locks-delta.json.validationTime` 作为 replay `--validation-time`;若缺失才回退到 `timings/base-replay.json` 与 `timings/delta-replay.json` 的 `startedAt`
- 在 `target/live/multi_rir_replay_runs/<rir>/` 下生成:
- snapshot replay 产物
- delta replay 产物
- per-RIR 合并 case report含 correctness + timing compare
默认 bundle 根目录为:
- `../../rpki/target/live/20260316-112341-multi-final3`
也可以通过 `BUNDLE_ROOT` 覆盖。
## `run_apnic_snapshot_replay_profile.sh`
基于 multi-RIR bundle 中的 APNIC snapshot 输入,使用当前 replay 主流程执行一次带 `--analyze``--profile-cpu` 的离线 profile。
```bash
./scripts/payload_replay/run_apnic_snapshot_replay_profile.sh
```
作用:
- 使用 `APNIC` 的 snapshot/base replay 输入
- 自动开启:
- `--analyze`
- `--profile-cpu`
- 自动记录:
- replay wall-clock 时长
- Routinator baseline (`base-replay`)
- analyze 目录路径
- 生成:
- `report.json`
- `run.log`
- `meta.json`
- `summary.md`
- 以及 `target/live/analyze/<timestamp>/` 下的:
- `timing.json`
- `flamegraph.svg`
- `pprof.pb.gz`
支持:
- `DRY_RUN=1`:只打印命令,不真正执行
- `MAX_DEPTH` / `MAX_INSTANCES`:用于限定 replay 范围
- `PROFILE_RUN_ROOT`:覆盖 wrapper 产物输出目录
## `run_apnic_replay.sh`
默认使用:
- `tests/fixtures/tal/apnic-rfc7730-https.tal`
- `tests/fixtures/ta/apnic-ta.cer`
- `target/live/payload_replay/payload-archive`
- `target/live/payload_replay/locks.json`
运行:
```bash
./scripts/payload_replay/run_apnic_replay.sh
```
产物默认输出到:
- `target/live/payload_replay_runs/`
包含:
- replay DB 目录
- `report.json`
- `run.log`
- `meta.json`
- `summary.md`
## 环境变量
可覆盖:
- `TAL_PATH`
- `TA_PATH`
- `PAYLOAD_REPLAY_ARCHIVE`
- `PAYLOAD_REPLAY_LOCKS`
- `VALIDATION_TIME`
- `MAX_DEPTH`
- `MAX_INSTANCES`
- `OUT_DIR`
- `RUN_NAME`
- `DB_DIR`
- `REPORT_JSON`
- `RUN_LOG`
- `META_JSON`
- `SUMMARY_MD`
## 说明
- 该脚本依赖 `rpki` CLI 已支持:
- `--payload-replay-archive`
- `--payload-replay-locks`
- replay 模式必须搭配离线 TAL/TA 输入,不会去访问真实 RRDP / rsync 网络源。
## `report_to_routinator_csv.py`
把 `rpki` 生成的 `report.json` 转成 Routinator 风格的 VRP CSV:
```bash
python3 scripts/payload_replay/report_to_routinator_csv.py \
--report target/live/payload_replay_runs/<run>_report.json \
--out target/live/payload_replay_runs/<run>_vrps.csv \
--trust-anchor apnic
```
输出列为:
- `ASN`
- `IP Prefix`
- `Max Length`
- `Trust Anchor`
## `compare_with_routinator_record.sh`
把 ours 生成的 VRP CSV 与 Routinator 的 `record.csv` 做对比:
```bash
./scripts/payload_replay/compare_with_routinator_record.sh \
target/live/payload_replay_runs/<run>_vrps.csv \
target/live/payload_replay/record.csv
```
会产出:
- compare summary Markdown
- `only_in_ours.csv`
- `only_in_record.csv`
## `run_apnic_replay.sh` 现有额外产物
脚本现在除了 `report/meta/summary`,还会额外生成:
- `vrps.csv`
- 若 `ROUTINATOR_RECORD_CSV` 存在,则生成:
- compare summary
- `only_in_ours.csv`
- `only_in_record.csv`
## `run_apnic_delta_replay.sh`
使用 APNIC delta demo 数据集运行 base + delta replay
```bash
./scripts/payload_replay/run_apnic_delta_replay.sh
```
默认输入:
- `target/live/apnic_delta_demo/20260315-170223-autoplay/base-payload-archive`
- `target/live/apnic_delta_demo/20260315-170223-autoplay/base-locks.json`
- `target/live/apnic_delta_demo/20260315-170223-autoplay/payload-delta-archive`
- `target/live/apnic_delta_demo/20260315-170223-autoplay/locks-delta.json`
- `tests/fixtures/tal/apnic-rfc7730-https.tal`
- `tests/fixtures/ta/apnic-ta.cer`
输出目录默认:`target/live/payload_delta_replay_runs/`
## `run_apnic_delta_replay.sh` compare outputs
脚本现在在 delta replay 结束后还会额外生成:
- `vrps.csv`
- compare summary Markdown
- `only_in_ours.csv`
- `only_in_record.csv`
默认 compare 输入是:
- `target/live/apnic_delta_demo/20260315-170223-autoplay/record-delta.csv`
也可以通过环境变量覆盖:
- `TRUST_ANCHOR`
- `ROUTINATOR_RECORD_CSV`
- `VRPS_CSV`
- `COMPARE_SUMMARY_MD`
- `ONLY_IN_OURS_CSV`
- `ONLY_IN_RECORD_CSV`
## `write_multi_rir_case_report.py`
把某个 RIR 的 snapshot replay 与 delta replay 的 `meta.json`、compare summary 以及 Routinator timing 基线合并成一个 per-RIR Markdown/JSON 报告。
该脚本通常由 `run_multi_rir_replay_case.sh <rir> both` 自动调用。
## `run_multi_rir_replay_suite.sh`
顺序执行 5 个 RIR或环境变量 `RIRS` 指定的子集)的 `both` 模式,并最终生成 multi-RIR 汇总报告。
```bash
./scripts/payload_replay/run_multi_rir_replay_suite.sh
```
可覆盖环境变量:
- `BUNDLE_ROOT`
- `SUITE_OUT_DIR`
- `RIRS`
最终输出:
- `<suite_out_dir>/<rir>/<rir>_case_report.md`
- `<suite_out_dir>/multi_rir_summary.md`
- `<suite_out_dir>/multi_rir_summary.json`
## `write_multi_rir_summary.py`
汇总 5 个 RIR 的 per-RIR case report生成 correctness + timing 总表与几何平均比值。

View File

@ -0,0 +1,110 @@
#!/usr/bin/env bash
set -euo pipefail
# Compare our generated VRP CSV against a Routinator record.csv; the three
# output paths are optional and default to names derived from ours.csv.
if [[ $# -lt 2 || $# -gt 5 ]]; then
  echo "Usage: $0 <ours.csv> <record.csv> [summary.md] [only_in_ours.csv] [only_in_record.csv]" >&2
  exit 2
fi
OURS_CSV="$1"
RECORD_CSV="$2"
SUMMARY_MD="${3:-}"
ONLY_IN_OURS_CSV="${4:-}"
ONLY_IN_RECORD_CSV="${5:-}"
# Derive default output paths next to the ours CSV.
if [[ -z "$SUMMARY_MD" ]]; then
  SUMMARY_MD="$(dirname "$OURS_CSV")/$(basename "$OURS_CSV" .csv)_vs_routinator_summary.md"
fi
if [[ -z "$ONLY_IN_OURS_CSV" ]]; then
  ONLY_IN_OURS_CSV="$(dirname "$OURS_CSV")/$(basename "$OURS_CSV" .csv)_only_in_ours.csv"
fi
if [[ -z "$ONLY_IN_RECORD_CSV" ]]; then
  ONLY_IN_RECORD_CSV="$(dirname "$OURS_CSV")/$(basename "$OURS_CSV" .csv)_only_in_record.csv"
fi
python3 - "$OURS_CSV" "$RECORD_CSV" "$SUMMARY_MD" "$ONLY_IN_OURS_CSV" "$ONLY_IN_RECORD_CSV" <<'PY'
# --- VRP CSV diff (runs as a bash heredoc script) ---
# argv: ours CSV, record CSV, summary MD output, only-in-ours / only-in-record
# CSV outputs.
import csv
import ipaddress
import sys
from pathlib import Path
ours_csv = Path(sys.argv[1])
record_csv = Path(sys.argv[2])
summary_md = Path(sys.argv[3])
only_in_ours_csv = Path(sys.argv[4])
only_in_record_csv = Path(sys.argv[5])
def normalize_row(row: dict):
    """Canonicalize one VRP CSV row.

    Uppercases the ASN, renders the prefix in canonical network form (host
    bits masked off), and normalizes Max Length to a plain decimal string.
    """
    network = ipaddress.ip_network(row["IP Prefix"].strip(), strict=False)
    return {
        "ASN": row["ASN"].strip().upper(),
        "IP Prefix": str(network),
        "Max Length": str(int(row["Max Length"])),
        "Trust Anchor": row["Trust Anchor"].strip(),
    }
def read_rows(path: Path):
    """Load a VRP CSV and return every row normalized via normalize_row()."""
    with path.open(encoding="utf-8", newline="") as f:
        return [normalize_row(r) for r in csv.DictReader(f)]
def row_key(row: dict):
    """Canonical sort/identity key: (ASN, ip version, numeric net, plen, maxlen, TA)."""
    net = ipaddress.ip_network(row["IP Prefix"], strict=False)
    asn = row["ASN"]
    max_len = int(row["Max Length"])
    anchor = row["Trust Anchor"]
    return (asn, net.version, int(net.network_address), net.prefixlen, max_len, anchor)
def write_rows(path: Path, rows):
    """Write normalized VRP rows to CSV, creating parent directories as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    fieldnames = ["ASN", "IP Prefix", "Max Length", "Trust Anchor"]
    with path.open("w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)
# Normalize both inputs, key them, and compute the symmetric differences.
ours = read_rows(ours_csv)
record = read_rows(record_csv)
ours_map = {row_key(r): r for r in ours}
record_map = {row_key(r): r for r in record}
only_in_ours = [ours_map[k] for k in sorted(set(ours_map) - set(record_map))]
only_in_record = [record_map[k] for k in sorted(set(record_map) - set(ours_map))]
intersection = len(set(ours_map) & set(record_map))
write_rows(only_in_ours_csv, only_in_ours)
write_rows(only_in_record_csv, only_in_record)
summary_md.parent.mkdir(parents=True, exist_ok=True)
# Markdown summary with the headline counts.
summary = []
summary.append("# Replay vs Routinator VRP Compare\n\n")
summary.append(f"- ours_csv: `{ours_csv}`\n")
summary.append(f"- record_csv: `{record_csv}`\n")
summary.append(f"- only_in_ours_csv: `{only_in_ours_csv}`\n")
summary.append(f"- only_in_record_csv: `{only_in_record_csv}`\n\n")
summary.append("| metric | value |\n")
summary.append("|---|---:|\n")
summary.append(f"| ours_total | {len(ours_map)} |\n")
summary.append(f"| record_total | {len(record_map)} |\n")
summary.append(f"| intersection | {intersection} |\n")
summary.append(f"| only_in_ours | {len(only_in_ours)} |\n")
summary.append(f"| only_in_record | {len(only_in_record)} |\n")
summary_md.write_text("".join(summary), encoding="utf-8")
print(summary_md)
PY
# Report where the comparison artifacts landed (stderr keeps stdout clean).
echo "== compare complete ==" >&2
echo "- summary: $SUMMARY_MD" >&2
echo "- only_in_ours: $ONLY_IN_OURS_CSV" >&2
echo "- only_in_record: $ONLY_IN_RECORD_CSV" >&2

View File

@ -0,0 +1,172 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import shlex
import sys
from pathlib import Path
# Per-RIR TAL/TA fixture paths (relative to the repo root) plus the
# trust-anchor label used in generated CSVs.
RIR_CONFIG = {
    "afrinic": {
        "tal": "tests/fixtures/tal/afrinic.tal",
        "ta": "tests/fixtures/ta/afrinic-ta.cer",
        "trust_anchor": "afrinic",
    },
    "apnic": {
        "tal": "tests/fixtures/tal/apnic-rfc7730-https.tal",
        "ta": "tests/fixtures/ta/apnic-ta.cer",
        "trust_anchor": "apnic",
    },
    "arin": {
        "tal": "tests/fixtures/tal/arin.tal",
        "ta": "tests/fixtures/ta/arin-ta.cer",
        "trust_anchor": "arin",
    },
    "lacnic": {
        "tal": "tests/fixtures/tal/lacnic.tal",
        "ta": "tests/fixtures/ta/lacnic-ta.cer",
        "trust_anchor": "lacnic",
    },
    "ripe": {
        "tal": "tests/fixtures/tal/ripe-ncc.tal",
        "ta": "tests/fixtures/ta/ripe-ncc-ta.cer",
        "trust_anchor": "ripe",
    },
}
def default_repo_root() -> Path:
    """Repository root, assuming this file lives at <root>/scripts/payload_replay/."""
    here = Path(__file__).resolve()
    return here.parents[2]
def default_bundle_root(repo_root: Path) -> Path:
    """Default multi-RIR bundle location: a sibling `rpki` checkout's target/live dir."""
    rel = "../../rpki/target/live/20260316-112341-multi-final3"
    return (repo_root / rel).resolve()
def require_path(path: Path, kind: str) -> Path:
    """Return `path` if it exists as the requested kind ("dir" or "file").

    Exits the process via SystemExit with a descriptive message otherwise.
    """
    if kind == "dir":
        if not path.is_dir():
            raise SystemExit(f"missing directory: {path}")
    elif kind == "file":
        if not path.is_file():
            raise SystemExit(f"missing file: {path}")
    return path
def load_timing_summary(bundle_root: Path) -> dict:
    """Load <bundle_root>/timing-summary.json (SystemExit when missing)."""
    timing_path = require_path(bundle_root / "timing-summary.json", "file")
    content = timing_path.read_text(encoding="utf-8")
    return json.loads(content)
def load_json(path: Path) -> dict:
    """Parse a required JSON file, exiting via require_path when it is absent."""
    checked = require_path(path, "file")
    return json.loads(checked.read_text(encoding="utf-8"))
def lock_validation_time(lock_obj: dict, fallback_started_at: str) -> str:
    """Prefer the lock file's validationTime (either key style); else startedAt."""
    for key in ("validationTime", "validation_time"):
        value = lock_obj.get(key)
        if value:
            return value
    return fallback_started_at
def build_case(bundle_root: Path, repo_root: Path, rir: str) -> dict:
    """Resolve every input path, fixture, and timing baseline for one RIR case.

    Validates that all expected bundle artifacts exist (SystemExit otherwise)
    and returns a flat dict consumed by emit_env() / the JSON output.
    """
    if rir not in RIR_CONFIG:
        raise SystemExit(
            f"unsupported rir: {rir}; expected one of: {', '.join(sorted(RIR_CONFIG))}"
        )
    rir_root = require_path(bundle_root / rir, "dir")
    cfg = RIR_CONFIG[rir]
    # Routinator timing baselines live at the bundle root, keyed by RIR name.
    timing_summary = load_timing_summary(bundle_root)
    if rir not in timing_summary:
        raise SystemExit(f"timing-summary.json missing entry for rir: {rir}")
    timing_entry = timing_summary[rir]
    durations = timing_entry.get("durations") or {}
    base_timing = require_path(rir_root / "timings" / "base-replay.json", "file")
    delta_timing = require_path(rir_root / "timings" / "delta-replay.json", "file")
    base_timing_obj = json.loads(base_timing.read_text(encoding="utf-8"))
    delta_timing_obj = json.loads(delta_timing.read_text(encoding="utf-8"))
    base_locks_obj = load_json(rir_root / "base-locks.json")
    delta_locks_obj = load_json(rir_root / "locks-delta.json")
    # Every path is validated via require_path so a broken bundle fails fast
    # with a clear message instead of surfacing later in a replay run.
    case = {
        "bundle_root": str(bundle_root),
        "repo_root": str(repo_root),
        "rir": rir,
        "trust_anchor": cfg["trust_anchor"],
        "rir_root": str(rir_root),
        "base_archive": str(require_path(rir_root / "base-payload-archive", "dir")),
        "base_locks": str(require_path(rir_root / "base-locks.json", "file")),
        "base_vrps_csv": str(require_path(rir_root / "base-vrps.csv", "file")),
        "delta_archive": str(require_path(rir_root / "payload-delta-archive", "dir")),
        "delta_locks": str(require_path(rir_root / "locks-delta.json", "file")),
        "delta_record_csv": str(require_path(rir_root / "record-delta.csv", "file")),
        "replay_delta_csv": str(require_path(rir_root / "replay-delta.csv", "file")),
        "verification_json": str(require_path(rir_root / "verification.json", "file")),
        "readme": str(require_path(rir_root / "README.md", "file")),
        "timings_dir": str(require_path(rir_root / "timings", "dir")),
        "base_timing_json": str(base_timing),
        "delta_timing_json": str(delta_timing),
        "tal_path": str(require_path(repo_root / cfg["tal"], "file")),
        "ta_path": str(require_path(repo_root / cfg["ta"], "file")),
        # Lock-file validationTime wins; timing startedAt is the fallback.
        "validation_times": {
            "snapshot": lock_validation_time(base_locks_obj, base_timing_obj["startedAt"]),
            "delta": lock_validation_time(delta_locks_obj, delta_timing_obj["startedAt"]),
        },
        "timing_started_at": {
            "snapshot_replay": base_timing_obj["startedAt"],
            "delta_replay": delta_timing_obj["startedAt"],
        },
        "routinator_timings": {
            "base_replay_seconds": float(durations["base-replay"]),
            "delta_replay_seconds": float(durations["delta-replay"]),
        },
    }
    return case
def emit_env(case: dict) -> str:
    """Render the resolved case as shell `export` lines, one variable per line.

    The output is meant to be consumed via `eval "$(... --format env)"` by the
    bash driver scripts, so every value is shell-quoted.
    """
    # (environment variable, value) pairs in the exact emission order the
    # drivers expect; base archive/locks are exported under two names because
    # the snapshot and delta replay scripts use different variable names.
    pairs = [
        ("BUNDLE_ROOT", case["bundle_root"]),
        ("RIR", case["rir"]),
        ("TRUST_ANCHOR", case["trust_anchor"]),
        ("RIR_ROOT", case["rir_root"]),
        ("TAL_PATH", case["tal_path"]),
        ("TA_PATH", case["ta_path"]),
        ("PAYLOAD_REPLAY_ARCHIVE", case["base_archive"]),
        ("PAYLOAD_REPLAY_LOCKS", case["base_locks"]),
        ("ROUTINATOR_BASE_RECORD_CSV", case["base_vrps_csv"]),
        ("PAYLOAD_BASE_ARCHIVE", case["base_archive"]),
        ("PAYLOAD_BASE_LOCKS", case["base_locks"]),
        ("PAYLOAD_DELTA_ARCHIVE", case["delta_archive"]),
        ("PAYLOAD_DELTA_LOCKS", case["delta_locks"]),
        ("ROUTINATOR_DELTA_RECORD_CSV", case["delta_record_csv"]),
        ("SNAPSHOT_VALIDATION_TIME", case["validation_times"]["snapshot"]),
        ("DELTA_VALIDATION_TIME", case["validation_times"]["delta"]),
        ("ROUTINATOR_BASE_REPLAY_SECONDS", str(case["routinator_timings"]["base_replay_seconds"])),
        ("ROUTINATOR_DELTA_REPLAY_SECONDS", str(case["routinator_timings"]["delta_replay_seconds"])),
    ]
    lines = []
    for key, value in pairs:
        # shlex.quote keeps paths containing spaces/metacharacters eval-safe.
        lines.append(f"export {key}={shlex.quote(value)}")
    return "\n".join(lines)
def main() -> int:
    """CLI entry point: resolve one RIR case from a replay bundle and print it.

    Output is either the full case as JSON (default) or shell `export` lines
    suitable for `eval` in the bash drivers.
    """
    parser = argparse.ArgumentParser(
        description="Resolve one RIR case inside a multi-RIR replay bundle"
    )
    parser.add_argument("--bundle-root", type=Path, default=None)
    parser.add_argument("--repo-root", type=Path, default=None)
    parser.add_argument("--rir", required=True, choices=sorted(RIR_CONFIG))
    parser.add_argument("--format", choices=["json", "env"], default="json")
    args = parser.parse_args()

    # Fall back to repository-derived defaults when no roots were supplied.
    repo_root = (args.repo_root if args.repo_root else default_repo_root()).resolve()
    bundle_root = (args.bundle_root if args.bundle_root else default_bundle_root(repo_root)).resolve()
    case = build_case(bundle_root, repo_root, args.rir)

    if args.format == "json":
        print(json.dumps(case, ensure_ascii=False, indent=2))
    else:
        print(emit_env(case))
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,57 @@
#!/usr/bin/env python3
import argparse
import csv
import ipaddress
import json
from pathlib import Path
def parse_args() -> argparse.Namespace:
    """Parse CLI flags for the report.json -> Routinator CSV converter."""
    parser = argparse.ArgumentParser(
        description="Convert rpki report.json VRPs into Routinator-compatible CSV"
    )
    parser.add_argument("--report", required=True, help="path to rpki report.json")
    parser.add_argument("--out", required=True, help="output CSV path")
    # Free-form label written into the last CSV column for every row.
    parser.add_argument(
        "--trust-anchor",
        default="unknown",
        help="Trust Anchor column value (default: unknown)",
    )
    return parser.parse_args()
def sort_key(vrp: dict):
    """Sort key for one VRP dict: ASN, IP family, numeric prefix, length, max-length.

    Sorting numerically (rather than lexically on the prefix string) matches
    Routinator's CSV ordering so the outputs diff cleanly.
    """
    net = ipaddress.ip_network(vrp["prefix"], strict=False)
    asn = int(vrp["asn"])
    max_len = int(vrp["max_length"])
    return (asn, net.version, int(net.network_address), net.prefixlen, max_len)
def main() -> int:
    """Read report.json, sort its VRPs, and write a Routinator-compatible CSV."""
    args = parse_args()
    report = json.loads(Path(args.report).read_text(encoding="utf-8"))
    # Missing/None "vrps" is treated as an empty list.
    vrps = sorted(report.get("vrps") or [], key=sort_key)

    out_path = Path(args.out)
    out_path.parent.mkdir(parents=True, exist_ok=True)
    with out_path.open("w", encoding="utf-8", newline="") as handle:
        writer = csv.writer(handle)
        writer.writerow(["ASN", "IP Prefix", "Max Length", "Trust Anchor"])
        writer.writerows(
            [f"AS{vrp['asn']}", vrp["prefix"], vrp["max_length"], args.trust_anchor]
            for vrp in vrps
        )
    print(out_path)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,168 @@
#!/usr/bin/env bash
# Replay a recorded base snapshot plus one delta through the rpki binary, write
# per-run metadata and a Markdown summary, convert the resulting VRPs to
# Routinator CSV format, and (when a record CSV exists) diff against it.
set -euo pipefail
# Repository root is two directories above this script; run everything from there.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# --- Inputs: every variable below is overridable from the environment ---
DELTA_ROOT="${DELTA_ROOT:-$ROOT_DIR/target/live/apnic_delta_demo/20260315-170223-autoplay}"
TAL_PATH="${TAL_PATH:-$ROOT_DIR/tests/fixtures/tal/apnic-rfc7730-https.tal}"
TA_PATH="${TA_PATH:-$ROOT_DIR/tests/fixtures/ta/apnic-ta.cer}"
PAYLOAD_BASE_ARCHIVE="${PAYLOAD_BASE_ARCHIVE:-$DELTA_ROOT/base-payload-archive}"
PAYLOAD_BASE_LOCKS="${PAYLOAD_BASE_LOCKS:-$DELTA_ROOT/base-locks.json}"
PAYLOAD_DELTA_ARCHIVE="${PAYLOAD_DELTA_ARCHIVE:-$DELTA_ROOT/payload-delta-archive}"
PAYLOAD_DELTA_LOCKS="${PAYLOAD_DELTA_LOCKS:-$DELTA_ROOT/locks-delta.json}"
VALIDATION_TIME="${VALIDATION_TIME:-}"
PAYLOAD_BASE_VALIDATION_TIME="${PAYLOAD_BASE_VALIDATION_TIME:-}"
TRUST_ANCHOR="${TRUST_ANCHOR:-apnic}"
ROUTINATOR_RECORD_CSV="${ROUTINATOR_RECORD_CSV:-$DELTA_ROOT/record-delta.csv}"
MAX_DEPTH="${MAX_DEPTH:-}"
MAX_INSTANCES="${MAX_INSTANCES:-}"
OUT_DIR="${OUT_DIR:-$ROOT_DIR/target/live/payload_delta_replay_runs}"
mkdir -p "$OUT_DIR"
# Derive the base (snapshot) validation time from the base locks file when the
# caller did not supply one; empty output means the locks carry no timestamp.
if [[ -z "$PAYLOAD_BASE_VALIDATION_TIME" ]]; then
PAYLOAD_BASE_VALIDATION_TIME="$(python3 - "$PAYLOAD_BASE_LOCKS" <<'LOCKPY'
import json, sys
from pathlib import Path
path = Path(sys.argv[1])
data = json.loads(path.read_text(encoding='utf-8'))
# Accept either camelCase or snake_case key spellings.
print(data.get('validationTime') or data.get('validation_time') or '')
LOCKPY
)"
fi
# Derive the delta validation time from the delta locks file, with a fixed
# fallback so the replay stays deterministic even without a recorded time.
if [[ -z "$VALIDATION_TIME" ]]; then
VALIDATION_TIME="$(python3 - "$PAYLOAD_DELTA_LOCKS" <<'LOCKPY'
import json, sys
from pathlib import Path
path = Path(sys.argv[1])
data = json.loads(path.read_text(encoding='utf-8'))
print(data.get('validationTime') or data.get('validation_time') or '2026-03-15T10:00:00Z')
LOCKPY
)"
fi
# --- Output locations, all derived from a timestamped run name ---
TS="$(date -u +%Y%m%dT%H%M%SZ)"
RUN_NAME="${RUN_NAME:-apnic_delta_replay_${TS}}"
DB_DIR="${DB_DIR:-$OUT_DIR/${RUN_NAME}_db}"
REPORT_JSON="${REPORT_JSON:-$OUT_DIR/${RUN_NAME}_report.json}"
RUN_LOG="${RUN_LOG:-$OUT_DIR/${RUN_NAME}_run.log}"
META_JSON="${META_JSON:-$OUT_DIR/${RUN_NAME}_meta.json}"
SUMMARY_MD="${SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_summary.md}"
VRPS_CSV="${VRPS_CSV:-$OUT_DIR/${RUN_NAME}_vrps.csv}"
COMPARE_SUMMARY_MD="${COMPARE_SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_compare_summary.md}"
ONLY_IN_OURS_CSV="${ONLY_IN_OURS_CSV:-$OUT_DIR/${RUN_NAME}_only_in_ours.csv}"
ONLY_IN_RECORD_CSV="${ONLY_IN_RECORD_CSV:-$OUT_DIR/${RUN_NAME}_only_in_record.csv}"
# Build the invocation as an array so quoted paths survive word splitting.
# NOTE(review): PAYLOAD_BASE_VALIDATION_TIME is computed above but only recorded
# in meta.json — it is never passed to the command. Confirm that is intentional.
cmd=(cargo run --release --bin rpki --
--db "$DB_DIR"
--tal-path "$TAL_PATH"
--ta-path "$TA_PATH"
--payload-base-archive "$PAYLOAD_BASE_ARCHIVE"
--payload-base-locks "$PAYLOAD_BASE_LOCKS"
--payload-delta-archive "$PAYLOAD_DELTA_ARCHIVE"
--payload-delta-locks "$PAYLOAD_DELTA_LOCKS"
--validation-time "$VALIDATION_TIME"
--report-json "$REPORT_JSON")
# Optional limits are only appended when the caller set them.
if [[ -n "$MAX_DEPTH" ]]; then
cmd+=(--max-depth "$MAX_DEPTH")
fi
if [[ -n "$MAX_INSTANCES" ]]; then
cmd+=(--max-instances "$MAX_INSTANCES")
fi
# Run the command, capturing both the echoed command line and all output into
# the run log; wall time is measured around the whole subshell.
run_start_s="$(date +%s)"
(
echo "# command:"
printf '%q ' "${cmd[@]}"
echo
echo
"${cmd[@]}"
) 2>&1 | tee "$RUN_LOG" >/dev/null
run_end_s="$(date +%s)"
run_duration_s="$((run_end_s - run_start_s))"
# Hand the run context to an inline Python script that writes meta.json and the
# Markdown summary from the produced report.json.
PAYLOAD_BASE_ARCHIVE="$PAYLOAD_BASE_ARCHIVE" \
PAYLOAD_BASE_LOCKS="$PAYLOAD_BASE_LOCKS" \
PAYLOAD_DELTA_ARCHIVE="$PAYLOAD_DELTA_ARCHIVE" \
PAYLOAD_DELTA_LOCKS="$PAYLOAD_DELTA_LOCKS" \
PAYLOAD_BASE_VALIDATION_TIME="$PAYLOAD_BASE_VALIDATION_TIME" \
DB_DIR="$DB_DIR" \
REPORT_JSON="$REPORT_JSON" \
RUN_LOG="$RUN_LOG" \
VALIDATION_TIME="$VALIDATION_TIME" \
RUN_DURATION_S="$run_duration_s" \
python3 - "$REPORT_JSON" "$META_JSON" "$SUMMARY_MD" <<'PY'
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
report_path = Path(sys.argv[1])
meta_path = Path(sys.argv[2])
summary_path = Path(sys.argv[3])
rep = json.loads(report_path.read_text(encoding='utf-8'))
now = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
# meta.json: provenance (inputs, outputs, times) plus headline counts.
meta = {
'recorded_at_utc': now,
'payload_base_archive': os.environ['PAYLOAD_BASE_ARCHIVE'],
'payload_base_locks': os.environ['PAYLOAD_BASE_LOCKS'],
'payload_delta_archive': os.environ['PAYLOAD_DELTA_ARCHIVE'],
'payload_delta_locks': os.environ['PAYLOAD_DELTA_LOCKS'],
'db_dir': os.environ['DB_DIR'],
'report_json': os.environ['REPORT_JSON'],
'run_log': os.environ['RUN_LOG'],
'validation_time_arg': os.environ['VALIDATION_TIME'],
'base_validation_time_arg': os.environ.get('PAYLOAD_BASE_VALIDATION_TIME') or os.environ['VALIDATION_TIME'],
'durations_secs': {'rpki_run': int(os.environ['RUN_DURATION_S'])},
'counts': {
'publication_points_processed': rep['tree']['instances_processed'],
'publication_points_failed': rep['tree']['instances_failed'],
'vrps': len(rep['vrps']),
'aspas': len(rep['aspas']),
'audit_publication_points': len(rep['publication_points']),
},
}
meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2)+'\n', encoding='utf-8')
# summary.md: input paths as a bullet list, then counts and durations as tables.
summary = []
summary.append('# Payload Delta Replay Summary\n\n')
for key in ['payload_base_archive','payload_base_locks','payload_delta_archive','payload_delta_locks','db_dir','report_json','base_validation_time_arg','validation_time_arg']:
summary.append(f'- {key}: `{meta[key]}`\n')
summary.append('\n## Results\n\n| metric | value |\n|---|---:|\n')
for k,v in meta['counts'].items():
summary.append(f'| {k} | {v} |\n')
summary.append('\n## Durations\n\n| step | seconds |\n|---|---:|\n')
for k,v in meta['durations_secs'].items():
summary.append(f'| {k} | {v} |\n')
summary_path.write_text(''.join(summary), encoding='utf-8')
print(summary_path)
PY
# Convert our report's VRPs into Routinator CSV form for diffing.
python3 scripts/payload_replay/report_to_routinator_csv.py \
--report "$REPORT_JSON" \
--out "$VRPS_CSV" \
--trust-anchor "$TRUST_ANCHOR" >/dev/null
# Only diff when a recorded Routinator CSV exists for this delta.
if [[ -f "$ROUTINATOR_RECORD_CSV" ]]; then
./scripts/payload_replay/compare_with_routinator_record.sh \
"$VRPS_CSV" \
"$ROUTINATOR_RECORD_CSV" \
"$COMPARE_SUMMARY_MD" \
"$ONLY_IN_OURS_CSV" \
"$ONLY_IN_RECORD_CSV" >/dev/null
fi
# Human-facing pointers to every artifact, kept on stderr so stdout stays clean.
echo "== payload delta replay run complete ==" >&2
echo "- db: $DB_DIR" >&2
echo "- report: $REPORT_JSON" >&2
echo "- run log: $RUN_LOG" >&2
echo "- meta json: $META_JSON" >&2
echo "- summary md: $SUMMARY_MD" >&2
echo "- vrps csv: $VRPS_CSV" >&2
if [[ -f "$COMPARE_SUMMARY_MD" ]]; then
echo "- compare summary: $COMPARE_SUMMARY_MD" >&2
echo "- only in ours: $ONLY_IN_OURS_CSV" >&2
echo "- only in record: $ONLY_IN_RECORD_CSV" >&2
fi

View File

@ -0,0 +1,161 @@
#!/usr/bin/env bash
# Replay a recorded payload snapshot through the rpki binary, write per-run
# metadata and a Markdown summary, convert the VRPs to Routinator CSV format,
# and (when a record CSV exists) diff against it.
set -euo pipefail
# Repository root is two directories above this script; run everything from there.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# --- Inputs: every variable below is overridable from the environment ---
TAL_PATH="${TAL_PATH:-$ROOT_DIR/tests/fixtures/tal/apnic-rfc7730-https.tal}"
TA_PATH="${TA_PATH:-$ROOT_DIR/tests/fixtures/ta/apnic-ta.cer}"
PAYLOAD_REPLAY_ARCHIVE="${PAYLOAD_REPLAY_ARCHIVE:-$ROOT_DIR/target/live/payload_replay/payload-archive}"
PAYLOAD_REPLAY_LOCKS="${PAYLOAD_REPLAY_LOCKS:-$ROOT_DIR/target/live/payload_replay/locks.json}"
VALIDATION_TIME="${VALIDATION_TIME:-}"
TRUST_ANCHOR="${TRUST_ANCHOR:-apnic}"
ROUTINATOR_RECORD_CSV="${ROUTINATOR_RECORD_CSV:-$ROOT_DIR/target/live/payload_replay/record.csv}"
MAX_DEPTH="${MAX_DEPTH:-}"
MAX_INSTANCES="${MAX_INSTANCES:-}"
OUT_DIR="${OUT_DIR:-$ROOT_DIR/target/live/payload_replay_runs}"
mkdir -p "$OUT_DIR"
# Derive the validation time from the locks file when the caller did not supply
# one; a fixed fallback keeps the replay deterministic.
if [[ -z "$VALIDATION_TIME" ]]; then
VALIDATION_TIME="$(python3 - "$PAYLOAD_REPLAY_LOCKS" <<'LOCKPY'
import json, sys
from pathlib import Path
path = Path(sys.argv[1])
data = json.loads(path.read_text(encoding='utf-8'))
# Accept either camelCase or snake_case key spellings.
print(data.get('validationTime') or data.get('validation_time') or '2026-03-13T02:30:00Z')
LOCKPY
)"
fi
# --- Output locations, all derived from a timestamped run name ---
TS="$(date -u +%Y%m%dT%H%M%SZ)"
RUN_NAME="${RUN_NAME:-apnic_replay_${TS}}"
DB_DIR="${DB_DIR:-$OUT_DIR/${RUN_NAME}_db}"
REPORT_JSON="${REPORT_JSON:-$OUT_DIR/${RUN_NAME}_report.json}"
RUN_LOG="${RUN_LOG:-$OUT_DIR/${RUN_NAME}_run.log}"
META_JSON="${META_JSON:-$OUT_DIR/${RUN_NAME}_meta.json}"
SUMMARY_MD="${SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_summary.md}"
VRPS_CSV="${VRPS_CSV:-$OUT_DIR/${RUN_NAME}_vrps.csv}"
COMPARE_SUMMARY_MD="${COMPARE_SUMMARY_MD:-$OUT_DIR/${RUN_NAME}_compare_summary.md}"
ONLY_IN_OURS_CSV="${ONLY_IN_OURS_CSV:-$OUT_DIR/${RUN_NAME}_only_in_ours.csv}"
ONLY_IN_RECORD_CSV="${ONLY_IN_RECORD_CSV:-$OUT_DIR/${RUN_NAME}_only_in_record.csv}"
# Build the invocation as an array so quoted paths survive word splitting.
cmd=(cargo run --release --bin rpki --
--db "$DB_DIR"
--tal-path "$TAL_PATH"
--ta-path "$TA_PATH"
--payload-replay-archive "$PAYLOAD_REPLAY_ARCHIVE"
--payload-replay-locks "$PAYLOAD_REPLAY_LOCKS"
--validation-time "$VALIDATION_TIME"
--report-json "$REPORT_JSON")
# Optional limits are only appended when the caller set them.
if [[ -n "$MAX_DEPTH" ]]; then
cmd+=(--max-depth "$MAX_DEPTH")
fi
if [[ -n "$MAX_INSTANCES" ]]; then
cmd+=(--max-instances "$MAX_INSTANCES")
fi
# Run the command, capturing the echoed command line plus all output in the
# run log; wall time is measured around the whole subshell.
run_start_s="$(date +%s)"
(
echo "# command:"
printf '%q ' "${cmd[@]}"
echo
echo
"${cmd[@]}"
) 2>&1 | tee "$RUN_LOG" >/dev/null
run_end_s="$(date +%s)"
run_duration_s="$((run_end_s - run_start_s))"
# Hand the run context to an inline Python script that writes meta.json and the
# Markdown summary from the produced report.json.
TAL_PATH="$TAL_PATH" \
TA_PATH="$TA_PATH" \
PAYLOAD_REPLAY_ARCHIVE="$PAYLOAD_REPLAY_ARCHIVE" \
PAYLOAD_REPLAY_LOCKS="$PAYLOAD_REPLAY_LOCKS" \
DB_DIR="$DB_DIR" \
REPORT_JSON="$REPORT_JSON" \
RUN_LOG="$RUN_LOG" \
VALIDATION_TIME="$VALIDATION_TIME" \
RUN_DURATION_S="$run_duration_s" \
python3 - "$REPORT_JSON" "$META_JSON" "$SUMMARY_MD" <<'PY'
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
report_path = Path(sys.argv[1])
meta_path = Path(sys.argv[2])
summary_path = Path(sys.argv[3])
rep = json.loads(report_path.read_text(encoding="utf-8"))
now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
# meta.json: provenance (inputs, outputs, times) plus headline counts.
meta = {
"recorded_at_utc": now,
"tal_path": os.environ["TAL_PATH"],
"ta_path": os.environ["TA_PATH"],
"payload_replay_archive": os.environ["PAYLOAD_REPLAY_ARCHIVE"],
"payload_replay_locks": os.environ["PAYLOAD_REPLAY_LOCKS"],
"db_dir": os.environ["DB_DIR"],
"report_json": os.environ["REPORT_JSON"],
"run_log": os.environ["RUN_LOG"],
"validation_time_arg": os.environ["VALIDATION_TIME"],
"durations_secs": {
"rpki_run": int(os.environ["RUN_DURATION_S"]),
},
"counts": {
"publication_points_processed": rep["tree"]["instances_processed"],
"publication_points_failed": rep["tree"]["instances_failed"],
"vrps": len(rep["vrps"]),
"aspas": len(rep["aspas"]),
"audit_publication_points": len(rep["publication_points"]),
},
}
meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
# summary.md: input paths as a bullet list, then counts and durations as tables.
summary = []
summary.append("# Payload Replay Summary\n\n")
summary.append(f"- recorded_at_utc: `{now}`\n")
summary.append(f"- tal_path: `{meta['tal_path']}`\n")
summary.append(f"- ta_path: `{meta['ta_path']}`\n")
summary.append(f"- payload_replay_archive: `{meta['payload_replay_archive']}`\n")
summary.append(f"- payload_replay_locks: `{meta['payload_replay_locks']}`\n")
summary.append(f"- db: `{meta['db_dir']}`\n")
summary.append(f"- report_json: `{meta['report_json']}`\n")
summary.append(f"- validation_time_arg: `{meta['validation_time_arg']}`\n\n")
summary.append("## Results\n\n")
summary.append("| metric | value |\n")
summary.append("|---|---:|\n")
for k, v in meta["counts"].items():
summary.append(f"| {k} | {v} |\n")
summary.append("\n## Durations\n\n")
summary.append("| step | seconds |\n")
summary.append("|---|---:|\n")
for k, v in meta["durations_secs"].items():
summary.append(f"| {k} | {v} |\n")
summary_path.write_text("".join(summary), encoding="utf-8")
print(summary_path)
PY
# Convert our report's VRPs into Routinator CSV form for diffing.
python3 scripts/payload_replay/report_to_routinator_csv.py \
--report "$REPORT_JSON" \
--out "$VRPS_CSV" \
--trust-anchor "$TRUST_ANCHOR" >/dev/null
# Only diff when a recorded Routinator CSV exists for this snapshot.
if [[ -f "$ROUTINATOR_RECORD_CSV" ]]; then
./scripts/payload_replay/compare_with_routinator_record.sh \
"$VRPS_CSV" \
"$ROUTINATOR_RECORD_CSV" \
"$COMPARE_SUMMARY_MD" \
"$ONLY_IN_OURS_CSV" \
"$ONLY_IN_RECORD_CSV" >/dev/null
fi
# Human-facing pointers to every artifact, kept on stderr so stdout stays clean.
echo "== payload replay run complete ==" >&2
echo "- db: $DB_DIR" >&2
echo "- report: $REPORT_JSON" >&2
echo "- run log: $RUN_LOG" >&2
echo "- meta json: $META_JSON" >&2
echo "- summary md: $SUMMARY_MD" >&2
echo "- vrps csv: $VRPS_CSV" >&2
if [[ -f "$COMPARE_SUMMARY_MD" ]]; then
echo "- compare summary: $COMPARE_SUMMARY_MD" >&2
echo "- only in ours: $ONLY_IN_OURS_CSV" >&2
echo "- only in record: $ONLY_IN_RECORD_CSV" >&2
fi

View File

@ -0,0 +1,168 @@
#!/usr/bin/env bash
# Profile one APNIC snapshot replay: resolve the case from a multi-RIR bundle,
# run the rpki binary with profiling/analyze flags enabled, locate the analyze
# output directory it produced, and write meta.json plus a timing summary that
# compares our wall time against the recorded Routinator baseline.
set -euo pipefail
# Repository root is two directories above this script; run everything from there.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Bundle produced by the multi-RIR live recording; overridable.
BUNDLE_ROOT="${BUNDLE_ROOT:-$ROOT_DIR/../../rpki/target/live/20260316-112341-multi-final3}"
CASE_INFO_SCRIPT="$ROOT_DIR/scripts/payload_replay/multi_rir_case_info.py"
PROFILE_RUN_ROOT="${PROFILE_RUN_ROOT:-$ROOT_DIR/target/live/analyze_runs}"
mkdir -p "$PROFILE_RUN_ROOT"
TS="$(date -u +%Y%m%dT%H%M%SZ)"
RUN_NAME="${RUN_NAME:-apnic_snapshot_profile_${TS}}"
RUN_DIR="$PROFILE_RUN_ROOT/$RUN_NAME"
mkdir -p "$RUN_DIR"
# Snapshot the analyze output dirs that already exist so we can detect the one
# this run creates (the binary picks its own directory name under here).
ANALYZE_ROOT="$ROOT_DIR/target/live/analyze"
mkdir -p "$ANALYZE_ROOT"
mapfile -t ANALYZE_BEFORE < <(find "$ANALYZE_ROOT" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | sort)
# Import the case's env vars (TAL_PATH, PAYLOAD_REPLAY_*, timings, ...).
eval "$(python3 "$CASE_INFO_SCRIPT" --bundle-root "$BUNDLE_ROOT" --rir apnic --format env)"
DB_DIR="${DB_DIR:-$RUN_DIR/db}"
REPORT_JSON="${REPORT_JSON:-$RUN_DIR/report.json}"
RUN_LOG="${RUN_LOG:-$RUN_DIR/run.log}"
META_JSON="${META_JSON:-$RUN_DIR/meta.json}"
SUMMARY_MD="${SUMMARY_MD:-$RUN_DIR/summary.md}"
# Always start from a clean database for a fair timing measurement.
rm -rf "$DB_DIR"
# Build the invocation as an array; the `profile` cargo feature plus
# --analyze/--profile-cpu enable the instrumentation outputs.
cmd=(cargo run --release --features profile --bin rpki --
--db "$DB_DIR"
--tal-path "$TAL_PATH"
--ta-path "$TA_PATH"
--payload-replay-archive "$PAYLOAD_REPLAY_ARCHIVE"
--payload-replay-locks "$PAYLOAD_REPLAY_LOCKS"
--validation-time "$SNAPSHOT_VALIDATION_TIME"
--analyze
--profile-cpu
--report-json "$REPORT_JSON")
# Optional limits are only appended when the caller set them.
if [[ -n "${MAX_DEPTH:-}" ]]; then
cmd+=(--max-depth "$MAX_DEPTH")
fi
if [[ -n "${MAX_INSTANCES:-}" ]]; then
cmd+=(--max-instances "$MAX_INSTANCES")
fi
# DRY_RUN=1 prints the command that would run and exits.
if [[ "${DRY_RUN:-0}" == "1" ]]; then
printf '%q ' "${cmd[@]}"
echo
exit 0
fi
# Run the command, capturing the echoed command line plus all output in the
# run log; wall time is measured around the whole subshell.
run_start_s="$(date +%s)"
(
echo '# command:'
printf '%q ' "${cmd[@]}"
echo
echo
"${cmd[@]}"
) 2>&1 | tee "$RUN_LOG" >/dev/null
run_end_s="$(date +%s)"
run_duration_s="$((run_end_s - run_start_s))"
# Detect the analyze dir created by this run: any dir present now that was not
# present before. Falls back to the newest dir if the diff finds nothing.
# NOTE(review): on bash < 4.4 with `set -u`, expanding an empty array via
# "${ANALYZE_BEFORE[@]}" aborts with "unbound variable" — confirm target bash.
mapfile -t ANALYZE_AFTER < <(find "$ANALYZE_ROOT" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | sort)
ANALYZE_DIR=""
for candidate in "${ANALYZE_AFTER[@]}"; do
seen=0
for old in "${ANALYZE_BEFORE[@]}"; do
if [[ "$candidate" == "$old" ]]; then
seen=1
break
fi
done
if [[ "$seen" == "0" ]]; then
ANALYZE_DIR="$candidate"
fi
done
if [[ -z "$ANALYZE_DIR" ]]; then
ANALYZE_DIR="$(find "$ANALYZE_ROOT" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | sort | tail -n 1)"
fi
# Hand the run context to an inline Python script that writes meta.json and a
# summary comparing our wall time with Routinator's recorded baseline.
BUNDLE_ROOT="$BUNDLE_ROOT" \
TRUST_ANCHOR="$TRUST_ANCHOR" \
TAL_PATH="$TAL_PATH" \
TA_PATH="$TA_PATH" \
PAYLOAD_REPLAY_ARCHIVE="$PAYLOAD_REPLAY_ARCHIVE" \
PAYLOAD_REPLAY_LOCKS="$PAYLOAD_REPLAY_LOCKS" \
SNAPSHOT_VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
ROUTINATOR_BASE_REPLAY_SECONDS="$ROUTINATOR_BASE_REPLAY_SECONDS" \
DB_DIR="$DB_DIR" \
REPORT_JSON="$REPORT_JSON" \
RUN_LOG="$RUN_LOG" \
ANALYZE_DIR="$ANALYZE_DIR" \
RUN_DURATION_S="$run_duration_s" \
python3 - "$META_JSON" "$SUMMARY_MD" <<'PY'
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path
meta_path = Path(sys.argv[1])
summary_path = Path(sys.argv[2])
report_path = Path(os.environ['REPORT_JSON'])
report = json.loads(report_path.read_text(encoding='utf-8'))
recorded = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
# meta.json: provenance, baseline timing, and headline counts from the report.
meta = {
'recorded_at_utc': recorded,
'bundle_root': os.environ['BUNDLE_ROOT'],
'trust_anchor': os.environ['TRUST_ANCHOR'],
'tal_path': os.environ['TAL_PATH'],
'ta_path': os.environ['TA_PATH'],
'payload_replay_archive': os.environ['PAYLOAD_REPLAY_ARCHIVE'],
'payload_replay_locks': os.environ['PAYLOAD_REPLAY_LOCKS'],
'validation_time_arg': os.environ['SNAPSHOT_VALIDATION_TIME'],
'routinator_base_replay_seconds': float(os.environ['ROUTINATOR_BASE_REPLAY_SECONDS']),
'db_dir': os.environ['DB_DIR'],
'report_json': os.environ['REPORT_JSON'],
'run_log': os.environ['RUN_LOG'],
'analyze_dir': os.environ.get('ANALYZE_DIR') or '',
'durations_secs': {
'rpki_run_wall': int(os.environ['RUN_DURATION_S']),
},
'counts': {
'publication_points_processed': report['tree']['instances_processed'],
'publication_points_failed': report['tree']['instances_failed'],
'vrps': len(report['vrps']),
'aspas': len(report['aspas']),
'audit_publication_points': len(report['publication_points']),
},
}
meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2) + '\n', encoding='utf-8')
# ours/Routinator ratio; None (omitted from the table) when the baseline is 0.
ratio = meta['durations_secs']['rpki_run_wall'] / meta['routinator_base_replay_seconds'] if meta['routinator_base_replay_seconds'] else None
lines = []
lines.append('# APNIC Snapshot Replay Profile Summary\n\n')
lines.append(f"- recorded_at_utc: `{recorded}`\n")
lines.append(f"- bundle_root: `{meta['bundle_root']}`\n")
lines.append(f"- tal_path: `{meta['tal_path']}`\n")
lines.append(f"- ta_path: `{meta['ta_path']}`\n")
lines.append(f"- payload_replay_archive: `{meta['payload_replay_archive']}`\n")
lines.append(f"- payload_replay_locks: `{meta['payload_replay_locks']}`\n")
lines.append(f"- validation_time_arg: `{meta['validation_time_arg']}`\n")
lines.append(f"- db_dir: `{meta['db_dir']}`\n")
lines.append(f"- report_json: `{meta['report_json']}`\n")
lines.append(f"- run_log: `{meta['run_log']}`\n")
lines.append(f"- analyze_dir: `{meta['analyze_dir']}`\n\n")
lines.append('## Timing\n\n')
lines.append('| metric | value |\n')
lines.append('|---|---:|\n')
lines.append(f"| ours_snapshot_replay_wall_s | {meta['durations_secs']['rpki_run_wall']} |\n")
lines.append(f"| routinator_base_replay_s | {meta['routinator_base_replay_seconds']:.3f} |\n")
if ratio is not None:
lines.append(f"| ratio_ours_over_routinator | {ratio:.3f} |\n")
lines.append('\n## Counts\n\n')
for key, value in meta['counts'].items():
lines.append(f"- {key}: `{value}`\n")
summary_path.write_text(''.join(lines), encoding='utf-8')
PY
# Human-facing pointers to every artifact, kept on stderr so stdout stays clean.
echo "== APNIC snapshot replay profiling complete ==" >&2
echo "- run_dir: $RUN_DIR" >&2
echo "- analyze_dir: $ANALYZE_DIR" >&2
echo "- report_json: $REPORT_JSON" >&2
echo "- run_log: $RUN_LOG" >&2
echo "- meta_json: $META_JSON" >&2
echo "- summary_md: $SUMMARY_MD" >&2

View File

@ -0,0 +1,128 @@
#!/usr/bin/env bash
# Drive one RIR's replay case: resolve its inputs from a multi-RIR bundle and
# then describe it, replay the snapshot, replay the delta, or do both (and, for
# `both`, also generate the combined per-RIR case report).
set -euo pipefail
# Repository root is two directories above this script; run everything from there.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Positional args: <rir> and an optional mode (default: both).
if [[ $# -lt 1 || $# -gt 2 ]]; then
echo "usage: $0 <rir> [describe|snapshot|delta|both]" >&2
exit 2
fi
RIR="$1"
MODE="${2:-both}"
BUNDLE_ROOT="${BUNDLE_ROOT:-$ROOT_DIR/../../rpki/target/live/20260316-112341-multi-final3}"
CASE_INFO_SCRIPT="$ROOT_DIR/scripts/payload_replay/multi_rir_case_info.py"
CASE_REPORT_SCRIPT="$ROOT_DIR/scripts/payload_replay/write_multi_rir_case_report.py"
MULTI_RIR_OUT_DIR="${MULTI_RIR_OUT_DIR:-$ROOT_DIR/target/live/multi_rir_replay_runs/$RIR}"
mkdir -p "$MULTI_RIR_OUT_DIR"
# Import the case's env vars (RIR_ROOT, *_VALIDATION_TIME, record CSVs, ...).
eval "$(python3 "$CASE_INFO_SCRIPT" --bundle-root "$BUNDLE_ROOT" --rir "$RIR" --format env)"
# --- Per-RIR artifact locations for the snapshot run ---
SNAPSHOT_DB_DIR="${SNAPSHOT_DB_DIR:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_replay_db}"
SNAPSHOT_REPORT_MD="${SNAPSHOT_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_compare_summary.md}"
SNAPSHOT_META_JSON="${SNAPSHOT_META_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_meta.json}"
SNAPSHOT_RUN_LOG="${SNAPSHOT_RUN_LOG:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_run.log}"
SNAPSHOT_REPORT_JSON="${SNAPSHOT_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_report.json}"
SNAPSHOT_VRPS_CSV="${SNAPSHOT_VRPS_CSV:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_vrps.csv}"
SNAPSHOT_ONLY_OURS="${SNAPSHOT_ONLY_OURS:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_only_in_ours.csv}"
SNAPSHOT_ONLY_RECORD="${SNAPSHOT_ONLY_RECORD:-$MULTI_RIR_OUT_DIR/${RIR}_snapshot_only_in_record.csv}"
# --- Per-RIR artifact locations for the delta run ---
DELTA_DB_DIR="${DELTA_DB_DIR:-$MULTI_RIR_OUT_DIR/${RIR}_delta_replay_db}"
DELTA_REPORT_MD="${DELTA_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_delta_compare_summary.md}"
DELTA_META_JSON="${DELTA_META_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_delta_meta.json}"
DELTA_RUN_LOG="${DELTA_RUN_LOG:-$MULTI_RIR_OUT_DIR/${RIR}_delta_run.log}"
DELTA_REPORT_JSON="${DELTA_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_delta_report.json}"
DELTA_VRPS_CSV="${DELTA_VRPS_CSV:-$MULTI_RIR_OUT_DIR/${RIR}_delta_vrps.csv}"
DELTA_ONLY_OURS="${DELTA_ONLY_OURS:-$MULTI_RIR_OUT_DIR/${RIR}_delta_only_in_ours.csv}"
DELTA_ONLY_RECORD="${DELTA_ONLY_RECORD:-$MULTI_RIR_OUT_DIR/${RIR}_delta_only_in_record.csv}"
CASE_REPORT_JSON="${CASE_REPORT_JSON:-$MULTI_RIR_OUT_DIR/${RIR}_case_report.json}"
CASE_REPORT_MD="${CASE_REPORT_MD:-$MULTI_RIR_OUT_DIR/${RIR}_case_report.md}"
case "$MODE" in
describe)
# Print the resolved case as JSON; no replay is performed.
python3 "$CASE_INFO_SCRIPT" --bundle-root "$BUNDLE_ROOT" --rir "$RIR"
;;
snapshot)
# Snapshot-only replay, delegated to the (APNIC-named but RIR-agnostic)
# snapshot driver with all outputs redirected into this RIR's directory.
rm -rf "$SNAPSHOT_DB_DIR"
ROUTINATOR_RECORD_CSV="$ROUTINATOR_BASE_RECORD_CSV" \
VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
OUT_DIR="$MULTI_RIR_OUT_DIR" \
DB_DIR="$SNAPSHOT_DB_DIR" \
RUN_NAME="${RUN_NAME:-${RIR}_snapshot_replay}" \
META_JSON="$SNAPSHOT_META_JSON" \
RUN_LOG="$SNAPSHOT_RUN_LOG" \
REPORT_JSON="$SNAPSHOT_REPORT_JSON" \
VRPS_CSV="$SNAPSHOT_VRPS_CSV" \
COMPARE_SUMMARY_MD="$SNAPSHOT_REPORT_MD" \
ONLY_IN_OURS_CSV="$SNAPSHOT_ONLY_OURS" \
ONLY_IN_RECORD_CSV="$SNAPSHOT_ONLY_RECORD" \
./scripts/payload_replay/run_apnic_replay.sh
;;
delta)
# Delta-only replay, delegated to the delta driver with this RIR's root
# as DELTA_ROOT and outputs redirected into this RIR's directory.
rm -rf "$DELTA_DB_DIR"
ROUTINATOR_RECORD_CSV="$ROUTINATOR_DELTA_RECORD_CSV" \
VALIDATION_TIME="$DELTA_VALIDATION_TIME" \
PAYLOAD_BASE_VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
OUT_DIR="$MULTI_RIR_OUT_DIR" \
DB_DIR="$DELTA_DB_DIR" \
RUN_NAME="${RUN_NAME:-${RIR}_delta_replay}" \
DELTA_ROOT="$RIR_ROOT" \
META_JSON="$DELTA_META_JSON" \
RUN_LOG="$DELTA_RUN_LOG" \
REPORT_JSON="$DELTA_REPORT_JSON" \
VRPS_CSV="$DELTA_VRPS_CSV" \
COMPARE_SUMMARY_MD="$DELTA_REPORT_MD" \
ONLY_IN_OURS_CSV="$DELTA_ONLY_OURS" \
ONLY_IN_RECORD_CSV="$DELTA_ONLY_RECORD" \
./scripts/payload_replay/run_apnic_delta_replay.sh
;;
both)
# Snapshot then delta (same env wiring as the single modes, but with
# mode-specific RUN_NAME_* overrides), then the combined case report.
rm -rf "$SNAPSHOT_DB_DIR" "$DELTA_DB_DIR"
ROUTINATOR_RECORD_CSV="$ROUTINATOR_BASE_RECORD_CSV" \
VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
OUT_DIR="$MULTI_RIR_OUT_DIR" \
DB_DIR="$SNAPSHOT_DB_DIR" \
RUN_NAME="${RUN_NAME_SNAPSHOT:-${RIR}_snapshot_replay}" \
META_JSON="$SNAPSHOT_META_JSON" \
RUN_LOG="$SNAPSHOT_RUN_LOG" \
REPORT_JSON="$SNAPSHOT_REPORT_JSON" \
VRPS_CSV="$SNAPSHOT_VRPS_CSV" \
COMPARE_SUMMARY_MD="$SNAPSHOT_REPORT_MD" \
ONLY_IN_OURS_CSV="$SNAPSHOT_ONLY_OURS" \
ONLY_IN_RECORD_CSV="$SNAPSHOT_ONLY_RECORD" \
./scripts/payload_replay/run_apnic_replay.sh
ROUTINATOR_RECORD_CSV="$ROUTINATOR_DELTA_RECORD_CSV" \
VALIDATION_TIME="$DELTA_VALIDATION_TIME" \
PAYLOAD_BASE_VALIDATION_TIME="$SNAPSHOT_VALIDATION_TIME" \
OUT_DIR="$MULTI_RIR_OUT_DIR" \
DB_DIR="$DELTA_DB_DIR" \
RUN_NAME="${RUN_NAME_DELTA:-${RIR}_delta_replay}" \
DELTA_ROOT="$RIR_ROOT" \
META_JSON="$DELTA_META_JSON" \
RUN_LOG="$DELTA_RUN_LOG" \
REPORT_JSON="$DELTA_REPORT_JSON" \
VRPS_CSV="$DELTA_VRPS_CSV" \
COMPARE_SUMMARY_MD="$DELTA_REPORT_MD" \
ONLY_IN_OURS_CSV="$DELTA_ONLY_OURS" \
ONLY_IN_RECORD_CSV="$DELTA_ONLY_RECORD" \
./scripts/payload_replay/run_apnic_delta_replay.sh
# Merge both runs' meta + compare summaries into one case report (md+json).
python3 "$CASE_REPORT_SCRIPT" \
--rir "$RIR" \
--snapshot-meta "$SNAPSHOT_META_JSON" \
--snapshot-compare "$SNAPSHOT_REPORT_MD" \
--delta-meta "$DELTA_META_JSON" \
--delta-compare "$DELTA_REPORT_MD" \
--routinator-base-seconds "$ROUTINATOR_BASE_REPLAY_SECONDS" \
--routinator-delta-seconds "$ROUTINATOR_DELTA_REPLAY_SECONDS" \
--out-md "$CASE_REPORT_MD" \
--out-json "$CASE_REPORT_JSON" >/dev/null
echo "- case report: $CASE_REPORT_MD" >&2
echo "- case report json: $CASE_REPORT_JSON" >&2
;;
*)
echo "unsupported mode: $MODE; expected describe|snapshot|delta|both" >&2
exit 2
;;
esac

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Run the full multi-RIR replay suite: snapshot+delta for every RIR in $RIRS,
# then aggregate the per-RIR case reports into one summary (md + json).
set -euo pipefail
# Repository root is two directories above this script; run everything from there.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
BUNDLE_ROOT="${BUNDLE_ROOT:-$ROOT_DIR/../../rpki/target/live/20260316-112341-multi-final3}"
SUITE_OUT_DIR="${SUITE_OUT_DIR:-$ROOT_DIR/target/live/multi_rir_replay_runs}"
# Space-separated list; intentionally unquoted below so it word-splits.
RIRS="${RIRS:-afrinic apnic arin lacnic ripe}"
CASE_SCRIPT="$ROOT_DIR/scripts/payload_replay/run_multi_rir_replay_case.sh"
SUMMARY_SCRIPT="$ROOT_DIR/scripts/payload_replay/write_multi_rir_summary.py"
mkdir -p "$SUITE_OUT_DIR"
# Run each RIR's full case (snapshot + delta + case report) sequentially,
# giving every RIR its own output subdirectory.
for rir in $RIRS; do
MULTI_RIR_OUT_DIR="$SUITE_OUT_DIR/$rir" \
BUNDLE_ROOT="$BUNDLE_ROOT" \
"$CASE_SCRIPT" "$rir" both
echo "completed $rir" >&2
echo >&2
done
# Aggregate the per-RIR case reports; $RIRS is deliberately unquoted so each
# RIR becomes its own --rirs argument.
python3 "$SUMMARY_SCRIPT" \
--case-root "$SUITE_OUT_DIR" \
--out-md "$SUITE_OUT_DIR/multi_rir_summary.md" \
--out-json "$SUITE_OUT_DIR/multi_rir_summary.json" \
--rirs $RIRS >/dev/null
echo "== multi-RIR replay suite complete ==" >&2
echo "- suite_out_dir: $SUITE_OUT_DIR" >&2
echo "- summary_md: $SUITE_OUT_DIR/multi_rir_summary.md" >&2
echo "- summary_json: $SUITE_OUT_DIR/multi_rir_summary.json" >&2

View File

@ -0,0 +1,133 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
from pathlib import Path
def parse_args() -> argparse.Namespace:
    """Parse CLI flags for the per-RIR case report generator."""
    parser = argparse.ArgumentParser(
        description="Generate one multi-RIR replay case report"
    )
    parser.add_argument("--rir", required=True)
    parser.add_argument("--snapshot-meta", required=True)
    parser.add_argument("--snapshot-compare", required=True)
    parser.add_argument("--delta-meta", required=True)
    parser.add_argument("--delta-compare", required=True)
    # Routinator baseline durations, used for the ours/baseline ratio.
    parser.add_argument("--routinator-base-seconds", required=True, type=float)
    parser.add_argument("--routinator-delta-seconds", required=True, type=float)
    parser.add_argument("--out-md", required=True)
    parser.add_argument("--out-json", required=True)
    return parser.parse_args()
def read_json(path: str) -> dict:
    """Load a UTF-8 JSON file and return the parsed object."""
    text = Path(path).read_text(encoding="utf-8")
    return json.loads(text)
def parse_compare_md(path: str) -> dict:
    """Extract integer metrics from a compare-summary Markdown table.

    Only two-column `| key | value |` rows are considered; the header row,
    separator rows, and rows whose value is not an integer are skipped.
    """
    metrics: dict = {}
    for raw in Path(path).read_text(encoding="utf-8").splitlines():
        if not raw.startswith("| "):
            continue
        cells = [cell.strip() for cell in raw.strip("|").split("|")]
        if len(cells) != 2:
            continue
        key, value = cells
        if key in ("metric", "---"):
            continue
        try:
            metrics[key] = int(value)
        except ValueError:
            # Non-numeric cells (paths, notes) are intentionally ignored.
            continue
    return metrics
def ratio(ours: float, baseline: float) -> float | None:
if baseline <= 0:
return None
return ours / baseline
def build_report(args: argparse.Namespace) -> dict:
    """Assemble the per-RIR case report from meta JSON and compare Markdown files."""

    def mode_section(meta_path: str, compare_path: str, baseline_seconds: float) -> dict:
        # One section per replay mode (snapshot / delta), same key layout.
        meta = read_json(meta_path)
        compare = parse_compare_md(compare_path)
        ours_seconds = float(meta["durations_secs"]["rpki_run"])
        # The run "matches" only when the VRP diff is empty on both sides;
        # missing metrics default to -1 so absence never counts as a match.
        matched = (
            compare.get("only_in_ours", -1) == 0
            and compare.get("only_in_record", -1) == 0
        )
        return {
            "meta_json": str(Path(meta_path).resolve()),
            "compare_md": str(Path(compare_path).resolve()),
            "ours_seconds": ours_seconds,
            "routinator_seconds": baseline_seconds,
            "ratio": ratio(ours_seconds, baseline_seconds),
            "compare": compare,
            "match": matched,
            "counts": meta.get("counts", {}),
        }

    return {
        "rir": args.rir,
        "snapshot": mode_section(
            args.snapshot_meta, args.snapshot_compare, args.routinator_base_seconds
        ),
        "delta": mode_section(
            args.delta_meta, args.delta_compare, args.routinator_delta_seconds
        ),
    }
def write_md(path: Path, report: dict) -> None:
    """Write the per-RIR case report as Markdown to `path`.

    Renders a summary table (match status, timings, ours/baseline ratio, diff
    counts) for the snapshot and delta runs, followed by input file paths and
    the raw counts from each run's meta.

    Fix: `ratio()` returns None when the Routinator baseline is not positive;
    formatting None with `:.3f` raised TypeError, so None is now rendered as
    "n/a" instead of crashing report generation.
    """
    def fmt_ratio(value) -> str:
        # None means the ratio could not be computed (baseline <= 0).
        return f"{value:.3f}" if value is not None else "n/a"

    snapshot = report["snapshot"]
    delta = report["delta"]
    lines = []
    lines.append(f"# {report['rir'].upper()} Replay Report\n\n")
    lines.append("## Summary\n\n")
    lines.append("| mode | match | ours_s | routinator_s | ratio | only_in_ours | only_in_record |\n")
    lines.append("|---|---|---:|---:|---:|---:|---:|\n")
    lines.append(
        f"| snapshot | {str(snapshot['match']).lower()} | {snapshot['ours_seconds']:.3f} | {snapshot['routinator_seconds']:.3f} | {fmt_ratio(snapshot['ratio'])} | {snapshot['compare'].get('only_in_ours', 0)} | {snapshot['compare'].get('only_in_record', 0)} |\n"
    )
    lines.append(
        f"| delta | {str(delta['match']).lower()} | {delta['ours_seconds']:.3f} | {delta['routinator_seconds']:.3f} | {fmt_ratio(delta['ratio'])} | {delta['compare'].get('only_in_ours', 0)} | {delta['compare'].get('only_in_record', 0)} |\n"
    )
    lines.append("\n## Snapshot Inputs\n\n")
    lines.append(f"- meta_json: `{snapshot['meta_json']}`\n")
    lines.append(f"- compare_md: `{snapshot['compare_md']}`\n")
    lines.append("\n## Delta Inputs\n\n")
    lines.append(f"- meta_json: `{delta['meta_json']}`\n")
    lines.append(f"- compare_md: `{delta['compare_md']}`\n")
    lines.append("\n## Counts\n\n")
    lines.append("### Snapshot\n\n")
    for k, v in sorted(snapshot.get("counts", {}).items()):
        lines.append(f"- {k}: `{v}`\n")
    lines.append("\n### Delta\n\n")
    for k, v in sorted(delta.get("counts", {}).items()):
        lines.append(f"- {k}: `{v}`\n")
    path.write_text("".join(lines), encoding="utf-8")
def main() -> int:
    """Build the case report and write both its JSON and Markdown renderings."""
    args = parse_args()
    report = build_report(args)

    md_path = Path(args.out_md)
    json_path = Path(args.out_json)
    for target in (json_path, md_path):
        target.parent.mkdir(parents=True, exist_ok=True)

    json_path.write_text(
        json.dumps(report, ensure_ascii=False, indent=2) + "\n", encoding="utf-8"
    )
    write_md(md_path, report)
    print(md_path)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,87 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import math
from pathlib import Path
DEFAULT_RIRS = ["afrinic", "apnic", "arin", "lacnic", "ripe"]
def parse_args() -> argparse.Namespace:
    """Parse CLI arguments for the multi-RIR case-report aggregator."""
    parser = argparse.ArgumentParser(description="Aggregate per-RIR replay case reports")
    parser.add_argument(
        "--case-root",
        required=True,
        help="directory containing <rir>/<rir>_case_report.json",
    )
    parser.add_argument("--out-md", required=True)
    parser.add_argument("--out-json", required=True)
    # None (not []) signals "use the default RIR list" to the caller.
    parser.add_argument(
        "--rirs",
        nargs="*",
        default=None,
        help="RIRs to include (default: all 5)",
    )
    return parser.parse_args()
def read_case(case_root: Path, rir: str) -> dict:
    """Load one RIR's case report from <case_root>/<rir>/<rir>_case_report.json."""
    report_path = case_root / rir / f"{rir}_case_report.json"
    raw = report_path.read_text(encoding="utf-8")
    return json.loads(raw)
def geomean(values: list[float]) -> float:
    """Geometric mean of the strictly positive entries of `values`.

    Non-positive entries are ignored (log is undefined for them); returns 0.0
    when no positive entry remains.
    """
    positive = [v for v in values if v > 0]
    if not positive:
        return 0.0
    log_sum = sum(map(math.log, positive))
    return math.exp(log_sum / len(positive))
def build_summary(cases: list[dict]) -> dict:
    """Aggregate per-RIR case dicts into {cases, summary}.

    The summary carries all-match flags for snapshot/delta plus geometric
    means of the ours/routinator timing ratios.
    """
    snapshot_ratios = [case["snapshot"]["ratio"] for case in cases]
    delta_ratios = [case["delta"]["ratio"] for case in cases]
    summary = {
        "snapshot_all_match": all(case["snapshot"]["match"] for case in cases),
        "delta_all_match": all(case["delta"]["match"] for case in cases),
        "snapshot_ratio_geomean": geomean(snapshot_ratios),
        "delta_ratio_geomean": geomean(delta_ratios),
        # Combined geomean over both modes' ratios.
        "all_ratio_geomean": geomean(snapshot_ratios + delta_ratios),
    }
    return {"cases": cases, "summary": summary}
def write_md(path: Path, data: dict) -> None:
    """Render the aggregated multi-RIR summary as a Markdown file at `path`.

    Emits one correctness/timing table row per case, then an aggregate
    metrics table built from data["summary"].
    """
    out = []
    out.append("# Multi-RIR Replay Summary\n\n")
    out.append("## Correctness + Timing\n\n")
    out.append("| RIR | snapshot_match | snapshot_ours_s | snapshot_routinator_s | snapshot_ratio | delta_match | delta_ours_s | delta_routinator_s | delta_ratio |\n")
    out.append("|---|---|---:|---:|---:|---|---:|---:|---:|\n")
    for case in data["cases"]:
        snap, delt = case["snapshot"], case["delta"]
        out.append(
            f"| {case['rir']} | {str(snap['match']).lower()} | {snap['ours_seconds']:.3f} | {snap['routinator_seconds']:.3f} | {snap['ratio']:.3f} | {str(delt['match']).lower()} | {delt['ours_seconds']:.3f} | {delt['routinator_seconds']:.3f} | {delt['ratio']:.3f} |\n"
        )
    summary = data["summary"]
    out.append("\n## Aggregate Metrics\n\n")
    out.append("| metric | value |\n")
    out.append("|---|---:|\n")
    # Booleans render lowercase; ratios render with three decimals.
    for key in ("snapshot_all_match", "delta_all_match"):
        out.append(f"| {key} | {str(summary[key]).lower()} |\n")
    for key in ("snapshot_ratio_geomean", "delta_ratio_geomean", "all_ratio_geomean"):
        out.append(f"| {key} | {summary[key]:.3f} |\n")
    path.write_text("".join(out), encoding="utf-8")
def main() -> int:
    """Aggregate every requested RIR case report and emit JSON + Markdown.

    Returns 0 on success; prints the output Markdown path.
    """
    args = parse_args()
    root = Path(args.case_root)
    # Fall back to the full RIR list when --rirs is absent.
    selected = args.rirs or DEFAULT_RIRS
    data = build_summary([read_case(root, rir) for rir in selected])
    md_path, json_path = Path(args.out_md), Path(args.out_json)
    for target in (md_path, json_path):
        target.parent.mkdir(parents=True, exist_ok=True)
    json_path.write_text(json.dumps(data, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
    write_md(md_path, data)
    print(md_path)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,123 @@
#!/usr/bin/env bash
# Compare one round's CCR outputs from two relying parties ("ours" vs
# rpki-client): convert each CCR into VRP/VAP CSV views, diff the views as
# sets, and write compare-summary.json / compare-summary.md into --out-dir.
set -euo pipefail

# Print CLI usage. The heredoc body is user-facing output text.
usage() {
cat <<'EOF'
Usage:
./scripts/periodic/compare_ccr_round.sh \
--ours-ccr <path> \
--rpki-client-ccr <path> \
--out-dir <path> \
[--trust-anchor <name>]
EOF
}

# Repository root, resolved relative to this script's location.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
OURS_CCR=""
CLIENT_CCR=""
OUT_DIR=""
# Trust-anchor label stamped into both CSV views; "unknown" if not provided.
TRUST_ANCHOR="unknown"
CCR_TO_COMPARE_VIEWS_BIN="$ROOT_DIR/target/release/ccr_to_compare_views"

# Parse command-line flags.
while [[ $# -gt 0 ]]; do
case "$1" in
--ours-ccr) OURS_CCR="$2"; shift 2 ;;
--rpki-client-ccr) CLIENT_CCR="$2"; shift 2 ;;
--out-dir) OUT_DIR="$2"; shift 2 ;;
--trust-anchor) TRUST_ANCHOR="$2"; shift 2 ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage; exit 2 ;;
esac
done
# All three paths are mandatory.
[[ -n "$OURS_CCR" && -n "$CLIENT_CCR" && -n "$OUT_DIR" ]] || { usage >&2; exit 2; }
mkdir -p "$OUT_DIR"

# Build the converter on demand if the release binary is missing.
if [[ ! -x "$CCR_TO_COMPARE_VIEWS_BIN" ]]; then
(
cd "$ROOT_DIR"
cargo build --release --bin ccr_to_compare_views
)
fi

# Per-round artifact paths under --out-dir.
OURS_VRPS="$OUT_DIR/ours-vrps.csv"
OURS_VAPS="$OUT_DIR/ours-vaps.csv"
CLIENT_VRPS="$OUT_DIR/rpki-client-vrps.csv"
CLIENT_VAPS="$OUT_DIR/rpki-client-vaps.csv"
SUMMARY_JSON="$OUT_DIR/compare-summary.json"
SUMMARY_MD="$OUT_DIR/compare-summary.md"

# Materialize VRP/VAP CSV views from our CCR ...
"$CCR_TO_COMPARE_VIEWS_BIN" \
--ccr "$OURS_CCR" \
--vrps-out "$OURS_VRPS" \
--vaps-out "$OURS_VAPS" \
--trust-anchor "$TRUST_ANCHOR"
# ... and from the rpki-client CCR.
"$CCR_TO_COMPARE_VIEWS_BIN" \
--ccr "$CLIENT_CCR" \
--vrps-out "$CLIENT_VRPS" \
--vaps-out "$CLIENT_VAPS" \
--trust-anchor "$TRUST_ANCHOR"

# Diff the four CSVs as row sets and emit the JSON + Markdown summary.
python3 - <<'PY' "$OURS_VRPS" "$CLIENT_VRPS" "$OURS_VAPS" "$CLIENT_VAPS" "$SUMMARY_JSON" "$SUMMARY_MD"
import csv
import json
import sys
from pathlib import Path
# Positional args: ours/client VRP CSVs, ours/client VAP CSVs, two outputs.
ours_vrps_path, client_vrps_path, ours_vaps_path, client_vaps_path, json_out, md_out = sys.argv[1:]
def rows(path):
    # All CSV rows minus the header line.
    with open(path, newline="") as f:
        return list(csv.reader(f))[1:]
# Set semantics: row order is irrelevant and duplicate rows collapse.
ours_vrps = {tuple(r) for r in rows(ours_vrps_path)}
client_vrps = {tuple(r) for r in rows(client_vrps_path)}
ours_vaps = {tuple(r) for r in rows(ours_vaps_path)}
client_vaps = {tuple(r) for r in rows(client_vaps_path)}
# Diff samples are capped at 20 entries per direction to keep output small.
summary = {
    "vrps": {
        "ours": len(ours_vrps),
        "rpkiClient": len(client_vrps),
        "match": ours_vrps == client_vrps,
        "onlyInOurs": sorted(ours_vrps - client_vrps)[:20],
        "onlyInRpkiClient": sorted(client_vrps - ours_vrps)[:20],
    },
    "vaps": {
        "ours": len(ours_vaps),
        "rpkiClient": len(client_vaps),
        "match": ours_vaps == client_vaps,
        "onlyInOurs": sorted(ours_vaps - client_vaps)[:20],
        "onlyInRpkiClient": sorted(client_vaps - ours_vaps)[:20],
    },
}
summary["allMatch"] = summary["vrps"]["match"] and summary["vaps"]["match"]
Path(json_out).write_text(json.dumps(summary, indent=2), encoding="utf-8")
# Human-readable Markdown companion to the JSON summary.
lines = [
    "# Round Compare Summary",
    "",
    f"- `allMatch`: `{summary['allMatch']}`",
    f"- `vrpMatch`: `{summary['vrps']['match']}`",
    f"- `vapMatch`: `{summary['vaps']['match']}`",
    f"- `ours_vrps`: `{summary['vrps']['ours']}`",
    f"- `rpki_client_vrps`: `{summary['vrps']['rpkiClient']}`",
    f"- `ours_vaps`: `{summary['vaps']['ours']}`",
    f"- `rpki_client_vaps`: `{summary['vaps']['rpkiClient']}`",
    "",
    "## Sample Differences",
    "",
    f"- `vrps.onlyInOurs`: `{len(summary['vrps']['onlyInOurs'])}`",
    f"- `vrps.onlyInRpkiClient`: `{len(summary['vrps']['onlyInRpkiClient'])}`",
    f"- `vaps.onlyInOurs`: `{len(summary['vaps']['onlyInOurs'])}`",
    f"- `vaps.onlyInRpkiClient`: `{len(summary['vaps']['onlyInRpkiClient'])}`",
]
Path(md_out).write_text("\n".join(lines) + "\n", encoding="utf-8")
PY
echo "$OUT_DIR"

View File

@ -0,0 +1,156 @@
#!/usr/bin/env bash
# Run one "ours" APNIC round on a remote host over SSH: optionally sync the
# repo and release binary, execute the rpki binary remotely (clean state for
# snapshot rounds, reused state for delta rounds), record timing metadata,
# and pull the round's artifacts back into the local run root.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/periodic/run_apnic_ours_parallel_round_remote.sh \
--run-root <path> \
--round-id <round-XXX> \
--kind <snapshot|delta> \
--ssh-target <user@host> \
--remote-root <path> \
[--scheduled-at <RFC3339>] \
[--skip-sync]
EOF
}
# Repository root, resolved relative to this script.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
RUN_ROOT=""
ROUND_ID=""
KIND=""
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT=""
SCHEDULED_AT=""
SKIP_SYNC=0
# Parse command-line flags.
while [[ $# -gt 0 ]]; do
case "$1" in
--run-root) RUN_ROOT="$2"; shift 2 ;;
--round-id) ROUND_ID="$2"; shift 2 ;;
--kind) KIND="$2"; shift 2 ;;
--ssh-target) SSH_TARGET="$2"; shift 2 ;;
--remote-root) REMOTE_ROOT="$2"; shift 2 ;;
--scheduled-at) SCHEDULED_AT="$2"; shift 2 ;;
--skip-sync) SKIP_SYNC=1; shift 1 ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage; exit 2 ;;
esac
done
# Required arguments and the snapshot|delta domain check.
[[ -n "$RUN_ROOT" && -n "$ROUND_ID" && -n "$KIND" && -n "$REMOTE_ROOT" ]] || { usage >&2; exit 2; }
[[ "$KIND" == "snapshot" || "$KIND" == "delta" ]] || { echo "--kind must be snapshot or delta" >&2; exit 2; }
# Local destination and remote layout for this round.
LOCAL_OUT="$RUN_ROOT/rounds/$ROUND_ID/ours"
REMOTE_REPO="$REMOTE_ROOT/repo"
REMOTE_OUT="$REMOTE_ROOT/rounds/$ROUND_ID/ours"
REMOTE_WORK_DB="$REMOTE_ROOT/state/ours/work-db"
REMOTE_RAW_STORE="$REMOTE_ROOT/state/ours/raw-store.db"
mkdir -p "$LOCAL_OUT"
# Sync the repo and the prebuilt rpki binary to the remote host unless the
# caller (e.g. the periodic orchestrator) already did so and passed --skip-sync.
if [[ "$SKIP_SYNC" -eq 0 ]]; then
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT'"
rsync -a --delete \
--exclude target \
--exclude .git \
"$ROOT_DIR/" "$SSH_TARGET:$REMOTE_REPO/"
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_REPO/target/release' '$REMOTE_OUT' '$REMOTE_ROOT/state/ours'"
rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_REPO/target/release/"
else
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_OUT' '$REMOTE_ROOT/state/ours'"
fi
# Execute the round remotely. The VAR=value words before 'bash -s' become env
# assignments on the remote command line; the quoted EOS heredoc suppresses
# local expansion so $REMOTE_* below resolve on the remote side.
ssh "$SSH_TARGET" \
REMOTE_REPO="$REMOTE_REPO" \
REMOTE_OUT="$REMOTE_OUT" \
REMOTE_WORK_DB="$REMOTE_WORK_DB" \
REMOTE_RAW_STORE="$REMOTE_RAW_STORE" \
KIND="$KIND" \
ROUND_ID="$ROUND_ID" \
SCHEDULED_AT="$SCHEDULED_AT" \
'bash -s' <<'EOS'
set -euo pipefail
cd "$REMOTE_REPO"
mkdir -p "$REMOTE_OUT"
# Snapshot rounds start from a clean state; delta rounds reuse the prior DBs.
if [[ "$KIND" == "snapshot" ]]; then
rm -rf "$REMOTE_WORK_DB" "$REMOTE_RAW_STORE"
fi
mkdir -p "$(dirname "$REMOTE_WORK_DB")"
# Wall-clock timestamps: ISO for humans, epoch millis for durationMs.
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
ccr_out="$REMOTE_OUT/result.ccr"
cir_out="$REMOTE_OUT/result.cir"
report_out="$REMOTE_OUT/report.json"
run_log="$REMOTE_OUT/run.log"
meta_out="$REMOTE_OUT/round-result.json"
# Run our RP; capture the exit code instead of aborting so round metadata is
# always written even on failure.
set +e
env RPKI_PROGRESS_LOG=1 RPKI_PROGRESS_SLOW_SECS=0 target/release/rpki \
--db "$REMOTE_WORK_DB" \
--raw-store-db "$REMOTE_RAW_STORE" \
--tal-path tests/fixtures/tal/apnic-rfc7730-https.tal \
--ta-path tests/fixtures/ta/apnic-ta.cer \
--parallel-phase1 \
--ccr-out "$ccr_out" \
--report-json "$report_out" \
--cir-enable \
--cir-out "$cir_out" \
--cir-tal-uri "https://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer" \
>"$run_log" 2>&1
exit_code=$?
set -e
finished_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
finished_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
# Persist per-round metadata (timing, paths, exit code) next to the artifacts.
python3 - <<'PY' "$meta_out" "$ROUND_ID" "$KIND" "$SCHEDULED_AT" "$started_at_iso" "$finished_at_iso" "$REMOTE_WORK_DB" "$REMOTE_RAW_STORE" "$exit_code" "$started_at_ms" "$finished_at_ms"
import json, sys
(
    path,
    round_id,
    kind,
    scheduled_at,
    started_at,
    finished_at,
    work_db,
    raw_store,
    exit_code,
    start_ms,
    end_ms,
) = sys.argv[1:]
with open(path, "w", encoding="utf-8") as fh:
    json.dump(
        {
            "roundId": round_id,
            "kind": kind,
            # Empty --scheduled-at serializes as JSON null.
            "scheduledAt": scheduled_at or None,
            "startedAt": started_at,
            "finishedAt": finished_at,
            "durationMs": int(end_ms) - int(start_ms),
            "remoteWorkDbPath": work_db,
            "remoteRawStoreDbPath": raw_store,
            "exitCode": int(exit_code),
        },
        fh,
        indent=2,
    )
PY
# Propagate the RP's exit status to the local caller through ssh.
exit "$exit_code"
EOS
# Pull the remote round artifacts into the local round directory.
rsync -a "$SSH_TARGET:$REMOTE_OUT/" "$LOCAL_OUT/"
echo "$LOCAL_OUT"

View File

@ -0,0 +1,280 @@
#!/usr/bin/env bash
# APNIC dual-RP periodic comparison driver: builds a run skeleton (rounds/,
# state/, metadata), then for each scheduled round launches our RP and
# rpki-client in parallel on the remote host, pulls their artifacts, compares
# the two CCR outputs, and records per-round and final summaries.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/periodic/run_apnic_parallel_dual_rp_periodic_ccr_compare.sh \
--run-root <path> \
[--ssh-target <user@host>] \
[--remote-root <path>] \
[--rpki-client-bin <path>] \
[--round-count <n>] \
[--interval-secs <n>] \
[--start-at <RFC3339>] \
[--dry-run]
EOF
}
# Repository root, resolved relative to this script.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
RUN_ROOT=""
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT=""
RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}"
# Defaults: 10 rounds, one every 10 minutes.
ROUND_COUNT=10
INTERVAL_SECS=600
START_AT=""
DRY_RUN=0
# Parse command-line flags.
while [[ $# -gt 0 ]]; do
case "$1" in
--run-root) RUN_ROOT="$2"; shift 2 ;;
--ssh-target) SSH_TARGET="$2"; shift 2 ;;
--remote-root) REMOTE_ROOT="$2"; shift 2 ;;
--rpki-client-bin) RPKI_CLIENT_BIN="$2"; shift 2 ;;
--round-count) ROUND_COUNT="$2"; shift 2 ;;
--interval-secs) INTERVAL_SECS="$2"; shift 2 ;;
--start-at) START_AT="$2"; shift 2 ;;
--dry-run) DRY_RUN=1; shift 1 ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage; exit 2 ;;
esac
done
[[ -n "$RUN_ROOT" ]] || { usage >&2; exit 2; }
[[ "$ROUND_COUNT" =~ ^[0-9]+$ ]] || { echo "--round-count must be an integer" >&2; exit 2; }
[[ "$INTERVAL_SECS" =~ ^[0-9]+$ ]] || { echo "--interval-secs must be an integer" >&2; exit 2; }
# Remote root and the rpki-client binary are only needed for a real run.
if [[ "$DRY_RUN" -ne 1 ]]; then
[[ -n "$REMOTE_ROOT" ]] || { echo "--remote-root is required unless --dry-run" >&2; exit 2; }
[[ -x "$RPKI_CLIENT_BIN" ]] || { echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2; exit 2; }
fi
mkdir -p "$RUN_ROOT"
# Generate the run skeleton: meta.json, per-round round-meta.json with
# scheduled times, and a placeholder final-summary.json.
python3 - <<'PY' "$RUN_ROOT" "$SSH_TARGET" "$REMOTE_ROOT" "$ROUND_COUNT" "$INTERVAL_SECS" "$START_AT" "$DRY_RUN"
import json
import sys
from datetime import datetime, timedelta, timezone
from pathlib import Path
run_root = Path(sys.argv[1]).resolve()
ssh_target = sys.argv[2]
remote_root = sys.argv[3]
round_count = int(sys.argv[4])
interval_secs = int(sys.argv[5])
start_at_arg = sys.argv[6]
dry_run = bool(int(sys.argv[7]))
def parse_rfc3339_utc(value: str) -> datetime:
    # Normalize a trailing 'Z' for fromisoformat, then pin to UTC.
    return datetime.fromisoformat(value.replace("Z", "+00:00")).astimezone(timezone.utc)
def fmt(dt: datetime) -> str:
    return dt.astimezone(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
# First round starts at --start-at, or immediately when omitted.
base_time = parse_rfc3339_utc(start_at_arg) if start_at_arg else datetime.now(timezone.utc)
(run_root / "rounds").mkdir(parents=True, exist_ok=True)
(run_root / "state" / "ours" / "work-db").mkdir(parents=True, exist_ok=True)
(run_root / "state" / "ours" / "raw-store.db").mkdir(parents=True, exist_ok=True)
(run_root / "state" / "rpki-client" / "cache").mkdir(parents=True, exist_ok=True)
(run_root / "state" / "rpki-client" / "out").mkdir(parents=True, exist_ok=True)
meta = {
    "version": 1,
    "rir": "apnic",
    "roundCount": round_count,
    "intervalSecs": interval_secs,
    "baseScheduledAt": fmt(base_time),
    "mode": "dry_run" if dry_run else "remote_periodic",
    "execution": {
        "mode": "remote",
        "sshTarget": ssh_target,
        "remoteRoot": remote_root or None,
    },
}
(run_root / "meta.json").write_text(json.dumps(meta, indent=2), encoding="utf-8")
rounds = []
for idx in range(round_count):
    round_id = f"round-{idx+1:03d}"
    # Round 1 is a cold snapshot; every later round is an incremental delta.
    kind = "snapshot" if idx == 0 else "delta"
    scheduled_at = base_time + timedelta(seconds=interval_secs * idx)
    round_dir = run_root / "rounds" / round_id
    for name in ("ours", "rpki-client", "compare"):
        (round_dir / name).mkdir(parents=True, exist_ok=True)
    round_meta = {
        "roundId": round_id,
        "kind": kind,
        "scheduledAt": fmt(scheduled_at),
        "status": "dry_run" if dry_run else "pending",
        "paths": {
            "ours": f"rounds/{round_id}/ours",
            "rpkiClient": f"rounds/{round_id}/rpki-client",
            "compare": f"rounds/{round_id}/compare",
        },
    }
    (round_dir / "round-meta.json").write_text(json.dumps(round_meta, indent=2), encoding="utf-8")
    rounds.append(round_meta)
final_summary = {
    "version": 1,
    "status": "dry_run" if dry_run else "pending",
    "roundCount": round_count,
    "allMatch": None,
    "rounds": rounds,
}
(run_root / "final-summary.json").write_text(json.dumps(final_summary, indent=2), encoding="utf-8")
PY
# Dry-run stops after writing the skeleton.
if [[ "$DRY_RUN" -eq 1 ]]; then
echo "$RUN_ROOT"
exit 0
fi
# Build the release binaries locally if either is missing.
if [[ ! -x "$ROOT_DIR/target/release/rpki" || ! -x "$ROOT_DIR/target/release/ccr_to_compare_views" ]]; then
(
cd "$ROOT_DIR"
cargo build --release --bin rpki --bin ccr_to_compare_views
)
fi
# One-time sync of the repo, our binary, and rpki-client to the remote host;
# the per-round scripts are then invoked with --skip-sync.
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT'"
rsync -a --delete \
--exclude target \
--exclude .git \
"$ROOT_DIR/" "$SSH_TARGET:$REMOTE_ROOT/repo/"
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT/repo/target/release' '$REMOTE_ROOT/bin' '$REMOTE_ROOT/rounds' '$REMOTE_ROOT/state/ours' '$REMOTE_ROOT/state/rpki-client'"
rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_ROOT/repo/target/release/"
rsync -a "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_ROOT/bin/rpki-client"
# Execute rounds sequentially, each at its scheduled time.
for idx in $(seq 1 "$ROUND_COUNT"); do
ROUND_ID="$(printf 'round-%03d' "$idx")"
ROUND_DIR="$RUN_ROOT/rounds/$ROUND_ID"
SCHEDULED_AT="$(python3 - <<'PY' "$ROUND_DIR/round-meta.json"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['scheduledAt'])
PY
)"
# Sleep until the round's scheduled start (no-op when already past due).
python3 - <<'PY' "$SCHEDULED_AT"
from datetime import datetime, timezone
import sys, time
scheduled = datetime.fromisoformat(sys.argv[1].replace("Z", "+00:00")).astimezone(timezone.utc)
delay = (scheduled - datetime.now(timezone.utc)).total_seconds()
if delay > 0:
    time.sleep(delay)
PY
# Launch both relying parties concurrently for this round.
"$ROOT_DIR/scripts/periodic/run_apnic_ours_parallel_round_remote.sh" \
--run-root "$RUN_ROOT" \
--round-id "$ROUND_ID" \
--kind "$(python3 - <<'PY' "$ROUND_DIR/round-meta.json"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['kind'])
PY
)" \
--ssh-target "$SSH_TARGET" \
--remote-root "$REMOTE_ROOT" \
--scheduled-at "$SCHEDULED_AT" \
--skip-sync &
OURS_PID=$!
"$ROOT_DIR/scripts/periodic/run_apnic_rpki_client_round_remote.sh" \
--run-root "$RUN_ROOT" \
--round-id "$ROUND_ID" \
--kind "$(python3 - <<'PY' "$ROUND_DIR/round-meta.json"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['kind'])
PY
)" \
--ssh-target "$SSH_TARGET" \
--remote-root "$REMOTE_ROOT" \
--scheduled-at "$SCHEDULED_AT" \
--rpki-client-bin "$RPKI_CLIENT_BIN" \
--skip-sync &
CLIENT_PID=$!
# Wait for both runs, capturing their statuses without tripping set -e.
set +e
wait "$OURS_PID"; OURS_STATUS=$?
wait "$CLIENT_PID"; CLIENT_STATUS=$?
set -e
# Re-pull both RPs' artifacts for this round (the child scripts already sync
# on success; this also covers failed child runs).
rsync -az "$SSH_TARGET:$REMOTE_ROOT/rounds/$ROUND_ID/ours/" "$ROUND_DIR/ours/"
rsync -az "$SSH_TARGET:$REMOTE_ROOT/rounds/$ROUND_ID/rpki-client/" "$ROUND_DIR/rpki-client/"
# Compare CCR outputs only when both runs succeeded and produced files.
if [[ "$OURS_STATUS" -eq 0 && "$CLIENT_STATUS" -eq 0 \
&& -f "$ROUND_DIR/ours/result.ccr" && -f "$ROUND_DIR/rpki-client/result.ccr" ]]; then
"$ROOT_DIR/scripts/periodic/compare_ccr_round.sh" \
--ours-ccr "$ROUND_DIR/ours/result.ccr" \
--rpki-client-ccr "$ROUND_DIR/rpki-client/result.ccr" \
--out-dir "$ROUND_DIR/compare" \
--trust-anchor apnic >/dev/null
fi
# Fold both RPs' round-result.json and the comparison into round-meta.json.
python3 - <<'PY' "$ROUND_DIR/round-meta.json" "$ROUND_DIR/ours/round-result.json" "$ROUND_DIR/rpki-client/round-result.json" "$ROUND_DIR/compare/compare-summary.json"
import json, sys
from datetime import datetime, timezone
from pathlib import Path
meta_path, ours_path, client_path, compare_path = sys.argv[1:]
meta = json.load(open(meta_path, 'r', encoding='utf-8'))
ours = json.load(open(ours_path, 'r', encoding='utf-8'))
client = json.load(open(client_path, 'r', encoding='utf-8'))
scheduled = datetime.fromisoformat(meta['scheduledAt'].replace('Z', '+00:00')).astimezone(timezone.utc)
started = [
    datetime.fromisoformat(v.replace('Z', '+00:00')).astimezone(timezone.utc)
    for v in [ours.get('startedAt'), client.get('startedAt')] if v
]
finished = [
    datetime.fromisoformat(v.replace('Z', '+00:00')).astimezone(timezone.utc)
    for v in [ours.get('finishedAt'), client.get('finishedAt')] if v
]
if started:
    start_at = min(started)
    meta['startedAt'] = start_at.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Lag is clamped at zero: starting early is not reported as negative lag.
    meta['startLagMs'] = max(int((start_at - scheduled).total_seconds() * 1000), 0)
if finished:
    finish_at = max(finished)
    meta['finishedAt'] = finish_at.strftime('%Y-%m-%dT%H:%M:%SZ')
# A round completes only when both RPs exited 0.
meta['status'] = 'completed' if ours.get('exitCode') == 0 and client.get('exitCode') == 0 else 'failed'
meta['ours'] = {'exitCode': ours.get('exitCode'), 'durationMs': ours.get('durationMs')}
meta['rpkiClient'] = {'exitCode': client.get('exitCode'), 'durationMs': client.get('durationMs')}
if Path(compare_path).exists():
    compare = json.load(open(compare_path, 'r', encoding='utf-8'))
    meta['compare'] = {
        'allMatch': compare.get('allMatch'),
        'vrpMatch': compare.get('vrps', {}).get('match'),
        'vapMatch': compare.get('vaps', {}).get('match'),
        'oursVrps': compare.get('vrps', {}).get('ours'),
        'rpkiClientVrps': compare.get('vrps', {}).get('rpkiClient'),
        'oursVaps': compare.get('vaps', {}).get('ours'),
        'rpkiClientVaps': compare.get('vaps', {}).get('rpkiClient'),
    }
json.dump(meta, open(meta_path, 'w', encoding='utf-8'), indent=2)
PY
# Free remote disk before the next round (local copies were synced above).
ssh "$SSH_TARGET" "rm -rf '$REMOTE_ROOT/rounds/$ROUND_ID'"
done
# Aggregate every round-meta.json into the final summary; allMatch is true
# only when every round has a comparison with allMatch == true.
python3 - <<'PY' "$RUN_ROOT/final-summary.json" "$RUN_ROOT/rounds"
import json, sys
from pathlib import Path
summary_path = Path(sys.argv[1])
rounds_root = Path(sys.argv[2])
rounds = []
all_match = True
for round_dir in sorted(rounds_root.glob('round-*')):
    meta = json.load(open(round_dir / 'round-meta.json', 'r', encoding='utf-8'))
    rounds.append(meta)
    compare = meta.get('compare')
    if compare is None or compare.get('allMatch') is not True:
        all_match = False
summary = {
    'version': 1,
    'status': 'completed',
    'roundCount': len(rounds),
    'allMatch': all_match,
    'rounds': rounds,
}
json.dump(summary, open(summary_path, 'w', encoding='utf-8'), indent=2)
PY
echo "$RUN_ROOT"

View File

@ -0,0 +1,163 @@
#!/usr/bin/env bash
# Run one rpki-client APNIC round on a remote host over SSH: optionally sync
# the repo and the rpki-client binary, execute rpki-client remotely (clean
# cache for snapshot rounds, reused cache for delta rounds), record timing
# metadata, and pull the round's artifacts back into the local run root.
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/periodic/run_apnic_rpki_client_round_remote.sh \
--run-root <path> \
--round-id <round-XXX> \
--kind <snapshot|delta> \
--ssh-target <user@host> \
--remote-root <path> \
[--scheduled-at <RFC3339>] \
[--rpki-client-bin <local path>] \
[--skip-sync]
EOF
}
# Repository root, resolved relative to this script.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
RUN_ROOT=""
ROUND_ID=""
KIND=""
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT=""
SCHEDULED_AT=""
RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}"
SKIP_SYNC=0
# Parse command-line flags.
while [[ $# -gt 0 ]]; do
case "$1" in
--run-root) RUN_ROOT="$2"; shift 2 ;;
--round-id) ROUND_ID="$2"; shift 2 ;;
--kind) KIND="$2"; shift 2 ;;
--ssh-target) SSH_TARGET="$2"; shift 2 ;;
--remote-root) REMOTE_ROOT="$2"; shift 2 ;;
--scheduled-at) SCHEDULED_AT="$2"; shift 2 ;;
--rpki-client-bin) RPKI_CLIENT_BIN="$2"; shift 2 ;;
--skip-sync) SKIP_SYNC=1; shift 1 ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage; exit 2 ;;
esac
done
# Required arguments, the snapshot|delta domain check, and binary presence.
[[ -n "$RUN_ROOT" && -n "$ROUND_ID" && -n "$KIND" && -n "$REMOTE_ROOT" ]] || { usage >&2; exit 2; }
[[ "$KIND" == "snapshot" || "$KIND" == "delta" ]] || { echo "--kind must be snapshot or delta" >&2; exit 2; }
[[ -x "$RPKI_CLIENT_BIN" ]] || { echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2; exit 2; }
# Local destination and remote layout for this round.
LOCAL_OUT="$RUN_ROOT/rounds/$ROUND_ID/rpki-client"
REMOTE_REPO="$REMOTE_ROOT/repo"
REMOTE_BIN_DIR="$REMOTE_ROOT/bin"
REMOTE_BIN="$REMOTE_BIN_DIR/rpki-client"
REMOTE_OUT="$REMOTE_ROOT/rounds/$ROUND_ID/rpki-client"
REMOTE_CACHE="$REMOTE_ROOT/state/rpki-client/cache"
REMOTE_STATE_OUT="$REMOTE_ROOT/state/rpki-client/out"
REMOTE_STATE_ROOT="$REMOTE_ROOT/state/rpki-client"
mkdir -p "$LOCAL_OUT"
# Sync the repo (for the TAL fixture) and the rpki-client binary unless the
# orchestrator already did so and passed --skip-sync.
if [[ "$SKIP_SYNC" -eq 0 ]]; then
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT'"
rsync -a --delete \
--exclude target \
--exclude .git \
"$ROOT_DIR/" "$SSH_TARGET:$REMOTE_REPO/"
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_BIN_DIR' '$REMOTE_OUT' '$REMOTE_STATE_ROOT'"
rsync -a "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_BIN"
else
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_BIN_DIR' '$REMOTE_OUT' '$REMOTE_STATE_ROOT'"
fi
# Execute the round remotely; env assignments before 'bash -s' travel with the
# remote command, and the quoted EOS heredoc suppresses local expansion.
ssh "$SSH_TARGET" \
REMOTE_ROOT="$REMOTE_ROOT" \
REMOTE_BIN="$REMOTE_BIN" \
REMOTE_OUT="$REMOTE_OUT" \
REMOTE_CACHE="$REMOTE_CACHE" \
REMOTE_STATE_OUT="$REMOTE_STATE_OUT" \
REMOTE_STATE_ROOT="$REMOTE_STATE_ROOT" \
KIND="$KIND" \
ROUND_ID="$ROUND_ID" \
SCHEDULED_AT="$SCHEDULED_AT" \
'bash -s' <<'EOS'
set -euo pipefail
cd "$REMOTE_ROOT"
mkdir -p "$REMOTE_OUT"
# Snapshot rounds wipe the cache, output, and TA state for a cold start;
# delta rounds keep them so rpki-client can fetch incrementally.
if [[ "$KIND" == "snapshot" ]]; then
rm -rf "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$REMOTE_STATE_ROOT/ta" "$REMOTE_STATE_ROOT/.ta"
fi
mkdir -p "$REMOTE_CACHE" "$REMOTE_STATE_OUT"
# NOTE(review): 0777 is presumably required so rpki-client's privilege-dropped
# child processes can write to these directories — confirm.
chmod 0777 "$REMOTE_STATE_ROOT" "$REMOTE_CACHE" "$REMOTE_STATE_OUT"
# Wall-clock timestamps: ISO for humans, epoch millis for durationMs.
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
ccr_out="$REMOTE_OUT/result.ccr"
run_log="$REMOTE_OUT/run.log"
meta_out="$REMOTE_OUT/round-result.json"
# Run rpki-client from the state dir (relative cache/out paths); capture the
# exit code instead of aborting so round metadata is always written.
set +e
(
cd "$REMOTE_STATE_ROOT"
"$REMOTE_BIN" -vv -t "../../repo/tests/fixtures/tal/apnic-rfc7730-https.tal" -d "cache" "out"
) >"$run_log" 2>&1
exit_code=$?
set -e
# Preserve this round's CCR and openbgpd outputs when they were produced.
if [[ -f "$REMOTE_STATE_OUT/rpki.ccr" ]]; then
cp "$REMOTE_STATE_OUT/rpki.ccr" "$ccr_out"
fi
if [[ -f "$REMOTE_STATE_OUT/openbgpd" ]]; then
cp "$REMOTE_STATE_OUT/openbgpd" "$REMOTE_OUT/openbgpd"
fi
finished_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
finished_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
# Persist per-round metadata (timing, paths, exit code) next to the artifacts.
python3 - <<'PY' "$meta_out" "$ROUND_ID" "$KIND" "$SCHEDULED_AT" "$started_at_iso" "$finished_at_iso" "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$exit_code" "$started_at_ms" "$finished_at_ms"
import json, sys
(
    path,
    round_id,
    kind,
    scheduled_at,
    started_at,
    finished_at,
    cache_path,
    out_path,
    exit_code,
    start_ms,
    end_ms,
) = sys.argv[1:]
with open(path, "w", encoding="utf-8") as fh:
    json.dump(
        {
            "roundId": round_id,
            "kind": kind,
            # Empty --scheduled-at serializes as JSON null.
            "scheduledAt": scheduled_at or None,
            "startedAt": started_at,
            "finishedAt": finished_at,
            "durationMs": int(end_ms) - int(start_ms),
            "remoteCachePath": cache_path,
            "remoteOutPath": out_path,
            "exitCode": int(exit_code),
        },
        fh,
        indent=2,
    )
PY
# Propagate rpki-client's exit status to the local caller through ssh.
exit "$exit_code"
EOS
# Pull the remote round artifacts into the local round directory.
rsync -a "$SSH_TARGET:$REMOTE_OUT/" "$LOCAL_OUT/"
echo "$LOCAL_OUT"

View File

@ -0,0 +1,317 @@
#!/usr/bin/env bash
# ARIN dual-RP periodic comparison driver. Per the usage text below, the M1
# milestone only builds the run skeleton and scheduling metadata.
set -euo pipefail

# Print CLI usage. The heredoc body is user-facing output text.
usage() {
cat <<'EOF'
Usage:
./scripts/periodic/run_arin_dual_rp_periodic_ccr_compare.sh \
--run-root <path> \
[--ssh-target <user@host>] \
[--remote-root <path>] \
[--rpki-client-bin <path>] \
[--round-count <n>] \
[--interval-secs <n>] \
[--start-at <RFC3339>] \
[--dry-run]
M1 behavior:
- creates the periodic run skeleton
- writes per-round scheduling metadata
- does not execute RP binaries yet
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
RUN_ROOT=""
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT=""
RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}"
ROUND_COUNT=10
INTERVAL_SECS=600
START_AT=""
DRY_RUN=0
while [[ $# -gt 0 ]]; do
case "$1" in
--run-root) RUN_ROOT="$2"; shift 2 ;;
--ssh-target) SSH_TARGET="$2"; shift 2 ;;
--remote-root) REMOTE_ROOT="$2"; shift 2 ;;
--rpki-client-bin) RPKI_CLIENT_BIN="$2"; shift 2 ;;
--round-count) ROUND_COUNT="$2"; shift 2 ;;
--interval-secs) INTERVAL_SECS="$2"; shift 2 ;;
--start-at) START_AT="$2"; shift 2 ;;
--dry-run) DRY_RUN=1; shift 1 ;;
-h|--help) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage; exit 2 ;;
esac
done
[[ -n "$RUN_ROOT" ]] || { usage >&2; exit 2; }
[[ "$ROUND_COUNT" =~ ^[0-9]+$ ]] || { echo "--round-count must be an integer" >&2; exit 2; }
[[ "$INTERVAL_SECS" =~ ^[0-9]+$ ]] || { echo "--interval-secs must be an integer" >&2; exit 2; }
if [[ "$DRY_RUN" -ne 1 ]]; then
[[ -n "$REMOTE_ROOT" ]] || { echo "--remote-root is required unless --dry-run" >&2; exit 2; }
[[ -x "$RPKI_CLIENT_BIN" ]] || { echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2; exit 2; }
fi
mkdir -p "$RUN_ROOT"
python3 - <<'PY' "$RUN_ROOT" "$SSH_TARGET" "$REMOTE_ROOT" "$ROUND_COUNT" "$INTERVAL_SECS" "$START_AT" "$DRY_RUN"
import json
import sys
from datetime import datetime, timedelta, timezone
from pathlib import Path
run_root = Path(sys.argv[1]).resolve()
ssh_target = sys.argv[2]
remote_root = sys.argv[3]
round_count = int(sys.argv[4])
interval_secs = int(sys.argv[5])
start_at_arg = sys.argv[6]
dry_run = bool(int(sys.argv[7]))
def parse_rfc3339_utc(value: str) -> datetime:
return datetime.fromisoformat(value.replace("Z", "+00:00")).astimezone(timezone.utc)
def fmt(dt: datetime) -> str:
return dt.astimezone(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
base_time = (
parse_rfc3339_utc(start_at_arg)
if start_at_arg
else datetime.now(timezone.utc)
)
(run_root / "rounds").mkdir(parents=True, exist_ok=True)
(run_root / "state" / "ours" / "work-db").mkdir(parents=True, exist_ok=True)
(run_root / "state" / "ours" / "raw-store.db").mkdir(parents=True, exist_ok=True)
(run_root / "state" / "rpki-client" / "cache").mkdir(parents=True, exist_ok=True)
(run_root / "state" / "rpki-client" / "out").mkdir(parents=True, exist_ok=True)
meta = {
"version": 1,
"rir": "arin",
"roundCount": round_count,
"intervalSecs": interval_secs,
"baseScheduledAt": fmt(base_time),
"mode": "dry_run" if dry_run else "skeleton_only",
"execution": {
"mode": "remote",
"sshTarget": ssh_target,
"remoteRoot": remote_root or None,
},
"state": {
"ours": {
"workDbPath": "state/ours/work-db",
"rawStoreDbPath": "state/ours/raw-store.db",
"remoteWorkDbPath": "state/ours/work-db",
"remoteRawStoreDbPath": "state/ours/raw-store.db",
},
"rpkiClient": {
"cachePath": "state/rpki-client/cache",
"outPath": "state/rpki-client/out",
"remoteCachePath": "state/rpki-client/cache",
"remoteOutPath": "state/rpki-client/out",
},
},
}
(run_root / "meta.json").write_text(json.dumps(meta, indent=2), encoding="utf-8")
rounds = []
for idx in range(round_count):
round_id = f"round-{idx+1:03d}"
kind = "snapshot" if idx == 0 else "delta"
scheduled_at = base_time + timedelta(seconds=interval_secs * idx)
round_dir = run_root / "rounds" / round_id
for name in ("ours", "rpki-client", "compare"):
(round_dir / name).mkdir(parents=True, exist_ok=True)
# M1 only builds the schedule skeleton, so lag is defined relative to the schedule model.
started_at = scheduled_at if dry_run else None
finished_at = scheduled_at if dry_run else None
round_meta = {
"roundId": round_id,
"kind": kind,
"scheduledAt": fmt(scheduled_at),
"startedAt": fmt(started_at) if started_at else None,
"finishedAt": fmt(finished_at) if finished_at else None,
"startLagMs": 0 if dry_run else None,
"status": "dry_run" if dry_run else "pending",
"paths": {
"ours": f"rounds/{round_id}/ours",
"rpkiClient": f"rounds/{round_id}/rpki-client",
"compare": f"rounds/{round_id}/compare",
},
}
(round_dir / "round-meta.json").write_text(
json.dumps(round_meta, indent=2), encoding="utf-8"
)
rounds.append(round_meta)
final_summary = {
"version": 1,
"status": "dry_run" if dry_run else "pending",
"roundCount": round_count,
"allMatch": None,
"rounds": rounds,
}
(run_root / "final-summary.json").write_text(
json.dumps(final_summary, indent=2), encoding="utf-8"
)
PY
if [[ "$DRY_RUN" -eq 1 ]]; then
echo "$RUN_ROOT"
exit 0
fi
if [[ ! -x "$ROOT_DIR/target/release/rpki" || ! -x "$ROOT_DIR/target/release/ccr_to_compare_views" ]]; then
(
cd "$ROOT_DIR"
cargo build --release --bin rpki --bin ccr_to_compare_views
)
fi
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT'"
rsync -a --delete \
--exclude target \
--exclude .git \
"$ROOT_DIR/" "$SSH_TARGET:$REMOTE_ROOT/repo/"
ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT/repo/target/release' '$REMOTE_ROOT/bin' '$REMOTE_ROOT/rounds' '$REMOTE_ROOT/state/ours' '$REMOTE_ROOT/state/rpki-client'"
rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_ROOT/repo/target/release/"
# Push the rpki-client binary to the remote host once, up front; every round
# below runs with --skip-sync and reuses this copy.
rsync -a "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_ROOT/bin/rpki-client"
# Execute every pre-scheduled round: sleep until its scheduledAt, launch the
# "ours" and rpki-client runners concurrently against the remote host, then
# compare their CCR outputs and merge results into the round's metadata.
for idx in $(seq 1 "$ROUND_COUNT"); do
  ROUND_ID="$(printf 'round-%03d' "$idx")"
  ROUND_DIR="$RUN_ROOT/rounds/$ROUND_ID"
  # The scheduled start instant comes from the pre-generated round metadata.
  SCHEDULED_AT="$(python3 - <<'PY' "$ROUND_DIR/round-meta.json"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['scheduledAt'])
PY
)"
  # Sleep (UTC arithmetic) until the scheduled instant; no-op if it is past.
  python3 - <<'PY' "$SCHEDULED_AT"
from datetime import datetime, timezone
import sys, time
scheduled = datetime.fromisoformat(sys.argv[1].replace("Z", "+00:00")).astimezone(timezone.utc)
now = datetime.now(timezone.utc)
delay = (scheduled - now).total_seconds()
if delay > 0:
    time.sleep(delay)
PY
  # Launch both runners in the background so the two validators execute
  # concurrently on the same remote host.
  "$ROOT_DIR/scripts/periodic/run_arin_ours_round_remote.sh" \
    --run-root "$RUN_ROOT" \
    --round-id "$ROUND_ID" \
    --kind "$(python3 - <<'PY' "$ROUND_DIR/round-meta.json"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['kind'])
PY
)" \
    --ssh-target "$SSH_TARGET" \
    --remote-root "$REMOTE_ROOT" \
    --scheduled-at "$SCHEDULED_AT" \
    --skip-sync &
  OURS_PID=$!
  "$ROOT_DIR/scripts/periodic/run_arin_rpki_client_round_remote.sh" \
    --run-root "$RUN_ROOT" \
    --round-id "$ROUND_ID" \
    --kind "$(python3 - <<'PY' "$ROUND_DIR/round-meta.json"
import json, sys
print(json.load(open(sys.argv[1], 'r', encoding='utf-8'))['kind'])
PY
)" \
    --ssh-target "$SSH_TARGET" \
    --remote-root "$REMOTE_ROOT" \
    --scheduled-at "$SCHEDULED_AT" \
    --rpki-client-bin "$RPKI_CLIENT_BIN" \
    --skip-sync &
  CLIENT_PID=$!
  # Collect both exit codes without letting set -e abort the round loop.
  set +e
  wait "$OURS_PID"
  OURS_STATUS=$?
  wait "$CLIENT_PID"
  CLIENT_STATUS=$?
  set -e
  # Compare only when both runs succeeded AND both produced a CCR file.
  if [[ "$OURS_STATUS" -eq 0 && "$CLIENT_STATUS" -eq 0 \
    && -f "$ROUND_DIR/ours/result.ccr" && -f "$ROUND_DIR/rpki-client/result.ccr" ]]; then
    "$ROOT_DIR/scripts/periodic/compare_ccr_round.sh" \
      --ours-ccr "$ROUND_DIR/ours/result.ccr" \
      --rpki-client-ccr "$ROUND_DIR/rpki-client/result.ccr" \
      --out-dir "$ROUND_DIR/compare"
  fi
  # Merge per-runner results (plus the optional compare summary) back into
  # round-meta.json: start lag vs. schedule, overall status, durations, and
  # the VRP/VAP match flags.
  # NOTE(review): both round-result.json files are read unconditionally here,
  # so a runner that fails before syncing its results back makes this step
  # (and, via set -e, the whole driver) fail — confirm the runners always
  # sync artifacts even on failure.
  python3 - <<'PY' "$ROUND_DIR/round-meta.json" "$ROUND_DIR/ours/round-result.json" "$ROUND_DIR/rpki-client/round-result.json" "$ROUND_DIR/compare/compare-summary.json"
import json, sys
from datetime import datetime, timezone
round_meta_path, ours_result_path, client_result_path, compare_path = sys.argv[1:]
meta = json.load(open(round_meta_path, 'r', encoding='utf-8'))
ours = json.load(open(ours_result_path, 'r', encoding='utf-8'))
client = json.load(open(client_result_path, 'r', encoding='utf-8'))
scheduled = datetime.fromisoformat(meta['scheduledAt'].replace('Z', '+00:00')).astimezone(timezone.utc)
# Round start = earliest runner start; round end = latest runner finish.
started_candidates = []
for item in (ours, client):
    if item.get('startedAt'):
        started_candidates.append(datetime.fromisoformat(item['startedAt'].replace('Z', '+00:00')).astimezone(timezone.utc))
finished_candidates = []
for item in (ours, client):
    if item.get('finishedAt'):
        finished_candidates.append(datetime.fromisoformat(item['finishedAt'].replace('Z', '+00:00')).astimezone(timezone.utc))
if started_candidates:
    started_at = min(started_candidates)
    meta['startedAt'] = started_at.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Clamp to zero: a runner may start slightly before the scheduled tick.
    lag_ms = int((started_at - scheduled).total_seconds() * 1000)
    meta['startLagMs'] = max(lag_ms, 0)
if finished_candidates:
    finished_at = max(finished_candidates)
    meta['finishedAt'] = finished_at.strftime('%Y-%m-%dT%H:%M:%SZ')
meta['status'] = 'completed' if ours.get('exitCode') == 0 and client.get('exitCode') == 0 else 'failed'
# Durations come from the sibling timing.json written next to each result.
meta['ours'] = {
    'exitCode': ours.get('exitCode'),
    'durationMs': json.load(open(ours_result_path.replace('round-result.json', 'timing.json'), 'r', encoding='utf-8')).get('durationMs'),
}
meta['rpkiClient'] = {
    'exitCode': client.get('exitCode'),
    'durationMs': json.load(open(client_result_path.replace('round-result.json', 'timing.json'), 'r', encoding='utf-8')).get('durationMs'),
}
# compare-summary.json only exists when the compare step above ran.
if compare_path and __import__('pathlib').Path(compare_path).exists():
    compare = json.load(open(compare_path, 'r', encoding='utf-8'))
    meta['compare'] = {
        'allMatch': compare.get('allMatch'),
        'vrpMatch': compare.get('vrps', {}).get('match'),
        'vapMatch': compare.get('vaps', {}).get('match'),
    }
json.dump(meta, open(round_meta_path, 'w', encoding='utf-8'), indent=2)
PY
done
# Aggregate all per-round metadata into one final summary; allMatch is true
# only when every round has a compare block with allMatch == true.
python3 - <<'PY' "$RUN_ROOT/final-summary.json" "$RUN_ROOT/rounds"
import json, sys
from pathlib import Path
summary_path = Path(sys.argv[1])
rounds_root = Path(sys.argv[2])
rounds = []
all_match = True
for round_dir in sorted(rounds_root.glob('round-*')):
    meta = json.load(open(round_dir / 'round-meta.json', 'r', encoding='utf-8'))
    rounds.append(meta)
    compare = meta.get('compare')
    if compare is None or compare.get('allMatch') is not True:
        all_match = False
summary = {
    'version': 1,
    'status': 'completed',
    'roundCount': len(rounds),
    'allMatch': all_match,
    'rounds': rounds,
}
json.dump(summary, open(summary_path, 'w', encoding='utf-8'), indent=2)
PY
echo "$RUN_ROOT"

View File

@ -0,0 +1,155 @@
#!/usr/bin/env bash
# Run one "ours" validation round on a remote host and pull the results back.
#
# Remote working state lives under $REMOTE_ROOT/state/ours: a "snapshot"
# round wipes it first (cold start), a "delta" round reuses it (warm start).
# Outputs (result.ccr, report.json, run.log, timing.json, round-result.json)
# are written under $REMOTE_ROOT/rounds/<round-id>/ours and synced back to
# <run-root>/rounds/<round-id>/ours.
#
# Fix vs. previous revision: under `set -e`, a failing remote run (the ssh
# heredoc ends with `exit "$exit_code"`) aborted this script BEFORE the
# rsync that fetches run.log / timing.json / round-result.json, stranding
# all diagnostics on the remote host — and the periodic driver reads
# round-result.json unconditionally. We now always sync the round artifacts
# back and only then propagate the remote exit status.
set -euo pipefail
usage() {
  cat <<'EOF'
Usage:
  ./scripts/periodic/run_arin_ours_round_remote.sh \
    --run-root <path> \
    --round-id <round-XXX> \
    --kind <snapshot|delta> \
    --ssh-target <user@host> \
    --remote-root <path> \
    [--scheduled-at <RFC3339>] \
    [--skip-sync]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
RUN_ROOT=""
ROUND_ID=""
KIND=""
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT=""
SCHEDULED_AT=""
SKIP_SYNC=0
while [[ $# -gt 0 ]]; do
  case "$1" in
    --run-root) RUN_ROOT="$2"; shift 2 ;;
    --round-id) ROUND_ID="$2"; shift 2 ;;
    --kind) KIND="$2"; shift 2 ;;
    --ssh-target) SSH_TARGET="$2"; shift 2 ;;
    --remote-root) REMOTE_ROOT="$2"; shift 2 ;;
    --scheduled-at) SCHEDULED_AT="$2"; shift 2 ;;
    --skip-sync) SKIP_SYNC=1; shift 1 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
[[ -n "$RUN_ROOT" && -n "$ROUND_ID" && -n "$KIND" && -n "$REMOTE_ROOT" ]] || { usage >&2; exit 2; }
[[ "$KIND" == "snapshot" || "$KIND" == "delta" ]] || { echo "--kind must be snapshot or delta" >&2; exit 2; }
LOCAL_OUT="$RUN_ROOT/rounds/$ROUND_ID/ours"
REMOTE_REPO="$REMOTE_ROOT/repo"
REMOTE_OUT="$REMOTE_ROOT/rounds/$ROUND_ID/ours"
REMOTE_WORK_DB="$REMOTE_ROOT/state/ours/work-db"
REMOTE_RAW_STORE="$REMOTE_ROOT/state/ours/raw-store.db"
mkdir -p "$LOCAL_OUT"
# Without --skip-sync, mirror the repo and the prebuilt release binary to the
# remote host first; with --skip-sync only ensure the output dirs exist
# (the periodic driver syncs once up front and passes --skip-sync per round).
if [[ "$SKIP_SYNC" -eq 0 ]]; then
  ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT'"
  rsync -a --delete \
    --exclude target \
    --exclude .git \
    "$ROOT_DIR/" "$SSH_TARGET:$REMOTE_REPO/"
  ssh "$SSH_TARGET" "mkdir -p '$REMOTE_REPO/target/release' '$REMOTE_OUT' '$REMOTE_ROOT/state/ours'"
  rsync -a "$ROOT_DIR/target/release/rpki" "$SSH_TARGET:$REMOTE_REPO/target/release/"
else
  ssh "$SSH_TARGET" "mkdir -p '$REMOTE_OUT' '$REMOTE_ROOT/state/ours'"
fi
# Run the round remotely. Do NOT abort on failure: the artifacts written by
# the remote script must be fetched for diagnosis either way.
set +e
ssh "$SSH_TARGET" \
  REMOTE_REPO="$REMOTE_REPO" \
  REMOTE_OUT="$REMOTE_OUT" \
  REMOTE_WORK_DB="$REMOTE_WORK_DB" \
  REMOTE_RAW_STORE="$REMOTE_RAW_STORE" \
  KIND="$KIND" \
  ROUND_ID="$ROUND_ID" \
  SCHEDULED_AT="$SCHEDULED_AT" \
  'bash -s' <<'EOS'
set -euo pipefail
cd "$REMOTE_REPO"
mkdir -p "$REMOTE_OUT"
# A snapshot round starts cold: wipe the working DB and raw store.
if [[ "$KIND" == "snapshot" ]]; then
rm -rf "$REMOTE_WORK_DB" "$REMOTE_RAW_STORE"
fi
mkdir -p "$(dirname "$REMOTE_WORK_DB")"
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
ccr_out="$REMOTE_OUT/result.ccr"
report_out="$REMOTE_OUT/report.json"
run_log="$REMOTE_OUT/run.log"
timing_out="$REMOTE_OUT/timing.json"
meta_out="$REMOTE_OUT/round-result.json"
# Run the validator, capturing its exit code instead of aborting so that
# timing.json / round-result.json are always written below.
set +e
env RPKI_PROGRESS_LOG=1 target/release/rpki \
--db "$REMOTE_WORK_DB" \
--raw-store-db "$REMOTE_RAW_STORE" \
--tal-path tests/fixtures/tal/arin.tal \
--ta-path tests/fixtures/ta/arin-ta.cer \
--ccr-out "$ccr_out" \
--report-json "$report_out" \
>"$run_log" 2>&1
exit_code=$?
set -e
finished_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
finished_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
# Record wall-clock timing for the run.
python3 - <<'PY' "$timing_out" "$started_at_ms" "$finished_at_ms" "$started_at_iso" "$finished_at_iso" "$exit_code"
import json, sys
path, start_ms, end_ms, started_at, finished_at, exit_code = sys.argv[1:]
with open(path, "w", encoding="utf-8") as fh:
    json.dump(
        {
            "durationMs": int(end_ms) - int(start_ms),
            "startedAt": started_at,
            "finishedAt": finished_at,
            "exitCode": int(exit_code),
        },
        fh,
        indent=2,
    )
PY
# Record round metadata (consumed by the periodic driver).
python3 - <<'PY' "$meta_out" "$ROUND_ID" "$KIND" "$SCHEDULED_AT" "$started_at_iso" "$finished_at_iso" "$REMOTE_WORK_DB" "$REMOTE_RAW_STORE" "$exit_code"
import json, sys
path, round_id, kind, scheduled_at, started_at, finished_at, work_db, raw_store, exit_code = sys.argv[1:]
with open(path, "w", encoding="utf-8") as fh:
    json.dump(
        {
            "roundId": round_id,
            "kind": kind,
            "scheduledAt": scheduled_at or None,
            "startedAt": started_at,
            "finishedAt": finished_at,
            "remoteWorkDbPath": work_db,
            "remoteRawStoreDbPath": raw_store,
            "exitCode": int(exit_code),
        },
        fh,
        indent=2,
    )
PY
exit "$exit_code"
EOS
REMOTE_STATUS=$?
set -e
# Always fetch the round artifacts, then propagate the remote exit status so
# callers still see success/failure exactly as before.
rsync -a "$SSH_TARGET:$REMOTE_OUT/" "$LOCAL_OUT/"
echo "$LOCAL_OUT"
exit "$REMOTE_STATUS"

View File

@ -0,0 +1,165 @@
#!/usr/bin/env bash
# Run one rpki-client validation round on a remote host and pull the results
# back (the reference runner paired with run_arin_ours_round_remote.sh).
#
# Remote state lives under $REMOTE_ROOT/state/rpki-client: a "snapshot" round
# wipes cache/out/TA dirs (cold start), a "delta" round reuses them.
#
# Fix vs. previous revision: under `set -e`, a failing remote run (the ssh
# heredoc ends with `exit "$exit_code"`) aborted this script BEFORE the
# rsync that fetches run.log / timing.json / round-result.json, stranding
# all diagnostics on the remote host — and the periodic driver reads
# round-result.json unconditionally. We now always sync the round artifacts
# back and only then propagate the remote exit status.
set -euo pipefail
usage() {
  cat <<'EOF'
Usage:
  ./scripts/periodic/run_arin_rpki_client_round_remote.sh \
    --run-root <path> \
    --round-id <round-XXX> \
    --kind <snapshot|delta> \
    --ssh-target <user@host> \
    --remote-root <path> \
    [--scheduled-at <RFC3339>] \
    [--rpki-client-bin <local path>] \
    [--skip-sync]
EOF
}
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
RUN_ROOT=""
ROUND_ID=""
KIND=""
SSH_TARGET="${SSH_TARGET:-root@47.77.183.68}"
REMOTE_ROOT=""
SCHEDULED_AT=""
RPKI_CLIENT_BIN="${RPKI_CLIENT_BIN:-/home/yuyr/dev/rpki-client-9.7/build-m5/src/rpki-client}"
SKIP_SYNC=0
while [[ $# -gt 0 ]]; do
  case "$1" in
    --run-root) RUN_ROOT="$2"; shift 2 ;;
    --round-id) ROUND_ID="$2"; shift 2 ;;
    --kind) KIND="$2"; shift 2 ;;
    --ssh-target) SSH_TARGET="$2"; shift 2 ;;
    --remote-root) REMOTE_ROOT="$2"; shift 2 ;;
    --scheduled-at) SCHEDULED_AT="$2"; shift 2 ;;
    --rpki-client-bin) RPKI_CLIENT_BIN="$2"; shift 2 ;;
    --skip-sync) SKIP_SYNC=1; shift 1 ;;
    -h|--help) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage; exit 2 ;;
  esac
done
[[ -n "$RUN_ROOT" && -n "$ROUND_ID" && -n "$KIND" && -n "$REMOTE_ROOT" ]] || { usage >&2; exit 2; }
[[ "$KIND" == "snapshot" || "$KIND" == "delta" ]] || { echo "--kind must be snapshot or delta" >&2; exit 2; }
[[ -x "$RPKI_CLIENT_BIN" ]] || { echo "rpki-client binary not executable: $RPKI_CLIENT_BIN" >&2; exit 2; }
LOCAL_OUT="$RUN_ROOT/rounds/$ROUND_ID/rpki-client"
REMOTE_REPO="$REMOTE_ROOT/repo"
REMOTE_BIN_DIR="$REMOTE_ROOT/bin"
REMOTE_BIN="$REMOTE_BIN_DIR/rpki-client"
REMOTE_OUT="$REMOTE_ROOT/rounds/$ROUND_ID/rpki-client"
REMOTE_CACHE="$REMOTE_ROOT/state/rpki-client/cache"
REMOTE_STATE_OUT="$REMOTE_ROOT/state/rpki-client/out"
REMOTE_STATE_ROOT="$REMOTE_ROOT/state/rpki-client"
mkdir -p "$LOCAL_OUT"
# Without --skip-sync, mirror the repo (for TAL fixtures) and the rpki-client
# binary to the remote host; with --skip-sync only ensure dirs exist.
if [[ "$SKIP_SYNC" -eq 0 ]]; then
  ssh "$SSH_TARGET" "mkdir -p '$REMOTE_ROOT'"
  rsync -a --delete \
    --exclude target \
    --exclude .git \
    "$ROOT_DIR/" "$SSH_TARGET:$REMOTE_REPO/"
  ssh "$SSH_TARGET" "mkdir -p '$REMOTE_BIN_DIR' '$REMOTE_OUT' '$REMOTE_STATE_ROOT'"
  rsync -a "$RPKI_CLIENT_BIN" "$SSH_TARGET:$REMOTE_BIN"
else
  ssh "$SSH_TARGET" "mkdir -p '$REMOTE_BIN_DIR' '$REMOTE_OUT' '$REMOTE_STATE_ROOT'"
fi
# Run the round remotely. Do NOT abort on failure: the artifacts written by
# the remote script must be fetched for diagnosis either way.
set +e
ssh "$SSH_TARGET" \
  REMOTE_ROOT="$REMOTE_ROOT" \
  REMOTE_REPO="$REMOTE_REPO" \
  REMOTE_BIN="$REMOTE_BIN" \
  REMOTE_OUT="$REMOTE_OUT" \
  REMOTE_CACHE="$REMOTE_CACHE" \
  REMOTE_STATE_OUT="$REMOTE_STATE_OUT" \
  REMOTE_STATE_ROOT="$REMOTE_STATE_ROOT" \
  KIND="$KIND" \
  ROUND_ID="$ROUND_ID" \
  SCHEDULED_AT="$SCHEDULED_AT" \
  'bash -s' <<'EOS'
set -euo pipefail
cd "$REMOTE_ROOT"
mkdir -p "$REMOTE_OUT"
# A snapshot round starts cold: wipe cache, output and trust-anchor dirs.
if [[ "$KIND" == "snapshot" ]]; then
rm -rf "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$REMOTE_STATE_ROOT/ta" "$REMOTE_STATE_ROOT/.ta"
fi
mkdir -p "$REMOTE_CACHE" "$REMOTE_STATE_OUT"
# NOTE(review): presumably world-writable because rpki-client drops
# privileges to an unprivileged user that must write here — confirm.
chmod 0777 "$REMOTE_STATE_ROOT" "$REMOTE_CACHE" "$REMOTE_STATE_OUT"
started_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
started_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
ccr_out="$REMOTE_OUT/result.ccr"
run_log="$REMOTE_OUT/run.log"
timing_out="$REMOTE_OUT/timing.json"
meta_out="$REMOTE_OUT/round-result.json"
# Run rpki-client from the state root; "../../repo/..." resolves to the
# mirrored repo since REMOTE_STATE_ROOT is $REMOTE_ROOT/state/rpki-client.
# Capture the exit code so the JSON artifacts below are always written.
set +e
(
cd "$REMOTE_STATE_ROOT"
"$REMOTE_BIN" -vv -t "../../repo/tests/fixtures/tal/arin.tal" -d "cache" "out"
) >"$run_log" 2>&1
exit_code=$?
set -e
# Preserve the produced CCR under the round's output dir, if any.
if [[ -f "$REMOTE_STATE_OUT/rpki.ccr" ]]; then
cp "$REMOTE_STATE_OUT/rpki.ccr" "$ccr_out"
fi
finished_at_iso="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
finished_at_ms="$(python3 - <<'PY'
import time
print(int(time.time() * 1000))
PY
)"
# Record wall-clock timing for the run.
python3 - <<'PY' "$timing_out" "$started_at_ms" "$finished_at_ms" "$started_at_iso" "$finished_at_iso" "$exit_code"
import json, sys
path, start_ms, end_ms, started_at, finished_at, exit_code = sys.argv[1:]
with open(path, "w", encoding="utf-8") as fh:
    json.dump(
        {
            "durationMs": int(end_ms) - int(start_ms),
            "startedAt": started_at,
            "finishedAt": finished_at,
            "exitCode": int(exit_code),
        },
        fh,
        indent=2,
    )
PY
# Record round metadata (consumed by the periodic driver).
python3 - <<'PY' "$meta_out" "$ROUND_ID" "$KIND" "$SCHEDULED_AT" "$started_at_iso" "$finished_at_iso" "$REMOTE_CACHE" "$REMOTE_STATE_OUT" "$exit_code"
import json, sys
path, round_id, kind, scheduled_at, started_at, finished_at, cache_path, out_path, exit_code = sys.argv[1:]
with open(path, "w", encoding="utf-8") as fh:
    json.dump(
        {
            "roundId": round_id,
            "kind": kind,
            "scheduledAt": scheduled_at or None,
            "startedAt": started_at,
            "finishedAt": finished_at,
            "remoteCachePath": cache_path,
            "remoteOutPath": out_path,
            "exitCode": int(exit_code),
        },
        fh,
        indent=2,
    )
PY
exit "$exit_code"
EOS
REMOTE_STATUS=$?
set -e
# Always fetch the round artifacts, then propagate the remote exit status so
# callers still see success/failure exactly as before.
rsync -a "$SSH_TARGET:$REMOTE_OUT/" "$LOCAL_OUT/"
echo "$LOCAL_OUT"
exit "$REMOTE_STATUS"

View File

@ -0,0 +1,129 @@
# Live Bundle Record
`run_live_bundle_record.sh` 是当前 `ours` 的单命令 live bundle 录制入口。
它做三件事:
1. 联网执行 **live base recorder**
2. 基于刚录制的 base bundle 执行 **live delta recorder**
3. 产出一个统一的最终目录,包含:
- `base-payload-archive/`
- `payload-delta-archive/`
- `base-locks.json`
- `locks-delta.json`
- `tal.tal`
- `ta.cer`
- `base.ccr`
- `delta.ccr`
- `base-vrps.csv`
- `base-vaps.csv`
- `record-delta.csv`
- `record-delta-vaps.csv`
- `bundle.json`
- `verification.json`
- `timings/`
## 用法
```bash
cd rpki
./scripts/replay_bundle/run_live_bundle_record.sh \
--rir apnic \
--tal-path tests/fixtures/tal/apnic-rfc7730-https.tal \
--ta-path tests/fixtures/ta/apnic-ta.cer
```
默认输出目录:
```text
target/replay/<rir>_live_bundle_<timestamp>
```
如果要一次录制多个 RIR,使用:
```bash
cd rpki
./scripts/replay_bundle/run_live_bundle_record_multi_rir.sh \
--rir afrinic,apnic,arin,lacnic,ripe
```
默认输出目录:
```text
target/replay/live_bundle_matrix_<timestamp>
```
每个 RIR 会落到:
```text
target/replay/live_bundle_matrix_<timestamp>/<rir>_live_bundle_<timestamp>
```
如果要录制单个 RIR 的 `1 base + N delta` 序列,使用:
```bash
cd rpki
./scripts/replay_bundle/run_live_bundle_record_sequence.sh \
--rir apnic \
--tal-path tests/fixtures/tal/apnic-rfc7730-https.tal \
--ta-path tests/fixtures/ta/apnic-ta.cer \
--delta-count 2 \
--delta-interval-secs 0
```
默认输出目录:
```text
target/replay/<rir>_live_bundle_sequence_<timestamp>
```
如果要一次录制多个 RIR 的 `1 base + N delta` 序列,使用:
```bash
cd rpki
./scripts/replay_bundle/run_live_bundle_record_multi_rir_sequence.sh \
--rir afrinic,apnic,arin,lacnic,ripe
```
默认输出目录:
```text
target/replay/live_bundle_sequence_matrix_<timestamp>
```
## 可选参数
- `--out-dir <path>`
- `--base-validation-time <rfc3339>`
- `--delta-validation-time <rfc3339>`
- `--http-timeout-secs <n>`
- `--rsync-timeout-secs <n>`
- `--rsync-mirror-root <path>`
- `--max-depth <n>`
- `--max-instances <n>`
- `--trust-anchor <name>`
- `--bin-dir <path>`
- `--no-build`
- `--delta-count <n>`(sequence 入口)
- `--delta-interval-secs <n>`(sequence 入口)
- `--keep-db`(sequence 入口)
`run_live_bundle_record_multi_rir.sh` 会自动按 RIR 选择当前仓库内置的:
- `tests/fixtures/tal/*.tal`
- `tests/fixtures/ta/*.cer`
并将 `--trust-anchor` 设置为对应 RIR 名称。
## 说明
- 该脚本会先构建:
- `replay_bundle_capture`
- `replay_bundle_capture_delta`
- 如果提供 `--no-build`,则直接复用:
- `--bin-dir <path>` 下的现有二进制
- 中间 staging 目录:
- `<out>.stage-base`
- `<out>.stage-delta`
在成功完成后会清理,只保留最终输出目录。
- 最终输出目录是 **delta 阶段产物**,其中已经包含 base 阶段结果。

View File

@ -0,0 +1,135 @@
#!/usr/bin/env bash
# Record one live RPKI bundle for a single RIR in two phases:
#   [2/3] capture a live *base* bundle into <out>.stage-base
#   [3/3] capture a live *delta* bundle (fed by the base stage) into
#         <out>.stage-delta
# On success the delta stage is renamed to the final --out-dir and the base
# stage is removed; the delta-phase output already embeds the base results.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Option state; an empty string means "not given" and the corresponding
# flag is simply not forwarded to the capture binaries.
RIR=""
OUT_DIR=""
TAL_PATH=""
TA_PATH=""
BASE_VALIDATION_TIME=""
DELTA_VALIDATION_TIME=""
HTTP_TIMEOUT_SECS=""
RSYNC_TIMEOUT_SECS=""
RSYNC_MIRROR_ROOT=""
MAX_DEPTH=""
MAX_INSTANCES=""
TRUST_ANCHOR=""
NO_BUILD=0
BIN_DIR="target/release"
usage() {
  cat <<'EOF'
Usage:
./scripts/replay_bundle/run_live_bundle_record.sh \
--rir <name> \
--tal-path <path> \
--ta-path <path> \
[--out-dir <path>] \
[--base-validation-time <rfc3339>] \
[--delta-validation-time <rfc3339>] \
[--http-timeout-secs <n>] \
[--rsync-timeout-secs <n>] \
[--rsync-mirror-root <path>] \
[--max-depth <n>] \
[--max-instances <n>] \
[--trust-anchor <name>] \
[--bin-dir <path>] \
[--no-build]
EOF
}
# "${2:?}" aborts with an error if a flag is given without a value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --rir) RIR="${2:?}"; shift 2 ;;
    --out-dir) OUT_DIR="${2:?}"; shift 2 ;;
    --tal-path) TAL_PATH="${2:?}"; shift 2 ;;
    --ta-path) TA_PATH="${2:?}"; shift 2 ;;
    --base-validation-time) BASE_VALIDATION_TIME="${2:?}"; shift 2 ;;
    --delta-validation-time) DELTA_VALIDATION_TIME="${2:?}"; shift 2 ;;
    --http-timeout-secs) HTTP_TIMEOUT_SECS="${2:?}"; shift 2 ;;
    --rsync-timeout-secs) RSYNC_TIMEOUT_SECS="${2:?}"; shift 2 ;;
    --rsync-mirror-root) RSYNC_MIRROR_ROOT="${2:?}"; shift 2 ;;
    --max-depth) MAX_DEPTH="${2:?}"; shift 2 ;;
    --max-instances) MAX_INSTANCES="${2:?}"; shift 2 ;;
    --trust-anchor) TRUST_ANCHOR="${2:?}"; shift 2 ;;
    --bin-dir) BIN_DIR="${2:?}"; shift 2 ;;
    --no-build) NO_BUILD=1; shift ;;
    --help|-h) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;;
  esac
done
# --rir, --tal-path and --ta-path are mandatory.
if [[ -z "$RIR" || -z "$TAL_PATH" || -z "$TA_PATH" ]]; then
  usage >&2
  exit 2
fi
TS="$(date -u +%Y%m%dT%H%M%SZ)"
if [[ -z "$OUT_DIR" ]]; then
  OUT_DIR="target/replay/${RIR}_live_bundle_${TS}"
fi
# Staging directories; everything is removed up front so a rerun starts clean.
STAGE_BASE="${OUT_DIR}.stage-base"
STAGE_DELTA="${OUT_DIR}.stage-delta"
rm -rf "$OUT_DIR" "$STAGE_BASE" "$STAGE_DELTA"
mkdir -p "$(dirname "$OUT_DIR")"
CAPTURE_BIN="$BIN_DIR/replay_bundle_capture"
DELTA_CAPTURE_BIN="$BIN_DIR/replay_bundle_capture_delta"
# Build the two capture binaries unless --no-build was given, in which case
# existing binaries in $BIN_DIR are reused (and must exist).
if [[ "$NO_BUILD" -eq 0 ]]; then
  echo "[1/3] build release binaries"
  cargo build --release --bin replay_bundle_capture --bin replay_bundle_capture_delta
else
  echo "[1/3] reuse existing binaries from $BIN_DIR"
fi
if [[ ! -x "$CAPTURE_BIN" ]]; then
  echo "missing executable: $CAPTURE_BIN" >&2
  exit 1
fi
if [[ ! -x "$DELTA_CAPTURE_BIN" ]]; then
  echo "missing executable: $DELTA_CAPTURE_BIN" >&2
  exit 1
fi
echo "[2/3] record live base bundle into $STAGE_BASE"
BASE_CMD=(
  "$CAPTURE_BIN"
  --rir "$RIR"
  --out-dir "$STAGE_BASE"
  --tal-path "$TAL_PATH"
  --ta-path "$TA_PATH"
)
# Optional flags are appended only when the caller supplied a value.
[[ -n "$BASE_VALIDATION_TIME" ]] && BASE_CMD+=(--validation-time "$BASE_VALIDATION_TIME")
[[ -n "$HTTP_TIMEOUT_SECS" ]] && BASE_CMD+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS")
[[ -n "$RSYNC_TIMEOUT_SECS" ]] && BASE_CMD+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS")
[[ -n "$RSYNC_MIRROR_ROOT" ]] && BASE_CMD+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
[[ -n "$MAX_DEPTH" ]] && BASE_CMD+=(--max-depth "$MAX_DEPTH")
[[ -n "$MAX_INSTANCES" ]] && BASE_CMD+=(--max-instances "$MAX_INSTANCES")
[[ -n "$TRUST_ANCHOR" ]] && BASE_CMD+=(--trust-anchor "$TRUST_ANCHOR")
"${BASE_CMD[@]}"
echo "[3/3] record live delta bundle into $STAGE_DELTA"
# The delta capture consumes the base stage via --base-bundle-dir.
DELTA_CMD=(
  "$DELTA_CAPTURE_BIN"
  --rir "$RIR"
  --base-bundle-dir "$STAGE_BASE"
  --out-dir "$STAGE_DELTA"
)
[[ -n "$DELTA_VALIDATION_TIME" ]] && DELTA_CMD+=(--validation-time "$DELTA_VALIDATION_TIME")
[[ -n "$HTTP_TIMEOUT_SECS" ]] && DELTA_CMD+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS")
[[ -n "$RSYNC_TIMEOUT_SECS" ]] && DELTA_CMD+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS")
[[ -n "$RSYNC_MIRROR_ROOT" ]] && DELTA_CMD+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
[[ -n "$MAX_DEPTH" ]] && DELTA_CMD+=(--max-depth "$MAX_DEPTH")
[[ -n "$MAX_INSTANCES" ]] && DELTA_CMD+=(--max-instances "$MAX_INSTANCES")
[[ -n "$TRUST_ANCHOR" ]] && DELTA_CMD+=(--trust-anchor "$TRUST_ANCHOR")
"${DELTA_CMD[@]}"
# Promote the delta stage to the final directory and drop the base stage;
# only the final output directory survives a successful run.
mv "$STAGE_DELTA" "$OUT_DIR"
rm -rf "$STAGE_BASE"
echo "$OUT_DIR"

View File

@ -0,0 +1,166 @@
#!/usr/bin/env bash
# Record live bundles for several RIRs in one run: for each name in the
# comma-separated --rir list, invoke run_live_bundle_record.sh with the
# repo's built-in TAL/TA fixtures, then aggregate per-RIR bundle and
# verification stats into summary.json and summary.md under --out-root.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Option state; empty string means "not given", the flag is not forwarded.
RIRS=""
OUT_ROOT=""
BASE_VALIDATION_TIME=""
DELTA_VALIDATION_TIME=""
HTTP_TIMEOUT_SECS=""
RSYNC_TIMEOUT_SECS=""
RSYNC_MIRROR_ROOT=""
MAX_DEPTH=""
MAX_INSTANCES=""
NO_BUILD=0
BIN_DIR="target/release"
usage() {
  cat <<'EOF'
Usage:
./scripts/replay_bundle/run_live_bundle_record_multi_rir.sh \
--rir <afrinic,apnic,...> \
[--out-root <path>] \
[--base-validation-time <rfc3339>] \
[--delta-validation-time <rfc3339>] \
[--http-timeout-secs <n>] \
[--rsync-timeout-secs <n>] \
[--rsync-mirror-root <path>] \
[--max-depth <n>] \
[--max-instances <n>] \
[--bin-dir <path>] \
[--no-build]
EOF
}
# "${2:?}" aborts with an error if a flag is given without a value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --rir) RIRS="${2:?}"; shift 2 ;;
    --out-root) OUT_ROOT="${2:?}"; shift 2 ;;
    --base-validation-time) BASE_VALIDATION_TIME="${2:?}"; shift 2 ;;
    --delta-validation-time) DELTA_VALIDATION_TIME="${2:?}"; shift 2 ;;
    --http-timeout-secs) HTTP_TIMEOUT_SECS="${2:?}"; shift 2 ;;
    --rsync-timeout-secs) RSYNC_TIMEOUT_SECS="${2:?}"; shift 2 ;;
    --rsync-mirror-root) RSYNC_MIRROR_ROOT="${2:?}"; shift 2 ;;
    --max-depth) MAX_DEPTH="${2:?}"; shift 2 ;;
    --max-instances) MAX_INSTANCES="${2:?}"; shift 2 ;;
    --bin-dir) BIN_DIR="${2:?}"; shift 2 ;;
    --no-build) NO_BUILD=1; shift ;;
    --help|-h) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;;
  esac
done
if [[ -z "$RIRS" ]]; then
  usage >&2
  exit 2
fi
# One timestamp tags the whole run and every per-RIR output directory.
RUN_TAG="$(date -u +%Y%m%dT%H%M%SZ)"
if [[ -z "$OUT_ROOT" ]]; then
  OUT_ROOT="target/replay/live_bundle_matrix_${RUN_TAG}"
fi
mkdir -p "$OUT_ROOT"
# Map a RIR name to its in-repo TAL fixture; unknown names abort the run.
resolve_tal_path() {
  case "$1" in
    afrinic) printf 'tests/fixtures/tal/afrinic.tal' ;;
    apnic) printf 'tests/fixtures/tal/apnic-rfc7730-https.tal' ;;
    arin) printf 'tests/fixtures/tal/arin.tal' ;;
    lacnic) printf 'tests/fixtures/tal/lacnic.tal' ;;
    ripe) printf 'tests/fixtures/tal/ripe-ncc.tal' ;;
    *) echo "unsupported rir: $1" >&2; exit 2 ;;
  esac
}
# Map a RIR name to its in-repo trust-anchor certificate fixture.
resolve_ta_path() {
  case "$1" in
    afrinic) printf 'tests/fixtures/ta/afrinic-ta.cer' ;;
    apnic) printf 'tests/fixtures/ta/apnic-ta.cer' ;;
    arin) printf 'tests/fixtures/ta/arin-ta.cer' ;;
    lacnic) printf 'tests/fixtures/ta/lacnic-ta.cer' ;;
    ripe) printf 'tests/fixtures/ta/ripe-ncc-ta.cer' ;;
    *) echo "unsupported rir: $1" >&2; exit 2 ;;
  esac
}
SUMMARY_JSON="$OUT_ROOT/summary.json"
SUMMARY_MD="$OUT_ROOT/summary.md"
# Seed an empty summary so each per-RIR pass can append to "results".
python3 - "$SUMMARY_JSON" "$RUN_TAG" <<'PY'
import json, sys
out, run_tag = sys.argv[1:]
with open(out, "w") as fh:
    json.dump({"runTag": run_tag, "results": []}, fh, indent=2)
PY
IFS=',' read -r -a RIR_LIST <<< "$RIRS"
for raw_rir in "${RIR_LIST[@]}"; do
  # Normalize each entry: lowercase, whitespace trimmed (via xargs);
  # empty entries (e.g. trailing commas) are skipped.
  rir="$(printf '%s' "$raw_rir" | tr '[:upper:]' '[:lower:]' | xargs)"
  [[ -n "$rir" ]] || continue
  tal_path="$(resolve_tal_path "$rir")"
  ta_path="$(resolve_ta_path "$rir")"
  out_dir="$OUT_ROOT/${rir}_live_bundle_${RUN_TAG}"
  # Delegate the actual recording to the single-RIR entry point, forwarding
  # only the options the caller supplied.
  cmd=(
    ./scripts/replay_bundle/run_live_bundle_record.sh
    --rir "$rir"
    --out-dir "$out_dir"
    --tal-path "$tal_path"
    --ta-path "$ta_path"
    --trust-anchor "$rir"
    --bin-dir "$BIN_DIR"
  )
  [[ -n "$BASE_VALIDATION_TIME" ]] && cmd+=(--base-validation-time "$BASE_VALIDATION_TIME")
  [[ -n "$DELTA_VALIDATION_TIME" ]] && cmd+=(--delta-validation-time "$DELTA_VALIDATION_TIME")
  [[ -n "$HTTP_TIMEOUT_SECS" ]] && cmd+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS")
  [[ -n "$RSYNC_TIMEOUT_SECS" ]] && cmd+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS")
  [[ -n "$RSYNC_MIRROR_ROOT" ]] && cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
  [[ -n "$MAX_DEPTH" ]] && cmd+=(--max-depth "$MAX_DEPTH")
  [[ -n "$MAX_INSTANCES" ]] && cmd+=(--max-instances "$MAX_INSTANCES")
  [[ "$NO_BUILD" -eq 1 ]] && cmd+=(--no-build)
  "${cmd[@]}"
  # Fold this RIR's bundle/verification stats into the shared summary.
  # NOTE(review): stats are read from <out_dir>/<rir>/bundle.json — confirm
  # the single-RIR recorder nests its outputs under an extra <rir> directory.
  python3 - "$SUMMARY_JSON" "$rir" "$out_dir" <<'PY'
import json, pathlib, sys
summary_path, rir, out_dir = sys.argv[1:]
summary = json.loads(pathlib.Path(summary_path).read_text())
bundle = json.loads(pathlib.Path(out_dir, rir, "bundle.json").read_text())
verification = json.loads(pathlib.Path(out_dir, rir, "verification.json").read_text())
summary["results"].append({
    "rir": rir,
    "outDir": out_dir,
    "baseVrpCount": bundle["baseVrpCount"],
    "deltaVrpCount": bundle["deltaVrpCount"],
    "baseVapCount": bundle["baseVapCount"],
    "deltaVapCount": bundle["deltaVapCount"],
    "baseSelfReplayOk": verification["base"]["capture"]["selfReplayOk"],
    "deltaSelfReplayOk": verification["delta"]["capture"]["selfReplayOk"],
})
pathlib.Path(summary_path).write_text(json.dumps(summary, indent=2))
PY
done
# Render the accumulated JSON summary as a markdown table.
python3 - "$SUMMARY_JSON" "$SUMMARY_MD" <<'PY'
import json, pathlib, sys
summary = json.loads(pathlib.Path(sys.argv[1]).read_text())
out = pathlib.Path(sys.argv[2])
lines = [
    "# Multi-RIR Live Bundle Record Summary",
    "",
    f"- runTag: `{summary['runTag']}`",
    "",
    "| rir | base_vrps | delta_vrps | base_vaps | delta_vaps | base_self_replay | delta_self_replay | out_dir |",
    "|---|---:|---:|---:|---:|---|---|---|",
]
for item in summary["results"]:
    lines.append(
        f"| {item['rir']} | {item['baseVrpCount']} | {item['deltaVrpCount']} | "
        f"{item['baseVapCount']} | {item['deltaVapCount']} | "
        f"{str(item['baseSelfReplayOk']).lower()} | {str(item['deltaSelfReplayOk']).lower()} | "
        f"`{item['outDir']}` |"
    )
out.write_text("\n".join(lines) + "\n")
PY
echo "$OUT_ROOT"

View File

@ -0,0 +1,173 @@
#!/usr/bin/env bash
# Record a "1 base + N delta" live bundle sequence for several RIRs: for
# each name in the comma-separated --rir list, invoke
# run_live_bundle_record_sequence.sh with the repo's built-in TAL/TA
# fixtures, then aggregate per-RIR sequence stats into summary.json and
# summary.md under --out-root.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Option state; empty string means "not given", the flag is not forwarded.
RIRS=""
OUT_ROOT=""
BASE_VALIDATION_TIME=""
DELTA_COUNT=""
DELTA_INTERVAL_SECS=""
HTTP_TIMEOUT_SECS=""
RSYNC_TIMEOUT_SECS=""
RSYNC_MIRROR_ROOT=""
MAX_DEPTH=""
MAX_INSTANCES=""
NO_BUILD=0
KEEP_DB=0
CAPTURE_INPUTS_ONLY=0
BIN_DIR="target/release"
usage() {
  cat <<'EOF'
Usage:
./scripts/replay_bundle/run_live_bundle_record_multi_rir_sequence.sh \
--rir <afrinic,apnic,...> \
[--out-root <path>] \
[--base-validation-time <rfc3339>] \
[--delta-count <n>] \
[--delta-interval-secs <n>] \
[--http-timeout-secs <n>] \
[--rsync-timeout-secs <n>] \
[--rsync-mirror-root <path>] \
[--max-depth <n>] \
[--max-instances <n>] \
[--bin-dir <path>] \
[--no-build] \
[--keep-db] \
[--capture-inputs-only]
EOF
}
# "${2:?}" aborts with an error if a flag is given without a value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --rir) RIRS="${2:?}"; shift 2 ;;
    --out-root) OUT_ROOT="${2:?}"; shift 2 ;;
    --base-validation-time) BASE_VALIDATION_TIME="${2:?}"; shift 2 ;;
    --delta-count) DELTA_COUNT="${2:?}"; shift 2 ;;
    --delta-interval-secs) DELTA_INTERVAL_SECS="${2:?}"; shift 2 ;;
    --http-timeout-secs) HTTP_TIMEOUT_SECS="${2:?}"; shift 2 ;;
    --rsync-timeout-secs) RSYNC_TIMEOUT_SECS="${2:?}"; shift 2 ;;
    --rsync-mirror-root) RSYNC_MIRROR_ROOT="${2:?}"; shift 2 ;;
    --max-depth) MAX_DEPTH="${2:?}"; shift 2 ;;
    --max-instances) MAX_INSTANCES="${2:?}"; shift 2 ;;
    --bin-dir) BIN_DIR="${2:?}"; shift 2 ;;
    --no-build) NO_BUILD=1; shift ;;
    --keep-db) KEEP_DB=1; shift ;;
    --capture-inputs-only) CAPTURE_INPUTS_ONLY=1; shift ;;
    --help|-h) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;;
  esac
done
if [[ -z "$RIRS" ]]; then
  usage >&2
  exit 2
fi
# One timestamp tags the whole run and every per-RIR output directory.
RUN_TAG="$(date -u +%Y%m%dT%H%M%SZ)"
if [[ -z "$OUT_ROOT" ]]; then
  OUT_ROOT="target/replay/live_bundle_sequence_matrix_${RUN_TAG}"
fi
mkdir -p "$OUT_ROOT"
# Map a RIR name to its in-repo TAL fixture; unknown names abort the run.
resolve_tal_path() {
  case "$1" in
    afrinic) printf 'tests/fixtures/tal/afrinic.tal' ;;
    apnic) printf 'tests/fixtures/tal/apnic-rfc7730-https.tal' ;;
    arin) printf 'tests/fixtures/tal/arin.tal' ;;
    lacnic) printf 'tests/fixtures/tal/lacnic.tal' ;;
    ripe) printf 'tests/fixtures/tal/ripe-ncc.tal' ;;
    *) echo "unsupported rir: $1" >&2; exit 2 ;;
  esac
}
# Map a RIR name to its in-repo trust-anchor certificate fixture.
resolve_ta_path() {
  case "$1" in
    afrinic) printf 'tests/fixtures/ta/afrinic-ta.cer' ;;
    apnic) printf 'tests/fixtures/ta/apnic-ta.cer' ;;
    arin) printf 'tests/fixtures/ta/arin-ta.cer' ;;
    lacnic) printf 'tests/fixtures/ta/lacnic-ta.cer' ;;
    ripe) printf 'tests/fixtures/ta/ripe-ncc-ta.cer' ;;
    *) echo "unsupported rir: $1" >&2; exit 2 ;;
  esac
}
SUMMARY_JSON="$OUT_ROOT/summary.json"
SUMMARY_MD="$OUT_ROOT/summary.md"
# Seed an empty summary so each per-RIR pass can append to "results".
python3 - "$SUMMARY_JSON" "$RUN_TAG" <<'PY'
import json, sys
path, run_tag = sys.argv[1:]
with open(path, "w") as fh:
    json.dump({"runTag": run_tag, "results": []}, fh, indent=2)
PY
IFS=',' read -r -a RIR_LIST <<< "$RIRS"
for raw_rir in "${RIR_LIST[@]}"; do
  # Normalize each entry: lowercase, whitespace trimmed (via xargs);
  # empty entries (e.g. trailing commas) are skipped.
  rir="$(printf '%s' "$raw_rir" | tr '[:upper:]' '[:lower:]' | xargs)"
  [[ -n "$rir" ]] || continue
  tal_path="$(resolve_tal_path "$rir")"
  ta_path="$(resolve_ta_path "$rir")"
  out_dir="$OUT_ROOT/${rir}_live_bundle_sequence_${RUN_TAG}"
  # Delegate the actual recording to the single-RIR sequence entry point,
  # forwarding only the options the caller supplied.
  cmd=(
    ./scripts/replay_bundle/run_live_bundle_record_sequence.sh
    --rir "$rir"
    --out-dir "$out_dir"
    --tal-path "$tal_path"
    --ta-path "$ta_path"
    --trust-anchor "$rir"
    --bin-dir "$BIN_DIR"
  )
  [[ -n "$BASE_VALIDATION_TIME" ]] && cmd+=(--base-validation-time "$BASE_VALIDATION_TIME")
  [[ -n "$DELTA_COUNT" ]] && cmd+=(--delta-count "$DELTA_COUNT")
  [[ -n "$DELTA_INTERVAL_SECS" ]] && cmd+=(--delta-interval-secs "$DELTA_INTERVAL_SECS")
  [[ -n "$HTTP_TIMEOUT_SECS" ]] && cmd+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS")
  [[ -n "$RSYNC_TIMEOUT_SECS" ]] && cmd+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS")
  [[ -n "$RSYNC_MIRROR_ROOT" ]] && cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
  [[ -n "$MAX_DEPTH" ]] && cmd+=(--max-depth "$MAX_DEPTH")
  [[ -n "$MAX_INSTANCES" ]] && cmd+=(--max-instances "$MAX_INSTANCES")
  [[ "$NO_BUILD" -eq 1 ]] && cmd+=(--no-build)
  [[ "$KEEP_DB" -eq 1 ]] && cmd+=(--keep-db)
  [[ "$CAPTURE_INPUTS_ONLY" -eq 1 ]] && cmd+=(--capture-inputs-only)
  "${cmd[@]}"
  # Fold this RIR's sequence stats into the shared summary.
  # NOTE(review): stats are read from <out_dir>/<rir>/bundle.json — confirm
  # the sequence recorder nests its outputs under an extra <rir> directory.
  python3 - "$SUMMARY_JSON" "$rir" "$out_dir" <<'PY'
import json, pathlib, sys
summary_path, rir, out_dir = sys.argv[1:]
summary = json.loads(pathlib.Path(summary_path).read_text())
bundle = json.loads(pathlib.Path(out_dir, rir, "bundle.json").read_text())
verification = json.loads(pathlib.Path(out_dir, rir, "verification.json").read_text())
summary["results"].append({
    "rir": rir,
    "outDir": out_dir,
    "stepCount": len(bundle["deltaSequence"]["steps"]),
    "baseVrpCount": bundle["base"]["vrpCount"],
    "baseVapCount": bundle["base"]["vapCount"],
    "allStepsSelfReplayOk": verification["summary"]["allStepsSelfReplayOk"],
})
pathlib.Path(summary_path).write_text(json.dumps(summary, indent=2))
PY
done
# Render the accumulated JSON summary as a markdown table.
python3 - "$SUMMARY_JSON" "$SUMMARY_MD" <<'PY'
import json, pathlib, sys
summary = json.loads(pathlib.Path(sys.argv[1]).read_text())
out = pathlib.Path(sys.argv[2])
lines = [
    "# Multi-RIR Live Bundle Sequence Summary",
    "",
    f"- runTag: `{summary['runTag']}`",
    "",
    "| rir | step_count | base_vrps | base_vaps | all_steps_self_replay | out_dir |",
    "|---|---:|---:|---:|---|---|",
]
for item in summary["results"]:
    lines.append(
        f"| {item['rir']} | {item['stepCount']} | {item['baseVrpCount']} | {item['baseVapCount']} | "
        f"{str(item['allStepsSelfReplayOk']).lower()} | `{item['outDir']}` |"
    )
out.write_text("\n".join(lines) + "\n")
PY
echo "$OUT_ROOT"

View File

@ -0,0 +1,119 @@
#!/usr/bin/env bash
# Record one RIR's "1 base + N delta" live bundle sequence by delegating to
# the replay_bundle_capture_sequence binary (built here unless --no-build).
# Progress logging is controlled via RPKI_PROGRESS_LOG /
# RPKI_PROGRESS_SLOW_SECS, which default to 1 / 30 if unset in the caller's
# environment.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Option state; empty string means "not given", the flag is not forwarded.
RIR=""
OUT_DIR=""
TAL_PATH=""
TA_PATH=""
BASE_VALIDATION_TIME=""
DELTA_COUNT=""
DELTA_INTERVAL_SECS=""
HTTP_TIMEOUT_SECS=""
RSYNC_TIMEOUT_SECS=""
RSYNC_MIRROR_ROOT=""
MAX_DEPTH=""
MAX_INSTANCES=""
TRUST_ANCHOR=""
NO_BUILD=0
KEEP_DB=0
CAPTURE_INPUTS_ONLY=0
BIN_DIR="target/release"
# Environment overrides win; otherwise progress logging defaults apply.
PROGRESS_LOG="${RPKI_PROGRESS_LOG:-1}"
PROGRESS_SLOW_SECS="${RPKI_PROGRESS_SLOW_SECS:-30}"
usage() {
  cat <<'EOF'
Usage:
./scripts/replay_bundle/run_live_bundle_record_sequence.sh \
--rir <name> \
--tal-path <path> \
--ta-path <path> \
[--out-dir <path>] \
[--base-validation-time <rfc3339>] \
[--delta-count <n>] \
[--delta-interval-secs <n>] \
[--http-timeout-secs <n>] \
[--rsync-timeout-secs <n>] \
[--rsync-mirror-root <path>] \
[--max-depth <n>] \
[--max-instances <n>] \
[--trust-anchor <name>] \
[--bin-dir <path>] \
[--no-build] \
[--keep-db] \
[--capture-inputs-only]
EOF
}
# "${2:?}" aborts with an error if a flag is given without a value.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --rir) RIR="${2:?}"; shift 2 ;;
    --out-dir) OUT_DIR="${2:?}"; shift 2 ;;
    --tal-path) TAL_PATH="${2:?}"; shift 2 ;;
    --ta-path) TA_PATH="${2:?}"; shift 2 ;;
    --base-validation-time) BASE_VALIDATION_TIME="${2:?}"; shift 2 ;;
    --delta-count) DELTA_COUNT="${2:?}"; shift 2 ;;
    --delta-interval-secs) DELTA_INTERVAL_SECS="${2:?}"; shift 2 ;;
    --http-timeout-secs) HTTP_TIMEOUT_SECS="${2:?}"; shift 2 ;;
    --rsync-timeout-secs) RSYNC_TIMEOUT_SECS="${2:?}"; shift 2 ;;
    --rsync-mirror-root) RSYNC_MIRROR_ROOT="${2:?}"; shift 2 ;;
    --max-depth) MAX_DEPTH="${2:?}"; shift 2 ;;
    --max-instances) MAX_INSTANCES="${2:?}"; shift 2 ;;
    --trust-anchor) TRUST_ANCHOR="${2:?}"; shift 2 ;;
    --bin-dir) BIN_DIR="${2:?}"; shift 2 ;;
    --no-build) NO_BUILD=1; shift ;;
    --keep-db) KEEP_DB=1; shift ;;
    --capture-inputs-only) CAPTURE_INPUTS_ONLY=1; shift ;;
    --help|-h) usage; exit 0 ;;
    *) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;;
  esac
done
# --rir, --tal-path and --ta-path are mandatory.
if [[ -z "$RIR" || -z "$TAL_PATH" || -z "$TA_PATH" ]]; then
  usage >&2
  exit 2
fi
TS="$(date -u +%Y%m%dT%H%M%SZ)"
if [[ -z "$OUT_DIR" ]]; then
  OUT_DIR="target/replay/${RIR}_live_bundle_sequence_${TS}"
fi
SEQUENCE_BIN="$BIN_DIR/replay_bundle_capture_sequence"
# Build the sequence binary unless --no-build was given, in which case an
# existing binary in $BIN_DIR is reused (and must exist).
if [[ "$NO_BUILD" -eq 0 ]]; then
  echo "[1/1] build release binary"
  cargo build --release --bin replay_bundle_capture_sequence
else
  echo "[1/1] reuse existing binary from $BIN_DIR"
fi
if [[ ! -x "$SEQUENCE_BIN" ]]; then
  echo "missing executable: $SEQUENCE_BIN" >&2
  exit 1
fi
# Assemble the capture invocation; optional flags are appended only when
# the caller supplied a value.
cmd=(
  "$SEQUENCE_BIN"
  --rir "$RIR"
  --out-dir "$OUT_DIR"
  --tal-path "$TAL_PATH"
  --ta-path "$TA_PATH"
)
[[ -n "$BASE_VALIDATION_TIME" ]] && cmd+=(--base-validation-time "$BASE_VALIDATION_TIME")
[[ -n "$DELTA_COUNT" ]] && cmd+=(--delta-count "$DELTA_COUNT")
[[ -n "$DELTA_INTERVAL_SECS" ]] && cmd+=(--delta-interval-secs "$DELTA_INTERVAL_SECS")
[[ -n "$HTTP_TIMEOUT_SECS" ]] && cmd+=(--http-timeout-secs "$HTTP_TIMEOUT_SECS")
[[ -n "$RSYNC_TIMEOUT_SECS" ]] && cmd+=(--rsync-timeout-secs "$RSYNC_TIMEOUT_SECS")
[[ -n "$RSYNC_MIRROR_ROOT" ]] && cmd+=(--rsync-mirror-root "$RSYNC_MIRROR_ROOT")
[[ -n "$MAX_DEPTH" ]] && cmd+=(--max-depth "$MAX_DEPTH")
[[ -n "$MAX_INSTANCES" ]] && cmd+=(--max-instances "$MAX_INSTANCES")
[[ -n "$TRUST_ANCHOR" ]] && cmd+=(--trust-anchor "$TRUST_ANCHOR")
[[ "$KEEP_DB" -eq 1 ]] && cmd+=(--keep-db)
[[ "$CAPTURE_INPUTS_ONLY" -eq 1 ]] && cmd+=(--capture-inputs-only)
# Run the capture with progress logging knobs exported for this invocation.
RPKI_PROGRESS_LOG="$PROGRESS_LOG" \
RPKI_PROGRESS_SLOW_SECS="$PROGRESS_SLOW_SECS" \
"${cmd[@]}"

View File

@ -0,0 +1,67 @@
# Replay Verify Scripts
## `run_multi_rir_ccr_replay_verify.sh`
用途:
- 通用 multi-RIR CCR replay verify 入口
- 通过 `--rir` 指定一个或多个 RIR,按顺序执行
- 通过 `--mode` 指定 `snapshot`、`delta` 或 `both`
- 默认每个 RIR 的 RocksDB 目录在 compare/verify 结束后自动删除;传 `--keep-db` 才保留
- 同一次执行的所有产物都会先落到 `rpki/target/replay/<timestamp>/`
- 该时间戳目录下再按 RIR 分目录:
- `<rir>_ccr_replay_<timestamp>`
默认输入:
- bundle root: `/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3`
- 每个 RIR 的 TAL / TA / validation time / record CSV 由 `scripts/payload_replay/multi_rir_case_info.py` 解析
用法:
- 单个 RIR
- `./scripts/replay_verify/run_multi_rir_ccr_replay_verify.sh --rir apnic --mode both`
- `./scripts/replay_verify/run_multi_rir_ccr_replay_verify.sh --rir apnic --mode snapshot`
- `./scripts/replay_verify/run_multi_rir_ccr_replay_verify.sh --rir apnic --mode delta`
- `./scripts/replay_verify/run_multi_rir_ccr_replay_verify.sh --rir apnic --mode both`
- `./scripts/replay_verify/run_multi_rir_ccr_replay_verify.sh --rir apnic,ripe --mode snapshot`
- `./scripts/replay_verify/run_multi_rir_ccr_replay_verify.sh --rir afrinic,apnic,arin,lacnic,ripe --mode both`
- `./scripts/replay_verify/run_multi_rir_ccr_replay_verify.sh --rir apnic --mode delta --keep-db`
可覆盖环境变量:
- `BUNDLE_ROOT`
- `OUT_ROOT`(默认:`rpki/target/replay`
- `RUN_TAG`
主要产物:
- 单次执行根目录:
- `rpki/target/replay/<timestamp>/`
- 每个 RIR 子目录下:
- `<rir>_snapshot.ccr`
- `<rir>_delta.ccr`
- `<rir>_*_report.json`
- `<rir>_*_ccr_vrps.csv`
- `<rir>_*_ccr_compare_summary.md`
- `<rir>_*_ccr_verify.json`
- 同次执行总汇总:
- `multi_rir_ccr_replay_verify_<timestamp>_summary.md`
- `multi_rir_ccr_replay_verify_<timestamp>_summary.json`
## `run_peer_bundle_matrix.sh`
用途:
- 对一组 `ours live bundle` 做本地 peer replay 矩阵验证
- Routinator 与 `rpki-client` 分别消费相同 bundle root
- 汇总 `VRP + VAP` 的 base / delta 结果
用法:
- `./scripts/replay_verify/run_peer_bundle_matrix.sh --bundle-root target/replay/live_bundle_matrix_<timestamp>`
- `./scripts/replay_verify/run_peer_bundle_matrix.sh --bundle-root target/replay/live_bundle_matrix_<timestamp> --rir apnic,ripe`
主要产物:
- 输出根目录:
- `target/replay/peer_bundle_matrix_<timestamp>/`
- Routinator
- `target/replay/peer_bundle_matrix_<timestamp>/routinator/<rir>/`
- `rpki-client`
- `target/replay/peer_bundle_matrix_<timestamp>/rpki-client/`
- 汇总:
- `summary.json`
- `summary.md`

View File

@ -0,0 +1,284 @@
#!/usr/bin/env bash
set -euo pipefail
# Run from the repository root (two levels above this script).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Print the CLI help text. Single-quoted printf arguments keep the body free
# of shell expansion, matching the original quoted-heredoc behaviour.
usage() {
printf '%s\n' \
'Usage:' \
'run_multi_rir_ccr_replay_verify.sh --rir <rir[,rir...]> [--mode snapshot|delta|both] [--keep-db]' \
'Options:' \
'--rir <list> Comma-separated RIR list, e.g. apnic or apnic,ripe' \
'--mode <mode> snapshot | delta | both (default: both)' \
'--keep-db Keep per-run RocksDB directories (default: remove after verify)' \
'--bundle-root <p> Override bundle root' \
'--out-root <p> Override output root (default: rpki/target/replay)' \
'--run-tag <tag> Override timestamp suffix for all RIR runs'
}
# ---------------------------------------------------------------------------
# Defaults. BUNDLE_ROOT / OUT_ROOT / RUN_TAG may be pre-seeded through the
# environment and are still overridable with the matching CLI flags below.
# ---------------------------------------------------------------------------
MODE="both"
KEEP_DB=0
RIR_LIST=""
BUNDLE_ROOT="${BUNDLE_ROOT:-/home/yuyr/dev/rust_playground/routinator/bench/multi_rir_demo/runs/20260316-112341-multi-final3}"
OUT_ROOT="${OUT_ROOT:-$ROOT_DIR/target/replay}"
RUN_TAG="${RUN_TAG:-$(date -u +%Y%m%dT%H%M%SZ)}"
# Option parsing: value-taking flags consume their argument here; the shared
# trailing `shift || true` advances past the flag itself.
while [[ $# -gt 0 ]]; do
case "$1" in
--rir)
shift
RIR_LIST="${1:-}"
;;
--mode)
shift
MODE="${1:-}"
;;
--keep-db)
KEEP_DB=1
;;
--bundle-root)
shift
BUNDLE_ROOT="${1:-}"
;;
--out-root)
shift
OUT_ROOT="${1:-}"
;;
--run-tag)
shift
RUN_TAG="${1:-}"
;;
-h|--help)
usage
exit 0
;;
*)
echo "unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
shift || true
done
# --rir is mandatory.
if [[ -z "$RIR_LIST" ]]; then
echo "--rir is required" >&2
usage >&2
exit 2
fi
# Reject unknown modes early.
case "$MODE" in
snapshot|delta|both) ;;
*)
echo "invalid --mode: $MODE" >&2
usage >&2
exit 2
;;
esac
CASE_INFO_SCRIPT="$ROOT_DIR/scripts/payload_replay/multi_rir_case_info.py"
mkdir -p "$OUT_ROOT"
# All artifacts of one invocation live under a single timestamped root.
RUN_ROOT="$OUT_ROOT/$RUN_TAG"
mkdir -p "$RUN_ROOT"
# Build every binary the per-mode runs need once, up front.
cargo build --release --bin rpki --bin ccr_to_routinator_csv --bin ccr_verify >/dev/null
summary_md="$RUN_ROOT/multi_rir_ccr_replay_verify_${RUN_TAG}_summary.md"
summary_json="$RUN_ROOT/multi_rir_ccr_replay_verify_${RUN_TAG}_summary.json"
# Per-RIR summary rows accumulate in a temp JSON array, merged at the end.
summary_json_tmp="$(mktemp)"
printf '[]' > "$summary_json_tmp"
# Run one replay mode ("snapshot" or "delta") for a single RIR and verify it.
#
# Positional parameters:
#   $1  rir            RIR name (e.g. apnic)
#   $2  mode           "snapshot" or "delta" — selects the rpki flag set
#   $3  run_dir        per-RIR output directory
#   $4  trust_anchor   trust-anchor name passed to the CSV converter
#   $5/$6              TAL path / TA certificate path
#   $7-$9              base archive / base locks / routinator base record CSV
#   ${10}-${12}        delta archive / delta locks / routinator delta record CSV
#   ${13}/${14}        snapshot / delta validation time
#
# Side effects: writes report/ccr/csv/compare/verify/meta files under run_dir;
# the RocksDB directory is recreated per run and removed unless KEEP_DB=1.
run_one_mode() {
local rir="$1"
local mode="$2"
local run_dir="$3"
local trust_anchor="$4"
local tal_path="$5"
local ta_path="$6"
local base_archive="$7"
local base_locks="$8"
local base_csv="$9"
local delta_archive="${10}"
local delta_locks="${11}"
local delta_csv="${12}"
local snapshot_validation_time="${13}"
local delta_validation_time="${14}"
# Derived artifact paths, all keyed by "<rir>_<mode>".
local db_dir="$run_dir/${rir}_${mode}_db"
local report_json="$run_dir/${rir}_${mode}_report.json"
local run_log="$run_dir/${rir}_${mode}_run.log"
local ccr_path="$run_dir/${rir}_${mode}.ccr"
local csv_path="$run_dir/${rir}_${mode}_ccr_vrps.csv"
local compare_md="$run_dir/${rir}_${mode}_ccr_compare_summary.md"
local only_ours="$run_dir/${rir}_${mode}_ccr_only_in_ours.csv"
local only_record="$run_dir/${rir}_${mode}_ccr_only_in_record.csv"
local verify_json="$run_dir/${rir}_${mode}_ccr_verify.json"
local meta_json="$run_dir/${rir}_${mode}_meta.json"
# Always start from a clean database directory.
rm -rf "$db_dir"
local -a cmd=(target/release/rpki --db "$db_dir" --tal-path "$tal_path" --ta-path "$ta_path")
# Snapshot replays only the base archive; delta replays base + delta.
if [[ "$mode" == "snapshot" ]]; then
cmd+=(--payload-replay-archive "$base_archive" --payload-replay-locks "$base_locks" --validation-time "$snapshot_validation_time")
else
cmd+=(
--payload-base-archive "$base_archive"
--payload-base-locks "$base_locks"
--payload-base-validation-time "$snapshot_validation_time"
--payload-delta-archive "$delta_archive"
--payload-delta-locks "$delta_locks"
--validation-time "$delta_validation_time"
)
fi
cmd+=(--report-json "$report_json" --ccr-out "$ccr_path")
local start_s end_s duration_s
start_s="$(date +%s)"
# Log the exact (shell-quoted) command, then run it; all output goes to the
# run log only.
(
echo "# ${rir} ${mode} command:"
printf '%q ' "${cmd[@]}"
echo
echo
"${cmd[@]}"
) 2>&1 | tee "$run_log" >/dev/null
end_s="$(date +%s)"
duration_s="$((end_s - start_s))"
# Convert the CCR output to routinator-style VRP CSV for comparison.
target/release/ccr_to_routinator_csv \
--ccr "$ccr_path" \
--out "$csv_path" \
--trust-anchor "$trust_anchor" >/dev/null
# Pick the recorded reference CSV matching the replay mode.
local record_csv
if [[ "$mode" == "snapshot" ]]; then
record_csv="$base_csv"
else
record_csv="$delta_csv"
fi
# Diff our CSV against the recorded routinator output.
./scripts/payload_replay/compare_with_routinator_record.sh \
"$csv_path" \
"$record_csv" \
"$compare_md" \
"$only_ours" \
"$only_record" >/dev/null
# Verify the CCR against the database state; result captured as JSON.
target/release/ccr_verify \
--ccr "$ccr_path" \
--db "$db_dir" > "$verify_json"
# Extract a small meta summary (duration + counts) from the report JSON.
python3 - "$report_json" "$meta_json" "$mode" "$duration_s" <<'PY'
import json, sys
from pathlib import Path
report = json.loads(Path(sys.argv[1]).read_text(encoding='utf-8'))
meta = {
'mode': sys.argv[3],
'duration_seconds': int(sys.argv[4]),
'validation_time': report.get('validation_time_rfc3339_utc'),
'publication_points_processed': report['tree']['instances_processed'],
'publication_points_failed': report['tree']['instances_failed'],
'vrps': len(report['vrps']),
'aspas': len(report['aspas']),
}
Path(sys.argv[2]).write_text(json.dumps(meta, ensure_ascii=False, indent=2)+'\n', encoding='utf-8')
PY
# Drop the RocksDB directory unless the caller asked to keep it.
if [[ "$KEEP_DB" -eq 0 ]]; then
rm -rf "$db_dir"
fi
}
# Split the comma-separated RIR list and process each RIR in order.
IFS=',' read -r -a RIRS <<< "$RIR_LIST"
for rir in "${RIRS[@]}"; do
# `xargs` trims surrounding whitespace; skip empty entries.
rir="$(echo "$rir" | xargs)"
[[ -n "$rir" ]] || continue
# The case-info script emits VAR=... lines (TRUST_ANCHOR, TAL_PATH, archive
# paths, validation times, ...) which `eval` imports into this shell.
# NOTE(review): eval of generated output — trusted project tooling only.
eval "$(python3 "$CASE_INFO_SCRIPT" --bundle-root "$BUNDLE_ROOT" --rir "$rir" --format env)"
run_dir="$RUN_ROOT/${rir}_ccr_replay_${RUN_TAG}"
mkdir -p "$run_dir"
# `both` runs snapshot first, then delta, into the same run_dir.
if [[ "$MODE" == "snapshot" || "$MODE" == "both" ]]; then
run_one_mode \
"$rir" snapshot "$run_dir" "$TRUST_ANCHOR" "$TAL_PATH" "$TA_PATH" \
"$PAYLOAD_REPLAY_ARCHIVE" "$PAYLOAD_REPLAY_LOCKS" "$ROUTINATOR_BASE_RECORD_CSV" \
"$PAYLOAD_DELTA_ARCHIVE" "$PAYLOAD_DELTA_LOCKS" "$ROUTINATOR_DELTA_RECORD_CSV" \
"$SNAPSHOT_VALIDATION_TIME" "$DELTA_VALIDATION_TIME"
fi
if [[ "$MODE" == "delta" || "$MODE" == "both" ]]; then
run_one_mode \
"$rir" delta "$run_dir" "$TRUST_ANCHOR" "$TAL_PATH" "$TA_PATH" \
"$PAYLOAD_REPLAY_ARCHIVE" "$PAYLOAD_REPLAY_LOCKS" "$ROUTINATOR_BASE_RECORD_CSV" \
"$PAYLOAD_DELTA_ARCHIVE" "$PAYLOAD_DELTA_LOCKS" "$ROUTINATOR_DELTA_RECORD_CSV" \
"$SNAPSHOT_VALIDATION_TIME" "$DELTA_VALIDATION_TIME"
fi
# Append this RIR's result rows (parsed from the compare markdown, the meta
# JSON and the verify JSON) to the temp summary array.
python3 - "$summary_json_tmp" "$run_dir" "$rir" "$MODE" <<'PY'
import json, sys
from pathlib import Path
summary_path = Path(sys.argv[1])
run_dir = Path(sys.argv[2])
rir = sys.argv[3]
mode = sys.argv[4]
rows = json.loads(summary_path.read_text(encoding='utf-8'))
for submode in ['snapshot','delta']:
if mode not in ('both', submode):
continue
compare = run_dir / f'{rir}_{submode}_ccr_compare_summary.md'
meta = run_dir / f'{rir}_{submode}_meta.json'
verify = run_dir / f'{rir}_{submode}_ccr_verify.json'
if not compare.exists() or not meta.exists() or not verify.exists():
continue
compare_text = compare.read_text(encoding='utf-8')
meta_obj = json.loads(meta.read_text(encoding='utf-8'))
verify_obj = json.loads(verify.read_text(encoding='utf-8'))
def metric(name):
prefix = f'| {name} | '
for line in compare_text.splitlines():
if line.startswith(prefix):
return int(line.split('|')[2].strip())
raise SystemExit(f'missing metric {name} in {compare}')
rows.append({
'rir': rir,
'mode': submode,
'run_dir': str(run_dir),
'duration_seconds': meta_obj['duration_seconds'],
'vrps': meta_obj['vrps'],
'aspas': meta_obj['aspas'],
'only_in_ours': metric('only_in_ours'),
'only_in_record': metric('only_in_record'),
'intersection': metric('intersection'),
'state_hashes_ok': verify_obj.get('state_hashes_ok'),
})
summary_path.write_text(json.dumps(rows, ensure_ascii=False, indent=2)+'\n', encoding='utf-8')
PY
done
# Materialize the accumulated rows as the final summary JSON + markdown table.
python3 - "$summary_json_tmp" "$summary_json" "$summary_md" "$RUN_TAG" <<'PY'
import json, sys
from pathlib import Path
rows = json.loads(Path(sys.argv[1]).read_text(encoding='utf-8'))
out_json = Path(sys.argv[2])
out_md = Path(sys.argv[3])
run_tag = sys.argv[4]
out_json.write_text(json.dumps(rows, ensure_ascii=False, indent=2)+'\n', encoding='utf-8')
parts = []
parts.append('# Multi-RIR CCR Replay Verify Summary\n\n')
parts.append(f'- run_tag: `{run_tag}`\n\n')
parts.append('| rir | mode | duration_s | vrps | aspas | only_in_ours | only_in_record | state_hashes_ok |\n')
parts.append('|---|---|---:|---:|---:|---:|---:|---|\n')
for row in rows:
parts.append(f"| {row['rir']} | {row['mode']} | {row['duration_seconds']} | {row['vrps']} | {row['aspas']} | {row['only_in_ours']} | {row['only_in_record']} | {row['state_hashes_ok']} |\n")
out_md.write_text(''.join(parts), encoding='utf-8')
PY
rm -f "$summary_json_tmp"
echo "== multi-rir replay verify complete ==" >&2
echo "- summary: $summary_md" >&2
echo "- summary json: $summary_json" >&2
View File

@ -0,0 +1,210 @@
#!/usr/bin/env bash
set -euo pipefail
# Run from the repository root (two levels above this script).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT_DIR"
# Inputs / defaults. Peer tool locations default to fixed local checkouts and
# are overridable via the matching CLI flags below.
BUNDLE_ROOT=""
RIRS=""
OUT_ROOT=""
ROUTINATOR_ROOT="/home/yuyr/dev/rust_playground/routinator"
RPKI_CLIENT_ROOT="/home/yuyr/dev/rpki-client-9.7"
RPKI_CLIENT_BUILD_DIR="/home/yuyr/dev/rpki-client-9.7/build-m5"
KEEP_DB=0
# Print the CLI help text.
usage() {
cat <<'EOF'
Usage:
./scripts/replay_verify/run_peer_bundle_matrix.sh \
--bundle-root <dir> \
[--rir <afrinic,apnic,...>] \
[--out-root <dir>] \
[--routinator-root <dir>] \
[--rpki-client-root <dir>] \
[--rpki-client-build-dir <dir>] \
[--keep-db]
EOF
}
# Option parsing; "${2:?}" aborts with an error when a flag's value is missing.
while [[ $# -gt 0 ]]; do
case "$1" in
--bundle-root) BUNDLE_ROOT="${2:?}"; shift 2 ;;
--rir) RIRS="${2:?}"; shift 2 ;;
--out-root) OUT_ROOT="${2:?}"; shift 2 ;;
--routinator-root) ROUTINATOR_ROOT="${2:?}"; shift 2 ;;
--rpki-client-root) RPKI_CLIENT_ROOT="${2:?}"; shift 2 ;;
--rpki-client-build-dir) RPKI_CLIENT_BUILD_DIR="${2:?}"; shift 2 ;;
--keep-db) KEEP_DB=1; shift ;;
--help|-h) usage; exit 0 ;;
*) echo "unknown argument: $1" >&2; usage >&2; exit 2 ;;
esac
done
# --bundle-root is mandatory.
if [[ -z "$BUNDLE_ROOT" ]]; then
usage >&2
exit 2
fi
# Canonicalize BUNDLE_ROOT to an absolute path (python for portability).
BUNDLE_ROOT="$(python3 - "$BUNDLE_ROOT" <<'PY'
from pathlib import Path
import sys
print(Path(sys.argv[1]).resolve())
PY
)"
RUN_TAG="$(date -u +%Y%m%dT%H%M%SZ)"
if [[ -z "$OUT_ROOT" ]]; then
OUT_ROOT="target/replay/peer_bundle_matrix_${RUN_TAG}"
fi
mkdir -p "$OUT_ROOT"
# Canonicalize OUT_ROOT after creating it so `resolve()` sees a real path.
OUT_ROOT="$(python3 - "$OUT_ROOT" <<'PY'
from pathlib import Path
import sys
print(Path(sys.argv[1]).resolve())
PY
)"
# Discover RIR bundle directories under $BUNDLE_ROOT, where a "bundle" is any
# directory containing base-locks.json. Prints the root's own name when the
# root itself is a bundle; otherwise prints a comma-separated list built from
# direct child bundles plus singly-nested bundles one level deeper.
discover_rirs() {
python3 - "$BUNDLE_ROOT" <<'PY'
import sys
from pathlib import Path

def is_bundle(d):
    return (d / "base-locks.json").exists()

root = Path(sys.argv[1])
if is_bundle(root):
    print(root.name)
else:
    names = []
    for entry in sorted(root.iterdir()):
        if not entry.is_dir():
            continue
        if is_bundle(entry):
            names.append(entry.name)
        else:
            nested = sorted(c.name for c in entry.iterdir() if c.is_dir() and is_bundle(c))
            if len(nested) == 1:
                names.append(nested[0])
    print(",".join(names))
PY
}
# Auto-discover RIRs when none were given explicitly.
if [[ -z "$RIRS" ]]; then
RIRS="$(discover_rirs)"
fi
ROUTI_OUT="$OUT_ROOT/routinator"
CLIENT_OUT="$OUT_ROOT/rpki-client"
# rpki-client consumes a flat <root>/<rir> layout; build it from symlinks.
NORMALIZED_BUNDLE_ROOT="$OUT_ROOT/.normalized-bundle-root"
mkdir -p "$ROUTI_OUT" "$CLIENT_OUT"
rm -rf "$NORMALIZED_BUNDLE_ROOT"
mkdir -p "$NORMALIZED_BUNDLE_ROOT"
IFS=',' read -r -a RIR_LIST <<< "$RIRS"
for raw_rir in "${RIR_LIST[@]}"; do
# Lowercase + trim; skip empty entries.
rir="$(printf '%s' "$raw_rir" | tr '[:upper:]' '[:lower:]' | xargs)"
[[ -n "$rir" ]] || continue
# Resolve the bundle directory: direct child first, then a depth-2 search.
source_bundle_dir=""
if [[ -d "$BUNDLE_ROOT/$rir" && -f "$BUNDLE_ROOT/$rir/base-locks.json" ]]; then
source_bundle_dir="$BUNDLE_ROOT/$rir"
else
match="$(find "$BUNDLE_ROOT" -maxdepth 2 -type d -path "*/${rir}" -exec test -f '{}/base-locks.json' ';' -print | head -n 1)"
if [[ -z "$match" ]]; then
echo "unable to resolve bundle directory for RIR: $rir" >&2
exit 1
fi
source_bundle_dir="$match"
fi
ln -sfn "$source_bundle_dir" "$NORMALIZED_BUNDLE_ROOT/$rir"
# Build the routinator command incrementally (mirrors CLIENT_ARGS below)
# instead of duplicating the whole array for the --keep-db case.
ROUTI_CMD=("$ROUTINATOR_ROOT/bench/multi_rir_demo_ours/run_single_rir_ours_bundle.sh")
[[ "$KEEP_DB" -eq 1 ]] && ROUTI_CMD+=(--keep-db)
ROUTI_CMD+=("$source_bundle_dir" "$ROUTI_OUT/$rir")
"${ROUTI_CMD[@]}"
done
# Run the rpki-client matrix once over all requested RIRs.
CLIENT_ARGS=(
python3 "$RPKI_CLIENT_ROOT/tools/run_bundle_matrix.py"
--bundle-dir "$NORMALIZED_BUNDLE_ROOT"
--build-dir "$RPKI_CLIENT_BUILD_DIR"
--work-dir "$CLIENT_OUT"
)
[[ "$KEEP_DB" -eq 1 ]] && CLIENT_ARGS+=(--keep-db)
for raw_rir in "${RIR_LIST[@]}"; do
rir="$(printf '%s' "$raw_rir" | tr '[:upper:]' '[:lower:]' | xargs)"
[[ -n "$rir" ]] || continue
CLIENT_ARGS+=(--rir "$rir")
done
"${CLIENT_ARGS[@]}"
SUMMARY_JSON="$OUT_ROOT/summary.json"
SUMMARY_MD="$OUT_ROOT/summary.md"
# Merge routinator per-RIR verification.json files with the rpki-client
# matrix summary into one combined summary.json.
python3 - "$ROUTI_OUT" "$CLIENT_OUT/matrix-summary.json" "$SUMMARY_JSON" <<'PY'
import json
from pathlib import Path
import sys
routi_root = Path(sys.argv[1])
client_summary = json.loads(Path(sys.argv[2]).read_text())
summary_path = Path(sys.argv[3])
summary = {"routinator": {}, "rpki_client": client_summary}
for verification in sorted(routi_root.glob("*/verification.json")):
rir = verification.parent.name
summary["routinator"][rir] = json.loads(verification.read_text())
summary_path.write_text(json.dumps(summary, indent=2))
PY
# Render the combined summary as two markdown tables (routinator, rpki-client).
python3 - "$SUMMARY_JSON" "$SUMMARY_MD" <<'PY'
import json
from pathlib import Path
import sys
summary = json.loads(Path(sys.argv[1]).read_text())
out = Path(sys.argv[2])
lines = [
"# Peer Bundle Matrix Summary",
"",
"## Routinator",
"",
"| rir | base_vrp | base_vap | sequence_vrp | sequence_vap |",
"|---|---|---|---|---|",
]
for rir, data in sorted(summary["routinator"].items()):
if "steps" in data:
lines.append(
f"| {rir} | {str(data.get('baseMatch')).lower()} | {str(data.get('baseVapsMatch')).lower()} | "
f"{str(data.get('summary', {}).get('allStepsMatch')).lower()} | "
f"{str(data.get('summary', {}).get('allStepsVapsMatch')).lower()} |"
)
else:
lines.append(
f"| {rir} | {str(data.get('baseMatch')).lower()} | {str(data.get('baseVapsMatch')).lower()} | "
f"{str(data.get('deltaMatch')).lower()} | {str(data.get('deltaVapsMatch')).lower()} |"
)
lines += [
"",
"## rpki-client",
"",
"| rir | base_vrp | base_vap | sequence_vrp | sequence_vap |",
"|---|---|---|---|---|",
]
for rir, phases in sorted(summary["rpki_client"].items()):
base = phases.get("base", {})
step_items = [
value for key, value in phases.items()
if key not in ("base", "delta") and isinstance(value, dict)
]
if "delta" in phases:
step_items.append(phases["delta"])
all_step_match = all(item.get("match") for item in step_items) if step_items else None
all_step_vap_match = all(item.get("vaps_match") for item in step_items) if step_items else None
lines.append(
f"| {rir} | {str(base.get('match')).lower()} | {str(base.get('vaps_match')).lower()} | "
f"{str(all_step_match).lower()} | {str(all_step_vap_match).lower()} |"
)
out.write_text("\n".join(lines) + "\n")
PY
# Print the output root so callers can capture it.
echo "$OUT_ROOT"
123
scripts/stage2_perf_compare_m4.sh Executable file
View File

@ -0,0 +1,123 @@
#!/usr/bin/env bash
set -euo pipefail
# M4: Compare decode+profile (OURS) vs routinator baseline (rpki crate 0.19.1)
# on selected_der_v2 fixtures (cer/crl/manifest/roa/aspa).
#
# Outputs under:
# - rpki/target/bench/stage2_selected_der_v2_routinator_decode_release.{csv,md}
# - rpki/target/bench/stage2_selected_der_v2_compare_ours_vs_routinator_decode_release.{csv,md}
#
# Notes:
# - OURS decode benchmark is produced by:
# `cargo test --release --test bench_stage2_decode_profile_selected_der_v2 -- --ignored --nocapture`
# and writes `stage2_selected_der_v2_decode_release.csv` when BENCH_OUT_CSV is set.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
RPKI_DIR="$ROOT_DIR"
OUT_DIR="$RPKI_DIR/target/bench"
mkdir -p "$OUT_DIR"
# All artifact locations and bench knobs below are env-overridable.
OURS_CSV="${OURS_CSV:-$OUT_DIR/stage2_selected_der_v2_decode_release.csv}"
ROUT_CSV="${ROUT_CSV:-$OUT_DIR/stage2_selected_der_v2_routinator_decode_release.csv}"
ROUT_MD="${ROUT_MD:-$OUT_DIR/stage2_selected_der_v2_routinator_decode_release.md}"
COMPARE_CSV="${COMPARE_CSV:-$OUT_DIR/stage2_selected_der_v2_compare_ours_vs_routinator_decode_release.csv}"
COMPARE_MD="${COMPARE_MD:-$OUT_DIR/stage2_selected_der_v2_compare_ours_vs_routinator_decode_release.md}"
WARMUP_ITERS="${WARMUP_ITERS:-10}"
ROUNDS="${ROUNDS:-3}"
MIN_ROUND_MS="${MIN_ROUND_MS:-200}"
# The OURS CSV must be produced beforehand; fail fast with a reproduction hint.
if [[ ! -f "$OURS_CSV" ]]; then
echo "ERROR: missing OURS CSV: $OURS_CSV" >&2
echo "Hint: run:" >&2
echo " cd rpki && BENCH_WARMUP_ITERS=$WARMUP_ITERS BENCH_ROUNDS=$ROUNDS BENCH_MIN_ROUND_MS=$MIN_ROUND_MS \\" >&2
echo " BENCH_OUT_CSV=target/bench/stage2_selected_der_v2_decode_release.csv \\" >&2
echo " cargo test --release --test bench_stage2_decode_profile_selected_der_v2 -- --ignored --nocapture" >&2
exit 2
fi
echo "[1/2] Run routinator baseline bench (release)..." >&2
# Run the baseline bench in a subshell so the cwd change stays scoped.
(cd "$RPKI_DIR/benchmark/routinator_object_bench" && cargo run --release -q -- \
--dir "$RPKI_DIR/tests/benchmark/selected_der_v2" \
--warmup-iters "$WARMUP_ITERS" \
--rounds "$ROUNDS" \
--min-round-ms "$MIN_ROUND_MS" \
--out-csv "$ROUT_CSV" \
--out-md "$ROUT_MD")
echo "[2/2] Join CSVs + compute ratios..." >&2
# Join OURS and routinator rows on (type, sample), compute the ours/routinator
# ns-per-op ratio, and emit a comparison CSV plus a markdown table.
python3 - "$OURS_CSV" "$ROUT_CSV" "$COMPARE_CSV" "$COMPARE_MD" <<'PY'
import csv
import sys
from pathlib import Path

ours_path = Path(sys.argv[1])
rout_path = Path(sys.argv[2])
out_csv_path = Path(sys.argv[3])
out_md_path = Path(sys.argv[4])

def read_csv(path: Path):
    with path.open(newline="") as f:
        return list(csv.DictReader(f))

# Fixed output schema so an empty OURS CSV still produces a valid header-only
# file (the previous out_rows[0].keys() form raised IndexError on no rows).
FIELDNAMES = [
    "type", "sample", "size_bytes", "complexity",
    "ours_avg_ns_per_op", "ours_ops_per_sec",
    "rout_avg_ns_per_op", "rout_ops_per_sec",
    "ratio_ours_over_rout",
]

ours_rows = read_csv(ours_path)
rout_rows = read_csv(rout_path)
rout_by_key = {(r["type"], r["sample"]): r for r in rout_rows}
out_rows = []
for r in ours_rows:
    key = (r["type"], r["sample"])
    rr = rout_by_key.get(key)
    if rr is None:
        raise SystemExit(f"missing routinator row for {key}")
    ours_ns = float(r["avg_ns_per_op"])
    rout_ns = float(rr["avg_ns_per_op"])
    # Guard against divide-by-zero in the baseline column.
    ratio = (ours_ns / rout_ns) if rout_ns != 0.0 else float("inf")
    out_rows.append({
        "type": r["type"],
        "sample": r["sample"],
        "size_bytes": r["size_bytes"],
        "complexity": r["complexity"],
        "ours_avg_ns_per_op": f"{ours_ns:.2f}",
        "ours_ops_per_sec": f"{float(r['ops_per_sec']):.2f}",
        "rout_avg_ns_per_op": f"{rout_ns:.2f}",
        "rout_ops_per_sec": f"{float(rr['ops_per_sec']):.2f}",
        "ratio_ours_over_rout": f"{ratio:.4f}",
    })
out_rows.sort(key=lambda x: (x["type"], x["sample"]))
out_csv_path.parent.mkdir(parents=True, exist_ok=True)
with out_csv_path.open("w", newline="") as f:
    w = csv.DictWriter(f, fieldnames=FIELDNAMES)
    w.writeheader()
    w.writerows(out_rows)
lines = []
lines.append("# Stage2 ours vs routinator (decode+profile, selected_der_v2)\n")
lines.append(f"- ours_csv: `{ours_path}`\n")
lines.append(f"- rout_csv: `{rout_path}`\n")
lines.append("\n")
lines.append("| type | sample | size_bytes | complexity | ours ns/op | rout ns/op | ratio |\n")
lines.append("|---|---|---:|---:|---:|---:|---:|\n")
for r in out_rows:
    lines.append(
        f"| {r['type']} | {r['sample']} | {r['size_bytes']} | {r['complexity']} | "
        f"{r['ours_avg_ns_per_op']} | {r['rout_avg_ns_per_op']} | {r['ratio_ours_over_rout']} |\n"
    )
out_md_path.parent.mkdir(parents=True, exist_ok=True)
out_md_path.write_text("".join(lines), encoding="utf-8")
PY
echo "Done." >&2
echo "- routinator CSV: $ROUT_CSV" >&2
echo "- compare CSV: $COMPARE_CSV" >&2
echo "- compare MD: $COMPARE_MD" >&2
View File

@ -182,6 +182,8 @@ RFC 引用RFC 5280 §4.2.2.1RFC 5280 §4.2.2.2RFC 5280 §4.2.1.6。
| `1.2.840.113549.1.9.5` | CMS signedAttrs: signing-time | RFC 9589 §4更新 RFC 6488 §3(1f)/(1g) |
| `1.2.840.113549.1.9.16.1.24` | ROA eContentType: id-ct-routeOriginAuthz | RFC 9582 §3 |
| `1.2.840.113549.1.9.16.1.26` | Manifest eContentType: id-ct-rpkiManifest | RFC 9286 §4.1 |
| `1.2.840.113549.1.9.16.1.35` | Ghostbusters eContentType: id-ct-rpkiGhostbusters | RFC 6493 §6RFC 6493 §9.1 |
| `1.2.840.113549.1.9.16.1.49` | ASPA eContentType: id-ct-ASPA | `draft-ietf-sidrops-aspa-profile-21` §2 |
| `1.3.6.1.5.5.7.1.1` | X.509 v3 扩展authorityInfoAccess | RFC 5280 §4.2.2.1 |
| `1.3.6.1.5.5.7.1.11` | X.509 v3 扩展subjectInfoAccess | RFC 5280 §4.2.2.2RPKI 约束见 RFC 6487 §4.8.8 |
| `1.3.6.1.5.5.7.48.2` | AIA accessMethod: id-ad-caIssuers | RFC 5280 §4.2.2.1 |

View File

@ -1,36 +1,94 @@
# 01. Trust Anchor Locator (TAL)
# 01. TAL(Trust Anchor Locator)
## 1.1 对象定位
TAL是一个数据格式/配置文件目的是告诉RP信任锚的公钥是什么以及相关对象可以从哪里获取。
## 1.2 数据格式 RFC 8630 §2.2
TAL是一个配置文件格式定义如下
```
The TAL is an ordered sequence of:
1. an optional comment section consisting of one or more lines each starting with the "#" character, followed by human-readable informational UTF-8 text, conforming to the restrictions defined
in Section 2 of [RFC5198], and ending with a line break,
2. a URI section that is comprised of one or more ordered lines, each containing a TA URI, and ending with a line break,
3. a line break, and
4. a subjectPublicKeyInfo [RFC5280] in DER format [X.509], encoded in base64 (see Section 4 of [RFC4648]). To avoid long lines,
line breaks MAY be inserted into the base64-encoded string.
Note that line breaks in this file can use either "<CRLF>" or "<LF>".
TALTrust Anchor Locator用于向 RP 提供:
1) 可检索“当前 TA 证书”的一个或多个 URI以及
2) 该 TA 证书的 `subjectPublicKeyInfo`SPKI期望值用于绑定/防替换)。
RFC 8630 §2RFC 8630 §2.3。
## 1.2 原始载体与编码
- 载体文本文件ASCII/UTF-8 兼容的行文本)。
- 行结束:允许 `CRLF``LF`。RFC 8630 §2.2。
- 结构:`[可选注释区] + URI 区 + 空行 + Base64(SPKI DER)`。RFC 8630 §2.2。
### 1.2.1 注释区
- 一行或多行,以 `#` 开头,后随人类可读 UTF-8 文本。RFC 8630 §2.2。
- 注释行文本需符合 RFC 5198 §2 的限制RFC 8630 §2.2 引用)。
### 1.2.2 URI 区
- 一行或多行,每行一个 TA URI按序排列。RFC 8630 §2.2。
- TA URI **MUST**`rsync``https`。RFC 8630 §2.2。
### 1.2.3 空行分隔
- URI 区后必须有一个额外的换行(即空行),用于与 Base64 区分隔。RFC 8630 §2.2(第 3 点)。
### 1.2.4 SPKIBase64
- `subjectPublicKeyInfo` 以 DER 编码ASN.1)后,再 Base64 编码表示。RFC 8630 §2.2(第 4 点)。
- 为避免长行Base64 字符串中 **MAY** 插入换行。RFC 8630 §2.2。
- SPKI ASN.1 类型来自 X.509 / RFC 5280。RFC 8630 §2.2(第 4 点RFC 5280 §4.1.2.7。
#### 1.2.4.1 `SubjectPublicKeyInfo` 的 ASN.1 定义RFC 5280 §4.1
TAL 中携带的是一个 X.509 `SubjectPublicKeyInfo` 的 DER 字节串(再 Base64。其 ASN.1 定义如下RFC 5280 §4.1。
```asn1
SubjectPublicKeyInfo ::= SEQUENCE {
algorithm AlgorithmIdentifier,
subjectPublicKey BIT STRING }
```
## 1.3 抽象数据模型
其中 `algorithm`/`subjectPublicKey` 的取值受 RPKI 算法 profile 约束(例如 RSA 2048 + SHA-256 等SKI/AKI 计算仍用 SHA-1。RFC 5280 §4.1.2.7RFC 7935 §2-§3.1RFC 6487 §4.8.2-§4.8.3。
### 1.3.1 TAL
## 1.3 解析规则(语义层)
输入:`TalFileBytes: bytes`
解析步骤:
1) 按 `LF` / `CRLF` 识别行。RFC 8630 §2.2。
2) 从文件开头读取所有以 `#` 开头的行,作为 `comments`(保留去掉 `#` 后的 UTF-8 文本或保留原始行均可,但需保持 UTF-8。RFC 8630 §2.2。
3) 继续读取一行或多行非空行,作为 `ta_uris`保持顺序。RFC 8630 §2.2(第 2 点)。
4) 读取一个空行必须存在。RFC 8630 §2.2(第 3 点)。
5) 将剩余行拼接为 Base64 文本移除行分隔Base64 解码得到 `subject_public_key_info_der`。RFC 8630 §2.2(第 4 点)。
6) 可选:将 `subject_public_key_info_der` 解析为 X.509 `SubjectPublicKeyInfo` 结构(用于与 TA 证书比对。RFC 8630 §2.3RFC 5280 §4.1.2.7。
URI 解析与约束:
- `ta_uris[*]` 的 scheme **MUST**`rsync``https`。RFC 8630 §2.2。
- 每个 `ta_uri` **MUST** 指向“单个对象”,且 **MUST NOT** 指向目录或集合。RFC 8630 §2.3。
## 1.4 抽象数据模型(接口)
### 1.4.1 `Tal`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|----------|-------------|-------------------------|--------------------------------------------|---------------|
| uris | Vec<TalUri> | 指向TA的URI列表 | 允许rsync和https协议。 | RFC 8630 §2.1 |
| comment | Vec<String> | 注释(可选) | | RFC 8630 §2.2 |
| spki_der | Vec<u8> | 原始的subjectPublicKeyInfo | x.509 SubjectPublicKeyInfo DER编码再base64编码 | RFC 8630 §2.2 |
|---|---|---|---|---|
| `raw` | `bytes` | TAL 原始文件字节 | 原样保留(可选但建议) | RFC 8630 §2.2 |
| `comments` | `list[Utf8Text]` | 注释行(按出现顺序) | 每行以 `#` 开头;文本为 UTF-8内容限制见 RFC 5198 §2 | RFC 8630 §2.2 |
| `ta_uris` | `list[Uri]` | TA 证书位置列表 | 至少 1 个;按序;每个 scheme 必须是 `rsync``https` | RFC 8630 §2.2 |
| `subject_public_key_info_der` | `DerBytes` | TA 证书 SPKI 的期望 DER | Base64 解码所得 DERBase64 中可有换行 | RFC 8630 §2.2 |
### 1.4.2 `TaUri`(可选细化)
### 1.3.2 TalUri
> 若你的实现希望对 URI 做更强类型化,可在 `Tal.ta_uris` 上进一步拆分为 `TaUri` 结构。
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|-------|--------|---------|---------|---------------|
| Rsync | String | rsync地址 | | RFC 8630 §2.1 |
| Https | String | https地址 | | RFC 8630 §2.1 |
|---|---|---|---|---|
| `uri` | `Uri` | 完整 URI 文本 | scheme 为 `rsync``https` | RFC 8630 §2.2 |
| `scheme` | `enum` | `rsync` / `https` | 从 `uri` 解析 | RFC 8630 §2.2 |
## 1.5 字段级约束清单(实现对照)
- TAL 由(可选)注释区 + URI 区 + 空行 + Base64(SPKI DER) 组成。RFC 8630 §2.2。
- URI 区至少 1 行,每行一个 TA URI顺序有意义。RFC 8630 §2.2。
- TA URI 仅允许 `rsync``https`。RFC 8630 §2.2。
- Base64 区允许插入换行。RFC 8630 §2.2。
- 每个 TA URI 必须引用单个对象,不能指向目录/集合。RFC 8630 §2.3。

View File

@ -1,121 +0,0 @@
# 02. Trust Anchor (TA)
## 2.1 对象定位
TA是一个自签名的CA证书。
## 2.2 原始载体与编码
- 载体X.509 certificates.
- 编码DER遵循 RFC 5280 的 certificate 结构与字段语义但受限于RFC 8630 §2.3
## 2.3 抽象数据类型
### 2.3.1 TA
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|-------------------|------------------|---------------|---------|---------------|
| name | String | 标识该TA如apnic等 | | |
| cert_der | Vec<u8> | 原始DER内容 | | |
| cert | X509Certificate | 基础X509证书 | | RFC 5280 §4.1 |
| resource | ResourceSet | 资源集合 | | |
| publication_point | Uri | 获取该TA的URI | | |
### 2.3.2 ResourceSet
资源集合是来自RFC 3779的IP地址块§2和AS号段§3)受约束于RFC 8630 §2.3
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|------|----------------|--------|-------------|---------------------------|
| ips | IpResourceSet | IP地址集合 | 不能是inherit | RFC 3779 §2和RFC 8630 §2.3 |
| asns | AsnResourceSet | ASN集合 | 不能是inherit | RFC 3779 §3和RFC 8630 §2.3 |
[//]: # ()
[//]: # (### 2.3.3 IpResourceSet)
[//]: # (包括IPv4和IPv6的前缀表示)
[//]: # ()
[//]: # (| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |)
[//]: # (|----|------------------------|----------|-------------|--------------|)
[//]: # (| v4 | PrefixSet<Ipv4Prefix> | IPv4前缀集合 | | RFC 3779 §2 |)
[//]: # (| v6 | PrefixSet<Ipv6Prefix> | IPv6前缀集合 | | RFC 3779 §2 |)
[//]: # ()
[//]: # (### 2.3.4 AsnResourceSet)
[//]: # ()
[//]: # (| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |)
[//]: # (|-------|--------------------|-------|-------------|-------------|)
[//]: # (| range | RangeSet<AsnBlock> | ASN集合 | | RFC 3779 §3 |)
[//]: # ()
[//]: # (### 2.3.5 Ipv4Prefix)
[//]: # ()
[//]: # (| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |)
[//]: # (|------|-----|-----|---------|-------------|)
[//]: # (| addr | u32 | 地址 | | RFC 3779 §2 |)
[//]: # (| len | u8 | 长度 | 0-32 | RFC 3779 §2 |)
[//]: # ()
[//]: # ()
[//]: # (### 2.3.6 Ipv6Prefix)
[//]: # ()
[//]: # (| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |)
[//]: # (|------|------|-----|---------|-------------|)
[//]: # (| addr | u128 | 地址 | | RFC 3779 §2 |)
[//]: # (| len | u8 | 长度 | 0-128 | RFC 3779 §2 |)
[//]: # ()
[//]: # (### 2.3.7 AsnBlock)
[//]: # ()
[//]: # (| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |)
[//]: # (|----------|----------|-------|---------|--------------|)
[//]: # (| asn | Asn | ASN | | RFC 3779 §3 |)
[//]: # (| asnRange | AsnRange | ASN范围 | | RFC 3779 §3 |)
[//]: # ()
[//]: # ()
[//]: # (### 2.3.8 Asn)
[//]: # ()
[//]: # (| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |)
[//]: # (|-----|-----|-----|---------|-------------|)
[//]: # (| asn | u32 | ASN | | RFC 3779 §3 |)
[//]: # ()
[//]: # (### 2.3.8 AsnRange)
[//]: # ()
[//]: # (| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |)
[//]: # (|-----|-----|-------|---------|--------------|)
[//]: # (| min | Asn | 最小ASN | | RFC 3779 §3 |)
[//]: # (| max | Asn | 最大ASN | | RFC 3779 §3 |)
# 2.4 TA校验流程RFC 8630 §3
1. 从TAL的URI列表中获取证书对象。顺序访问若前面失效再访问后面的
2. 验证证书格式必须是当前、有效的自签名RPKI证书。
3. 验证公钥匹配。TAL中的SubjectPublicKeyInfo与下载证书的公钥一致。
4. 其他检查。
5. 更新本地存储库缓存。

View File

@ -0,0 +1,88 @@
# 02. TA(Trust Anchor,自签名证书)
## 2.1 对象定位
在 RP 侧“信任锚Trust Anchor, TA”以一个**自签名 CA 资源证书**体现,其可获取位置与期望公钥由 TAL 提供。RFC 8630 §2.3。
本文件描述两个紧密相关的数据对象:
1) `TaCertificate`TA 自签名资源证书本体X.509 DER
2) `TrustAnchor`:语义组合对象(`TAL` + `TaCertificate` 的绑定语义)
## 2.2 原始载体与编码
- 载体X.509 证书(通常以 `.cer` 存放于仓库,但文件扩展名不作为语义依据)。
- 编码DER。TA 证书必须符合 RPKI 资源证书 profile。RFC 8630 §2.3RFC 6487 §4。
### 2.2.1 X.509 Certificate 的 ASN.1 定义RFC 5280 §4.1TA 与 RC 共享)
TA 证书与普通资源证书RC在编码层面都是 X.509 `Certificate`DER。其 ASN.1 定义如下RFC 5280 §4.1。
```asn1
Certificate ::= SEQUENCE {
tbsCertificate TBSCertificate,
signatureAlgorithm AlgorithmIdentifier,
signatureValue BIT STRING }
```
其中 `tbsCertificate.extensions`v3 扩展)是 RPKI 语义的主要承载处IP/AS 资源扩展、SIA/AIA/CRLDP 等。RFC 5280 §4.1RPKI 对字段/扩展存在性与关键性约束见 RFC 6487 §4。
> 说明:更完整的 RC 编码层结构(包括 Extension 外层“extnValue 二次 DER 解码”的套娃方式)在 `03_resource_certificate_rc.md``00_common_types.md` 中给出。
## 2.3 TA 证书的 RPKI 语义约束(在 RC profile 基础上额外强调)
### 2.3.1 自签名与 profile
- TA URI 指向的对象 **MUST** 是一个**自签名 CA 证书**,并且 **MUST** 符合 RPKI 证书 profile。RFC 8630 §2.3RFC 6487 §4。
- 自签名证书在 RC profile 下的通用差异(例如 CRLDP/AIA 的省略规则、AKI 的规则)见 RFC 6487。RFC 6487 §4.8.3RFC 6487 §4.8.6RFC 6487 §4.8.7。
### 2.3.2 INRIP/AS 资源扩展)在 TA 上的额外约束
- TA 的 INR 扩展IP/AS 资源扩展RFC 3779**MUST** 是非空资源集合。RFC 8630 §2.3。
- TA 的 INR 扩展 **MUST NOT** 使用 `inherit` 形式。RFC 8630 §2.3。
- 说明:一般 RC profile 允许 `inherit`。RFC 6487 §4.8.10RFC 6487 §4.8.11RFC 3779 §2.2.3.5RFC 3779 §3.2.3.3。
### 2.3.3 TAL ↔ TA 公钥绑定
- 用于验证 TA 的公钥(来自 TAL 中的 SPKI**MUST** 与 TA 证书中的 `subjectPublicKeyInfo` 相同。RFC 8630 §2.3。
### 2.3.4 TA 稳定性语义(实现需建模为“约束/假设”,但不属于验证结果态)
- TA 公钥与 TAL 中公钥必须保持稳定(用于 RP 侧长期信任锚。RFC 8630 §2.3。
### 2.3.5 TA 与 CRL/Manifest 的关系(语义)
- RFC 8630 指出TA 为自签名证书,没有对应 CRL且不会被 manifest 列出TA 的获取/轮换由 TAL 控制。RFC 8630 §2.3。
> 注:这条更偏“发布/运维语义”,但对数据对象建模有影响:`TrustAnchor` 组合对象不应依赖 CRL/MFT 的存在。
## 2.4 抽象数据模型(接口)
### 2.4.1 `TaCertificate`
> 该对象在字段层面复用 `RC(CA)` 的语义模型(见 `03_resource_certificate_rc.md`),但增加 TA 特有约束。
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `raw_der` | `DerBytes` | TA 证书 DER | X.509 DER证书 profile 约束见 RC 文档 | RFC 8630 §2.3RFC 6487 §4 |
| `rc_ca` | `ResourceCaCertificate` | 以 RC(CA) 语义解析出的字段集合 | 必须满足“自签名 CA”分支约束且 INR 必须非空且不允许 inherit | RFC 8630 §2.3RFC 6487 §4RFC 3779 §2/§3 |
### 2.4.2 `TrustAnchor`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `tal` | `Tal` | TAL 文件语义对象 | 见 `01_tal.md` | RFC 8630 §2.2 |
| `ta_certificate` | `TaCertificate` | TA 证书语义对象 | TA URI 指向的对象 | RFC 8630 §2.3 |
| `tal_spki_der` | `DerBytes` | 从 TAL 解析出的 SPKI DER | `tal.subject_public_key_info_der` | RFC 8630 §2.2 |
| `ta_spki_der` | `DerBytes` | 从 TA 证书抽取的 SPKI DER | `ta_certificate``subjectPublicKeyInfo` | RFC 8630 §2.3RFC 5280 §4.1.2.7 |
**绑定约束(字段级)**
- `tal_spki_der` 必须与 `ta_spki_der` 完全相等(字节层面的 DER 等价。RFC 8630 §2.3。
## 2.5 字段级约束清单(实现对照)
- TA URI 指向的对象必须是自签名 CA 证书,且符合 RPKI 证书 profile。RFC 8630 §2.3RFC 6487 §4。
- TA 的 INR 扩展必须非空,且不得使用 inherit。RFC 8630 §2.3。
- TAL 中 SPKI 必须与 TA 证书的 `subjectPublicKeyInfo` 匹配。RFC 8630 §2.3。
- TA 不依赖 CRL/MFT无对应 CRL且不被 manifest 列出。RFC 8630 §2.3。

View File

@ -1,314 +0,0 @@
# 03. RC (Resource Certificates)
## 3.1 对象定位
RC是资源证书包括CA和EE
## 3.2 原始载体与编码
- 载体X.509 certificates.
- 编码DER遵循 RFC 5280 的 Certificate 结构与字段语义,但受 RPKI profile 限制RFC 6487 §4
### 3.2.1 基本语法RFC 5280 §4RFC 6487
RC是遵循RFC5280定义的X.509Certificate语法(RFC 5280 §4)并且符合RFC 6487 §4的约束。只选取RFC 6487 §4章节列出来的字段。Unless specifically noted as being OPTIONAL, all the fields listed
here MUST be present, and any other fields MUST NOT appear in a
conforming resource certificate.
```
Certificate ::= SEQUENCE {
tbsCertificate TBSCertificate,
signatureAlgorithm AlgorithmIdentifier,
signatureValue BIT STRING
}
TBSCertificate ::= SEQUENCE {
version [0] EXPLICIT Version MUST be v3,
serialNumber CertificateSerialNumber,
signature AlgorithmIdentifier,
issuer Name,
subject Name,
validity Validity,
subjectPublicKeyInfo SubjectPublicKeyInfo,
extensions [3] EXPLICIT Extensions OPTIONAL
-- If present, version MUST be v3
}
Version ::= INTEGER { v1(0), v2(1), v3(2) }
CertificateSerialNumber ::= INTEGER
Validity ::= SEQUENCE {
notBefore Time,
notAfter Time }
Time ::= CHOICE {
utcTime UTCTime,
generalTime GeneralizedTime }
UniqueIdentifier ::= BIT STRING
SubjectPublicKeyInfo ::= SEQUENCE {
algorithm AlgorithmIdentifier,
subjectPublicKey BIT STRING }
Extensions ::= SEQUENCE SIZE (1..MAX) OF Extension
Extension ::= SEQUENCE {
extnID OBJECT IDENTIFIER,
critical BOOLEAN DEFAULT FALSE,
extnValue OCTET STRING
-- contains the DER encoding of an ASN.1 value
-- corresponding to the extension type identified
-- by extnID
}
```
> 其中`Name` "a valid X.501 distinguished name"(RFC 6487 §4.4)
### 3.2.2 证书扩展字段 RFC 6487 §4.8)
RC的证书扩展字段按照RFC 6487 §4.8的规定,有以下几个扩展:
- Basic Constraints
- Subject Key Identifier
- Authority Key Identifier
- Key Usage
- Extended Key Usage(CA证书以及验证RPKI对象的EE证书不能出现该字段。非RPKI对象的EE可以出现EKU但必须为non-critical)
- CRL Distribution Points
- Authority Information Access
- Subject Information Access
- SIA for CA Certificates
- SIA for EE Certificates
- Certificate Policies
- IP Resources
- AS Resources
```
# Basic Constraints
id-ce-basicConstraints OBJECT IDENTIFIER ::= { id-ce 19 }
BasicConstraints ::= SEQUENCE {
cA BOOLEAN DEFAULT FALSE }
# Subject Key Identifier
id-ce-subjectKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 14 }
SubjectKeyIdentifier ::= KeyIdentifier
KeyIdentifier ::= OCTET STRING
# Authority Key Identifier
id-ce-authorityKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 35 }
AuthorityKeyIdentifier ::= SEQUENCE {
keyIdentifier [0] KeyIdentifier OPTIONAL }
# Key Usage
id-ce-keyUsage OBJECT IDENTIFIER ::= { id-ce 15 }
KeyUsage ::= BIT STRING {
digitalSignature (0),
nonRepudiation (1), -- recent editions of X.509 have
-- renamed this bit to contentCommitment
keyEncipherment (2),
dataEncipherment (3),
keyAgreement (4),
keyCertSign (5),
cRLSign (6),
encipherOnly (7),
decipherOnly (8) }
# Extended Key Usage
id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 }
ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
KeyPurposeId ::= OBJECT IDENTIFIER
# CRL Distribution Points
id-ce-cRLDistributionPoints OBJECT IDENTIFIER ::= { id-ce 31 }
CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
DistributionPoint ::= SEQUENCE {
distributionPoint [0] DistributionPointName OPTIONAL }
DistributionPointName ::= CHOICE {
fullName [0] GeneralNames }
## Authority Information Access
id-pe-authorityInfoAccess OBJECT IDENTIFIER ::= { id-pe 1 }
AuthorityInfoAccessSyntax ::=
SEQUENCE SIZE (1..MAX) OF AccessDescription
AccessDescription ::= SEQUENCE {
accessMethod OBJECT IDENTIFIER,
accessLocation GeneralName }
# AccessDescription
id-ad OBJECT IDENTIFIER ::= { id-pkix 48 }
# CA 证书发布位置
id-ad-caIssuers OBJECT IDENTIFIER ::= { id-ad 2 }
# OCSP 服务地址
id-ad-ocsp OBJECT IDENTIFIER ::= { id-ad 1 }
# Subject Information Access
id-pe-subjectInfoAccess OBJECT IDENTIFIER ::= { id-pe 11 }
SubjectInfoAccessSyntax ::= SEQUENCE SIZE (1..MAX) OF AccessDescription
AccessDescription ::= SEQUENCE {
accessMethod OBJECT IDENTIFIER,
accessLocation GeneralName }
## Subject Information Access for CA (RFC 6487 §4.8.8.1)
id-ad OBJECT IDENTIFIER ::= { id-pkix 48 }
id-ad-rpkiManifest OBJECT IDENTIFIER ::= { id-ad 10 }
必须存在一个accessMethod=id-ad-caRepository, accessLocation=rsync URI。
必须存在一个accessMethod=id-ad-rpkiManifest, accessLocation=rsync URI指向该CA的mft对象。
## Subject Information Access for EE (RFC 6487 §4.8.8.2)
id-ad-signedObject OBJECT IDENTIFIER ::= { id-ad 11 }
必须存在一个accessMethod=id-ad-signedObject, accessLocation=rsyncURI
不允许其他的accessMethod
# Certificate Policies
id-ce-certificatePolicies OBJECT IDENTIFIER ::= { id-ce 32 }
anyPolicy OBJECT IDENTIFIER ::= { id-ce-certificatePolicies 0 }
certificatePolicies ::= SEQUENCE SIZE (1..MAX) OF PolicyInformation
PolicyInformation ::= SEQUENCE {
policyIdentifier CertPolicyId,
policyQualifiers SEQUENCE SIZE (1..MAX) OF PolicyQualifierInfo OPTIONAL }
CertPolicyId ::= OBJECT IDENTIFIER
PolicyQualifierInfo ::= SEQUENCE {
policyQualifierId PolicyQualifierId,
qualifier ANY DEFINED BY policyQualifierId }
-- policyQualifierIds for Internet policy qualifiers
id-qt OBJECT IDENTIFIER ::= { id-pkix 2 }
id-qt-cps OBJECT IDENTIFIER ::= { id-qt 1 }
id-qt-unotice OBJECT IDENTIFIER ::= { id-qt 2 }
PolicyQualifierId ::= OBJECT IDENTIFIER ( id-qt-cps | id-qt-unotice )
Qualifier ::= CHOICE {
cPSuri CPSuri,
userNotice UserNotice }
CPSuri ::= IA5String
UserNotice ::= SEQUENCE {
noticeRef NoticeReference OPTIONAL,
explicitText DisplayText OPTIONAL }
NoticeReference ::= SEQUENCE {
organization DisplayText,
noticeNumbers SEQUENCE OF INTEGER }
DisplayText ::= CHOICE {
ia5String IA5String (SIZE (1..200)),
visibleString VisibleString (SIZE (1..200)),
bmpString BMPString (SIZE (1..200)),
utf8String UTF8String (SIZE (1..200)) }
# IP Resources
id-pe-ipAddrBlocks OBJECT IDENTIFIER ::= { id-pe 7 }
IPAddrBlocks ::= SEQUENCE OF IPAddressFamily
IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI --
addressFamily OCTET STRING (SIZE (2..3)),
ipAddressChoice IPAddressChoice }
IPAddressChoice ::= CHOICE {
inherit NULL, -- inherit from issuer --
addressesOrRanges SEQUENCE OF IPAddressOrRange }
IPAddressOrRange ::= CHOICE {
addressPrefix IPAddress,
addressRange IPAddressRange }
IPAddressRange ::= SEQUENCE {
min IPAddress,
max IPAddress }
IPAddress ::= BIT STRING
# AS Resources
id-pe-autonomousSysIds OBJECT IDENTIFIER ::= { id-pe 8 }
ASIdentifiers ::= SEQUENCE {
asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL,
rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL}
ASIdentifierChoice ::= CHOICE {
inherit NULL, -- inherit from issuer --
asIdsOrRanges SEQUENCE OF ASIdOrRange }
ASIdOrRange ::= CHOICE {
id ASId,
range ASRange }
ASRange ::= SEQUENCE {
min ASId,
max ASId }
ASId ::= INTEGER
```
# 3.3 抽象数据结构
采用X509 Certificate + Resource + 约束校验的方式组合
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|----------|---------------------|----------|---------|---------------|
| cert_der | Vec<u8> | 证书原始数据 | | |
| cert | X509Certificate | 基础X509证书 | | RFC 5280 §4.1 |
| resource | ResourceSet | 资源集合 | | |
# 3.4 约束规则
## 3.4.1 Cert约束校验规则
RFC 6487中规定的证书的字段参见[3.2.1 ](#321-基本语法rfc-5280-4rfc-6487-)
-
| 字段 | 语义 | 约束/解析规则 | RFC 引用 |
|-----------|-------|----------------------------------------------|--------------|
| version | 证书版本 | 必须是v3(值为2) | RFC6487 §4.1 |
| serial | 证书编号 | 同一个CA签发的证书编号必须唯一 | RFC6487 §4.2 |
| validity | 证书有效期 | notBefore时间不能早于证书的生成时间。若时间段大于上级证书的有效期也是有效的 | RFC6487 §4.6 |
## 3.4.2 Cert Extensions中字段的约束校验规则
RFC 6487中规定的扩展字段参见[3.2.2 ](#322-证书扩展字段-rfc-6487-48)
| 字段 | critical | 语义 | 约束/解析规则 | RFC 引用 |
|----------------------------|----------|-------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------|
| basicConstraints | Y | 证书类型 | CA证书cA=TRUE; EE证书cA=FALSE | RFC6487 §4.8.1 |
| subjectKeyIdentifier | N | 证书公钥 | SKI = SHA-1(DER-encoded SPKI bit string) | RFC6487 §4.8.2 |
| authorityKeyIdentifier | N | 父证书的公钥 | 字段只包含keyIdentifier不能包含authorityCertIssuer和authorityCertSerialNumber除了自签名CA外其余证书必须出现。自签名CA若出现该字段则等于SKI | RFC6487 §4.8.3 |
| keyUsage | Y | 证书公钥的用途权限 | CA证书keyCertSign = TRUE, cRLSign = TRUE 其他都是FALSE。EE证书digitalSignature = TRUE 其他都是FALSE | RFC6487 §4.8.4 |
| extendedKeyUsage | N | 扩展证书公钥的用途权限 | CA证书不能出现EKU验证 RPKI 对象的 EE 证书不能出现EKU非 RPKI 对象的 EE可以出现EKU但必须为non-critical. | RFC6487 §4.8.5 |
| cRLDistributionPoints | N | CRL的发布点位置 | 字段distributionPoint不能包含reasons、cRLIssuer。其中distributionPoint字段包含fullName不能包含nameRelativeToCRLIssuer。fullName的格式必须是URI。自签名证书禁止出现该字段。非自签名证书必须出现。一个CA只能有一个CRL。一个CRLDP只能包含一个distributionPoint。但一个distributionPoint字段中可以包含多于1个的URI但必须包含rsync URI且必须是最新的。 | RFC6487 §4.8.6 |
| authorityInformationAccess | N | 签发者的发布点位置 | 除了自签名的CA必须出现。自签名CA禁止出现。推荐的URI访问方式是rsync并且rsyncURI的话必须指定accessMethod=id-ad-caIssuers | RFC6487 §4.8.7 |
| subjectInformationAccess | N | 发布点位置 | CA证书必须存在。必须存在一个accessMethod=id-ad-caRepositoryaccessLocation=rsyncURI。必须存在一个accessMethod=id-ad-rpkiManifest,accessLocation=rsync URI指向该CA的mft对象。 EE证书必须存在。必须存在一个accessMethod=id-ad-signedObject,accessLocation=rsyncURI。不允许其他的accessMethod | RFC6487 §4.8.8 |
| certificatePolicies | Y | 证书策略 | 必须存在并且只能存在一种策略RFC 6484 — RPKI Certificate Policy (CP) | RFC6487 §4.8.9 |
| iPResources | Y | IP地址集合 | 所有的RPKI证书中必须包含IP Resources或者ASResources或者两者都包含。 | RFC6487 §4.8.10 |
| aSResources | Y | ASN集合 | 所有的RPKI证书中必须包含IP Resources或者ASResources或者两者都包含。 | RFC6487 §4.8.11 |

View File

@ -0,0 +1,460 @@
# 03. RCResource Certificate资源证书CA/EE
## 3.1 对象定位
资源证书RC是 X.509 v3 证书,遵循 PKIX profileRFC 5280并受 RPKI profile 进一步约束。RFC 6487 §4。
RC 在 RPKI 中至少分为两类语义用途:
- `CA 证书`:签发下级证书/CRL并在 SIA 中声明发布点与 manifest。RFC 6487 §4.8.8.1。
- `EE 证书`:用于验证某个 RPKI Signed Object如 ROA/MFT在 SIA 中指向被验证对象。RFC 6487 §4.8.8.2。
## 3.2 原始载体与编码
- 载体X.509 证书。
- 编码DER。RFC 6487 §4“valid X.509 public key certificate consistent with RFC 5280” + RPKI 限制)。
### 3.2.1 X.509 v3 证书基本语法ASN.1RFC 5280 §4.1
资源证书在编码层面是 RFC 5280 定义的 X.509 v3 `Certificate`DER其中 `tbsCertificate` 携带主体字段与扩展集合(`Extensions`。RFC 5280 §4.1。
```asn1
Certificate ::= SEQUENCE {
tbsCertificate TBSCertificate,
signatureAlgorithm AlgorithmIdentifier,
signatureValue BIT STRING }
TBSCertificate ::= SEQUENCE {
version [0] EXPLICIT Version DEFAULT v1,
serialNumber CertificateSerialNumber,
signature AlgorithmIdentifier,
issuer Name,
validity Validity,
subject Name,
subjectPublicKeyInfo SubjectPublicKeyInfo,
issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL,
-- If present, version MUST be v2 or v3
subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL,
-- If present, version MUST be v2 or v3
extensions [3] EXPLICIT Extensions OPTIONAL
-- If present, version MUST be v3
}
Version ::= INTEGER { v1(0), v2(1), v3(2) }
CertificateSerialNumber ::= INTEGER
Validity ::= SEQUENCE {
notBefore Time,
notAfter Time }
Time ::= CHOICE {
utcTime UTCTime,
generalTime GeneralizedTime }
UniqueIdentifier ::= BIT STRING
SubjectPublicKeyInfo ::= SEQUENCE {
algorithm AlgorithmIdentifier,
subjectPublicKey BIT STRING }
Extensions ::= SEQUENCE SIZE (1..MAX) OF Extension
Extension ::= SEQUENCE {
extnID OBJECT IDENTIFIER,
critical BOOLEAN DEFAULT FALSE,
extnValue OCTET STRING
-- contains the DER encoding of an ASN.1 value
-- corresponding to the extension type identified
-- by extnID
}
```
### 3.2.2 AlgorithmIdentifierASN.1RFC 5280 §4.1.1.2
```asn1
AlgorithmIdentifier ::= SEQUENCE {
algorithm OBJECT IDENTIFIER,
parameters ANY DEFINED BY algorithm OPTIONAL }
```
### 3.2.3 Name / DN 结构ASN.1RFC 5280 §4.1.2.4
```asn1
Name ::= CHOICE { -- only one possibility for now --
rdnSequence RDNSequence }
RDNSequence ::= SEQUENCE OF RelativeDistinguishedName
RelativeDistinguishedName ::=
SET SIZE (1..MAX) OF AttributeTypeAndValue
AttributeTypeAndValue ::= SEQUENCE {
type AttributeType,
value AttributeValue }
AttributeType ::= OBJECT IDENTIFIER
AttributeValue ::= ANY -- DEFINED BY AttributeType
DirectoryString ::= CHOICE {
teletexString TeletexString (SIZE (1..MAX)),
printableString PrintableString (SIZE (1..MAX)),
universalString UniversalString (SIZE (1..MAX)),
utf8String UTF8String (SIZE (1..MAX)),
bmpString BMPString (SIZE (1..MAX)) }
```
### 3.2.4 GeneralNames / GeneralNameASN.1RFC 5280 §4.2.1.6
> 说明RPKI 的 AIA/SIA/CRLDP 等扩展通常把 URI 编码在 `uniformResourceIdentifier [6] IA5String` 分支中。RFC 5280 §4.2.1.6。
```asn1
GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
GeneralName ::= CHOICE {
otherName [0] OtherName,
rfc822Name [1] IA5String,
dNSName [2] IA5String,
x400Address [3] ORAddress,
directoryName [4] Name,
ediPartyName [5] EDIPartyName,
uniformResourceIdentifier [6] IA5String,
iPAddress [7] OCTET STRING,
registeredID [8] OBJECT IDENTIFIER }
OtherName ::= SEQUENCE {
type-id OBJECT IDENTIFIER,
value [0] EXPLICIT ANY DEFINED BY type-id }
EDIPartyName ::= SEQUENCE {
nameAssigner [0] DirectoryString OPTIONAL,
partyName [1] DirectoryString }
```
### 3.2.5 AIAAuthority Information AccessASN.1RFC 5280 §4.2.2.1
```asn1
id-pe-authorityInfoAccess OBJECT IDENTIFIER ::= { id-pe 1 }
AuthorityInfoAccessSyntax ::=
SEQUENCE SIZE (1..MAX) OF AccessDescription
AccessDescription ::= SEQUENCE {
accessMethod OBJECT IDENTIFIER,
accessLocation GeneralName }
id-ad OBJECT IDENTIFIER ::= { id-pkix 48 }
id-ad-caIssuers OBJECT IDENTIFIER ::= { id-ad 2 }
```
### 3.2.6 SIASubject Information AccessASN.1RFC 5280 §4.2.2.2
```asn1
id-pe-subjectInfoAccess OBJECT IDENTIFIER ::= { id-pe 11 }
SubjectInfoAccessSyntax ::=
SEQUENCE SIZE (1..MAX) OF AccessDescription
AccessDescription ::= SEQUENCE {
accessMethod OBJECT IDENTIFIER,
accessLocation GeneralName }
id-ad OBJECT IDENTIFIER ::= { id-pkix 48 }
id-ad-caRepository OBJECT IDENTIFIER ::= { id-ad 5 }
```
### 3.2.7 RPKI 在 SIA 中新增/使用的 accessMethod OIDRFC 6487 §4.8.8.1 / §4.8.8.2RFC 8182 §3.2
> 说明:下列 OID 用于 `AccessDescription.accessMethod`,并放在 SIA 的 `extnValue` 内层结构中(其外层 extnID 仍为 SIA`id-pe-subjectInfoAccess`。RFC 6487 §4.8.8RFC 8182 §3.2。
```asn1
id-ad OBJECT IDENTIFIER ::= { id-pkix 48 }
id-ad-rpkiManifest OBJECT IDENTIFIER ::= { id-ad 10 } -- 1.3.6.1.5.5.7.48.10
id-ad-signedObject OBJECT IDENTIFIER ::= { id-ad 11 } -- 1.3.6.1.5.5.7.48.11
id-ad-rpkiNotify OBJECT IDENTIFIER ::= { id-ad 13 } -- 1.3.6.1.5.5.7.48.13
```
### 3.2.8 CRLDistributionPointsCRLDPASN.1RFC 5280 §4.2.1.13
```asn1
id-ce-cRLDistributionPoints OBJECT IDENTIFIER ::= { id-ce 31 }
CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
DistributionPoint ::= SEQUENCE {
distributionPoint [0] DistributionPointName OPTIONAL,
reasons [1] ReasonFlags OPTIONAL,
cRLIssuer [2] GeneralNames OPTIONAL }
DistributionPointName ::= CHOICE {
fullName [0] GeneralNames,
nameRelativeToCRLIssuer [1] RelativeDistinguishedName }
ReasonFlags ::= BIT STRING {
unused (0),
keyCompromise (1),
cACompromise (2),
affiliationChanged (3),
superseded (4),
cessationOfOperation (5),
certificateHold (6),
privilegeWithdrawn (7),
aACompromise (8) }
```
### 3.2.9 Certificate PoliciesASN.1RFC 5280 §4.2.1.4
```asn1
id-ce-certificatePolicies OBJECT IDENTIFIER ::= { id-ce 32 }
anyPolicy OBJECT IDENTIFIER ::= { id-ce-certificatePolicies 0 }
certificatePolicies ::= SEQUENCE SIZE (1..MAX) OF PolicyInformation
PolicyInformation ::= SEQUENCE {
policyIdentifier CertPolicyId,
policyQualifiers SEQUENCE SIZE (1..MAX) OF
PolicyQualifierInfo OPTIONAL }
CertPolicyId ::= OBJECT IDENTIFIER
PolicyQualifierInfo ::= SEQUENCE {
policyQualifierId PolicyQualifierId,
qualifier ANY DEFINED BY policyQualifierId }
-- policyQualifierIds for Internet policy qualifiers
id-qt OBJECT IDENTIFIER ::= { id-pkix 2 }
id-qt-cps OBJECT IDENTIFIER ::= { id-qt 1 }
id-qt-unotice OBJECT IDENTIFIER ::= { id-qt 2 }
PolicyQualifierId ::= OBJECT IDENTIFIER ( id-qt-cps | id-qt-unotice )
Qualifier ::= CHOICE {
cPSuri CPSuri,
userNotice UserNotice }
CPSuri ::= IA5String
```
### 3.2.10 RFC 3779 IP/AS 资源扩展ASN.1RFC 3779 §2.2.1-§2.2.3RFC 3779 §3.2.1-§3.2.3
> 说明RFC 3779 给出两个扩展的 OID 与 ASN.1 语法;它们作为 X.509 v3 扩展出现在 `extensions` 中(外层 extnID 为下列 OID。RPKI profile 进一步约束 criticality/SAFI/RDI 等,见 RFC 6487 §4.8.10-§4.8.11。
```asn1
-- IP Address Delegation Extension
id-pe-ipAddrBlocks OBJECT IDENTIFIER ::= { id-pe 7 }
IPAddrBlocks ::= SEQUENCE OF IPAddressFamily
IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI --
addressFamily OCTET STRING (SIZE (2..3)),
ipAddressChoice IPAddressChoice }
IPAddressChoice ::= CHOICE {
inherit NULL, -- inherit from issuer --
addressesOrRanges SEQUENCE OF IPAddressOrRange }
IPAddressOrRange ::= CHOICE {
addressPrefix IPAddress,
addressRange IPAddressRange }
IPAddressRange ::= SEQUENCE {
min IPAddress,
max IPAddress }
IPAddress ::= BIT STRING
-- Autonomous System Identifier Delegation Extension
id-pe-autonomousSysIds OBJECT IDENTIFIER ::= { id-pe 8 }
ASIdentifiers ::= SEQUENCE {
asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL,
rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL}
ASIdentifierChoice ::= CHOICE {
inherit NULL, -- inherit from issuer --
asIdsOrRanges SEQUENCE OF ASIdOrRange }
ASIdOrRange ::= CHOICE {
id ASId,
range ASRange }
ASRange ::= SEQUENCE {
min ASId,
max ASId }
ASId ::= INTEGER
```
### 3.2.11 其它 RPKI profile 相关扩展的 ASN.1 定义RFC 5280 §4.2.1.1-§4.2.1.3RFC 5280 §4.2.1.9RFC 5280 §4.2.1.12
> 说明:这些是 RPKI 资源证书 profileRFC 6487 §4.8)所引用的通用 PKIX 扩展语法。RPKI 对其“必须/禁止/criticality/字段允许性”有额外限制(见本文件 3.3/3.4),但编码层的 ASN.1 类型来自 RFC 5280。
```asn1
id-ce-authorityKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 35 }
AuthorityKeyIdentifier ::= SEQUENCE {
keyIdentifier [0] KeyIdentifier OPTIONAL,
authorityCertIssuer [1] GeneralNames OPTIONAL,
authorityCertSerialNumber [2] CertificateSerialNumber OPTIONAL }
KeyIdentifier ::= OCTET STRING
id-ce-subjectKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 14 }
SubjectKeyIdentifier ::= KeyIdentifier
id-ce-keyUsage OBJECT IDENTIFIER ::= { id-ce 15 }
KeyUsage ::= BIT STRING {
digitalSignature (0),
nonRepudiation (1), -- recent editions of X.509 have
-- renamed this bit to contentCommitment
keyEncipherment (2),
dataEncipherment (3),
keyAgreement (4),
keyCertSign (5),
cRLSign (6),
encipherOnly (7),
decipherOnly (8) }
id-ce-basicConstraints OBJECT IDENTIFIER ::= { id-ce 19 }
BasicConstraints ::= SEQUENCE {
cA BOOLEAN DEFAULT FALSE,
pathLenConstraint INTEGER (0..MAX) OPTIONAL }
id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 }
ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
KeyPurposeId ::= OBJECT IDENTIFIER
```
## 3.3 抽象数据模型(接口)
> 说明:本模型面向“语义化解析产物”。实现可保留 `raw_der` 作为可追溯入口。
### 3.3.1 顶层联合类型:`ResourceCertificate`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `raw_der` | `DerBytes` | 证书 DER | 原样保留(建议) | RFC 6487 §4 |
| `tbs` | `RpkixTbsCertificate` | 证书语义字段(见下) | 仅允许 RFC 6487 允许的字段/扩展;其他字段 MUST NOT 出现 | RFC 6487 §4 |
| `kind` | `enum { ca, ee }` | 语义分类 | 来自 BasicConstraints + 用途约束 | RFC 6487 §4.8.1RFC 6487 §4.8.8 |
### 3.3.1.1 派生类型(用于字段类型标注)
为避免在其它对象文档里反复写“`ResourceCertificate``kind==...`”,这里定义两个派生/别名类型:
- `ResourceCaCertificate``ResourceCertificate``kind == ca`
- `ResourceEeCertificate``ResourceCertificate``kind == ee`
这些派生类型不引入新字段,只是对 `ResourceCertificate.kind` 的约束化视图。
### 3.3.2 `RpkixTbsCertificate`(语义字段集合)
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `version` | `int` | X.509 版本 | MUST 为 v3字段值为 2 | RFC 6487 §4.1 |
| `serial_number` | `int` | 序列号 | 正整数;对每 CA 签发唯一 | RFC 6487 §4.2 |
| `signature_algorithm` | `Oid` | 证书签名算法 | 必须为 `sha256WithRSAEncryption``1.2.840.113549.1.1.11` | RFC 6487 §4.3RFC 7935 §2引用 RFC 4055 |
| `issuer_dn` | `RpkixDistinguishedName` | 颁发者 DN | 必含 1 个 CommonName可含 1 个 serialNumberCN 必须 PrintableString | RFC 6487 §4.4 |
| `subject_dn` | `RpkixDistinguishedName` | 主体 DN | 同 issuer 约束;且对同一 issuer 下“实体+公钥”唯一 | RFC 6487 §4.5 |
| `validity_not_before` | `UtcTime` | 有效期起 | X.509 `Time`UTCTime/GeneralizedTime解析为 UTC 时间点 | RFC 6487 §4.6.1RFC 5280 §4.1.2.5 |
| `validity_not_after` | `UtcTime` | 有效期止 | X.509 `Time`UTCTime/GeneralizedTime解析为 UTC 时间点 | RFC 6487 §4.6.2RFC 5280 §4.1.2.5 |
| `subject_public_key_info` | `DerBytes` | SPKI DER | 算法 profile 指定 | RFC 6487 §4.7RFC 7935 §3.1 |
| `extensions` | `RpkixExtensions` | 扩展集合 | 见下表criticality/存在性/内容受约束 | RFC 6487 §4.8 |
### 3.3.3 `RpkixDistinguishedName`RPKI profile 下的 DN 语义)
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `common_name` | `string` | CommonName (CN) | MUST 存在且仅 1 个;类型为 PrintableString | RFC 6487 §4.4RFC 6487 §4.5 |
| `serial_number` | `optional[string]` | serialNumber | MAY 存在且仅 1 个 | RFC 6487 §4.4RFC 6487 §4.5 |
| `rfc4514` | `string` | DN 的 RFC4514 字符串表示 | 便于日志/索引(实现自选) | RFC 6487 §4.5(引用 RFC4514) |
### 3.3.4 `RpkixExtensions`(核心扩展语义)
> 表中 “存在性/criticality” 指 RPKI profile 下对该扩展的要求;实现应能区分 “字段缺失” 与 “字段存在但不符合约束”。
| 字段 | 类型 | 语义 | 存在性/criticality 与内容约束 | RFC 引用 |
|---|---|---|---|---|
| `basic_constraints` | `optional[BasicConstraints]` | CA 标志 | **extnID=`2.5.29.19`**CA 证书MUST present & criticalEEMUST NOT presentpathLen MUST NOT present | RFC 6487 §4.8.1RFC 5280 §4.2.1.9 |
| `subject_key_identifier` | `bytes` | SKI | **extnID=`2.5.29.14`**MUST present & non-critical值为 subjectPublicKey 的 DER bit string 的 SHA-1 哈希 | RFC 6487 §4.8.2(引用 RFC 5280 §4.2.1.2) |
| `authority_key_identifier` | `optional[AuthorityKeyIdentifier]` | AKI | **extnID=`2.5.29.35`**自签名MAY present 且可等于 SKI非自签名MUST presentauthorityCertIssuer/authorityCertSerialNumber MUST NOT presentnon-critical | RFC 6487 §4.8.3RFC 5280 §4.2.1.1 |
| `key_usage` | `KeyUsage` | KeyUsage | **extnID=`2.5.29.15`**MUST present & criticalCA`keyCertSign``cRLSign` 为 TRUEEE`digitalSignature` 为 TRUE | RFC 6487 §4.8.4RFC 5280 §4.2.1.3 |
| `extended_key_usage` | `optional[OidSet]` | EKU | **extnID=`2.5.29.37`**CAMUST NOT appear用于验证 RPKI 对象的 EEMUST NOT appear若出现不得标 critical | RFC 6487 §4.8.5RFC 5280 §4.2.1.12 |
| `crl_distribution_points` | `optional[CrlDistributionPoints]` | CRLDP | **extnID=`2.5.29.31`**自签名MUST be omitted非自签名MUST present & non-critical仅 1 个 DistributionPointfullName URI必须包含至少 1 个 `rsync://` | RFC 6487 §4.8.6RFC 5280 §4.2.1.13 |
| `authority_info_access` | `optional[AuthorityInfoAccess]` | AIA | **extnID=`1.3.6.1.5.5.7.1.1`**自签名MUST be omitted非自签名MUST present & non-critical必须含 accessMethod `id-ad-caIssuers`(**`1.3.6.1.5.5.7.48.2`**) 的 `rsync://` URI可含同对象其它 URI | RFC 6487 §4.8.7RFC 5280 §4.2.2.1 |
| `subject_info_access_ca` | `optional[SubjectInfoAccessCa]` | SIACA | **extnID=`1.3.6.1.5.5.7.1.11`**CAMUST present & non-critical必须含 accessMethod `id-ad-caRepository`(**`1.3.6.1.5.5.7.48.5`**)`rsync://` 目录 URI`id-ad-rpkiManifest`(**`1.3.6.1.5.5.7.48.10`**)`rsync://` 对象 URI若 CA 使用 RRDP还会包含 `id-ad-rpkiNotify`(**`1.3.6.1.5.5.7.48.13`**)HTTPS Notification URI | RFC 6487 §4.8.8.1RFC 5280 §4.2.2.2RFC 8182 §3.2 |
| `subject_info_access_ee` | `optional[SubjectInfoAccessEe]` | SIAEE | **extnID=`1.3.6.1.5.5.7.1.11`**EEMUST present & non-critical必须含 accessMethod `id-ad-signedObject`(**`1.3.6.1.5.5.7.48.11`**)URI **MUST include** `rsync://`EE 的 SIA 不允许其它 AccessMethods | RFC 6487 §4.8.8.2RFC 5280 §4.2.2.2 |
| `certificate_policies` | `CertificatePolicies` | 证书策略 | **extnID=`2.5.29.32`**MUST present & critical恰好 1 个 policy并允许 0 或 1 个 CPS qualifier若存在其 id 必为 `id-qt-cps`(**`1.3.6.1.5.5.7.2.1`**) | RFC 6487 §4.8.9RFC 7318 §2RFC 5280 §4.2.1.4 |
| `ip_resources` | `optional[IpResourceSet]` | IP 资源扩展 | **extnID=`1.3.6.1.5.5.7.1.7`**IP/AS 两者至少其一 MUST present若 present MUST be critical内容为 RFC 3779 语义;在公用互联网场景 SAFI MUST NOT 使用;且必须为非空或 inherit | RFC 6487 §4.8.10RFC 3779 §2.2.1RFC 3779 §2.2.2 |
| `as_resources` | `optional[AsResourceSet]` | AS 资源扩展 | **extnID=`1.3.6.1.5.5.7.1.8`**IP/AS 两者至少其一 MUST present若 present MUST be critical内容为 RFC 3779 语义RDI MUST NOT 使用;且必须为非空或 inherit | RFC 6487 §4.8.11RFC 3779 §3.2.1RFC 3779 §3.2.2 |
### 3.3.5 结构化子类型(建议)
#### `BasicConstraints`
| 字段 | 类型 | 语义 | 约束 | RFC 引用 |
|---|---|---|---|---|
| `ca` | `bool` | 是否 CA | 由 issuer 决定;在 CA 证书中该扩展必须存在 | RFC 6487 §4.8.1 |
| `path_len_constraint` | `None` | pathLenConstraint | MUST NOT presentRPKI profile 不使用) | RFC 6487 §4.8.1 |
#### `AuthorityKeyIdentifier`
| 字段 | 类型 | 语义 | 约束 | RFC 引用 |
|---|---|---|---|---|
| `key_identifier` | `bytes` | AKI.keyIdentifier | 使用 issuer 公钥的 SHA-1 哈希(按 RFC 5280 的定义) | RFC 6487 §4.8.3(引用 RFC 5280 §4.2.1.1) |
| `authority_cert_issuer` | `None` | authorityCertIssuer | MUST NOT present | RFC 6487 §4.8.3 |
| `authority_cert_serial_number` | `None` | authorityCertSerialNumber | MUST NOT present | RFC 6487 §4.8.3 |
#### `CrlDistributionPoints`
| 字段 | 类型 | 语义 | 约束 | RFC 引用 |
|---|---|---|---|---|
| `distribution_point_uris` | `list[Uri]` | CRL 位置列表 | 仅 1 个 DistributionPoint必须包含至少 1 个 `rsync://` URI 指向该 issuer 最新 CRL可含其它 URI | RFC 6487 §4.8.6 |
#### `AuthorityInfoAccess`
| 字段 | 类型 | 语义 | 约束 | RFC 引用 |
|---|---|---|---|---|
| `ca_issuers_uris` | `list[Uri]` | 上级 CA 证书位置 | accessMethod=`id-ad-caIssuers``1.3.6.1.5.5.7.48.2`);必含 `rsync://` URI可含同对象其它 URI | RFC 6487 §4.8.7RFC 5280 §4.2.2.1 |
#### `SubjectInfoAccessCa`
| 字段 | 类型 | 语义 | 约束 | RFC 引用 |
|---|---|---|---|---|
| `ca_repository_uris` | `list[Uri]` | CA 发布点目录repository publication point | accessMethod=`id-ad-caRepository``1.3.6.1.5.5.7.48.5`);至少 1 个;必须包含 `rsync://`;也可包含其它机制(例如 `https://`)作为“同一目录”的替代访问方式;顺序表示 CA 偏好 | RFC 6487 §4.8.8.1RFC 5280 §4.2.2.2 |
| `rpki_manifest_uris` | `list[Uri]` | 当前 manifest 对象 URI | accessMethod=`id-ad-rpkiManifest``1.3.6.1.5.5.7.48.10`);至少 1 个;必须包含 `rsync://`;也可包含其它机制(例如 `https://`)作为“同一对象”的替代访问方式 | RFC 6487 §4.8.8.1RFC 5280 §4.2.2.2 |
| `rpki_notify_uris` | `optional[list[Uri]]` | RRDP NotificationUpdate Notification FileURI | accessMethod=`id-ad-rpkiNotify``1.3.6.1.5.5.7.48.13`);若存在则 accessLocation MUST 为 `https://` URI指向 RRDP Notification 文件 | RFC 8182 §3.2RFC 5280 §4.2.2.2 |
#### `SubjectInfoAccessEe`
| 字段 | 类型 | 语义 | 约束 | RFC 引用 |
|---|---|---|---|---|
| `signed_object_uris` | `list[Uri]` | 被 EE 证书验证的签名对象位置 | accessMethod=`id-ad-signedObject``1.3.6.1.5.5.7.48.11`);必须包含 `rsync://`;其它 URI 可作为同对象替代机制EE SIA 不允许其它 AccessMethods | RFC 6487 §4.8.8.2RFC 5280 §4.2.2.2 |
#### `CertificatePolicies`
| 字段 | 类型 | 语义 | 约束 | RFC 引用 |
|---|---|---|---|---|
| `policy_oid` | `Oid` | 唯一 policy OID | 恰好 1 个 policyRPKI CP 分配的 OID 为 `id-cp-ipAddr-asNumber``1.3.6.1.5.5.7.14.2` | RFC 6487 §4.8.9RFC 6484 §1.2 |
| `cps_uri` | `optional[Uri]` | CPS policy qualifier URI | MAY 存在且最多 1 个;若存在其 `policyQualifierId` 必为 `id-qt-cps`;对该 URI 不施加处理要求 | RFC 7318 §2RFC 5280 §4.2.1.4 |
## 3.4 字段级约束清单(实现对照)
- 仅允许 RFC 6487 §4 指定的字段/扩展;未列出字段 MUST NOT 出现。RFC 6487 §4。
- 证书版本必须为 v3。RFC 6487 §4.1。
- CA/EE 在 BasicConstraints 与 SIA 的约束不同。RFC 6487 §4.8.1RFC 6487 §4.8.8.1RFC 6487 §4.8.8.2。
- KeyUsageCA 仅 `keyCertSign`/`cRLSign`EE 仅 `digitalSignature`。RFC 6487 §4.8.4。
- CRLDP/AIA自签名必须省略非自签名必须存在并包含 `rsync://`。RFC 6487 §4.8.6RFC 6487 §4.8.7。
- IP/AS 资源扩展:两者至少其一存在;若存在必须 critical语义来自 RFC 3779在公用互联网场景 SAFI 与 RDI 均不得使用。RFC 6487 §4.8.10RFC 6487 §4.8.11RFC 3779 §2.2.3RFC 3779 §3.2.3。

View File

@ -0,0 +1,158 @@
# 05. RPKI Signed ObjectCMS SignedData 外壳)
## 5.1 对象定位
ROA、Manifest 等都属于 “RPKI Signed Object”其外壳是 CMS SignedData并受 RFC 6488 的 profile 约束RFC 9589 进一步更新了 `signedAttrs` 的要求。RFC 6488 §2-§4RFC 9589 §4。
本文件描述**通用外壳模型**eContentType/eContent 由具体对象文档给出)。
## 5.2 原始载体与编码
- 载体CMS `ContentInfo`,其中 `contentType` 为 SignedData。RFC 6488 §2RFC 6488 §3(1a)。
- 编码DER。RFC 6488 §2RFC 6488 §3(1l)。
### 5.2.1 CMS 外壳ContentInfoASN.1RFC 5652 §3
```asn1
ContentInfo ::= SEQUENCE {
contentType ContentType,
content [0] EXPLICIT ANY DEFINED BY contentType }
ContentType ::= OBJECT IDENTIFIER
```
### 5.2.2 CMS 外壳SignedDataASN.1RFC 5652 §5.1
```asn1
id-signedData OBJECT IDENTIFIER ::= { iso(1) member-body(2)
us(840) rsadsi(113549) pkcs(1) pkcs7(7) 2 }
SignedData ::= SEQUENCE {
version CMSVersion,
digestAlgorithms DigestAlgorithmIdentifiers,
encapContentInfo EncapsulatedContentInfo,
certificates [0] IMPLICIT CertificateSet OPTIONAL,
crls [1] IMPLICIT RevocationInfoChoices OPTIONAL,
signerInfos SignerInfos }
DigestAlgorithmIdentifiers ::= SET OF DigestAlgorithmIdentifier
SignerInfos ::= SET OF SignerInfo
```
### 5.2.3 CMS 外壳EncapsulatedContentInfoASN.1RFC 5652 §5.2
```asn1
EncapsulatedContentInfo ::= SEQUENCE {
eContentType ContentType,
eContent [0] EXPLICIT OCTET STRING OPTIONAL }
ContentType ::= OBJECT IDENTIFIER
```
> 注CMS 允许 `eContent` 不一定 DER 编码RFC 5652 §5.2);但 RPKI signed object profile 要求**整个对象 DER 编码**RFC 6488 §2RFC 6488 §3(1l)),且 eContentpayload由对象规范定义并通常为 DER如 ROARFC 9582 §4ManifestRFC 9286 §4.2)。
### 5.2.4 CMS 外壳SignerInfo 与 AttributeASN.1RFC 5652 §5.3
```asn1
SignerInfo ::= SEQUENCE {
version CMSVersion,
sid SignerIdentifier,
digestAlgorithm DigestAlgorithmIdentifier,
signedAttrs [0] IMPLICIT SignedAttributes OPTIONAL,
signatureAlgorithm SignatureAlgorithmIdentifier,
signature SignatureValue,
unsignedAttrs [1] IMPLICIT UnsignedAttributes OPTIONAL }
SignerIdentifier ::= CHOICE {
issuerAndSerialNumber IssuerAndSerialNumber,
subjectKeyIdentifier [0] SubjectKeyIdentifier }
SignedAttributes ::= SET SIZE (1..MAX) OF Attribute
UnsignedAttributes ::= SET SIZE (1..MAX) OF Attribute
Attribute ::= SEQUENCE {
attrType OBJECT IDENTIFIER,
attrValues SET OF AttributeValue }
AttributeValue ::= ANY
SignatureValue ::= OCTET STRING
```
### 5.2.5 RPKI 对 CMS 外壳字段的 profile 约束RFC 6488 §2.1RFC 6488 §3更新RFC 9589 §4
> 说明:上面是 CMS 的通用 ASN.1RPKI 进一步约束取值与允许出现的字段(例如 SignedData.version 必须为 3、crls 必须省略、signedAttrs 的内容限制等。RFC 6488 §2-§3RFC 9589 §4。
### 5.2.6 signedAttrs 中允许的属性与 attrType OIDRFC 6488 §2.1.6.4.1-§2.1.6.4.2更新RFC 9589 §4
RPKI signed object profile 对 `SignerInfo.signedAttrs` 的 Attribute 集合施加限制(除 ASN.1 结构外,还包含“只允许哪些 attrType”的编码约束
- `content-type`attrType OID `1.2.840.113549.1.9.3`。RFC 6488 §2.1.6.4.1。
- `message-digest`attrType OID `1.2.840.113549.1.9.4`。RFC 6488 §2.1.6.4.2。
- `signing-time`attrType OID `1.2.840.113549.1.9.5`。RFC 9589 §4更新 RFC 6488 的相关要求)。
并且:
- 每种属性在集合中只能出现一次;且 `attrValues` 虽然语法是 `SET OF`,但在 RPKI 中必须只含一个值。RFC 6488 §2.1.6.4。
## 5.3 抽象数据模型(接口)
### 5.3.1 `RpkiSignedObject`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `raw_der` | `DerBytes` | CMS DER | 原样保留(建议) | RFC 6488 §2RFC 6488 §3(1l) |
| `content_info_content_type` | `Oid` | ContentInfo.contentType | MUST 为 SignedData`1.2.840.113549.1.7.2` | RFC 6488 §3(1a) |
| `signed_data` | `SignedDataProfiled` | SignedData 语义字段 | 见下 | RFC 6488 §2.1RFC 6488 §3 |
### 5.3.2 `SignedDataProfiled`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `version` | `int` | SignedData.version | MUST 为 3 | RFC 6488 §3(1b)RFC 6488 §2.1.1 |
| `digest_algorithms` | `list[Oid]` | SignedData.digestAlgorithms | MUST contain exactly one digest algorithm且必须为 `id-sha256``2.16.840.1.101.3.4.2.1` | RFC 6488 §2.1.2RFC 7935 §2引用 RFC 5754 |
| `encap_content_info` | `EncapsulatedContentInfo` | EncapsulatedContentInfo | 见下eContentType 由具体对象定义 | RFC 6488 §2.1.3 |
| `certificates` | `list[ResourceEeCertificate]` | SignedData.certificates | MUST present且仅包含 1 个 EE 证书;该 EE 的 SKI 必须匹配 SignerInfo.sid | RFC 6488 §3(1c) |
| `crls` | `None` | SignedData.crls | MUST be omitted | RFC 6488 §3(1d) |
| `signer_infos` | `list[SignerInfoProfiled]` | SignedData.signerInfos | MUST contain exactly one SignerInfo | RFC 6488 §2.1(SignerInfos 约束段落) |
### 5.3.3 `EncapsulatedContentInfo`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `econtent_type` | `Oid` | eContentType | MUST 与 signedAttrs.content-type 的 attrValues 一致;具体值由对象定义(如 ROA/MFT | RFC 6488 §3(1h)RFC 6488 §2.1.3.1 |
| `econtent_der` | `DerBytes` | eContent对象 payload | DER 编码的对象特定 ASN.1ROA/MFT 文档定义);在 CMS 中以 OCTET STRING 承载 | RFC 6488 §2.1.3RFC 9286 §4.2RFC 9582 §4 |
### 5.3.4 `SignerInfoProfiled`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `version` | `int` | SignerInfo.version | MUST 为 3 | RFC 6488 §3(1e) |
| `sid_ski` | `bytes` | sidSubjectKeyIdentifier | 必须与 EE 证书的 SKI 匹配 | RFC 6488 §3(1c) |
| `digest_algorithm` | `Oid` | SignerInfo.digestAlgorithm | 必须为 `id-sha256``2.16.840.1.101.3.4.2.1` | RFC 6488 §3(1j)RFC 7935 §2引用 RFC 5754 |
| `signature_algorithm` | `Oid` | SignerInfo.signatureAlgorithm | 生成时 MUST 为 `rsaEncryption``1.2.840.113549.1.1.1`);验证时实现必须接受 `rsaEncryption``sha256WithRSAEncryption``1.2.840.113549.1.1.11` | RFC 6488 §3(1k)RFC 7935 §2 |
| `signed_attrs` | `SignedAttrsProfiled` | signedAttrs | MUST present仅允许特定 3 个属性 | RFC 9589 §4更新 RFC 6488 §3(1f)/(1g) |
| `unsigned_attrs` | `None` | unsignedAttrs | MUST be omitted | RFC 6488 §3(1i) |
### 5.3.5 `SignedAttrsProfiled`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `content_type` | `Oid` | signedAttrs.content-type | attrType=`1.2.840.113549.1.9.3`MUST presentattrValues 等于 eContentType | RFC 9589 §4RFC 6488 §3(1h) |
| `message_digest` | `bytes` | signedAttrs.message-digest | attrType=`1.2.840.113549.1.9.4`MUST present | RFC 9589 §4更新 RFC 6488 §3(1f) |
| `signing_time` | `UtcTime` | signedAttrs.signing-time | attrType=`1.2.840.113549.1.9.5`MUST present时间值正确性不用于安全假设 | RFC 9589 §4RFC 9589 §5 |
| `other_attrs` | `None` | 其它 signed attributes | MUST NOT be includedbinary-signing-time 也不允许) | RFC 9589 §4 |
## 5.4 字段级约束清单(实现对照)
- ContentInfo.contentType 必须为 SignedDataOID `1.2.840.113549.1.7.2`。RFC 6488 §3(1a)。
- SignedData.version 必须为 3且 SignerInfos 仅允许 1 个 SignerInfo。RFC 6488 §3(1b)RFC 6488 §2.1。
- SignedData.certificates 必须存在且仅含 1 个 EE 证书;该证书 SKI 必须匹配 SignerInfo.sid。RFC 6488 §3(1c)。
- SignedData.crls 必须省略。RFC 6488 §3(1d)。
- signedAttrs 必须存在,且仅允许 content-type/message-digest/signing-time其它全部禁止。RFC 9589 §4。
- eContentType 必须与 content-type attribute 一致。RFC 6488 §3(1h)。
- unsignedAttrs 必须省略。RFC 6488 §3(1i)。
- digest/signature 算法必须符合算法 profile。RFC 6488 §3(1j)/(1k)RFC 7935 §2。
- 整个对象必须 DER 编码。RFC 6488 §3(1l)。

110
specs/06_manifest_mft.md Normal file
View File

@ -0,0 +1,110 @@
# 06. Manifest (MFT)
## 6.1 对象定位
Manifest 是 CA 发布点内对象的“清单”(文件名 + hash用于 RP 侧检测删除/替换/回放等不一致情况。RFC 9286 §1RFC 9286 §6。
Manifest 是一种 RPKI Signed ObjectCMS 外壳遵循 RFC 6488/9589eContent 遵循 RFC 9286。RFC 9286 §4RFC 6488 §4RFC 9589 §4。
## 6.2 原始载体与编码
- 外壳CMS SignedData DER`05_signed_object_cms.md`。RFC 9286 §4RFC 6488 §2。
- eContentType`id-ct-rpkiManifest`OID `1.2.840.113549.1.9.16.1.26`。RFC 9286 §4.1。
- eContentDER 编码 ASN.1 `Manifest`。RFC 9286 §4.2。
### 6.2.1 eContentType 与 eContent 的 ASN.1 定义RFC 9286 §4.1RFC 9286 §4.2
Manifest 是一种 RPKI signed objectCMS 外壳见 `05_signed_object_cms.md`)。其 `eContentType``eContent` 的 ASN.1 由 RFC 9286 明确定义。RFC 9286 §4。
**eContentTypeOID**RFC 9286 §4.1。
```asn1
id-smime OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840)
rsadsi(113549) pkcs(1) pkcs9(9) 16 }
id-ct OBJECT IDENTIFIER ::= { id-smime 1 }
id-ct-rpkiManifest OBJECT IDENTIFIER ::= { id-ct 26 }
```
**eContentManifest 结构)**RFC 9286 §4.2。
```asn1
Manifest ::= SEQUENCE {
version [0] INTEGER DEFAULT 0,
manifestNumber INTEGER (0..MAX),
thisUpdate GeneralizedTime,
nextUpdate GeneralizedTime,
fileHashAlg OBJECT IDENTIFIER,
fileList SEQUENCE SIZE (0..MAX) OF FileAndHash
}
FileAndHash ::= SEQUENCE {
file IA5String,
hash BIT STRING
}
```
解码要点:
- `fileHashAlg` 决定 `FileAndHash.hash` 的算法与输出长度RPKI profile 要求 SHA-256。RFC 9286 §4.2.1RFC 7935 §2。
- `hash` 在 ASN.1 中是 BIT STRING但 hash 输出是按字节的比特串DER 编码时应为 “unused bits = 0” 的 octet-aligned BIT STRING实现可据此做一致性检查。RFC 9286 §4.2。
## 6.3 解析规则eContent 语义层)
输入:`RpkiSignedObject`
1) 先按通用 Signed Object 外壳解析得到 `encap_content_info.econtent_type``econtent_der`。RFC 6488 §3RFC 9589 §4。
2) 要求 `econtent_type == 1.2.840.113549.1.9.16.1.26`。RFC 9286 §4.1RFC 9286 §4.4(1)。
3) 将 `econtent_der` 以 DER 解析为 `Manifest` ASN.1。RFC 9286 §4.2。
4) 将 `fileList` 映射为语义字段 `files: list[FileAndHash]`,其中 `hash``fileHashAlg` 对应算法的输出字节序列。RFC 9286 §4.2.1fileHashAlg/fileList 定义)。
## 6.4 抽象数据模型(接口)
### 6.4.1 `ManifestObject`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `signed_object` | `RpkiSignedObject` | CMS 外壳 | 外壳约束见 RFC 6488/9589 | RFC 9286 §4RFC 6488 §3RFC 9589 §4 |
| `econtent_type` | `Oid` | eContentType | 必须为 `1.2.840.113549.1.9.16.1.26` | RFC 9286 §4.1 |
| `manifest` | `ManifestEContent` | eContent 语义对象 | 见下 | RFC 9286 §4.2 |
### 6.4.2 `ManifestEContent`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `version` | `int` | Manifest.version | MUST 为 0 | RFC 9286 §4.2.1version |
| `manifest_number` | `int` | manifestNumber | 0..MAX可达 20 octetsissuer 必须单调递增RP 必须可处理至 20 octets | RFC 9286 §4.2RFC 9286 §4.2.1manifestNumber |
| `this_update` | `UtcTime` | thisUpdate | 由 ASN.1 `GeneralizedTime` 解析为 UTC 时间点;且必须比先前生成的 manifest 更新 | RFC 9286 §4.2RFC 9286 §4.2.1thisUpdate |
| `next_update` | `UtcTime` | nextUpdate | 由 ASN.1 `GeneralizedTime` 解析为 UTC 时间点;且必须晚于 thisUpdate | RFC 9286 §4.2RFC 9286 §4.2.1nextUpdate |
| `file_hash_alg` | `Oid` | fileHashAlg | 必须为 `id-sha256``2.16.840.1.101.3.4.2.1` | RFC 9286 §4.2.1fileHashAlgRFC 7935 §2引用 RFC 5754 |
| `files` | `list[FileAndHash]` | fileList | `SEQUENCE SIZE (0..MAX)`;每项含文件名与 hash | RFC 9286 §4.2RFC 9286 §4.2.1fileList |
### 6.4.3 `FileAndHash`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `file_name` | `string` | 文件名(不含路径) | 字符集限制:`[a-zA-Z0-9-_]+` + `.` + 三字母扩展;扩展必须在 IANA “RPKI Repository Name Schemes” 注册表中 | RFC 9286 §4.2.2 |
| `hash_bytes` | `bytes` | 文件内容 hash | 由 `file_hash_alg` 指定算法计算 | RFC 9286 §4.2.1fileHashAlg/fileList |
## 6.5 字段级约束清单(实现对照)
- eContentType 必须为 `id-ct-rpkiManifest`OID `1.2.840.113549.1.9.16.1.26`。RFC 9286 §4.1。
- eContent 必须 DER 编码且符合 `Manifest` ASN.1。RFC 9286 §4.2。
- `version` 必须为 0。RFC 9286 §4.2.1。
- `manifestNumber` 由 issuer 单调递增RP 必须能处理至 20 octetsissuer 不得超过 20 octets。RFC 9286 §4.2.1。
- `nextUpdate` 必须晚于 `thisUpdate`。RFC 9286 §4.2.1。
- `fileHashAlg` 必须符合算法 profileSHA-256。RFC 9286 §4.2.1RFC 7935 §2。
- `fileList``file` 名称字符集与扩展名受限;实现需按 RFC 限制解析并保留大小写语义。RFC 9286 §4.2.2。
## 6.6 与 EE 证书的语义约束(为后续验证准备)
Manifest 使用“one-time-use EE certificate”进行签名验证规范对该 EE 证书的使用方式给出约束:
- Manifest 相关 EE 证书应为 one-time-use每次新 manifest 生成新密钥对/新 EE。RFC 9286 §4Section 4 前导段落)。
- 用于签名/验证 manifest 的 EE 证书在 RFC 3779 的资源扩展中描述 INRs 时,**MUST** 使用 `inherit`而不是显式列出资源集合。RFC 9286 §5.1(生成步骤 2
- 若证书包含 **IP Address Delegation Extension**IP prefix delegation内容必须为 `inherit`(不得显式列 prefix/range。RFC 9286 §5.1RFC 3779。
- 若证书包含 **AS Identifier Delegation Extension**ASN delegation内容必须为 `inherit`(不得显式列 ASID/range。RFC 9286 §5.1RFC 3779。
- 另外按资源证书 profile资源证书 **MUST** 至少包含上述两类扩展之一(也可两者都有),且这些扩展 **MUST** 标记为 critical。RFC 6487 §2。
- 用于验证 manifest 的 EE 证书 **MUST** 具有与 `thisUpdate..nextUpdate` 区间一致的有效期,以避免 CRL 无谓增长。RFC 9286 §4.2.1manifestNumber 段落前的说明)。
- 替换 manifest 时CA 必须撤销旧 manifest 对应 EE 证书;且若新 manifest 早于旧 manifest 的 nextUpdate 发行,则 CA **MUST** 同时发行新 CRL 撤销旧 manifest EE。RFC 9286 §4.2.1nextUpdate 段落末RFC 9286 §5.1(生成步骤)。

159
specs/07_roa.md Normal file
View File

@ -0,0 +1,159 @@
# 07. ROA (Route Origin Authorization)
## 7.1 对象定位
ROA 是一种 RPKI Signed Object用于声明“某 AS 被授权起源某些前缀”。RFC 9582 §1RFC 9582 §4。
ROA 由 CMS 外壳 + ROA eContent 组成:
- 外壳RFC 6488更新RFC 9589
- eContentRFC 9582
## 7.2 原始载体与编码
- 外壳CMS SignedData DER`05_signed_object_cms.md`。RFC 9582 §1引用 RFC 6488
- eContentType`id-ct-routeOriginAuthz`OID `1.2.840.113549.1.9.16.1.24`。RFC 9582 §3。
- eContentDER 编码 ASN.1 `RouteOriginAttestation`。RFC 9582 §4。
### 7.2.1 eContentType 与 eContent 的 ASN.1 定义RFC 9582 §3RFC 9582 §4
ROA 是一种 RPKI signed objectCMS 外壳见 `05_signed_object_cms.md`。RFC 9582 定义了其 `eContentType` 以及 `eContent`payload的 ASN.1。RFC 9582 §3-§4。
**eContentTypeOID**RFC 9582 §3。
```asn1
id-ct-routeOriginAuthz OBJECT IDENTIFIER ::=
{ iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
pkcs-9(9) id-smime(16) id-ct(1) routeOriginAuthz(24) }
```
**eContentROA ASN.1 模块)**RFC 9582 §4。
```asn1
RPKI-ROA-2023
{ iso(1) member-body(2) us(840) rsadsi(113549)
pkcs(1) pkcs9(9) smime(16) mod(0)
id-mod-rpkiROA-2023(75) }
DEFINITIONS EXPLICIT TAGS ::=
BEGIN
IMPORTS
CONTENT-TYPE
FROM CryptographicMessageSyntax-2010 -- in [RFC6268]
{ iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
pkcs-9(9) smime(16) modules(0) id-mod-cms-2009(58) } ;
ct-routeOriginAttestation CONTENT-TYPE ::=
{ TYPE RouteOriginAttestation
IDENTIFIED BY id-ct-routeOriginAuthz }
id-ct-routeOriginAuthz OBJECT IDENTIFIER ::=
{ iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
pkcs-9(9) id-smime(16) id-ct(1) routeOriginAuthz(24) }
RouteOriginAttestation ::= SEQUENCE {
version [0] INTEGER DEFAULT 0,
asID ASID,
ipAddrBlocks SEQUENCE (SIZE(1..2)) OF ROAIPAddressFamily }
ASID ::= INTEGER (0..4294967295)
ROAIPAddressFamily ::= SEQUENCE {
addressFamily ADDRESS-FAMILY.&afi ({AddressFamilySet}),
addresses ADDRESS-FAMILY.&Addresses
({AddressFamilySet}{@addressFamily}) }
ADDRESS-FAMILY ::= CLASS {
&afi OCTET STRING (SIZE(2)) UNIQUE,
&Addresses
} WITH SYNTAX { AFI &afi ADDRESSES &Addresses }
AddressFamilySet ADDRESS-FAMILY ::=
{ addressFamilyIPv4 | addressFamilyIPv6 }
addressFamilyIPv4 ADDRESS-FAMILY ::=
{ AFI afi-IPv4 ADDRESSES ROAAddressesIPv4 }
addressFamilyIPv6 ADDRESS-FAMILY ::=
{ AFI afi-IPv6 ADDRESSES ROAAddressesIPv6 }
afi-IPv4 OCTET STRING ::= '0001'H
afi-IPv6 OCTET STRING ::= '0002'H
ROAAddressesIPv4 ::= SEQUENCE (SIZE(1..MAX)) OF ROAIPAddress{ub-IPv4}
ROAAddressesIPv6 ::= SEQUENCE (SIZE(1..MAX)) OF ROAIPAddress{ub-IPv6}
ub-IPv4 INTEGER ::= 32
ub-IPv6 INTEGER ::= 128
ROAIPAddress {INTEGER: ub} ::= SEQUENCE {
address BIT STRING (SIZE(0..ub)),
maxLength INTEGER (0..ub) OPTIONAL }
END
```
编码/解码要点(与上面 ASN.1 结构直接对应):
- `addressFamily` 仅允许 IPv4/IPv6 两种 AFI并且每个 AFI 最多出现一次。RFC 9582 §4.3.1。
- `address` 是 BIT STRING 表示的前缀,语义与 RFC 3779 的 `IPAddress` 一致按前缀长度截断DER unused bits 置零。RFC 9582 §4.3.2.1(引用 RFC 3779 §2.2.3.8)。
- `maxLength` 为可选字段出现与否会影响语义与编码规范约束例如等于前缀长时不建议编码。RFC 9582 §4.3.2.2。
## 7.3 解析规则eContent 语义层)
输入:`RpkiSignedObject`
1) 解析 CMS 外壳,得到 `econtent_type``econtent_der`。RFC 6488 §3RFC 9589 §4。
2) 要求 `econtent_type == 1.2.840.113549.1.9.16.1.24`。RFC 9582 §3。
3) 将 `econtent_der` 以 DER 解析为 `RouteOriginAttestation` ASN.1。RFC 9582 §4。
4) 将 `ipAddrBlocks` 解析为“前缀集合”的语义结构,并建议按 RFC 9582 给出的 canonicalization 过程做去重/排序/归一化以便后续处理一致。RFC 9582 §4.3.3。
## 7.4 抽象数据模型(接口)
### 7.4.1 `RoaObject`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `signed_object` | `RpkiSignedObject` | CMS 外壳 | 外壳约束见 RFC 6488/9589 | RFC 9582 §1RFC 6488 §3RFC 9589 §4 |
| `econtent_type` | `Oid` | eContentType | 必须为 `1.2.840.113549.1.9.16.1.24` | RFC 9582 §3 |
| `roa` | `RoaEContent` | eContent 语义对象 | 见下 | RFC 9582 §4 |
### 7.4.2 `RoaEContent`RouteOriginAttestation
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `version` | `int` | version | MUST 为 0 | RFC 9582 §4.1 |
| `as_id` | `int` | asID | 0..4294967295 | RFC 9582 §4ASID 定义RFC 9582 §4.2 |
| `ip_addr_blocks` | `list[RoaIpAddressFamily]` | ipAddrBlocks | `SIZE(1..2)`;最多 IPv4/IPv6 各一个;建议 canonicalize | RFC 9582 §4RFC 9582 §4.3.1RFC 9582 §4.3.3 |
### 7.4.3 `RoaIpAddressFamily`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `afi` | `enum { ipv4, ipv6 }` | Address Family | MUST 为 IPv4(0001) 或 IPv6(0002) | RFC 9582 §4.3.1 |
| `addresses` | `list[RoaIpAddress]` | 前缀列表 | `SIZE(1..MAX)`;每项为前缀 + 可选 maxLength | RFC 9582 §4ROAAddressesIPv4/IPv6RFC 9582 §4.3.2 |
### 7.4.4 `RoaIpAddress`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `prefix` | `IpPrefix` | 前缀 | address 以 BIT STRING 表示前缀(同 RFC 3779 IPAddress 表示) | RFC 9582 §4.3.2.1(引用 RFC 3779 §2.2.3.8 |
| `max_length` | `optional[int]` | 最大允许前缀长 | 若存在:必须 `>= prefix_len``<= 32/128`;并且 `maxLength == prefix_len`**SHOULD NOT** 编码(未来 RP 可能视为编码错误) | RFC 9582 §4.3.2.2 |
## 7.5 字段级约束清单(实现对照)
- eContentType 必须为 `id-ct-routeOriginAuthz`OID `1.2.840.113549.1.9.16.1.24`),且该 OID 必须同时出现在 eContentType 与 signedAttrs.content-type。RFC 9582 §3引用 RFC 6488
- eContent 必须 DER 编码并符合 `RouteOriginAttestation` ASN.1。RFC 9582 §4。
- `version` 必须为 0。RFC 9582 §4.1。
- `ipAddrBlocks` 长度为 1..2;每种 AFI 最多出现一次;仅支持 IPv4/IPv6。RFC 9582 §4RFC 9582 §4.3.1。
- `maxLength` 若存在必须在范围内且不应出现“等于前缀长”的冗余编码。RFC 9582 §4.3.2.2。
- 建议按 canonical form 归一化/排序以利一致处理。RFC 9582 §4.3.3。
## 7.6 与 EE 证书的语义约束(为后续验证准备)
ROA 的外壳包含一个 EE 证书,用于验证 ROA 签名RFC 对该 EE 证书与 ROA payload 的匹配关系提出要求:
- ROA 的 EE 证书必须是有效的 RPKI EE 证书(路径从 TA 到 EE 可建立),并用于验证 CMS 签名。RFC 9582 §1引用 RFC 6488RFC 6488 §3(2)-(3)。
- ROA EE 证书中的 IP 资源扩展必须存在且不得使用 inherit。RFC 9582 §5。
- ROA EE 证书中 AS 资源扩展不得出现。RFC 9582 §5。
- ROA payload 中每个前缀必须包含在 EE 证书的 IP 资源集合内(资源包含语义来自 RFC 3779。RFC 9582 §5RFC 3779 §2.3。

100
specs/08_aspa.md Normal file
View File

@ -0,0 +1,100 @@
# 08. ASPA (Autonomous System Provider Authorization)
## 8.1 对象定位
ASPAAutonomous System Provider Authorization是一种 RPKI Signed Object用于由“客户 AS”Customer AS, CAS签名声明其上游“提供者 AS”Provider AS, PAS集合以支持路由泄漏route leak检测/缓解。`draft-ietf-sidrops-aspa-profile-21``draft-ietf-sidrops-aspa-verification`
ASPA 由 CMS 外壳 + ASPA eContent 组成:
- 外壳RFC 6488更新RFC 9589
- eContent`draft-ietf-sidrops-aspa-profile-21`
## 8.2 原始载体与编码
- 外壳CMS SignedData DER`05_signed_object_cms.md`。RFC 6488 §2-§3RFC 9589 §4。
- eContentType`id-ct-ASPA`OID `1.2.840.113549.1.9.16.1.49``draft-ietf-sidrops-aspa-profile-21` §2。
- eContentDER 编码 ASN.1 `ASProviderAttestation``draft-ietf-sidrops-aspa-profile-21` §3。
### 8.2.1 eContentType 与 eContent 的 ASN.1 定义(`draft-ietf-sidrops-aspa-profile-21` §2-§3
ASPA 是一种 RPKI signed objectCMS 外壳见 `05_signed_object_cms.md`)。其 `eContentType``eContent`payload的 ASN.1 定义见 `draft-ietf-sidrops-aspa-profile-21` §2-§3。
**eContentTypeOID**`draft-ietf-sidrops-aspa-profile-21` §2。
```asn1
id-ct-ASPA OBJECT IDENTIFIER ::=
{ iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
pkcs-9(9) id-smime(16) id-ct(1) aspa(49) }
```
**eContentASPA ASN.1 模块)**`draft-ietf-sidrops-aspa-profile-21` §3。
```asn1
ASProviderAttestation ::= SEQUENCE {
version [0] INTEGER DEFAULT 0,
customerASID ASID,
providers ProviderASSet }
ProviderASSet ::= SEQUENCE (SIZE(1..MAX)) OF ASID
ASID ::= INTEGER (0..4294967295)
```
编码/解码要点(与上面 ASN.1 结构直接对应):
- `version`:规范要求 **MUST 为 1****MUST 显式编码**(不得依赖 DEFAULT 省略)。`draft-ietf-sidrops-aspa-profile-21` §3.1。
- `customerASID`:客户 AS 号CAS`draft-ietf-sidrops-aspa-profile-21` §3.2。
- `providers`:授权的提供者 AS 集合SPAS。并对“自包含/排序/去重”施加额外约束(见下)。`draft-ietf-sidrops-aspa-profile-21` §3.3。
## 8.3 解析规则eContent 语义层)
输入:`RpkiSignedObject`
1) 解析 CMS 外壳,得到 `econtent_type``econtent_der`。RFC 6488 §3RFC 9589 §4。
2) 要求 `econtent_type == 1.2.840.113549.1.9.16.1.49``draft-ietf-sidrops-aspa-profile-21` §2。
3) 将 `econtent_der` 以 DER 解析为 `ASProviderAttestation` ASN.1。`draft-ietf-sidrops-aspa-profile-21` §3。
4) 将 `providers` 映射为语义字段 `provider_as_ids: list[int]`,并对其执行“约束检查/(可选)归一化”。`draft-ietf-sidrops-aspa-profile-21` §3.3。
## 8.4 抽象数据模型(接口)
### 8.4.1 `AspaObject`
| 字段 | 类型 | 语义 | 约束/解析规则 | 规范引用 |
|---|---|---|---|---|
| `signed_object` | `RpkiSignedObject` | CMS 外壳 | 外壳约束见 RFC 6488/9589 | RFC 6488 §3RFC 9589 §4 |
| `econtent_type` | `Oid` | eContentType | 必须为 `1.2.840.113549.1.9.16.1.49` | `draft-ietf-sidrops-aspa-profile-21` §2 |
| `aspa` | `AspaEContent` | eContent 语义对象 | 见下 | `draft-ietf-sidrops-aspa-profile-21` §3 |
### 8.4.2 `AspaEContent`ASProviderAttestation
| 字段 | 类型 | 语义 | 约束/解析规则 | 规范引用 |
|---|---|---|---|---|
| `version` | `int` | ASPA 版本 | MUST 为 `1` 且 MUST 显式编码(字段不可省略) | `draft-ietf-sidrops-aspa-profile-21` §3.1 |
| `customer_as_id` | `int` | Customer ASID | 0..4294967295 | `draft-ietf-sidrops-aspa-profile-21` §3.2ASID 定义) |
| `provider_as_ids` | `list[int]` | Provider ASID 列表SPAS | 长度 `>= 1`;且满足“不得包含 customer、升序、去重” | `draft-ietf-sidrops-aspa-profile-21` §3.3 |
## 8.5 字段级约束清单(实现对照)
- eContentType 必须为 `id-ct-ASPA`OID `1.2.840.113549.1.9.16.1.49`),且该 OID 必须同时出现在 eContentType 与 signedAttrs.content-type。`draft-ietf-sidrops-aspa-profile-21` §2引用 RFC 6488
- eContent 必须 DER 编码并符合 `ASProviderAttestation` ASN.1。`draft-ietf-sidrops-aspa-profile-21` §3。
- `version` 必须为 1且必须显式编码缺失视为不合规`draft-ietf-sidrops-aspa-profile-21` §3.1。
- `providers``provider_as_ids`)必须满足:
- `customer_as_id` **MUST NOT** 出现在 `provider_as_ids` 中;
- `provider_as_ids` 必须按数值 **升序排序**
- `provider_as_ids` 中每个 ASID 必须 **唯一**`draft-ietf-sidrops-aspa-profile-21` §3.3。
## 8.6 与 EE 证书的语义约束(为后续验证准备)
ASPA 的外壳包含一个 EE 证书,用于验证 ASPA 签名;规范对该 EE 证书与 ASPA payload 的匹配关系提出要求:
- EE 证书必须包含 AS 资源扩展Autonomous System Identifier Delegation Extension`customer_as_id` 必须与该扩展中的 ASId 匹配。`draft-ietf-sidrops-aspa-profile-21` §4引用 RFC 3779
- EE 证书的 AS 资源扩展 **必须**
- 恰好包含 1 个 `id` 元素;
- **不得**包含 `inherit` 元素;
- **不得**包含 `range` 元素。`draft-ietf-sidrops-aspa-profile-21` §4引用 RFC 3779 §3.2.3.3 / §3.2.3.6 / §3.2.3.7)。
- EE 证书 **不得**包含 IP 资源扩展IP Address Delegation Extension`draft-ietf-sidrops-aspa-profile-21` §4引用 RFC 3779
## 8.7 实现建议(非规范约束)
`draft-ietf-sidrops-aspa-profile-21` 给出了一条 RP 侧建议:实现可对单个 `customer_as_id``provider_as_ids` 数量施加上界(例如 4,000~10,000超过阈值时建议将该 `customer_as_id` 的所有 ASPA 视为无效并记录错误日志。`draft-ietf-sidrops-aspa-profile-21` §6。

View File

@ -0,0 +1,87 @@
# 09. Ghostbusters Record (GBR)
## 9.1 对象定位
Ghostbusters RecordGBR是一个可选的 RPKI Signed Object用于承载“联系人信息”人类可读的联系渠道以便在证书过期、CRL 失效、密钥轮换等事件中能够联系到维护者。RFC 6493 §1。
GBR 由 CMS 外壳 + vCard 载荷组成:
- 外壳RFC 6488更新RFC 9589
- 载荷payloadRFC 6493 定义的 vCard profile基于 RFC 6350 vCard 4.0 的严格子集。RFC 6493 §5。
## 9.2 原始载体与编码
- 外壳CMS SignedData DER`05_signed_object_cms.md`。RFC 6493 §6引用 RFC 6488
- eContentType`id-ct-rpkiGhostbusters`OID `1.2.840.113549.1.9.16.1.35`。RFC 6493 §6RFC 6493 §9.1。
- eContent一个 OCTET STRING其 octets 是 vCard 文本vCard 4.0,且受 RFC 6493 的 profile 约束。RFC 6493 §5RFC 6493 §6。
> 说明:与 ROA/MFT 这类“eContent 内部再 DER 解码为 ASN.1 结构”的对象不同GBR 的 eContent 语义上就是“vCard 文本内容本身”(由 Signed Object Template 的 `eContent OCTET STRING` 承载。RFC 6493 §6。
## 9.3 vCard profileRFC 6493 §5
GBR 的 vCard payload 是 RFC 6350 vCard 4.0 的严格子集仅允许以下属性properties
- `BEGIN`:必须为第一行,值必须为 `BEGIN:VCARD`。RFC 6493 §5。
- `VERSION`:必须为第二行,值必须为 `VERSION:4.0`。RFC 6493 §5引用 RFC 6350 §3.7.9)。
- `FN`联系人姓名或角色名。RFC 6493 §5引用 RFC 6350 §6.2.1)。
- `ORG`组织信息可选。RFC 6493 §5引用 RFC 6350 §6.6.4)。
- `ADR`邮寄地址可选。RFC 6493 §5引用 RFC 6350 §6.3)。
- `TEL`:语音/传真电话可选。RFC 6493 §5引用 RFC 6350 §6.4.1)。
- `EMAIL`邮箱可选。RFC 6493 §5引用 RFC 6350 §6.4.2)。
- `END`:必须为最后一行,值必须为 `END:VCARD`。RFC 6493 §5。
额外约束:
- `BEGIN``VERSION``FN``END` 必须包含。RFC 6493 §5。
- 为保证可用性,`ADR`/`TEL`/`EMAIL` 三者中至少一个必须包含。RFC 6493 §5。
- 除上述属性外,**其他属性 MUST NOT** 出现。RFC 6493 §5。
## 9.4 解析规则payload 语义层)
输入:`RpkiSignedObject`
1) 解析 CMS 外壳,得到 `econtent_type``econtent_bytes`。RFC 6488 §3RFC 9589 §4。
2) 要求 `econtent_type == 1.2.840.113549.1.9.16.1.35`。RFC 6493 §6。
3) 将 `econtent_bytes` 解析为 vCard 文本,并按 RFC 6493 §5 的 profile 校验(属性集合、必选项、行首/行尾约束。RFC 6493 §5RFC 6493 §7。
4) 通过校验后,将允许属性映射为 `GhostbustersVCard` 语义对象(见下)。
## 9.5 抽象数据模型(接口)
### 9.5.1 `GhostbustersObject`
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `signed_object` | `RpkiSignedObject` | CMS 外壳 | 外壳约束见 RFC 6488/9589 | RFC 6493 §6RFC 6488 §3RFC 9589 §4 |
| `econtent_type` | `Oid` | eContentType | 必须为 `1.2.840.113549.1.9.16.1.35` | RFC 6493 §6 |
| `vcard` | `GhostbustersVCard` | vCard 语义对象 | 由 eContent 文本解析并校验 profile | RFC 6493 §5RFC 6493 §7 |
### 9.5.2 `GhostbustersVCard`vCard 4.0 profile
| 字段 | 类型 | 语义 | 约束/解析规则 | RFC 引用 |
|---|---|---|---|---|
| `raw_text` | `string` | 原始 vCard 文本 | 由 eContent bytes 解码得到;用于保留原文/诊断 | RFC 6493 §5-§7 |
| `fn` | `string` | 联系人姓名/角色名 | `FN` 必须存在 | RFC 6493 §5 |
| `org` | `optional[string]` | 组织 | `ORG` 可选 | RFC 6493 §5 |
| `adrs` | `list[string]` | 邮寄地址(原始 ADR value | 允许 0..N至少满足“ADR/TEL/EMAIL 至少一项存在” | RFC 6493 §5 |
| `tels` | `list[Uri]` | 电话 URI从 TEL 提取的 `tel:` 等 URI | 允许 0..N至少满足“ADR/TEL/EMAIL 至少一项存在” | RFC 6493 §5 |
| `emails` | `list[string]` | 邮箱地址 | 允许 0..N至少满足“ADR/TEL/EMAIL 至少一项存在” | RFC 6493 §5 |
> 说明RFC 6493 并未要求 RP 必须完整解析 vCard 参数(例如 `TYPE=WORK``VALUE=uri`),因此此处将 `ADR`/`TEL`/`EMAIL` 建模为“足以联络”的最小语义集合;实现可在保留 `raw_text` 的同时按 RFC 6350 扩展解析能力。
## 9.6 字段级约束清单(实现对照)
- eContentType 必须为 `id-ct-rpkiGhostbusters`OID `1.2.840.113549.1.9.16.1.35`),且该 OID 必须同时出现在 eContentType 与 signedAttrs.content-type。RFC 6493 §6引用 RFC 6488
- eContent 必须是 vCard 4.0 文本,且必须满足 RFC 6493 §5 的 profile
- 第一行 `BEGIN:VCARD`
- 第二行 `VERSION:4.0`
- 末行 `END:VCARD`
- 必须包含 `FN`
- `ADR`/`TEL`/`EMAIL` 至少一个存在;
- 除允许集合外不得出现其他属性。RFC 6493 §5RFC 6493 §7。
## 9.7 与 EE 证书的语义约束(为后续验证准备)
GBR 使用 CMS 外壳内的 EE 证书验证签名。RFC 6493 对该 EE 证书提出一条资源扩展约束:
- 用于验证 GBR 的 EE 证书在描述 Internet Number Resources 时,必须使用 `inherit`而不是显式资源集合。RFC 6493 §6引用 RFC 3779

5532
specs/arch.excalidraw Normal file

File diff suppressed because it is too large Load Diff

4034
specs/cir.excalidraw Normal file

File diff suppressed because it is too large Load Diff

196
specs/cir_draft.md Normal file
View File

@ -0,0 +1,196 @@
---
**Internet-Draft** Yirong Yu
**Intended status: Standards Track** Zhongguancun Laboratory
**Expires: October 2026** April 2026
# A Profile for Resource Public Key Infrastructure (RPKI) Canonical Input Representation (CIR)
## draft-yu-sidrops-rpki-cir-00
### Abstract
This document specifies a Canonical Input Representation (CIR) content type for use with the Resource Public Key Infrastructure (RPKI). While the Canonical Cache Representation (CCR) profiles the *validated* output state of a Relying Party (RP), CIR is a DER-encoded data interchange format used to represent the exact, *unvalidated* raw input data fetched by an RP at a particular point in time. The CIR profile provides a deterministic "world view" snapshot, enabling advanced operational capabilities such as differential testing, failure path debugging, and highly accurate historical black-box replay of RPKI validation logic.
### Status of This Memo
TBD
### Table of Contents
1. Introduction
1.1. Requirements Language
2. Motivation and Architecture
3. The Canonical Input Representation Content Type
4. The Canonical Input Representation Content
4.1. version
4.2. metaInfo
4.3. BaseCIR Fields
4.4. DeltaCIR Fields
5. Operational Considerations
5.1. Differential Testing and Historical Replay
5.2. Delta Compression for Archival
6. Security Considerations
7. IANA Considerations
8. References
---
### 1. Introduction
This document specifies a Canonical Input Representation (CIR) content type for use with the Resource Public Key Infrastructure (RPKI).
A Relying Party (RP) fetches RPKI objects from publication points using protocols such as rsync [RFC5781] or RRDP [RFC8182] prior to executing cryptographic validation. While the Canonical Cache Representation (CCR) [draft-ietf-sidrops-rpki-ccr] accurately describes the subset of objects that successfully passed validation, it inherently omits objects that were rejected due to format errors, invalid signatures, or expired timestamps (survivorship bias).
CIR records the precise mapping of object URIs to their cryptographic hashes *before* validation occurs. By decoupling the network transport layer from the validation layer, CIR allows researchers and operators to reconstruct the exact physical file tree (the "dirty inputs") perceived by a vantage point.
#### 1.1. Requirements Language
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in BCP 14 [RFC2119] [RFC8174] when, and only when, they appear in all capitals, as shown here.
### 2. Motivation and Architecture
CIR is designed to solve the "time paradox" and "state desynchronization" problems inherent to RPKI historical archiving. It defines two distinct operational modes:
* **Base CIR**: A complete snapshot of all fetched Trust Anchor Locators (TALs) and RPKI objects, typically generated by an RP immediately after a synchronization cycle.
* **Delta CIR**: A compressed representation generated by offline archival processes, describing the additions, modifications, and deletions between two chronological Base CIR snapshots.
### 3. The Canonical Input Representation Content Type
The content of a CIR file is an instance of `ContentInfo`.
The `contentType` for a CIR is defined as `id-ct-rpkiCanonicalInputRepresentation`, with Object Identifier (OID) `[TBD-OID]`.
The content is an instance of `RpkiCanonicalInputRepresentation`.
### 4. The Canonical Input Representation Content
The content of a Canonical Input Representation is formally defined using ASN.1. To ensure absolute deterministic serialization, CIR MUST be encoded using Distinguished Encoding Rules (DER, [X.690]).
```asn.1
RpkiCanonicalInputRepresentation-2026
{ iso(1) member-body(2) us(840) rsadsi(113549)
pkcs(1) pkcs9(9) smime(16) mod(0) id-mod-rpkiCIR-2026(TBD) }
DEFINITIONS EXPLICIT TAGS ::=
BEGIN
IMPORTS
CONTENT-TYPE, Digest
FROM CryptographicMessageSyntax-2010 -- in [RFC6268]
;
ContentInfo ::= SEQUENCE {
contentType CONTENT-TYPE.&id({ContentSet}),
content [0] EXPLICIT CONTENT-TYPE.&Type({ContentSet}{@contentType}) }
ContentSet CONTENT-TYPE ::= {
ct-rpkiCanonicalInputRepresentation, ... }
ct-rpkiCanonicalInputRepresentation CONTENT-TYPE ::=
{ TYPE RpkiCanonicalInputRepresentation
IDENTIFIED BY id-ct-rpkiCanonicalInputRepresentation }
id-ct-rpkiCanonicalInputRepresentation OBJECT IDENTIFIER ::=
{ iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
pkcs-9(9) id-smime(16) id-ct(1) cir(TBD) }
RpkiCanonicalInputRepresentation ::= CHOICE {
baseCIR [0] BaseCIR,
deltaCIR [1] DeltaCIR
}
BaseCIR ::= SEQUENCE {
version INTEGER DEFAULT 0,
metaInfo CIRMetaInfo,
talList SEQUENCE OF URIAndHash,
objectList SEQUENCE OF URIAndHash
}
DeltaCIR ::= SEQUENCE {
version INTEGER DEFAULT 0,
metaInfo CIRMetaInfo,
talChanges [0] DeltaChanges OPTIONAL,
objectChanges [1] DeltaChanges
}
DeltaChanges ::= SEQUENCE {
upserted [0] SEQUENCE OF URIAndHash OPTIONAL,
removed [1] SEQUENCE OF IA5String OPTIONAL
}
CIRMetaInfo ::= SEQUENCE {
validationTime GeneralizedTime,
rpSoftware [0] UTF8String OPTIONAL,
rpVersion [1] UTF8String OPTIONAL,
observerID [2] UTF8String OPTIONAL
}
URIAndHash ::= SEQUENCE {
uri IA5String,
hash OCTET STRING,
source [0] SourceType OPTIONAL
}
SourceType ::= ENUMERATED {
rsync (0),
rrdp (1),
https (2),
erik (3),
cache (4),
other (5)
}
END
```
#### 4.1. version
The version field contains the format version for the structure. In this version of the specification, it MUST be `0`.
#### 4.2. metaInfo
The `metaInfo` structure provides crucial temporal and environmental context:
* **validationTime**: Contains a `GeneralizedTime` indicating the moment the synchronization concluded. This timestamp is REQUIRED, as it is strictly necessary to freeze the system clock when replaying RPKI validation logic to evaluate time-sensitive object expiration.
* **rpSoftware / rpVersion / observerID**: OPTIONAL metadata to identify the specific software and observation vantage point generating the CIR.
#### 4.3. BaseCIR Fields
* **talList**: A sequence of `URIAndHash` representing the Trust Anchor Locators used as the root of validation.
* **objectList**: A sequence of `URIAndHash` representing every raw file fetched by the RP. The `uri` MUST be the absolute logical address (e.g., `rsync://...`), and the `hash` MUST be the SHA-256 digest of the raw file.
* **source**: An OPTIONAL enumerated value indicating the network transport or cache layer from which the file was successfully obtained (e.g., `rrdp`, `rsync`).
#### 4.4. DeltaCIR Fields
To support compact archival, `DeltaCIR` describes changes relative to a preceding `BaseCIR` or `DeltaCIR`:
* **upserted**: A sequence of `URIAndHash` for newly discovered objects or objects where the URI remained identical but the cryptographic Hash changed.
* **removed**: A sequence of `IA5String` containing URIs that were present in the previous snapshot but are no longer observed.
### 5. Operational Considerations
#### 5.1. Differential Testing and Historical Replay
Because CIR captures the global input state *regardless* of object validity, it allows operators to construct an isolated physical sandbox matching the exact network state at `validationTime`. By injecting this state into different RP software implementations (using native functionality like `--disable-rrdp` coupled with local rsync wrappers), operators can perform deterministic differential testing. Discrepancies in the resulting CCR outputs indicate implementation bugs or vulnerabilities in boundary-case handling.
#### 5.2. Delta Compression for Archival
Given that the global RPKI repository experiences relatively low churn within short timeframes (e.g., 10-minute intervals), `DeltaCIR` significantly reduces storage overhead. Archival systems SHOULD compute `DeltaCIR` sequences from raw `BaseCIR` outputs to facilitate efficient streaming historical replays.
### 6. Security Considerations
Unlike RPKI signed objects, CIR objects are not cryptographically signed by CAs. They are observational records.
CIR explicitly permits the indexing of corrupted, malicious, or malformed ASN.1 objects. Parsers ingesting CIR to reconstruct sandboxes MUST NOT attempt to parse or cryptographically validate the objects referenced by the hashes; they MUST treat them as opaque binary blobs to be placed in the file system for the target RP to evaluate.
### 7. IANA Considerations
IANA is requested to register the media type `application/rpki-cir`, the file extension `.cir`, and the necessary SMI Security for S/MIME Module Identifiers (OIDs), modeled identically to the IANA considerations defined in the CCR specification.
### 8. References
*[Standard IETF references for RFC 2119, RFC 8174, RFC 6488, RFC 8182, etc. to be populated]*
---
**Editorial note (to be resolved before submission)**:
Before submitting this draft to the IETF SIDROPS working group, the `[TBD]` OID placeholders must be allocated, and the ASN.1 module should be run through a compiler (e.g., `asn1c`) to confirm there are no implicit tagging ambiguities in the `CHOICE` and `OPTIONAL` fields.

16264
specs/delta.excalidraw Normal file

File diff suppressed because it is too large Load Diff

629
specs/sync.excalidraw Normal file
View File

@ -0,0 +1,629 @@
{
"type": "excalidraw",
"version": 2,
"source": "https://marketplace.visualstudio.com/items?itemName=pomdtr.excalidraw-editor",
"elements": [
{
"id": "782wmN2vbn0vYfClUbwVT",
"type": "rectangle",
"x": 458.5143563406808,
"y": 224.57136099679133,
"width": 335.08570861816406,
"height": 143.99998474121094,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a0",
"roundness": {
"type": 3
},
"seed": 264486616,
"version": 304,
"versionNonce": 505039016,
"isDeleted": false,
"boundElements": [
{
"type": "text",
"id": "491W0AyWpioiNNXTRuXMb"
}
],
"updated": 1774499907328,
"link": null,
"locked": false
},
{
"id": "491W0AyWpioiNNXTRuXMb",
"type": "text",
"x": 514.987287248884,
"y": 246.5713533673968,
"width": 222.1398468017578,
"height": 100,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a1",
"roundness": null,
"seed": 1658952360,
"version": 360,
"versionNonce": 404419496,
"isDeleted": false,
"boundElements": null,
"updated": 1774499907328,
"link": null,
"locked": false,
"text": "RAW BY HASH\nsha256 -> file content\n(.mft/.roa/.cer)\n通过hash找原始文件",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "782wmN2vbn0vYfClUbwVT",
"originalText": "RAW BY HASH\nsha256 -> file content (.mft/.roa/.cer)\n通过hash找原始文件",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "4v-5vJwc-YSKwDDA6wnNy",
"type": "rectangle",
"x": 86.74286869594027,
"y": 224.1142785208566,
"width": 332.79998561314164,
"height": 141.99999128069192,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a2",
"roundness": {
"type": 3
},
"seed": 1176722904,
"version": 312,
"versionNonce": 1631513048,
"isDeleted": false,
"boundElements": [
{
"type": "text",
"id": "tug_6QGIm4LrnsrYiV18D"
}
],
"updated": 1774499913411,
"link": null,
"locked": false
},
{
"id": "tug_6QGIm4LrnsrYiV18D",
"type": "text",
"x": 109.85297502790172,
"y": 245.11427416120256,
"width": 286.57977294921875,
"height": 100,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a3",
"roundness": null,
"seed": 1923736280,
"version": 409,
"versionNonce": 980676312,
"isDeleted": false,
"boundElements": [],
"updated": 1774499913411,
"link": null,
"locked": false,
"text": "REPOSITORY VIEW\nuri -> sha256(current version\nfile)\n对象uri 查找最新hash",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "4v-5vJwc-YSKwDDA6wnNy",
"originalText": "REPOSITORY VIEW\nuri -> sha256(current version file)\n对象uri 查找最新hash",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "V2AcY1746pbG544Yh7A7q",
"type": "rectangle",
"x": 90.85720498221252,
"y": -2.5142985752651725,
"width": 218.51431492396773,
"height": 205.71430751255576,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a4",
"roundness": {
"type": 3
},
"seed": 436378328,
"version": 498,
"versionNonce": 113477080,
"isDeleted": false,
"boundElements": [
{
"type": "text",
"id": "WVFatIOjK3SB8FDetV4ts"
}
],
"updated": 1774499431839,
"link": null,
"locked": false
},
{
"id": "WVFatIOjK3SB8FDetV4ts",
"type": "text",
"x": 110.4543816702706,
"y": 37.84285518101271,
"width": 179.31996154785156,
"height": 125,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a5",
"roundness": null,
"seed": 1637449688,
"version": 675,
"versionNonce": 1502150312,
"isDeleted": false,
"boundElements": [],
"updated": 1774499518593,
"link": null,
"locked": false,
"text": "RRDP SOURCE\nSTATE\nnotify -> state\n(session, serial)\n不同rrdp源同步状态",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "V2AcY1746pbG544Yh7A7q",
"originalText": "RRDP SOURCE STATE\nnotify -> state (session, serial)\n不同rrdp源同步状态",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "KaZIF4nN5lJcP8jlzm2ze",
"type": "rectangle",
"x": 333.25717054094576,
"y": -5.54285212925501,
"width": 217.3714316231864,
"height": 206.7142813546317,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a6",
"roundness": {
"type": 3
},
"seed": 1602529240,
"version": 528,
"versionNonce": 138363048,
"isDeleted": false,
"boundElements": [
{
"type": "text",
"id": "zs86EXmSVt0DGU7Yilr06"
}
],
"updated": 1774499548936,
"link": null,
"locked": false
},
{
"id": "zs86EXmSVt0DGU7Yilr06",
"type": "text",
"x": 341.94288635253895,
"y": 22.814288548060844,
"width": 200,
"height": 150,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a7",
"roundness": null,
"seed": 626126040,
"version": 865,
"versionNonce": 1330001880,
"isDeleted": false,
"boundElements": [],
"updated": 1774499555053,
"link": null,
"locked": false,
"text": "RRDP SOURCE\nMEMBER\nsource+ uri ->\npresent/withdraw\n前缀遍历获取rrdp源下\n全部对象不同源混放",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "KaZIF4nN5lJcP8jlzm2ze",
"originalText": "RRDP SOURCE MEMBER\nsource+ uri -> present/withdraw\n前缀遍历获取rrdp源下全部对象不同源混放",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "w4ratSPiaf_sJhmxzs_zB",
"type": "rectangle",
"x": 575.7714941842216,
"y": -5.842858450753411,
"width": 217.3714316231864,
"height": 206.7142813546317,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a8",
"roundness": {
"type": 3
},
"seed": 1385028264,
"version": 564,
"versionNonce": 1503452584,
"isDeleted": false,
"boundElements": [
{
"type": "text",
"id": "t7Y2vDpAmPdo00qNs6Lxp"
}
],
"updated": 1774499442854,
"link": null,
"locked": false
},
{
"id": "t7Y2vDpAmPdo00qNs6Lxp",
"type": "text",
"x": 584.7972292218891,
"y": 47.51428222656244,
"width": 199.31996154785156,
"height": 100,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "a9",
"roundness": null,
"seed": 1548502440,
"version": 945,
"versionNonce": 1851848664,
"isDeleted": false,
"boundElements": [],
"updated": 1774499514167,
"link": null,
"locked": false,
"text": "RRDP URI OWNER\nuri -> source\n反查对象所属rrdp源\n防止跨源误删",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "w4ratSPiaf_sJhmxzs_zB",
"originalText": "RRDP URI OWNER\nuri -> source\n反查对象所属rrdp源防止跨源误删",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "iHWL3p3MaRLZ-l7Es74es",
"type": "rectangle",
"x": 89.02865600585938,
"y": 398.08574567522317,
"width": 339.42862374441967,
"height": 161.14283970424117,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aA",
"roundness": {
"type": 3
},
"seed": 600580568,
"version": 174,
"versionNonce": 863743704,
"isDeleted": false,
"boundElements": [
{
"type": "text",
"id": "gi3C5lbily2-D96ZNdUB_"
}
],
"updated": 1774499918860,
"link": null,
"locked": false
},
{
"id": "gi3C5lbily2-D96ZNdUB_",
"type": "text",
"x": 105.99298313685827,
"y": 441.15716552734375,
"width": 305.4999694824219,
"height": 75,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aAV",
"roundness": null,
"seed": 1096467112,
"version": 228,
"versionNonce": 315626456,
"isDeleted": false,
"boundElements": null,
"updated": 1774499918860,
"link": null,
"locked": false,
"text": "VCIR\n按照CA为单元记录已验证缓存的\nRPKI对象产物树状结构",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "iHWL3p3MaRLZ-l7Es74es",
"originalText": "VCIR\n按照CA为单元记录已验证缓存的RPKI对象产物树状结构",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "J6aHbqCN1b8plYxKAKfYT",
"type": "rectangle",
"x": 454.17152186802457,
"y": 399.80005972725996,
"width": 339.42862374441967,
"height": 161.14283970424117,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aC",
"roundness": {
"type": 3
},
"seed": 1274007512,
"version": 244,
"versionNonce": 877648088,
"isDeleted": false,
"boundElements": [
{
"type": "text",
"id": "ernK0EMAzhxJvpYUaWPiS"
}
],
"updated": 1774499918860,
"link": null,
"locked": false
},
{
"id": "ernK0EMAzhxJvpYUaWPiS",
"type": "text",
"x": 461.35585021972656,
"y": 442.87147957938055,
"width": 325.0599670410156,
"height": 75,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aD",
"roundness": null,
"seed": 706221272,
"version": 431,
"versionNonce": 2072968664,
"isDeleted": false,
"boundElements": [],
"updated": 1774499918860,
"link": null,
"locked": false,
"text": "AUDIT RULE INDEX\n溯源审计用户通过产物规则hash反\n向查找对应VCIR节点",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "center",
"verticalAlign": "middle",
"containerId": "J6aHbqCN1b8plYxKAKfYT",
"originalText": "AUDIT RULE INDEX\n溯源审计用户通过产物规则hash反向查找对应VCIR节点",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "kkkXT2D6yQsceW2UyfJPF",
"type": "text",
"x": -121.25701032366032,
"y": 73.5142887660437,
"width": 186.9999542236328,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aE",
"roundness": null,
"seed": 2098948056,
"version": 64,
"versionNonce": 1921034200,
"isDeleted": false,
"boundElements": null,
"updated": 1774499954136,
"link": null,
"locked": false,
"text": "RRDP 同步状态数据",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "RRDP 同步状态数据",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "BKIRb0Geq874XYWd0OLtS",
"type": "text",
"x": -92.68558175223177,
"y": 276.9428296770369,
"width": 120,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aF",
"roundness": null,
"seed": 791319000,
"version": 31,
"versionNonce": 703934936,
"isDeleted": false,
"boundElements": null,
"updated": 1774499970564,
"link": null,
"locked": false,
"text": "原始文件数据",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "原始文件数据",
"autoResize": true,
"lineHeight": 1.25
},
{
"id": "wNzjeS0S_Ji1bKTrKDoyd",
"type": "text",
"x": -101.82843017578091,
"y": 473.51426696777366,
"width": 140,
"height": 25,
"angle": 0,
"strokeColor": "#1e1e1e",
"backgroundColor": "transparent",
"fillStyle": "solid",
"strokeWidth": 2,
"strokeStyle": "solid",
"roughness": 1,
"opacity": 100,
"groupIds": [],
"frameId": null,
"index": "aG",
"roundness": null,
"seed": 1183095768,
"version": 26,
"versionNonce": 1711685800,
"isDeleted": false,
"boundElements": null,
"updated": 1774499982445,
"link": null,
"locked": false,
"text": "已验证产物数据",
"fontSize": 20,
"fontFamily": 5,
"textAlign": "left",
"verticalAlign": "top",
"containerId": null,
"originalText": "已验证产物数据",
"autoResize": true,
"lineHeight": 1.25
}
],
"appState": {
"gridSize": 20,
"gridStep": 5,
"gridModeEnabled": false,
"viewBackgroundColor": "#ffffff"
},
"files": {}
}

1
src/analysis/mod.rs Normal file
View File

@ -0,0 +1 @@
pub mod timing;

308
src/analysis/timing.rs Normal file
View File

@ -0,0 +1,308 @@
use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use serde::{Deserialize, Serialize};
/// Cheaply cloneable handle to a shared timing collector.
///
/// All clones record into the same `TimingCollector` behind an `Arc<Mutex<_>>`,
/// so spans started on different threads aggregate into one report.
#[derive(Clone)]
pub struct TimingHandle {
    inner: Arc<Mutex<TimingCollector>>,
}
impl TimingHandle {
    /// Create a new handle wrapping a fresh, empty collector seeded with `meta`.
    pub fn new(meta: TimingMeta) -> Self {
        Self {
            inner: Arc::new(Mutex::new(TimingCollector::new(meta))),
        }
    }

    /// Start timing a named top-level phase; elapsed time is recorded on drop.
    pub fn span_phase(&self, phase: &'static str) -> TimingSpanGuard<'_> {
        TimingSpanGuard {
            handle: self.clone(),
            kind: TimingSpanKind::Phase(phase),
            start: Instant::now(),
        }
    }

    /// Start timing whole-repository work, keyed by the RRDP repo URI.
    pub fn span_rrdp_repo<'a>(&self, repo_uri: &'a str) -> TimingSpanGuard<'a> {
        TimingSpanGuard {
            handle: self.clone(),
            kind: TimingSpanKind::RrdpRepo(repo_uri),
            start: Instant::now(),
        }
    }

    /// Start timing one named step within an RRDP repository
    /// (recorded under the composite key `"{repo_uri}::{step}"`).
    pub fn span_rrdp_repo_step<'a>(
        &self,
        repo_uri: &'a str,
        step: &'static str,
    ) -> TimingSpanGuard<'a> {
        TimingSpanGuard {
            handle: self.clone(),
            kind: TimingSpanKind::RrdpRepoStep { repo_uri, step },
            start: Instant::now(),
        }
    }

    /// Start timing one publication point, keyed by its manifest rsync URI.
    pub fn span_publication_point<'a>(&self, manifest_rsync_uri: &'a str) -> TimingSpanGuard<'a> {
        TimingSpanGuard {
            handle: self.clone(),
            kind: TimingSpanKind::PublicationPoint(manifest_rsync_uri),
            start: Instant::now(),
        }
    }

    /// Apply a partial metadata update; `None` fields are left unchanged.
    pub fn set_meta(&self, update: TimingMetaUpdate<'_>) {
        let mut g = self.inner.lock().expect("timing lock");
        if let Some(v) = update.tal_url {
            g.meta.tal_url = Some(v.to_string());
        }
        if let Some(v) = update.db_path {
            g.meta.db_path = Some(v.to_string());
        }
    }

    /// Add `inc` to the named counter, saturating instead of overflowing.
    pub fn record_count(&self, key: &'static str, inc: u64) {
        let mut g = self.inner.lock().expect("timing lock");
        // Single entry-API lookup; or_insert(0) + saturating_add(inc) is
        // equivalent to the and_modify/or_insert(inc) pair for a fresh key.
        let slot = g.counts.entry(key).or_insert(0);
        *slot = slot.saturating_add(inc);
    }

    /// Record a phase duration directly in nanoseconds.
    ///
    /// This is useful when aggregating sub-phase timings locally (to reduce lock contention)
    /// and then emitting a single record per publication point.
    pub fn record_phase_nanos(&self, phase: &'static str, nanos: u64) {
        let mut g = self.inner.lock().expect("timing lock");
        g.phases.record(phase, nanos);
    }

    /// Serialize the current report as pretty-printed JSON to `path`.
    ///
    /// `top_n` bounds the length of each "top" list. Returns a formatted
    /// error message on any I/O or serialization failure.
    pub fn write_json(&self, path: &Path, top_n: usize) -> Result<(), String> {
        // Build the report while holding the lock, but keep file I/O outside it.
        let report = {
            let g = self.inner.lock().expect("timing lock");
            g.to_report(top_n)
        };
        let f = std::fs::File::create(path)
            .map_err(|e| format!("create timing json failed: {}: {e}", path.display()))?;
        // Pretty serialization issues many small writes; buffer them instead
        // of paying one syscall each on a raw File.
        let mut w = std::io::BufWriter::new(f);
        serde_json::to_writer_pretty(&mut w, &report)
            .map_err(|e| format!("write timing json failed: {e}"))?;
        // BufWriter's Drop silently swallows flush errors; flush explicitly
        // so a short write surfaces to the caller.
        std::io::Write::flush(&mut w).map_err(|e| format!("flush timing json failed: {e}"))?;
        Ok(())
    }

    /// Fold one finished span into the stats map matching its kind.
    fn record_duration(&self, kind: TimingSpanKind<'_>, duration: Duration) {
        // Clamp to u64 nanoseconds (saturates after ~584 years).
        let nanos_u64 = duration.as_nanos().min(u128::from(u64::MAX)) as u64;
        let mut g = self.inner.lock().expect("timing lock");
        match kind {
            TimingSpanKind::Phase(name) => g.phases.record(name, nanos_u64),
            TimingSpanKind::RrdpRepo(uri) => g.rrdp_repos.record(uri, nanos_u64),
            TimingSpanKind::RrdpRepoStep { repo_uri, step } => g
                .rrdp_repo_steps
                .record(&format!("{repo_uri}::{step}"), nanos_u64),
            TimingSpanKind::PublicationPoint(uri) => g.publication_points.record(uri, nanos_u64),
        }
    }
}
/// Run metadata captured when the collector is created and optionally
/// amended later via `TimingHandle::set_meta`.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct TimingMeta {
    /// Wall-clock time the report was recorded, RFC 3339 UTC.
    pub recorded_at_utc_rfc3339: String,
    /// Validation reference time of the run, RFC 3339 UTC.
    pub validation_time_utc_rfc3339: String,
    /// TAL URL for the run, if known at collection time.
    pub tal_url: Option<String>,
    /// Path to the backing database, if known at collection time.
    pub db_path: Option<String>,
}

/// Borrow-based partial update for [`TimingMeta`]; `None` fields are
/// left untouched by `set_meta`.
#[derive(Clone, Debug, Default)]
pub struct TimingMetaUpdate<'a> {
    pub tal_url: Option<&'a str>,
    pub db_path: Option<&'a str>,
}
/// RAII span guard created by the `TimingHandle::span_*` methods; its Drop
/// impl records the elapsed time into the shared collector.
pub struct TimingSpanGuard<'a> {
    handle: TimingHandle,
    kind: TimingSpanKind<'a>,
    /// Monotonic start time of the span.
    start: Instant,
}
impl Drop for TimingSpanGuard<'_> {
    /// Record the elapsed time for this span when the guard leaves scope.
    fn drop(&mut self) {
        let elapsed = self.start.elapsed();
        let kind = self.kind.clone();
        self.handle.record_duration(kind, elapsed);
    }
}
/// What a [`TimingSpanGuard`] is measuring; selects the stats map the
/// duration is recorded into.
#[derive(Clone, Debug)]
enum TimingSpanKind<'a> {
    /// Named top-level phase.
    Phase(&'static str),
    /// Whole-repository work, keyed by repo URI.
    RrdpRepo(&'a str),
    /// One step within a repository; stored under the key "{repo_uri}::{step}".
    RrdpRepoStep {
        repo_uri: &'a str,
        step: &'static str,
    },
    /// One publication point, keyed by its manifest rsync URI.
    PublicationPoint(&'a str),
}
/// Accumulated observations for one key: how many and their total duration.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct DurationStats {
    pub count: u64,
    pub total_nanos: u64,
}
impl DurationStats {
    /// Fold one observation into the stats; both fields saturate rather
    /// than overflow.
    fn record(&mut self, nanos: u64) {
        let Self { count, total_nanos } = self;
        *count = count.saturating_add(1);
        *total_nanos = total_nanos.saturating_add(nanos);
    }
}
/// Keyed collection of [`DurationStats`], one entry per observed key.
#[derive(Clone, Debug, Default)]
struct DurationStatsMap {
    map: HashMap<String, DurationStats>,
}
impl DurationStatsMap {
    /// Add one observation under `key`, creating the entry on first use.
    fn record(&mut self, key: &str, nanos: u64) {
        self.map
            .entry(key.to_string())
            .or_default()
            .record(nanos);
    }

    /// Return the `n` entries with the largest total duration, descending.
    fn top(&self, n: usize) -> Vec<TopDurationEntry> {
        let mut entries: Vec<TopDurationEntry> = self
            .map
            .iter()
            .map(|(key, stats)| TopDurationEntry {
                key: key.clone(),
                count: stats.count,
                total_nanos: stats.total_nanos,
            })
            .collect();
        // Stable sort by descending total — same ordering as comparing b to a.
        entries.sort_by_key(|e| std::cmp::Reverse(e.total_nanos));
        entries.truncate(n);
        entries
    }
}
/// Mutable timing state shared behind the handle's mutex.
struct TimingCollector {
    meta: TimingMeta,
    /// Free-form counters (e.g. "vrps") keyed by a static name.
    counts: HashMap<&'static str, u64>,
    phases: DurationStatsMap,
    rrdp_repos: DurationStatsMap,
    rrdp_repo_steps: DurationStatsMap,
    publication_points: DurationStatsMap,
}
impl TimingCollector {
    /// Fresh collector with empty stats, carrying the run metadata.
    fn new(meta: TimingMeta) -> Self {
        Self {
            meta,
            counts: HashMap::new(),
            phases: DurationStatsMap::default(),
            rrdp_repos: DurationStatsMap::default(),
            rrdp_repo_steps: DurationStatsMap::default(),
            publication_points: DurationStatsMap::default(),
        }
    }

    /// Snapshot the collected data into a serializable report, truncating
    /// the "top" lists to `top_n` entries each.
    fn to_report(&self, top_n: usize) -> TimingReportV1 {
        let counts = self
            .counts
            .iter()
            .map(|(&name, &value)| (name.to_string(), value))
            .collect();
        let phases = self
            .phases
            .map
            .iter()
            .map(|(name, stats)| (name.clone(), stats.clone()))
            .collect();
        TimingReportV1 {
            format_version: 1,
            meta: self.meta.clone(),
            counts,
            phases,
            top_rrdp_repos: self.rrdp_repos.top(top_n),
            top_rrdp_repo_steps: self.rrdp_repo_steps.top(top_n),
            top_publication_points: self.publication_points.top(top_n),
        }
    }
}
/// Serialized timing report, format version 1.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TimingReportV1 {
    pub format_version: u64,
    pub meta: TimingMeta,
    pub counts: HashMap<String, u64>,
    /// Full per-phase stats (not truncated).
    pub phases: HashMap<String, DurationStats>,
    /// Top-N lists, sorted by descending total duration.
    pub top_rrdp_repos: Vec<TopDurationEntry>,
    pub top_rrdp_repo_steps: Vec<TopDurationEntry>,
    pub top_publication_points: Vec<TopDurationEntry>,
}

/// One row of a top-N duration list.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TopDurationEntry {
    pub key: String,
    pub count: u64,
    pub total_nanos: u64,
}
#[cfg(test)]
mod tests {
    use super::*;
    // End-to-end smoke test: record one span of each kind plus a counter,
    // write the JSON report to a scratch dir (tempfile dev-dependency),
    // then parse it back and verify each recording is represented.
    #[test]
    fn timing_handle_writes_json_with_phases_and_tops() {
        let meta = TimingMeta {
            recorded_at_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
            validation_time_utc_rfc3339: "2026-02-28T00:00:00Z".to_string(),
            tal_url: Some("https://example.test/x.tal".to_string()),
            db_path: Some("db".to_string()),
        };
        let h = TimingHandle::new(meta);
        // Each guard records on drop at the end of its block.
        {
            let _p = h.span_phase("tal_bootstrap");
        }
        {
            let _r = h.span_rrdp_repo("https://rrdp.example.test/notification.xml");
        }
        {
            let _s = h.span_rrdp_repo_step(
                "https://rrdp.example.test/notification.xml",
                "fetch_notification",
            );
        }
        {
            let _pp = h.span_publication_point("rsync://example.test/repo/manifest.mft");
        }
        h.record_count("vrps", 42);
        let dir = tempfile::tempdir().expect("tempdir");
        let path = dir.path().join("timing.json");
        h.write_json(&path, 10).expect("write_json");
        // Round-trip through serde to check the written report contents.
        let rep: TimingReportV1 =
            serde_json::from_slice(&std::fs::read(&path).expect("read timing.json"))
                .expect("parse timing.json");
        assert_eq!(rep.format_version, 1);
        assert!(rep.phases.contains_key("tal_bootstrap"));
        assert_eq!(rep.counts.get("vrps").copied(), Some(42));
        assert!(
            rep.top_rrdp_repos
                .iter()
                .any(|e| e.key.contains("rrdp.example.test")),
            "expected repo in top list"
        );
        assert!(
            rep.top_rrdp_repo_steps
                .iter()
                .any(|e| e.key.contains("fetch_notification")),
            "expected repo step in top list"
        );
        assert!(
            rep.top_publication_points
                .iter()
                .any(|e| e.key.contains("manifest.mft")),
            "expected PP in top list"
        );
    }
}

248
src/audit.rs Normal file
View File

@ -0,0 +1,248 @@
use serde::Serialize;
use sha2::Digest;
use crate::policy::Policy;
/// RPKI object category of an audited file.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AuditObjectKind {
    Manifest,
    Crl,
    Certificate,
    RouterCertificate,
    Roa,
    Aspa,
    Other,
}

/// Per-object processing outcome.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AuditObjectResult {
    Ok,
    Skipped,
    Error,
}

/// Audit record for a single object within a publication point.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct ObjectAuditEntry {
    pub rsync_uri: String,
    pub sha256_hex: String,
    pub kind: AuditObjectKind,
    pub result: AuditObjectResult,
    /// Extra detail (presumably an error/skip reason); omitted from JSON when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub detail: Option<String>,
}

/// Serializable form of a validation warning with its RFC references.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditWarning {
    pub message: String,
    pub rfc_refs: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub context: Option<String>,
}
impl From<&crate::report::Warning> for AuditWarning {
fn from(w: &crate::report::Warning) -> Self {
Self {
message: w.message.clone(),
rfc_refs: w.rfc_refs.iter().map(|r| r.0.to_string()).collect(),
context: w.context.clone(),
}
}
}
/// Per-publication-point audit record: traversal identity, sync provenance,
/// manifest timing fields, warnings, and the objects that were processed.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct PublicationPointAudit {
    /// Monotonic node ID assigned by the traversal engine.
    ///
    /// Present when running via the Stage2 tree engine; may be absent in ad-hoc runs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_id: Option<u64>,
    /// Parent node ID in the traversal tree.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parent_node_id: Option<u64>,
    /// Provenance metadata for non-root nodes (how this CA instance was discovered).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub discovered_from: Option<DiscoveredFrom>,
    pub rsync_base_uri: String,
    pub manifest_rsync_uri: String,
    pub publication_point_rsync_uri: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rrdp_notification_uri: Option<String>,
    pub source: String,
    // Repository-synchronization provenance for this point; all optional.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repo_sync_source: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repo_sync_phase: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repo_sync_duration_ms: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repo_sync_error: Option<String>,
    pub repo_terminal_state: String,
    // NOTE(review): presumably the manifest's thisUpdate/nextUpdate window — confirm.
    pub this_update_rfc3339_utc: String,
    pub next_update_rfc3339_utc: String,
    pub verified_at_rfc3339_utc: String,
    pub warnings: Vec<AuditWarning>,
    pub objects: Vec<ObjectAuditEntry>,
}

/// How a child CA instance was discovered from its parent manifest.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct DiscoveredFrom {
    pub parent_manifest_rsync_uri: String,
    pub child_ca_certificate_rsync_uri: String,
    pub child_ca_certificate_sha256_hex: String,
}

/// Whole-tree traversal summary.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct TreeSummary {
    pub instances_processed: usize,
    pub instances_failed: usize,
    pub warnings: Vec<AuditWarning>,
}
/// Top-level metadata for one audit run.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditRunMeta {
    /// Reference time used by the run, RFC 3339 UTC.
    pub validation_time_rfc3339_utc: String,
}

/// Kind of transfer an [`AuditDownloadEvent`] describes.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum AuditDownloadKind {
    RrdpNotification,
    RrdpSnapshot,
    RrdpDelta,
    Rsync,
}

/// Object-level totals for transfers carrying multiple objects.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditDownloadObjectsStat {
    pub objects_count: u64,
    pub objects_bytes_total: u64,
}

/// One completed download attempt (success or failure) with timing and sizes.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditDownloadEvent {
    pub kind: AuditDownloadKind,
    pub uri: String,
    pub started_at_rfc3339_utc: String,
    pub finished_at_rfc3339_utc: String,
    pub duration_ms: u64,
    pub success: bool,
    /// Error detail; populated only for failed downloads.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    /// Transferred payload size in bytes, when known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes: Option<u64>,
    /// Per-object totals, when the transfer carried multiple objects.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub objects: Option<AuditDownloadObjectsStat>,
}

/// Aggregated download statistics for one [`AuditDownloadKind`].
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditDownloadKindStats {
    pub ok_total: u64,
    pub fail_total: u64,
    pub duration_ms_total: u64,
    /// Sum over events; `None` when no event of this kind reported the field.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes_total: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub objects_count_total: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub objects_bytes_total: Option<u64>,
}

/// Aggregated statistics over all recorded download events.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditDownloadStats {
    pub events_total: u64,
    /// Statistics keyed by serialized `AuditDownloadKind` string (e.g. "rrdp_snapshot").
    pub by_kind: std::collections::BTreeMap<String, AuditDownloadKindStats>,
}

/// Count/duration bucket used by [`AuditRepoSyncStats`].
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditRepoSyncStateStat {
    pub count: u64,
    pub duration_ms_total: u64,
}

/// Repository-synchronization statistics across publication points.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize)]
pub struct AuditRepoSyncStats {
    pub publication_points_total: u64,
    pub by_phase: std::collections::BTreeMap<String, AuditRepoSyncStateStat>,
    pub by_terminal_state: std::collections::BTreeMap<String, AuditRepoSyncStateStat>,
}
/// Audit report format v1: validation outputs without download/sync stats.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditReportV1 {
    pub format_version: u32,
    pub meta: AuditRunMeta,
    pub policy: Policy,
    pub tree: TreeSummary,
    pub publication_points: Vec<PublicationPointAudit>,
    pub vrps: Vec<VrpOutput>,
    pub aspas: Vec<AspaOutput>,
}

/// Audit report format v2: everything in v1 plus raw download events and
/// aggregated download / repo-sync statistics.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditReportV2 {
    pub format_version: u32,
    pub meta: AuditRunMeta,
    pub policy: Policy,
    pub tree: TreeSummary,
    pub publication_points: Vec<PublicationPointAudit>,
    pub vrps: Vec<VrpOutput>,
    pub aspas: Vec<AspaOutput>,
    pub downloads: Vec<AuditDownloadEvent>,
    pub download_stats: AuditDownloadStats,
    pub repo_sync_stats: AuditRepoSyncStats,
}

/// One Validated ROA Payload row of the report.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct VrpOutput {
    pub asn: u32,
    pub prefix: String,
    pub max_length: u16,
}

/// One validated ASPA record of the report.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AspaOutput {
    pub customer_as_id: u32,
    pub provider_as_ids: Vec<u32>,
}
/// Render a 32-byte SHA-256 digest as its 64-character lowercase hex string.
pub fn sha256_hex_from_32(bytes: &[u8; 32]) -> String {
    use std::fmt::Write;
    bytes.iter().fold(String::with_capacity(64), |mut out, b| {
        // Writing into a String cannot fail.
        let _ = write!(out, "{b:02x}");
        out
    })
}
/// Hash arbitrary bytes with SHA-256 and return the lowercase hex digest.
pub fn sha256_hex(bytes: &[u8]) -> String {
    hex::encode(sha2::Sha256::digest(bytes))
}
/// Format a ROA IP prefix textually, e.g. `10.0.0.0/8` or an *uncompressed*
/// IPv6 prefix such as `2001:db8:0:0:0:0:0:0/32`.
///
/// IPv6 groups are deliberately not zero-compressed (`::`): the output keeps
/// the explicit eight-group form the original implementation produced, so
/// downstream comparisons stay byte-identical.
pub fn format_roa_ip_prefix(p: &crate::data_model::roa::IpPrefix) -> String {
    let addr = p.addr_bytes();
    match p.afi {
        crate::data_model::roa::RoaAfi::Ipv4 => {
            // Single format! call; the original nested a redundant inner
            // format! just for the "/len" suffix (clippy::useless_format).
            format!(
                "{}.{}.{}.{}/{}",
                addr[0], addr[1], addr[2], addr[3], p.prefix_len
            )
        }
        crate::data_model::roa::RoaAfi::Ipv6 => {
            let mut groups = Vec::with_capacity(8);
            for i in 0..8 {
                // Each group is two big-endian bytes rendered as bare hex
                // (no intra-group leading zeros).
                let group = (u16::from(addr[i * 2]) << 8) | u16::from(addr[i * 2 + 1]);
                groups.push(format!("{group:x}"));
            }
            format!("{}/{}", groups.join(":"), p.prefix_len)
        }
    }
}

170
src/audit_downloads.rs Normal file
View File

@ -0,0 +1,170 @@
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use crate::audit::{
AuditDownloadEvent, AuditDownloadKind, AuditDownloadKindStats, AuditDownloadObjectsStat,
AuditDownloadStats,
};
/// Cheaply cloneable, thread-safe log of download events; all clones append
/// to the same shared Vec.
#[derive(Clone, Debug, Default)]
pub struct DownloadLogHandle {
    inner: Arc<Mutex<Vec<AuditDownloadEvent>>>,
}
impl DownloadLogHandle {
    /// Create an empty log.
    pub fn new() -> Self {
        Self::default()
    }

    /// Append one completed download event to the shared log.
    pub fn record_event(&self, event: AuditDownloadEvent) {
        self.inner.lock().expect("download log lock").push(event);
    }

    /// Copy the current event list out of the shared log.
    pub fn snapshot_events(&self) -> Vec<AuditDownloadEvent> {
        self.inner.lock().expect("download log lock").clone()
    }

    /// Aggregate per-kind statistics over a slice of events.
    ///
    /// Optional totals (`bytes_total`, `objects_*`) stay `None` until at
    /// least one event of that kind reports the corresponding field.
    pub fn stats_from_events(events: &[AuditDownloadEvent]) -> AuditDownloadStats {
        let mut out = AuditDownloadStats {
            events_total: events.len() as u64,
            by_kind: BTreeMap::new(),
        };
        for e in events {
            // Keys must match the serde snake_case serialization of the kind.
            let kind_key = match e.kind {
                AuditDownloadKind::RrdpNotification => "rrdp_notification",
                AuditDownloadKind::RrdpSnapshot => "rrdp_snapshot",
                AuditDownloadKind::RrdpDelta => "rrdp_delta",
                AuditDownloadKind::Rsync => "rsync",
            }
            .to_string();
            // or_default() replaces the original hand-written all-zero/None
            // literal; AuditDownloadKindStats derives Default with the same values.
            let st = out.by_kind.entry(kind_key).or_default();
            if e.success {
                st.ok_total = st.ok_total.saturating_add(1);
            } else {
                st.fail_total = st.fail_total.saturating_add(1);
            }
            st.duration_ms_total = st.duration_ms_total.saturating_add(e.duration_ms);
            if let Some(b) = e.bytes {
                st.bytes_total = Some(st.bytes_total.unwrap_or(0).saturating_add(b));
            }
            if let Some(objects) = &e.objects {
                st.objects_count_total = Some(
                    st.objects_count_total
                        .unwrap_or(0)
                        .saturating_add(objects.objects_count),
                );
                st.objects_bytes_total = Some(
                    st.objects_bytes_total
                        .unwrap_or(0)
                        .saturating_add(objects.objects_bytes_total),
                );
            }
        }
        out
    }

    /// Aggregate statistics over the events currently in the log.
    pub fn stats(&self) -> AuditDownloadStats {
        let events = self.snapshot_events();
        Self::stats_from_events(&events)
    }

    /// Start timing one download; the event is recorded when the guard drops.
    ///
    /// Call `set_ok`/`set_err` (and optionally `set_bytes`/`set_objects`) on
    /// the guard before it drops; an untouched guard records a failure.
    pub fn span_download<'a>(
        &'a self,
        kind: AuditDownloadKind,
        uri: &'a str,
    ) -> DownloadSpanGuard<'a> {
        DownloadSpanGuard {
            handle: self,
            kind,
            uri,
            start_instant: Instant::now(),
            started_at: time::OffsetDateTime::now_utc(),
            bytes: None,
            objects: None,
            error: None,
            success: None,
        }
    }
}
/// RAII guard for one in-flight download; its Drop impl records an
/// [`AuditDownloadEvent`] into the owning log.
pub struct DownloadSpanGuard<'a> {
    handle: &'a DownloadLogHandle,
    kind: AuditDownloadKind,
    uri: &'a str,
    /// Monotonic start, used for the duration measurement.
    start_instant: Instant,
    /// Wall-clock start, used for the report timestamp.
    started_at: time::OffsetDateTime,
    bytes: Option<u64>,
    objects: Option<AuditDownloadObjectsStat>,
    error: Option<String>,
    /// `None` until `set_ok`/`set_err` is called; treated as failure on drop.
    success: Option<bool>,
}
impl DownloadSpanGuard<'_> {
    /// Record how many payload bytes this download transferred.
    pub fn set_bytes(&mut self, bytes: u64) {
        self.bytes = Some(bytes);
    }

    /// Record per-object totals for multi-object transfers.
    pub fn set_objects(&mut self, objects_count: u64, objects_bytes_total: u64) {
        let stat = AuditDownloadObjectsStat {
            objects_count,
            objects_bytes_total,
        };
        self.objects = Some(stat);
    }

    /// Mark the download as successful.
    pub fn set_ok(&mut self) {
        self.success = Some(true);
    }

    /// Mark the download as failed, keeping `msg` as the error detail.
    pub fn set_err(&mut self, msg: impl Into<String>) {
        let detail = msg.into();
        self.error = Some(detail);
        self.success = Some(false);
    }
}
impl Drop for DownloadSpanGuard<'_> {
    /// Emit the finished [`AuditDownloadEvent`] into the owning log.
    ///
    /// A guard that was never marked via `set_ok`/`set_err` is recorded as a
    /// failure with no error detail.
    fn drop(&mut self) {
        use time::format_description::well_known::Rfc3339;
        // One shared RFC 3339 formatter instead of the duplicated inline
        // chains; formatting errors map to a sentinel rather than panicking
        // inside Drop.
        let fmt_utc = |t: time::OffsetDateTime| {
            t.to_offset(time::UtcOffset::UTC)
                .format(&Rfc3339)
                .unwrap_or_else(|_| "<format-error>".to_string())
        };
        let finished_at = time::OffsetDateTime::now_utc();
        let duration_ms = duration_to_ms(self.start_instant.elapsed());
        let success = self.success.unwrap_or(false);
        let event = AuditDownloadEvent {
            kind: self.kind.clone(),
            uri: self.uri.to_string(),
            started_at_rfc3339_utc: fmt_utc(self.started_at),
            finished_at_rfc3339_utc: fmt_utc(finished_at),
            duration_ms,
            success,
            // Only failed downloads carry an error message.
            error: if success { None } else { self.error.clone() },
            bytes: self.bytes,
            objects: self.objects.clone(),
        };
        self.handle.record_event(event);
    }
}
/// Convert a `Duration` to whole milliseconds, clamping at `u64::MAX`.
fn duration_to_ms(d: Duration) -> u64 {
    u64::try_from(d.as_millis()).unwrap_or(u64::MAX)
}

640
src/audit_trace.rs Normal file
View File

@ -0,0 +1,640 @@
use crate::storage::{
AuditRuleIndexEntry, AuditRuleKind, RawByHashEntry, RocksStore, ValidatedCaInstanceResult,
VcirArtifactKind, VcirArtifactRole, VcirArtifactValidationStatus, VcirLocalOutput,
VcirOutputType,
};
use serde::Serialize;
use std::collections::HashSet;
/// Errors produced while tracing audit rules or VCIR chains.
#[derive(Debug, thiserror::Error)]
pub enum AuditTraceError {
    /// Underlying storage failure.
    #[error("storage error: {0}")]
    Storage(#[from] crate::storage::StorageError),
    /// The rule index referenced a VCIR that is not in the store.
    #[error("audit rule index points to missing VCIR: {manifest_rsync_uri}")]
    MissingVcir { manifest_rsync_uri: String },
    /// The rule index referenced a local output missing from its VCIR.
    #[error(
        "audit rule index points to missing local output: rule_hash={rule_hash}, output_id={output_id}, manifest={manifest_rsync_uri}"
    )]
    MissingLocalOutput {
        rule_hash: String,
        output_id: String,
        manifest_rsync_uri: String,
    },
    /// The VCIR parent links loop back on themselves.
    #[error("detected VCIR parent cycle at {manifest_rsync_uri}")]
    ParentCycle { manifest_rsync_uri: String },
}
/// Reference into the raw-by-hash store, with whatever metadata was found.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditTraceRawRef {
    pub sha256_hex: String,
    /// Whether an entry for this hash was present in the raw store.
    pub raw_present: bool,
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub origin_uris: Vec<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub object_type: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub byte_len: Option<usize>,
}

/// A VCIR-related artifact together with its resolved raw-store reference.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditTraceArtifact {
    pub artifact_role: VcirArtifactRole,
    pub artifact_kind: VcirArtifactKind,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub uri: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub object_type: Option<String>,
    pub validation_status: VcirArtifactValidationStatus,
    pub raw: AuditTraceRawRef,
}

/// One CA node of the leaf-to-root trace chain.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditTraceChainNode {
    pub manifest_rsync_uri: String,
    /// `None` marks the root of the chain.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parent_manifest_rsync_uri: Option<String>,
    pub tal_id: String,
    pub ca_subject_name: String,
    pub ca_ski: String,
    pub issuer_ski: String,
    pub current_manifest_rsync_uri: String,
    pub current_crl_rsync_uri: String,
    pub last_successful_validation_time_rfc3339_utc: String,
    pub local_output_count: usize,
    pub child_count: usize,
    pub related_artifacts: Vec<AuditTraceArtifact>,
}

/// The VCIR local output a rule resolved to, flattened for serialization.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditTraceResolvedOutput {
    pub output_id: String,
    pub output_type: VcirOutputType,
    pub rule_hash: String,
    pub source_object_uri: String,
    pub source_object_type: String,
    pub source_object_hash: String,
    pub source_ee_cert_hash: String,
    pub item_effective_until_rfc3339_utc: String,
    pub payload_json: String,
    pub validation_path_hint: Vec<String>,
}

/// Full provenance trace for one audit rule: the index entry, the resolved
/// output, the raw objects behind it, and the CA chain from leaf to root.
#[derive(Clone, Debug, PartialEq, Eq, Serialize)]
pub struct AuditRuleTrace {
    pub rule: AuditRuleIndexEntry,
    pub resolved_output: AuditTraceResolvedOutput,
    pub source_object_raw: AuditTraceRawRef,
    pub source_ee_cert_raw: AuditTraceRawRef,
    pub chain_leaf_to_root: Vec<AuditTraceChainNode>,
}
/// Resolve an audit-rule index entry back to its full provenance trace:
/// the rule, the VCIR local output it points at, the raw objects behind
/// that output, and the CA chain from the leaf VCIR up to the root.
///
/// Returns `Ok(None)` when no index entry exists for `(kind, rule_hash)`;
/// dangling references into the store surface as errors.
pub fn trace_rule_to_root(
    store: &RocksStore,
    kind: AuditRuleKind,
    rule_hash: &str,
) -> Result<Option<AuditRuleTrace>, AuditTraceError> {
    let Some(rule) = store.get_audit_rule_index_entry(kind, rule_hash)? else {
        return Ok(None);
    };
    let Some(leaf_vcir) = store.get_vcir(&rule.manifest_rsync_uri)? else {
        return Err(AuditTraceError::MissingVcir {
            manifest_rsync_uri: rule.manifest_rsync_uri.clone(),
        });
    };
    // Prefer an exact (output_id, rule_hash) match; fall back to matching by
    // rule_hash alone (presumably to tolerate regenerated output ids — confirm).
    let Some(local_output) = leaf_vcir
        .local_outputs
        .iter()
        .find(|output| output.output_id == rule.output_id && output.rule_hash == rule.rule_hash)
        .or_else(|| {
            leaf_vcir
                .local_outputs
                .iter()
                .find(|output| output.rule_hash == rule.rule_hash)
        })
        .cloned()
    else {
        return Err(AuditTraceError::MissingLocalOutput {
            rule_hash: rule.rule_hash.clone(),
            output_id: rule.output_id.clone(),
            manifest_rsync_uri: rule.manifest_rsync_uri.clone(),
        });
    };
    // The leaf VCIR was just loaded above, so the chain lookup cannot be None.
    let chain = trace_vcir_chain_to_root(store, &leaf_vcir.manifest_rsync_uri)?
        .expect("leaf VCIR already loaded must exist");
    Ok(Some(AuditRuleTrace {
        rule,
        resolved_output: resolved_output_from_local(&local_output),
        source_object_raw: resolve_raw_ref(store, &local_output.source_object_hash)?,
        source_ee_cert_raw: resolve_raw_ref(store, &local_output.source_ee_cert_hash)?,
        chain_leaf_to_root: chain,
    }))
}
/// Walk parent links from `manifest_rsync_uri` up to the root, returning the
/// chain ordered leaf-first.
///
/// Returns `Ok(None)` when the starting VCIR does not exist. A missing parent
/// mid-chain yields [`AuditTraceError::MissingVcir`]; a repeated URI in the
/// parent links yields [`AuditTraceError::ParentCycle`].
pub fn trace_vcir_chain_to_root(
    store: &RocksStore,
    manifest_rsync_uri: &str,
) -> Result<Option<Vec<AuditTraceChainNode>>, AuditTraceError> {
    let Some(mut current) = store.get_vcir(manifest_rsync_uri)? else {
        return Ok(None);
    };
    // URIs already visited; an insert returning false means the links loop.
    let mut seen = HashSet::new();
    let mut chain = Vec::new();
    loop {
        if !seen.insert(current.manifest_rsync_uri.clone()) {
            return Err(AuditTraceError::ParentCycle {
                manifest_rsync_uri: current.manifest_rsync_uri,
            });
        }
        // Record this node first, then follow its parent link (if any).
        let parent = current.parent_manifest_rsync_uri.clone();
        chain.push(trace_chain_node(store, &current)?);
        let Some(parent_manifest_rsync_uri) = parent else {
            break;
        };
        let Some(parent_vcir) = store.get_vcir(&parent_manifest_rsync_uri)? else {
            return Err(AuditTraceError::MissingVcir {
                manifest_rsync_uri: parent_manifest_rsync_uri,
            });
        };
        current = parent_vcir;
    }
    Ok(Some(chain))
}
/// Build one chain node from a stored VCIR, resolving the raw-store
/// reference for every related artifact.
fn trace_chain_node(
    store: &RocksStore,
    vcir: &ValidatedCaInstanceResult,
) -> Result<AuditTraceChainNode, AuditTraceError> {
    // Fallible map: the first storage error aborts the whole collection.
    let related_artifacts = vcir
        .related_artifacts
        .iter()
        .map(|artifact| {
            Ok(AuditTraceArtifact {
                artifact_role: artifact.artifact_role,
                artifact_kind: artifact.artifact_kind,
                uri: artifact.uri.clone(),
                object_type: artifact.object_type.clone(),
                validation_status: artifact.validation_status,
                raw: resolve_raw_ref(store, &artifact.sha256)?,
            })
        })
        .collect::<Result<Vec<_>, AuditTraceError>>()?;
    Ok(AuditTraceChainNode {
        manifest_rsync_uri: vcir.manifest_rsync_uri.clone(),
        parent_manifest_rsync_uri: vcir.parent_manifest_rsync_uri.clone(),
        tal_id: vcir.tal_id.clone(),
        ca_subject_name: vcir.ca_subject_name.clone(),
        ca_ski: vcir.ca_ski.clone(),
        issuer_ski: vcir.issuer_ski.clone(),
        current_manifest_rsync_uri: vcir.current_manifest_rsync_uri.clone(),
        current_crl_rsync_uri: vcir.current_crl_rsync_uri.clone(),
        last_successful_validation_time_rfc3339_utc: vcir
            .last_successful_validation_time
            .rfc3339_utc
            .clone(),
        local_output_count: vcir.local_outputs.len(),
        child_count: vcir.child_entries.len(),
        related_artifacts,
    })
}
fn resolved_output_from_local(local: &VcirLocalOutput) -> AuditTraceResolvedOutput {
AuditTraceResolvedOutput {
output_id: local.output_id.clone(),
output_type: local.output_type,
rule_hash: local.rule_hash.clone(),
source_object_uri: local.source_object_uri.clone(),
source_object_type: local.source_object_type.clone(),
source_object_hash: local.source_object_hash.clone(),
source_ee_cert_hash: local.source_ee_cert_hash.clone(),
item_effective_until_rfc3339_utc: local.item_effective_until.rfc3339_utc.clone(),
payload_json: local.payload_json.clone(),
validation_path_hint: local.validation_path_hint.clone(),
}
}
/// Looks up raw evidence bytes by hash and wraps the (possibly absent)
/// entry into a trace raw-reference. Absence is not an error: it surfaces
/// as `raw_present: false` in the returned reference.
fn resolve_raw_ref(
    store: &RocksStore,
    sha256_hex: &str,
) -> Result<AuditTraceRawRef, AuditTraceError> {
    Ok(raw_ref_from_entry(
        sha256_hex,
        store.get_raw_by_hash_entry(sha256_hex)?.as_ref(),
    ))
}
/// Builds a trace raw-reference from an optional raw-store entry: starts
/// from an "absent" reference and fills in the evidence when present.
fn raw_ref_from_entry(sha256_hex: &str, entry: Option<&RawByHashEntry>) -> AuditTraceRawRef {
    let mut raw_ref = AuditTraceRawRef {
        sha256_hex: sha256_hex.to_string(),
        raw_present: false,
        origin_uris: Vec::new(),
        object_type: None,
        byte_len: None,
    };
    if let Some(entry) = entry {
        raw_ref.raw_present = true;
        raw_ref.origin_uris = entry.origin_uris.clone();
        raw_ref.object_type = entry.object_type.clone();
        raw_ref.byte_len = Some(entry.bytes.len());
    }
    raw_ref
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::audit::sha256_hex;
    use crate::storage::{
        PackTime, ValidatedManifestMeta, VcirAuditSummary, VcirChildEntry, VcirInstanceGate,
        VcirRelatedArtifact, VcirSummary,
    };
    use base64::Engine as _;

    /// Builds a fully-populated VCIR fixture for `manifest_rsync_uri`.
    ///
    /// The summary counters are derived from `local_output`, so tests that
    /// customize the output before calling this helper get consistent
    /// counters for free and must not patch them afterwards.
    fn sample_vcir(
        manifest_rsync_uri: &str,
        parent_manifest_rsync_uri: Option<&str>,
        tal_id: &str,
        local_output: Option<VcirLocalOutput>,
        related_artifacts: Vec<VcirRelatedArtifact>,
    ) -> ValidatedCaInstanceResult {
        let now = time::OffsetDateTime::now_utc();
        let next = PackTime::from_utc_offset_datetime(now + time::Duration::hours(1));
        let local_outputs: Vec<VcirLocalOutput> = local_output.into_iter().collect();
        ValidatedCaInstanceResult {
            manifest_rsync_uri: manifest_rsync_uri.to_string(),
            parent_manifest_rsync_uri: parent_manifest_rsync_uri.map(str::to_string),
            tal_id: tal_id.to_string(),
            ca_subject_name: format!("CN={manifest_rsync_uri}"),
            ca_ski: "11".repeat(20),
            issuer_ski: "22".repeat(20),
            last_successful_validation_time: PackTime::from_utc_offset_datetime(now),
            current_manifest_rsync_uri: manifest_rsync_uri.to_string(),
            current_crl_rsync_uri: manifest_rsync_uri.replace(".mft", ".crl"),
            validated_manifest_meta: ValidatedManifestMeta {
                validated_manifest_number: vec![1],
                validated_manifest_this_update: PackTime::from_utc_offset_datetime(now),
                validated_manifest_next_update: next.clone(),
            },
            instance_gate: VcirInstanceGate {
                manifest_next_update: next.clone(),
                current_crl_next_update: next.clone(),
                self_ca_not_after: PackTime::from_utc_offset_datetime(
                    now + time::Duration::hours(2),
                ),
                instance_effective_until: next,
            },
            child_entries: vec![VcirChildEntry {
                child_manifest_rsync_uri: "rsync://example.test/child/child.mft".to_string(),
                child_cert_rsync_uri: "rsync://example.test/parent/child.cer".to_string(),
                child_cert_hash: sha256_hex(b"child-cert"),
                child_ski: "33".repeat(20),
                child_rsync_base_uri: "rsync://example.test/child/".to_string(),
                child_publication_point_rsync_uri: "rsync://example.test/child/".to_string(),
                child_rrdp_notification_uri: Some(
                    "https://example.test/child/notify.xml".to_string(),
                ),
                child_effective_ip_resources: None,
                child_effective_as_resources: None,
                accepted_at_validation_time: PackTime::from_utc_offset_datetime(now),
            }],
            summary: VcirSummary {
                // Counters mirror the provided outputs so fixtures stay
                // self-consistent regardless of the output type used.
                local_vrp_count: local_outputs
                    .iter()
                    .filter(|output| output.output_type == VcirOutputType::Vrp)
                    .count() as u32,
                local_aspa_count: local_outputs
                    .iter()
                    .filter(|output| output.output_type == VcirOutputType::Aspa)
                    .count() as u32,
                local_router_key_count: local_outputs
                    .iter()
                    .filter(|output| output.output_type == VcirOutputType::RouterKey)
                    .count() as u32,
                child_count: 1,
                accepted_object_count: related_artifacts.len() as u32,
                rejected_object_count: 0,
            },
            local_outputs,
            related_artifacts,
            audit_summary: VcirAuditSummary {
                failed_fetch_eligible: true,
                last_failed_fetch_reason: None,
                warning_count: 0,
                audit_flags: Vec::new(),
            },
        }
    }

    /// A single VRP-producing ROA output anchored under `manifest_rsync_uri`.
    fn sample_local_output(manifest_rsync_uri: &str) -> VcirLocalOutput {
        let now = time::OffsetDateTime::now_utc();
        VcirLocalOutput {
            output_id: sha256_hex(b"vrp-output"),
            output_type: VcirOutputType::Vrp,
            item_effective_until: PackTime::from_utc_offset_datetime(
                now + time::Duration::minutes(30),
            ),
            source_object_uri: "rsync://example.test/leaf/a.roa".to_string(),
            source_object_type: "roa".to_string(),
            source_object_hash: sha256_hex(b"roa-raw"),
            source_ee_cert_hash: sha256_hex(b"roa-ee"),
            payload_json:
                serde_json::json!({"asn": 64496, "prefix": "203.0.113.0/24", "max_length": 24})
                    .to_string(),
            rule_hash: sha256_hex(b"roa-rule"),
            validation_path_hint: vec![
                manifest_rsync_uri.to_string(),
                "rsync://example.test/leaf/a.roa".to_string(),
                sha256_hex(b"roa-raw"),
            ],
        }
    }

    /// Manifest + CRL + ROA artifact set for one publication point.
    fn sample_artifacts(manifest_rsync_uri: &str, roa_hash: &str) -> Vec<VcirRelatedArtifact> {
        vec![
            VcirRelatedArtifact {
                artifact_role: VcirArtifactRole::Manifest,
                artifact_kind: VcirArtifactKind::Mft,
                uri: Some(manifest_rsync_uri.to_string()),
                sha256: sha256_hex(manifest_rsync_uri.as_bytes()),
                object_type: Some("mft".to_string()),
                validation_status: VcirArtifactValidationStatus::Accepted,
            },
            VcirRelatedArtifact {
                artifact_role: VcirArtifactRole::CurrentCrl,
                artifact_kind: VcirArtifactKind::Crl,
                uri: Some(manifest_rsync_uri.replace(".mft", ".crl")),
                sha256: sha256_hex(format!("{}-crl", manifest_rsync_uri).as_bytes()),
                object_type: Some("crl".to_string()),
                validation_status: VcirArtifactValidationStatus::Accepted,
            },
            VcirRelatedArtifact {
                artifact_role: VcirArtifactRole::SignedObject,
                artifact_kind: VcirArtifactKind::Roa,
                uri: Some("rsync://example.test/leaf/a.roa".to_string()),
                sha256: roa_hash.to_string(),
                object_type: Some("roa".to_string()),
                validation_status: VcirArtifactValidationStatus::Accepted,
            },
        ]
    }

    /// Stores `bytes` as raw evidence keyed by their SHA-256.
    fn put_raw_evidence(store: &RocksStore, bytes: &[u8], uri: &str, object_type: &str) {
        let mut entry = RawByHashEntry::from_bytes(sha256_hex(bytes), bytes.to_vec());
        entry.origin_uris.push(uri.to_string());
        entry.object_type = Some(object_type.to_string());
        entry.encoding = Some("der".to_string());
        store
            .put_raw_by_hash_entry(&entry)
            .expect("put raw evidence");
    }

    #[test]
    fn trace_rule_to_root_returns_leaf_to_root_chain_and_evidence_refs() {
        let store_dir = tempfile::tempdir().expect("store dir");
        let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
        let root_manifest = "rsync://example.test/root/root.mft";
        let leaf_manifest = "rsync://example.test/leaf/leaf.mft";
        let local = sample_local_output(leaf_manifest);
        let leaf_vcir = sample_vcir(
            leaf_manifest,
            Some(root_manifest),
            "test-tal",
            Some(local.clone()),
            sample_artifacts(leaf_manifest, &local.source_object_hash),
        );
        let root_vcir = sample_vcir(
            root_manifest,
            None,
            "test-tal",
            None,
            sample_artifacts(root_manifest, &sha256_hex(b"root-object")),
        );
        store.put_vcir(&leaf_vcir).expect("put leaf vcir");
        store.put_vcir(&root_vcir).expect("put root vcir");
        let rule_entry = AuditRuleIndexEntry {
            kind: AuditRuleKind::Roa,
            rule_hash: local.rule_hash.clone(),
            manifest_rsync_uri: leaf_manifest.to_string(),
            source_object_uri: local.source_object_uri.clone(),
            source_object_hash: local.source_object_hash.clone(),
            output_id: local.output_id.clone(),
            item_effective_until: local.item_effective_until.clone(),
        };
        store
            .put_audit_rule_index_entry(&rule_entry)
            .expect("put rule index");
        put_raw_evidence(&store, leaf_manifest.as_bytes(), leaf_manifest, "mft");
        put_raw_evidence(
            &store,
            format!("{}-crl", leaf_manifest).as_bytes(),
            &leaf_manifest.replace(".mft", ".crl"),
            "crl",
        );
        put_raw_evidence(&store, b"roa-raw", &local.source_object_uri, "roa");
        put_raw_evidence(&store, b"roa-ee", "rsync://example.test/leaf/a.ee", "cer");
        put_raw_evidence(&store, root_manifest.as_bytes(), root_manifest, "mft");
        put_raw_evidence(
            &store,
            format!("{}-crl", root_manifest).as_bytes(),
            &root_manifest.replace(".mft", ".crl"),
            "crl",
        );
        let trace = trace_rule_to_root(&store, AuditRuleKind::Roa, &local.rule_hash)
            .expect("trace rule")
            .expect("trace exists");
        assert_eq!(trace.rule, rule_entry);
        assert_eq!(trace.resolved_output.output_id, local.output_id);
        assert_eq!(trace.chain_leaf_to_root.len(), 2);
        assert_eq!(
            trace.chain_leaf_to_root[0].manifest_rsync_uri,
            leaf_manifest
        );
        assert_eq!(
            trace.chain_leaf_to_root[1].manifest_rsync_uri,
            root_manifest
        );
        assert_eq!(
            trace.chain_leaf_to_root[0]
                .parent_manifest_rsync_uri
                .as_deref(),
            Some(root_manifest)
        );
        assert!(trace.source_object_raw.raw_present);
        assert!(trace.source_ee_cert_raw.raw_present);
        assert!(
            trace.chain_leaf_to_root[0]
                .related_artifacts
                .iter()
                .any(|artifact| {
                    artifact.uri.as_deref() == Some(leaf_manifest) && artifact.raw.raw_present
                })
        );
    }

    #[test]
    fn trace_rule_to_root_supports_router_key_rules() {
        let store_dir = tempfile::tempdir().expect("store dir");
        let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
        let manifest = "rsync://example.test/router/leaf.mft";
        let mut local = sample_local_output(manifest);
        local.output_type = VcirOutputType::RouterKey;
        local.source_object_uri = "rsync://example.test/router/router.cer".to_string();
        local.source_object_type = "router_key".to_string();
        local.payload_json = serde_json::json!({
            "as_id": 64496,
            "ski_hex": "11".repeat(20),
            "spki_der_base64": base64::engine::general_purpose::STANDARD.encode([0x30u8, 0x00]),
        })
        .to_string();
        // `sample_vcir` consumes the already-customized output and derives the
        // summary counters from it, so no post-construction patching of
        // `local_outputs[0]` or `summary` is needed.
        let vcir = sample_vcir(
            manifest,
            None,
            "test-tal",
            Some(local),
            sample_artifacts(manifest, &sha256_hex(b"router-object")),
        );
        store.put_vcir(&vcir).expect("put vcir");
        let rule_entry = AuditRuleIndexEntry {
            kind: AuditRuleKind::RouterKey,
            rule_hash: vcir.local_outputs[0].rule_hash.clone(),
            manifest_rsync_uri: manifest.to_string(),
            source_object_uri: vcir.local_outputs[0].source_object_uri.clone(),
            source_object_hash: vcir.local_outputs[0].source_object_hash.clone(),
            output_id: vcir.local_outputs[0].output_id.clone(),
            item_effective_until: vcir.local_outputs[0].item_effective_until.clone(),
        };
        store
            .put_audit_rule_index_entry(&rule_entry)
            .expect("put rule");
        let trace = trace_rule_to_root(&store, AuditRuleKind::RouterKey, &rule_entry.rule_hash)
            .expect("trace rule")
            .expect("trace exists");
        assert_eq!(trace.rule.kind, AuditRuleKind::RouterKey);
        assert_eq!(trace.resolved_output.output_type, VcirOutputType::RouterKey);
    }

    #[test]
    fn trace_rule_to_root_returns_none_for_missing_rule_index() {
        let store_dir = tempfile::tempdir().expect("store dir");
        let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
        assert!(
            trace_rule_to_root(&store, AuditRuleKind::Roa, &sha256_hex(b"missing"))
                .expect("missing trace ok")
                .is_none()
        );
    }

    #[test]
    fn trace_rule_to_root_errors_when_index_points_to_missing_vcir() {
        let store_dir = tempfile::tempdir().expect("store dir");
        let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
        let rule_hash = sha256_hex(b"missing-vcir-rule");
        store
            .put_audit_rule_index_entry(&AuditRuleIndexEntry {
                kind: AuditRuleKind::Roa,
                rule_hash: rule_hash.clone(),
                manifest_rsync_uri: "rsync://example.test/missing.mft".to_string(),
                source_object_uri: "rsync://example.test/missing.roa".to_string(),
                source_object_hash: sha256_hex(b"missing-source"),
                output_id: sha256_hex(b"missing-output"),
                item_effective_until: PackTime::from_utc_offset_datetime(
                    time::OffsetDateTime::now_utc() + time::Duration::minutes(1),
                ),
            })
            .expect("put rule index");
        let err = trace_rule_to_root(&store, AuditRuleKind::Roa, &rule_hash).unwrap_err();
        assert!(matches!(
            err,
            AuditTraceError::MissingVcir { manifest_rsync_uri }
                if manifest_rsync_uri == "rsync://example.test/missing.mft"
        ));
    }

    #[test]
    fn trace_rule_to_root_errors_when_vcir_local_output_is_missing() {
        let store_dir = tempfile::tempdir().expect("store dir");
        let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
        let manifest = "rsync://example.test/leaf/leaf.mft";
        let vcir = sample_vcir(
            manifest,
            None,
            "test-tal",
            None,
            sample_artifacts(manifest, &sha256_hex(b"leaf-object")),
        );
        store.put_vcir(&vcir).expect("put vcir");
        let rule_hash = sha256_hex(b"missing-output-rule");
        store
            .put_audit_rule_index_entry(&AuditRuleIndexEntry {
                kind: AuditRuleKind::Roa,
                rule_hash: rule_hash.clone(),
                manifest_rsync_uri: manifest.to_string(),
                source_object_uri: "rsync://example.test/leaf/a.roa".to_string(),
                source_object_hash: sha256_hex(b"leaf-object"),
                output_id: sha256_hex(b"missing-output"),
                item_effective_until: PackTime::from_utc_offset_datetime(
                    time::OffsetDateTime::now_utc() + time::Duration::minutes(1),
                ),
            })
            .expect("put rule index");
        let err = trace_rule_to_root(&store, AuditRuleKind::Roa, &rule_hash).unwrap_err();
        assert!(matches!(err, AuditTraceError::MissingLocalOutput { .. }));
    }

    #[test]
    fn trace_vcir_chain_to_root_detects_parent_cycle() {
        let store_dir = tempfile::tempdir().expect("store dir");
        let store = RocksStore::open(store_dir.path()).expect("open rocksdb");
        let a_manifest = "rsync://example.test/a.mft";
        let b_manifest = "rsync://example.test/b.mft";
        let a_vcir = sample_vcir(
            a_manifest,
            Some(b_manifest),
            "test-tal",
            None,
            sample_artifacts(a_manifest, &sha256_hex(b"a-object")),
        );
        let b_vcir = sample_vcir(
            b_manifest,
            Some(a_manifest),
            "test-tal",
            None,
            sample_artifacts(b_manifest, &sha256_hex(b"b-object")),
        );
        store.put_vcir(&a_vcir).expect("put a");
        store.put_vcir(&b_vcir).expect("put b");
        let err = trace_vcir_chain_to_root(&store, a_manifest).unwrap_err();
        assert!(matches!(
            err,
            AuditTraceError::ParentCycle { manifest_rsync_uri }
                if manifest_rsync_uri == a_manifest
        ));
    }
}

70
src/bin/ccr_dump.rs Normal file
View File

@ -0,0 +1,70 @@
use rpki::ccr::dump::dump_content_info_json_value;
/// Parsed command-line options for `ccr_dump`.
#[derive(Debug, Default, PartialEq, Eq)]
struct Args {
    ccr_path: Option<std::path::PathBuf>,
}

/// One-line usage string shown on `--help` and on argument errors.
fn usage() -> &'static str {
    "Usage: ccr_dump --ccr <path>"
}

/// Parses `argv` (program name at index 0).
///
/// `--help`/`-h` and malformed input are both reported through `Err`,
/// which the caller prints and exits on.
fn parse_args(argv: &[String]) -> Result<Args, String> {
    let mut parsed = Args::default();
    let mut idx = 1usize;
    while idx < argv.len() {
        let flag = argv[idx].as_str();
        if flag == "--help" || flag == "-h" {
            return Err(usage().to_string());
        } else if flag == "--ccr" {
            idx += 1;
            let value = argv.get(idx).ok_or("--ccr requires a value")?;
            parsed.ccr_path = Some(value.into());
        } else {
            return Err(format!("unknown argument: {flag}\n{}", usage()));
        }
        idx += 1;
    }
    if parsed.ccr_path.is_none() {
        return Err(format!("--ccr is required\n{}", usage()));
    }
    Ok(parsed)
}
/// Entry point: reads the CCR file given by `--ccr` and prints its
/// ContentInfo as pretty-printed JSON.
fn main() -> Result<(), String> {
    let argv: Vec<String> = std::env::args().collect();
    let args = parse_args(&argv)?;
    // parse_args guarantees --ccr is present.
    let ccr_path = args.ccr_path.as_ref().unwrap();
    let bytes = std::fs::read(ccr_path)
        .map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
    let json = dump_content_info_json_value(&bytes).map_err(|e| e.to_string())?;
    let pretty = serde_json::to_string_pretty(&json).map_err(|e| e.to_string())?;
    println!("{pretty}");
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_args_accepts_ccr_path() {
        let argv: Vec<String> = ["ccr_dump", "--ccr", "a.ccr"]
            .iter()
            .map(|s| s.to_string())
            .collect();
        let args = parse_args(&argv).expect("parse");
        assert_eq!(
            args.ccr_path.as_deref(),
            Some(std::path::Path::new("a.ccr"))
        );
    }

    #[test]
    fn parse_args_rejects_missing_required_ccr() {
        let err = parse_args(&["ccr_dump".to_string()]).unwrap_err();
        assert!(err.contains("--ccr is required"), "{err}");
    }
}

View File

@ -0,0 +1,124 @@
use rpki::bundle::{decode_ccr_compare_views, write_vap_csv, write_vrp_csv};
use rpki::ccr::decode_content_info;
/// Parsed command-line options for `ccr_to_compare_views`.
#[derive(Debug, Default, PartialEq, Eq)]
struct Args {
    ccr_path: Option<std::path::PathBuf>,
    vrps_out_path: Option<std::path::PathBuf>,
    vaps_out_path: Option<std::path::PathBuf>,
    trust_anchor: String,
}

/// One-line usage string shown on `--help` and on argument errors.
fn usage() -> &'static str {
    "Usage: ccr_to_compare_views --ccr <path> --vrps-out <path> --vaps-out <path> [--trust-anchor <name>]"
}

/// Parses `argv` (program name at index 0). `--trust-anchor` defaults to
/// "unknown"; the three path flags are mandatory.
fn parse_args(argv: &[String]) -> Result<Args, String> {
    let mut parsed = Args {
        trust_anchor: "unknown".to_string(),
        ..Args::default()
    };
    let mut idx = 1usize;
    while idx < argv.len() {
        let flag = argv[idx].as_str();
        match flag {
            "-h" | "--help" => return Err(usage().to_string()),
            // Known flags are identified first so an unknown flag errors
            // out before any value would be consumed.
            "--ccr" | "--vrps-out" | "--vaps-out" | "--trust-anchor" => {
                idx += 1;
                let value = argv
                    .get(idx)
                    .ok_or_else(|| format!("{flag} requires a value"))?;
                match flag {
                    "--ccr" => parsed.ccr_path = Some(value.into()),
                    "--vrps-out" => parsed.vrps_out_path = Some(value.into()),
                    "--vaps-out" => parsed.vaps_out_path = Some(value.into()),
                    _ => parsed.trust_anchor = value.clone(),
                }
            }
            other => return Err(format!("unknown argument: {other}\n{}", usage())),
        }
        idx += 1;
    }
    for (present, message) in [
        (parsed.ccr_path.is_some(), "--ccr is required"),
        (parsed.vrps_out_path.is_some(), "--vrps-out is required"),
        (parsed.vaps_out_path.is_some(), "--vaps-out is required"),
    ] {
        if !present {
            return Err(format!("{message}\n{}", usage()));
        }
    }
    Ok(parsed)
}
/// Entry point: decodes a CCR and writes VRP/VAP compare-view CSVs, then
/// prints both output paths.
fn main() -> Result<(), String> {
    let args = parse_args(&std::env::args().collect::<Vec<_>>())?;
    // parse_args guarantees all three paths are present.
    let ccr_path = args.ccr_path.as_ref().unwrap();
    let vrps_out = args.vrps_out_path.as_ref().unwrap();
    let vaps_out = args.vaps_out_path.as_ref().unwrap();
    let bytes = std::fs::read(ccr_path)
        .map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
    let content_info = decode_content_info(&bytes).map_err(|e| e.to_string())?;
    let (vrps, vaps) =
        decode_ccr_compare_views(&content_info, &args.trust_anchor).map_err(|e| e.to_string())?;
    write_vrp_csv(vrps_out, &vrps)?;
    write_vap_csv(vaps_out, &vaps)?;
    println!("{}\n{}", vrps_out.display(), vaps_out.display());
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds an owned argv from string literals.
    fn argv_of(parts: &[&str]) -> Vec<String> {
        parts.iter().map(|s| s.to_string()).collect()
    }

    #[test]
    fn parse_args_accepts_required_flags() {
        let argv = argv_of(&[
            "ccr_to_compare_views",
            "--ccr",
            "a.ccr",
            "--vrps-out",
            "vrps.csv",
            "--vaps-out",
            "vaps.csv",
            "--trust-anchor",
            "apnic",
        ]);
        let args = parse_args(&argv).expect("parse args");
        assert_eq!(
            args.ccr_path.as_deref(),
            Some(std::path::Path::new("a.ccr"))
        );
        assert_eq!(
            args.vrps_out_path.as_deref(),
            Some(std::path::Path::new("vrps.csv"))
        );
        assert_eq!(
            args.vaps_out_path.as_deref(),
            Some(std::path::Path::new("vaps.csv"))
        );
        assert_eq!(args.trust_anchor, "apnic");
    }

    #[test]
    fn parse_args_rejects_missing_required_flags() {
        let argv = argv_of(&[
            "ccr_to_compare_views",
            "--ccr",
            "a.ccr",
            "--vrps-out",
            "vrps.csv",
        ]);
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("--vaps-out is required"), "{err}");
    }
}

View File

@ -0,0 +1,121 @@
use rpki::ccr::{decode_content_info, extract_vrp_rows};
use std::io::Write;
/// Parsed command-line options for `ccr_to_routinator_csv`.
#[derive(Default, Debug)]
struct Args {
    ccr_path: Option<std::path::PathBuf>,
    out_path: Option<std::path::PathBuf>,
    trust_anchor: String,
}

/// One-line usage string shown on `--help` and on argument errors.
fn usage() -> &'static str {
    "Usage: ccr_to_routinator_csv --ccr <path> --out <path> [--trust-anchor <name>]"
}

/// Parses `argv` (program name at index 0). `--trust-anchor` defaults to
/// "unknown"; `--ccr` and `--out` are mandatory.
fn parse_args(argv: &[String]) -> Result<Args, String> {
    let mut parsed = Args {
        trust_anchor: "unknown".to_string(),
        ..Args::default()
    };
    let mut idx = 1usize;
    while idx < argv.len() {
        let flag = argv[idx].as_str();
        match flag {
            "-h" | "--help" => return Err(usage().to_string()),
            // Known flags are identified first so an unknown flag errors
            // out before any value would be consumed.
            "--ccr" | "--out" | "--trust-anchor" => {
                idx += 1;
                let value = argv
                    .get(idx)
                    .ok_or_else(|| format!("{flag} requires a value"))?;
                match flag {
                    "--ccr" => parsed.ccr_path = Some(value.into()),
                    "--out" => parsed.out_path = Some(value.into()),
                    _ => parsed.trust_anchor = value.clone(),
                }
            }
            other => return Err(format!("unknown argument: {other}\n{}", usage())),
        }
        idx += 1;
    }
    if parsed.ccr_path.is_none() {
        return Err(format!("--ccr is required\n{}", usage()));
    }
    if parsed.out_path.is_none() {
        return Err(format!("--out is required\n{}", usage()));
    }
    Ok(parsed)
}
/// Decodes the CCR ContentInfo and extracts the sorted, de-duplicated
/// `(asn, prefix, max_len)` VRP rows; both failure modes are stringified.
fn collect_vrp_rows(
    bytes: &[u8],
) -> Result<std::collections::BTreeSet<(u32, String, u16)>, String> {
    decode_content_info(bytes)
        .map_err(|e| e.to_string())
        .and_then(|content_info| extract_vrp_rows(&content_info).map_err(|e| e.to_string()))
}
/// Entry point: decodes the CCR, extracts `(asn, prefix, max_len)` VRP rows,
/// and writes them as a routinator-style CSV with the 4-column header
/// `ASN,IP Prefix,Max Length,Trust Anchor`.
fn main() -> Result<(), String> {
    let argv: Vec<String> = std::env::args().collect();
    let args = parse_args(&argv)?;
    let ccr_path = args.ccr_path.as_ref().unwrap();
    let out_path = args.out_path.as_ref().unwrap();
    let bytes = std::fs::read(ccr_path)
        .map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
    let rows = collect_vrp_rows(&bytes)?;
    if let Some(parent) = out_path.parent() {
        std::fs::create_dir_all(parent)
            .map_err(|e| format!("create parent dirs failed: {}: {e}", parent.display()))?;
    }
    let mut file = std::io::BufWriter::new(
        std::fs::File::create(out_path)
            .map_err(|e| format!("create output failed: {}: {e}", out_path.display()))?,
    );
    writeln!(file, "ASN,IP Prefix,Max Length,Trust Anchor").map_err(|e| e.to_string())?;
    for (asn, prefix, max_len) in rows {
        // Each row must match the 4-column header above; the max-length
        // column was previously corrupted into the literal "71,774",
        // leaving `max_len` unused and producing 5 fields per row.
        writeln!(file, "AS{asn},{prefix},{max_len},{}", args.trust_anchor)
            .map_err(|e| e.to_string())?;
    }
    // BufWriter's Drop flushes but silently discards errors; flush
    // explicitly so write failures surface as a non-zero exit.
    file.flush().map_err(|e| e.to_string())?;
    println!("{}", out_path.display());
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds an owned argv from string literals.
    fn argv_of(parts: &[&str]) -> Vec<String> {
        parts.iter().map(|s| s.to_string()).collect()
    }

    #[test]
    fn parse_args_accepts_required_flags() {
        let argv = argv_of(&[
            "ccr_to_routinator_csv",
            "--ccr",
            "a.ccr",
            "--out",
            "out.csv",
            "--trust-anchor",
            "apnic",
        ]);
        let args = parse_args(&argv).expect("parse args");
        assert_eq!(
            args.ccr_path.as_deref(),
            Some(std::path::Path::new("a.ccr"))
        );
        assert_eq!(
            args.out_path.as_deref(),
            Some(std::path::Path::new("out.csv"))
        );
        assert_eq!(args.trust_anchor, "apnic");
    }

    #[test]
    fn parse_args_rejects_missing_required_flags() {
        let argv = argv_of(&["ccr_to_routinator_csv", "--ccr", "a.ccr"]);
        let err = parse_args(&argv).unwrap_err();
        assert!(err.contains("--out is required"), "{err}");
    }
}

101
src/bin/ccr_verify.rs Normal file
View File

@ -0,0 +1,101 @@
use rpki::ccr::{
decode_content_info, verify::verify_content_info, verify_against_report_json_path,
verify_against_vcir_store_path,
};
/// Parsed command-line options for `ccr_verify`.
#[derive(Debug, Default, PartialEq, Eq)]
struct Args {
    ccr_path: Option<std::path::PathBuf>,
    report_json: Option<std::path::PathBuf>,
    db_path: Option<std::path::PathBuf>,
}

/// One-line usage string shown on `--help` and on argument errors.
fn usage() -> &'static str {
    "Usage: ccr_verify --ccr <path> [--report-json <path>] [--db <path>]"
}

/// Parses `argv` (program name at index 0). Only `--ccr` is mandatory;
/// the cross-check inputs are optional.
fn parse_args(argv: &[String]) -> Result<Args, String> {
    let mut parsed = Args::default();
    let mut idx = 1usize;
    while idx < argv.len() {
        let flag = argv[idx].as_str();
        // Select the destination slot first so an unknown flag errors out
        // before any value would be consumed.
        let slot = match flag {
            "--help" | "-h" => return Err(usage().to_string()),
            "--ccr" => &mut parsed.ccr_path,
            "--report-json" => &mut parsed.report_json,
            "--db" => &mut parsed.db_path,
            other => return Err(format!("unknown argument: {other}\n{}", usage())),
        };
        idx += 1;
        let value = argv
            .get(idx)
            .ok_or_else(|| format!("{flag} requires a value"))?;
        *slot = Some(value.into());
        idx += 1;
    }
    if parsed.ccr_path.is_none() {
        return Err(format!("--ccr is required\n{}", usage()));
    }
    Ok(parsed)
}
/// Entry point: decodes and verifies a CCR, optionally cross-checks it
/// against a report JSON and/or a VCIR store, then prints the verify
/// summary as pretty JSON.
fn main() -> Result<(), String> {
    let argv: Vec<String> = std::env::args().collect();
    let args = parse_args(&argv)?;
    // parse_args guarantees --ccr is present.
    let ccr_path = args.ccr_path.as_ref().unwrap();
    let bytes = std::fs::read(ccr_path)
        .map_err(|e| format!("read ccr failed: {}: {e}", ccr_path.display()))?;
    let ci = decode_content_info(&bytes).map_err(|e| e.to_string())?;
    let summary = verify_content_info(&ci).map_err(|e| e.to_string())?;
    if let Some(report_json) = &args.report_json {
        verify_against_report_json_path(&ci, report_json).map_err(|e| e.to_string())?;
    }
    if let Some(db_path) = &args.db_path {
        verify_against_vcir_store_path(&ci, db_path).map_err(|e| e.to_string())?;
    }
    let pretty = serde_json::to_string_pretty(&summary).map_err(|e| e.to_string())?;
    println!("{pretty}");
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds an owned argv from string literals.
    fn argv_of(parts: &[&str]) -> Vec<String> {
        parts.iter().map(|s| s.to_string()).collect()
    }

    #[test]
    fn parse_args_accepts_all_flags() {
        let argv = argv_of(&[
            "ccr_verify",
            "--ccr",
            "a.ccr",
            "--report-json",
            "report.json",
            "--db",
            "db",
        ]);
        let args = parse_args(&argv).expect("parse");
        assert_eq!(
            args.ccr_path.as_deref(),
            Some(std::path::Path::new("a.ccr"))
        );
        assert_eq!(
            args.report_json.as_deref(),
            Some(std::path::Path::new("report.json"))
        );
        assert_eq!(args.db_path.as_deref(), Some(std::path::Path::new("db")));
    }

    #[test]
    fn parse_args_rejects_missing_required_ccr() {
        let err = parse_args(&argv_of(&["ccr_verify"])).unwrap_err();
        assert!(err.contains("--ccr is required"), "{err}");
    }
}

315
src/bin/cir_drop_report.rs Normal file
View File

@ -0,0 +1,315 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use rpki::blob_store::{ExternalRawStoreDb, RawObjectStore};
use rpki::bundle::decode_ccr_compare_views;
use rpki::ccr::decode_content_info;
use rpki::cir::{decode_cir, resolve_static_pool_file};
use rpki::data_model::roa::RoaObject;
/// CLI usage string. Exactly one raw-object backend (`--static-root` or
/// `--raw-store-db`) must be supplied; that exclusivity is checked in `main`,
/// not in `parse_args`.
const USAGE: &str = "Usage: cir_drop_report --cir <path> --ccr <path> --report-json <path> (--static-root <path> | --raw-store-db <path>) --json-out <path> --md-out <path>";
/// One dropped (non-"ok") object from the validation report, serialized
/// into the JSON drop report's `objects` array.
#[derive(serde::Serialize)]
struct DroppedObjectRecord {
    /// rsync URI as reported; empty string when absent from the report.
    uri: String,
    /// Hex SHA-256; may be empty when neither the report nor the CIR knows it.
    sha256: String,
    /// Object kind from the report (e.g. "roa"); "other" when absent.
    kind: String,
    /// Stable reason code produced by `classify_reason`.
    reason_code: String,
    /// Free-form detail text from the report, when present.
    reason_text: Option<String>,
    /// Publication point rsync URI the object belonged to, when known.
    publication_point: Option<String>,
    /// Manifest rsync URI of the publication point, when known.
    manifest_uri: Option<String>,
    /// Number of VRPs derived from the dropped ROA's raw bytes (0 for
    /// non-ROA objects or when the raw bytes could not be resolved).
    derived_vrp_count: usize,
}
/// Maps a free-form failure `detail` (plus the coarse `result` status) onto
/// a stable reason code for aggregation. Matching is case-insensitive
/// substring search over `detail`, checked in priority order; `result` is
/// only consulted when no keyword matches.
fn classify_reason(detail: Option<&str>, result: &str) -> String {
    let text = detail.unwrap_or("").to_ascii_lowercase();
    // (keywords → code) in priority order; the first hit wins.
    const KEYWORD_CODES: &[(&[&str], &str)] = &[
        (&["fetch"], "fetch_failed"),
        (&["manifest"], "manifest_invalid"),
        (&["crl"], "crl_invalid"),
        (&["policy"], "policy_rejected"),
        (&["parse"], "object_parse_failed"),
        (&["signature", "cms"], "cms_signature_invalid"),
        (&["resource"], "resource_invalid"),
        (&["expired", "not yet valid"], "expired_or_not_yet_valid"),
    ];
    for (keywords, code) in KEYWORD_CODES {
        if keywords.iter().any(|keyword| text.contains(keyword)) {
            return (*code).to_string();
        }
    }
    match result {
        "skipped" => "skipped",
        "error" => "error",
        _ => "other",
    }
    .to_string()
}
/// Parses `argv` (program name at index 0) into the tuple
/// `(cir, ccr, report_json, static_root, raw_store_db, json_out, md_out)`.
///
/// The two backend flags stay optional here; `main` enforces that exactly
/// one of them is supplied. Help and malformed input are reported via `Err`.
fn parse_args(
    argv: &[String],
) -> Result<
    (
        PathBuf,
        PathBuf,
        PathBuf,
        Option<PathBuf>,
        Option<PathBuf>,
        PathBuf,
        PathBuf,
    ),
    String,
> {
    // Flag values are collected as options; presence is validated at the end.
    let mut cir: Option<PathBuf> = None;
    let mut ccr: Option<PathBuf> = None;
    let mut report: Option<PathBuf> = None;
    let mut static_root: Option<PathBuf> = None;
    let mut raw_store_db: Option<PathBuf> = None;
    let mut json_out: Option<PathBuf> = None;
    let mut md_out: Option<PathBuf> = None;
    let mut idx = 1usize;
    while idx < argv.len() {
        let flag = argv[idx].as_str();
        // Select the destination slot first so an unknown flag errors out
        // before any value would be consumed.
        let slot = match flag {
            "-h" | "--help" => return Err(USAGE.to_string()),
            "--cir" => &mut cir,
            "--ccr" => &mut ccr,
            "--report-json" => &mut report,
            "--static-root" => &mut static_root,
            "--raw-store-db" => &mut raw_store_db,
            "--json-out" => &mut json_out,
            "--md-out" => &mut md_out,
            other => return Err(format!("unknown argument: {other}\n\n{USAGE}")),
        };
        idx += 1;
        let value = argv
            .get(idx)
            .ok_or_else(|| format!("{flag} requires a value"))?;
        *slot = Some(PathBuf::from(value));
        idx += 1;
    }
    Ok((
        cir.ok_or_else(|| format!("--cir is required\n\n{USAGE}"))?,
        ccr.ok_or_else(|| format!("--ccr is required\n\n{USAGE}"))?,
        report.ok_or_else(|| format!("--report-json is required\n\n{USAGE}"))?,
        static_root,
        raw_store_db,
        json_out.ok_or_else(|| format!("--json-out is required\n\n{USAGE}"))?,
        md_out.ok_or_else(|| format!("--md-out is required\n\n{USAGE}"))?,
    ))
}
fn main() -> Result<(), String> {
let argv: Vec<String> = std::env::args().collect();
let (cir_path, ccr_path, report_path, static_root, raw_store_db, json_out, md_out) =
parse_args(&argv)?;
let backend_count = static_root.is_some() as u8 + raw_store_db.is_some() as u8;
if backend_count != 1 {
return Err(format!(
"must specify exactly one of --static-root or --raw-store-db\n\n{USAGE}"
));
}
let cir = decode_cir(&std::fs::read(&cir_path).map_err(|e| format!("read cir failed: {e}"))?)
.map_err(|e| format!("decode cir failed: {e}"))?;
let ccr = decode_content_info(
&std::fs::read(&ccr_path).map_err(|e| format!("read ccr failed: {e}"))?,
)
.map_err(|e| format!("decode ccr failed: {e}"))?;
let (vrps, vaps) = decode_ccr_compare_views(&ccr, "unknown")
.map_err(|e| format!("decode compare views failed: {e}"))?;
let report: serde_json::Value = serde_json::from_slice(
&std::fs::read(&report_path).map_err(|e| format!("read report failed: {e}"))?,
)
.map_err(|e| format!("parse report failed: {e}"))?;
let mut object_hash_by_uri = BTreeMap::new();
for object in &cir.objects {
object_hash_by_uri.insert(object.rsync_uri.clone(), hex::encode(&object.sha256));
}
let publication_points = report["publication_points"]
.as_array()
.ok_or("report.publication_points must be an array")?;
let mut dropped_objects = Vec::new();
let mut dropped_vrp_rows = BTreeSet::new();
let mut dropped_by_kind: BTreeMap<String, usize> = BTreeMap::new();
let mut dropped_by_reason: BTreeMap<String, usize> = BTreeMap::new();
let mut unknown_roa_objects = 0usize;
for pp in publication_points {
let publication_point = pp["publication_point_rsync_uri"]
.as_str()
.map(str::to_string);
let manifest_uri = pp["manifest_rsync_uri"].as_str().map(str::to_string);
for obj in pp["objects"].as_array().into_iter().flatten() {
let result = obj["result"].as_str().unwrap_or("unknown");
if result == "ok" {
continue;
}
let uri = obj["rsync_uri"].as_str().unwrap_or("").to_string();
let hash = obj["sha256_hex"]
.as_str()
.map(str::to_string)
.or_else(|| object_hash_by_uri.get(&uri).cloned())
.unwrap_or_default();
let kind = obj["kind"].as_str().unwrap_or("other").to_string();
let detail = obj["detail"].as_str().map(str::to_string);
let reason_code = classify_reason(detail.as_deref(), result);
*dropped_by_kind.entry(kind.clone()).or_insert(0) += 1;
*dropped_by_reason.entry(reason_code.clone()).or_insert(0) += 1;
let mut derived_vrp_count = 0usize;
if kind == "roa" && !hash.is_empty() {
let bytes_opt = if let Some(static_root) = static_root.as_ref() {
match resolve_static_pool_file(static_root, &hash) {
Ok(path) => std::fs::read(&path).ok(),
Err(_) => None,
}
} else if let Some(raw_store_db) = raw_store_db.as_ref() {
ExternalRawStoreDb::open(raw_store_db)
.ok()
.and_then(|store| store.get_raw_entry(&hash).ok().flatten())
.map(|entry| entry.bytes)
} else {
None
};
match bytes_opt {
Some(bytes) => {
if let Ok(roa) = RoaObject::decode_der(&bytes) {
for family in roa.roa.ip_addr_blocks {
for addr in family.addresses {
let prefix = match addr.prefix.afi {
rpki::data_model::roa::RoaAfi::Ipv4 => format!(
"{}.{}.{}.{}/{}",
addr.prefix.addr[0],
addr.prefix.addr[1],
addr.prefix.addr[2],
addr.prefix.addr[3],
addr.prefix.prefix_len
),
rpki::data_model::roa::RoaAfi::Ipv6 => {
let bytes: [u8; 16] = addr.prefix.addr;
format!(
"{}/{}",
std::net::Ipv6Addr::from(bytes),
addr.prefix.prefix_len
)
}
};
let max_len = addr.max_length.unwrap_or(addr.prefix.prefix_len);
dropped_vrp_rows.insert((roa.roa.as_id, prefix, max_len));
derived_vrp_count += 1;
}
}
} else {
unknown_roa_objects += 1;
}
}
None => unknown_roa_objects += 1,
}
}
dropped_objects.push(DroppedObjectRecord {
uri,
sha256: hash,
kind,
reason_code,
reason_text: detail,
publication_point: publication_point.clone(),
manifest_uri: manifest_uri.clone(),
derived_vrp_count,
});
}
}
let output = serde_json::json!({
"summary": {
"finalVrpCount": vrps.len(),
"finalVapCount": vaps.len(),
"droppedVrpCount": dropped_vrp_rows.len(),
"droppedObjectCount": dropped_objects.len(),
"droppedByKind": dropped_by_kind,
"droppedByReason": dropped_by_reason,
"unknownDroppedRoaObjects": unknown_roa_objects,
},
"objects": dropped_objects,
});
if let Some(parent) = json_out.parent() {
std::fs::create_dir_all(parent).map_err(|e| format!("create json parent failed: {e}"))?;
}
std::fs::write(&json_out, serde_json::to_vec_pretty(&output).unwrap())
.map_err(|e| format!("write json failed: {e}"))?;
let mut md = String::new();
md.push_str("# CIR Drop Report\n\n");
md.push_str(&format!("- `final_vrp_count`: `{}`\n", vrps.len()));
md.push_str(&format!("- `final_vap_count`: `{}`\n", vaps.len()));
md.push_str(&format!(
"- `dropped_vrp_count`: `{}`\n",
output["summary"]["droppedVrpCount"]
));
md.push_str(&format!(
"- `dropped_object_count`: `{}`\n",
output["summary"]["droppedObjectCount"]
));
md.push_str(&format!(
"- `unknown_dropped_roa_objects`: `{}`\n\n",
output["summary"]["unknownDroppedRoaObjects"]
));
md.push_str("## Dropped By Kind\n\n");
for (kind, count) in output["summary"]["droppedByKind"]
.as_object()
.into_iter()
.flatten()
{
md.push_str(&format!("- `{kind}`: `{}`\n", count.as_u64().unwrap_or(0)));
}
md.push_str("\n## Dropped By Reason\n\n");
for (reason, count) in output["summary"]["droppedByReason"]
.as_object()
.into_iter()
.flatten()
{
md.push_str(&format!(
"- `{reason}`: `{}`\n",
count.as_u64().unwrap_or(0)
));
}
if let Some(parent) = md_out.parent() {
std::fs::create_dir_all(parent)
.map_err(|e| format!("create markdown parent failed: {e}"))?;
}
std::fs::write(&md_out, md).map_err(|e| format!("write markdown failed: {e}"))?;
Ok(())
}

View File

@ -0,0 +1,82 @@
use std::path::PathBuf;
/// One-line CLI synopsis, surfaced through error messages and `--help`.
fn usage() -> &'static str {
    const TEXT: &str =
        "Usage: cir_extract_inputs --cir <path> --tals-dir <path> --meta-json <path>";
    TEXT
}
/// Entry point: delegate to `run`; on failure print `error: …` to stderr
/// and exit with status 2.
fn main() {
    match run(std::env::args().collect()) {
        Ok(()) => {}
        Err(err) => {
            eprintln!("error: {err}");
            std::process::exit(2);
        }
    }
}
/// Extract replay inputs from a CIR file: write each embedded TAL to
/// `--tals-dir` as `tal-NNN.tal` and record the validation time plus TAL
/// paths in the `--meta-json` file.
///
/// `argv[0]` is the program name; every recognized flag consumes the next
/// argument as its value.
fn run(argv: Vec<String>) -> Result<(), String> {
    let mut cir_path: Option<PathBuf> = None;
    let mut tals_dir: Option<PathBuf> = None;
    let mut meta_json: Option<PathBuf> = None;
    // Walk the arguments with an iterator; `next()` on a value-taking flag
    // consumes its operand.
    let mut args = argv.iter().skip(1);
    while let Some(flag) = args.next() {
        match flag.as_str() {
            "--help" | "-h" => return Err(usage().to_string()),
            "--cir" => {
                let value = args.next().ok_or("--cir requires a value")?;
                cir_path = Some(PathBuf::from(value));
            }
            "--tals-dir" => {
                let value = args.next().ok_or("--tals-dir requires a value")?;
                tals_dir = Some(PathBuf::from(value));
            }
            "--meta-json" => {
                let value = args.next().ok_or("--meta-json requires a value")?;
                meta_json = Some(PathBuf::from(value));
            }
            other => return Err(format!("unknown argument: {other}\n\n{}", usage())),
        }
    }
    let cir_path = cir_path.ok_or_else(|| format!("--cir is required\n\n{}", usage()))?;
    let tals_dir = tals_dir.ok_or_else(|| format!("--tals-dir is required\n\n{}", usage()))?;
    let meta_json = meta_json.ok_or_else(|| format!("--meta-json is required\n\n{}", usage()))?;
    let bytes = std::fs::read(&cir_path)
        .map_err(|e| format!("read CIR failed: {}: {e}", cir_path.display()))?;
    let cir = rpki::cir::decode_cir(&bytes).map_err(|e| e.to_string())?;
    std::fs::create_dir_all(&tals_dir)
        .map_err(|e| format!("create tals dir failed: {}: {e}", tals_dir.display()))?;
    let mut tal_files = Vec::with_capacity(cir.tals.len());
    for (idx, tal) in cir.tals.iter().enumerate() {
        // 1-based, zero-padded names keep the on-disk ordering stable.
        let path = tals_dir.join(format!("tal-{:03}.tal", idx + 1));
        std::fs::write(&path, &tal.tal_bytes)
            .map_err(|e| format!("write TAL failed: {}: {e}", path.display()))?;
        tal_files.push(serde_json::json!({
            "talUri": tal.tal_uri,
            "path": path,
        }));
    }
    let validation_time = cir
        .validation_time
        .format(&time::format_description::well_known::Rfc3339)
        .map_err(|e| format!("format validationTime failed: {e}"))?;
    let meta = serde_json::json!({
        "validationTime": validation_time,
        "talFiles": tal_files,
    });
    if let Some(parent) = meta_json.parent() {
        std::fs::create_dir_all(parent)
            .map_err(|e| format!("create meta parent failed: {}: {e}", parent.display()))?;
    }
    std::fs::write(&meta_json, serde_json::to_vec_pretty(&meta).unwrap())
        .map_err(|e| format!("write meta json failed: {}: {e}", meta_json.display()))?;
    Ok(())
}

View File

@ -0,0 +1,96 @@
use std::path::PathBuf;
/// One-line CLI synopsis, surfaced through error messages and `--help`.
fn usage() -> &'static str {
    const TEXT: &str = "Usage: cir_materialize --cir <path> (--static-root <path> | --raw-store-db <path>) --mirror-root <path> [--keep-db]";
    TEXT
}
/// Entry point: delegate to `run`; on failure print `error: …` to stderr
/// and exit with status 2.
fn main() {
    match run(std::env::args().collect()) {
        Ok(()) => {}
        Err(err) => {
            eprintln!("error: {err}");
            std::process::exit(2);
        }
    }
}
/// Materialize every object referenced by a CIR into a local mirror tree.
///
/// Exactly one byte backend must be supplied: a static pool directory
/// (`--static-root`) or an external raw-store RocksDB (`--raw-store-db`).
/// On failure, the mirror directory is removed unless `--keep-db` was given.
fn run(argv: Vec<String>) -> Result<(), String> {
    let mut cir_path: Option<PathBuf> = None;
    let mut static_root: Option<PathBuf> = None;
    let mut raw_store_db: Option<PathBuf> = None;
    let mut mirror_root: Option<PathBuf> = None;
    let mut keep_db = false;
    // Manual flag scan: i starts at 1 to skip the program name, and each
    // value-taking flag advances i once more to consume its operand.
    let mut i = 1usize;
    while i < argv.len() {
        match argv[i].as_str() {
            // Help is reported through the Err path so main prints it and
            // exits non-zero.
            "--help" | "-h" => return Err(usage().to_string()),
            "--cir" => {
                i += 1;
                cir_path = Some(PathBuf::from(argv.get(i).ok_or("--cir requires a value")?));
            }
            "--static-root" => {
                i += 1;
                static_root = Some(PathBuf::from(
                    argv.get(i).ok_or("--static-root requires a value")?,
                ));
            }
            "--raw-store-db" => {
                i += 1;
                raw_store_db = Some(PathBuf::from(
                    argv.get(i).ok_or("--raw-store-db requires a value")?,
                ));
            }
            "--mirror-root" => {
                i += 1;
                mirror_root = Some(PathBuf::from(
                    argv.get(i).ok_or("--mirror-root requires a value")?,
                ));
            }
            "--keep-db" => keep_db = true,
            other => return Err(format!("unknown argument: {other}\n\n{}", usage())),
        }
        i += 1;
    }
    let cir_path = cir_path.ok_or_else(|| format!("--cir is required\n\n{}", usage()))?;
    let mirror_root =
        mirror_root.ok_or_else(|| format!("--mirror-root is required\n\n{}", usage()))?;
    // Enforce "exactly one backend" by counting the supplied options.
    let backend_count = static_root.is_some() as u8 + raw_store_db.is_some() as u8;
    if backend_count != 1 {
        return Err(format!(
            "must specify exactly one of --static-root or --raw-store-db\n\n{}",
            usage()
        ));
    }
    let bytes = std::fs::read(&cir_path)
        .map_err(|e| format!("read CIR failed: {}: {e}", cir_path.display()))?;
    let cir = rpki::cir::decode_cir(&bytes).map_err(|e| e.to_string())?;
    // NOTE(review): the trailing `true` flag's meaning is not visible here
    // (presumably "allow hard links", given the project history) — confirm
    // against rpki::cir before relying on it.
    let result = if let Some(static_root) = static_root {
        rpki::cir::materialize_cir(&cir, &static_root, &mirror_root, true)
            .map_err(|e| e.to_string())
    } else if let Some(raw_store_db) = raw_store_db {
        rpki::cir::materialize_cir_from_raw_store(&cir, &raw_store_db, &mirror_root, true)
            .map_err(|e| e.to_string())
    } else {
        // Guarded by the backend_count check above.
        unreachable!("validated backend count")
    };
    match result {
        Ok(summary) => {
            eprintln!(
                "materialized CIR: mirror={} objects={} linked={} copied={} keep_db={}",
                mirror_root.display(),
                summary.object_count,
                summary.linked_files,
                summary.copied_files,
                keep_db
            );
            Ok(())
        }
        Err(err) => {
            // Best-effort cleanup of a partially written mirror.
            // NOTE(review): this deletes mirror_root wholesale — destructive
            // if the caller pointed it at a pre-existing directory; confirm
            // callers always pass a fresh path.
            if !keep_db && mirror_root.exists() {
                let _ = std::fs::remove_dir_all(&mirror_root);
            }
            Err(err.to_string())
        }
    }
}

View File

@ -0,0 +1,124 @@
use std::path::PathBuf;
use rpki::cir::{
CIR_VERSION_V1, CanonicalInputRepresentation, CirHashAlgorithm, CirObject, CirTal, encode_cir,
write_bytes_to_static_pool,
};
use sha2::Digest;
const USAGE: &str = "Usage: cir_ta_only_fixture --tal-path <path> --ta-path <path> --tal-uri <url> --validation-time <rfc3339> --cir-out <path> --static-root <path>";
/// Parse CLI flags for the TA-only fixture builder.
///
/// Returns, in order: TAL path, TA certificate path, TAL URI, validation
/// time, CIR output path, static-pool root. All six are required; `--help`
/// surfaces USAGE through the Err path so main prints it and exits non-zero.
fn parse_args(
    argv: &[String],
) -> Result<
    (
        PathBuf,
        PathBuf,
        String,
        time::OffsetDateTime,
        PathBuf,
        PathBuf,
    ),
    String,
> {
    let mut tal_path = None;
    let mut ta_path = None;
    let mut tal_uri = None;
    let mut validation_time = None;
    let mut cir_out = None;
    let mut static_root = None;
    // i starts at 1 to skip the program name; value flags consume argv[i+1].
    let mut i = 1usize;
    while i < argv.len() {
        match argv[i].as_str() {
            "--tal-path" => {
                i += 1;
                tal_path = Some(PathBuf::from(
                    argv.get(i).ok_or("--tal-path requires a value")?,
                ));
            }
            "--ta-path" => {
                i += 1;
                ta_path = Some(PathBuf::from(
                    argv.get(i).ok_or("--ta-path requires a value")?,
                ));
            }
            "--tal-uri" => {
                i += 1;
                tal_uri = Some(argv.get(i).ok_or("--tal-uri requires a value")?.clone());
            }
            "--validation-time" => {
                i += 1;
                let raw = argv.get(i).ok_or("--validation-time requires a value")?;
                // Strict RFC 3339 timestamp, e.g. 2026-04-01T00:00:00Z.
                validation_time = Some(
                    time::OffsetDateTime::parse(
                        raw,
                        &time::format_description::well_known::Rfc3339,
                    )
                    .map_err(|e| format!("invalid validation time: {e}"))?,
                );
            }
            "--cir-out" => {
                i += 1;
                cir_out = Some(PathBuf::from(
                    argv.get(i).ok_or("--cir-out requires a value")?,
                ));
            }
            "--static-root" => {
                i += 1;
                static_root = Some(PathBuf::from(
                    argv.get(i).ok_or("--static-root requires a value")?,
                ));
            }
            "-h" | "--help" => return Err(USAGE.to_string()),
            other => return Err(format!("unknown argument: {other}\n\n{USAGE}")),
        }
        i += 1;
    }
    Ok((
        tal_path.ok_or_else(|| format!("--tal-path is required\n\n{USAGE}"))?,
        ta_path.ok_or_else(|| format!("--ta-path is required\n\n{USAGE}"))?,
        tal_uri.ok_or_else(|| format!("--tal-uri is required\n\n{USAGE}"))?,
        validation_time.ok_or_else(|| format!("--validation-time is required\n\n{USAGE}"))?,
        cir_out.ok_or_else(|| format!("--cir-out is required\n\n{USAGE}"))?,
        static_root.ok_or_else(|| format!("--static-root is required\n\n{USAGE}"))?,
    ))
}
/// Build a minimal CIR fixture containing a single trust-anchor certificate.
///
/// Reads the TAL and TA cert, stores the TA bytes in the static pool keyed by
/// (validation date, sha256), and writes a one-object CIR referencing them.
fn main() -> Result<(), String> {
    let argv: Vec<String> = std::env::args().collect();
    let (tal_path, ta_path, tal_uri, validation_time, cir_out, static_root) = parse_args(&argv)?;
    let tal_bytes = std::fs::read(&tal_path).map_err(|e| format!("read tal failed: {e}"))?;
    let ta_bytes = std::fs::read(&ta_path).map_err(|e| format!("read ta failed: {e}"))?;
    let tal = rpki::data_model::tal::Tal::decode_bytes(&tal_bytes)
        .map_err(|e| format!("decode tal failed: {e}"))?;
    // CIR objects are addressed by rsync URI, so the TAL must advertise one;
    // the first rsync URI listed wins.
    let ta_rsync_uri = tal
        .ta_uris
        .iter()
        .find(|uri| uri.scheme() == "rsync")
        .ok_or("tal must contain an rsync URI")?
        .as_str()
        .to_string();
    let sha = sha2::Sha256::digest(&ta_bytes);
    let hash_hex = hex::encode(sha);
    // Content-addressed storage: the pool entry must exist before the CIR
    // that references it is written out.
    write_bytes_to_static_pool(&static_root, validation_time.date(), &hash_hex, &ta_bytes)
        .map_err(|e| format!("write static pool failed: {e}"))?;
    let cir = CanonicalInputRepresentation {
        version: CIR_VERSION_V1,
        hash_alg: CirHashAlgorithm::Sha256,
        validation_time,
        objects: vec![CirObject {
            rsync_uri: ta_rsync_uri,
            sha256: sha.to_vec(),
        }],
        tals: vec![CirTal { tal_uri, tal_bytes }],
    };
    let der = encode_cir(&cir).map_err(|e| format!("encode cir failed: {e}"))?;
    if let Some(parent) = cir_out.parent() {
        std::fs::create_dir_all(parent).map_err(|e| format!("create cir parent failed: {e}"))?;
    }
    std::fs::write(&cir_out, der).map_err(|e| format!("write cir failed: {e}"))?;
    Ok(())
}

199
src/bin/db_stats.rs Normal file
View File

@ -0,0 +1,199 @@
use std::collections::BTreeMap;
use std::path::PathBuf;
use rocksdb::{DB, IteratorMode, Options};
use rpki::storage::{
ALL_COLUMN_FAMILY_NAMES, CF_AUDIT_RULE_INDEX, CF_RAW_BY_HASH, CF_REPOSITORY_VIEW,
CF_RRDP_SOURCE, CF_RRDP_SOURCE_MEMBER, CF_RRDP_URI_OWNER, CF_VCIR, column_family_descriptors,
};
/// How key counts are obtained for each column family.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum DbStatsMode {
    /// Use the `rocksdb.estimate-num-keys` property (fast, approximate).
    Estimate,
    /// Iterate every key in each column family (slower, exact).
    Exact,
}
/// Reporting bucket for a column family; `Ord` fixes the output ordering
/// when groups are printed from a BTreeMap.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum CfGroup {
    CurrentRepositoryView,
    CurrentValidationState,
    CurrentRrdpState,
    /// Any column family not mapped to a current keyspace (see `cf_group`).
    LegacyCompatibility,
}
impl CfGroup {
    /// Stable snake_case label used in the `group_<name>=<count>` output lines.
    fn as_str(self) -> &'static str {
        match self {
            Self::CurrentRepositoryView => "current_repository_view",
            Self::CurrentValidationState => "current_validation_state",
            Self::CurrentRrdpState => "current_rrdp_state",
            Self::LegacyCompatibility => "legacy_compatibility",
        }
    }
}
/// Build the `--help` text. Returns an owned `String` because the binary
/// name is interpolated via `format!`; the literal's interior layout is part
/// of the printed output, so it is kept verbatim.
fn usage() -> String {
    let bin = "db_stats";
    format!(
        "\
Usage:
{bin} --db <path> [--exact]
Options:
--db <path> RocksDB directory
--exact Iterate to count keys (slower; default uses RocksDB estimates)
--help Show this help
Output groups:
- current_repository_view: repository_view + raw_by_hash
- current_validation_state: vcir + audit_rule_index
- current_rrdp_state: rrdp_source + rrdp_source_member + rrdp_uri_owner
"
    )
}
/// Approximate key count for one column family via the
/// `rocksdb.estimate-num-keys` property; `Ok(None)` when RocksDB offers no
/// estimate, `Err` when the column family is absent.
fn estimate_keys(db: &DB, cf_name: &str) -> Result<Option<u64>, Box<dyn std::error::Error>> {
    let Some(cf) = db.cf_handle(cf_name) else {
        return Err(format!("missing column family: {cf_name}").into());
    };
    let estimate = db.property_int_value_cf(cf, "rocksdb.estimate-num-keys")?;
    Ok(estimate)
}
/// Exact key count for one column family, obtained by walking every entry.
fn exact_keys(db: &DB, cf_name: &str) -> Result<u64, Box<dyn std::error::Error>> {
    let Some(cf) = db.cf_handle(cf_name) else {
        return Err(format!("missing column family: {cf_name}").into());
    };
    // Each iterator item is a Result; the fold counts entries and aborts on
    // the first storage error.
    let count = db
        .iterator_cf(cf, IteratorMode::Start)
        .try_fold(0u64, |acc, item| item.map(|_| acc + 1))?;
    Ok(count)
}
fn cf_group(cf_name: &str) -> CfGroup {
match cf_name {
CF_REPOSITORY_VIEW | CF_RAW_BY_HASH => CfGroup::CurrentRepositoryView,
CF_VCIR | CF_AUDIT_RULE_INDEX => CfGroup::CurrentValidationState,
CF_RRDP_SOURCE | CF_RRDP_SOURCE_MEMBER | CF_RRDP_URI_OWNER => CfGroup::CurrentRrdpState,
_ => CfGroup::LegacyCompatibility,
}
}
/// Aggregate per-column-family key counts into per-group totals.
fn summarize_counts<'a>(
    counts: impl IntoIterator<Item = (&'a str, u64)>,
) -> BTreeMap<CfGroup, u64> {
    counts
        .into_iter()
        .fold(BTreeMap::new(), |mut grouped, (cf_name, count)| {
            *grouped.entry(cf_group(cf_name)).or_insert(0) += count;
            grouped
        })
}
/// Label printed on the `mode=` line of the report.
fn mode_label(mode: DbStatsMode) -> &'static str {
    if mode == DbStatsMode::Exact {
        "exact"
    } else {
        "estimate"
    }
}
/// Print key-count statistics for every column family of a RocksDB
/// instance, plus per-group aggregates and the number of live SST files.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let argv: Vec<String> = std::env::args().collect();
    // Help is handled by a pre-scan so it works in any argument position.
    if argv.iter().any(|a| a == "--help" || a == "-h") {
        print!("{}", usage());
        return Ok(());
    }
    let mut db_path: Option<PathBuf> = None;
    let mut mode = DbStatsMode::Estimate;
    // Manual flag scan: i starts at 1 to skip the program name.
    let mut i = 1usize;
    while i < argv.len() {
        match argv[i].as_str() {
            "--db" => {
                i += 1;
                let v = argv.get(i).ok_or("--db requires a value")?;
                db_path = Some(PathBuf::from(v));
            }
            "--exact" => mode = DbStatsMode::Exact,
            other => return Err(format!("unknown argument: {other}\n\n{}", usage()).into()),
        }
        i += 1;
    }
    let db_path = db_path.ok_or_else(|| format!("--db is required\n\n{}", usage()))?;
    // Inspect-only: never create the DB or any missing column family.
    let mut opts = Options::default();
    opts.create_if_missing(false);
    opts.create_missing_column_families(false);
    let db = DB::open_cf_descriptors(&opts, &db_path, column_family_descriptors())?;
    println!("db={}", db_path.display());
    println!("mode={}", mode_label(mode));
    let mut per_cf = Vec::with_capacity(ALL_COLUMN_FAMILY_NAMES.len());
    let mut total: u64 = 0;
    for &name in ALL_COLUMN_FAMILY_NAMES {
        let n = match mode {
            DbStatsMode::Exact => exact_keys(&db, name)?,
            // A missing estimate is reported as 0 rather than an error.
            DbStatsMode::Estimate => estimate_keys(&db, name)?.unwrap_or(0),
        };
        total = total.saturating_add(n);
        per_cf.push((name, n));
        println!("{name}={n}");
    }
    println!("total={total}");
    for (group, count) in summarize_counts(per_cf.iter().copied()) {
        println!("group_{}={count}", group.as_str());
    }
    let live = db.live_files()?;
    println!("sst_files={}", live.len());
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    // Spot-checks the cf_name -> group mapping, including the legacy fallback.
    #[test]
    fn cf_group_classifies_current_and_legacy_keyspaces() {
        assert_eq!(cf_group(CF_REPOSITORY_VIEW), CfGroup::CurrentRepositoryView);
        assert_eq!(cf_group(CF_RAW_BY_HASH), CfGroup::CurrentRepositoryView);
        assert_eq!(cf_group(CF_VCIR), CfGroup::CurrentValidationState);
        assert_eq!(
            cf_group(CF_AUDIT_RULE_INDEX),
            CfGroup::CurrentValidationState
        );
        assert_eq!(cf_group(CF_RRDP_SOURCE), CfGroup::CurrentRrdpState);
        assert_eq!(cf_group(CF_RRDP_URI_OWNER), CfGroup::CurrentRrdpState);
        assert_eq!(cf_group("unknown_legacy"), CfGroup::LegacyCompatibility);
    }
    // Sums land in the right buckets and absent groups stay absent.
    #[test]
    fn summarize_counts_accumulates_by_group() {
        let grouped = summarize_counts([
            (CF_REPOSITORY_VIEW, 5),
            (CF_RAW_BY_HASH, 7),
            (CF_VCIR, 11),
            (CF_AUDIT_RULE_INDEX, 13),
            (CF_RRDP_SOURCE_MEMBER, 19),
        ]);
        assert_eq!(grouped.get(&CfGroup::CurrentRepositoryView), Some(&12));
        assert_eq!(grouped.get(&CfGroup::CurrentValidationState), Some(&24));
        assert_eq!(grouped.get(&CfGroup::CurrentRrdpState), Some(&19));
        assert_eq!(grouped.get(&CfGroup::LegacyCompatibility), None);
    }
    // Guards the help text against silently losing documented features.
    #[test]
    fn usage_mentions_grouped_output_and_exact_mode() {
        let text = usage();
        assert!(text.contains("--exact"), "{text}");
        assert!(text.contains("current_validation_state"), "{text}");
        assert!(text.contains("current_rrdp_state"), "{text}");
    }
}

View File

@ -0,0 +1,257 @@
use rpki::bundle::record_io::load_validation_time;
use rpki::storage::RocksStore;
use rpki::validation::run_tree_from_tal::{
run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit,
run_tree_from_tal_and_ta_der_payload_replay_serial_audit,
};
use rpki::validation::tree::TreeRunConfig;
use serde::Serialize;
use std::fs;
use std::path::{Path, PathBuf};
use std::time::Instant;
/// One-line CLI synopsis, surfaced through error messages and `--help`.
fn usage() -> &'static str {
    const TEXT: &str = "Usage: measure_sequence_replay --bundle-root <dir> [--rir <rir[,rir...]>] --out <path> [--keep-db]";
    TEXT
}
/// Parsed CLI options for measure_sequence_replay.
#[derive(Default)]
struct Args {
    // Directory holding one sub-directory per RIR bundle.
    bundle_root: Option<PathBuf>,
    // Explicit RIR selection (lowercased); None means auto-discover.
    rirs: Option<Vec<String>>,
    // Path of the JSON timing report to write.
    out: Option<PathBuf>,
    // When set, the per-RIR scratch RocksDB is left on disk for inspection.
    keep_db: bool,
}
/// Parse the process arguments into `Args`.
///
/// `--rir` accepts a comma-separated list; entries are trimmed, lowercased,
/// and empty items dropped. `--help` is surfaced through the Err path so the
/// caller prints it and exits non-zero.
fn parse_args() -> Result<Args, String> {
    let mut parsed = Args::default();
    let argv: Vec<String> = std::env::args().skip(1).collect();
    // Walk the arguments with an iterator; `next()` on a value-taking flag
    // consumes its operand.
    let mut args = argv.iter();
    while let Some(flag) = args.next() {
        match flag.as_str() {
            "--bundle-root" => {
                let value = args.next().ok_or("--bundle-root requires a value")?;
                parsed.bundle_root = Some(PathBuf::from(value));
            }
            "--rir" => {
                let value = args.next().ok_or("--rir requires a value")?;
                let rirs: Vec<String> = value
                    .split(',')
                    .map(|s| s.trim().to_lowercase())
                    .filter(|s| !s.is_empty())
                    .collect();
                parsed.rirs = Some(rirs);
            }
            "--out" => {
                let value = args.next().ok_or("--out requires a value")?;
                parsed.out = Some(PathBuf::from(value));
            }
            "--keep-db" => parsed.keep_db = true,
            "--help" | "-h" => return Err(usage().to_string()),
            other => return Err(format!("unknown argument: {other}\n{}", usage())),
        }
    }
    if parsed.bundle_root.is_none() || parsed.out.is_none() {
        return Err(format!("--bundle-root and --out are required\n{}", usage()));
    }
    Ok(parsed)
}
/// Wall-clock timing and payload counts for one replay phase
/// (the base snapshot or a single delta step).
#[derive(Serialize)]
struct PhaseTiming {
    duration_seconds: f64,
    vrp_count: usize,
    vap_count: usize,
}
/// Per-RIR result: the base replay plus one (step id, timing) entry per
/// delta step, in replay order.
#[derive(Serialize)]
struct RirTiming {
    rir: String,
    base: PhaseTiming,
    steps: Vec<(String, PhaseTiming)>,
}
/// List the sub-directories of `bundle_root` that look like complete RIR
/// bundles (contain both `bundle.json` and `tal.tal`), sorted by name.
fn discover_rirs(bundle_root: &Path) -> Result<Vec<String>, String> {
    let entries = fs::read_dir(bundle_root)
        .map_err(|e| format!("read_dir failed: {}: {e}", bundle_root.display()))?;
    let mut found = Vec::new();
    for entry in entries {
        let path = entry
            .map_err(|e| format!("read_dir entry failed: {e}"))?
            .path();
        // A bundle directory must carry both its manifest and its TAL.
        let is_bundle =
            path.is_dir() && path.join("bundle.json").exists() && path.join("tal.tal").exists();
        if !is_bundle {
            continue;
        }
        let name = path
            .file_name()
            .and_then(|s| s.to_str())
            .ok_or_else(|| format!("invalid rir dir name: {}", path.display()))?;
        found.push(name.to_string());
    }
    found.sort();
    Ok(found)
}
/// Join a bundle-relative component onto `root`.
///
/// Generalized from `&str` to any path-like value (`&str`, `String`,
/// `&Path`, `PathBuf`, ...); existing `&str` callers compile unchanged
/// because `str` implements `AsRef<Path>`.
fn path_join(root: &Path, relative: impl AsRef<Path>) -> PathBuf {
    root.join(relative)
}
/// Thin entry point: report any failure from `real_main` on stderr and
/// exit with status 1.
fn main() {
    match real_main() {
        Ok(()) => {}
        Err(message) => {
            eprintln!("{message}");
            std::process::exit(1);
        }
    }
}
/// Replay every selected RIR bundle (base snapshot first, then each delta
/// step in bundle order) against a scratch RocksDB and write per-phase
/// timings plus VRP/VAP counts as a JSON report at `--out`.
fn real_main() -> Result<(), String> {
    let args = parse_args()?;
    // parse_args guarantees both values are present.
    let bundle_root = args.bundle_root.unwrap();
    let out_path = args.out.unwrap();
    let rirs = match args.rirs {
        Some(v) => v,
        None => discover_rirs(&bundle_root)?,
    };
    let mut results = Vec::new();
    // Scratch area next to the output file; holds one RocksDB per RIR.
    // NOTE(review): tmp_root itself is never removed, only the per-RIR DBs.
    let tmp_root = out_path
        .parent()
        .unwrap_or_else(|| Path::new("."))
        .join(".tmp-sequence-replay");
    fs::create_dir_all(&tmp_root)
        .map_err(|e| format!("create tmp root failed: {}: {e}", tmp_root.display()))?;
    for rir in rirs {
        let rir_dir = bundle_root.join(&rir);
        let bundle: serde_json::Value = serde_json::from_slice(
            &fs::read(rir_dir.join("bundle.json"))
                .map_err(|e| format!("read bundle failed: {}: {e}", rir_dir.display()))?,
        )
        .map_err(|e| format!("parse bundle failed for {}: {e}", rir_dir.display()))?;
        let tal_bytes = fs::read(rir_dir.join("tal.tal"))
            .map_err(|e| format!("read tal.tal failed for {}: {e}", rir_dir.display()))?;
        let ta_bytes = fs::read(rir_dir.join("ta.cer"))
            .map_err(|e| format!("read ta.cer failed for {}: {e}", rir_dir.display()))?;
        // Always start from an empty database so timings are reproducible.
        let db_dir = tmp_root.join(format!("{rir}-db"));
        if db_dir.exists() {
            fs::remove_dir_all(&db_dir)
                .map_err(|e| format!("remove old db failed: {}: {e}", db_dir.display()))?;
        }
        let store =
            RocksStore::open(&db_dir).map_err(|e| format!("open rocksdb failed for {rir}: {e}"))?;
        let base_archive = path_join(
            &rir_dir,
            bundle["base"]["relativeArchivePath"]
                .as_str()
                .ok_or("bundle missing base.relativeArchivePath")?,
        );
        let base_locks = path_join(
            &rir_dir,
            bundle["base"]["relativeLocksPath"]
                .as_str()
                .ok_or("bundle missing base.relativeLocksPath")?,
        );
        let base_validation_time = load_validation_time(&base_locks)
            .map_err(|e| format!("load base validation time failed for {rir}: {e}"))?;
        // Time only the replay call itself, not the file reads above.
        let start = Instant::now();
        let base_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
            &store,
            &rpki::policy::Policy::default(),
            &tal_bytes,
            &ta_bytes,
            None,
            &base_archive,
            &base_locks,
            base_validation_time,
            &TreeRunConfig {
                max_depth: None,
                max_instances: None,
            },
        )
        .map_err(|e| format!("base replay failed for {rir}: {e}"))?;
        // vap_count is taken from tree.aspas — ASPA payloads.
        let base_timing = PhaseTiming {
            duration_seconds: start.elapsed().as_secs_f64(),
            vrp_count: base_out.tree.vrps.len(),
            vap_count: base_out.tree.aspas.len(),
        };
        // Each delta step transitions from the previous step's locks file.
        let mut previous_locks = base_locks.clone();
        let mut step_timings = Vec::new();
        for step in bundle["deltaSequence"]["steps"]
            .as_array()
            .ok_or("bundle missing deltaSequence.steps")?
        {
            let step_id = step["id"].as_str().ok_or("step missing id")?.to_string();
            let step_dir = path_join(
                &rir_dir,
                step["relativePath"]
                    .as_str()
                    .ok_or("step missing relativePath")?,
            );
            let delta_archive = path_join(
                &rir_dir,
                step["relativeArchivePath"]
                    .as_str()
                    .ok_or("step missing relativeArchivePath")?,
            );
            let delta_locks = path_join(
                &rir_dir,
                step["relativeTransitionLocksPath"]
                    .as_str()
                    .ok_or("step missing relativeTransitionLocksPath")?,
            );
            let validation_time = load_validation_time(&delta_locks).map_err(|e| {
                format!("load step validation time failed for {rir}/{step_id}: {e}")
            })?;
            let start = Instant::now();
            let step_out = run_tree_from_tal_and_ta_der_payload_delta_replay_step_serial_audit(
                &store,
                &rpki::policy::Policy::default(),
                &tal_bytes,
                &ta_bytes,
                None,
                &delta_archive,
                &previous_locks,
                &delta_locks,
                validation_time,
                &TreeRunConfig {
                    max_depth: None,
                    max_instances: None,
                },
            )
            .map_err(|e| format!("delta step replay failed for {rir}/{step_id}: {e}"))?;
            step_timings.push((
                step_id.clone(),
                PhaseTiming {
                    duration_seconds: start.elapsed().as_secs_f64(),
                    vrp_count: step_out.tree.vrps.len(),
                    vap_count: step_out.tree.aspas.len(),
                },
            ));
            // NOTE(review): the next step's "previous" locks come from a fixed
            // target-locks.json inside the step dir — confirm the recorder
            // always writes that file name.
            previous_locks = step_dir.join("target-locks.json");
        }
        results.push(RirTiming {
            rir,
            base: base_timing,
            steps: step_timings,
        });
        // Drop the scratch DB unless the caller asked to keep it.
        if !args.keep_db && db_dir.exists() {
            fs::remove_dir_all(&db_dir)
                .map_err(|e| format!("remove db failed: {}: {e}", db_dir.display()))?;
        }
    }
    fs::write(
        &out_path,
        serde_json::to_vec_pretty(&results).map_err(|e| format!("encode json failed: {e}"))?,
    )
    .map_err(|e| format!("write out failed: {}: {e}", out_path.display()))?;
    println!("{}", out_path.display());
    Ok(())
}

View File

@ -0,0 +1,413 @@
use rpki::bundle::{
RecordingHttpFetcher, RecordingRsyncFetcher, RirBundleMetadata,
build_single_rir_bundle_manifest, build_vap_compare_rows, build_vrp_compare_rows, sha256_hex,
write_json, write_live_base_replay_bundle_inputs, write_live_bundle_rir_readme,
write_live_bundle_top_readme, write_timing_json, write_vap_csv, write_vrp_csv,
};
use rpki::ccr::{build_ccr_from_run, verify_content_info, write_ccr_file};
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use rpki::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
use rpki::policy::Policy;
use rpki::storage::RocksStore;
use rpki::validation::run_tree_from_tal::{
run_tree_from_tal_and_ta_der_payload_replay_serial_audit,
run_tree_from_tal_and_ta_der_serial_audit,
};
use rpki::validation::tree::TreeRunConfig;
use std::fs;
use std::path::PathBuf;
use std::time::Instant;
use time::format_description::well_known::Rfc3339;
/// Parsed CLI options for replay_bundle_capture.
#[derive(Debug, Default, PartialEq, Eq)]
struct Args {
    rir: Option<String>,
    out_dir: Option<PathBuf>,
    tal_path: Option<PathBuf>,
    ta_path: Option<PathBuf>,
    // When absent, "now" (UTC) is substituted at run time.
    validation_time: Option<time::OffsetDateTime>,
    // Defaults are seeded in parse_args (20s HTTP, 60s rsync).
    http_timeout_secs: u64,
    rsync_timeout_secs: u64,
    rsync_mirror_root: Option<PathBuf>,
    max_depth: Option<usize>,
    max_instances: Option<usize>,
    // Compare-view label; defaults to the lowercased RIR name.
    trust_anchor: Option<String>,
}
/// One-line CLI synopsis, surfaced through error messages and `--help`.
fn usage() -> &'static str {
    const TEXT: &str = "Usage: replay_bundle_capture --rir <name> --out-dir <path> --tal-path <path> --ta-path <path> [--validation-time <rfc3339>] [--http-timeout-secs <n>] [--rsync-timeout-secs <n>] [--rsync-mirror-root <path>] [--max-depth <n>] [--max-instances <n>] [--trust-anchor <name>]";
    TEXT
}
/// Parse CLI flags into `Args`.
///
/// `--rir`, `--out-dir`, `--tal-path` and `--ta-path` are mandatory; the
/// timeouts default to 20s (HTTP) and 60s (rsync). `--help` is returned
/// through the Err path so main prints it and exits non-zero.
fn parse_args(argv: &[String]) -> Result<Args, String> {
    // Seed the struct with the non-zero timeout defaults before scanning.
    let mut args = Args {
        http_timeout_secs: 20,
        rsync_timeout_secs: 60,
        ..Args::default()
    };
    // i starts at 1 to skip the program name; value flags consume argv[i+1].
    let mut i = 1usize;
    while i < argv.len() {
        match argv[i].as_str() {
            "--help" | "-h" => return Err(usage().to_string()),
            "--rir" => {
                i += 1;
                args.rir = Some(argv.get(i).ok_or("--rir requires a value")?.clone());
            }
            "--out-dir" => {
                i += 1;
                args.out_dir = Some(PathBuf::from(
                    argv.get(i).ok_or("--out-dir requires a value")?,
                ));
            }
            "--tal-path" => {
                i += 1;
                args.tal_path = Some(PathBuf::from(
                    argv.get(i).ok_or("--tal-path requires a value")?,
                ));
            }
            "--ta-path" => {
                i += 1;
                args.ta_path = Some(PathBuf::from(
                    argv.get(i).ok_or("--ta-path requires a value")?,
                ));
            }
            "--validation-time" => {
                i += 1;
                let value = argv.get(i).ok_or("--validation-time requires a value")?;
                // Strict RFC 3339 timestamp, e.g. 2026-04-01T00:00:00Z.
                args.validation_time = Some(
                    time::OffsetDateTime::parse(value, &Rfc3339)
                        .map_err(|e| format!("invalid --validation-time: {e}"))?,
                );
            }
            "--http-timeout-secs" => {
                i += 1;
                args.http_timeout_secs = argv
                    .get(i)
                    .ok_or("--http-timeout-secs requires a value")?
                    .parse()
                    .map_err(|e| format!("invalid --http-timeout-secs: {e}"))?;
            }
            "--rsync-timeout-secs" => {
                i += 1;
                args.rsync_timeout_secs = argv
                    .get(i)
                    .ok_or("--rsync-timeout-secs requires a value")?
                    .parse()
                    .map_err(|e| format!("invalid --rsync-timeout-secs: {e}"))?;
            }
            "--rsync-mirror-root" => {
                i += 1;
                args.rsync_mirror_root = Some(PathBuf::from(
                    argv.get(i).ok_or("--rsync-mirror-root requires a value")?,
                ));
            }
            "--max-depth" => {
                i += 1;
                args.max_depth = Some(
                    argv.get(i)
                        .ok_or("--max-depth requires a value")?
                        .parse()
                        .map_err(|e| format!("invalid --max-depth: {e}"))?,
                );
            }
            "--max-instances" => {
                i += 1;
                args.max_instances = Some(
                    argv.get(i)
                        .ok_or("--max-instances requires a value")?
                        .parse()
                        .map_err(|e| format!("invalid --max-instances: {e}"))?,
                );
            }
            "--trust-anchor" => {
                i += 1;
                args.trust_anchor = Some(
                    argv.get(i)
                        .ok_or("--trust-anchor requires a value")?
                        .clone(),
                );
            }
            other => return Err(format!("unknown argument: {other}\n{}", usage())),
        }
        i += 1;
    }
    if args.rir.is_none() {
        return Err(format!("--rir is required\n{}", usage()));
    }
    if args.out_dir.is_none() {
        return Err(format!("--out-dir is required\n{}", usage()));
    }
    if args.tal_path.is_none() {
        return Err(format!("--tal-path is required\n{}", usage()));
    }
    if args.ta_path.is_none() {
        return Err(format!("--ta-path is required\n{}", usage()));
    }
    Ok(args)
}
fn run(args: Args) -> Result<PathBuf, String> {
let rir = args.rir.as_ref().unwrap();
let rir_normalized = rir.to_ascii_lowercase();
let trust_anchor = args
.trust_anchor
.clone()
.unwrap_or_else(|| rir_normalized.clone());
let out_root = args.out_dir.as_ref().unwrap();
let rir_dir = out_root.join(&rir_normalized);
fs::create_dir_all(&rir_dir)
.map_err(|e| format!("create rir dir failed: {}: {e}", rir_dir.display()))?;
let tal_bytes =
fs::read(args.tal_path.as_ref().unwrap()).map_err(|e| format!("read tal failed: {e}"))?;
let ta_bytes =
fs::read(args.ta_path.as_ref().unwrap()).map_err(|e| format!("read ta failed: {e}"))?;
let validation_time = args
.validation_time
.unwrap_or_else(time::OffsetDateTime::now_utc);
let db_dir = out_root.join(".tmp").join(format!("{rir}-live-base-db"));
let replay_db_dir = out_root.join(".tmp").join(format!("{rir}-self-replay-db"));
let _ = fs::remove_dir_all(&db_dir);
let _ = fs::remove_dir_all(&replay_db_dir);
if let Some(parent) = db_dir.parent() {
fs::create_dir_all(parent)
.map_err(|e| format!("create tmp dir failed: {}: {e}", parent.display()))?;
}
let store = RocksStore::open(&db_dir).map_err(|e| format!("open rocksdb failed: {e}"))?;
let http = RecordingHttpFetcher::new(
BlockingHttpFetcher::new(HttpFetcherConfig {
timeout: std::time::Duration::from_secs(args.http_timeout_secs),
..HttpFetcherConfig::default()
})
.map_err(|e| format!("create http fetcher failed: {e}"))?,
);
let rsync = RecordingRsyncFetcher::new(SystemRsyncFetcher::new(SystemRsyncConfig {
timeout: std::time::Duration::from_secs(args.rsync_timeout_secs),
mirror_root: args.rsync_mirror_root.clone(),
..SystemRsyncConfig::default()
}));
let started = Instant::now();
let out = run_tree_from_tal_and_ta_der_serial_audit(
&store,
&Policy::default(),
&tal_bytes,
&ta_bytes,
None,
&http,
&rsync,
validation_time,
&TreeRunConfig {
max_depth: args.max_depth,
max_instances: args.max_instances,
},
)
.map_err(|e| format!("live base run failed: {e}"))?;
let duration = started.elapsed();
let ccr = build_ccr_from_run(
&store,
&[out.discovery.trust_anchor.clone()],
&out.tree.vrps,
&out.tree.aspas,
&out.tree.router_keys,
validation_time,
)
.map_err(|e| format!("build ccr failed: {e}"))?;
let base_ccr_path = rir_dir.join("base.ccr");
write_ccr_file(&base_ccr_path, &ccr).map_err(|e| format!("write ccr failed: {e}"))?;
let ccr_bytes = fs::read(&base_ccr_path)
.map_err(|e| format!("read written ccr failed: {}: {e}", base_ccr_path.display()))?;
let decoded = rpki::ccr::decode_content_info(&ccr_bytes)
.map_err(|e| format!("decode written ccr failed: {e}"))?;
let verify = verify_content_info(&decoded).map_err(|e| format!("verify ccr failed: {e}"))?;
let vrp_rows = build_vrp_compare_rows(&out.tree.vrps, &trust_anchor);
let vap_rows = build_vap_compare_rows(&out.tree.aspas, &trust_anchor);
let (ccr_vrps, ccr_vaps) = rpki::bundle::decode_ccr_compare_views(&decoded, &trust_anchor)?;
if vrp_rows != ccr_vrps {
return Err("base-vrps compare view does not match base.ccr".to_string());
}
if vap_rows != ccr_vaps {
return Err("base-vaps compare view does not match base.ccr".to_string());
}
write_vrp_csv(&rir_dir.join("base-vrps.csv"), &vrp_rows)?;
write_vap_csv(&rir_dir.join("base-vaps.csv"), &vap_rows)?;
fs::write(rir_dir.join("tal.tal"), &tal_bytes).map_err(|e| format!("write tal failed: {e}"))?;
fs::write(rir_dir.join("ta.cer"), &ta_bytes).map_err(|e| format!("write ta failed: {e}"))?;
let capture = write_live_base_replay_bundle_inputs(
&rir_dir,
&rir_normalized,
validation_time,
&out.publication_points,
&store,
&http.snapshot_responses(),
&rsync.snapshot_fetches(),
)?;
let replay_store = RocksStore::open(&replay_db_dir)
.map_err(|e| format!("open self replay rocksdb failed: {e}"))?;
let replay_out = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
&replay_store,
&Policy::default(),
&tal_bytes,
&ta_bytes,
None,
&rir_dir.join("base-payload-archive"),
&rir_dir.join("base-locks.json"),
validation_time,
&TreeRunConfig {
max_depth: args.max_depth,
max_instances: args.max_instances,
},
)
.map_err(|e| format!("self replay failed: {e}"))?;
let replay_vrps = build_vrp_compare_rows(&replay_out.tree.vrps, &trust_anchor);
let replay_vaps = build_vap_compare_rows(&replay_out.tree.aspas, &trust_anchor);
if replay_vrps != vrp_rows {
return Err("self replay VRP compare view mismatch".to_string());
}
if replay_vaps != vap_rows {
return Err("self replay VAP compare view mismatch".to_string());
}
fs::create_dir_all(rir_dir.join("timings"))
.map_err(|e| format!("create timings dir failed: {e}"))?;
write_timing_json(
&rir_dir.join("timings").join("base-produce.json"),
"base",
&validation_time,
duration,
)?;
let metadata = RirBundleMetadata {
schema_version: "20260330-v1".to_string(),
bundle_producer: "ours".to_string(),
rir: rir_normalized.clone(),
base_validation_time: validation_time
.format(&Rfc3339)
.map_err(|e| format!("format validation time failed: {e}"))?,
delta_validation_time: None,
tal_sha256: sha256_hex(&tal_bytes),
ta_cert_sha256: sha256_hex(&ta_bytes),
base_ccr_sha256: sha256_hex(&ccr_bytes),
delta_ccr_sha256: None,
has_aspa: !vap_rows.is_empty(),
has_router_key: verify.router_key_count > 0,
base_vrp_count: vrp_rows.len(),
base_vap_count: vap_rows.len(),
delta_vrp_count: None,
delta_vap_count: None,
};
write_json(&rir_dir.join("bundle.json"), &metadata)?;
write_json(
&rir_dir.join("verification.json"),
&serde_json::json!({
"base": {
"validationTime": metadata.base_validation_time,
"ccr": {
"path": "base.ccr",
"sha256": metadata.base_ccr_sha256,
"stateHashesOk": verify.state_hashes_ok,
"manifestInstances": verify.manifest_instances,
"roaVrpCount": verify.roa_vrp_count,
"aspaPayloadSets": verify.aspa_payload_sets,
"routerKeyCount": verify.router_key_count,
},
"compareViews": {
"vrpsSelfMatch": true,
"vapsSelfMatch": true,
"baseVrpCount": metadata.base_vrp_count,
"baseVapCount": metadata.base_vap_count,
},
"capture": {
"captureId": capture.capture_id,
"rrdpRepoCount": capture.rrdp_repo_count,
"rsyncModuleCount": capture.rsync_module_count,
"selfReplayOk": true,
}
}
}),
)?;
write_live_bundle_top_readme(&out_root.join("README.md"), &rir_normalized)?;
write_live_bundle_rir_readme(
&rir_dir.join("README.md"),
&rir_normalized,
&metadata.base_validation_time,
)?;
write_json(
&out_root.join("bundle-manifest.json"),
&build_single_rir_bundle_manifest(
"20260330-v1",
"ours",
&rir_normalized,
&validation_time,
None,
metadata.has_aspa,
)?,
)?;
let _ = fs::remove_dir_all(&db_dir);
let _ = fs::remove_dir_all(&replay_db_dir);
Ok(out_root.clone())
}
/// Entry point: parse process arguments, run the capture, and print the
/// resulting output directory on success.
fn main() -> Result<(), String> {
    let argv: Vec<String> = std::env::args().collect();
    let parsed = parse_args(&argv)?;
    let out_root = run(parsed)?;
    println!("{}", out_root.display());
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    /// A fully specified argv populates the required fields and leaves the
    /// timeout options at their defaults (20s HTTP, 60s rsync).
    #[test]
    fn parse_args_requires_required_flags() {
        let argv: Vec<String> = [
            "replay_bundle_capture",
            "--rir",
            "apnic",
            "--out-dir",
            "out",
            "--tal-path",
            "tal",
            "--ta-path",
            "ta",
        ]
        .iter()
        .map(|s| s.to_string())
        .collect();
        let parsed = parse_args(&argv).expect("parse");
        assert_eq!(parsed.rir.as_deref(), Some("apnic"));
        assert_eq!(parsed.out_dir.as_deref(), Some(std::path::Path::new("out")));
        assert_eq!(parsed.http_timeout_secs, 20);
        assert_eq!(parsed.rsync_timeout_secs, 60);
    }

    /// Omitting every flag must produce a clear "required" error.
    #[test]
    fn parse_args_rejects_missing_requireds() {
        let err = parse_args(&["replay_bundle_capture".to_string()]).unwrap_err();
        assert!(err.contains("--rir is required"), "{err}");
    }

    /// The timing JSON must record the run mode and the duration in
    /// fractional seconds.
    #[test]
    fn write_timing_json_writes_duration_and_mode() {
        let dir = tempdir().expect("tempdir");
        let timing_path = dir.path().join("timings/base-produce.json");
        let when =
            time::OffsetDateTime::parse("2026-03-30T00:00:00Z", &Rfc3339).expect("time");
        write_timing_json(
            &timing_path,
            "base",
            &when,
            std::time::Duration::from_millis(1500),
        )
        .expect("write timing");
        let raw = std::fs::read(&timing_path).expect("read timing");
        let json: serde_json::Value = serde_json::from_slice(&raw).expect("parse");
        assert_eq!(json["mode"], "base");
        assert_eq!(json["durationSeconds"], 1.5);
    }
}

View File

@ -0,0 +1,483 @@
use rpki::bundle::{
RecordingHttpFetcher, RecordingRsyncFetcher, build_single_rir_bundle_manifest,
build_vap_compare_rows, build_vrp_compare_rows, copy_dir_all, load_validation_time, sha256_hex,
write_json, write_live_delta_replay_bundle_inputs, write_vap_csv, write_vrp_csv,
};
use rpki::ccr::{build_ccr_from_run, decode_content_info, verify_content_info, write_ccr_file};
use rpki::fetch::http::{BlockingHttpFetcher, HttpFetcherConfig};
use rpki::fetch::rsync_system::{SystemRsyncConfig, SystemRsyncFetcher};
use rpki::policy::Policy;
use rpki::storage::RocksStore;
use rpki::sync::rrdp::Fetcher;
use rpki::validation::run_tree_from_tal::{
run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit,
run_tree_from_tal_and_ta_der_payload_replay_serial_audit,
run_tree_from_tal_and_ta_der_serial_audit,
};
use rpki::validation::tree::TreeRunConfig;
use std::fs;
use std::path::{Path, PathBuf};
use std::time::Instant;
use time::format_description::well_known::Rfc3339;
/// Command-line options for the delta capture tool.
///
/// Filled in by `parse_args`; `rir`, `base_bundle_dir`, and `out_dir` are
/// mandatory there, the rest keep their defaults when omitted.
#[derive(Debug, Default, PartialEq, Eq)]
struct Args {
    // RIR name as given on the command line; `run` lowercases it before use.
    rir: Option<String>,
    // Existing base bundle directory this delta run builds on top of.
    base_bundle_dir: Option<PathBuf>,
    // Output directory; `run` removes it if present and recreates it from
    // a copy of the base bundle.
    out_dir: Option<PathBuf>,
    // Target validation time; `run` falls back to OffsetDateTime::now_utc().
    validation_time: Option<time::OffsetDateTime>,
    // HTTP fetch timeout in seconds (parse_args default: 20).
    http_timeout_secs: u64,
    // rsync fetch timeout in seconds (parse_args default: 60).
    rsync_timeout_secs: u64,
    // Optional mirror root handed to SystemRsyncConfig.
    rsync_mirror_root: Option<PathBuf>,
    // Forwarded to TreeRunConfig::max_depth.
    max_depth: Option<usize>,
    // Forwarded to TreeRunConfig::max_instances.
    max_instances: Option<usize>,
    // Trust-anchor label used for compare rows; `run` defaults it to the
    // normalized RIR name.
    trust_anchor: Option<String>,
}
/// One-line usage text, shown on `--help`/`-h` and embedded in argument
/// parsing errors.
fn usage() -> &'static str {
    "Usage: replay_bundle_capture_delta --rir <name> --base-bundle-dir <path> --out-dir <path> [--validation-time <rfc3339>] [--http-timeout-secs <n>] [--rsync-timeout-secs <n>] [--rsync-mirror-root <path>] [--max-depth <n>] [--max-instances <n>] [--trust-anchor <name>]"
}
/// Parse `argv` (program name at index 0) into [`Args`].
///
/// `--help`/`-h` short-circuits with the usage text as the error. Unknown
/// flags, flags missing their value, unparsable values, and absent required
/// flags all yield a descriptive `String` error.
fn parse_args(argv: &[String]) -> Result<Args, String> {
    // Fetch the value following a flag, advancing the cursor past it.
    fn next_value<'a>(argv: &'a [String], i: &mut usize, flag: &str) -> Result<&'a str, String> {
        *i += 1;
        argv.get(*i)
            .map(String::as_str)
            .ok_or_else(|| format!("{flag} requires a value"))
    }
    let mut out = Args {
        http_timeout_secs: 20,
        rsync_timeout_secs: 60,
        ..Args::default()
    };
    let mut i = 1usize;
    while i < argv.len() {
        let flag = argv[i].as_str();
        match flag {
            "--help" | "-h" => return Err(usage().to_string()),
            "--rir" => out.rir = Some(next_value(argv, &mut i, flag)?.to_string()),
            "--base-bundle-dir" => {
                out.base_bundle_dir = Some(PathBuf::from(next_value(argv, &mut i, flag)?));
            }
            "--out-dir" => {
                out.out_dir = Some(PathBuf::from(next_value(argv, &mut i, flag)?));
            }
            "--validation-time" => {
                let raw = next_value(argv, &mut i, flag)?;
                out.validation_time = Some(
                    time::OffsetDateTime::parse(raw, &Rfc3339)
                        .map_err(|e| format!("invalid --validation-time: {e}"))?,
                );
            }
            "--http-timeout-secs" => {
                out.http_timeout_secs = next_value(argv, &mut i, flag)?
                    .parse()
                    .map_err(|e| format!("invalid --http-timeout-secs: {e}"))?;
            }
            "--rsync-timeout-secs" => {
                out.rsync_timeout_secs = next_value(argv, &mut i, flag)?
                    .parse()
                    .map_err(|e| format!("invalid --rsync-timeout-secs: {e}"))?;
            }
            "--rsync-mirror-root" => {
                out.rsync_mirror_root = Some(PathBuf::from(next_value(argv, &mut i, flag)?));
            }
            "--max-depth" => {
                out.max_depth = Some(
                    next_value(argv, &mut i, flag)?
                        .parse()
                        .map_err(|e| format!("invalid --max-depth: {e}"))?,
                );
            }
            "--max-instances" => {
                out.max_instances = Some(
                    next_value(argv, &mut i, flag)?
                        .parse()
                        .map_err(|e| format!("invalid --max-instances: {e}"))?,
                );
            }
            "--trust-anchor" => {
                out.trust_anchor = Some(next_value(argv, &mut i, flag)?.to_string());
            }
            other => return Err(format!("unknown argument: {other}\n{}", usage())),
        }
        i += 1;
    }
    // Required flags, reported in the same order they are documented.
    for (missing, flag) in [
        (out.rir.is_none(), "--rir"),
        (out.base_bundle_dir.is_none(), "--base-bundle-dir"),
        (out.out_dir.is_none(), "--out-dir"),
    ] {
        if missing {
            return Err(format!("{flag} is required\n{}", usage()));
        }
    }
    Ok(out)
}
/// Ensure the recording HTTP fetcher has a captured snapshot response for
/// every RRDP repository whose stored target state advanced past the base
/// bundle's lock (same session id, strictly higher serial).
///
/// Reads `base-locks.json` from `base_bundle_dir`; repositories that do not
/// qualify (non-rrdp transport, missing lock fields, no store record, no
/// snapshot URI, or an already-recorded response) are silently skipped.
fn ensure_recorded_target_snapshots(
    store: &RocksStore,
    base_bundle_dir: &Path,
    http: &RecordingHttpFetcher<BlockingHttpFetcher>,
) -> Result<(), String> {
    let raw = fs::read(base_bundle_dir.join("base-locks.json"))
        .map_err(|e| format!("read base locks failed: {e}"))?;
    let base_locks: serde_json::Value =
        serde_json::from_slice(&raw).map_err(|e| format!("parse base locks failed: {e}"))?;
    let rrdp_locks = base_locks
        .get("rrdp")
        .and_then(|v| v.as_object())
        .cloned()
        .unwrap_or_default();
    for (notify_uri, lock) in rrdp_locks {
        // Only locks recorded over the rrdp transport carry a comparable
        // session/serial pair.
        if lock.get("transport").and_then(|v| v.as_str()) != Some("rrdp") {
            continue;
        }
        let (base_session, base_serial) = match (
            lock.get("session").and_then(|v| v.as_str()),
            lock.get("serial").and_then(|v| v.as_u64()),
        ) {
            (Some(session), Some(serial)) => (session, serial),
            _ => continue,
        };
        let record = match store
            .get_rrdp_source_record(&notify_uri)
            .map_err(|e| format!("read rrdp source record failed for {notify_uri}: {e}"))?
        {
            Some(record) => record,
            None => continue,
        };
        // The target state must have moved forward within the same session.
        let advanced = matches!(
            (record.last_session_id.as_deref(), record.last_serial),
            (Some(session), Some(serial)) if session == base_session && serial > base_serial
        );
        if !advanced {
            continue;
        }
        let Some(snapshot_uri) = record.last_snapshot_uri.as_deref() else {
            continue;
        };
        if http.snapshot_responses().contains_key(snapshot_uri) {
            continue;
        }
        // Fetch purely for the recording side effect; the body is discarded.
        let _ = http
            .fetch(snapshot_uri)
            .map_err(|e| format!("fetch target snapshot for {notify_uri} failed: {e}"))?;
    }
    Ok(())
}
/// Produce a "delta" capture on top of an existing base bundle.
///
/// Pipeline: copy the base bundle into `out_dir`, bootstrap the base state
/// from the base payload archive, run a live validation at the target time
/// through recording HTTP/rsync fetchers, emit and verify a delta CCR plus
/// compare-view CSVs, write the delta replay inputs, replay them in a fresh
/// store to confirm they reproduce the live output, then update the bundle's
/// JSON metadata/manifest. Returns the output root on success; any failure is
/// reported as a `String` error. Temporary RocksDB dirs under `out/.tmp` are
/// removed at the end (best-effort).
///
/// Precondition: `rir`, `base_bundle_dir`, and `out_dir` are `Some` —
/// guaranteed by `parse_args` (hence the `unwrap`s below).
fn run(args: Args) -> Result<PathBuf, String> {
    let rir = args.rir.as_ref().unwrap();
    let rir_normalized = rir.to_ascii_lowercase();
    let out_root = args.out_dir.as_ref().unwrap();
    let base_root = args.base_bundle_dir.as_ref().unwrap();
    let base_rir_dir = base_root.join(&rir_normalized);
    if !base_rir_dir.is_dir() {
        return Err(format!(
            "base bundle rir dir not found: {}",
            base_rir_dir.display()
        ));
    }
    // Start from a clean copy of the base bundle; the delta artifacts are
    // written into the copy, never into the base itself.
    if out_root.exists() {
        fs::remove_dir_all(out_root)
            .map_err(|e| format!("remove old out dir failed: {}: {e}", out_root.display()))?;
    }
    copy_dir_all(base_root, out_root)?;
    let rir_dir = out_root.join(&rir_normalized);
    // Compare rows are labeled with the trust anchor; default to the RIR name.
    let trust_anchor = args
        .trust_anchor
        .clone()
        .unwrap_or_else(|| rir_normalized.clone());
    let tal_bytes = fs::read(rir_dir.join("tal.tal"))
        .map_err(|e| format!("read tal from base bundle failed: {e}"))?;
    let ta_bytes = fs::read(rir_dir.join("ta.cer"))
        .map_err(|e| format!("read ta from base bundle failed: {e}"))?;
    let base_validation_time = load_validation_time(&rir_dir.join("base-locks.json"))?;
    let target_validation_time = args
        .validation_time
        .unwrap_or_else(time::OffsetDateTime::now_utc);
    // Scratch RocksDB locations under the output tree; wiped up-front in
    // case a previous run left them behind.
    let target_store_dir = out_root.join(".tmp").join(format!("{rir}-live-target-db"));
    let self_replay_dir = out_root.join(".tmp").join(format!("{rir}-self-delta-db"));
    let _ = fs::remove_dir_all(&target_store_dir);
    let _ = fs::remove_dir_all(&self_replay_dir);
    if let Some(parent) = target_store_dir.parent() {
        fs::create_dir_all(parent)
            .map_err(|e| format!("create tmp dir failed: {}: {e}", parent.display()))?;
    }
    let target_store = RocksStore::open(&target_store_dir)
        .map_err(|e| format!("open target rocksdb failed: {e}"))?;
    // Bootstrap: replay the base payload archive into the target store so the
    // live run below starts from the base bundle's recorded state. The tree
    // output itself is not needed here.
    let _base = run_tree_from_tal_and_ta_der_payload_replay_serial_audit(
        &target_store,
        &Policy::default(),
        &tal_bytes,
        &ta_bytes,
        None,
        &rir_dir.join("base-payload-archive"),
        &rir_dir.join("base-locks.json"),
        base_validation_time,
        &TreeRunConfig {
            max_depth: args.max_depth,
            max_instances: args.max_instances,
        },
    )
    .map_err(|e| format!("base bootstrap replay failed: {e}"))?;
    // Recording fetchers capture every HTTP/rsync exchange for later replay.
    let http = RecordingHttpFetcher::new(
        BlockingHttpFetcher::new(HttpFetcherConfig {
            timeout: std::time::Duration::from_secs(args.http_timeout_secs),
            ..HttpFetcherConfig::default()
        })
        .map_err(|e| format!("create http fetcher failed: {e}"))?,
    );
    let rsync = RecordingRsyncFetcher::new(SystemRsyncFetcher::new(SystemRsyncConfig {
        timeout: std::time::Duration::from_secs(args.rsync_timeout_secs),
        mirror_root: args.rsync_mirror_root.clone(),
        ..SystemRsyncConfig::default()
    }));
    // Live run at the target validation time; timed for the timings report.
    let started = Instant::now();
    let target_out = run_tree_from_tal_and_ta_der_serial_audit(
        &target_store,
        &Policy::default(),
        &tal_bytes,
        &ta_bytes,
        None,
        &http,
        &rsync,
        target_validation_time,
        &TreeRunConfig {
            max_depth: args.max_depth,
            max_instances: args.max_instances,
        },
    )
    .map_err(|e| format!("live target run failed: {e}"))?;
    let duration = started.elapsed();
    // Backfill snapshots for repos that advanced past the base locks so the
    // recorded inputs are complete (see ensure_recorded_target_snapshots).
    ensure_recorded_target_snapshots(&target_store, &rir_dir, &http)?;
    // Build the delta CCR from the live run, write it, then read it back and
    // verify the round-trip decodes and self-checks.
    let delta_ccr = build_ccr_from_run(
        &target_store,
        &[target_out.discovery.trust_anchor.clone()],
        &target_out.tree.vrps,
        &target_out.tree.aspas,
        &target_out.tree.router_keys,
        target_validation_time,
    )
    .map_err(|e| format!("build delta ccr failed: {e}"))?;
    let delta_ccr_path = rir_dir.join("delta.ccr");
    write_ccr_file(&delta_ccr_path, &delta_ccr)
        .map_err(|e| format!("write delta ccr failed: {e}"))?;
    let delta_ccr_bytes = fs::read(&delta_ccr_path)
        .map_err(|e| format!("read delta ccr failed: {}: {e}", delta_ccr_path.display()))?;
    let delta_decoded = decode_content_info(&delta_ccr_bytes)
        .map_err(|e| format!("decode delta ccr failed: {e}"))?;
    let delta_verify =
        verify_content_info(&delta_decoded).map_err(|e| format!("verify delta ccr failed: {e}"))?;
    // The compare views derived from the live run must match the views
    // decoded from the written CCR before the CSVs are persisted.
    let delta_vrp_rows = build_vrp_compare_rows(&target_out.tree.vrps, &trust_anchor);
    let delta_vap_rows = build_vap_compare_rows(&target_out.tree.aspas, &trust_anchor);
    let (ccr_vrps, ccr_vaps) =
        rpki::bundle::decode_ccr_compare_views(&delta_decoded, &trust_anchor)?;
    if delta_vrp_rows != ccr_vrps {
        return Err("record-delta.csv compare view does not match delta.ccr".to_string());
    }
    if delta_vap_rows != ccr_vaps {
        return Err("record-delta-vaps.csv compare view does not match delta.ccr".to_string());
    }
    write_vrp_csv(&rir_dir.join("record-delta.csv"), &delta_vrp_rows)?;
    write_vap_csv(&rir_dir.join("record-delta-vaps.csv"), &delta_vap_rows)?;
    // Persist the recorded transport exchanges as the delta replay inputs.
    let capture = write_live_delta_replay_bundle_inputs(
        &rir_dir,
        &rir_normalized,
        target_validation_time,
        &target_out.publication_points,
        &target_store,
        &http.snapshot_responses(),
        &rsync.snapshot_fetches(),
    )?;
    // Self-check: replay base + delta inputs in a fresh store and require the
    // result to match the live run's compare views exactly.
    let self_store = RocksStore::open(&self_replay_dir)
        .map_err(|e| format!("open self replay db failed: {e}"))?;
    let replay_out = run_tree_from_tal_and_ta_der_payload_delta_replay_serial_audit(
        &self_store,
        &Policy::default(),
        &tal_bytes,
        &ta_bytes,
        None,
        &rir_dir.join("base-payload-archive"),
        &rir_dir.join("base-locks.json"),
        &rir_dir.join("payload-delta-archive"),
        &rir_dir.join("locks-delta.json"),
        base_validation_time,
        target_validation_time,
        &TreeRunConfig {
            max_depth: args.max_depth,
            max_instances: args.max_instances,
        },
    )
    .map_err(|e| format!("self delta replay failed: {e}"))?;
    let replay_vrps = build_vrp_compare_rows(&replay_out.tree.vrps, &trust_anchor);
    let replay_vaps = build_vap_compare_rows(&replay_out.tree.aspas, &trust_anchor);
    if replay_vrps != delta_vrp_rows {
        return Err("self delta replay VRP compare view mismatch".to_string());
    }
    if replay_vaps != delta_vap_rows {
        return Err("self delta replay VAP compare view mismatch".to_string());
    }
    // Record how long the live run took.
    fs::create_dir_all(rir_dir.join("timings"))
        .map_err(|e| format!("create timings dir failed: {e}"))?;
    write_json(
        &rir_dir.join("timings").join("delta-produce.json"),
        &serde_json::json!({
            "mode": "delta",
            "validationTime": target_validation_time
                .format(&Rfc3339)
                .map_err(|e| format!("format validation time failed: {e}"))?,
            "durationSeconds": duration.as_secs_f64(),
        }),
    )?;
    // Patch the copied base bundle.json in place with the delta fields.
    // hasAspa/hasRouterKey are OR-ed with the base values so a base-only
    // capability is never lost.
    let mut bundle_json: serde_json::Value = serde_json::from_slice(
        &fs::read(rir_dir.join("bundle.json"))
            .map_err(|e| format!("read base bundle.json failed: {e}"))?,
    )
    .map_err(|e| format!("parse base bundle.json failed: {e}"))?;
    bundle_json["deltaValidationTime"] = serde_json::Value::String(
        target_validation_time
            .format(&Rfc3339)
            .map_err(|e| format!("format delta validation time failed: {e}"))?,
    );
    bundle_json["deltaCcrSha256"] = serde_json::Value::String(sha256_hex(&delta_ccr_bytes));
    bundle_json["deltaVrpCount"] = serde_json::Value::from(delta_vrp_rows.len() as u64);
    bundle_json["deltaVapCount"] = serde_json::Value::from(delta_vap_rows.len() as u64);
    bundle_json["hasAspa"] = serde_json::Value::Bool(
        bundle_json
            .get("hasAspa")
            .and_then(|v| v.as_bool())
            .unwrap_or(false)
            || !delta_vap_rows.is_empty(),
    );
    bundle_json["hasRouterKey"] = serde_json::Value::Bool(
        bundle_json
            .get("hasRouterKey")
            .and_then(|v| v.as_bool())
            .unwrap_or(false)
            || delta_verify.router_key_count > 0,
    );
    write_json(&rir_dir.join("bundle.json"), &bundle_json)?;
    // Append the delta section to the copied verification.json.
    let mut verification_json: serde_json::Value = serde_json::from_slice(
        &fs::read(rir_dir.join("verification.json"))
            .map_err(|e| format!("read base verification.json failed: {e}"))?,
    )
    .map_err(|e| format!("parse base verification.json failed: {e}"))?;
    verification_json["delta"] = serde_json::json!({
        "validationTime": target_validation_time
            .format(&Rfc3339)
            .map_err(|e| format!("format delta validation time failed: {e}"))?,
        "ccr": {
            "path": "delta.ccr",
            "sha256": sha256_hex(&delta_ccr_bytes),
            "stateHashesOk": delta_verify.state_hashes_ok,
            "manifestInstances": delta_verify.manifest_instances,
            "roaVrpCount": delta_verify.roa_vrp_count,
            "aspaPayloadSets": delta_verify.aspa_payload_sets,
            "routerKeyCount": delta_verify.router_key_count,
        },
        "compareViews": {
            // Both matched above, otherwise this point is unreachable.
            "vrpsSelfMatch": true,
            "vapsSelfMatch": true,
            "deltaVrpCount": delta_vrp_rows.len(),
            "deltaVapCount": delta_vap_rows.len(),
        },
        "capture": {
            "captureId": capture.capture_id,
            "rrdpRepoCount": capture.rrdp_repo_count,
            "rsyncModuleCount": capture.rsync_module_count,
            "selfReplayOk": true,
        }
    });
    write_json(&rir_dir.join("verification.json"), &verification_json)?;
    // Top-level manifest now advertises both the base and delta times.
    let bundle_manifest = build_single_rir_bundle_manifest(
        "20260330-v1",
        "ours",
        &rir_normalized,
        &base_validation_time,
        Some(&target_validation_time),
        bundle_json["hasAspa"].as_bool().unwrap_or(false),
    )?;
    write_json(&out_root.join("bundle-manifest.json"), &bundle_manifest)?;
    // Best-effort cleanup of the scratch databases; failures are ignored.
    let _ = fs::remove_dir_all(&target_store_dir);
    let _ = fs::remove_dir_all(&self_replay_dir);
    Ok(out_root.clone())
}
/// Entry point: parse process arguments, produce the delta bundle, and print
/// the resulting output directory on success.
fn main() -> Result<(), String> {
    let argv: Vec<String> = std::env::args().collect();
    let parsed = parse_args(&argv)?;
    let out_root = run(parsed)?;
    println!("{}", out_root.display());
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A fully specified argv yields all three required fields.
    #[test]
    fn parse_args_requires_required_flags() {
        let argv: Vec<String> = [
            "replay_bundle_capture_delta",
            "--rir",
            "apnic",
            "--base-bundle-dir",
            "base",
            "--out-dir",
            "out",
        ]
        .iter()
        .map(|s| s.to_string())
        .collect();
        let parsed = parse_args(&argv).expect("parse");
        assert_eq!(parsed.rir.as_deref(), Some("apnic"));
        assert_eq!(parsed.base_bundle_dir.as_deref(), Some(Path::new("base")));
        assert_eq!(parsed.out_dir.as_deref(), Some(Path::new("out")));
    }

    /// Omitting every flag reports the first missing required flag.
    #[test]
    fn parse_args_rejects_missing_requireds() {
        let err = parse_args(&["replay_bundle_capture_delta".to_string()]).unwrap_err();
        assert!(err.contains("--rir is required"), "{err}");
    }
}

Some files were not shown because too many files have changed in this diff Show More