优化rtr

This commit is contained in:
xiuting.xu 2026-04-08 14:03:23 +08:00
parent b483c2af8b
commit 23cdad095d
16 changed files with 1835 additions and 1606 deletions

View File

@ -152,3 +152,55 @@ cargo run --bin rtr_debug_client -- \
```sh
--keep-after-error
```
## 存储模型与边界约束(多视图)
当前 RTR cache 持久化采用三视图模型(按协议版本拆分):
- v0:`RouteOrigin`
- v1:`RouteOrigin + RouterKey`
- v2:`RouteOrigin + RouterKey + Aspa`
RocksDB 中的核心状态按版本独立保存:
- `session_id_v0/v1/v2`
- `serial_v0/v1/v2`
- `current_v0/v1/v2`(snapshot)
- `delta_min_v0/v1/v2` 与 `delta_max_v0/v1/v2`
- delta key 采用 `D + version + serial_be`
### 唯一写入口
为保证 `session_id/serial/snapshot/delta_window` 的一致性,生产代码只允许通过:
- `RtrStore::save_cache_state_versioned(...)`
且该入口应仅由:
- `src/rtr/cache/store.rs`
发起调用(即由 cache 层统一批量原子写入)。
### 恢复约束
重启恢复时,按版本读取并校验:
- `availability`
- `snapshot_for_version(version)`
- `session_id_for_version(version)`
- `serial_for_version(version)`
- `delta_window_for_version(version)`(经 `load_delta_window_for_version(...)` 加载)
只有三视图状态完整且自洽时才走 store 恢复;否则回退到输入源加载流程。
### 边界防回归测试
新增边界测试用于限制写调用点,防止后续出现绕写路径:
- `tests/test_store_boundary.rs`
可单独执行:
```sh
cargo test --test test_store_boundary -- --nocapture
```

View File

@ -1,19 +1,30 @@
#!/usr/bin/env sh
set -eu
export RPKI_RTR_ENABLE_TLS=false
export RPKI_RTR_TCP_ADDR=0.0.0.0:323
: "${RPKI_RTR_ENABLE_TLS:=false}"
: "${RPKI_RTR_TCP_ADDR:=0.0.0.0:323}"
export RPKI_RTR_ENABLE_TLS
export RPKI_RTR_TCP_ADDR
export RPKI_RTR_DB_PATH=./rtr-db
export RPKI_RTR_CCR_DIR=./data
: "${RPKI_RTR_DB_PATH:=./rtr-db}"
: "${RPKI_RTR_CCR_DIR:=./data}"
export RPKI_RTR_DB_PATH
export RPKI_RTR_CCR_DIR
export RPKI_RTR_MAX_DELTA=100
export RPKI_RTR_STRICT_CCR_VALIDATION=false
export RPKI_RTR_REFRESH_INTERVAL_SECS=300
export RPKI_RTR_MAX_CONNECTIONS=512
export RPKI_RTR_NOTIFY_QUEUE_SIZE=1024
: "${RPKI_RTR_MAX_DELTA:=100}"
: "${RPKI_RTR_STRICT_CCR_VALIDATION:=false}"
: "${RPKI_RTR_REFRESH_INTERVAL_SECS:=300}"
: "${RPKI_RTR_MAX_CONNECTIONS:=512}"
: "${RPKI_RTR_NOTIFY_QUEUE_SIZE:=1024}"
export RPKI_RTR_MAX_DELTA
export RPKI_RTR_STRICT_CCR_VALIDATION
export RPKI_RTR_REFRESH_INTERVAL_SECS
export RPKI_RTR_MAX_CONNECTIONS
export RPKI_RTR_NOTIFY_QUEUE_SIZE
export RPKI_RTR_TCP_KEEPALIVE_SECS=60
export RPKI_RTR_WARN_INSECURE_TCP=true
: "${RPKI_RTR_TCP_KEEPALIVE_SECS:=60}"
: "${RPKI_RTR_WARN_INSECURE_TCP:=true}"
export RPKI_RTR_TCP_KEEPALIVE_SECS
export RPKI_RTR_WARN_INSECURE_TCP
cargo run

View File

@ -1,25 +1,41 @@
#!/usr/bin/env sh
set -eu
export RPKI_RTR_ENABLE_TLS=true
export RPKI_RTR_TCP_ADDR=0.0.0.0:323
export RPKI_RTR_TLS_ADDR=0.0.0.0:324
: "${RPKI_RTR_ENABLE_TLS:=true}"
: "${RPKI_RTR_TCP_ADDR:=0.0.0.0:323}"
: "${RPKI_RTR_TLS_ADDR:=0.0.0.0:324}"
export RPKI_RTR_ENABLE_TLS
export RPKI_RTR_TCP_ADDR
export RPKI_RTR_TLS_ADDR
export RPKI_RTR_DB_PATH=./rtr-db
export RPKI_RTR_CCR_DIR=./data
: "${RPKI_RTR_DB_PATH:=./rtr-db}"
: "${RPKI_RTR_CCR_DIR:=./data}"
export RPKI_RTR_DB_PATH
export RPKI_RTR_CCR_DIR
export RPKI_RTR_TLS_CERT_PATH=./certs/server-dns.crt
export RPKI_RTR_TLS_KEY_PATH=./certs/server-dns.key
export RPKI_RTR_TLS_CLIENT_CA_PATH=./certs/client-ca.crt
: "${RPKI_RTR_TLS_CERT_PATH:=./certs/server-dns.crt}"
: "${RPKI_RTR_TLS_KEY_PATH:=./certs/server-dns.key}"
: "${RPKI_RTR_TLS_CLIENT_CA_PATH:=./certs/client-ca.crt}"
export RPKI_RTR_TLS_CERT_PATH
export RPKI_RTR_TLS_KEY_PATH
export RPKI_RTR_TLS_CLIENT_CA_PATH
export RPKI_RTR_MAX_DELTA=100
export RPKI_RTR_STRICT_CCR_VALIDATION=false
export RPKI_RTR_REFRESH_INTERVAL_SECS=300
export RPKI_RTR_MAX_CONNECTIONS=512
export RPKI_RTR_NOTIFY_QUEUE_SIZE=1024
: "${RPKI_RTR_MAX_DELTA:=100}"
: "${RPKI_RTR_STRICT_CCR_VALIDATION:=false}"
: "${RPKI_RTR_REFRESH_INTERVAL_SECS:=300}"
: "${RPKI_RTR_MAX_CONNECTIONS:=512}"
: "${RPKI_RTR_NOTIFY_QUEUE_SIZE:=1024}"
export RPKI_RTR_MAX_DELTA
export RPKI_RTR_STRICT_CCR_VALIDATION
export RPKI_RTR_REFRESH_INTERVAL_SECS
export RPKI_RTR_MAX_CONNECTIONS
export RPKI_RTR_NOTIFY_QUEUE_SIZE
export RPKI_RTR_TCP_KEEPALIVE_SECS=60
export RPKI_RTR_WARN_INSECURE_TCP=true
export RPKI_RTR_REQUIRE_TLS_SERVER_DNS_NAME_SAN=true
: "${RPKI_RTR_TCP_KEEPALIVE_SECS:=60}"
: "${RPKI_RTR_WARN_INSECURE_TCP:=true}"
: "${RPKI_RTR_REQUIRE_TLS_SERVER_DNS_NAME_SAN:=true}"
export RPKI_RTR_TCP_KEEPALIVE_SECS
export RPKI_RTR_WARN_INSECURE_TCP
export RPKI_RTR_REQUIRE_TLS_SERVER_DNS_NAME_SAN
cargo run

View File

@ -1,25 +1,41 @@
#!/usr/bin/env sh
set -eu
export RPKI_RTR_ENABLE_TLS=true
export RPKI_RTR_TCP_ADDR=0.0.0.0:323
export RPKI_RTR_TLS_ADDR=0.0.0.0:324
: "${RPKI_RTR_ENABLE_TLS:=true}"
: "${RPKI_RTR_TCP_ADDR:=0.0.0.0:323}"
: "${RPKI_RTR_TLS_ADDR:=0.0.0.0:324}"
export RPKI_RTR_ENABLE_TLS
export RPKI_RTR_TCP_ADDR
export RPKI_RTR_TLS_ADDR
export RPKI_RTR_DB_PATH=./rtr-db
export RPKI_RTR_CCR_DIR=./data
export RPKI_RTR_STRICT_CCR_VALIDATION=false
: "${RPKI_RTR_DB_PATH:=./rtr-db}"
: "${RPKI_RTR_CCR_DIR:=./data}"
: "${RPKI_RTR_STRICT_CCR_VALIDATION:=false}"
export RPKI_RTR_DB_PATH
export RPKI_RTR_CCR_DIR
export RPKI_RTR_STRICT_CCR_VALIDATION
export RPKI_RTR_TLS_CERT_PATH=./certs/server-dns.crt
export RPKI_RTR_TLS_KEY_PATH=./certs/server-dns.key
export RPKI_RTR_TLS_CLIENT_CA_PATH=./certs/client-ca.crt
: "${RPKI_RTR_TLS_CERT_PATH:=./certs/server-dns.crt}"
: "${RPKI_RTR_TLS_KEY_PATH:=./certs/server-dns.key}"
: "${RPKI_RTR_TLS_CLIENT_CA_PATH:=./certs/client-ca.crt}"
export RPKI_RTR_TLS_CERT_PATH
export RPKI_RTR_TLS_KEY_PATH
export RPKI_RTR_TLS_CLIENT_CA_PATH
export RPKI_RTR_MAX_DELTA=100
export RPKI_RTR_REFRESH_INTERVAL_SECS=300
export RPKI_RTR_MAX_CONNECTIONS=512
export RPKI_RTR_NOTIFY_QUEUE_SIZE=1024
: "${RPKI_RTR_MAX_DELTA:=100}"
: "${RPKI_RTR_REFRESH_INTERVAL_SECS:=300}"
: "${RPKI_RTR_MAX_CONNECTIONS:=512}"
: "${RPKI_RTR_NOTIFY_QUEUE_SIZE:=1024}"
export RPKI_RTR_MAX_DELTA
export RPKI_RTR_REFRESH_INTERVAL_SECS
export RPKI_RTR_MAX_CONNECTIONS
export RPKI_RTR_NOTIFY_QUEUE_SIZE
export RPKI_RTR_TCP_KEEPALIVE_SECS=60
export RPKI_RTR_WARN_INSECURE_TCP=true
export RPKI_RTR_REQUIRE_TLS_SERVER_DNS_NAME_SAN=true
: "${RPKI_RTR_TCP_KEEPALIVE_SECS:=60}"
: "${RPKI_RTR_WARN_INSECURE_TCP:=true}"
: "${RPKI_RTR_REQUIRE_TLS_SERVER_DNS_NAME_SAN:=true}"
export RPKI_RTR_TCP_KEEPALIVE_SECS
export RPKI_RTR_WARN_INSECURE_TCP
export RPKI_RTR_REQUIRE_TLS_SERVER_DNS_NAME_SAN
cargo run

290
src/bin/ccr_fixture_gen.rs Normal file
View File

@ -0,0 +1,290 @@
use std::fs;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::path::{Path, PathBuf};
use anyhow::{Context, Result};
use sha2::{Digest, Sha256};
// eContentType OID for CCR content — presumably the id-ct arc assigned by
// draft-ietf-sidrops-rpki-ccr (referenced below); TODO confirm against the draft.
const CONTENT_TYPE_OID: &str = "1.2.840.113549.1.9.16.1.54";
// OID for the SHA-256 digest algorithm (NIST hash-algorithms arc).
const SHA256_OID: &str = "2.16.840.1.101.3.4.2.1";
/// One validated ROA payload (VRP) used when building fixture snapshots.
#[derive(Clone)]
struct Vrp {
    // Prefix base address (IPv4 or IPv6).
    addr: IpAddr,
    // Prefix length in bits.
    prefix_len: u8,
    // Maximum announced prefix length.
    max_len: u8,
    // Origin AS number.
    asn: u32,
}
/// One validated ASPA payload (VAP): a customer ASN and its provider ASNs.
#[derive(Clone)]
struct Vap {
    customer_asn: u32,
    providers: Vec<u32>,
}
/// Generates three small CCR snapshot fixtures (mini-a/b/c) under the output
/// directory (default `./data`, overridable via `--out-dir`). The snapshots
/// differ incrementally so consumers can exercise snapshot/delta transitions.
fn main() -> Result<()> {
    let out_dir = parse_out_dir_arg();
    fs::create_dir_all(&out_dir)
        .with_context(|| format!("failed to create output dir {}", out_dir.display()))?;
    // Snapshot A: one IPv4 VRP, one IPv6 VRP, one VAP.
    write_snapshot(
        &out_dir.join("20260403T000001Z-mini-a.ccr"),
        "20260403000001Z",
        vec![
            Vrp {
                addr: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 0)),
                prefix_len: 24,
                max_len: 24,
                asn: 65001,
            },
            Vrp {
                addr: IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 1, 0, 0, 0, 0, 0)),
                prefix_len: 48,
                max_len: 48,
                asn: 65002,
            },
        ],
        vec![Vap {
            customer_asn: 65010,
            providers: vec![65011, 65012],
        }],
    )?;
    // Snapshot B: adds 10.0.1.0/24 (AS65003) and provider 65013 to the VAP.
    write_snapshot(
        &out_dir.join("20260403T000101Z-mini-b.ccr"),
        "20260403000101Z",
        vec![
            Vrp {
                addr: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 0)),
                prefix_len: 24,
                max_len: 24,
                asn: 65001,
            },
            Vrp {
                addr: IpAddr::V4(Ipv4Addr::new(10, 0, 1, 0)),
                prefix_len: 24,
                max_len: 24,
                asn: 65003,
            },
            Vrp {
                addr: IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 1, 0, 0, 0, 0, 0)),
                prefix_len: 48,
                max_len: 48,
                asn: 65002,
            },
        ],
        vec![Vap {
            customer_asn: 65010,
            providers: vec![65011, 65012, 65013],
        }],
    )?;
    // Snapshot C: withdraws 10.0.0.0/24 and 2001:db8:1::/48, announces
    // 2001:db8:2::/48 (AS65004), drops provider 65011, adds a second VAP.
    write_snapshot(
        &out_dir.join("20260403T000201Z-mini-c.ccr"),
        "20260403000201Z",
        vec![
            Vrp {
                addr: IpAddr::V4(Ipv4Addr::new(10, 0, 1, 0)),
                prefix_len: 24,
                max_len: 24,
                asn: 65003,
            },
            Vrp {
                addr: IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 2, 0, 0, 0, 0, 0)),
                prefix_len: 48,
                max_len: 48,
                asn: 65004,
            },
        ],
        vec![
            Vap {
                customer_asn: 65010,
                providers: vec![65012, 65013],
            },
            Vap {
                customer_asn: 65020,
                providers: vec![65021],
            },
        ],
    )?;
    println!("generated CCR fixtures under {}", out_dir.display());
    Ok(())
}
/// Returns the output directory parsed from the process arguments.
///
/// Accepts both `--out-dir <path>` and the common `--out-dir=<path>` form
/// (the latter was previously ignored silently); defaults to `./data`.
fn parse_out_dir_arg() -> PathBuf {
    parse_out_dir_from(std::env::args().skip(1))
}

/// Extracts the output directory from an argument iterator.
/// The last occurrence wins; a trailing `--out-dir` with no value is ignored,
/// matching the previous behavior.
fn parse_out_dir_from(mut args: impl Iterator<Item = String>) -> PathBuf {
    let mut out_dir = PathBuf::from("data");
    while let Some(arg) = args.next() {
        if let Some(value) = arg.strip_prefix("--out-dir=") {
            out_dir = PathBuf::from(value);
        } else if arg == "--out-dir" {
            if let Some(value) = args.next() {
                out_dir = PathBuf::from(value);
            }
        }
    }
    out_dir
}
/// Encodes one CCR snapshot and writes it to `path`, logging the destination.
fn write_snapshot(path: &Path, produced_at: &str, vrps: Vec<Vrp>, vaps: Vec<Vap>) -> Result<()> {
    let encoded = encode_ccr_snapshot(produced_at, vrps, vaps);
    fs::write(path, encoded).with_context(|| format!("failed to write {}", path.display()))?;
    println!("wrote {}", path.display());
    Ok(())
}
/// Assembles the full DER encoding of one CCR snapshot:
/// `SEQUENCE { contentType OID, [0] payload }`, where payload is
/// `SEQUENCE { version(0), hashAlg(SHA-256), producedAt, [2] vrpState, [3] vapState }`.
/// Each state pairs its payload-set SEQUENCE with the SHA-256 hash of that
/// encoded SEQUENCE.
fn encode_ccr_snapshot(produced_at: &str, vrps: Vec<Vrp>, vaps: Vec<Vap>) -> Vec<u8> {
    let vrp_sets = vrps
        .into_iter()
        .map(encode_roa_payload_set)
        .collect::<Vec<_>>();
    let vap_sets = vaps
        .into_iter()
        .map(encode_aspa_payload_set)
        .collect::<Vec<_>>();
    let vrp_set_seq = der_sequence(vrp_sets);
    let vap_set_seq = der_sequence(vap_sets);
    // Hashes are computed over the already-DER-encoded payload-set SEQUENCEs.
    let vrp_hash = Sha256::digest(&vrp_set_seq).to_vec();
    let vap_hash = Sha256::digest(&vap_set_seq).to_vec();
    // draft-ietf-sidrops-rpki-ccr-02:
    // ROAPayloadState/ASPAPayloadState include payload-set sequence + hash.
    let vrp_state = der_sequence(vec![vrp_set_seq, der_octet_string(vrp_hash)]);
    let vap_state = der_sequence(vec![vap_set_seq, der_octet_string(vap_hash)]);
    // AlgorithmIdentifier for SHA-256.
    let hash_alg = der_sequence(vec![der_oid(SHA256_OID), der_null()]);
    let payload = der_sequence(vec![
        der_integer(0),
        hash_alg,
        der_generalized_time(produced_at),
        der_ctx(2, vrp_state),
        der_ctx(3, vap_state),
    ]);
    der_sequence(vec![der_oid(CONTENT_TYPE_OID), der_ctx(0, payload)])
}
/// Encodes one VRP as a ROAPayloadSet-style DER structure:
/// `SEQUENCE { asn, SEQUENCE { SEQUENCE { afi-octets, SEQUENCE { roa-ip } } } }`.
///
/// The prefix is carried as a BIT STRING of exactly `prefix_len` bits, so the
/// unused-bits octet must be `byte_len * 8 - prefix_len`. The previous
/// hardcoded `0` was only correct for octet-aligned prefixes (/8, /16, /24,
/// /48, ...); with this fix non-aligned lengths (e.g. /25) encode a valid DER
/// BIT STRING, and aligned lengths produce byte-identical output as before.
fn encode_roa_payload_set(v: Vrp) -> Vec<u8> {
    let (afi, addr_bytes) = match v.addr {
        IpAddr::V4(ip) => ([0u8, 1u8].to_vec(), ip.octets().to_vec()),
        IpAddr::V6(ip) => ([0u8, 2u8].to_vec(), ip.octets().to_vec()),
    };
    let bit_string = prefix_to_bit_string(&addr_bytes, v.prefix_len);
    // Trailing bits of the final octet that are not part of the prefix.
    let unused_bits = (bit_string.len() * 8) as u8 - v.prefix_len;
    let roa_ip = der_sequence(vec![
        der_bit_string(unused_bits, bit_string),
        der_integer(u32::from(v.max_len)),
    ]);
    let family = der_sequence(vec![der_octet_string(afi), der_sequence(vec![roa_ip])]);
    der_sequence(vec![der_integer(v.asn), der_sequence(vec![family])])
}
/// Encodes one VAP: `SEQUENCE { customerASN, SEQUENCE OF providerASN }`.
fn encode_aspa_payload_set(v: Vap) -> Vec<u8> {
    let mut provider_ders = Vec::with_capacity(v.providers.len());
    for provider in v.providers {
        provider_ders.push(der_integer(provider));
    }
    der_sequence(vec![der_integer(v.customer_asn), der_sequence(provider_ders)])
}
/// Truncates `addr` to the minimum number of octets that cover `prefix_len`
/// bits and zeroes any bits beyond the prefix length in the final octet.
fn prefix_to_bit_string(addr: &[u8], prefix_len: u8) -> Vec<u8> {
    let trailing_bits = prefix_len % 8;
    let total_bytes = usize::from(prefix_len) / 8 + usize::from(trailing_bits != 0);
    let mut bits = addr[..total_bytes].to_vec();
    if trailing_bits != 0 {
        if let Some(last) = bits.last_mut() {
            // Keep only the high `trailing_bits` bits of the last octet.
            *last &= 0xFFu8 << (8 - trailing_bits);
        }
    }
    bits
}
/// Wraps the concatenation of `items` in a DER SEQUENCE (tag 0x30).
fn der_sequence(items: Vec<Vec<u8>>) -> Vec<u8> {
    der_tlv(0x30, items.into_iter().flatten().collect())
}
fn der_integer(v: u32) -> Vec<u8> {
if v == 0 {
return der_tlv(0x02, vec![0]);
}
let mut bytes = v.to_be_bytes().to_vec();
while bytes.len() > 1 && bytes[0] == 0 {
bytes.remove(0);
}
if bytes[0] & 0x80 != 0 {
bytes.insert(0, 0);
}
der_tlv(0x02, bytes)
}
/// DER OBJECT IDENTIFIER from dotted-decimal text.
///
/// The first two arcs combine as `arc0 * 40 + arc1` and, like every later
/// arc, are emitted in base-128 with continuation bits. Routing the combined
/// value through `base128` (instead of the previous `as u8` cast) keeps the
/// encoding correct when `arc0 * 40 + arc1 >= 128` (e.g. `2.999`), which the
/// cast silently truncated; values below 128 encode byte-identically.
///
/// # Panics
/// Panics if `oid` has fewer than two arcs or a non-numeric component.
fn der_oid(oid: &str) -> Vec<u8> {
    let parts = oid
        .split('.')
        .map(|s| s.parse::<u32>().unwrap())
        .collect::<Vec<_>>();
    assert!(parts.len() >= 2);
    let mut out = base128(parts[0] * 40 + parts[1]);
    for &part in &parts[2..] {
        out.extend(base128(part));
    }
    der_tlv(0x06, out)
}
/// Encodes `value` in big-endian base-128 as used by DER OID sub-identifiers:
/// every byte except the last carries 7 payload bits with its high bit set.
fn base128(value: u32) -> Vec<u8> {
    // Collect 7-bit groups least-significant first.
    let mut groups = Vec::new();
    let mut rest = value;
    loop {
        groups.push((rest & 0x7F) as u8);
        rest >>= 7;
        if rest == 0 {
            break;
        }
    }
    // Reverse to big-endian and set the continuation bit everywhere...
    let mut encoded: Vec<u8> = groups.iter().rev().map(|&g| g | 0x80).collect();
    // ...then clear it on the final byte.
    if let Some(last) = encoded.last_mut() {
        *last &= 0x7F;
    }
    encoded
}
/// DER OCTET STRING (tag 0x04) wrapping `bytes` verbatim.
fn der_octet_string(bytes: Vec<u8>) -> Vec<u8> {
    der_tlv(0x04, bytes)
}
/// DER NULL (tag 0x05, empty content) — used as the AlgorithmIdentifier
/// parameters field.
fn der_null() -> Vec<u8> {
    der_tlv(0x05, Vec::new())
}
/// DER BIT STRING (tag 0x03): the content starts with the unused-bit count
/// for the final octet, followed by the payload bytes.
fn der_bit_string(unused_bits: u8, bytes: Vec<u8>) -> Vec<u8> {
    let content: Vec<u8> = std::iter::once(unused_bits).chain(bytes).collect();
    der_tlv(0x03, content)
}
/// DER GeneralizedTime (tag 0x18) carrying `v` verbatim; the caller must
/// supply a DER-valid `YYYYMMDDhhmmssZ` string (no validation is done here).
fn der_generalized_time(v: &str) -> Vec<u8> {
    der_tlv(0x18, v.as_bytes().to_vec())
}
/// Context-specific constructed tag [tag_no] (0xA0 | tag_no) wrapping
/// already-encoded DER content (explicit tagging).
fn der_ctx(tag_no: u8, encoded_inner_der: Vec<u8>) -> Vec<u8> {
    der_tlv(0xA0 + tag_no, encoded_inner_der)
}
/// Assembles one DER TLV: tag byte, length octets, then the raw content.
fn der_tlv(tag: u8, content: Vec<u8>) -> Vec<u8> {
    let length = der_len(content.len());
    let mut encoded = Vec::with_capacity(1 + length.len() + content.len());
    encoded.push(tag);
    encoded.extend(length);
    encoded.extend(content);
    encoded
}
/// DER length octets: short form (single byte) for lengths below 128,
/// otherwise long form — `0x80 | byte_count` followed by the minimal
/// big-endian length bytes.
fn der_len(len: usize) -> Vec<u8> {
    if len < 0x80 {
        return vec![len as u8];
    }
    let be = len.to_be_bytes();
    // len >= 128, so a nonzero byte always exists.
    let first = be.iter().position(|&b| b != 0).unwrap_or(be.len() - 1);
    let body = &be[first..];
    let mut out = Vec::with_capacity(1 + body.len());
    out.push(0x80 | body.len() as u8);
    out.extend_from_slice(body);
    out
}

View File

@ -94,7 +94,7 @@ async fn main() -> io::Result<()> {
Ok(Some(line)) => {
match handle_console_command(
&line,
&mut writer,
Some(&mut writer),
&mut state,
).await {
Ok(should_quit) => {
@ -169,7 +169,47 @@ async fn main() -> io::Result<()> {
let delay = state.reconnect_delay_secs();
state.current_session_id = None;
println!("[reconnect] transport disconnected, retry after {}s", delay);
tokio::time::sleep(Duration::from_secs(delay)).await;
let reconnect_sleep = tokio::time::sleep(Duration::from_secs(delay));
tokio::pin!(reconnect_sleep);
let mut reconnect_now = false;
loop {
tokio::select! {
_ = &mut reconnect_sleep => break,
line = stdin_lines.next_line() => {
match line {
Ok(Some(line)) => {
match handle_console_command(&line, None, &mut state).await {
Ok(should_quit) => {
if should_quit {
println!("quit requested, closing client.");
return Ok(());
}
}
Ok(false) => {
if state.take_reconnect_now() {
reconnect_now = true;
break;
}
}
Err(err) => return Err(err),
}
}
Ok(None) => {
println!("stdin closed, continue reconnect loop.");
}
Err(err) => {
eprintln!("read stdin failed: {}", err);
}
}
}
}
}
if reconnect_now {
println!("[reconnect] user requested immediate reconnect");
}
}
}
}
@ -179,6 +219,16 @@ async fn send_resume_query(
state: &mut ClientState,
mode: &QueryMode,
) -> io::Result<()> {
if state.force_reset_on_reconnect {
state.force_reset_on_reconnect = false;
state.session_id = None;
state.serial = None;
state.current_session_id = None;
send_reset_query(writer, state.version).await?;
println!("reconnected, send Reset Query (forced)");
return Ok(());
}
match (state.session_id, state.serial) {
(Some(session_id), Some(serial)) => {
println!(
@ -380,7 +430,7 @@ async fn handle_poll_tick(writer: &mut ClientWriter, state: &mut ClientState) ->
async fn handle_console_command(
line: &str,
writer: &mut ClientWriter,
mut writer: Option<&mut ClientWriter>,
state: &mut ClientState,
) -> io::Result<bool> {
let line = line.trim();
@ -400,10 +450,35 @@ async fn handle_console_command(
print_state(state);
}
["version"] => {
println!("current RTR version: {}", state.version);
}
["version", version] => {
let version = match version.parse::<u8>() {
Ok(v) => v,
Err(err) => {
println!("invalid version: {}", err);
return Ok(false);
}
};
state.version = version;
println!("updated RTR version to {}", state.version);
}
["reset"] => {
println!("manual command: send Reset Query");
if let Some(writer) = writer.as_mut() {
send_reset_query(writer, state.version).await?;
state.schedule_next_poll();
} else {
state.force_reset_on_reconnect = true;
state.request_reconnect_now();
state.session_id = None;
state.serial = None;
state.current_session_id = None;
println!("not connected, queued Reset Query for next reconnect");
}
}
["serial"] => match (state.session_id, state.serial) {
@ -412,8 +487,12 @@ async fn handle_console_command(
"manual command: send Serial Query with current state: session_id={}, serial={}",
session_id, serial
);
if let Some(writer) = writer.as_mut() {
send_serial_query(writer, state.version, session_id, serial).await?;
state.schedule_next_poll();
} else {
println!("not connected, will send Serial Query on reconnect");
}
}
_ => {
println!(
@ -445,8 +524,13 @@ async fn handle_console_command(
);
state.session_id = Some(session_id);
state.serial = Some(serial);
if let Some(writer) = writer.as_mut() {
send_serial_query(writer, state.version, session_id, serial).await?;
state.schedule_next_poll();
} else {
state.force_reset_on_reconnect = false;
println!("not connected, queued Serial Query for next reconnect");
}
}
["timeout"] => {
@ -533,6 +617,8 @@ fn print_help() {
println!("available commands:");
println!(" help show this help");
println!(" state print current client state");
println!(" version show current RTR version");
println!(" version <n> update RTR version");
println!(" reset send Reset Query");
println!(" serial send Serial Query with current session_id/serial");
println!(" serial <sid> <serial> send Serial Query with explicit values");
@ -583,6 +669,8 @@ struct ClientState {
default_poll_secs: u64,
next_poll_deadline: Instant,
poll_paused: bool,
force_reset_on_reconnect: bool,
reconnect_now: bool,
}
impl ClientState {
@ -606,6 +694,8 @@ impl ClientState {
default_poll_secs,
next_poll_deadline: Instant::now() + Duration::from_secs(default_poll_secs),
poll_paused: false,
force_reset_on_reconnect: false,
reconnect_now: false,
}
}
@ -667,6 +757,19 @@ impl ClientState {
self.default_poll_secs
}
}
fn request_reconnect_now(&mut self) {
self.reconnect_now = true;
}
fn take_reconnect_now(&mut self) -> bool {
if self.reconnect_now {
self.reconnect_now = false;
true
} else {
false
}
}
}
#[derive(Debug)]
@ -767,12 +870,7 @@ impl Config {
.transpose()?
.unwrap_or(1);
if version > 2 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("unsupported RTR version {}, expected 0..=2", version),
));
}
// Allow any version here; server will validate and respond.
let mode = match positional.next().as_deref() {
None | Some("reset") => QueryMode::Reset,

View File

@ -68,6 +68,7 @@ impl AppConfig {
fn from_env() -> Result<Self> {
let mut config = Self::default();
// TLS and TCP
if let Some(value) = env_var("RPKI_RTR_ENABLE_TLS")? {
config.enable_tls = parse_bool(&value, "RPKI_RTR_ENABLE_TLS")?;
}
@ -81,6 +82,8 @@ impl AppConfig {
.parse()
.map_err(|err| anyhow!("invalid RPKI_RTR_TLS_ADDR '{}': {}", value, err))?;
}
// data
if let Some(value) = env_var("RPKI_RTR_DB_PATH")? {
config.db_path = value;
}
@ -105,9 +108,16 @@ impl AppConfig {
config.tls_client_ca_path = value;
}
if let Some(value) = env_var("RPKI_RTR_MAX_DELTA")? {
config.max_delta = value
let parsed: u8 = value
.parse()
.map_err(|err| anyhow!("invalid RPKI_RTR_MAX_DELTA '{}': {}", value, err))?;
if parsed == 0 {
return Err(anyhow!(
"invalid RPKI_RTR_MAX_DELTA '{}': must be >= 1",
value
));
}
config.max_delta = parsed;
}
if let Some(value) = env_var("RPKI_RTR_PRUNE_DELTA_BY_SNAPSHOT_SIZE")? {
config.prune_delta_by_snapshot_size =
@ -214,9 +224,9 @@ fn init_shared_cache(config: &AppConfig, store: &RtrStore) -> Result<SharedRtrCa
.map_err(|_| anyhow!("cache read lock poisoned during startup"))?;
info!(
"cache initialized: session_ids={:?}, serial={}",
"cache initialized: session_ids={:?}, serials={:?}",
cache.session_ids(),
cache.serial()
cache.serials()
);
}
@ -270,11 +280,11 @@ fn spawn_refresh_task(
}
};
let old_serial = cache.serial();
let old_serial = cache.serial_for_version(2);
match cache.update(payloads, &store) {
Ok(()) => {
let new_serial = cache.serial();
let new_serial = cache.serial_for_version(2);
if new_serial != old_serial {
info!(
"RTR cache refresh applied: ccr_dir={}, payload_count={}, old_serial={}, new_serial={}",

487
src/rtr/cache/core.rs vendored
View File

@ -11,6 +11,7 @@ use super::model::{Delta, DualTime, Snapshot};
use super::ordering::{ChangeKey, change_key};
const SERIAL_HALF_RANGE: u32 = 1 << 31;
const VERSION_COUNT: usize = 3;
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq)]
pub enum CacheAvailability {
@ -20,16 +21,16 @@ pub enum CacheAvailability {
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct SessionIds {
ids: [u16; 3],
ids: [u16; VERSION_COUNT],
}
impl SessionIds {
pub fn from_array(ids: [u16; 3]) -> Self {
pub fn from_array(ids: [u16; VERSION_COUNT]) -> Self {
Self { ids }
}
pub fn random_distinct() -> Self {
let mut ids = [0u16; 3];
let mut ids = [0u16; VERSION_COUNT];
for idx in 0..ids.len() {
loop {
let candidate: u16 = rand::random();
@ -45,15 +46,36 @@ impl SessionIds {
pub fn get(&self, version: u8) -> u16 {
self.ids[version_index(version)]
}
pub fn as_array(&self) -> [u16; VERSION_COUNT] {
self.ids
}
}
/// Per-protocol-version cache state: each RTR version keeps its own
/// session id, serial, snapshot, and delta window.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionState {
    session_id: u16,
    serial: u32,
    snapshot: Snapshot,
    // Deltas are excluded from serde (de)serialization; NOTE(review):
    // presumably reloaded separately via load_delta_window_for_version — confirm.
    #[serde(skip)]
    deltas: VecDeque<Arc<Delta>>,
}

impl VersionState {
    // Builds a state whose delta window is pre-sized to hold `max_delta` entries.
    fn new(session_id: u16, serial: u32, snapshot: Snapshot, max_delta: u8) -> Self {
        Self {
            session_id,
            serial,
            snapshot,
            deltas: VecDeque::with_capacity(max_delta as usize),
        }
    }
}
#[derive(Debug)]
pub struct RtrCache {
availability: CacheAvailability,
session_ids: SessionIds,
serial: u32,
snapshot: Snapshot,
deltas: VecDeque<Arc<Delta>>,
versions: [VersionState; VERSION_COUNT],
max_delta: u8,
prune_delta_by_snapshot_size: bool,
timing: Timing,
@ -65,12 +87,13 @@ pub struct RtrCache {
impl Default for RtrCache {
fn default() -> Self {
let now = DualTime::now();
let session_ids = SessionIds::random_distinct();
let versions = std::array::from_fn(|idx| {
VersionState::new(session_ids.as_array()[idx], 0, Snapshot::empty(), 100)
});
Self {
availability: CacheAvailability::Ready,
session_ids: SessionIds::random_distinct(),
serial: 0,
snapshot: Snapshot::empty(),
deltas: VecDeque::with_capacity(100),
versions,
max_delta: 100,
prune_delta_by_snapshot_size: false,
timing: Timing::default(),
@ -87,9 +110,9 @@ pub struct RtrCacheBuilder {
max_delta: Option<u8>,
prune_delta_by_snapshot_size: Option<bool>,
timing: Option<Timing>,
serial: Option<u32>,
snapshot: Option<Snapshot>,
deltas: Option<VecDeque<Arc<Delta>>>,
serials: Option<[u32; VERSION_COUNT]>,
snapshots: Option<[Snapshot; VERSION_COUNT]>,
deltas: Option<[VecDeque<Arc<Delta>>; VERSION_COUNT]>,
created_at: Option<DualTime>,
}
@ -101,8 +124,8 @@ impl RtrCacheBuilder {
max_delta: None,
prune_delta_by_snapshot_size: None,
timing: None,
serial: None,
snapshot: None,
serials: None,
snapshots: None,
deltas: None,
created_at: None,
}
@ -133,17 +156,17 @@ impl RtrCacheBuilder {
self
}
pub fn serial(mut self, v: u32) -> Self {
self.serial = Some(v);
pub fn serials(mut self, v: [u32; VERSION_COUNT]) -> Self {
self.serials = Some(v);
self
}
pub fn snapshot(mut self, v: Snapshot) -> Self {
self.snapshot = Some(v);
pub fn snapshots(mut self, v: [Snapshot; VERSION_COUNT]) -> Self {
self.snapshots = Some(v);
self
}
pub fn deltas(mut self, v: VecDeque<Arc<Delta>>) -> Self {
pub fn deltas_by_version(mut self, v: [VecDeque<Arc<Delta>>; VERSION_COUNT]) -> Self {
self.deltas = Some(v);
self
}
@ -158,22 +181,28 @@ impl RtrCacheBuilder {
let max_delta = self.max_delta.unwrap_or(100);
let prune_delta_by_snapshot_size = self.prune_delta_by_snapshot_size.unwrap_or(false);
let timing = self.timing.unwrap_or_default();
let snapshot = self.snapshot.unwrap_or_else(Snapshot::empty);
let deltas = self
.deltas
.unwrap_or_else(|| VecDeque::with_capacity(max_delta.into()));
let session_ids = self.session_ids.unwrap_or_else(SessionIds::random_distinct);
let serials = self.serials.unwrap_or([0; VERSION_COUNT]);
let snapshots = self
.snapshots
.unwrap_or_else(|| std::array::from_fn(|_| Snapshot::empty()));
let deltas = self.deltas.unwrap_or_else(|| {
std::array::from_fn(|_| VecDeque::with_capacity(max_delta as usize))
});
let versions = std::array::from_fn(|idx| VersionState {
session_id: session_ids.as_array()[idx],
serial: serials[idx],
snapshot: snapshots[idx].clone(),
deltas: deltas[idx].clone(),
});
let serial = self.serial.unwrap_or(0);
let created_at = self.created_at.unwrap_or_else(|| now.clone());
let availability = self.availability.unwrap_or(CacheAvailability::Ready);
let session_ids = self.session_ids.unwrap_or_else(SessionIds::random_distinct);
RtrCache {
availability,
session_ids,
serial,
snapshot,
deltas,
versions,
max_delta,
prune_delta_by_snapshot_size,
timing,
@ -187,72 +216,73 @@ impl RtrCacheBuilder {
impl RtrCache {
fn set_unavailable(&mut self) {
warn!(
"RTR cache entering NoDataAvailable: old_serial={}, snapshot_empty={}, delta_count={}",
self.serial,
self.snapshot.is_empty(),
self.deltas.len()
"RTR cache entering NoDataAvailable: serials={:?}",
self.serials()
);
self.availability = CacheAvailability::NoDataAvailable;
self.snapshot = Snapshot::empty();
self.deltas.clear();
for version_state in &mut self.versions {
version_state.snapshot = Snapshot::empty();
version_state.deltas.clear();
}
}
fn reinitialize_from_snapshot(&mut self, snapshot: Snapshot) -> AppliedUpdate {
let old_serial = self.serial;
let old_session_ids = self.session_ids.clone();
fn reinitialize_from_snapshot(&mut self, source_snapshot: &Snapshot) -> AppliedUpdate {
let old_serials = self.serials();
let old_session_ids = self.session_ids();
let new_session_ids = SessionIds::random_distinct();
self.availability = CacheAvailability::Ready;
self.session_ids = SessionIds::random_distinct();
self.serial = 1;
self.snapshot = snapshot.clone();
self.deltas.clear();
for version in 0..VERSION_COUNT {
let v = version as u8;
let state = &mut self.versions[version];
state.session_id = new_session_ids.get(v);
state.serial = 1;
state.snapshot = project_snapshot_for_version(source_snapshot, v);
state.deltas.clear();
}
self.last_update_end = DualTime::now();
info!(
"RTR cache reinitialized from usable snapshot: old_serial={}, new_serial={}, old_session_ids={:?}, new_session_ids={:?}, payloads(route_origins={}, router_keys={}, aspas={})",
old_serial,
self.serial,
"RTR cache reinitialized from usable snapshot: old_serials={:?}, new_serials={:?}, old_session_ids={:?}, new_session_ids={:?}",
old_serials,
self.serials(),
old_session_ids,
self.session_ids,
snapshot.origins().len(),
snapshot.router_keys().len(),
snapshot.aspas().len()
new_session_ids
);
AppliedUpdate {
availability: self.availability,
snapshot,
serial: self.serial,
session_ids: self.session_ids.clone(),
delta: None,
delta_window: None,
clear_delta_window: true,
}
self.applied_update_with_clear()
}
fn next_serial(&mut self) -> u32 {
let old = self.serial;
self.serial = self.serial.wrapping_add(1);
fn next_serial(state: &mut VersionState) -> u32 {
let old = state.serial;
state.serial = state.serial.wrapping_add(1);
debug!(
"RTR cache advanced serial: old_serial={}, new_serial={}",
old, self.serial
"RTR cache advanced serial for version state: old_serial={}, new_serial={}",
old, state.serial
);
self.serial
state.serial
}
fn push_delta(&mut self, delta: Arc<Delta>) {
if self.deltas.len() >= self.max_delta as usize {
self.deltas.pop_front();
fn push_delta(
state: &mut VersionState,
max_delta: u8,
prune_delta_by_snapshot_size: bool,
delta: Arc<Delta>,
) {
let max_keep = usize::from(max_delta.max(1));
while state.deltas.len() >= max_keep {
state.deltas.pop_front();
}
self.deltas.push_back(delta);
state.deltas.push_back(delta);
let mut dropped_serials = Vec::new();
if self.prune_delta_by_snapshot_size {
let snapshot_wire_size = estimate_snapshot_payload_wire_size(&self.snapshot);
if prune_delta_by_snapshot_size {
let snapshot_wire_size = estimate_snapshot_payload_wire_size(&state.snapshot);
let mut cumulative_delta_wire_size =
estimate_delta_window_payload_wire_size(&self.deltas);
while !self.deltas.is_empty() && cumulative_delta_wire_size >= snapshot_wire_size {
if let Some(oldest) = self.deltas.pop_front() {
estimate_delta_window_payload_wire_size(&state.deltas);
while !state.deltas.is_empty() && cumulative_delta_wire_size >= snapshot_wire_size {
if let Some(oldest) = state.deltas.pop_front() {
dropped_serials.push(oldest.serial());
cumulative_delta_wire_size =
estimate_delta_window_payload_wire_size(&self.deltas);
estimate_delta_window_payload_wire_size(&state.deltas);
}
}
debug!(
@ -260,25 +290,11 @@ impl RtrCache {
snapshot_wire_size, cumulative_delta_wire_size, dropped_serials
);
}
debug!(
"RTR cache pushing delta into window: delta_serial={}, announced={}, withdrawn={}, dropped_oldest_serials={:?}, window_size_after={}, max_delta={}, prune_delta_by_snapshot_size={}",
self.deltas.back().map(|d| d.serial()).unwrap_or(0),
self.deltas.back().map(|d| d.announced().len()).unwrap_or(0),
self.deltas.back().map(|d| d.withdrawn().len()).unwrap_or(0),
dropped_serials,
self.deltas.len(),
self.max_delta,
self.prune_delta_by_snapshot_size
);
}
fn replace_snapshot(&mut self, snapshot: Snapshot) {
self.snapshot = snapshot;
}
fn delta_window(&self) -> Option<(u32, u32)> {
let min = self.deltas.front().map(|d| d.serial());
let max = self.deltas.back().map(|d| d.serial());
fn delta_window(state: &VersionState) -> Option<(u32, u32)> {
let min = state.deltas.front().map(|d| d.serial());
let max = state.deltas.back().map(|d| d.serial());
match (min, max) {
(Some(min), Some(max)) => Some((min, max)),
_ => None,
@ -291,116 +307,104 @@ impl RtrCache {
) -> Result<Option<AppliedUpdate>> {
self.last_update_begin = DualTime::now();
info!(
"RTR cache applying update: availability={:?}, current_serial={}, incoming_payloads={}",
"RTR cache applying update: availability={:?}, current_serials={:?}, incoming_payloads={}",
self.availability,
self.serial,
self.serials(),
new_payloads.len()
);
let new_snapshot = Snapshot::from_payloads(new_payloads);
debug!(
"RTR cache built new snapshot from update: route_origins={}, router_keys={}, aspas={}, snapshot_empty={}",
new_snapshot.origins().len(),
new_snapshot.router_keys().len(),
new_snapshot.aspas().len(),
new_snapshot.is_empty()
);
if new_snapshot.is_empty() {
let source_snapshot = Snapshot::from_payloads(new_payloads);
if source_snapshot.is_empty() {
let changed = self.availability != CacheAvailability::NoDataAvailable
|| !self.snapshot.is_empty()
|| !self.deltas.is_empty();
|| self.versions.iter().any(|state| !state.snapshot.is_empty())
|| self.versions.iter().any(|state| !state.deltas.is_empty());
self.set_unavailable();
self.last_update_end = DualTime::now();
if !changed {
debug!(
"RTR cache update produced empty snapshot but cache was already unavailable; no state change"
);
return Ok(None);
}
info!(
"RTR cache update cleared usable data and marked cache unavailable: serial={}, session_ids={:?}",
self.serial, self.session_ids
);
return Ok(Some(AppliedUpdate {
availability: self.availability,
snapshot: Snapshot::empty(),
serial: self.serial,
session_ids: self.session_ids.clone(),
delta: None,
delta_window: None,
clear_delta_window: true,
}));
return Ok(Some(self.applied_update_with_clear()));
}
if self.availability == CacheAvailability::NoDataAvailable {
info!("RTR cache recovered from NoDataAvailable with non-empty snapshot");
return Ok(Some(self.reinitialize_from_snapshot(new_snapshot)));
return Ok(Some(self.reinitialize_from_snapshot(&source_snapshot)));
}
if self.snapshot.same_content(&new_snapshot) {
self.last_update_end = DualTime::now();
debug!(
"RTR cache update detected identical snapshot content: serial={}, session_ids={:?}",
self.serial, self.session_ids
);
return Ok(None);
let mut changed_any = false;
for version in 0..VERSION_COUNT {
let v = version as u8;
let projected = project_snapshot_for_version(&source_snapshot, v);
let state = &mut self.versions[version];
if state.snapshot.same_content(&projected) {
continue;
}
let (announced, withdrawn) = self.snapshot.diff(&new_snapshot);
debug!(
"RTR cache diff computed: announced={}, withdrawn={}, current_serial={}",
announced.len(),
withdrawn.len(),
self.serial
);
let (announced, withdrawn) = state.snapshot.diff(&projected);
if announced.is_empty() && withdrawn.is_empty() {
self.last_update_end = DualTime::now();
debug!("RTR cache diff was empty after normalization; no update applied");
return Ok(None);
continue;
}
let new_serial = self.next_serial();
let new_serial = Self::next_serial(state);
let delta = Arc::new(Delta::new(new_serial, announced, withdrawn));
if delta.is_empty() {
self.last_update_end = DualTime::now();
debug!(
"RTR cache delta collapsed to empty after dedup/order normalization: serial={}",
new_serial
continue;
}
state.snapshot = projected;
Self::push_delta(
state,
self.max_delta,
self.prune_delta_by_snapshot_size,
delta,
);
changed_any = true;
}
self.last_update_end = DualTime::now();
if !changed_any {
return Ok(None);
}
self.replace_snapshot(new_snapshot.clone());
self.push_delta(delta.clone());
self.last_update_end = DualTime::now();
let delta_window = self.delta_window();
info!(
"RTR cache applied update: serial={}, announced={}, withdrawn={}, delta_window={:?}, snapshot(route_origins={}, router_keys={}, aspas={})",
new_serial,
delta.announced().len(),
delta.withdrawn().len(),
delta_window,
new_snapshot.origins().len(),
new_snapshot.router_keys().len(),
new_snapshot.aspas().len()
"RTR cache applied update: serials={:?}, session_ids={:?}, delta_lengths={:?}",
self.serials(),
self.session_ids(),
self.delta_lengths()
);
Ok(Some(self.applied_update_with_windows()))
}
Ok(Some(AppliedUpdate {
fn applied_update_with_clear(&self) -> AppliedUpdate {
let snapshots = std::array::from_fn(|idx| self.versions[idx].snapshot.clone());
let serials = std::array::from_fn(|idx| self.versions[idx].serial);
let session_ids = std::array::from_fn(|idx| self.versions[idx].session_id);
AppliedUpdate {
availability: self.availability,
snapshot: new_snapshot,
serial: new_serial,
session_ids: self.session_ids.clone(),
delta: Some(delta),
delta_window,
clear_delta_window: false,
}))
snapshots,
serials,
session_ids,
deltas: [None, None, None],
delta_windows: [None, None, None],
clear_delta_windows: [true, true, true],
}
}
/// Build an `AppliedUpdate` carrying, for every protocol version, the
/// current snapshot/serial/session id plus the most recent delta and the
/// live delta window (nothing is cleared).
fn applied_update_with_windows(&self) -> AppliedUpdate {
    AppliedUpdate {
        availability: self.availability,
        snapshots: std::array::from_fn(|i| self.versions[i].snapshot.clone()),
        serials: std::array::from_fn(|i| self.versions[i].serial),
        session_ids: std::array::from_fn(|i| self.versions[i].session_id),
        // Latest delta per version, if one exists.
        deltas: std::array::from_fn(|i| self.versions[i].deltas.back().cloned()),
        delta_windows: std::array::from_fn(|i| Self::delta_window(&self.versions[i])),
        clear_delta_windows: [false, false, false],
    }
}
pub fn is_data_available(&self) -> bool {
@ -412,29 +416,33 @@ impl RtrCache {
}
pub fn session_id_for_version(&self, version: u8) -> u16 {
self.session_ids.get(version)
self.versions[version_index(version)].session_id
}
pub fn session_ids(&self) -> SessionIds {
self.session_ids.clone()
SessionIds::from_array(std::array::from_fn(|idx| self.versions[idx].session_id))
}
pub fn snapshot(&self) -> Snapshot {
self.snapshot.clone()
pub fn snapshot_for_version(&self, version: u8) -> Snapshot {
self.versions[version_index(version)].snapshot.clone()
}
pub fn serial(&self) -> u32 {
self.serial
pub fn serial_for_version(&self, version: u8) -> u32 {
self.versions[version_index(version)].serial
}
pub fn serials(&self) -> [u32; VERSION_COUNT] {
std::array::from_fn(|idx| self.versions[idx].serial)
}
pub fn delta_lengths(&self) -> [usize; VERSION_COUNT] {
std::array::from_fn(|idx| self.versions[idx].deltas.len())
}
pub fn timing(&self) -> Timing {
self.timing
}
pub fn current_snapshot_with_session_ids(&self) -> (&Snapshot, u32, SessionIds) {
(&self.snapshot, self.serial, self.session_ids.clone())
}
pub fn last_update_begin(&self) -> DualTime {
self.last_update_begin.clone()
}
@ -447,76 +455,43 @@ impl RtrCache {
self.created_at.clone()
}
pub fn get_deltas_since(&self, client_serial: u32) -> SerialResult {
if client_serial == self.serial {
debug!(
"RTR cache delta query is already up to date: client_serial={}, cache_serial={}",
client_serial, self.serial
);
pub fn get_deltas_since_for_version(&self, version: u8, client_serial: u32) -> SerialResult {
let state = &self.versions[version_index(version)];
if client_serial == state.serial {
return SerialResult::UpToDate;
}
if matches!(
serial_cmp(client_serial, self.serial),
serial_cmp(client_serial, state.serial),
Some(Ordering::Greater) | None
) {
warn!(
"RTR cache delta query requires reset due to invalid/newer client serial: client_serial={}, cache_serial={}",
client_serial, self.serial
);
return SerialResult::ResetRequired;
}
let deltas = match self.collect_deltas_since(client_serial) {
let deltas = match collect_deltas_since(state, client_serial) {
Some(deltas) => deltas,
None => {
warn!(
"RTR cache delta query requires reset because requested serial is outside delta window: client_serial={}, cache_serial={}, delta_window={:?}",
client_serial,
self.serial,
self.delta_window()
);
return SerialResult::ResetRequired;
}
None => return SerialResult::ResetRequired,
};
if deltas.is_empty() {
debug!(
"RTR cache delta query resolved to no deltas: client_serial={}, cache_serial={}",
client_serial, self.serial
);
return SerialResult::UpToDate;
}
let merged = self.merge_deltas_minimally(&deltas);
let merged = merge_deltas_minimally(state.serial, &deltas);
if merged.is_empty() {
debug!(
"RTR cache merged delta query to empty result: client_serial={}, cache_serial={}, source_deltas={}",
client_serial,
self.serial,
deltas.len()
);
SerialResult::UpToDate
} else {
info!(
"RTR cache serving delta query: client_serial={}, cache_serial={}, source_deltas={}, merged_announced={}, merged_withdrawn={}",
client_serial,
self.serial,
deltas.len(),
merged.announced().len(),
merged.withdrawn().len()
);
SerialResult::Delta(merged)
}
}
}
fn collect_deltas_since(&self, client_serial: u32) -> Option<Vec<Arc<Delta>>> {
if self.deltas.is_empty() {
fn collect_deltas_since(state: &VersionState, client_serial: u32) -> Option<Vec<Arc<Delta>>> {
if state.deltas.is_empty() {
return None;
}
let oldest_serial = self.deltas.front().unwrap().serial();
let oldest_serial = state.deltas.front().unwrap().serial();
let min_supported = oldest_serial.wrapping_sub(1);
if matches!(
@ -527,7 +502,7 @@ impl RtrCache {
}
let mut result = Vec::new();
for delta in &self.deltas {
for delta in &state.deltas {
if serial_gt(delta.serial(), client_serial) {
result.push(delta.clone());
}
@ -540,16 +515,15 @@ impl RtrCache {
}
Some(result)
}
}
fn merge_deltas_minimally(&self, deltas: &[Arc<Delta>]) -> Delta {
fn merge_deltas_minimally(current_serial: u32, deltas: &[Arc<Delta>]) -> Delta {
let mut states = BTreeMap::<ChangeKey, LogicalState>::new();
for delta in deltas {
for payload in delta.withdrawn() {
let key = change_key(payload);
let state = states.entry(key).or_insert_with(LogicalState::new);
if state.before.is_none() && state.after.is_none() {
state.before = Some(payload.clone());
}
@ -559,23 +533,17 @@ impl RtrCache {
for payload in delta.announced() {
let key = change_key(payload);
let state = states.entry(key).or_insert_with(LogicalState::new);
state.after = Some(payload.clone());
}
}
let mut announced = Vec::new();
let mut withdrawn = Vec::new();
for (_key, state) in states {
match (state.before, state.after) {
(None, None) => {}
(None, Some(new_payload)) => {
announced.push(new_payload);
}
(Some(old_payload), None) => {
withdrawn.push(old_payload);
}
(None, Some(new_payload)) => announced.push(new_payload),
(Some(old_payload), None) => withdrawn.push(old_payload),
(Some(old_payload), Some(new_payload)) => {
if old_payload != new_payload {
if matches!(old_payload, Payload::Aspa(_))
@ -591,7 +559,36 @@ impl RtrCache {
}
}
Delta::new(self.serial, announced, withdrawn)
Delta::new(current_serial, announced, withdrawn)
}
/// Project a full snapshot down to the payload kinds a given RTR protocol
/// version supports (v0: origins; v1: +router keys; v2: +ASPA).
fn project_snapshot_for_version(snapshot: &Snapshot, version: u8) -> Snapshot {
    let projected: Vec<_> = snapshot
        .payloads()
        .into_iter()
        .filter_map(|payload| project_payload_for_version(&payload, version))
        .collect();
    Snapshot::from_payloads(projected)
}
/// Return a clone of `payload` if the given protocol version carries that
/// payload kind, otherwise `None`.
/// v0 carries route origins only; v1 adds router keys; v2 adds ASPA.
fn project_payload_for_version(payload: &Payload, version: u8) -> Option<Payload> {
    match payload {
        Payload::RouteOrigin(origin) => Some(Payload::RouteOrigin(origin.clone())),
        Payload::RouterKey(key) if version >= 1 => Some(Payload::RouterKey(key.clone())),
        Payload::Aspa(aspa) if version >= 2 => Some(Payload::Aspa(aspa.clone())),
        // Payload kind not supported by this protocol version.
        _ => None,
    }
}
@ -659,12 +656,12 @@ pub enum SerialResult {
pub(super) struct AppliedUpdate {
pub(super) availability: CacheAvailability,
pub(super) snapshot: Snapshot,
pub(super) serial: u32,
pub(super) session_ids: SessionIds,
pub(super) delta: Option<Arc<Delta>>,
pub(super) delta_window: Option<(u32, u32)>,
pub(super) clear_delta_window: bool,
pub(super) snapshots: [Snapshot; VERSION_COUNT],
pub(super) serials: [u32; VERSION_COUNT],
pub(super) session_ids: [u16; VERSION_COUNT],
pub(super) deltas: [Option<Arc<Delta>>; VERSION_COUNT],
pub(super) delta_windows: [Option<(u32, u32)>; VERSION_COUNT],
pub(super) clear_delta_windows: [bool; VERSION_COUNT],
}
fn serial_cmp(a: u32, b: u32) -> Option<Ordering> {

196
src/rtr/cache/store.rs vendored
View File

@ -7,7 +7,9 @@ use crate::rtr::payload::{Payload, Timing};
use crate::rtr::store::RtrStore;
use super::core::{AppliedUpdate, CacheAvailability, RtrCache, RtrCacheBuilder, SessionIds};
use super::model::Snapshot;
use super::model::{Delta, Snapshot};
const VERSION_COUNT: usize = 3;
impl RtrCache {
pub fn init(
@ -22,13 +24,10 @@ impl RtrCache {
try_restore_from_store(store, max_delta, prune_delta_by_snapshot_size, timing)?
{
tracing::info!(
"RTR cache restored from store: availability={:?}, session_ids={:?}, serial={}, snapshot(route_origins={}, router_keys={}, aspas={})",
"RTR cache restored from store: availability={:?}, session_ids={:?}, serials={:?}",
cache.availability(),
cache.session_ids(),
cache.serial(),
cache.snapshot().origins().len(),
cache.snapshot().router_keys().len(),
cache.snapshot().aspas().len()
cache.serials()
);
return Ok(cache);
}
@ -36,43 +35,37 @@ impl RtrCache {
tracing::warn!("RTR cache store unavailable or invalid, fallback to file loader");
let payloads = file_loader()?;
let session_ids = SessionIds::random_distinct();
let snapshot = Snapshot::from_payloads(payloads);
let availability = if snapshot.is_empty() {
let source_snapshot = Snapshot::from_payloads(payloads);
let availability = if source_snapshot.is_empty() {
CacheAvailability::NoDataAvailable
} else {
CacheAvailability::Ready
};
let serial = if snapshot.is_empty() { 0 } else { 1 };
let session_ids = SessionIds::random_distinct();
let serial = if source_snapshot.is_empty() { 0 } else { 1 };
if snapshot.is_empty() {
tracing::warn!(
"RTR cache initialized without usable data: session_ids={:?}, serial={}",
session_ids,
serial
);
} else {
tracing::info!(
"RTR cache initialized from file loader: session_ids={:?}, serial={}",
session_ids,
serial
);
}
let snapshot_for_store = snapshot.clone();
let session_ids_for_store = session_ids.clone();
let snapshots = std::array::from_fn(|version| {
project_snapshot_for_version(&source_snapshot, version as u8)
});
let serials = [serial; VERSION_COUNT];
let deltas = std::array::from_fn(|_| VecDeque::<Arc<Delta>>::with_capacity(max_delta as usize));
tokio::spawn({
let store = store.clone();
let snapshots_for_store = snapshots.clone();
let session_ids_for_store = session_ids.as_array();
async move {
if let Err(e) = store.save_cache_state(
let deltas_none: [Option<&Delta>; 3] = [None, None, None];
let windows_none: [Option<(u32, u32)>; 3] = [None, None, None];
let clear = [true, true, true];
if let Err(e) = store.save_cache_state_versioned(
availability,
&snapshot_for_store,
&snapshots_for_store,
&session_ids_for_store,
serial,
None,
None,
true,
&serials,
&deltas_none,
&windows_none,
&clear,
) {
tracing::error!("persist cache state failed: {:?}", e);
}
@ -85,8 +78,9 @@ impl RtrCache {
.max_delta(max_delta)
.prune_delta_by_snapshot_size(prune_delta_by_snapshot_size)
.timing(timing)
.serial(serial)
.snapshot(snapshot)
.serials(serials)
.snapshots(snapshots)
.deltas_by_version(deltas)
.build())
}
@ -94,7 +88,6 @@ impl RtrCache {
if let Some(update) = self.apply_update(new_payloads)? {
spawn_store_sync(store, update);
}
Ok(())
}
}
@ -105,101 +98,98 @@ fn try_restore_from_store(
prune_delta_by_snapshot_size: bool,
timing: Timing,
) -> Result<Option<RtrCache>> {
let snapshot = store.get_snapshot()?;
let session_ids = store.get_session_ids()?;
let serial = store.get_serial()?;
let availability = store.get_availability()?;
let (snapshot, session_ids, serial) = match (snapshot, session_ids, serial) {
(Some(snapshot), Some(session_ids), Some(serial)) => (snapshot, session_ids, serial),
_ => {
tracing::warn!("RTR cache store incomplete: snapshot/session_ids/serial missing");
return Ok(None);
}
let mut snapshots = std::array::from_fn(|_| Snapshot::empty());
let mut session_ids = [0u16; VERSION_COUNT];
let mut serials = [0u32; VERSION_COUNT];
let mut deltas = std::array::from_fn(|_| VecDeque::<Arc<Delta>>::with_capacity(max_delta as usize));
for version in 0u8..=2 {
let idx = version as usize;
let snapshot = store.get_snapshot_for_version(version)?;
let session_id = store.get_session_id_for_version(version)?;
let serial = store.get_serial_for_version(version)?;
let (snapshot, session_id, serial) = match (snapshot, session_id, serial) {
(Some(snapshot), Some(session_id), Some(serial)) => (snapshot, session_id, serial),
_ => return Ok(None),
};
snapshots[idx] = snapshot;
session_ids[idx] = session_id;
serials[idx] = serial;
let availability = availability.unwrap_or_else(|| {
tracing::warn!("RTR cache store missing availability metadata, defaulting to Ready");
CacheAvailability::Ready
});
if availability == Some(CacheAvailability::NoDataAvailable) {
continue;
}
let deltas = if availability == CacheAvailability::NoDataAvailable {
tracing::warn!("RTR cache store restored in no-data-available state");
VecDeque::with_capacity(max_delta.into())
} else {
match store.get_delta_window()? {
Some((min_serial, max_serial)) => {
match store.load_delta_window(min_serial, max_serial) {
Ok(deltas) => deltas.into_iter().map(Arc::new).collect(),
Err(err) => {
if let Some((min_serial, max_serial)) = store.get_delta_window_for_version(version)? {
let mut loaded = store.load_delta_window_for_version(version, min_serial, max_serial)?;
let max_keep = usize::from(max_delta.max(1));
if loaded.len() > max_keep {
let drop_count = loaded.len() - max_keep;
let dropped_serials = loaded
.iter()
.take(drop_count)
.map(Delta::serial)
.collect::<Vec<_>>();
loaded.drain(..drop_count);
tracing::warn!(
"RTR cache store delta recovery failed, treat store as unusable: {:?}",
err
"RTR cache restore truncated persisted deltas to max_delta: version={}, max_delta={}, dropped_count={}, dropped_serials={:?}",
version,
max_delta,
drop_count,
dropped_serials
);
return Ok(None);
}
deltas[idx] = loaded.into_iter().map(Arc::new).collect();
}
}
}
None => {
tracing::info!("RTR cache store has no delta window, restore snapshot only");
VecDeque::with_capacity(max_delta.into())
}
}
};
let availability = availability.unwrap_or(CacheAvailability::Ready);
Ok(Some(
RtrCacheBuilder::new()
.availability(availability)
.session_ids(session_ids)
.session_ids(SessionIds::from_array(session_ids))
.max_delta(max_delta)
.prune_delta_by_snapshot_size(prune_delta_by_snapshot_size)
.timing(timing)
.serial(serial)
.snapshot(snapshot)
.deltas(deltas)
.serials(serials)
.snapshots(snapshots)
.deltas_by_version(deltas)
.build(),
))
}
fn spawn_store_sync(store: &RtrStore, update: AppliedUpdate) {
let AppliedUpdate {
availability,
snapshot,
serial,
session_ids,
delta,
delta_window,
clear_delta_window,
} = update;
tokio::spawn({
let store = store.clone();
async move {
tracing::debug!(
"persisting RTR cache state: availability={:?}, serial={}, session_ids={:?}, delta_present={}, delta_window={:?}, clear_delta_window={}, snapshot(route_origins={}, router_keys={}, aspas={})",
availability,
serial,
session_ids,
delta.is_some(),
delta_window,
clear_delta_window,
snapshot.origins().len(),
snapshot.router_keys().len(),
snapshot.aspas().len()
);
if let Err(e) = store.save_cache_state(
availability,
&snapshot,
&session_ids,
serial,
delta.as_deref(),
delta_window,
clear_delta_window,
let delta_refs: [Option<&Delta>; 3] =
std::array::from_fn(|idx| update.deltas[idx].as_deref());
if let Err(e) = store.save_cache_state_versioned(
update.availability,
&update.snapshots,
&update.session_ids,
&update.serials,
&delta_refs,
&update.delta_windows,
&update.clear_delta_windows,
) {
tracing::error!("persist cache state failed: {:?}", e);
} else {
tracing::debug!("persist RTR cache state completed: serial={}", serial);
}
}
});
}
/// Filter a source snapshot down to the payload kinds supported by the
/// given protocol version (v0: origins; v1: +router keys; v2: +ASPA).
fn project_snapshot_for_version(snapshot: &Snapshot, version: u8) -> Snapshot {
    let mut kept = Vec::new();
    for payload in snapshot.payloads() {
        let include = match &payload {
            Payload::RouteOrigin(_) => true,
            Payload::RouterKey(_) => version >= 1,
            Payload::Aspa(_) => version >= 2,
        };
        if include {
            kept.push(payload);
        }
    }
    Snapshot::from_payloads(kept)
}

View File

@ -558,10 +558,10 @@ where
.read()
.map_err(|_| anyhow!("cache read lock poisoned"))?;
let data_available = cache.is_data_available();
let snapshot = cache.snapshot();
let snapshot = cache.snapshot_for_version(version);
let payloads = snapshot.payloads_for_rtr();
let session_id = cache.session_id_for_version(version);
let serial = cache.serial();
let serial = cache.serial_for_version(version);
(data_available, payloads, session_id, serial)
};
@ -644,7 +644,7 @@ where
.cache
.read()
.map_err(|_| anyhow!("cache read lock poisoned"))?;
cache.get_deltas_since(client_serial)
cache.get_deltas_since_for_version(version, client_serial)
};
match serial_result {
@ -665,7 +665,10 @@ where
.cache
.read()
.map_err(|_| anyhow!("cache read lock poisoned"))?;
(cache.session_id_for_version(version), cache.serial())
(
cache.session_id_for_version(version),
cache.serial_for_version(version),
)
};
self.write_end_of_data(current_session, current_serial)
@ -687,7 +690,10 @@ where
.cache
.read()
.map_err(|_| anyhow!("cache read lock poisoned"))?;
(cache.session_id_for_version(version), cache.serial())
(
cache.session_id_for_version(version),
cache.serial_for_version(version),
)
};
self.write_cache_response(current_session).await?;
@ -734,7 +740,10 @@ where
.cache
.read()
.map_err(|_| anyhow!("cache read lock poisoned"))?;
(cache.session_id_for_version(version), cache.serial())
(
cache.session_id_for_version(version),
cache.serial_for_version(version),
)
};
debug!(
@ -772,7 +781,8 @@ where
.cache
.read()
.ok()
.map(|cache| cache.serial().to_string())
.and_then(|cache| self.version.map(|version| cache.serial_for_version(version)))
.map(|serial| serial.to_string())
.unwrap_or_else(|| "<unavailable>".to_string());
let session_id = self
.version

View File

@ -1,41 +1,49 @@
use anyhow::{Result, anyhow};
use rocksdb::{ColumnFamilyDescriptor, DB, Direction, IteratorMode, Options, WriteBatch};
use serde::{Serialize, de::DeserializeOwned};
use rocksdb::{ColumnFamilyDescriptor, DB, IteratorMode, Options, WriteBatch};
use serde::de::DeserializeOwned;
use std::path::Path;
use std::sync::Arc;
use tokio::task;
use tracing::{debug, info, warn};
use tracing::{info, warn};
use crate::rtr::cache::{CacheAvailability, Delta, SessionIds, Snapshot};
use crate::rtr::state::State;
use crate::rtr::cache::{CacheAvailability, Delta, Snapshot};
const CF_META: &str = "meta";
const CF_SNAPSHOT: &str = "snapshot";
const CF_DELTA: &str = "delta";
const META_STATE: &[u8] = b"state";
const META_SESSION_IDS: &[u8] = b"session_ids";
const META_SERIAL: &[u8] = b"serial";
const META_AVAILABILITY: &[u8] = b"availability";
const META_DELTA_MIN: &[u8] = b"delta_min";
const META_DELTA_MAX: &[u8] = b"delta_max";
const META_SESSION_ID_PREFIX: &str = "session_id_v";
const META_SERIAL_PREFIX: &str = "serial_v";
const META_DELTA_MIN_PREFIX: &str = "delta_min_v";
const META_DELTA_MAX_PREFIX: &str = "delta_max_v";
const SNAPSHOT_CURRENT_PREFIX: &str = "current_v";
const DELTA_KEY_PREFIX: u8 = b'd';
const DELTA_KEY_V2_PREFIX: u8 = b'D';
fn delta_key(serial: u32) -> [u8; 5] {
let mut key = [0u8; 5];
key[0] = DELTA_KEY_PREFIX;
key[1..].copy_from_slice(&serial.to_be_bytes());
/// Build the versioned delta key: 'D' marker, protocol version byte,
/// then the serial in big-endian order (6 bytes total).
fn delta_key_v2(version: u8, serial: u32) -> [u8; 6] {
    let serial_bytes = serial.to_be_bytes();
    let mut key = [DELTA_KEY_V2_PREFIX, version, 0, 0, 0, 0];
    key[2..].copy_from_slice(&serial_bytes);
    key
}
fn delta_key_serial(key: &[u8]) -> Option<u32> {
if key.len() != 5 || key[0] != DELTA_KEY_PREFIX {
fn delta_key_v2_serial(key: &[u8]) -> Option<(u8, u32)> {
if key.len() != 6 || key[0] != DELTA_KEY_V2_PREFIX {
return None;
}
let version = key[1];
let mut bytes = [0u8; 4];
bytes.copy_from_slice(&key[1..]);
Some(u32::from_be_bytes(bytes))
bytes.copy_from_slice(&key[2..]);
Some((version, u32::from_be_bytes(bytes)))
}
/// Build a per-version meta key, e.g. ("serial_v", 2) -> b"serial_v2"
/// (the version is appended in decimal).
fn meta_key(prefix: &str, version: u8) -> Vec<u8> {
    let mut key = String::with_capacity(prefix.len() + 3);
    key.push_str(prefix);
    key.push_str(&version.to_string());
    key.into_bytes()
}
/// Build the per-version snapshot key, e.g. version 1 -> b"current_v1".
fn snapshot_key(version: u8) -> Vec<u8> {
    let mut key = String::from(SNAPSHOT_CURRENT_PREFIX);
    key.push_str(&version.to_string());
    key.into_bytes()
}
#[derive(Clone)]
@ -44,7 +52,6 @@ pub struct RtrStore {
}
impl RtrStore {
/// Open or create DB with required column families.
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self> {
let path_ref = path.as_ref();
let mut opts = Options::default();
@ -60,22 +67,9 @@ impl RtrStore {
info!("opening RTR RocksDB store at {}", path_ref.display());
let db = Arc::new(DB::open_cf_descriptors(&opts, path_ref, cfs)?);
info!("opened RTR RocksDB store at {}", path_ref.display());
Ok(Self { db })
}
/// Common serialize/put.
fn put_cf<T: Serialize>(&self, cf: &str, key: &[u8], value: &T) -> Result<()> {
let cf_handle = self
.db
.cf_handle(cf)
.ok_or_else(|| anyhow!("CF not found"))?;
let data = serde_json::to_vec(value)?;
self.db.put_cf(cf_handle, key, data)?;
Ok(())
}
/// Common get/deserialize.
fn get_cf<T: DeserializeOwned>(&self, cf: &str, key: &[u8]) -> Result<Option<T>> {
let cf_handle = self
.db
@ -89,211 +83,134 @@ impl RtrStore {
}
}
/// Common delete.
fn delete_cf(&self, cf: &str, key: &[u8]) -> Result<()> {
let cf_handle = self
.db
.cf_handle(cf)
.ok_or_else(|| anyhow!("CF not found"))?;
self.db.delete_cf(cf_handle, key)?;
Ok(())
}
// ===============================
// Meta/state
// ===============================
pub fn set_state(&self, state: &State) -> Result<()> {
self.put_cf(CF_META, META_STATE, &state)
}
pub fn get_state(&self) -> Result<Option<State>> {
self.get_cf(CF_META, META_STATE)
}
pub fn set_meta(&self, meta: &State) -> Result<()> {
self.set_state(meta)
}
pub fn get_meta(&self) -> Result<Option<State>> {
self.get_state()
}
pub fn set_session_ids(&self, session_ids: &SessionIds) -> Result<()> {
self.put_cf(CF_META, META_SESSION_IDS, session_ids)
}
pub fn get_session_ids(&self) -> Result<Option<SessionIds>> {
self.get_cf(CF_META, META_SESSION_IDS)
}
pub fn set_serial(&self, serial: u32) -> Result<()> {
self.put_cf(CF_META, META_SERIAL, &serial)
}
pub fn get_serial(&self) -> Result<Option<u32>> {
self.get_cf(CF_META, META_SERIAL)
}
pub fn set_availability(&self, availability: CacheAvailability) -> Result<()> {
self.put_cf(CF_META, META_AVAILABILITY, &availability)
}
pub fn get_availability(&self) -> Result<Option<CacheAvailability>> {
self.get_cf(CF_META, META_AVAILABILITY)
}
pub fn set_delta_window(&self, min_serial: u32, max_serial: u32) -> Result<()> {
debug!(
"RTR store persisting delta window metadata: min_serial={}, max_serial={}",
min_serial, max_serial
);
let meta_cf = self
.db
.cf_handle(CF_META)
.ok_or_else(|| anyhow!("CF_META not found"))?;
let mut batch = WriteBatch::default();
batch.put_cf(meta_cf, META_DELTA_MIN, serde_json::to_vec(&min_serial)?);
batch.put_cf(meta_cf, META_DELTA_MAX, serde_json::to_vec(&max_serial)?);
self.db.write(batch)?;
Ok(())
/// Read the persisted session id for one protocol version
/// (stored in the meta CF under "session_id_v{N}").
pub fn get_session_id_for_version(&self, version: u8) -> Result<Option<u16>> {
    self.get_cf(CF_META, &meta_key(META_SESSION_ID_PREFIX, version))
}
pub fn clear_delta_window(&self) -> Result<()> {
debug!("RTR store clearing delta window metadata");
let meta_cf = self
.db
.cf_handle(CF_META)
.ok_or_else(|| anyhow!("CF_META not found"))?;
let mut batch = WriteBatch::default();
batch.delete_cf(meta_cf, META_DELTA_MIN);
batch.delete_cf(meta_cf, META_DELTA_MAX);
self.db.write(batch)?;
Ok(())
/// Read the persisted serial for one protocol version
/// (stored in the meta CF under "serial_v{N}").
pub fn get_serial_for_version(&self, version: u8) -> Result<Option<u32>> {
    self.get_cf(CF_META, &meta_key(META_SERIAL_PREFIX, version))
}
pub fn get_delta_window(&self) -> Result<Option<(u32, u32)>> {
let min: Option<u32> = self.get_cf(CF_META, META_DELTA_MIN)?;
let max: Option<u32> = self.get_cf(CF_META, META_DELTA_MAX)?;
pub fn get_delta_window_for_version(&self, version: u8) -> Result<Option<(u32, u32)>> {
let min_key = meta_key(META_DELTA_MIN_PREFIX, version);
let max_key = meta_key(META_DELTA_MAX_PREFIX, version);
let min: Option<u32> = self.get_cf(CF_META, &min_key)?;
let max: Option<u32> = self.get_cf(CF_META, &max_key)?;
match (min, max) {
(Some(min), Some(max)) => {
debug!(
"RTR store loaded delta window metadata: min_serial={}, max_serial={}",
min, max
);
Ok(Some((min, max)))
}
(Some(min), Some(max)) => Ok(Some((min, max))),
(None, None) => Ok(None),
_ => Err(anyhow!("Inconsistent DB state: delta window mismatch")),
_ => Err(anyhow!(
"Inconsistent DB state: delta window mismatch for version {}",
version
)),
}
}
pub fn delete_state(&self) -> Result<()> {
self.delete_cf(CF_META, META_STATE)
/// Load the persisted snapshot for one protocol version
/// (stored in the snapshot CF under "current_v{N}").
pub fn get_snapshot_for_version(&self, version: u8) -> Result<Option<Snapshot>> {
    self.get_cf(CF_SNAPSHOT, &snapshot_key(version))
}
pub fn delete_serial(&self) -> Result<()> {
self.delete_cf(CF_META, META_SERIAL)
/// Load a single persisted delta addressed by protocol version and serial.
pub fn get_delta_for_version(&self, version: u8, serial: u32) -> Result<Option<Delta>> {
    let key = delta_key_v2(version, serial);
    self.get_cf(CF_DELTA, &key)
}
// ===============================
// Snapshot
// ===============================
pub fn save_snapshot(&self, snapshot: &Snapshot) -> Result<()> {
pub fn load_delta_window_for_version(
&self,
version: u8,
min_serial: u32,
max_serial: u32,
) -> Result<Vec<Delta>> {
let cf_handle = self
.db
.cf_handle(CF_SNAPSHOT)
.ok_or_else(|| anyhow!("CF_SNAPSHOT not found"))?;
let mut batch = WriteBatch::default();
let data = serde_json::to_vec(snapshot)?;
batch.put_cf(cf_handle, b"current", data);
self.db.write(batch)?;
Ok(())
.cf_handle(CF_DELTA)
.ok_or_else(|| anyhow!("CF_DELTA not found"))?;
let iter = self.db.iterator_cf(cf_handle, IteratorMode::Start);
let mut out = Vec::new();
for item in iter {
let (key, value) = item.map_err(|e| anyhow!("rocksdb iterator error: {}", e))?;
let Some((key_version, serial)) = delta_key_v2_serial(key.as_ref()) else {
continue;
};
if key_version != version {
continue;
}
if serial_in_window(serial, min_serial, max_serial) {
let delta: Delta = serde_json::from_slice(value.as_ref())?;
out.push(delta);
}
}
out.sort_by_key(|delta| delta.serial().wrapping_sub(min_serial));
validate_delta_window(&out, min_serial, max_serial)?;
Ok(out)
}
pub fn get_snapshot(&self) -> Result<Option<Snapshot>> {
self.get_cf(CF_SNAPSHOT, b"current")
}
pub fn delete_snapshot(&self) -> Result<()> {
self.delete_cf(CF_SNAPSHOT, b"current")
}
pub fn save_snapshot_and_state(&self, snapshot: &Snapshot, state: &State) -> Result<()> {
let snapshot_cf = self
fn list_delta_keys_for_version(&self, version: u8) -> Result<Vec<Vec<u8>>> {
let cf_handle = self
.db
.cf_handle(CF_SNAPSHOT)
.ok_or_else(|| anyhow!("CF_SNAPSHOT not found"))?;
let meta_cf = self
.db
.cf_handle(CF_META)
.ok_or_else(|| anyhow!("CF_META not found"))?;
let mut batch = WriteBatch::default();
batch.put_cf(snapshot_cf, b"current", serde_json::to_vec(snapshot)?);
batch.put_cf(meta_cf, META_STATE, serde_json::to_vec(state)?);
batch.put_cf(
meta_cf,
META_SESSION_IDS,
serde_json::to_vec(&state.clone().session_ids())?,
);
batch.put_cf(
meta_cf,
META_SERIAL,
serde_json::to_vec(&state.clone().serial())?,
);
self.db.write(batch)?;
Ok(())
.cf_handle(CF_DELTA)
.ok_or_else(|| anyhow!("CF_DELTA not found"))?;
let iter = self.db.iterator_cf(cf_handle, IteratorMode::Start);
let mut keys = Vec::new();
for item in iter {
let (key, _value) = item.map_err(|e| anyhow!("rocksdb iterator error: {}", e))?;
if matches!(delta_key_v2_serial(key.as_ref()), Some((v, _)) if v == version) {
keys.push(key.to_vec());
}
}
Ok(keys)
}
pub fn save_snapshot_and_meta(
fn list_delta_keys_outside_window_for_version(
&self,
snapshot: &Snapshot,
session_ids: &SessionIds,
serial: u32,
) -> Result<()> {
let mut batch = WriteBatch::default();
let snapshot_cf = self
version: u8,
min_serial: u32,
max_serial: u32,
) -> Result<Vec<Vec<u8>>> {
let cf_handle = self
.db
.cf_handle(CF_SNAPSHOT)
.ok_or_else(|| anyhow!("CF_SNAPSHOT not found"))?;
let meta_cf = self
.db
.cf_handle(CF_META)
.ok_or_else(|| anyhow!("CF_META not found"))?;
batch.put_cf(snapshot_cf, b"current", serde_json::to_vec(snapshot)?);
batch.put_cf(meta_cf, META_SESSION_IDS, serde_json::to_vec(session_ids)?);
batch.put_cf(meta_cf, META_SERIAL, serde_json::to_vec(&serial)?);
self.db.write(batch)?;
Ok(())
.cf_handle(CF_DELTA)
.ok_or_else(|| anyhow!("CF_DELTA not found"))?;
let iter = self.db.iterator_cf(cf_handle, IteratorMode::Start);
let mut keys = Vec::new();
for item in iter {
let (key, _value) = item.map_err(|e| anyhow!("rocksdb iterator error: {}", e))?;
let Some((key_version, serial)) = delta_key_v2_serial(key.as_ref()) else {
continue;
};
if key_version != version {
continue;
}
if !serial_in_window(serial, min_serial, max_serial) {
keys.push(key.to_vec());
}
}
Ok(keys)
}
pub fn save_cache_state(
/// Persist full versioned RTR cache state in one atomic batch.
///
/// Write-boundary contract:
/// - Production writes must go through `src/rtr/cache/store.rs`.
/// - Direct callers should be limited to DB contract tests.
/// - Do not introduce ad-hoc write paths outside cache/store, otherwise
/// session_id/serial/snapshot/delta_window consistency can be broken.
pub fn save_cache_state_versioned(
&self,
availability: CacheAvailability,
snapshot: &Snapshot,
session_ids: &SessionIds,
serial: u32,
delta: Option<&Delta>,
delta_window: Option<(u32, u32)>,
clear_delta_window: bool,
snapshots: &[Snapshot; 3],
session_ids: &[u16; 3],
serials: &[u32; 3],
deltas: &[Option<&Delta>; 3],
delta_windows: &[Option<(u32, u32)>; 3],
clear_delta_windows: &[bool; 3],
) -> Result<()> {
debug!(
"RTR store save_cache_state start: availability={:?}, serial={}, session_ids={:?}, delta_present={}, delta_window={:?}, clear_delta_window={}, snapshot(route_origins={}, router_keys={}, aspas={})",
availability,
serial,
session_ids,
delta.is_some(),
delta_window,
clear_delta_window,
snapshot.origins().len(),
snapshot.router_keys().len(),
snapshot.aspas().len()
);
let snapshot_cf = self
.db
.cf_handle(CF_SNAPSHOT)
@ -306,275 +223,68 @@ impl RtrStore {
.db
.cf_handle(CF_DELTA)
.ok_or_else(|| anyhow!("CF_DELTA not found"))?;
let mut batch = WriteBatch::default();
batch.put_cf(snapshot_cf, b"current", serde_json::to_vec(snapshot)?);
batch.put_cf(meta_cf, META_SESSION_IDS, serde_json::to_vec(session_ids)?);
batch.put_cf(meta_cf, META_SERIAL, serde_json::to_vec(&serial)?);
let mut batch = WriteBatch::default();
batch.put_cf(
meta_cf,
META_AVAILABILITY,
serde_json::to_vec(&availability)?,
);
if let Some(delta) = delta {
debug!(
"RTR store persisting delta: serial={}, announced={}, withdrawn={}",
delta.serial(),
delta.announced().len(),
delta.withdrawn().len()
for version in 0u8..=2 {
let idx = version as usize;
batch.put_cf(
snapshot_cf,
snapshot_key(version),
serde_json::to_vec(&snapshots[idx])?,
);
batch.put_cf(
meta_cf,
meta_key(META_SESSION_ID_PREFIX, version),
serde_json::to_vec(&session_ids[idx])?,
);
batch.put_cf(
meta_cf,
meta_key(META_SERIAL_PREFIX, version),
serde_json::to_vec(&serials[idx])?,
);
if let Some(delta) = deltas[idx] {
batch.put_cf(
delta_cf,
delta_key(delta.serial()),
delta_key_v2(version, delta.serial()),
serde_json::to_vec(delta)?,
);
}
if clear_delta_window {
let existing_keys = self.list_delta_keys()?;
let existing_serials = summarize_delta_keys(&existing_keys);
info!(
"RTR store clearing persisted delta window: deleting {} delta records, serials={}",
existing_keys.len(),
existing_serials
);
batch.delete_cf(meta_cf, META_DELTA_MIN);
batch.delete_cf(meta_cf, META_DELTA_MAX);
for key in existing_keys {
if clear_delta_windows[idx] {
batch.delete_cf(meta_cf, meta_key(META_DELTA_MIN_PREFIX, version));
batch.delete_cf(meta_cf, meta_key(META_DELTA_MAX_PREFIX, version));
for key in self.list_delta_keys_for_version(version)? {
batch.delete_cf(delta_cf, key);
}
} else if let Some((min_serial, max_serial)) = delta_window {
batch.put_cf(meta_cf, META_DELTA_MIN, serde_json::to_vec(&min_serial)?);
batch.put_cf(meta_cf, META_DELTA_MAX, serde_json::to_vec(&max_serial)?);
// Serial numbers are compared in RFC 1982 ring order, while RocksDB stores
// keys in plain lexicographic order. After wraparound, a window such as
// [u32::MAX, 1] is contiguous in serial space but split in key space, so a
// simple delete_range() would leave stale high-serial keys behind.
let stale_keys = self.list_delta_keys_outside_window(min_serial, max_serial)?;
if !stale_keys.is_empty() {
info!(
"RTR store pruning stale delta records outside window [{}, {}]: count={}, serials={}",
min_serial,
max_serial,
stale_keys.len(),
summarize_delta_keys(&stale_keys)
} else if let Some((min_serial, max_serial)) = delta_windows[idx] {
batch.put_cf(
meta_cf,
meta_key(META_DELTA_MIN_PREFIX, version),
serde_json::to_vec(&min_serial)?,
);
} else {
debug!(
"RTR store found no stale delta records outside window [{}, {}]",
min_serial, max_serial
batch.put_cf(
meta_cf,
meta_key(META_DELTA_MAX_PREFIX, version),
serde_json::to_vec(&max_serial)?,
);
}
for key in stale_keys {
for key in self
.list_delta_keys_outside_window_for_version(version, min_serial, max_serial)?
{
batch.delete_cf(delta_cf, key);
}
}
}
self.db.write(batch)?;
debug!("RTR store save_cache_state completed: serial={}", serial);
Ok(())
}
/// Atomically persists the snapshot under the `current` key together with the
/// serial, using a single RocksDB write batch.
///
/// NOTE(review): this writes the legacy unversioned keys (`current`,
/// `META_SERIAL`); the multi-view model routes writes through
/// `save_cache_state_versioned` — confirm this path is still required.
pub fn save_snapshot_and_serial(&self, snapshot: &Snapshot, serial: u32) -> Result<()> {
    let snapshot_cf = self
        .db
        .cf_handle(CF_SNAPSHOT)
        .ok_or_else(|| anyhow!("CF_SNAPSHOT not found"))?;
    let meta_cf = self
        .db
        .cf_handle(CF_META)
        .ok_or_else(|| anyhow!("CF_META not found"))?;
    let mut batch = WriteBatch::default();
    batch.put_cf(snapshot_cf, b"current", serde_json::to_vec(snapshot)?);
    batch.put_cf(meta_cf, META_SERIAL, serde_json::to_vec(&serial)?);
    self.db.write(batch)?;
    Ok(())
}
/// Async variant of `save_snapshot_and_serial`: serialization happens on the
/// caller's task, while the blocking RocksDB batch write is moved onto the
/// blocking thread pool via `spawn_blocking`.
pub async fn save_snapshot_and_serial_async(
    self: Arc<Self>,
    snapshot: Snapshot,
    serial: u32,
) -> Result<()> {
    // Serialize up front so the blocking closure only touches RocksDB.
    let snapshot_json = serde_json::to_vec(&snapshot)?;
    let serial_json = serde_json::to_vec(&serial)?;
    task::spawn_blocking(move || {
        let snapshot_cf = self
            .db
            .cf_handle(CF_SNAPSHOT)
            .ok_or_else(|| anyhow!("CF_SNAPSHOT not found"))?;
        let meta_cf = self
            .db
            .cf_handle(CF_META)
            .ok_or_else(|| anyhow!("CF_META not found"))?;
        let mut batch = WriteBatch::default();
        batch.put_cf(snapshot_cf, b"current", snapshot_json);
        batch.put_cf(meta_cf, META_SERIAL, serial_json);
        self.db.write(batch)?;
        Ok::<_, anyhow::Error>(())
    })
    .await??;
    Ok(())
}
/// Loads the persisted snapshot/state pair.
///
/// Returns `Ok(None)` when neither is present, `Ok(Some(..))` when both are,
/// and an error when only one of the two exists (a partially written DB).
pub fn load_snapshot_and_state(&self) -> Result<Option<(Snapshot, State)>> {
    match (self.get_snapshot()?, self.get_state()?) {
        (Some(snap), Some(state)) => Ok(Some((snap, state))),
        (None, None) => Ok(None),
        _ => Err(anyhow!(
            "Inconsistent DB state: snapshot and state mismatch"
        )),
    }
}
/// Loads the persisted snapshot/serial pair.
///
/// Mirrors `load_snapshot_and_state`: both present -> `Some`, both absent ->
/// `None`, anything else is treated as an inconsistent database.
pub fn load_snapshot_and_serial(&self) -> Result<Option<(Snapshot, u32)>> {
    match (self.get_snapshot()?, self.get_serial()?) {
        (Some(snap), Some(serial)) => Ok(Some((snap, serial))),
        (None, None) => Ok(None),
        _ => Err(anyhow!(
            "Inconsistent DB state: snapshot and serial mismatch"
        )),
    }
}
// ===============================
// Delta
// ===============================
/// Persists one delta, keyed by its serial via `delta_key` (the legacy
/// unversioned key layout, as opposed to `delta_key_v2`).
pub fn save_delta(&self, delta: &Delta) -> Result<()> {
    self.put_cf(CF_DELTA, &delta_key(delta.serial()), delta)
}
/// Fetches the delta stored under `serial`, or `None` if it was never
/// written or has been pruned from the window.
pub fn get_delta(&self, serial: u32) -> Result<Option<Delta>> {
    self.get_cf(CF_DELTA, &delta_key(serial))
}
/// Loads every persisted delta with a serial strictly greater than `serial`,
/// scanning CF_DELTA forward from the key encoding `serial + 1`.
///
/// NOTE(review): the forward scan follows plain lexicographic key order, not
/// RFC 1982 ring order, so this helper is not safe across serial-number
/// wraparound (see `load_delta_window`, which restores by window bounds
/// instead) — confirm remaining callers never cross a wrap.
pub fn load_deltas_since(&self, serial: u32) -> Result<Vec<Delta>> {
    let cf_handle = self
        .db
        .cf_handle(CF_DELTA)
        .ok_or_else(|| anyhow!("CF_DELTA not found"))?;
    // Begin just past the requested serial (wrapping at u32::MAX).
    let start_key = delta_key(serial.wrapping_add(1));
    let iter = self.db.iterator_cf(
        cf_handle,
        IteratorMode::From(&start_key, Direction::Forward),
    );
    let mut out = Vec::new();
    for item in iter {
        let (key, value) = item.map_err(|e| anyhow!("rocksdb iterator error: {}", e))?;
        let parsed =
            delta_key_serial(key.as_ref()).ok_or_else(|| anyhow!("Invalid delta key"))?;
        // Defensive filter: skip any key at or below the requested serial.
        if parsed <= serial {
            continue;
        }
        let delta: Delta = serde_json::from_slice(value.as_ref())?;
        out.push(delta);
    }
    Ok(out)
}
/// Restores the persisted delta window `[min_serial, max_serial]`.
///
/// Scans all of CF_DELTA and keeps only records whose serial falls inside the
/// window in RFC 1982 ring order, then validates that the result forms a
/// complete, contiguous window. Returns an error on iterator failures,
/// malformed keys, or a gap in the window.
pub fn load_delta_window(&self, min_serial: u32, max_serial: u32) -> Result<Vec<Delta>> {
    info!(
        "RTR store loading persisted delta window: min_serial={}, max_serial={}",
        min_serial, max_serial
    );
    let cf_handle = self
        .db
        .cf_handle(CF_DELTA)
        .ok_or_else(|| anyhow!("CF_DELTA not found"))?;
    let iter = self.db.iterator_cf(cf_handle, IteratorMode::Start);
    let mut out = Vec::new();
    for item in iter {
        let (key, value) = item.map_err(|e| anyhow!("rocksdb iterator error: {}", e))?;
        let parsed =
            delta_key_serial(key.as_ref()).ok_or_else(|| anyhow!("Invalid delta key"))?;
        // Restore by the persisted window bounds instead of load_deltas_since().
        // The latter follows lexicographic key order and is not safe across serial
        // wraparound, where older high-valued keys may otherwise be pulled back in.
        if serial_in_window(parsed, min_serial, max_serial) {
            let delta: Delta = serde_json::from_slice(value.as_ref())?;
            out.push(delta);
        }
    }
    // Order by wrapping distance from min_serial so the window stays
    // monotonic even when it straddles the u32 wraparound point.
    out.sort_by_key(|delta| delta.serial().wrapping_sub(min_serial));
    debug!(
        "RTR store loaded delta candidates for window [{}, {}]: count={}, serials={}",
        min_serial,
        max_serial,
        out.len(),
        summarize_delta_serials(&out)
    );
    validate_delta_window(&out, min_serial, max_serial)?;
    info!(
        "RTR store restored valid delta window: min_serial={}, max_serial={}, count={}, serials={}",
        min_serial,
        max_serial,
        out.len(),
        summarize_delta_serials(&out)
    );
    Ok(out)
}
/// Deletes the delta stored under `serial` (legacy unversioned `delta_key`
/// layout); succeeds even if the key does not exist.
pub fn delete_delta(&self, serial: u32) -> Result<()> {
    self.delete_cf(CF_DELTA, &delta_key(serial))
}
fn list_delta_keys(&self) -> Result<Vec<Vec<u8>>> {
let cf_handle = self
.db
.cf_handle(CF_DELTA)
.ok_or_else(|| anyhow!("CF_DELTA not found"))?;
let iter = self.db.iterator_cf(cf_handle, IteratorMode::Start);
let mut keys = Vec::new();
for item in iter {
let (key, _value) = item.map_err(|e| anyhow!("rocksdb iterator error: {}", e))?;
keys.push(key.to_vec());
}
Ok(keys)
}
fn list_delta_keys_outside_window(
&self,
min_serial: u32,
max_serial: u32,
) -> Result<Vec<Vec<u8>>> {
let cf_handle = self
.db
.cf_handle(CF_DELTA)
.ok_or_else(|| anyhow!("CF_DELTA not found"))?;
let iter = self.db.iterator_cf(cf_handle, IteratorMode::Start);
let mut keys = Vec::new();
for item in iter {
let (key, _value) = item.map_err(|e| anyhow!("rocksdb iterator error: {}", e))?;
let serial =
delta_key_serial(key.as_ref()).ok_or_else(|| anyhow!("Invalid delta key"))?;
if !serial_in_window(serial, min_serial, max_serial) {
keys.push(key.to_vec());
}
}
Ok(keys)
}
}
fn serial_in_window(serial: u32, min_serial: u32, max_serial: u32) -> bool {
@ -641,41 +351,3 @@ fn validate_delta_window(deltas: &[Delta], min_serial: u32, max_serial: u32) ->
Ok(())
}
/// Renders the serials parsed out of raw delta keys as a compact log string;
/// keys that fail to parse are silently skipped.
fn summarize_delta_keys(keys: &[Vec<u8>]) -> String {
    let parsed: Vec<u32> = keys
        .iter()
        .filter_map(|key| delta_key_serial(key))
        .collect();
    summarize_serials(&parsed)
}
/// Renders the serials of a delta slice as a compact log string.
fn summarize_delta_serials(deltas: &[Delta]) -> String {
    let serials: Vec<u32> = deltas.iter().map(|delta| delta.serial()).collect();
    summarize_serials(&serials)
}
/// Formats a serial list for logging: short lists are printed verbatim,
/// longer ones are abbreviated to the first 6 and last 3 entries plus a
/// total count.
fn summarize_serials(serials: &[u32]) -> String {
    // Lists up to this length are printed in full.
    const MAX_INLINE: usize = 12;
    match serials.len() {
        0 => "[]".to_string(),
        n if n <= MAX_INLINE => format!("{:?}", serials),
        n => {
            let head = &serials[..6];
            let tail = &serials[n - 3..];
            format!("{:?} ... {:?} (total={})", head, tail, n)
        }
    }
}

View File

@ -59,6 +59,18 @@ fn snapshot_hashes_and_sorted_view_to_string(snapshot: &Snapshot) -> String {
)
}
/// Replicates one serial across all three protocol-version slots (v0/v1/v2).
fn serials_all(serial: u32) -> [u32; 3] {
    [serial, serial, serial]
}
/// Replicates one snapshot across all three protocol-version slots.
fn snapshots_all(snapshot: Snapshot) -> [Snapshot; 3] {
    let v0 = snapshot.clone();
    let v1 = snapshot.clone();
    [v0, v1, snapshot]
}
/// Replicates one delta window across all three protocol-version slots.
fn deltas_all(deltas: VecDeque<Arc<Delta>>) -> [VecDeque<Arc<Delta>>; 3] {
    let v0 = deltas.clone();
    let v1 = deltas.clone();
    [v0, v1, deltas]
}
/// Snapshot hash consistency check:
/// verifies the payload-derived snapshot_hash / origins_hash values.
#[test]
@ -127,8 +139,8 @@ async fn init_keeps_cache_running_when_file_loader_returns_no_data() {
.unwrap();
assert!(!cache.is_data_available());
assert_eq!(cache.serial(), 0);
assert!(cache.snapshot().payloads_for_rtr().is_empty());
assert_eq!(cache.serial_for_version(2), 0);
assert!(cache.snapshot_for_version(2).payloads_for_rtr().is_empty());
}
#[tokio::test]
@ -162,37 +174,40 @@ async fn init_restores_wraparound_delta_window_from_store() {
vec![],
);
let snapshots = snapshots_all(snapshot.clone());
let session_ids = session_ids.as_array();
store
.save_cache_state(
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshot,
&snapshots,
&session_ids,
u32::MAX,
Some(&d_max),
Some((u32::MAX, u32::MAX)),
false,
&serials_all(u32::MAX),
&[None, None, Some(&d_max)],
&[None, None, Some((u32::MAX, u32::MAX))],
&[false, false, false],
)
.unwrap();
store
.save_cache_state(
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshot,
&snapshots,
&session_ids,
0,
Some(&d_zero),
Some((u32::MAX, 0)),
false,
&serials_all(0),
&[None, None, Some(&d_zero)],
&[None, None, Some((u32::MAX, 0))],
&[false, false, false],
)
.unwrap();
store
.save_cache_state(
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshot,
&snapshots,
&session_ids,
1,
Some(&d_one),
Some((u32::MAX, 1)),
false,
&serials_all(1),
&[None, None, Some(&d_one)],
&[None, None, Some((u32::MAX, 1))],
&[false, false, false],
)
.unwrap();
@ -202,7 +217,7 @@ async fn init_restores_wraparound_delta_window_from_store() {
})
.unwrap();
match cache.get_deltas_since(u32::MAX.wrapping_sub(1)) {
match cache.get_deltas_since_for_version(2, u32::MAX.wrapping_sub(1)) {
SerialResult::Delta(delta) => {
assert_eq!(delta.serial(), 1);
assert_eq!(delta.announced().len(), 3);
@ -211,6 +226,60 @@ async fn init_restores_wraparound_delta_window_from_store() {
}
}
/// Restart-restore must honor the delta retention limit: with the second
/// `init` argument (presumably `max_delta`) set to 1, only the newest delta
/// survives, so clients behind the retained window get a reset.
#[tokio::test]
async fn init_restore_respects_max_delta_limit() {
    let dir = tempfile::tempdir().unwrap();
    let store = RtrStore::open(dir.path()).unwrap();
    let session_ids = SessionIds::from_array([42, 43, 44]).as_array();
    let a = Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496));
    let b = Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497));
    let snapshot = Snapshot::from_payloads(vec![a.clone(), b.clone()]);
    let snapshots = snapshots_all(snapshot);
    let d17 = Delta::new(17, vec![a], vec![]);
    let d18 = Delta::new(18, vec![b], vec![]);
    // Persist two consecutive states so the stored v2 delta window is [17, 18].
    store
        .save_cache_state_versioned(
            CacheAvailability::Ready,
            &snapshots,
            &session_ids,
            &serials_all(17),
            &[None, None, Some(&d17)],
            &[None, None, Some((17, 17))],
            &[false, false, false],
        )
        .unwrap();
    store
        .save_cache_state_versioned(
            CacheAvailability::Ready,
            &snapshots,
            &session_ids,
            &serials_all(18),
            &[None, None, Some(&d18)],
            &[None, None, Some((17, 18))],
            &[false, false, false],
        )
        .unwrap();
    // Restore with a delta limit of 1; the loader closure must not matter here.
    let cache = rpki::rtr::cache::RtrCache::default()
        .init(&store, 1, false, Timing::new(600, 600, 7200), || {
            Ok(Vec::new())
        })
        .unwrap();
    // Serial 16 predates the retained window -> full reset required.
    match cache.get_deltas_since_for_version(2, 16) {
        SerialResult::ResetRequired => {}
        _ => panic!("expected ResetRequired for serial older than retained max_delta window"),
    }
    // Serial 17 is still served incrementally by the single retained delta.
    match cache.get_deltas_since_for_version(2, 17) {
        SerialResult::Delta(delta) => assert_eq!(delta.serial(), 18),
        _ => panic!("expected one retained delta from 17 -> 18"),
    }
}
#[tokio::test]
async fn update_prunes_delta_window_when_cumulative_delta_size_reaches_snapshot_size() {
let dir = tempfile::tempdir().unwrap();
@ -229,8 +298,8 @@ async fn update_prunes_delta_window_when_cumulative_delta_size_reaches_snapshot_
let mut cache = RtrCacheBuilder::new()
.availability(CacheAvailability::Ready)
.session_ids(SessionIds::from_array([42, 43, 44]))
.serial(1)
.snapshot(initial_snapshot)
.serials(serials_all(1))
.snapshots(snapshots_all(initial_snapshot))
.max_delta(16)
.prune_delta_by_snapshot_size(true)
.timing(Timing::new(600, 600, 7200))
@ -247,7 +316,7 @@ async fn update_prunes_delta_window_when_cumulative_delta_size_reaches_snapshot_
)
.unwrap();
match cache.get_deltas_since(1) {
match cache.get_deltas_since_for_version(2, 1) {
SerialResult::ResetRequired => {}
_ => panic!(
"expected delta window to be pruned when cumulative delta size exceeds snapshot size"
@ -444,14 +513,14 @@ fn delta_new_sorts_announced_descending_and_withdrawn_ascending() {
fn get_deltas_since_returns_up_to_date_when_client_serial_matches_current() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::default())
.build();
let result = cache.get_deltas_since(100);
let result = cache.get_deltas_since_for_version(2, 100);
let input =
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 100);
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 100);
let output = serial_result_detail_to_string(&result);
test_report(
@ -488,16 +557,16 @@ fn get_deltas_since_returns_reset_required_when_client_serial_is_too_old() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(102)
.serials(serials_all(102))
.timing(Timing::default())
.deltas(deltas.clone())
.deltas_by_version(deltas_all(deltas.clone()))
.build();
let result = cache.get_deltas_since(99);
let result = cache.get_deltas_since_for_version(2, 99);
let input = format!(
"{}delta_window:\n{}",
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 99),
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 99),
indent_block(&deltas_window_to_string(&deltas), 2),
);
let output = serial_result_detail_to_string(&result);
@ -552,17 +621,17 @@ fn get_deltas_since_returns_minimal_merged_delta() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(103)
.serials(serials_all(103))
.timing(Timing::default())
.snapshot(final_snapshot)
.deltas(deltas.clone())
.snapshots(snapshots_all(final_snapshot))
.deltas_by_version(deltas_all(deltas.clone()))
.build();
let result = cache.get_deltas_since(101);
let result = cache.get_deltas_since_for_version(2, 101);
let input = format!(
"{}delta_window:\n{}",
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 101),
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 101),
indent_block(&deltas_window_to_string(&deltas), 2),
);
let output = serial_result_detail_to_string(&result);
@ -602,14 +671,14 @@ fn get_deltas_since_returns_minimal_merged_delta() {
fn get_deltas_since_returns_reset_required_when_client_serial_is_in_future() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::default())
.build();
let result = cache.get_deltas_since(101);
let result = cache.get_deltas_since_for_version(2, 101);
let input =
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 101);
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 101);
let output = serial_result_detail_to_string(&result);
test_report(
@ -648,19 +717,19 @@ fn get_deltas_since_supports_incremental_updates_across_serial_wraparound() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(0)
.serials(serials_all(0))
.timing(Timing::default())
.snapshot(final_snapshot)
.deltas(deltas.clone())
.snapshots(snapshots_all(final_snapshot))
.deltas_by_version(deltas_all(deltas.clone()))
.build();
let result = cache.get_deltas_since(u32::MAX.wrapping_sub(1));
let result = cache.get_deltas_since_for_version(2, u32::MAX.wrapping_sub(1));
let input = format!(
"{}delta_window:\n{}",
get_deltas_since_input_to_string(
cache.session_id_for_version(1),
cache.serial(),
cache.serial_for_version(2),
u32::MAX.wrapping_sub(1)
),
indent_block(&deltas_window_to_string(&deltas), 2),
@ -723,24 +792,24 @@ fn get_deltas_since_returns_reset_required_when_client_serial_is_too_old_across_
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(1)
.serials(serials_all(1))
.timing(Timing::default())
.snapshot(Snapshot::from_payloads(vec![
.snapshots(snapshots_all(Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497)),
Payload::RouteOrigin(v4_origin(203, 0, 113, 0, 24, 24, 64498)),
]))
.deltas(deltas.clone())
])))
.deltas_by_version(deltas_all(deltas.clone()))
.build();
let client_serial = u32::MAX.wrapping_sub(2);
let result = cache.get_deltas_since(client_serial);
let result = cache.get_deltas_since_for_version(2, client_serial);
let input = format!(
"{}delta_window:\n{}",
get_deltas_since_input_to_string(
cache.session_id_for_version(1),
cache.serial(),
cache.serial_for_version(2),
client_serial
),
indent_block(&deltas_window_to_string(&deltas), 2),
@ -764,14 +833,14 @@ fn get_deltas_since_returns_reset_required_when_client_serial_is_too_old_across_
fn get_deltas_since_returns_reset_required_when_client_serial_is_in_future_across_wraparound() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(u32::MAX)
.serials(serials_all(u32::MAX))
.timing(Timing::default())
.build();
let result = cache.get_deltas_since(0);
let result = cache.get_deltas_since_for_version(2, 0);
let input =
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 0);
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 0);
let output = serial_result_detail_to_string(&result);
test_report(
@ -802,9 +871,9 @@ async fn update_no_change_keeps_serial_and_produces_no_delta() {
let mut cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::default())
.snapshot(snapshot.clone())
.snapshots(snapshots_all(snapshot.clone()))
.build();
let dir = tempfile::tempdir().unwrap();
@ -814,8 +883,8 @@ async fn update_no_change_keeps_serial_and_produces_no_delta() {
cache.update(new_payloads.clone(), &store).unwrap();
let current_snapshot = cache.snapshot();
let result = cache.get_deltas_since(100);
let current_snapshot = cache.snapshot_for_version(2);
let result = cache.get_deltas_since_for_version(2, 100);
let input = format!(
"old_snapshot :\n{}new_payloads :\n{}",
@ -825,7 +894,7 @@ async fn update_no_change_keeps_serial_and_produces_no_delta() {
let output = format!(
"cache.serial_after_update: {}\ncurrent_snapshot:\n{}get_deltas_since(100):\n{}",
cache.serial(),
cache.serial_for_version(2),
indent_block(
&snapshot_hashes_and_sorted_view_to_string(&current_snapshot),
2
@ -840,8 +909,8 @@ async fn update_no_change_keeps_serial_and_produces_no_delta() {
&output,
);
assert_eq!(cache.serial(), 100);
assert!(cache.snapshot().same_content(&snapshot));
assert_eq!(cache.serial_for_version(2), 100);
assert!(cache.snapshot_for_version(2).same_content(&snapshot));
match result {
SerialResult::UpToDate => {}
@ -861,9 +930,9 @@ async fn update_add_only_increments_serial_and_generates_announced_delta() {
let mut cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::default())
.snapshot(old_snapshot.clone())
.snapshots(snapshots_all(old_snapshot.clone()))
.build();
let dir = tempfile::tempdir().unwrap();
@ -876,8 +945,8 @@ async fn update_add_only_increments_serial_and_generates_announced_delta() {
cache.update(new_payloads.clone(), &store).unwrap();
let current_snapshot = cache.snapshot();
let result = cache.get_deltas_since(100);
let current_snapshot = cache.snapshot_for_version(2);
let result = cache.get_deltas_since_for_version(2, 100);
let input = format!(
"old_snapshot :\n{}new_payloads :\n{}",
@ -887,7 +956,7 @@ async fn update_add_only_increments_serial_and_generates_announced_delta() {
let output = format!(
"cache.serial_after_update: {}\ncurrent_snapshot:\n{}get_deltas_since(100):\n{}",
cache.serial(),
cache.serial_for_version(2),
indent_block(
&snapshot_hashes_and_sorted_view_to_string(&current_snapshot),
2
@ -902,7 +971,7 @@ async fn update_add_only_increments_serial_and_generates_announced_delta() {
&output,
);
assert_eq!(cache.serial(), 101);
assert_eq!(cache.serial_for_version(2), 101);
match result {
SerialResult::Delta(delta) => {
@ -934,9 +1003,9 @@ async fn update_remove_only_increments_serial_and_generates_withdrawn_delta() {
let mut cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::default())
.snapshot(old_snapshot.clone())
.snapshots(snapshots_all(old_snapshot.clone()))
.build();
let dir = tempfile::tempdir().unwrap();
@ -946,8 +1015,8 @@ async fn update_remove_only_increments_serial_and_generates_withdrawn_delta() {
cache.update(new_payloads.clone(), &store).unwrap();
let current_snapshot = cache.snapshot();
let result = cache.get_deltas_since(100);
let current_snapshot = cache.snapshot_for_version(2);
let result = cache.get_deltas_since_for_version(2, 100);
let input = format!(
"old_snapshot :\n{}new_payloads :\n{}",
@ -957,7 +1026,7 @@ async fn update_remove_only_increments_serial_and_generates_withdrawn_delta() {
let output = format!(
"cache.serial_after_update: {}\ncurrent_snapshot:\n{}get_deltas_since(100):\n{}",
cache.serial(),
cache.serial_for_version(2),
indent_block(
&snapshot_hashes_and_sorted_view_to_string(&current_snapshot),
2
@ -972,7 +1041,7 @@ async fn update_remove_only_increments_serial_and_generates_withdrawn_delta() {
&output,
);
assert_eq!(cache.serial(), 101);
assert_eq!(cache.serial_for_version(2), 101);
match result {
SerialResult::Delta(delta) => {
@ -1010,9 +1079,9 @@ async fn update_add_and_remove_increments_serial_and_generates_both_sides() {
let mut cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::default())
.snapshot(old_snapshot.clone())
.snapshots(snapshots_all(old_snapshot.clone()))
.build();
let dir = tempfile::tempdir().unwrap();
@ -1025,8 +1094,8 @@ async fn update_add_and_remove_increments_serial_and_generates_both_sides() {
cache.update(new_payloads.clone(), &store).unwrap();
let current_snapshot = cache.snapshot();
let result = cache.get_deltas_since(100);
let current_snapshot = cache.snapshot_for_version(2);
let result = cache.get_deltas_since_for_version(2, 100);
let input = format!(
"old_snapshot :\n{}new_payloads :\n{}",
@ -1036,7 +1105,7 @@ async fn update_add_and_remove_increments_serial_and_generates_both_sides() {
let output = format!(
"cache.serial_after_update: {}\ncurrent_snapshot:\n{}get_deltas_since(100):\n{}",
cache.serial(),
cache.serial_for_version(2),
indent_block(
&snapshot_hashes_and_sorted_view_to_string(&current_snapshot),
2
@ -1051,7 +1120,7 @@ async fn update_add_and_remove_increments_serial_and_generates_both_sides() {
&output,
);
assert_eq!(cache.serial(), 101);
assert_eq!(cache.serial_for_version(2), 101);
match result {
SerialResult::Delta(delta) => {
@ -1099,17 +1168,17 @@ fn get_deltas_since_cancels_announce_then_withdraw_for_same_prefix() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(102)
.serials(serials_all(102))
.timing(Timing::default())
.snapshot(final_snapshot)
.deltas(deltas.clone())
.snapshots(snapshots_all(final_snapshot))
.deltas_by_version(deltas_all(deltas.clone()))
.build();
let result = cache.get_deltas_since(100);
let result = cache.get_deltas_since_for_version(2, 100);
let input = format!(
"{}delta_window:\n{}",
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 100),
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 100),
indent_block(&deltas_window_to_string(&deltas), 2),
);
let output = serial_result_detail_to_string(&result);
@ -1153,17 +1222,17 @@ fn get_deltas_since_cancels_withdraw_then_announce_for_same_prefix() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(102)
.serials(serials_all(102))
.timing(Timing::default())
.snapshot(final_snapshot)
.deltas(deltas.clone())
.snapshots(snapshots_all(final_snapshot))
.deltas_by_version(deltas_all(deltas.clone()))
.build();
let result = cache.get_deltas_since(100);
let result = cache.get_deltas_since_for_version(2, 100);
let input = format!(
"{}delta_window:\n{}",
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 100),
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 100),
indent_block(&deltas_window_to_string(&deltas), 2),
);
let output = serial_result_detail_to_string(&result);
@ -1207,17 +1276,17 @@ fn get_deltas_since_merges_replacement_into_withdraw_and_announce() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(102)
.serials(serials_all(102))
.timing(Timing::default())
.snapshot(final_snapshot)
.deltas(deltas.clone())
.snapshots(snapshots_all(final_snapshot))
.deltas_by_version(deltas_all(deltas.clone()))
.build();
let result = cache.get_deltas_since(100);
let result = cache.get_deltas_since_for_version(2, 100);
let input = format!(
"{}delta_window:\n{}",
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 100),
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 100),
indent_block(&deltas_window_to_string(&deltas), 2),
);
let output = serial_result_detail_to_string(&result);
@ -1285,18 +1354,18 @@ fn get_deltas_since_merges_multiple_deltas_to_final_minimal_view() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(103)
.serials(serials_all(103))
.timing(Timing::default())
.snapshot(final_snapshot)
.deltas(deltas.clone())
.snapshots(snapshots_all(final_snapshot))
.deltas_by_version(deltas_all(deltas.clone()))
.build();
// From serial=100, the A/B changes and the +C announcement should merge
// into a single minimal delta.
let result = cache.get_deltas_since(100);
let result = cache.get_deltas_since_for_version(2, 100);
let input = format!(
"{}delta_window:\n{}",
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial(), 100),
get_deltas_since_input_to_string(cache.session_id_for_version(1), cache.serial_for_version(2), 100),
indent_block(&deltas_window_to_string(&deltas), 2),
);
let output = serial_result_detail_to_string(&result);
@ -1381,13 +1450,13 @@ fn get_deltas_since_merges_aspa_replacement_into_single_announcement() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(102)
.serials(serials_all(102))
.timing(Timing::default())
.snapshot(Snapshot::from_payloads(vec![Payload::Aspa(new.clone())]))
.deltas(deltas)
.snapshots(snapshots_all(Snapshot::from_payloads(vec![Payload::Aspa(new.clone())])))
.deltas_by_version(deltas_all(deltas))
.build();
let result = cache.get_deltas_since(100);
let result = cache.get_deltas_since_for_version(2, 100);
match result {
SerialResult::Delta(delta) => {

View File

@ -13,7 +13,10 @@ fn fixture_path(name: &str) -> PathBuf {
#[test]
#[ignore = "manual CCR loader smoke test for local samples"]
fn ccr_loader_smoke() {
let snapshot = load_ccr_snapshot_from_file(fixture_path("20260324T000037Z-sng1.ccr"))
// let path = "./mini_data/20260403T000001Z-mini-a.ccr";
// let path = "20260403T000101Z-mini-b.ccr";
let path = "20260403T000201Z-mini-c.ccr";
let snapshot = load_ccr_snapshot_from_file(fixture_path(path))
.expect("load CCR snapshot");
println!("content_type_oid: {}", snapshot.content_type_oid);
@ -22,6 +25,8 @@ fn ccr_loader_smoke() {
println!("vap_count : {}", snapshot.vaps.len());
println!("first_vrp : {:?}", snapshot.vrps.first());
println!("first_vap : {:?}", snapshot.vaps.first());
println!("vrps : {:?}", snapshot.vrps);
println!("vaps : {:?}", snapshot.vaps);
}
#[test]
@ -65,3 +70,19 @@ fn snapshot_to_payloads_with_options_skips_invalid_aspa_when_not_strict() {
assert_eq!(conversion.invalid_vaps.len(), 1);
assert!(conversion.invalid_vaps[0].contains("provider list must not contain AS0"));
}
/// Every generated mini CCR fixture must parse, and its VRP/VAP counts must
/// match the values baked in at generation time.
#[test]
fn generated_mini_ccr_files_are_parseable() {
    let expectations = [
        ("20260403T000001Z-mini-a.ccr", 2usize, 1usize),
        ("20260403T000101Z-mini-b.ccr", 3usize, 1usize),
        ("20260403T000201Z-mini-c.ccr", 2usize, 2usize),
    ];
    for (name, want_vrps, want_vaps) in expectations {
        let snapshot = load_ccr_snapshot_from_file(fixture_path(name))
            .unwrap_or_else(|e| panic!("failed to parse {}: {:?}", name, e));
        assert_eq!(snapshot.vrps.len(), want_vrps, "vrp count mismatch for {name}");
        assert_eq!(snapshot.vaps.len(), want_vaps, "vap count mismatch for {name}");
    }
}

View File

@ -1,4 +1,4 @@
mod common;
mod common;
use std::collections::VecDeque;
use std::fs::File;
@ -31,6 +31,7 @@ use rpki::rtr::pdu::{
Aspa as AspaPdu, CacheReset, CacheResponse, EndOfDataV1, ErrorReport, Header, IPv4Prefix,
IPv6Prefix, ResetQuery, RouterKey as RouterKeyPdu, SerialNotify, SerialQuery,
};
use rpki::rtr::store::RtrStore;
use rpki::rtr::server::connection::handle_tls_connection;
use rpki::rtr::server::tls::load_rustls_server_config_with_options;
use rpki::rtr::session::RtrSession;
@ -273,6 +274,42 @@ fn assert_error_report_matches(
assert_eq!(report.erroneous_pdu(), offending_pdu);
}
/// Fans one serial out to the three per-version slots used by the builder.
fn serials_all(serial: u32) -> [u32; 3] {
    [serial, serial, serial]
}
/// Fans one snapshot out to the three per-version slots used by the builder.
fn snapshots_all(snapshot: Snapshot) -> [Snapshot; 3] {
    let first = snapshot.clone();
    let second = snapshot.clone();
    [first, second, snapshot]
}
/// Fans one delta window out to the three per-version slots used by the builder.
fn deltas_all(deltas: VecDeque<Arc<Delta>>) -> [VecDeque<Arc<Delta>>; 3] {
    let first = deltas.clone();
    let second = deltas.clone();
    [first, second, deltas]
}
/// Polls the store until all three versioned serials match `expected`
/// (up to 100 attempts, 20 ms apart); panics with a diagnostic on timeout.
async fn wait_for_store_serials(store: &RtrStore, expected: [u32; 3]) {
    let want = [Some(expected[0]), Some(expected[1]), Some(expected[2])];
    for _ in 0..100 {
        let observed = [
            store.get_serial_for_version(0).unwrap(),
            store.get_serial_for_version(1).unwrap(),
            store.get_serial_for_version(2).unwrap(),
        ];
        if observed == want {
            return;
        }
        tokio::time::sleep(Duration::from_millis(20)).await;
    }
    panic!(
        "timed out waiting store serials {:?}, current={:?}",
        expected,
        [
            store.get_serial_for_version(0).unwrap(),
            store.get_serial_for_version(1).unwrap(),
            store.get_serial_for_version(2).unwrap(),
        ]
    );
}
/// 测试Reset Query 会返回完整 snapshot并以 End of Data 结束响应。
#[tokio::test]
async fn reset_query_returns_snapshot_and_end_of_data() {
@ -285,9 +322,9 @@ async fn reset_query_returns_snapshot_and_end_of_data() {
let snapshot = Snapshot::from_payloads(vec![Payload::RouteOrigin(origin)]);
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.snapshot(snapshot)
.snapshots(snapshots_all(snapshot))
.build();
let server_cache = shared_cache(cache);
@ -333,7 +370,7 @@ async fn reset_query_returns_snapshot_and_end_of_data() {
async fn reset_query_uses_version_specific_session_id() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([40, 41, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -360,12 +397,98 @@ async fn reset_query_uses_version_specific_session_id() {
shutdown_server(client, shutdown_tx, server_handle).await;
}
/// End-to-end restart test for the three-view (v0/v1/v2) store model:
/// seeds the store through a cache init + one update, then rebuilds the
/// cache from the store alone (the panicking loader proves the file-based
/// input path is NOT used) and verifies each protocol version serves its
/// own session id, payload set, and serial over real RTR sessions.
#[tokio::test]
async fn restart_restores_versioned_state_and_serves_queries() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let prefix = IPAddressPrefix {
address: IPAddress::from_ipv4(Ipv4Addr::new(192, 0, 2, 0)),
prefix_length: 24,
};
let origin = Payload::RouteOrigin(RouteOrigin::new(prefix, 24, 64496u32.into()));
// Minimal DER blob used as a router-key SPKI; the 0x2a 0x86 0x48 0x86 0xf7
// 0x0d 0x01 0x01 0x01 sequence looks like the rsaEncryption OID — presumably
// the smallest value that passes RouterKey validation (TODO confirm).
let valid_spki = vec![
0x30, 0x13, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
0x01, 0x05, 0x00, 0x03, 0x02, 0x00, 0x00,
];
let router_key = Payload::RouterKey(RouterKey::new(
Ski::default(),
Asn::from(64496u32),
valid_spki.clone(),
));
// The update below replaces aspa_old with aspa_new (extra provider ASN),
// so only the v2 view changes between init and update.
let aspa_old = Payload::Aspa(Aspa::new(Asn::from(64496u32), vec![Asn::from(64497u32)]));
let aspa_new = Payload::Aspa(Aspa::new(
Asn::from(64496u32),
vec![Asn::from(64497u32), Asn::from(64498u32)],
));
let initial_payloads = vec![origin.clone(), router_key.clone(), aspa_old];
let mut cache = rpki::rtr::cache::RtrCache::default()
.init(&store, 16, false, Timing::new(600, 600, 7200), || {
Ok(initial_payloads.clone())
})
.unwrap();
cache
.update(vec![origin.clone(), router_key.clone(), aspa_new], &store)
.unwrap();
// Store writes are asynchronous to the update; block until the persisted
// per-version serials reach [1, 1, 2] (only v2 advanced on the update,
// since only the Aspa payload differed — TODO confirm bumping rule).
wait_for_store_serials(&store, [1, 1, 2]).await;
// Rebuild from the store only; the loader must never run on a clean restore.
let restored = rpki::rtr::cache::RtrCache::default()
.init(&store, 16, false, Timing::new(600, 600, 7200), || {
panic!("file loader should not be used when restoring from store")
})
.unwrap();
assert_eq!(restored.serial_for_version(0), 1);
assert_eq!(restored.serial_for_version(1), 1);
assert_eq!(restored.serial_for_version(2), 2);
let shared = shared_cache(restored);
// v0 session: Reset Query -> Cache Response + one IPv4 prefix + End of Data
// carrying the v0 serial (RouterKey/Aspa must not appear at v0).
let (addr, _notify_tx, shutdown_tx, server_handle) = start_session_server(shared.clone()).await;
let mut client = TcpStream::connect(addr).await.unwrap();
ResetQuery::new(0).write(&mut client).await.unwrap();
let response = CacheResponse::read(&mut client).await.unwrap();
assert_eq!(response.version(), 0);
let expected_sid_v0 = shared.read().unwrap().session_id_for_version(0);
assert_eq!(response.session_id(), expected_sid_v0);
let _v4 = IPv4Prefix::read(&mut client).await.unwrap();
let eod_v0 = rpki::rtr::pdu::EndOfDataV0::read(&mut client).await.unwrap();
assert_eq!(eod_v0.serial_number(), 1);
shutdown_server(client, shutdown_tx, server_handle).await;
// v1 session: snapshot additionally contains the RouterKey PDU.
let (addr, _notify_tx, shutdown_tx, server_handle) = start_session_server(shared.clone()).await;
let mut client = TcpStream::connect(addr).await.unwrap();
ResetQuery::new(1).write(&mut client).await.unwrap();
let response = CacheResponse::read(&mut client).await.unwrap();
assert_eq!(response.version(), 1);
let expected_sid_v1 = shared.read().unwrap().session_id_for_version(1);
assert_eq!(response.session_id(), expected_sid_v1);
let _v4 = IPv4Prefix::read(&mut client).await.unwrap();
let _rk = RouterKeyPdu::read(&mut client).await.unwrap();
let eod_v1 = EndOfDataV1::read(&mut client).await.unwrap();
assert_eq!(eod_v1.serial_number(), 1);
shutdown_server(client, shutdown_tx, server_handle).await;
// v2 session: a Serial Query from serial 1 yields only the restored delta
// (the Aspa change) followed by End of Data at serial 2.
let (addr, _notify_tx, shutdown_tx, server_handle) = start_session_server(shared.clone()).await;
let mut client = TcpStream::connect(addr).await.unwrap();
let sid_v2 = shared.read().unwrap().session_id_for_version(2);
SerialQuery::new(2, sid_v2, 1).write(&mut client).await.unwrap();
let response = CacheResponse::read(&mut client).await.unwrap();
assert_eq!(response.version(), 2);
assert_eq!(response.session_id(), sid_v2);
let _aspa = AspaPdu::read(&mut client).await.unwrap();
let eod_v2 = EndOfDataV1::read(&mut client).await.unwrap();
assert_eq!(eod_v2.serial_number(), 2);
shutdown_server(client, shutdown_tx, server_handle).await;
}
/// 测试:当 Serial Query 的 session_id 和 serial 都与当前 cache 一致时,仅返回 End of Data。
#[tokio::test]
async fn serial_query_returns_end_of_data_when_up_to_date() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing {
refresh: 600,
retry: 600,
@ -402,7 +525,7 @@ async fn serial_query_returns_end_of_data_when_up_to_date() {
async fn serial_query_returns_corrupt_data_when_session_id_mismatch() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing {
refresh: 600,
retry: 600,
@ -457,13 +580,13 @@ async fn serial_query_returns_deltas_when_incremental_update_available() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(101)
.serials(serials_all(101))
.timing(Timing {
refresh: 600,
retry: 600,
expire: 7200,
})
.deltas(deltas)
.deltas_by_version(deltas_all(deltas))
.build();
let server_cache = shared_cache(cache);
@ -542,10 +665,10 @@ async fn serial_query_returns_deltas_across_serial_wraparound() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(0)
.serials(serials_all(0))
.timing(Timing::new(600, 600, 7200))
.snapshot(snapshot)
.deltas(deltas)
.snapshots(snapshots_all(snapshot))
.deltas_by_version(deltas_all(deltas))
.build();
let server_cache = shared_cache(cache);
@ -587,7 +710,7 @@ async fn serial_query_returns_deltas_across_serial_wraparound() {
async fn serial_query_returns_cache_reset_for_future_serial_across_wraparound() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(u32::MAX)
.serials(serials_all(u32::MAX))
.timing(Timing::new(600, 600, 7200))
.build();
@ -637,9 +760,9 @@ async fn reset_query_returns_payloads_in_rtr_order() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.snapshot(snapshot)
.snapshots(snapshots_all(snapshot))
.build();
let server_cache = shared_cache(cache);
@ -748,13 +871,13 @@ async fn serial_query_returns_announcements_before_withdrawals() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(101)
.serials(serials_all(101))
.timing(Timing {
refresh: 600,
retry: 600,
expire: 7200,
})
.deltas(deltas)
.deltas_by_version(deltas_all(deltas))
.build();
let server_cache = shared_cache(cache);
@ -840,7 +963,7 @@ async fn serial_query_returns_announcements_before_withdrawals() {
async fn established_session_sends_serial_notify() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -882,7 +1005,7 @@ async fn established_session_sends_serial_notify() {
async fn first_pdu_with_too_high_version_returns_unsupported_version_error() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -922,7 +1045,7 @@ async fn first_pdu_with_too_high_version_returns_unsupported_version_error() {
async fn session_rejects_version_change_after_negotiation() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -975,7 +1098,7 @@ async fn session_rejects_version_change_after_negotiation() {
async fn notify_is_not_sent_before_version_negotiation() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1012,7 +1135,7 @@ async fn notify_is_not_sent_before_version_negotiation() {
async fn serial_notify_is_rate_limited_to_once_per_minute() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1073,7 +1196,7 @@ async fn reset_query_returns_no_data_available_when_cache_is_unavailable() {
let cache = RtrCacheBuilder::new()
.availability(rpki::rtr::cache::CacheAvailability::NoDataAvailable)
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1108,7 +1231,7 @@ async fn serial_query_returns_no_data_available_when_cache_is_unavailable() {
let cache = RtrCacheBuilder::new()
.availability(rpki::rtr::cache::CacheAvailability::NoDataAvailable)
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1142,7 +1265,7 @@ async fn serial_query_returns_no_data_available_when_cache_is_unavailable() {
async fn first_pdu_with_invalid_length_returns_corrupt_data() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1211,7 +1334,7 @@ async fn first_pdu_with_invalid_length_returns_corrupt_data() {
async fn established_session_closes_after_receiving_error_report() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1260,7 +1383,7 @@ async fn established_session_closes_after_receiving_error_report() {
async fn established_session_invalid_header_returns_corrupt_data() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1324,7 +1447,7 @@ async fn established_session_invalid_header_returns_corrupt_data() {
async fn established_session_unknown_pdu_returns_unsupported_pdu_type() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1393,9 +1516,9 @@ async fn version_zero_does_not_send_router_key_or_aspa() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.snapshot(snapshot)
.snapshots(snapshots_all(snapshot))
.build();
let server_cache = shared_cache(cache);
@ -1436,9 +1559,9 @@ async fn version_two_aspa_withdraw_has_empty_provider_list() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(101)
.serials(serials_all(101))
.timing(Timing::new(600, 600, 7200))
.deltas(deltas)
.deltas_by_version(deltas_all(deltas))
.build();
let server_cache = shared_cache(cache);
@ -1477,7 +1600,11 @@ async fn version_two_aspa_withdraw_has_empty_provider_list() {
#[tokio::test]
async fn version_one_sends_router_key_but_not_aspa() {
let router_key = RouterKey::new(Ski::default(), Asn::from(64496u32), vec![1u8; 32]);
let valid_spki = vec![
0x30, 0x13, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
0x01, 0x05, 0x00, 0x03, 0x02, 0x00, 0x00,
];
let router_key = RouterKey::new(Ski::default(), Asn::from(64496u32), valid_spki);
let aspa = Aspa::new(Asn::from(64496u32), vec![Asn::from(64497u32)]);
let snapshot = Snapshot::from_payloads(vec![
Payload::RouterKey(router_key),
@ -1486,9 +1613,9 @@ async fn version_one_sends_router_key_but_not_aspa() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.snapshot(snapshot)
.snapshots(snapshots_all(snapshot))
.build();
let server_cache = shared_cache(cache);
@ -1530,7 +1657,7 @@ async fn version_one_sends_router_key_but_not_aspa() {
async fn established_session_idle_timeout_returns_transport_failed() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1588,7 +1715,7 @@ async fn established_session_idle_timeout_returns_transport_failed() {
async fn tls_client_with_matching_san_ip_is_accepted() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1618,7 +1745,7 @@ async fn tls_client_with_matching_san_ip_is_accepted() {
async fn tls_client_accepts_server_certificate_with_dns_san() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1655,7 +1782,7 @@ async fn tls_client_accepts_server_certificate_with_dns_san() {
async fn tls_server_dns_name_san_strict_mode_rejects_ip_only_certificate() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1678,7 +1805,7 @@ async fn tls_server_dns_name_san_strict_mode_rejects_ip_only_certificate() {
async fn tls_client_with_mismatched_san_ip_is_rejected() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.build();
@ -1716,7 +1843,7 @@ async fn tls_client_with_mismatched_san_ip_is_rejected() {
async fn invalid_timing_prevents_end_of_data_response() {
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 8000, 7200))
.build();
@ -1752,9 +1879,9 @@ async fn invalid_aspa_prevents_snapshot_response() {
))]);
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.snapshot(snapshot)
.snapshots(snapshots_all(snapshot))
.build();
let server_cache = shared_cache(cache);
@ -1790,9 +1917,9 @@ async fn invalid_router_key_prevents_snapshot_response() {
))]);
let cache = RtrCacheBuilder::new()
.session_ids(SessionIds::from_array([42, 42, 42]))
.serial(100)
.serials(serials_all(100))
.timing(Timing::new(600, 600, 7200))
.snapshot(snapshot)
.snapshots(snapshots_all(snapshot))
.build();
let server_cache = shared_cache(cache);

View File

@ -0,0 +1,45 @@
use std::fs;
use std::path::{Path, PathBuf};
/// Recursively collects every `.rs` file under `dir` into `out`.
///
/// A directory that cannot be read (missing `tests/` in a trimmed checkout,
/// or a permission error) is skipped instead of panicking, so the boundary
/// test fails only on real rule violations, not on filesystem layout.
/// Unreadable individual entries are likewise skipped via `flatten()`.
fn collect_rs_files(dir: &Path, out: &mut Vec<PathBuf>) {
    // Graceful skip: the original `unwrap()` here aborted the whole test run.
    let entries = match fs::read_dir(dir) {
        Ok(entries) => entries,
        Err(_) => return,
    };
    for entry in entries.flatten() {
        let path = entry.path();
        if path.is_dir() {
            // Depth-first descent into subdirectories.
            collect_rs_files(&path, out);
        } else if path.extension().and_then(|s| s.to_str()) == Some("rs") {
            out.push(path);
        }
    }
}
/// Boundary guard for the unique-write-entry rule: only the allow-listed
/// files may call `.save_cache_state_versioned(` directly. Any other source
/// file under `src/` or `tests/` containing that call fails this test.
#[test]
fn save_cache_state_versioned_has_limited_call_sites() {
    let root = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let mut sources = Vec::new();
    for sub in ["src", "tests"] {
        collect_rs_files(&root.join(sub), &mut sources);
    }
    // The only call sites permitted to write versioned cache state.
    let allowed: Vec<PathBuf> = [
        "src/rtr/store.rs",
        "src/rtr/cache/store.rs",
        "tests/test_store_db.rs",
        "tests/test_cache.rs",
        "tests/test_store_boundary.rs",
    ]
    .iter()
    .map(|rel| root.join(rel))
    .collect();
    let violations: Vec<PathBuf> = sources
        .into_iter()
        .filter(|file| {
            let content = fs::read_to_string(file).unwrap();
            content.contains(".save_cache_state_versioned(") && !allowed.contains(file)
        })
        .collect();
    assert!(
        violations.is_empty(),
        "unexpected direct save_cache_state_versioned() call sites: {:?}",
        violations
    );
}

View File

@ -2,371 +2,110 @@ mod common;
use std::net::Ipv6Addr;
use common::test_helper::{
indent_block, payloads_to_string, test_report, v4_origin, v6_origin,
};
use common::test_helper::{v4_origin, v6_origin};
use rpki::rtr::cache::{CacheAvailability, Delta, SessionIds, Snapshot};
use rpki::rtr::cache::{CacheAvailability, Delta, Snapshot};
use rpki::rtr::payload::Payload;
use rpki::rtr::store::RtrStore;
fn snapshot_to_string(snapshot: &Snapshot) -> String {
let payloads = snapshot.payloads_for_rtr();
payloads_to_string(&payloads)
}
fn delta_to_string(delta: &Delta) -> String {
format!(
"serial: {}\nannounced:\n{}withdrawn:\n{}",
delta.serial(),
indent_block(&payloads_to_string(delta.announced()), 2),
indent_block(&payloads_to_string(delta.withdrawn()), 2),
)
}
#[test]
fn store_db_save_and_get_snapshot() {
fn store_db_versioned_state_persists_and_restores_all_versions() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let input_payloads = vec![
let snapshots = [
Snapshot::from_payloads(vec![Payload::RouteOrigin(v4_origin(
192, 0, 2, 0, 24, 24, 64496,
))]),
Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497)),
]),
Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497)),
Payload::RouteOrigin(v6_origin(
Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0),
32,
48,
64497,
64498,
)),
]),
];
let snapshot = Snapshot::from_payloads(input_payloads.clone());
let session_ids = [410u16, 411u16, 412u16];
let serials = [100u32, 200u32, 300u32];
store.save_snapshot(&snapshot).unwrap();
let loaded = store.get_snapshot().unwrap().expect("snapshot should exist");
let input = format!(
"db_path: {}\nsnapshot:\n{}",
dir.path().display(),
indent_block(&payloads_to_string(&input_payloads), 2),
);
let output = format!(
"loaded snapshot:\n{}same_content: {}\n",
indent_block(&snapshot_to_string(&loaded), 2),
snapshot.same_content(&loaded),
);
test_report(
"store_db_save_and_get_snapshot",
"验证 save_snapshot() 后可以通过 get_snapshot() 正确读回 Snapshot。",
&input,
&output,
);
assert!(snapshot.same_content(&loaded));
}
#[test]
fn store_db_set_and_get_meta_fields() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let session_ids = SessionIds::from_array([40, 41, 42]);
store.set_session_ids(&session_ids).unwrap();
store.set_serial(100).unwrap();
store.set_delta_window(101, 110).unwrap();
let loaded_session_ids = store.get_session_ids().unwrap();
let serial = store.get_serial().unwrap();
let window = store.get_delta_window().unwrap();
let input = format!(
"db_path: {}\nset_session_ids={:?}\nset_serial=100\nset_delta_window=(101, 110)\n",
dir.path().display(),
session_ids,
);
let output = format!(
"get_session_ids: {:?}\nget_serial: {:?}\nget_delta_window: {:?}\n",
loaded_session_ids, serial, window,
);
test_report(
"store_db_set_and_get_meta_fields",
"验证 session_ids / serial / delta_window 能正确写入并读回。",
&input,
&output,
);
assert_eq!(loaded_session_ids, Some(session_ids));
assert_eq!(serial, Some(100));
assert_eq!(window, Some((101, 110)));
}
#[test]
fn store_db_clear_delta_window_removes_both_bounds() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
store.set_delta_window(101, 110).unwrap();
assert_eq!(store.get_delta_window().unwrap(), Some((101, 110)));
store.clear_delta_window().unwrap();
assert_eq!(store.get_delta_window().unwrap(), None);
}
#[test]
fn store_db_save_and_get_delta() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let delta = Delta::new(
101,
vec![Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497))],
vec![Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496))],
);
store.save_delta(&delta).unwrap();
let loaded = store.get_delta(101).unwrap().expect("delta should exist");
let input = format!(
"db_path: {}\ndelta:\n{}",
dir.path().display(),
indent_block(&delta_to_string(&delta), 2),
);
let output = format!(
"loaded delta:\n{}",
indent_block(&delta_to_string(&loaded), 2),
);
test_report(
"store_db_save_and_get_delta",
"验证 save_delta() 后可以通过 get_delta(serial) 正确读回 Delta。",
&input,
&output,
);
assert_eq!(loaded.serial(), 101);
assert_eq!(loaded.announced().len(), 1);
assert_eq!(loaded.withdrawn().len(), 1);
}
#[test]
fn store_db_load_deltas_since_returns_only_newer_deltas_in_order() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let d101 = Delta::new(
101,
let d0 = Delta::new(
100,
vec![Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496))],
vec![],
);
let d102 = Delta::new(
102,
vec![Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497))],
vec![],
);
let d103 = Delta::new(
103,
vec![Payload::RouteOrigin(v4_origin(203, 0, 113, 0, 24, 24, 64498))],
let d2 = Delta::new(
300,
vec![Payload::RouteOrigin(v6_origin(
Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0),
32,
48,
64498,
))],
vec![],
);
store.save_delta(&d101).unwrap();
store.save_delta(&d102).unwrap();
store.save_delta(&d103).unwrap();
store
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshots,
&session_ids,
&serials,
&[Some(&d0), None, Some(&d2)],
&[Some((100, 100)), None, Some((300, 300))],
&[false, false, false],
)
.unwrap();
let loaded = store.load_deltas_since(101).unwrap();
assert_eq!(store.get_availability().unwrap(), Some(CacheAvailability::Ready));
let input = format!(
"db_path: {}\nsaved delta serials: [101, 102, 103]\nload_deltas_since(101)\n",
dir.path().display(),
);
for version in 0u8..=2 {
let idx = version as usize;
let loaded_snapshot = store
.get_snapshot_for_version(version)
.unwrap()
.expect("snapshot should exist");
let loaded_session_id = store
.get_session_id_for_version(version)
.unwrap()
.expect("session_id should exist");
let loaded_serial = store
.get_serial_for_version(version)
.unwrap()
.expect("serial should exist");
let output = {
let mut s = String::new();
for (idx, d) in loaded.iter().enumerate() {
s.push_str(&format!("loaded[{}]:\n", idx));
s.push_str(&indent_block(&delta_to_string(d), 2));
assert!(snapshots[idx].same_content(&loaded_snapshot));
assert_eq!(loaded_session_id, session_ids[idx]);
assert_eq!(loaded_serial, serials[idx]);
}
s
};
test_report(
"store_db_load_deltas_since_returns_only_newer_deltas_in_order",
"验证 load_deltas_since(x) 只返回 serial > x 的 Delta且顺序正确。",
&input,
&output,
assert_eq!(store.get_delta_window_for_version(0).unwrap(), Some((100, 100)));
assert_eq!(store.get_delta_window_for_version(1).unwrap(), None);
assert_eq!(store.get_delta_window_for_version(2).unwrap(), Some((300, 300)));
assert_eq!(
store.get_delta_for_version(0, 100).unwrap().map(|d| d.serial()),
Some(100)
);
assert!(store.get_delta_for_version(1, 200).unwrap().is_none());
assert_eq!(
store.get_delta_for_version(2, 300).unwrap().map(|d| d.serial()),
Some(300)
);
assert_eq!(loaded.len(), 2);
assert_eq!(loaded[0].serial(), 102);
assert_eq!(loaded[1].serial(), 103);
}
#[test]
fn store_db_save_snapshot_and_meta_writes_all_fields() {
fn store_db_versioned_delta_window_wraparound_is_isolated_by_version() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let session_ids = SessionIds::from_array([40, 41, 42]);
let snapshot = Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497)),
]);
store
.save_snapshot_and_meta(&snapshot, &session_ids, 100)
.unwrap();
let loaded_snapshot = store.get_snapshot().unwrap().expect("snapshot should exist");
let loaded_session_ids = store.get_session_ids().unwrap();
let loaded_serial = store.get_serial().unwrap();
let input = format!(
"db_path: {}\nsnapshot:\n{}session_ids={:?}\nserial=100\n",
dir.path().display(),
indent_block(&snapshot_to_string(&snapshot), 2),
session_ids,
);
let output = format!(
"loaded_snapshot:\n{}loaded_session_ids: {:?}\nloaded_serial: {:?}\n",
indent_block(&snapshot_to_string(&loaded_snapshot), 2),
loaded_session_ids,
loaded_serial,
);
test_report(
"store_db_save_snapshot_and_meta_writes_all_fields",
"验证 save_snapshot_and_meta() 会同时写入 snapshot、session_ids 和 serial。",
&input,
&output,
);
assert!(snapshot.same_content(&loaded_snapshot));
assert_eq!(loaded_session_ids, Some(session_ids));
assert_eq!(loaded_serial, Some(100));
}
#[test]
fn store_db_save_cache_state_writes_delta_snapshot_meta_and_window_together() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let session_ids = SessionIds::from_array([40, 41, 42]);
let snapshot = Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497)),
]);
let delta = Delta::new(
101,
vec![Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497))],
vec![Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496))],
);
store
.save_cache_state(
CacheAvailability::Ready,
&snapshot,
&session_ids,
101,
Some(&delta),
Some((101, 101)),
false,
)
.unwrap();
let loaded_snapshot = store.get_snapshot().unwrap().expect("snapshot should exist");
let loaded_session_ids = store.get_session_ids().unwrap();
let loaded_serial = store.get_serial().unwrap();
let loaded_availability = store.get_availability().unwrap();
let loaded_delta = store.get_delta(101).unwrap().expect("delta should exist");
let loaded_window = store.get_delta_window().unwrap();
assert!(snapshot.same_content(&loaded_snapshot));
assert_eq!(loaded_session_ids, Some(session_ids));
assert_eq!(loaded_serial, Some(101));
assert_eq!(loaded_availability, Some(CacheAvailability::Ready));
assert_eq!(loaded_delta.serial(), 101);
assert_eq!(loaded_window, Some((101, 101)));
}
#[test]
fn store_db_save_cache_state_prunes_deltas_older_than_window_min() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let session_ids = SessionIds::from_array([40, 41, 42]);
let snapshot = Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
]);
let d101 = Delta::new(
101,
vec![Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496))],
vec![],
);
let d102 = Delta::new(
102,
vec![Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497))],
vec![],
);
let d103 = Delta::new(
103,
vec![Payload::RouteOrigin(v4_origin(203, 0, 113, 0, 24, 24, 64498))],
vec![],
);
store
.save_cache_state(
CacheAvailability::Ready,
&snapshot,
&session_ids,
101,
Some(&d101),
Some((101, 101)),
false,
)
.unwrap();
store
.save_cache_state(
CacheAvailability::Ready,
&snapshot,
&session_ids,
102,
Some(&d102),
Some((101, 102)),
false,
)
.unwrap();
store
.save_cache_state(
CacheAvailability::Ready,
&snapshot,
&session_ids,
103,
Some(&d103),
Some((103, 103)),
false,
)
.unwrap();
assert!(store.get_delta(101).unwrap().is_none());
assert!(store.get_delta(102).unwrap().is_none());
assert_eq!(store.get_delta(103).unwrap().map(|d| d.serial()), Some(103));
assert_eq!(store.get_delta_window().unwrap(), Some((103, 103)));
}
#[test]
fn store_db_load_delta_window_restores_wraparound_window_in_serial_order() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let session_ids = SessionIds::from_array([40, 41, 42]);
let snapshot = Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
]);
let snapshots = std::array::from_fn(|_| Snapshot::empty());
let session_ids = [600u16, 601u16, 602u16];
let serials = [0u32, 0u32, 0u32];
let d_max = Delta::new(
u32::MAX,
@ -383,162 +122,228 @@ fn store_db_load_delta_window_restores_wraparound_window_in_serial_order() {
vec![Payload::RouteOrigin(v4_origin(203, 0, 113, 0, 24, 24, 64498))],
vec![],
);
store
.save_cache_state(
CacheAvailability::Ready,
&snapshot,
&session_ids,
u32::MAX,
Some(&d_max),
Some((u32::MAX, u32::MAX)),
false,
)
.unwrap();
store
.save_cache_state(
CacheAvailability::Ready,
&snapshot,
&session_ids,
let d_v1_only = Delta::new(
0,
Some(&d_zero),
Some((u32::MAX, 0)),
false,
)
.unwrap();
store
.save_cache_state(
CacheAvailability::Ready,
&snapshot,
&session_ids,
1,
Some(&d_one),
Some((u32::MAX, 1)),
false,
)
.unwrap();
let loaded = store.load_delta_window(u32::MAX, 1).unwrap();
assert_eq!(loaded.iter().map(Delta::serial).collect::<Vec<_>>(), vec![u32::MAX, 0, 1]);
}
#[test]
fn store_db_load_snapshot_and_serial_returns_consistent_pair() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let snapshot = Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(203, 0, 113, 0, 24, 24, 64498)),
]);
store.save_snapshot_and_serial(&snapshot, 200).unwrap();
let loaded = store
.load_snapshot_and_serial()
.unwrap()
.expect("snapshot+serial should exist");
let input = format!(
"db_path: {}\nsnapshot:\n{}serial=200\n",
dir.path().display(),
indent_block(&snapshot_to_string(&snapshot), 2),
);
let output = format!(
"loaded_snapshot:\n{}loaded_serial: {}\n",
indent_block(&snapshot_to_string(&loaded.0), 2),
loaded.1,
);
test_report(
"store_db_load_snapshot_and_serial_returns_consistent_pair",
"验证 load_snapshot_and_serial() 能正确返回一致的 snapshot 与 serial。",
&input,
&output,
);
assert!(snapshot.same_content(&loaded.0));
assert_eq!(loaded.1, 200);
}
#[test]
fn store_db_delete_snapshot_delta_and_serial_removes_data() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let snapshot = Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
]);
let delta = Delta::new(
101,
vec![Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497))],
vec![Payload::RouteOrigin(v4_origin(10, 0, 0, 0, 24, 24, 64500))],
vec![],
);
store.save_snapshot(&snapshot).unwrap();
store.save_delta(&delta).unwrap();
store.set_serial(100).unwrap();
store
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshots,
&session_ids,
&serials,
&[None, None, Some(&d_max)],
&[None, None, None],
&[false, false, false],
)
.unwrap();
store
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshots,
&session_ids,
&serials,
&[None, None, Some(&d_zero)],
&[None, None, None],
&[false, false, false],
)
.unwrap();
store
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshots,
&session_ids,
&serials,
&[None, None, Some(&d_one)],
&[None, None, None],
&[false, false, false],
)
.unwrap();
store
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshots,
&session_ids,
&serials,
&[None, Some(&d_v1_only), None],
&[None, None, None],
&[false, false, false],
)
.unwrap();
store.delete_snapshot().unwrap();
store.delete_delta(101).unwrap();
store.delete_serial().unwrap();
let loaded_snapshot = store.get_snapshot().unwrap();
let loaded_delta = store.get_delta(101).unwrap();
let loaded_serial = store.get_serial().unwrap();
let input = format!(
"db_path: {}\nsave snapshot + delta(101) + serial(100), then delete all three.\n",
dir.path().display(),
let v2_loaded = store.load_delta_window_for_version(2, u32::MAX, 1).unwrap();
assert_eq!(
v2_loaded.iter().map(Delta::serial).collect::<Vec<_>>(),
vec![u32::MAX, 0, 1]
);
let output = format!(
"get_snapshot: {:?}\nget_delta(101): {:?}\nget_serial: {:?}\n",
loaded_snapshot.as_ref().map(|_| "Some(snapshot)"),
loaded_delta.as_ref().map(|_| "Some(delta)"),
loaded_serial,
);
test_report(
"store_db_delete_snapshot_delta_and_serial_removes_data",
"验证 delete_snapshot()/delete_delta()/delete_serial() 后,对应数据不再可读。",
&input,
&output,
);
assert!(loaded_snapshot.is_none());
assert!(loaded_delta.is_none());
assert!(loaded_serial.is_none());
let v1_loaded = store.load_delta_window_for_version(1, 0, 0).unwrap();
assert_eq!(v1_loaded.iter().map(Delta::serial).collect::<Vec<_>>(), vec![0]);
assert_eq!(v1_loaded[0].announced().len(), 1);
}
#[test]
fn store_db_load_snapshot_and_serial_errors_on_inconsistent_state() {
fn store_db_versioned_clear_window_affects_only_target_version() {
let dir = tempfile::tempdir().unwrap();
let store = RtrStore::open(dir.path()).unwrap();
let snapshot = Snapshot::from_payloads(vec![
Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496)),
]);
store.save_snapshot(&snapshot).unwrap();
// 故意不写 serial制造不一致状态
let result = store.load_snapshot_and_serial();
let input = format!(
"db_path: {}\n仅保存 snapshot不保存 serial。\n",
dir.path().display(),
let snapshots = [
Snapshot::from_payloads(vec![Payload::RouteOrigin(v4_origin(
192, 0, 2, 0, 24, 24, 64496,
))]),
Snapshot::from_payloads(vec![Payload::RouteOrigin(v4_origin(
198, 51, 100, 0, 24, 24, 64497,
))]),
Snapshot::from_payloads(vec![Payload::RouteOrigin(v4_origin(
203, 0, 113, 0, 24, 24, 64498,
))]),
];
let session_ids = [420u16, 421u16, 422u16];
let serials = [10u32, 20u32, 30u32];
let d0 = Delta::new(
10,
vec![Payload::RouteOrigin(v4_origin(192, 0, 2, 0, 24, 24, 64496))],
vec![],
);
let d2 = Delta::new(
30,
vec![Payload::RouteOrigin(v4_origin(203, 0, 113, 0, 24, 24, 64498))],
vec![],
);
let output = format!("load_snapshot_and_serial result: {:?}\n", result);
store
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshots,
&session_ids,
&serials,
&[Some(&d0), None, Some(&d2)],
&[Some((10, 10)), None, Some((30, 30))],
&[false, false, false],
)
.unwrap();
test_report(
"store_db_load_snapshot_and_serial_errors_on_inconsistent_state",
"验证当 snapshot 和 serial 状态不一致时load_snapshot_and_serial() 返回错误。",
&input,
&output,
store
.save_cache_state_versioned(
CacheAvailability::Ready,
&snapshots,
&session_ids,
&serials,
&[None, None, None],
&[None, None, None],
&[true, false, false],
)
.unwrap();
assert_eq!(store.get_delta_window_for_version(0).unwrap(), None);
assert!(store.get_delta_for_version(0, 10).unwrap().is_none());
assert_eq!(store.get_delta_window_for_version(2).unwrap(), Some((30, 30)));
assert_eq!(
store.get_delta_for_version(2, 30).unwrap().map(|d| d.serial()),
Some(30)
);
assert!(result.is_err());
}
/// Verifies that each versioned save prunes v0 deltas whose serials fall
/// outside the delta window written in the same batch: after the window
/// jumps to (102, 102), serials 100 and 101 must be gone while 102 remains.
#[test]
fn store_db_versioned_prunes_outside_window() {
    let tmp = tempfile::tempdir().unwrap();
    let store = RtrStore::open(tmp.path()).unwrap();
    let snapshots = std::array::from_fn(|_| {
        Snapshot::from_payloads(vec![Payload::RouteOrigin(v4_origin(
            192, 0, 2, 0, 24, 24, 64496,
        ))])
    });
    let session_ids = [500u16, 501u16, 502u16];
    let serials = [102u32, 0u32, 0u32];
    // One v0 delta per save; each step also advances the retained window.
    let steps = [
        (100u32, v4_origin(10, 0, 0, 0, 24, 24, 65001), (100u32, 100u32)),
        (101, v4_origin(10, 0, 1, 0, 24, 24, 65002), (100, 101)),
        (102, v4_origin(10, 0, 2, 0, 24, 24, 65003), (102, 102)),
    ];
    for (serial, origin, window) in steps {
        let delta = Delta::new(serial, vec![Payload::RouteOrigin(origin)], vec![]);
        store
            .save_cache_state_versioned(
                CacheAvailability::Ready,
                &snapshots,
                &session_ids,
                &serials,
                &[Some(&delta), None, None],
                &[Some(window), None, None],
                &[false, false, false],
            )
            .unwrap();
    }
    // Final window (102, 102): everything older must have been pruned.
    assert!(store.get_delta_for_version(0, 100).unwrap().is_none());
    assert!(store.get_delta_for_version(0, 101).unwrap().is_none());
    assert_eq!(
        store.get_delta_for_version(0, 102).unwrap().map(|d| d.serial()),
        Some(102)
    );
}
#[test]
fn store_db_versioned_load_delta_window_requires_complete_range() {
    // Loading a delta window must fail when the requested range starts
    // before the first persisted delta: here only serial 11 exists for
    // version 1, so asking for [10, 11] is an error.
    let tmp = tempfile::tempdir().unwrap();
    let store = RtrStore::open(tmp.path()).unwrap();
    let empty_snapshots = std::array::from_fn(|_| Snapshot::empty());
    // Persist a single delta with serial 11; serial 10 is deliberately absent.
    let only_delta = Delta::new(
        11,
        vec![Payload::RouteOrigin(v4_origin(198, 51, 100, 0, 24, 24, 64497))],
        vec![],
    );
    store
        .save_cache_state_versioned(
            CacheAvailability::Ready,
            &empty_snapshots,
            &[700u16, 701u16, 702u16],
            &[0u32, 0u32, 0u32],
            &[None, Some(&only_delta), None],
            &[None, None, None],
            &[false, false, false],
        )
        .unwrap();
    // The store must report the gap at the start of the requested range.
    let err = store.load_delta_window_for_version(1, 10, 11).unwrap_err();
    let message = err.to_string();
    assert!(message
        .contains("delta window starts at 10, but first persisted delta is Some(11)"));
}