151 lines
5.3 KiB
Bash
Executable File
151 lines
5.3 KiB
Bash
Executable File
#!/usr/bin/env bash
set -euo pipefail

# Resolve the repository root (two directories above this script) and make it
# the working directory so every relative path below is repo-rooted.
script_dir="$(dirname "${BASH_SOURCE[0]}")"
ROOT_DIR="$(cd "$script_dir/../.." && pwd)"
cd "$ROOT_DIR"
# Input fixtures and replay artifacts; every value may be overridden from the
# caller's environment. `: "${VAR:=default}"` assigns only when unset/empty.
: "${TAL_PATH:=$ROOT_DIR/tests/fixtures/tal/apnic-rfc7730-https.tal}"
: "${TA_PATH:=$ROOT_DIR/tests/fixtures/ta/apnic-ta.cer}"
: "${PAYLOAD_REPLAY_ARCHIVE:=$ROOT_DIR/target/live/payload_replay/payload-archive}"
: "${PAYLOAD_REPLAY_LOCKS:=$ROOT_DIR/target/live/payload_replay/locks.json}"

# Validation parameters and the recorded Routinator CSV to diff against.
: "${VALIDATION_TIME:=2026-03-13T02:30:00Z}"
: "${TRUST_ANCHOR:=apnic}"
: "${ROUTINATOR_RECORD_CSV:=$ROOT_DIR/target/live/payload_replay/record.csv}"

# Optional traversal limits; empty means "no limit flag is passed".
: "${MAX_DEPTH:=}"
: "${MAX_INSTANCES:=}"

# Directory that collects all per-run outputs.
: "${OUT_DIR:=$ROOT_DIR/target/live/payload_replay_runs}"
mkdir -p "$OUT_DIR"

# Name every artifact of this run after a UTC timestamp (unless the caller
# pins RUN_NAME), so repeated runs never collide.
TS="$(date -u +%Y%m%dT%H%M%SZ)"
: "${RUN_NAME:=apnic_replay_${TS}}"
: "${DB_DIR:=$OUT_DIR/${RUN_NAME}_db}"
: "${REPORT_JSON:=$OUT_DIR/${RUN_NAME}_report.json}"
: "${RUN_LOG:=$OUT_DIR/${RUN_NAME}_run.log}"
: "${META_JSON:=$OUT_DIR/${RUN_NAME}_meta.json}"
: "${SUMMARY_MD:=$OUT_DIR/${RUN_NAME}_summary.md}"
: "${VRPS_CSV:=$OUT_DIR/${RUN_NAME}_vrps.csv}"
: "${COMPARE_SUMMARY_MD:=$OUT_DIR/${RUN_NAME}_compare_summary.md}"
: "${ONLY_IN_OURS_CSV:=$OUT_DIR/${RUN_NAME}_only_in_ours.csv}"
: "${ONLY_IN_RECORD_CSV:=$OUT_DIR/${RUN_NAME}_only_in_record.csv}"
# Assemble the validator invocation as an array so every argument survives
# quoting intact; the optional limits are appended only when non-empty.
cmd=(cargo run --release --bin rpki --)
cmd+=(--db "$DB_DIR")
cmd+=(--tal-path "$TAL_PATH")
cmd+=(--ta-path "$TA_PATH")
cmd+=(--payload-replay-archive "$PAYLOAD_REPLAY_ARCHIVE")
cmd+=(--payload-replay-locks "$PAYLOAD_REPLAY_LOCKS")
cmd+=(--validation-time "$VALIDATION_TIME")
cmd+=(--report-json "$REPORT_JSON")

if [[ -n "$MAX_DEPTH" ]]; then
  cmd+=(--max-depth "$MAX_DEPTH")
fi
if [[ -n "$MAX_INSTANCES" ]]; then
  cmd+=(--max-instances "$MAX_INSTANCES")
fi
# Run the validator. The shell-quoted command line plus all combined
# stdout/stderr land in $RUN_LOG; nothing is echoed to the terminal.
# run_duration_s is consumed later when the meta JSON is written.
t0="$(date +%s)"
{
  printf '%s\n' "# command:"
  printf '%q ' "${cmd[@]}"
  printf '\n\n'
  "${cmd[@]}"
} 2>&1 | tee "$RUN_LOG" >/dev/null
t1="$(date +%s)"
run_duration_s="$(( t1 - t0 ))"
# Derive machine-readable metadata (META_JSON) and a human-readable Markdown
# summary (SUMMARY_MD) from the validator's report. File paths travel via
# argv; run parameters travel via one-shot environment assignments.
TAL_PATH="$TAL_PATH" \
TA_PATH="$TA_PATH" \
PAYLOAD_REPLAY_ARCHIVE="$PAYLOAD_REPLAY_ARCHIVE" \
PAYLOAD_REPLAY_LOCKS="$PAYLOAD_REPLAY_LOCKS" \
DB_DIR="$DB_DIR" \
REPORT_JSON="$REPORT_JSON" \
RUN_LOG="$RUN_LOG" \
VALIDATION_TIME="$VALIDATION_TIME" \
RUN_DURATION_S="$run_duration_s" \
python3 - "$REPORT_JSON" "$META_JSON" "$SUMMARY_MD" <<'PY'
import json
import os
import sys
from datetime import datetime, timezone
from pathlib import Path

# Positional args supplied by the shell: report (in), meta (out), summary (out).
report_path = Path(sys.argv[1])
meta_path = Path(sys.argv[2])
summary_path = Path(sys.argv[3])
# Load the report written by the rpki run above.
rep = json.loads(report_path.read_text(encoding="utf-8"))
now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
# Provenance (paths and parameters), timing, and headline counts pulled from
# the report. NOTE(review): key names under rep["tree"] etc. mirror the rpki
# binary's report schema — confirm against that tool if it changes.
meta = {
    "recorded_at_utc": now,
    "tal_path": os.environ["TAL_PATH"],
    "ta_path": os.environ["TA_PATH"],
    "payload_replay_archive": os.environ["PAYLOAD_REPLAY_ARCHIVE"],
    "payload_replay_locks": os.environ["PAYLOAD_REPLAY_LOCKS"],
    "db_dir": os.environ["DB_DIR"],
    "report_json": os.environ["REPORT_JSON"],
    "run_log": os.environ["RUN_LOG"],
    "validation_time_arg": os.environ["VALIDATION_TIME"],
    "durations_secs": {
        "rpki_run": int(os.environ["RUN_DURATION_S"]),
    },
    "counts": {
        "publication_points_processed": rep["tree"]["instances_processed"],
        "publication_points_failed": rep["tree"]["instances_failed"],
        "vrps": len(rep["vrps"]),
        "aspas": len(rep["aspas"]),
        "audit_publication_points": len(rep["publication_points"]),
    },
}
meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
# Render the same information as Markdown for humans.
summary = []
summary.append("# Payload Replay Summary\n\n")
summary.append(f"- recorded_at_utc: `{now}`\n")
summary.append(f"- tal_path: `{meta['tal_path']}`\n")
summary.append(f"- ta_path: `{meta['ta_path']}`\n")
summary.append(f"- payload_replay_archive: `{meta['payload_replay_archive']}`\n")
summary.append(f"- payload_replay_locks: `{meta['payload_replay_locks']}`\n")
summary.append(f"- db: `{meta['db_dir']}`\n")
summary.append(f"- report_json: `{meta['report_json']}`\n")
summary.append(f"- validation_time_arg: `{meta['validation_time_arg']}`\n\n")
summary.append("## Results\n\n")
summary.append("| metric | value |\n")
summary.append("|---|---:|\n")
for k, v in meta["counts"].items():
    summary.append(f"| {k} | {v} |\n")
summary.append("\n## Durations\n\n")
summary.append("| step | seconds |\n")
summary.append("|---|---:|\n")
for k, v in meta["durations_secs"].items():
    summary.append(f"| {k} | {v} |\n")
summary_path.write_text("".join(summary), encoding="utf-8")
# Echo the summary path so callers capturing stdout know where it landed.
print(summary_path)
PY
# Convert the JSON report into Routinator-style VRP CSV output.
csv_args=(--report "$REPORT_JSON" --out "$VRPS_CSV" --trust-anchor "$TRUST_ANCHOR")
python3 scripts/payload_replay/report_to_routinator_csv.py "${csv_args[@]}" >/dev/null

# When a recorded Routinator CSV is available, diff our VRPs against it.
if [[ -f "$ROUTINATOR_RECORD_CSV" ]]; then
  compare_args=(
    "$VRPS_CSV"
    "$ROUTINATOR_RECORD_CSV"
    "$COMPARE_SUMMARY_MD"
    "$ONLY_IN_OURS_CSV"
    "$ONLY_IN_RECORD_CSV"
  )
  ./scripts/payload_replay/compare_with_routinator_record.sh "${compare_args[@]}" >/dev/null
fi
# Final pointer list for the operator. Everything goes to stderr so stdout
# stays clean for callers that capture it.
{
  echo "== payload replay run complete =="
  echo "- db: $DB_DIR"
  echo "- report: $REPORT_JSON"
  echo "- run log: $RUN_LOG"
  echo "- meta json: $META_JSON"
  echo "- summary md: $SUMMARY_MD"
  echo "- vrps csv: $VRPS_CSV"
  # Comparison artifacts exist only when the recorded CSV was present above.
  if [[ -f "$COMPARE_SUMMARY_MD" ]]; then
    echo "- compare summary: $COMPARE_SUMMARY_MD"
    echo "- only in ours: $ONLY_IN_OURS_CSV"
    echo "- only in record: $ONLY_IN_RECORD_CSV"
  fi
} >&2