#!/usr/bin/env bash
set -euo pipefail

# M2: Run per-sample decode+profile benchmark (Ours vs Routinator) on
# selected_der fixtures.
#
# Outputs:
# - specs/develop/20260224/data/m2_manifest_decode_profile_compare.csv
# - specs/develop/20260224/data/m2_raw.log
#
# Note: This script assumes the Routinator benchmark repo exists at:
#   /home/yuyr/dev/rust_playground/routinator/benchmark
#   (override with the ROUT_BENCH_DIR environment variable)
#
# It also assumes fixtures exist under:
#   rpki/tests/benchmark/selected_der/*.mft
#   routinator/benchmark/fixtures/selected_der/*.mft
#
# Tunables (environment): DATE_TAG, OUT_CSV, OUT_RAW, REPEATS,
# ITER_{SMALL,MEDIUM,LARGE,XLARGE}, WARM_{SMALL,MEDIUM,LARGE,XLARGE},
# ROUT_BENCH_DIR, TASKSET_CPU.
|
# Repo-relative paths. ROOT_DIR resolves to the parent of this script's
# directory (assumes the script lives one level below the rpki repo root —
# TODO confirm against the repo layout).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
RPKI_DIR="$ROOT_DIR"
OURS_BENCH_DIR="$RPKI_DIR/benchmark/ours_manifest_bench"

# Routinator benchmark checkout; override ROUT_BENCH_DIR to relocate.
ROUT_BENCH_DIR="${ROUT_BENCH_DIR:-/home/yuyr/dev/rust_playground/routinator/benchmark}"
ROUT_BIN="$ROUT_BENCH_DIR/target/release/routinator-manifest-benchmark"

# Output locations (all overridable via the environment).
DATE_TAG="${DATE_TAG:-20260224}"
OUT_DIR="$RPKI_DIR/../specs/develop/${DATE_TAG}/data"
OUT_CSV="${OUT_CSV:-$OUT_DIR/m2_manifest_decode_profile_compare.csv}"
OUT_RAW="${OUT_RAW:-$OUT_DIR/m2_raw.log}"

# Number of repeated measurement runs each benchmark performs per sample.
REPEATS="${REPEATS:-3}"

# Iterations / warmups (kept moderate for interactive iteration).
# xlarge samples get 10x fewer iterations because each decode is slower.
ITER_SMALL="${ITER_SMALL:-20000}"
ITER_MEDIUM="${ITER_MEDIUM:-20000}"
ITER_LARGE="${ITER_LARGE:-20000}"
ITER_XLARGE="${ITER_XLARGE:-2000}"

WARM_SMALL="${WARM_SMALL:-2000}"
WARM_MEDIUM="${WARM_MEDIUM:-2000}"
WARM_LARGE="${WARM_LARGE:-2000}"
WARM_XLARGE="${WARM_XLARGE:-200}"

# Fixture basenames benchmarked by the main loop; the size bucket (and
# thereby the iteration/warmup counts) is derived from the name prefix.
SAMPLES=(
  small-01
  small-02
  medium-01
  medium-02
  large-01
  large-02
  xlarge-01
  xlarge-02
)
|
|
|
|
# Ensure the output directory exists and start a fresh raw log.
mkdir -p "$OUT_DIR"
: > "$OUT_RAW"

# CSV header; one data row per sample is appended by the main loop below.
echo "sample,bucket,manifest_file_count,ours_avg_ns_per_op,ours_ops_per_s,rout_avg_ns_per_op,rout_ops_per_s,ratio_ours_over_rout,iterations,repeats,warmup" > "$OUT_CSV"

echo "[1/3] Build ours benchmark (release)..." | tee -a "$OUT_RAW"
(cd "$OURS_BENCH_DIR" && cargo build --release -q)
OURS_BIN="$OURS_BENCH_DIR/target/release/ours-manifest-bench"

echo "[2/3] Build routinator benchmark (release)..." | tee -a "$OUT_RAW"
(cd "$ROUT_BENCH_DIR" && cargo build --release -q)
|
|
|
|
# Optional CPU pinning for stable timings: set TASKSET_CPU (e.g. 3) to run
# both benchmark binaries under `taskset -c`. The prefix is expanded
# unquoted at the call sites so an empty value vanishes entirely.
taskset_prefix=""
if [[ -n "${TASKSET_CPU:-}" ]]; then
  if command -v taskset >/dev/null 2>&1; then
    taskset_prefix="taskset -c ${TASKSET_CPU}"
  else
    # Previously this was silently ignored; surface it so the operator
    # knows the numbers are unpinned.
    echo "WARNING: TASKSET_CPU=${TASKSET_CPU} set but taskset not found; running unpinned" >&2
  fi
fi
|
|
|
|
# Map a sample name (e.g. "large-02") to its size bucket by name prefix.
# Prints "unknown" for names that match no bucket.
bucket_for() {
  local sample_name="$1"
  local bucket="unknown"
  case "$sample_name" in
    small-*)  bucket="small" ;;
    medium-*) bucket="medium" ;;
    large-*)  bucket="large" ;;
    xlarge-*) bucket="xlarge" ;;
  esac
  echo "$bucket"
}
|
|
|
|
# Resolve the configured iteration count for a size bucket.
# Unknown buckets fall back to the medium setting.
# Globals (read): ITER_SMALL, ITER_MEDIUM, ITER_LARGE, ITER_XLARGE.
iters_for() {
  local bucket="$1"
  case "$bucket" in
    small)  printf '%s\n' "$ITER_SMALL" ;;
    medium) printf '%s\n' "$ITER_MEDIUM" ;;
    large)  printf '%s\n' "$ITER_LARGE" ;;
    xlarge) printf '%s\n' "$ITER_XLARGE" ;;
    *)      printf '%s\n' "$ITER_MEDIUM" ;;
  esac
}
|
|
|
|
# Resolve the configured warmup-iteration count for a size bucket.
# Unknown buckets fall back to the medium setting.
# Globals (read): WARM_SMALL, WARM_MEDIUM, WARM_LARGE, WARM_XLARGE.
warm_for() {
  local bucket="$1"
  case "$bucket" in
    small)  printf '%s\n' "$WARM_SMALL" ;;
    medium) printf '%s\n' "$WARM_MEDIUM" ;;
    large)  printf '%s\n' "$WARM_LARGE" ;;
    xlarge) printf '%s\n' "$WARM_XLARGE" ;;
    *)      printf '%s\n' "$WARM_MEDIUM" ;;
  esac
}
|
|
|
|
# Run our benchmark binary once for a sample and print "avg_ns,ops_s,count".
# Globals (read): RPKI_DIR, OURS_BIN, OUT_RAW, REPEATS, taskset_prefix.
# Arguments: $1 sample name, $2 iterations, $3 warmup iterations.
# Exits non-zero if the fixture is missing or the output cannot be parsed.
run_ours() {
  local sample="$1"
  local iters="$2"
  local warm="$3"
  local ours_fixture="$RPKI_DIR/tests/benchmark/selected_der/${sample}.mft"
  if [[ ! -f "$ours_fixture" ]]; then
    echo "ours fixture not found: $ours_fixture" >&2
    exit 1
  fi

  echo "### ours $sample" >> "$OUT_RAW"
  local out
  # $taskset_prefix is intentionally unquoted: an empty prefix must vanish.
  out=$($taskset_prefix "$OURS_BIN" --manifest "$ours_fixture" --iterations "$iters" --warmup-iterations "$warm" --repeats "$REPEATS")
  echo "$out" >> "$OUT_RAW"

  # Expected final row: | sample | avg ns/op | ops/s | file count |
  # grep -E replaces the previous rg (ripgrep) call, dropping a non-standard
  # dependency. The `|| true` matters: without it, a missing row fails the
  # assignment and `set -e` kills the shell before the explicit error below
  # can ever fire.
  local line
  line=$(printf '%s\n' "$out" | grep -E "^\\| ${sample} \\|" | tail -n 1 || true)
  if [[ -z "$line" ]]; then
    echo "failed to parse ours output for $sample" >&2
    exit 1
  fi
  # Trim and emit the three payload columns in a single awk pass
  # (previously three separate awk invocations over the same line).
  printf '%s\n' "$line" \
    | awk -F'|' '{for (i = 3; i <= 5; i++) gsub(/^ +| +$/, "", $i); printf "%s,%s,%s\n", $3, $4, $5}'
}
|
|
|
|
# Run the Routinator benchmark binary (decode_only target) for a sample and
# print "avg_ns,ops_s,count".
# Globals (read): ROUT_BENCH_DIR, ROUT_BIN, OUT_RAW, REPEATS, taskset_prefix.
# Arguments: $1 sample name, $2 iterations, $3 warmup iterations.
# Exits non-zero if the fixture is missing or the output cannot be parsed.
run_rout() {
  local sample="$1"
  local iters="$2"
  local warm="$3"
  local rout_fixture="$ROUT_BENCH_DIR/fixtures/selected_der/${sample}.mft"
  if [[ ! -f "$rout_fixture" ]]; then
    echo "routinator fixture not found: $rout_fixture" >&2
    exit 1
  fi

  echo "### routinator $sample" >> "$OUT_RAW"
  local out
  # $taskset_prefix is intentionally unquoted: an empty prefix must vanish.
  out=$(
    $taskset_prefix "$ROUT_BIN" \
      --target decode_only \
      --manifest "$rout_fixture" \
      --issuer "$ROUT_BENCH_DIR/fixtures/ta.cer" \
      --iterations "$iters" \
      --repeats "$REPEATS" \
      --warmup-iterations "$warm" \
      --strict false
  )
  echo "$out" >> "$OUT_RAW"

  # grep replaces the previous rg (ripgrep) call, dropping a non-standard
  # dependency. `|| true` keeps a missing line from failing the assignment
  # under `set -e` so we can report a proper parse error instead of dying
  # silently (the original had no such check at all).
  local avg_line cnt_line
  avg_line=$(printf '%s\n' "$out" | grep "^ avg:" || true)
  cnt_line=$(printf '%s\n' "$out" | grep "^ manifest_file_count:" || true)
  if [[ -z "$avg_line" || -z "$cnt_line" ]]; then
    echo "failed to parse routinator output for $sample" >&2
    exit 1
  fi

  # avg line shape: " avg: <ns> ns/op (<ops> ops/s)" — field 2 is ns/op,
  # field 4 (parens stripped) is ops/s.
  local avg_ns ops_s cnt
  avg_ns=$(printf '%s\n' "$avg_line" | awk '{print $2}')
  ops_s=$(printf '%s\n' "$avg_line" | awk '{gsub(/[()]/,"",$4); print $4}')
  cnt=$(printf '%s\n' "$cnt_line" | awk '{print $2}')
  echo "$avg_ns,$ops_s,$cnt"
}
|
|
|
|
# Main loop: benchmark every sample with both implementations and append one
# CSV row per sample.
echo "[3/3] Run per-sample benchmarks..." | tee -a "$OUT_RAW"
for s in "${SAMPLES[@]}"; do
  b=$(bucket_for "$s")
  it=$(iters_for "$b")
  warm=$(warm_for "$b")

  # Each runner prints "avg_ns,ops_s,file_count".
  IFS=, read -r ours_avg ours_ops ours_cnt < <(run_ours "$s" "$it" "$warm")
  IFS=, read -r rout_avg rout_ops rout_cnt < <(run_rout "$s" "$it" "$warm")

  # Both decoders should see the same manifest; a count mismatch means the
  # fixture trees are out of sync (warn, but still record the row).
  if [[ "$ours_cnt" != "$rout_cnt" ]]; then
    echo "WARNING: file count differs for $s (ours=$ours_cnt rout=$rout_cnt)" | tee -a "$OUT_RAW"
  fi

  # ours/routinator slowdown ratio (>1 means ours is slower). Computed with
  # awk -v instead of the previous python3 here-doc: awk is already a hard
  # dependency of this script, starts faster, and avoids interpolating shell
  # values into python source text.
  ratio=$(awk -v o="$ours_avg" -v r="$rout_avg" \
    'BEGIN { if (r == 0) print "inf"; else printf "%.4f\n", o / r }')

  echo "$s,$b,$ours_cnt,$ours_avg,$ours_ops,$rout_avg,$rout_ops,$ratio,$it,$REPEATS,$warm" >> "$OUT_CSV"
  echo >> "$OUT_RAW"
done
|
|
|
|
# Final summary: point the user at the artifacts produced above.
printf '%s\n' "Done." "- CSV: $OUT_CSV" "- Raw: $OUT_RAW"
|