From d96c901df6752532af65b739bab37b79bb7f87c1 Mon Sep 17 00:00:00 2001 From: Srikanth Muppandam Date: Mon, 27 Apr 2026 06:47:32 +0530 Subject: [PATCH 1/4] utils: centralize RT test wrapper helpers Add shared helpers used by RT test wrappers for command execution, JSON parsing, KPI aggregation, heartbeat handling, interrupt-aware cleanup, and PASS/FAIL/SKIP result emission. This avoids duplicating wrapper logic across individual rt-tests and provides a common path for preserving partial results during user interrupts. The helpers are kept POSIX shell compatible so they can run on minimal Yocto/rootfs environments used in CI and LAVA. Signed-off-by: Srikanth Muppandam --- Runner/utils/lib_rt.sh | 1986 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1986 insertions(+) create mode 100755 Runner/utils/lib_rt.sh diff --git a/Runner/utils/lib_rt.sh b/Runner/utils/lib_rt.sh new file mode 100755 index 00000000..556637d5 --- /dev/null +++ b/Runner/utils/lib_rt.sh @@ -0,0 +1,1986 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# lib_rt.sh - helpers for RT-tests wrappers + +: "${log_info:=:}" +: "${log_warn:=:}" +: "${log_error:=:}" +: "${log_fail:=:}" +: "${log_skip:=:}" +: "${log_pass:=:}" + +# shellcheck disable=SC2034 +PERF_RT_BG_PID="" +# shellcheck disable=SC2034 +PERF_RT_BG_NAME="" +# shellcheck disable=SC2034 +PERF_RT_BG_CMD="" +PERF_RT_BG_KILL_TIMEOUT="${PERF_RT_BG_KILL_TIMEOUT:-3}" +PERF_RT_BG_KILL_SIGNAL="${PERF_RT_BG_KILL_SIGNAL:-KILL}" +PERF_RT_BG_LOGFILE="${PERF_RT_BG_LOGFILE:-}" + +INTERRUPTED=0 +export INTERRUPTED + +RT_STREAM_TESTNAME="rt-tests" +export RT_STREAM_TESTNAME + +RT_INTERRUPTED="${RT_INTERRUPTED:-0}" +export RT_INTERRUPTED + +RT_CUR_TESTNAME="${RT_CUR_TESTNAME:-RTTest}" +RT_CUR_BIN_PID="${RT_CUR_BIN_PID:-}" +RT_CUR_TEE_PID="${RT_CUR_TEE_PID:-}" +RT_CUR_FIFO="${RT_CUR_FIFO:-}" + +# shellcheck disable=SC2034 +RT_HEARTBEAT_PID="" +export RT_HEARTBEAT_PID + +# shellcheck disable=SC2034 +RT_HEARTBEAT_INLINE=0 +export RT_HEARTBEAT_INLINE + +# shellcheck disable=SC2034 +RT_HEARTBEAT_TTY="" +export RT_HEARTBEAT_TTY + +PERF_RT_RETURN_CODE="" +export PERF_RT_RETURN_CODE + +# shellcheck disable=SC2034 +RT_RUN_TARGET_DURATION_SECS="" +export RT_RUN_TARGET_DURATION_SECS + +# shellcheck disable=SC2034 +RT_RUN_RC=1 +# shellcheck disable=SC2034 +RT_RUN_JSON_OK=0 +# shellcheck disable=SC2034 +RT_RUN_STDOUTLOG="" +# shellcheck disable=SC2034 +RT_RUN_JSONFILE="" +export RT_RUN_RC RT_RUN_JSON_OK RT_RUN_STDOUTLOG RT_RUN_JSONFILE + +# shellcheck disable=SC2034 +RT_BASELINE_VALUE="" +# shellcheck disable=SC2034 +RT_BASELINE_FAIL_COUNT="" +# shellcheck disable=SC2034 +RT_BASELINE_FAIL_LIMIT="" +export RT_BASELINE_VALUE RT_BASELINE_FAIL_COUNT RT_BASELINE_FAIL_LIMIT + +# --------------------------------------------------------------------------- +# rt_stream_init +# Initialize generic streaming state for a wrapper. 
+# --------------------------------------------------------------------------- +rt_stream_init() { + testname=$1 + outdir=$2 + + RT_STREAM_TESTNAME="${testname:-rt-tests}" + export RT_STREAM_TESTNAME + + INTERRUPTED=0 + export INTERRUPTED + + RT_STREAM_OUTDIR="${outdir:-/tmp}" + export RT_STREAM_OUTDIR +} + +# --------------------------------------------------------------------------- +# rt_stream_on_sigint +# Mark a streaming run as interrupted and log a partial-results message. +# --------------------------------------------------------------------------- +rt_stream_on_sigint() { + INTERRUPTED=1 + export INTERRUPTED + log_warn "${RT_STREAM_TESTNAME:-rt-tests}: Ctrl-C received, stopping test and reporting results collected so far..." +} + +# --------------------------------------------------------------------------- +# rt_run_with_progress [args...] +# Run a command in the background and log progress every step_sec seconds. +# --------------------------------------------------------------------------- +rt_run_with_progress() { + name=$1 + step=$2 + outfile=$3 + shift 3 + + [ -n "$name" ] || name="rt-test" + case "$step" in ''|*[!0-9]*|0) step=5 ;; esac + [ -n "$outfile" ] || outfile="/tmp/rt_run_with_progress.out" + + : >"$outfile" 2>/dev/null || true + + "$@" >"$outfile" 2>&1 & + pid=$! + + case "$pid" in + ''|*[!0-9]*) + log_warn "$name: failed to start command (invalid pid='$pid')" + return 1 + ;; + esac + + elapsed=0 + while kill -0 "$pid" >/dev/null 2>&1; do + sleep "$step" + elapsed=$((elapsed + step)) + log_info "$name: running... ${elapsed}s elapsed" + done + + wait "$pid" + return $? +} + +# --------------------------------------------------------------------------- +# rt_handle_int +# Interrupt handler for RT wrappers. +# Marks the run interrupted, clears inline heartbeat, and forwards SIGINT to +# the active child when one is known. 
+# --------------------------------------------------------------------------- +rt_handle_int() { + RT_INTERRUPTED=1 + export RT_INTERRUPTED + + log_warn "${RT_CUR_TESTNAME:-RTTest}: Ctrl-C received; stopping test and reporting results collected so far..." + + rt_heartbeat_clear >/dev/null 2>&1 || true + + if [ -n "$RT_CUR_BIN_PID" ]; then + kill -INT "$RT_CUR_BIN_PID" 2>/dev/null || true + fi +} + +# --------------------------------------------------------------------------- +# rt_cleanup_pipes +# Best-effort cleanup for FIFO/tee based streaming runs. +# --------------------------------------------------------------------------- +rt_cleanup_pipes() { + if [ -n "$RT_CUR_TEE_PID" ]; then + kill "$RT_CUR_TEE_PID" 2>/dev/null || true + fi + + if [ -n "$RT_CUR_FIFO" ]; then + rm -f "$RT_CUR_FIFO" 2>/dev/null || true + fi + + RT_CUR_BIN_PID="" + RT_CUR_TEE_PID="" + RT_CUR_FIFO="" +} + +# --------------------------------------------------------------------------- +# rt_stream_run_json [args...] +# Stream command output via FIFO into outfile while preserving return code. +# --------------------------------------------------------------------------- +rt_stream_run_json() { + outfile=$1 + shift + + [ -n "$outfile" ] || return 1 + [ "$#" -gt 0 ] || return 1 + + outdir=$(dirname "$outfile") + mkdir -p "$outdir" 2>/dev/null || true + + fifo="${outfile}.fifo.$$" + rm -f "$fifo" 2>/dev/null || true + if ! mkfifo "$fifo"; then + return 1 + fi + + RT_CUR_FIFO="$fifo" + + tee -a "$outfile" <"$fifo" & + RT_CUR_TEE_PID=$! + + "$@" >"$fifo" 2>&1 & + RT_CUR_BIN_PID=$! + + wait "$RT_CUR_BIN_PID" + rc=$? + + if [ -n "${RT_CUR_TEE_PID:-}" ]; then + wait "$RT_CUR_TEE_PID" 2>/dev/null || true + fi + + rm -f "$fifo" 2>/dev/null || true + RT_CUR_FIFO="" + RT_CUR_BIN_PID="" + RT_CUR_TEE_PID="" + + return "$rc" +} + +# --------------------------------------------------------------------------- +# rt_log_kernel_rt_status +# Best-effort RT kernel detection based on uname. 
+# --------------------------------------------------------------------------- +rt_log_kernel_rt_status() { + rel=$(uname -r 2>/dev/null || echo "") + ver=$(uname -v 2>/dev/null || echo "") + + case "$rel $ver" in + *-rt*|*PREEMPT_RT*) + log_info "Kernel appears to be RT-enabled: uname -r='$rel'" + return 0 + ;; + *) + log_warn "Kernel does NOT look RT-enabled: uname -r='$rel' (results may be worse)" + return 1 + ;; + esac +} + +# --------------------------------------------------------------------------- +# perf_rt_bg_start +# Start an optional background workload. +# --------------------------------------------------------------------------- +perf_rt_bg_start() { + PERF_RT_BG_NAME="${1:-rt-tests}" + PERF_RT_BG_CMD="${2:-}" + + [ -n "$PERF_RT_BG_CMD" ] || return 0 + + perf_rt_bg_stop >/dev/null 2>&1 || true + + if [ -n "${PERF_RT_BG_LOGFILE:-}" ]; then + bg_dir=$(dirname "$PERF_RT_BG_LOGFILE" 2>/dev/null || echo "") + if [ -n "$bg_dir" ]; then + mkdir -p "$bg_dir" 2>/dev/null || true + fi + log_info "$PERF_RT_BG_NAME: starting background cmd (logging -> $PERF_RT_BG_LOGFILE): $PERF_RT_BG_CMD" + sh -c "$PERF_RT_BG_CMD" >>"$PERF_RT_BG_LOGFILE" 2>&1 & + else + log_info "$PERF_RT_BG_NAME: starting background cmd: $PERF_RT_BG_CMD" + sh -c "$PERF_RT_BG_CMD" >/dev/null 2>&1 & + fi + + PERF_RT_BG_PID=$! + + case "$PERF_RT_BG_PID" in + ''|*[!0-9]*) + log_warn "$PERF_RT_BG_NAME: background cmd started but PID invalid: '$PERF_RT_BG_PID'" + PERF_RT_BG_PID="" + return 0 + ;; + esac + + return 0 +} + +# --------------------------------------------------------------------------- +# perf_rt_bg_stop +# Stop an optional background workload. 
+# --------------------------------------------------------------------------- +perf_rt_bg_stop() { + [ -n "$PERF_RT_BG_PID" ] || return 0 + + case "$PERF_RT_BG_PID" in + ''|*[!0-9]*) + PERF_RT_BG_PID="" + PERF_RT_BG_CMD="" + PERF_RT_BG_NAME="" + return 0 + ;; + esac + + log_info "${PERF_RT_BG_NAME:-rt-tests}: stopping background cmd pid=$PERF_RT_BG_PID" + + kill "$PERF_RT_BG_PID" >/dev/null 2>&1 || true + + t=0 + while [ "$t" -lt "$PERF_RT_BG_KILL_TIMEOUT" ] 2>/dev/null; do + if kill -0 "$PERF_RT_BG_PID" >/dev/null 2>&1; then + sleep 1 + t=$((t + 1)) + continue + fi + break + done + + if kill -0 "$PERF_RT_BG_PID" >/dev/null 2>&1; then + log_warn "${PERF_RT_BG_NAME:-rt-tests}: background cmd still running; sending ${PERF_RT_BG_KILL_SIGNAL}" + kill -"$PERF_RT_BG_KILL_SIGNAL" "$PERF_RT_BG_PID" >/dev/null 2>&1 || true + fi + + wait "$PERF_RT_BG_PID" >/dev/null 2>&1 || true + + PERF_RT_BG_PID="" + PERF_RT_BG_CMD="" + PERF_RT_BG_NAME="" + return 0 +} + +# --------------------------------------------------------------------------- +# rt_check_clock_sanity +# Best-effort clock sanity check. +# --------------------------------------------------------------------------- +rt_check_clock_sanity() { + testname=$1 + [ -n "$testname" ] || testname="RTTest" + + log_info "Ensuring system clock is reasonable before $testname..." + if command -v ensure_reasonable_clock >/dev/null 2>&1; then + if ! ensure_reasonable_clock; then + log_error "Clock is not reasonable; $testname results may be impacted." + return 1 + fi + return 0 + fi + + log_info "ensure_reasonable_clock() not available, continuing without clock sanity check." + return 0 +} + +# --------------------------------------------------------------------------- +# rt_fmt_num +# Normalize numeric values for KPI output. +# - Integers are emitted unchanged. +# - Floating-point values are rounded to 3 decimals. +# - Trailing zeros and trailing decimal point are removed. +# - Non-numeric input is returned as-is. 
+# --------------------------------------------------------------------------- +rt_fmt_num() { + v=$1 + + [ -n "$v" ] || { + echo "" + return 0 + } + + printf '%s\n' "$v" | awk ' + function trim(x) { + sub(/0+$/, "", x) + sub(/\.$/, "", x) + return x + } + { + if ($0 ~ /^-?[0-9]+$/) { + print $0 + exit + } + if ($0 ~ /^-?[0-9]+(\.[0-9]+)?$/) { + x=sprintf("%.3f", $0) + print trim(x) + exit + } + print $0 + } + ' +} + +# --------------------------------------------------------------------------- +# rt_aggregate_iter_latencies +# Aggregate iteration-prefixed latency KPI lines across all iterations. +# --------------------------------------------------------------------------- +rt_aggregate_iter_latencies() { + prefix=$1 + infile=$2 + + [ -n "$prefix" ] || return 1 + [ -n "$infile" ] || return 1 + [ -r "$infile" ] || return 1 + + awk -v pfx="$prefix" ' + function isnum(x) { return (x ~ /^-?[0-9]+(\.[0-9]+)?$/) } + + BEGIN { + min_min=""; min_max=""; min_sum=0; min_n=0 + avg_min=""; avg_max=""; avg_sum=0; avg_n=0 + max_min=""; max_max=""; max_sum=0; max_n=0 + worst_max=""; worst_tid="" + } + + /^iteration-[0-9]+-t[0-9]+-(min|avg|max)-latency[[:space:]]+pass[[:space:]]+/ { + key=$1 + sub(/^iteration-[0-9]+-/, "", key) + + tid=key + sub(/-.*$/, "", tid) + + metric=key + sub(/^t[0-9]+-/, "", metric) + + val=$3 + if (!isnum(val)) + next + + if (metric == "min-latency") { + if (min_min == "" || (val + 0) < (min_min + 0)) + min_min=val + if (min_max == "" || (val + 0) > (min_max + 0)) + min_max=val + min_sum += (val + 0) + min_n++ + } else if (metric == "avg-latency") { + if (avg_min == "" || (val + 0) < (avg_min + 0)) + avg_min=val + if (avg_max == "" || (val + 0) > (avg_max + 0)) + avg_max=val + avg_sum += (val + 0) + avg_n++ + } else if (metric == "max-latency") { + if (max_min == "" || (val + 0) < (max_min + 0)) + max_min=val + if (max_max == "" || (val + 0) > (max_max + 0)) + max_max=val + max_sum += (val + 0) + max_n++ + + if (worst_max == "" || (val + 0) > (worst_max 
+ 0)) { + worst_max=val + worst_tid=tid + } + } + } + + END { + if (min_n > 0) { + printf "%s-all-min-latency-min pass %s us\n", pfx, min_min + printf "%s-all-min-latency-mean pass %.6f us\n", pfx, (min_sum / min_n) + printf "%s-all-min-latency-max pass %s us\n", pfx, min_max + } + if (avg_n > 0) { + printf "%s-all-avg-latency-min pass %s us\n", pfx, avg_min + printf "%s-all-avg-latency-mean pass %.6f us\n", pfx, (avg_sum / avg_n) + printf "%s-all-avg-latency-max pass %s us\n", pfx, avg_max + } + if (max_n > 0) { + printf "%s-all-max-latency-min pass %s us\n", pfx, max_min + printf "%s-all-max-latency-mean pass %.6f us\n", pfx, (max_sum / max_n) + printf "%s-all-max-latency-max pass %s us\n", pfx, max_max + } + if (worst_tid != "" && worst_max != "") { + printf "%s-worst-thread-max-latency pass %s us\n", pfx, worst_max + sub(/^t/, "", worst_tid) + printf "%s-worst-thread-id pass %s id\n", pfx, worst_tid + } + } + ' "$infile" 2>/dev/null | while IFS= read -r line; do + kpi=$(printf '%s\n' "$line" | awk '{print $1}') + stat=$(printf '%s\n' "$line" | awk '{print $2}') + num=$(printf '%s\n' "$line" | awk '{print $3}') + unit=$(printf '%s\n' "$line" | awk '{print $4}') + + if [ -n "$kpi" ] && [ "$stat" = "pass" ] && [ -n "$num" ] && [ -n "$unit" ]; then + num2=$(rt_fmt_num "$num") + printf '%s pass %s %s\n' "$kpi" "$num2" "$unit" + else + printf '%s\n' "$line" + fi + done +} + +# --------------------------------------------------------------------------- +# rt_aggregate_iter_latencies_per_thread +# Aggregate iteration-prefixed latency KPI lines per thread across iterations. 
+# --------------------------------------------------------------------------- +rt_aggregate_iter_latencies_per_thread() { + prefix=$1 + infile=$2 + + [ -n "$prefix" ] || return 1 + [ -n "$infile" ] || return 1 + [ -r "$infile" ] || return 1 + + awk -v pfx="$prefix" ' + function isnum(x) { return (x ~ /^-?[0-9]+(\.[0-9]+)?$/) } + + /^iteration-[0-9]+-t[0-9]+-(min|avg|max)-latency[[:space:]]+pass[[:space:]]+/ { + key=$1 + sub(/^iteration-[0-9]+-/, "", key) + + tid=key + sub(/-.*$/, "", tid) + + metric=key + sub(/^t[0-9]+-/, "", metric) + + val=$3 + if (!isnum(val)) + next + + idx=tid "|" metric + + if (!(idx in seen)) { + seen[idx]=1 + minv[idx]=val + maxv[idx]=val + sumv[idx]=(val + 0) + cntv[idx]=1 + order[++nidx]=idx + } else { + if ((val + 0) < (minv[idx] + 0)) + minv[idx]=val + if ((val + 0) > (maxv[idx] + 0)) + maxv[idx]=val + sumv[idx]+=(val + 0) + cntv[idx]++ + } + } + + END { + for (i=1; i<=nidx; i++) { + idx=order[i] + split(idx, parts, /\|/) + tid=parts[1] + metric=parts[2] + + printf "%s-%s-%s-min pass %s us\n", pfx, tid, metric, minv[idx] + printf "%s-%s-%s-mean pass %.6f us\n", pfx, tid, metric, (sumv[idx] / cntv[idx]) + printf "%s-%s-%s-max pass %s us\n", pfx, tid, metric, maxv[idx] + } + } + ' "$infile" 2>/dev/null | while IFS= read -r line; do + kpi=$(printf '%s\n' "$line" | awk '{print $1}') + stat=$(printf '%s\n' "$line" | awk '{print $2}') + num=$(printf '%s\n' "$line" | awk '{print $3}') + unit=$(printf '%s\n' "$line" | awk '{print $4}') + + if [ -n "$kpi" ] && [ "$stat" = "pass" ] && [ -n "$num" ] && [ -n "$unit" ]; then + num2=$(rt_fmt_num "$num") + printf '%s pass %s %s\n' "$kpi" "$num2" "$unit" + else + printf '%s\n' "$line" + fi + done +} + +# --------------------------------------------------------------------------- +# rt_require_duration_seconds +# Convert a wrapper duration string to integer seconds. +# Logs FAIL and returns 1 for invalid values. 
+# --------------------------------------------------------------------------- +rt_require_duration_seconds() { + testname=$1 + duration_str=$2 + + duration_secs=$(rt_duration_to_seconds "$duration_str" 2>/dev/null) + + case "$duration_secs" in + ''|*[!0-9]*|0) + log_fail "$testname: invalid duration '$duration_str'" + return 1 + ;; + esac + + printf '%s\n' "$duration_secs" + return 0 +} + +# --------------------------------------------------------------------------- +# perf_parse_rt_tests_json +# Parse rt-tests style JSON output and emit KPI lines. +# --------------------------------------------------------------------------- +perf_parse_rt_tests_json() { + testname=$1 + jsonfile=$2 + + [ -n "$testname" ] || return 1 + [ -n "$jsonfile" ] || return 1 + [ -r "$jsonfile" ] || return 1 + + rt_json_get_top_num() { + key=$1 + awk -v k="$key" ' + { + s=$0 + while (match(s, "\"" k "\"[[:space:]]*:[[:space:]]*\"?-?[0-9]+(\\.[0-9]+)?\"?")) { + m=substr(s, RSTART, RLENGTH) + sub(/.*:[[:space:]]*\"?/, "", m) + sub(/\"?$/, "", m) + print m + exit + } + } + ' "$jsonfile" 2>/dev/null | head -n 1 + } + + printed=0 + + thread_lines=$( + awk ' + function brace_delta(s, t,o,c) { + t=s; o=gsub(/\{/, "", t) + t=s; c=gsub(/\}/, "", t) + return (o - c) + } + + function extract_tid(line, t) { + t=line + sub(/^[[:space:]]*"/, "", t) + sub(/".*$/, "", t) + return t + } + + function extract_num(line, m) { + if (match(line, /:[[:space:]]*-?[0-9]+(\.[0-9]+)?/)) { + m=substr(line, RSTART, RLENGTH) + sub(/^:[[:space:]]*/, "", m) + gsub(/[[:space:]]/, "", m) + return m + } + return "" + } + + BEGIN { + in_thread=0 + thread_depth=0 + in_tid=0 + tid="" + tid_depth=0 + in_recv=0 + recv_depth=0 + min="" + avg="" + max="" + } + + { + line=$0 + gsub(/\r/, "", line) + + if (!in_thread) { + if (match(line, /"thread"[[:space:]]*:[[:space:]]*\{/)) { + in_thread=1 + s=substr(line, RSTART) + b=index(s, "{") + if (b > 0) { + chunk=substr(s, b) + thread_depth = brace_delta(chunk) + } else { + thread_depth = 1 
+ } + } + next + } + + thread_depth += brace_delta(line) + + if (!in_tid && match(line, /^[[:space:]]*"[0-9][0-9]*"[[:space:]]*:[[:space:]]*\{/)) { + tid = extract_tid(line) + min=""; avg=""; max="" + in_tid=1 + + s3=line + b3=index(s3, "{") + if (b3 > 0) { + chunk3=substr(s3, b3) + tid_depth = brace_delta(chunk3) + } else { + tid_depth = 1 + } + + in_recv=0 + recv_depth=0 + next + } + + if (in_tid) { + tid_depth += brace_delta(line) + + if (!in_recv && match(line, /"receiver"[[:space:]]*:[[:space:]]*\{/)) { + in_recv=1 + s2=substr(line, RSTART) + b2=index(s2, "{") + if (b2 > 0) { + chunk2=substr(s2, b2) + recv_depth = brace_delta(chunk2) + } else { + recv_depth = 1 + } + next + } + + if (in_recv) { + if (min=="" && line ~ /"min"[[:space:]]*:/) { n=extract_num(line); if (n!="") min=n } + else if (avg=="" && line ~ /"avg"[[:space:]]*:/) { n=extract_num(line); if (n!="") avg=n } + else if (max=="" && line ~ /"max"[[:space:]]*:/) { n=extract_num(line); if (n!="") max=n } + + recv_depth += brace_delta(line) + if (recv_depth <= 0) { + in_recv=0 + recv_depth=0 + } + } else { + if (min=="" && line ~ /^[[:space:]]*"min"[[:space:]]*:/) { n=extract_num(line); if (n!="") min=n } + else if (avg=="" && line ~ /^[[:space:]]*"avg"[[:space:]]*:/) { n=extract_num(line); if (n!="") avg=n } + else if (max=="" && line ~ /^[[:space:]]*"max"[[:space:]]*:/) { n=extract_num(line); if (n!="") max=n } + } + + if (tid_depth <= 0) { + if (tid != "") { + if (min != "") printf "t%s-min-latency pass %s us\n", tid, min + if (avg != "") printf "t%s-avg-latency pass %s us\n", tid, avg + if (max != "") printf "t%s-max-latency pass %s us\n", tid, max + } + in_tid=0 + tid="" + tid_depth=0 + in_recv=0 + recv_depth=0 + min=""; avg=""; max="" + } + } + + if (thread_depth <= 0) { + exit + } + } + ' "$jsonfile" 2>/dev/null + ) + + if [ -n "$thread_lines" ]; then + printf '%s\n' "$thread_lines" + printed=1 + fi + + if [ "$printed" -ne 1 ] 2>/dev/null; then + one=$( + tr '\n' ' ' <"$jsonfile" 2>/dev/null | 
awk ' + function getnum(s, key, m) { + if (match(s, "\"" key "\"[[:space:]]*:[[:space:]]*\"?-?[0-9]+(\\.[0-9]+)?\"?")) { + m=substr(s, RSTART, RLENGTH) + sub(/.*:[[:space:]]*\"?/, "", m) + sub(/\"?$/, "", m) + return m + } + return "" + } + { + s=$0 + min=getnum(s, "min") + avg=getnum(s, "avg") + max=getnum(s, "max") + if (min!="" || avg!="" || max!="") { + print "0|" min "|" avg "|" max + exit + } + } + ' 2>/dev/null + ) + + if [ -n "$one" ]; then + tid=$(printf '%s' "$one" | awk -F'|' '{print $1}') + min=$(printf '%s' "$one" | awk -F'|' '{print $2}') + avg=$(printf '%s' "$one" | awk -F'|' '{print $3}') + max=$(printf '%s' "$one" | awk -F'|' '{print $4}') + [ -n "$min" ] && echo "t${tid}-min-latency pass ${min} us" + [ -n "$avg" ] && echo "t${tid}-avg-latency pass ${avg} us" + [ -n "$max" ] && echo "t${tid}-max-latency pass ${max} us" + fi + fi + + inv=$(rt_json_get_top_num inversion) + [ -n "$inv" ] && echo "inversion pass ${inv} count" + + rc=$(rt_json_get_top_num return_code) + [ -n "$rc" ] || rc=$(rt_json_get_top_num return) + + PERF_RT_RETURN_CODE="${rc:-}" + export PERF_RT_RETURN_CODE + + case "$rc" in + 0|0.0) + echo "${testname}-ok pass 1 ok" + echo "${testname}-rc pass 0 rc" + echo "$testname pass" + ;; + *) + echo "${testname}-ok pass 0 ok" + if [ -n "$rc" ]; then + echo "${testname}-rc pass ${rc} rc" + else + echo "${testname}-rc pass -1 rc" + fi + echo "$testname fail" + ;; + esac + + return 0 +} + +# --------------------------------------------------------------------------- +# rt_require_common_tools [tool...] +# Check that all requested tools are available. +# --------------------------------------------------------------------------- +rt_require_common_tools() { + if command -v check_dependencies >/dev/null 2>&1; then + if ! CHECK_DEPS_NO_EXIT=1 check_dependencies "$@"; then + return 1 + fi + return 0 + fi + + for rt_tool in "$@"; do + if ! 
command -v "$rt_tool" >/dev/null 2>&1; then + return 1 + fi + done + + return 0 +} + +# --------------------------------------------------------------------------- +# rt_require_json_helpers +# Ensure the JSON parser and latency aggregate helpers exist. +# --------------------------------------------------------------------------- +rt_require_json_helpers() { + if ! command -v perf_parse_rt_tests_json >/dev/null 2>&1; then + log_skip "RT helper missing: perf_parse_rt_tests_json" + return 1 + fi + + if ! command -v rt_aggregate_iter_latencies >/dev/null 2>&1; then + log_skip "RT helper missing: rt_aggregate_iter_latencies" + return 1 + fi + + if ! command -v rt_aggregate_iter_latencies_per_thread >/dev/null 2>&1; then + log_skip "RT helper missing: rt_aggregate_iter_latencies_per_thread" + return 1 + fi + + return 0 +} + +# --------------------------------------------------------------------------- +# rt_require_stream_helpers +# Ensure streaming helper exists. +# --------------------------------------------------------------------------- +rt_require_stream_helpers() { + if ! command -v rt_stream_run_json >/dev/null 2>&1; then + log_skip "RT helper missing: rt_stream_run_json" + return 1 + fi + return 0 +} + +# --------------------------------------------------------------------------- +# rt_normalize_common_params +# Normalize common wrapper environment variables to safe defaults. 
+# --------------------------------------------------------------------------- +rt_normalize_common_params() { + case "${INTERVAL:-}" in ''|*[!0-9]*) INTERVAL=1000 ;; esac + case "${STEP:-}" in ''|*[!0-9]*) STEP=500 ;; esac + case "${THREADS:-}" in ''|*[!0-9]*) THREADS=1 ;; esac + case "${ITERATIONS:-}" in ''|*[!0-9]*|0) ITERATIONS=1 ;; esac + case "${PROGRESS_EVERY:-}" in ''|*[!0-9]*|0) PROGRESS_EVERY=1 ;; esac + case "${HEARTBEAT_SEC:-}" in ''|*[!0-9]*|0) HEARTBEAT_SEC=10 ;; esac + case "${USER_BASELINE:-}" in '' ) ;; *[!0-9.]* ) USER_BASELINE="" ;; esac + + if [ "$THREADS" -eq 0 ] 2>/dev/null; then + if command -v nproc >/dev/null 2>&1; then + THREADS=$(nproc 2>/dev/null || echo 0) + else + THREADS=0 + fi + case "$THREADS" in ''|*[!0-9]*|0) THREADS=1 ;; esac + fi + + export INTERVAL STEP THREADS ITERATIONS PROGRESS_EVERY HEARTBEAT_SEC USER_BASELINE + return 0 +} + +# --------------------------------------------------------------------------- +# rt_resolve_binary +# Resolve a binary path either from an explicit path or PATH lookup. +# --------------------------------------------------------------------------- +rt_resolve_binary() { + bin_name=$1 + explicit_bin=$2 + + if [ -n "$explicit_bin" ]; then + if [ -x "$explicit_bin" ]; then + printf '%s\n' "$explicit_bin" + return 0 + fi + return 1 + fi + + if command -v "$bin_name" >/dev/null 2>&1; then + resolved_bin=$(command -v "$bin_name" 2>/dev/null || echo "") + if [ -n "$resolved_bin" ] && [ -x "$resolved_bin" ]; then + printf '%s\n' "$resolved_bin" + return 0 + fi + fi + + return 1 +} + +# --------------------------------------------------------------------------- +# rt_prepare_output_layout [extra files...] +# Create output directories and truncate result/log files. 
+# --------------------------------------------------------------------------- +rt_prepare_output_layout() { + rt_out_dir=$1 + rt_result_txt=$2 + shift 2 + + if [ -n "$rt_out_dir" ]; then + mkdir -p "$rt_out_dir" 2>/dev/null || true + fi + + if [ -n "$rt_result_txt" ]; then + mkdir -p "$(dirname "$rt_result_txt")" 2>/dev/null || true + : >"$rt_result_txt" 2>/dev/null || true + fi + + while [ "$#" -gt 0 ]; do + rt_file=$1 + shift + if [ -n "$rt_file" ]; then + mkdir -p "$(dirname "$rt_file")" 2>/dev/null || true + : >"$rt_file" 2>/dev/null || true + fi + done + + return 0 +} + +# --------------------------------------------------------------------------- +# rt_log_common_runtime_env +# Emit common environment and runtime metadata for debugging. +# --------------------------------------------------------------------------- +rt_log_common_runtime_env() { + testname=$1 + rt_bin=$2 + + [ -n "$testname" ] || testname="RTTest" + + if command -v rt_log_kernel_rt_status >/dev/null 2>&1; then + rt_log_kernel_rt_status || true + fi + + log_info "$testname: uname -a: $(uname -a 2>/dev/null || echo n/a)" + log_info "$testname: sched_rt_runtime_us=$(cat /proc/sys/kernel/sched_rt_runtime_us 2>/dev/null || echo n/a)" + log_info "$testname: sched_rt_period_us=$(cat /proc/sys/kernel/sched_rt_period_us 2>/dev/null || echo n/a)" + + rt_memlock_line=$(awk ' + $1=="Max" && $2=="locked" && $3=="memory" { + soft=$4; hard=$5; unit=$6 + if (soft=="" || hard=="") print "n/a" + else printf("%s/%s %s\n", soft, hard, unit) + exit + } + ' /proc/self/limits 2>/dev/null) + + [ -n "$rt_memlock_line" ] || rt_memlock_line="n/a" + log_info "$testname: memlock(soft/hard)=$rt_memlock_line" + + if command -v nproc >/dev/null 2>&1; then + log_info "$testname: nproc=$(nproc 2>/dev/null || echo n/a)" + else + log_info "$testname: nproc=n/a" + fi + + log_info "$testname: cpu_online=$(cat /sys/devices/system/cpu/online 2>/dev/null || echo n/a)" + rt_gov0=$(cat 
/sys/devices/system/cpu/cpufreq/policy0/scaling_governor 2>/dev/null || echo n/a) + log_info "$testname: governor0=$rt_gov0" + + if [ -n "$rt_bin" ]; then + log_info "$testname: BIN=$rt_bin" + fi + return 0 +} + +# --------------------------------------------------------------------------- +# rt_start_heartbeat +# Start background heartbeat progress reporting. +# Prefer writing inline progress to /dev/tty when available. +# Otherwise stay silent and rely on the final completion line. +# --------------------------------------------------------------------------- +rt_start_heartbeat() { + testname=$1 + heartbeat_sec=$2 + + [ -n "$testname" ] || testname="RTTest" + case "$heartbeat_sec" in ''|*[!0-9]*|0) heartbeat_sec=10 ;; esac + + rt_stop_heartbeat >/dev/null 2>&1 || true + + RT_HEARTBEAT_INLINE=0 + RT_HEARTBEAT_TTY="" + if [ -c /dev/tty ] && [ -w /dev/tty ]; then + RT_HEARTBEAT_INLINE=1 + RT_HEARTBEAT_TTY="/dev/tty" + fi + export RT_HEARTBEAT_INLINE RT_HEARTBEAT_TTY + + ( + elapsed=0 + while :; do + sleep "$heartbeat_sec" + elapsed=$((elapsed + heartbeat_sec)) + + if [ "${RT_HEARTBEAT_INLINE:-0}" -eq 1 ] 2>/dev/null && [ -n "${RT_HEARTBEAT_TTY:-}" ]; then + printf '\r[INFO] %s: elapsed %ss' "$testname" "$elapsed" >"$RT_HEARTBEAT_TTY" + fi + done + ) & + RT_HEARTBEAT_PID=$! + export RT_HEARTBEAT_PID + return 0 +} + +# --------------------------------------------------------------------------- +# rt_stop_heartbeat +# Stop heartbeat progress reporting. 
+# --------------------------------------------------------------------------- +rt_stop_heartbeat() { + if [ -n "${RT_HEARTBEAT_PID:-}" ]; then + kill "$RT_HEARTBEAT_PID" 2>/dev/null || true + wait "$RT_HEARTBEAT_PID" 2>/dev/null || true + fi + + if [ "${RT_HEARTBEAT_INLINE:-0}" -eq 1 ] 2>/dev/null && [ -n "${RT_HEARTBEAT_TTY:-}" ]; then + printf '\r\033[K' >"$RT_HEARTBEAT_TTY" + fi + + RT_HEARTBEAT_PID="" + RT_HEARTBEAT_INLINE=0 + RT_HEARTBEAT_TTY="" + export RT_HEARTBEAT_PID RT_HEARTBEAT_INLINE RT_HEARTBEAT_TTY + return 0 +} + +# --------------------------------------------------------------------------- +# rt_heartbeat_render +# Render a single-line elapsed timer to /dev/tty when available. +# --------------------------------------------------------------------------- +rt_heartbeat_render() { + testname=$1 + elapsed=$2 + + if [ -c /dev/tty ] && [ -w /dev/tty ]; then + printf '\r[INFO] %s: elapsed %ss' "$testname" "$elapsed" > /dev/tty + fi +} + +# --------------------------------------------------------------------------- +# rt_heartbeat_clear +# Clear the single-line elapsed timer from /dev/tty when available. +# --------------------------------------------------------------------------- +rt_heartbeat_clear() { + if [ -c /dev/tty ] && [ -w /dev/tty ]; then + printf '\r\033[K' > /dev/tty + fi +} + +# --------------------------------------------------------------------------- +# rt_run_and_capture [args...] +# Run a command, capture stdout/stderr to stdoutlog, and preserve exit status. 
# ---------------------------------------------------------------------------
# rt__run_with_heartbeat <testname> <heartbeat_sec> <logfile> <cmd> [args...]
# Internal helper shared by rt_run_and_capture() and rt_run_json_iteration():
# launches <cmd> with stdout/stderr captured to <logfile>, renders a heartbeat
# every <heartbeat_sec> seconds while it runs, then records the exit status in
# RT_RUN_RC (exported) and logs the elapsed/requested duration.
# Returns 1 when the command could not be started, otherwise RT_RUN_RC.
# ---------------------------------------------------------------------------
rt__run_with_heartbeat() {
  hb_name=$1
  hb_interval=$2
  hb_log=$3
  shift 3

  start_ts=$(date +%s 2>/dev/null || echo 0)

  "$@" >"$hb_log" 2>&1 &
  run_pid=$!
  RT_CUR_BIN_PID="$run_pid"
  export RT_CUR_BIN_PID

  case "$run_pid" in
    ''|*[!0-9]*)
      log_fail "$hb_name: failed to start test binary"
      RT_RUN_RC=1
      RT_CUR_BIN_PID=""
      export RT_RUN_RC RT_CUR_BIN_PID
      return 1
      ;;
  esac

  # Heartbeat loop: wake every hb_interval seconds while the binary is alive.
  while kill -0 "$run_pid" 2>/dev/null; do
    sleep "$hb_interval"
    if kill -0 "$run_pid" 2>/dev/null; then
      now_ts=$(date +%s 2>/dev/null || echo 0)
      if [ "$start_ts" -gt 0 ] 2>/dev/null && [ "$now_ts" -ge "$start_ts" ] 2>/dev/null; then
        rt_heartbeat_render "$hb_name" $((now_ts - start_ts))
      fi
    fi
  done

  wait "$run_pid"
  RT_RUN_RC=$?
  export RT_RUN_RC

  end_ts=$(date +%s 2>/dev/null || echo 0)
  rt_heartbeat_clear

  RT_CUR_BIN_PID=""
  export RT_CUR_BIN_PID

  if [ -n "${RT_RUN_TARGET_DURATION_SECS:-}" ]; then
    log_info "$hb_name: completed requested duration ${RT_RUN_TARGET_DURATION_SECS}s"
    if [ "${VERBOSE:-0}" -eq 1 ] 2>/dev/null && \
       [ "$start_ts" -gt 0 ] 2>/dev/null && \
       [ "$end_ts" -ge "$start_ts" ] 2>/dev/null; then
      log_info "$hb_name: actual elapsed $((end_ts - start_ts))s"
    fi
  else
    if [ "$start_ts" -gt 0 ] 2>/dev/null && [ "$end_ts" -ge "$start_ts" ] 2>/dev/null; then
      log_info "$hb_name: completed after $((end_ts - start_ts))s"
    fi
  fi

  return "$RT_RUN_RC"
}

# ---------------------------------------------------------------------------
# rt_run_and_capture <testname> <heartbeat_sec> <stdoutlog> <cmd> [args...]
# Run one RT test command with captured output and heartbeat progress.
# Exports RT_RUN_RC and RT_RUN_STDOUTLOG. Returns 0 only when the command
# exits 0; bad arguments return 1 immediately.
# ---------------------------------------------------------------------------
rt_run_and_capture() {
  testname=$1
  heartbeat_sec=$2
  stdoutlog=$3
  shift 3

  RT_RUN_RC=1
  RT_RUN_STDOUTLOG="$stdoutlog"
  export RT_RUN_RC RT_RUN_STDOUTLOG

  [ -n "$testname" ] || testname="RTTest"
  # Non-numeric or zero heartbeat falls back to a 10s cadence.
  case "$heartbeat_sec" in ''|*[!0-9]*|0) heartbeat_sec=10 ;; esac
  [ -n "$stdoutlog" ] || return 1
  [ "$#" -gt 0 ] || return 1

  mkdir -p "$(dirname "$stdoutlog")" 2>/dev/null || true
  : >"$stdoutlog" 2>/dev/null || true

  RT_CUR_TESTNAME="$testname"
  export RT_CUR_TESTNAME

  rt__run_with_heartbeat "$testname" "$heartbeat_sec" "$stdoutlog" "$@" || :
  if [ "$RT_RUN_RC" -ne 0 ] 2>/dev/null; then
    return 1
  fi
  return 0
}

# ---------------------------------------------------------------------------
# rt_run_json_iteration <testname> <heartbeat_sec> <stdoutlog> <jsonfile> [args...]
# Runs a single RT test iteration directly, emits heartbeat progress while the
# binary is alive, and exports:
#   RT_RUN_RC
#   RT_RUN_JSON_OK
#   RT_RUN_STDOUTLOG
#   RT_RUN_JSONFILE
# Supports Ctrl-C via rt_handle_int(). Returns 0 only when the binary exited 0
# AND the JSON file is readable afterwards.
# ---------------------------------------------------------------------------
rt_run_json_iteration() {
  testname=$1
  heartbeat_sec=$2
  stdoutlog=$3
  jsonfile=$4
  shift 4

  RT_RUN_RC=1
  RT_RUN_JSON_OK=0
  RT_RUN_STDOUTLOG="$stdoutlog"
  RT_RUN_JSONFILE="$jsonfile"
  export RT_RUN_RC RT_RUN_JSON_OK RT_RUN_STDOUTLOG RT_RUN_JSONFILE

  [ -n "$testname" ] || testname="RTTest"
  case "$heartbeat_sec" in ''|*[!0-9]*|0) heartbeat_sec=10 ;; esac
  [ -n "$stdoutlog" ] || return 1
  [ -n "$jsonfile" ] || return 1
  [ "$#" -gt 0 ] || return 1

  mkdir -p "$(dirname "$stdoutlog")" 2>/dev/null || true
  mkdir -p "$(dirname "$jsonfile")" 2>/dev/null || true
  : >"$stdoutlog" 2>/dev/null || true

  RT_CUR_TESTNAME="$testname"
  export RT_CUR_TESTNAME

  rt__run_with_heartbeat "$testname" "$heartbeat_sec" "$stdoutlog" "$@" || :

  # The iteration only "produced JSON" when the file exists and is readable.
  if [ -r "$jsonfile" ]; then
    RT_RUN_JSON_OK=1
  else
    RT_RUN_JSON_OK=0
  fi
  export RT_RUN_JSON_OK

  if [ "$RT_RUN_RC" -ne 0 ] 2>/dev/null; then
    return 1
  fi
  if [ "$RT_RUN_JSON_OK" -ne 1 ] 2>/dev/null; then
    return 1
  fi
  return 0
}

# ---------------------------------------------------------------------------
# rt_run_streaming_iteration <testname> <heartbeat_sec> <stdoutlog> <jsonfile> [args...]
# Run a JSON-producing test iteration with live streamed output.
# ---------------------------------------------------------------------------
# rt_run_streaming_iteration <testname> <heartbeat_sec> <stdoutlog> <jsonfile> [args...]
# Run a JSON-producing iteration whose output is streamed live through
# rt_stream_run_json(), with a background heartbeat while it executes.
# Exports RT_RUN_RC, RT_RUN_JSON_OK, RT_RUN_STDOUTLOG and RT_RUN_JSONFILE;
# returns 0 only when the helper exited 0 AND the JSON file is readable.
# ---------------------------------------------------------------------------
rt_run_streaming_iteration() {
  testname=$1
  heartbeat_sec=$2
  stdoutlog=$3
  jsonfile=$4
  shift 4

  RT_RUN_RC=1
  RT_RUN_JSON_OK=0
  RT_RUN_STDOUTLOG="$stdoutlog"
  RT_RUN_JSONFILE="$jsonfile"
  export RT_RUN_RC RT_RUN_JSON_OK RT_RUN_STDOUTLOG RT_RUN_JSONFILE

  [ -n "$testname" ] || testname="RTTest"
  case "$heartbeat_sec" in ''|*[!0-9]*|0) heartbeat_sec=10 ;; esac
  [ -n "$stdoutlog" ] || return 1
  [ -n "$jsonfile" ] || return 1
  [ "$#" -gt 0 ] || return 1

  mkdir -p "$(dirname "$stdoutlog")" 2>/dev/null || true
  mkdir -p "$(dirname "$jsonfile")" 2>/dev/null || true
  : >"$stdoutlog" 2>/dev/null || true

  RT_CUR_TESTNAME="$testname"
  export RT_CUR_TESTNAME

  # The streaming helper runs in the background so we can poll it.
  rt_stream_run_json "$stdoutlog" "$@" &
  run_pid=$!

  case "$run_pid" in
    ''|*[!0-9]*)
      log_fail "$testname: failed to start streaming test helper"
      return 1
      ;;
  esac

  start_ts=$(date +%s 2>/dev/null || echo 0)

  rt_start_heartbeat "$testname" "$heartbeat_sec"
  while kill -0 "$run_pid" 2>/dev/null; do
    sleep 1
  done

  wait "$run_pid"
  RT_RUN_RC=$?
  export RT_RUN_RC
  end_ts=$(date +%s 2>/dev/null || echo 0)
  rt_stop_heartbeat

  if [ "$start_ts" -gt 0 ] 2>/dev/null && [ "$end_ts" -ge "$start_ts" ] 2>/dev/null; then
    log_info "$testname: completed after $((end_ts - start_ts))s"
  fi

  # JSON is considered produced only when the file is readable afterwards.
  if [ -r "$jsonfile" ]; then
    RT_RUN_JSON_OK=1
  else
    RT_RUN_JSON_OK=0
  fi
  export RT_RUN_JSON_OK

  if [ "$RT_RUN_RC" -ne 0 ] 2>/dev/null; then
    return 1
  fi
  if [ "$RT_RUN_JSON_OK" -ne 1 ] 2>/dev/null; then
    return 1
  fi
  return 0
}

# Backward-compatible alias kept for older wrappers.
rt_run_json_iteration_streaming() {
  rt_run_streaming_iteration "$@"
}

# ---------------------------------------------------------------------------
# rt_log_iteration_progress <testname> <iter_num> <total_iters> <progress_every> [label]
# Emit controlled iteration progress logs.
# ---------------------------------------------------------------------------
# rt_log_iteration_progress <testname> <iter_num> <total_iters> <progress_every> [label]
# Emit controlled iteration progress logs: the first and last iterations are
# always announced; intermediate ones only every <progress_every> iterations.
# Always returns 0 (progress logging must never fail a test).
# ---------------------------------------------------------------------------
rt_log_iteration_progress() {
  testname=$1
  iter_num=$2
  total_iters=$3
  progress_every=$4
  label=${5:-starting}

  [ -n "$testname" ] || testname="RTTest"
  # Silently ignore a malformed iteration number; coerce the other counters.
  case "$iter_num" in ''|*[!0-9]*) return 0 ;; esac
  case "$total_iters" in ''|*[!0-9]*|0) total_iters=1 ;; esac
  case "$progress_every" in ''|*[!0-9]*|0) progress_every=1 ;; esac

  if [ "$iter_num" -eq 1 ] 2>/dev/null \
    || [ "$iter_num" -eq "$total_iters" ] 2>/dev/null \
    || [ $((iter_num % progress_every)) -eq 0 ] 2>/dev/null; then
    log_info "$testname: iteration $iter_num/$total_iters $label"
  fi
  return 0
}

# ---------------------------------------------------------------------------
# rt_append_iteration_kpi <iter_num> <tmp_one> <iter_kpi> <result_txt>
# Prefix parsed KPI lines with iteration-N- and append them to iter_kpi/result.
# ---------------------------------------------------------------------------
rt_append_iteration_kpi() {
  iter_num=$1
  tmp_one=$2
  iter_kpi=$3
  result_txt=$4

  [ -n "$iter_num" ] || return 1
  [ -r "$tmp_one" ] || return 1
  [ -n "$iter_kpi" ] || return 1
  [ -n "$result_txt" ] || return 1

  # Tag every non-empty KPI line and append it to both destinations.
  for kpi_dest in "$iter_kpi" "$result_txt"; do
    awk -v p="iteration-${iter_num}-" 'NF { print p $0 }' "$tmp_one" >>"$kpi_dest" 2>/dev/null || return 1
  done
  return 0
}

# ---------------------------------------------------------------------------
# rt_parse_and_append_iteration_kpi
# Parse a JSON file using perf_parse_rt_tests_json() and store iteration-tagged KPI lines.
# ---------------------------------------------------------------------------
# rt_parse_and_append_iteration_kpi <prefix> <jsonfile> <tmp_one> <iter_kpi> <result_txt> <iter_num>
# Parse a JSON file using perf_parse_rt_tests_json() and store the resulting
# iteration-tagged KPI lines via rt_append_iteration_kpi().
# ---------------------------------------------------------------------------
rt_parse_and_append_iteration_kpi() {
  prefix=$1
  jsonfile=$2
  tmp_one=$3
  iter_kpi=$4
  result_txt=$5
  iter_num=$6

  # All six arguments are mandatory; the JSON file must also be readable.
  [ -n "$prefix" ] || return 1
  [ -r "$jsonfile" ] || return 1
  [ -n "$tmp_one" ] || return 1
  [ -n "$iter_kpi" ] || return 1
  [ -n "$result_txt" ] || return 1
  [ -n "$iter_num" ] || return 1

  : >"$tmp_one" 2>/dev/null || true
  perf_parse_rt_tests_json "$prefix" "$jsonfile" >"$tmp_one" 2>/dev/null || return 1

  rt_append_iteration_kpi "$iter_num" "$tmp_one" "$iter_kpi" "$result_txt"
}

# ---------------------------------------------------------------------------
# rt_kpi_file_has_fail <prefix> <kpi_file>
# Return success if the KPI file contains a fail line for the prefix.
# ---------------------------------------------------------------------------
rt_kpi_file_has_fail() {
  prefix=$1
  kpi_file=$2
  [ -n "$prefix" ] || return 1
  [ -r "$kpi_file" ] || return 1
  # Match both iteration-tagged and untagged "fail" lines for this prefix.
  fail_re="^iteration-[0-9]+-${prefix}[[:space:]]+fail\$|^${prefix}[[:space:]]+fail\$"
  grep -Eq "$fail_re" "$kpi_file" 2>/dev/null
}

# ---------------------------------------------------------------------------
# rt_emit_kpi_block <testname> <title> <file>
# Print a KPI block through rt_print_kpi_block() if available, otherwise via log_info.
# ---------------------------------------------------------------------------
# rt_emit_kpi_block <testname> <title> <file>
# Print a KPI block through rt_print_kpi_block() if available, otherwise via
# log_info. Missing/empty files are silently ignored. Always returns 0.
# ---------------------------------------------------------------------------
rt_emit_kpi_block() {
  testname=$1
  title=$2
  file=$3

  # Nothing to print for a missing or empty KPI file.
  { [ -n "$file" ] && [ -s "$file" ]; } || return 0

  # Prefer the project-provided pretty printer when one is defined.
  if command -v rt_print_kpi_block >/dev/null 2>&1; then
    rt_print_kpi_block "$testname" "$title" "$file"
    return 0
  fi

  log_info "$testname: ---------------- ${title} ----------------"
  while IFS= read -r line; do
    [ -n "$line" ] && log_info "$testname: $line"
  done <"$file"
  log_info "$testname: ------------------------------------------------------"
  return 0
}

# ---------------------------------------------------------------------------
# rt_emit_aggregate_kpi <testname> <prefix> <iter_kpi> <agg_kpi> <result_txt>
# Aggregate per-iteration latencies across all threads and iterations.
# ---------------------------------------------------------------------------
rt_emit_aggregate_kpi() {
  testname=$1
  prefix=$2
  iter_kpi=$3
  agg_kpi=$4
  result_txt=$5

  : >"$agg_kpi" 2>/dev/null || true

  if ! rt_aggregate_iter_latencies "$prefix" "$iter_kpi" >"$agg_kpi" 2>/dev/null; then
    log_warn "$testname: aggregate KPI generation failed (rt_aggregate_iter_latencies)"
    return 1
  fi

  if [ -s "$agg_kpi" ]; then
    cat "$agg_kpi" >>"$result_txt" 2>/dev/null || true
    rt_emit_kpi_block "$testname" "aggregate results" "$agg_kpi"
  fi
  return 0
}

# ---------------------------------------------------------------------------
# rt_emit_thread_aggregate_kpi <testname> <prefix> <iter_kpi> <thread_agg_kpi> <result_txt>
# Aggregate per-thread latencies across iterations.
# ---------------------------------------------------------------------------
# rt_emit_thread_aggregate_kpi <testname> <prefix> <iter_kpi> <thread_agg_kpi> <result_txt>
# Aggregate per-thread latencies across iterations via
# rt_aggregate_iter_latencies_per_thread() and publish the KPI block.
# ---------------------------------------------------------------------------
rt_emit_thread_aggregate_kpi() {
  testname=$1
  prefix=$2
  iter_kpi=$3
  thread_agg_kpi=$4
  result_txt=$5

  : >"$thread_agg_kpi" 2>/dev/null || true

  if ! rt_aggregate_iter_latencies_per_thread "$prefix" "$iter_kpi" >"$thread_agg_kpi" 2>/dev/null; then
    log_warn "$testname: per-thread aggregate KPI generation failed (rt_aggregate_iter_latencies_per_thread)"
    return 1
  fi

  if [ -s "$thread_agg_kpi" ]; then
    cat "$thread_agg_kpi" >>"$result_txt" 2>/dev/null || true
    rt_emit_kpi_block "$testname" "per-thread aggregate results" "$thread_agg_kpi"
  fi
  return 0
}

# ---------------------------------------------------------------------------
# rt_append_named_metric <name> <value> <unit> <out_file>
# Append a single "name pass value [unit]" KPI line to a file.
# Fix: use printf instead of echo — POSIX echo has implementation-defined
# handling of backslashes and leading dashes in its arguments.
# ---------------------------------------------------------------------------
rt_append_named_metric() {
  name=$1
  value=$2
  unit=$3
  out_file=$4

  [ -n "$name" ] || return 1
  [ -n "$value" ] || return 1
  [ -n "$out_file" ] || return 1

  if [ -n "$unit" ]; then
    printf '%s pass %s %s\n' "$name" "$value" "$unit" >>"$out_file" 2>/dev/null || return 1
  else
    printf '%s pass %s\n' "$name" "$value" >>"$out_file" 2>/dev/null || return 1
  fi
  return 0
}

# ---------------------------------------------------------------------------
# rt_emit_pass_fail_result <testname> <res_file> <result_txt> <out_dir> <overall_fail>
# Emit final PASS/FAIL result and update the .res file.
# ---------------------------------------------------------------------------
# rt_emit_pass_fail_result <testname> <res_file> <result_txt> <out_dir> <overall_fail>
# Emit the final PASS/FAIL log line and write "<testname> PASS|FAIL" into the
# .res file consumed by CI/LAVA. Returns 1 only when res_file is missing.
# ---------------------------------------------------------------------------
rt_emit_pass_fail_result() {
  testname=$1
  res_file=$2
  result_txt=$3
  out_dir=$4
  overall_fail=$5

  [ -n "$testname" ] || testname="RTTest"
  [ -n "$res_file" ] || return 1

  if [ "$overall_fail" -eq 0 ] 2>/dev/null; then
    log_pass "$testname: PASS"
    echo "$testname PASS" >"$res_file"
    return 0
  fi

  # Point the user at the artifacts that explain the failure.
  if [ -n "$out_dir" ]; then
    log_fail "$testname: FAIL (see $result_txt and $out_dir)"
  else
    log_fail "$testname: FAIL (see $result_txt)"
  fi
  echo "$testname FAIL" >"$res_file"
  return 0
}

# ---------------------------------------------------------------------------
# rt_emit_interrupt_aware_result <testname> <res_file> <result_txt> <out_dir> <interrupted> <overall_fail>
# Emit SKIP for user interrupt, otherwise defer to normal PASS/FAIL emission.
# ---------------------------------------------------------------------------
rt_emit_interrupt_aware_result() {
  testname=$1
  res_file=$2
  result_txt=$3
  out_dir=$4
  interrupted=$5
  overall_fail=$6

  [ -n "$testname" ] || testname="RTTest"
  [ -n "$res_file" ] || return 1

  if [ "$interrupted" -eq 1 ] 2>/dev/null; then
    # A user interrupt is reported as SKIP, not FAIL, and keeps partial data.
    log_skip "$testname: SKIP (interrupted by user; partial results kept in $result_txt)"
    echo "$testname SKIP" >"$res_file"
    return 0
  fi

  rt_emit_pass_fail_result "$testname" "$res_file" "$result_txt" "$out_dir" "$overall_fail"
}

# ---------------------------------------------------------------------------
# rt_extract_numeric_samples_from_log <logfile> <token> <out_file>
# Extract numeric samples from a logfile where the numeric value follows token.
# ---------------------------------------------------------------------------
# rt_extract_numeric_samples_from_log <logfile> <token> <out_file>
# Extract numeric samples from a logfile where the value either follows
# <token> as the next field ("lat: 12") or is glued to it ("lat:12").
# Negative and decimal values are accepted. Succeeds only when at least one
# sample was written to <out_file>.
# ---------------------------------------------------------------------------
rt_extract_numeric_samples_from_log() {
  logfile=$1
  token=$2
  out_file=$3

  [ -r "$logfile" ] || return 1
  [ -n "$token" ] || return 1
  [ -n "$out_file" ] || return 1

  : >"$out_file" 2>/dev/null || true

  awk -v tok="$token" '
    function isnum(x) { return (x ~ /^-?[0-9]+(\.[0-9]+)?$/) }
    {
      for (i = 1; i <= NF; i++) {
        if ($i == tok) {
          v = $(i + 1)
          gsub(/^[[:space:]]+|[[:space:]]+$/, "", v)
          if (isnum(v)) print v
        } else if ($i ~ ("^" tok "[0-9]")) {
          v = $i
          sub("^" tok, "", v)
          if (isnum(v)) print v
        }
      }
    }
  ' "$logfile" >"$out_file" 2>/dev/null

  [ -s "$out_file" ]
}

# ---------------------------------------------------------------------------
# rt_emit_numeric_summary <metric_prefix> <values_file> <unit> <result_txt> [testname] [parsed_file]
# Compute min/mean/max from a file of numeric values (one per line) and append
# "<prefix>-min/-mean/-max" KPI lines to <result_txt> (and <parsed_file>).
# Fix: accept negative samples too — rt_extract_numeric_samples_from_log can
# emit them, and the old /^[0-9].../ filter silently dropped them from stats.
# ---------------------------------------------------------------------------
rt_emit_numeric_summary() {
  metric_prefix=$1
  values_file=$2
  unit=$3
  result_txt=$4
  testname=$5
  parsed_file=$6

  [ -n "$metric_prefix" ] || return 1
  [ -r "$values_file" ] || return 1
  [ -n "$result_txt" ] || return 1

  summary=$(awk '
    BEGIN { min=""; max=""; sum=0; n=0 }
    /^-?[0-9]+(\.[0-9]+)?$/ {
      v=$1+0
      if (min=="" || v<min) min=v
      if (max=="" || v>max) max=v
      sum+=v
      n++
    }
    END {
      if (n>0) printf("%s|%.6f|%s|%d\n", min, sum/n, max, n)
    }
  ' "$values_file" 2>/dev/null)

  [ -n "$summary" ] || return 1

  s_min=$(printf '%s' "$summary" | awk -F'|' '{print $1}')
  s_mean=$(printf '%s' "$summary" | awk -F'|' '{print $2}')
  s_max=$(printf '%s' "$summary" | awk -F'|' '{print $3}')

  rt_append_named_metric "${metric_prefix}-min" "$(rt_fmt_num "$s_min")" "$unit" "$result_txt" || return 1
  rt_append_named_metric "${metric_prefix}-mean" "$(rt_fmt_num "$s_mean")" "$unit" "$result_txt" || return 1
  rt_append_named_metric "${metric_prefix}-max" "$(rt_fmt_num "$s_max")" "$unit" "$result_txt" || return 1

  # The parsed_file copy is best-effort and never fails the summary.
  if [ -n "$parsed_file" ]; then
    rt_append_named_metric "${metric_prefix}-min" "$(rt_fmt_num "$s_min")" "$unit" "$parsed_file" || true
    rt_append_named_metric "${metric_prefix}-mean" "$(rt_fmt_num "$s_mean")" "$unit" "$parsed_file" || true
    rt_append_named_metric "${metric_prefix}-max" "$(rt_fmt_num "$s_max")" "$unit" "$parsed_file" || true
  fi

  if [ -n "$testname" ]; then
    log_info "$testname: ${metric_prefix}-min pass $(rt_fmt_num "$s_min") $unit"
    log_info "$testname: ${metric_prefix}-mean pass $(rt_fmt_num "$s_mean") $unit"
    log_info "$testname: ${metric_prefix}-max pass $(rt_fmt_num "$s_max") $unit"
  fi

  return 0
}

# ---------------------------------------------------------------------------
# rt_emit_numeric_summary_from_log <metric_prefix> <logfile> <token> <unit> <result_txt> [testname] [parsed_file]
# Extract numeric samples from a log and emit min/mean/max KPI lines.
# ---------------------------------------------------------------------------
rt_emit_numeric_summary_from_log() {
  metric_prefix=$1
  logfile=$2
  token=$3
  unit=$4
  result_txt=$5
  testname=$6
  parsed_file=$7

  values_file="${result_txt}.values.$$"
  if ! rt_extract_numeric_samples_from_log "$logfile" "$token" "$values_file"; then
    rm -f "$values_file" 2>/dev/null || true
    return 1
  fi

  rc=0
  rt_emit_numeric_summary "$metric_prefix" "$values_file" "$unit" "$result_txt" "$testname" "$parsed_file" || rc=1
  rm -f "$values_file" 2>/dev/null || true
  return "$rc"
}

# ---------------------------------------------------------------------------
# rt_emit_worst_sample_from_log <metric_name> <logfile> <token> <unit> <parsed_file> <result_txt> [testname]
# Find the worst sample from a tokenized numeric log stream and emit it as KPI.
# ---------------------------------------------------------------------------
# rt_emit_worst_sample_from_log <metric_name> <logfile> <token> <unit> <parsed_file> <result_txt> [testname]
# Find the worst (largest) sample from a tokenized numeric log stream and emit
# it as a KPI line. Fix: the old code scanned the log twice and then took the
# FIRST numeric token of the worst line, which could differ from the actual
# maximum when one line carried several samples; a single awk pass now emits
# the max value together with its line.
# ---------------------------------------------------------------------------
rt_emit_worst_sample_from_log() {
  metric_name=$1
  logfile=$2
  token=$3
  unit=$4
  parsed_file=$5
  result_txt=$6
  testname=$7

  [ -n "$metric_name" ] || return 1
  [ -r "$logfile" ] || return 1
  [ -n "$token" ] || return 1
  [ -n "$result_txt" ] || return 1

  # One pass: print "<max value>\t<full line>" for the worst sample.
  worst_pair=$(
    awk -v tok="$token" '
      function isnum(x) { return (x ~ /^-?[0-9]+(\.[0-9]+)?$/) }
      function consider(v) {
        if (isnum(v) && (best == "" || (v + 0) > (best + 0))) { best = v; best_line = $0 }
      }
      {
        for (i = 1; i <= NF; i++) {
          if ($i == tok) {
            v = $(i + 1)
            gsub(/^[[:space:]]+|[[:space:]]+$/, "", v)
            consider(v)
          } else if ($i ~ ("^" tok "[0-9]")) {
            v = $i
            sub("^" tok, "", v)
            consider(v)
          }
        }
      }
      END { if (best_line != "") printf "%s\t%s\n", best, best_line }
    ' "$logfile" 2>/dev/null
  )

  [ -n "$worst_pair" ] || return 1

  # Split "<value>\t<line>" on the first tab.
  tab=$(printf '\t')
  worst_value=${worst_pair%%"$tab"*}
  worst_line=${worst_pair#*"$tab"}

  [ -n "$worst_value" ] || return 1

  rt_append_named_metric "$metric_name" "$worst_value" "$unit" "$result_txt" || return 1
  if [ -n "$parsed_file" ]; then
    rt_append_named_metric "$metric_name" "$worst_value" "$unit" "$parsed_file" || true
  fi

  if [ -n "$testname" ]; then
    log_info "$testname: worst-sample $worst_line"
  fi

  return 0
}

# ---------------------------------------------------------------------------
# rt_parse_token_numeric_samples <metric_prefix> <logfile> <token> <unit>
# Parse tokenized numeric samples from a plain text log and emit KPI lines.
# ---------------------------------------------------------------------------
# rt_parse_token_numeric_samples <metric_prefix> <logfile> <token> <unit>
# Parse tokenized numeric samples from a plain text log and print
# "<prefix>-min/-mean/-max pass <value> <unit>" KPI lines to stdout.
# Fix: accept negative samples in the stats pass, matching what
# rt_extract_numeric_samples_from_log can emit.
# ---------------------------------------------------------------------------
rt_parse_token_numeric_samples() {
  metric_prefix=$1
  logfile=$2
  token=$3
  unit=$4

  [ -n "$metric_prefix" ] || return 1
  [ -r "$logfile" ] || return 1
  [ -n "$token" ] || return 1

  values_file="${TMPDIR:-/tmp}/rt_samples.$$"
  if ! rt_extract_numeric_samples_from_log "$logfile" "$token" "$values_file"; then
    rm -f "$values_file" 2>/dev/null || true
    return 1
  fi

  summary=$(awk '
    BEGIN { min=""; max=""; sum=0; n=0 }
    /^-?[0-9]+(\.[0-9]+)?$/ {
      v=$1+0
      if (min=="" || v<min) min=v
      if (max=="" || v>max) max=v
      sum+=v
      n++
    }
    END {
      if (n>0) printf("%s|%.6f|%s\n", min, sum/n, max)
    }
  ' "$values_file" 2>/dev/null)

  rm -f "$values_file" 2>/dev/null || true
  [ -n "$summary" ] || return 1

  s_min=$(printf '%s' "$summary" | awk -F'|' '{print $1}')
  s_mean=$(printf '%s' "$summary" | awk -F'|' '{print $2}')
  s_max=$(printf '%s' "$summary" | awk -F'|' '{print $3}')

  printf '%s-min pass %s %s\n' "$metric_prefix" "$(rt_fmt_num "$s_min")" "$unit"
  printf '%s-mean pass %s %s\n' "$metric_prefix" "$(rt_fmt_num "$s_mean")" "$unit"
  printf '%s-max pass %s %s\n' "$metric_prefix" "$(rt_fmt_num "$s_max")" "$unit"
  return 0
}

# ---------------------------------------------------------------------------
# rt_majority_fail_limit <iterations>
# Return majority fail threshold (ceil(N / 2)) for N iterations.
# ---------------------------------------------------------------------------
rt_majority_fail_limit() {
  iterations=$1
  case "$iterations" in ''|*[!0-9]*|0) iterations=1 ;; esac
  printf '%s\n' $(((iterations + 1) / 2))
  return 0
}

# ---------------------------------------------------------------------------
# rt_collect_named_metric_values <result_txt> <metric_name> <output_file>
# Extract numeric values for iteration-tagged KPI lines containing metric_name.
# ---------------------------------------------------------------------------
# rt_collect_named_metric_values <result_txt> <metric_name> <output_file>
# Extract numeric values for iteration-tagged KPI lines containing metric_name.
# KPI lines come in two shapes:
#   iteration-N-<metric> pass <value> <unit>   (4+ fields)
#   iteration-N-<metric> pass <value>          (3 fields, unit-less)
# Fix: the old unconditional $(NF-1) returned the literal "pass" for the
# unit-less shape, which silently corrupted numeric baseline gating.
# ---------------------------------------------------------------------------
rt_collect_named_metric_values() {
  result_txt=$1
  metric_name=$2
  output_file=$3

  [ -r "$result_txt" ] || return 1
  [ -n "$metric_name" ] || return 1
  [ -n "$output_file" ] || return 1

  mkdir -p "$(dirname "$output_file")" 2>/dev/null || true
  : >"$output_file" 2>/dev/null || true

  awk -v metric="$metric_name" '
    /^iteration-/ {
      if (index($1, metric) > 0 && NF >= 3) {
        print (NF >= 4 ? $(NF - 1) : $NF)
      }
    }
  ' "$result_txt" >"$output_file" 2>/dev/null

  [ -s "$output_file" ]
}

# Convenience wrapper for the most common metric.
rt_collect_max_latency_values() {
  rt_collect_named_metric_values "$1" "max-latency" "$2"
}

# ---------------------------------------------------------------------------
# rt_evaluate_majority_threshold_gate <testname> <iterations> <values_file> <gate_kpi> <result_txt> <user_baseline> <metric_label> <unit>
# Evaluate a majority-based threshold gate against a set of numeric values.
# The baseline is <user_baseline> when given, otherwise the minimum observed
# value. Exports RT_BASELINE_VALUE/RT_BASELINE_FAIL_COUNT/RT_BASELINE_FAIL_LIMIT
# and returns 1 when at least a majority of values exceeded the baseline.
# ---------------------------------------------------------------------------
rt_evaluate_majority_threshold_gate() {
  testname=$1
  iterations=$2
  values_file=$3
  gate_kpi=$4
  result_txt=$5
  user_baseline=$6
  metric_label=$7
  unit=$8

  RT_BASELINE_VALUE=""
  RT_BASELINE_FAIL_COUNT=""
  RT_BASELINE_FAIL_LIMIT=""
  export RT_BASELINE_VALUE RT_BASELINE_FAIL_COUNT RT_BASELINE_FAIL_LIMIT

  [ -n "$testname" ] || testname="RTTest"
  [ -r "$values_file" ] || return 1
  [ -n "$gate_kpi" ] || return 1
  [ -n "$result_txt" ] || return 1
  [ -n "$metric_label" ] || metric_label="baseline"

  if [ ! -s "$values_file" ]; then
    log_warn "$testname: no metric values found for threshold comparison"
    return 1
  fi

  if [ -n "$user_baseline" ]; then
    baseline_value="$user_baseline"
    log_info "$testname: using user-provided baseline: $baseline_value"
  else
    baseline_value=$(sort -n "$values_file" | head -n 1)
    log_info "$testname: using derived baseline (minimum observed value): $baseline_value"
  fi

  fail_count=$(awk -v b="$baseline_value" '
    BEGIN { c = 0 }
    ($1 + 0) > (b + 0) { c++ }
    END { print c }
  ' "$values_file")

  fail_limit=$(rt_majority_fail_limit "$iterations")

  RT_BASELINE_VALUE="$baseline_value"
  RT_BASELINE_FAIL_COUNT="$fail_count"
  RT_BASELINE_FAIL_LIMIT="$fail_limit"
  export RT_BASELINE_VALUE RT_BASELINE_FAIL_COUNT RT_BASELINE_FAIL_LIMIT

  : >"$gate_kpi" 2>/dev/null || true
  rt_append_named_metric "${metric_label}-baseline" "$baseline_value" "$unit" "$gate_kpi" || true
  rt_append_named_metric "${metric_label}-fail-limit" "$fail_limit" "count" "$gate_kpi" || true
  rt_append_named_metric "${metric_label}-fail-count" "$fail_count" "count" "$gate_kpi" || true

  cat "$gate_kpi" >>"$result_txt" 2>/dev/null || true
  rt_emit_kpi_block "$testname" "baseline comparison results" "$gate_kpi"

  # Fail only when a majority of iterations exceeded the baseline.
  if [ "$fail_count" -ge "$fail_limit" ] 2>/dev/null; then
    return 1
  fi
  return 0
}

# Default gate: max-latency baseline in microseconds.
rt_evaluate_baseline_gate() {
  rt_evaluate_majority_threshold_gate "$1" "$2" "$3" "$4" "$5" "$6" "baseline" "us"
}

# ---------------------------------------------------------------------------
# rt_duration_to_seconds <duration>
# Convert compact duration strings like 90, 5m, 1h, 1m30s to integer seconds.
+# --------------------------------------------------------------------------- +rt_duration_to_seconds() { + dur=$1 + dur=$(printf '%s' "$dur" | tr -d '[:space:]' 2>/dev/null) + + [ -n "$dur" ] || { + echo 0 + return 0 + } + + case "$dur" in + *[!0-9]*) ;; + *) echo "$dur"; return 0 ;; + esac + + printf '%s' "$dur" | awk ' + function add(v,u) { + if (u=="s") t += v + else if (u=="m") t += v*60 + else if (u=="h") t += v*3600 + else if (u=="d") t += v*86400 + else ok = 0 + } + BEGIN { t=0; ok=1 } + { + s=$0 + while (match(s, /^[0-9]+[smhd]/)) { + v = substr(s, 1, RLENGTH-1) + 0 + u = substr(s, RLENGTH, 1) + add(v,u) + s = substr(s, RLENGTH+1) + } + if (s != "") ok = 0 + if (ok && t >= 0) print int(t) + else print 0 + } + ' 2>/dev/null +} From 8bc2925bd28d6e7971f8d9a9d6a63854b130df17 Mon Sep 17 00:00:00 2001 From: Srikanth Muppandam <smuppand@qti.qualcomm.com> Date: Mon, 27 Apr 2026 06:47:47 +0530 Subject: [PATCH 2/4] rt-tests: standardize JSON latency wrappers Update common JSON-producing RT test wrappers to use the shared lib_rt.sh execution, parsing, KPI aggregation, and result emission helpers. This keeps per-test behavior unchanged while making the wrappers more consistent, easier to maintain, and safer for CI/LAVA use. The wrappers now share interrupt-aware handling so Ctrl-C or TERM stops the active test cleanly and preserves any collected partial results. 
Signed-off-by: Srikanth Muppandam <smuppand@qti.qualcomm.com> --- .../CyclicDeadline/CyclicDeadline.yaml | 30 ++ .../CyclicDeadline/CyclicDeadline_README.md | 187 +++++++++ .../Kernel/RT-tests/CyclicDeadline/run.sh | 294 ++++++++++++++ .../Cyclictest/README_RT_Cyclictest.md | 238 ++++++++++++ .../RT-tests/Cyclictest/RT_Cyclictest.yaml | 33 ++ .../suites/Kernel/RT-tests/Cyclictest/run.sh | 262 +++++++++++++ .../Kernel/RT-tests/PMQTest/PMQTest_README.md | 217 +++++++++++ .../Kernel/RT-tests/PMQTest/pmqtest.yaml | 31 ++ Runner/suites/Kernel/RT-tests/PMQTest/run.sh | 249 ++++++++++++ .../RT-tests/PTSEMATest/PTSEMATest_README.md | 196 ++++++++++ .../RT-tests/PTSEMATest/ptsematest.yaml | 31 ++ .../suites/Kernel/RT-tests/PTSEMATest/run.sh | 245 ++++++++++++ .../RTMigrateTest/RTMigrateTest_README.md | 157 ++++++++ .../RTMigrateTest/rt-migrate-test-full.yaml | 38 ++ .../RTMigrateTest/rt-migrate-test.yaml | 32 ++ .../Kernel/RT-tests/RTMigrateTest/run.sh | 333 ++++++++++++++++ .../RT-tests/SignalTest/SignalTest_README.md | 232 +++++++++++ .../suites/Kernel/RT-tests/SignalTest/run.sh | 337 ++++++++++++++++ .../RT-tests/SignalTest/signaltest.yaml | 38 ++ .../SigwaitTest/SigwaitTest_README.md | 244 ++++++++++++ .../suites/Kernel/RT-tests/SigwaitTest/run.sh | 364 ++++++++++++++++++ .../RT-tests/SigwaitTest/sigwaittest.yaml | 41 ++ 22 files changed, 3829 insertions(+) create mode 100755 Runner/suites/Kernel/RT-tests/CyclicDeadline/CyclicDeadline.yaml create mode 100644 Runner/suites/Kernel/RT-tests/CyclicDeadline/CyclicDeadline_README.md create mode 100755 Runner/suites/Kernel/RT-tests/CyclicDeadline/run.sh create mode 100644 Runner/suites/Kernel/RT-tests/Cyclictest/README_RT_Cyclictest.md create mode 100755 Runner/suites/Kernel/RT-tests/Cyclictest/RT_Cyclictest.yaml create mode 100755 Runner/suites/Kernel/RT-tests/Cyclictest/run.sh create mode 100644 Runner/suites/Kernel/RT-tests/PMQTest/PMQTest_README.md create mode 100755 Runner/suites/Kernel/RT-tests/PMQTest/pmqtest.yaml 
create mode 100755 Runner/suites/Kernel/RT-tests/PMQTest/run.sh create mode 100644 Runner/suites/Kernel/RT-tests/PTSEMATest/PTSEMATest_README.md create mode 100644 Runner/suites/Kernel/RT-tests/PTSEMATest/ptsematest.yaml create mode 100755 Runner/suites/Kernel/RT-tests/PTSEMATest/run.sh create mode 100644 Runner/suites/Kernel/RT-tests/RTMigrateTest/RTMigrateTest_README.md create mode 100755 Runner/suites/Kernel/RT-tests/RTMigrateTest/rt-migrate-test-full.yaml create mode 100755 Runner/suites/Kernel/RT-tests/RTMigrateTest/rt-migrate-test.yaml create mode 100755 Runner/suites/Kernel/RT-tests/RTMigrateTest/run.sh create mode 100644 Runner/suites/Kernel/RT-tests/SignalTest/SignalTest_README.md create mode 100755 Runner/suites/Kernel/RT-tests/SignalTest/run.sh create mode 100644 Runner/suites/Kernel/RT-tests/SignalTest/signaltest.yaml create mode 100644 Runner/suites/Kernel/RT-tests/SigwaitTest/SigwaitTest_README.md create mode 100755 Runner/suites/Kernel/RT-tests/SigwaitTest/run.sh create mode 100755 Runner/suites/Kernel/RT-tests/SigwaitTest/sigwaittest.yaml diff --git a/Runner/suites/Kernel/RT-tests/CyclicDeadline/CyclicDeadline.yaml b/Runner/suites/Kernel/RT-tests/CyclicDeadline/CyclicDeadline.yaml new file mode 100755 index 00000000..f584ae30 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/CyclicDeadline/CyclicDeadline.yaml @@ -0,0 +1,30 @@ +metadata: + name: CyclicDeadline + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests cyclicdeadline in JSON mode and parse results without requiring python3." 
+ os: + - linux + scope: + - performance + - preempt-rt + +params: + INTERVAL: "1000" + STEP: "500" + THREADS: "1" + DURATION: "2m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + USER_BASELINE: "" + BINARY: "" + OUT_DIR: "./logs_CyclicDeadline" + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/CyclicDeadline + - ./run.sh --interval "${INTERVAL}" --step "${STEP}" --threads "${THREADS}" --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --user-baseline "${USER_BASELINE}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh CyclicDeadline.res diff --git a/Runner/suites/Kernel/RT-tests/CyclicDeadline/CyclicDeadline_README.md b/Runner/suites/Kernel/RT-tests/CyclicDeadline/CyclicDeadline_README.md new file mode 100644 index 00000000..3cdd6f2d --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/CyclicDeadline/CyclicDeadline_README.md @@ -0,0 +1,187 @@ +# CyclicDeadline + +## Overview + +`CyclicDeadline` is the qcom-linux-testkit wrapper for the `rt-tests` `cyclicdeadline` binary. + +It is similar to `cyclictest`, but instead of using `SCHED_FIFO` with `nanosleep()` to measure jitter, it uses `SCHED_DEADLINE` and treats the deadline as the wakeup interval. + +This wrapper: + +- runs `cyclicdeadline` in JSON mode +- parses KPI using `lib_rt.sh` +- supports repeated iterations +- prints per-iteration, aggregate, and per-thread aggregate results +- can keep partial results on user interrupt when supported by the common RT helpers +- writes final PASS/FAIL/SKIP summary to `CyclicDeadline.res` + +The script is LAVA-friendly and always exits `0`. CI gating should use the `.res` file. + +## Defaults + +Defaults are aligned to the Linaro test-definition behavior unless explicitly overridden. 
+ +- `INTERVAL=1000` +- `STEP=500` +- `THREADS=1` +- `DURATION=5m` +- `BACKGROUND_CMD=""` +- `ITERATIONS=1` +- `USER_BASELINE=""` + +## Files generated + +By default, the test writes logs under: + +`./logs_CyclicDeadline` + +Typical outputs: + +- `CyclicDeadline.res` - final PASS/FAIL/SKIP summary +- `logs_CyclicDeadline/result.txt` - parsed KPI output +- `logs_CyclicDeadline/iter_kpi.txt` - per-iteration KPI +- `logs_CyclicDeadline/agg_kpi.txt` - overall aggregate KPI +- `logs_CyclicDeadline/thread_agg_kpi.txt` - per-thread aggregate KPI +- `logs_CyclicDeadline/cyclicdeadline-<N>.json` - raw JSON per iteration +- `logs_CyclicDeadline/cyclicdeadline_stdout_iter<N>.log` - console/stdout capture per iteration +- `logs_CyclicDeadline/max_latencies.txt` - extracted max latency values when baseline comparison is used + +## Usage + +```sh +./run.sh [OPTIONS] +``` + +## Supported wrapper options + +### Wrapper control + +- `--out DIR` + - Output directory +- `--result FILE` + - Result text file path +- `--duration TIME` + - Test duration passed as `-D TIME` +- `--iterations N` + - Number of iterations to run +- `--background-cmd CMD` + - Optional background workload to run during the test +- `--binary PATH` + - Explicit path to the `cyclicdeadline` binary +- `--progress-every N` + - Progress message cadence across iterations +- `--heartbeat-sec N` + - Periodic "still running" heartbeat while a long iteration is executing +- `--verbose` + - Enable additional wrapper logging + +### cyclicdeadline options supported by the wrapper + +- `--interval-us USEC` + - Base interval in microseconds, maps to `-i` +- `--step-us USEC` + - Step size in microseconds, maps to `-s` +- `--threads N` + - Number of threads, maps to `-t` + - If set to `0`, wrapper expands it to `nproc` +- `--user-baseline VALUE` + - Baseline max latency to compare against when iteration count is high enough + +## Baseline comparison behavior + +When `ITERATIONS` is greater than `2`, the wrapper can evaluate max 
latency results against a baseline. + +Behavior: + +- extracts all `max-latency` values from per-iteration parsed output +- if `USER_BASELINE` is set, that value is used as the baseline +- otherwise, the minimum observed max latency becomes the baseline +- counts how many max latency values are above the baseline +- compares that count against `ITERATIONS / 2` + +This provides a simple consistency check across repeated runs. + +## Examples + +Run one default iteration using auto-detected binary: + +```sh +./run.sh +``` + +Run with explicit binary, 3 iterations, and 1 minute duration: + +```sh +./run.sh --binary /tmp/cyclicdeadline --duration 1m --iterations 3 +``` + +Run with one thread per CPU: + +```sh +./run.sh --threads 0 --duration 1m +``` + +Run with custom interval and step: + +```sh +./run.sh --interval-us 1000 --step-us 500 --threads 4 --duration 60s +``` + +Run with baseline comparison: + +```sh +./run.sh --binary /tmp/cyclicdeadline --iterations 5 --user-baseline 120 +``` + +Run with heartbeat messages every 10 seconds: + +```sh +./run.sh --binary /tmp/cyclicdeadline --duration 60s --heartbeat-sec 10 +``` + +## LAVA integration notes + +Typical YAML wiring passes parameters into `run.sh` and reports using: + +```sh +$REPO_PATH/Runner/utils/send-to-lava.sh CyclicDeadline.res +``` + +Recommended CI behavior: + +- rely on `CyclicDeadline.res` for PASS/FAIL/SKIP +- keep `result.txt` and JSON files as artifacts for debugging +- use `--binary` when the binary is staged outside standard PATH + +## Expected console behavior + +The wrapper may print: + +- environment and scheduler context +- selected binary and options +- per-iteration start messages +- optional heartbeat messages for long-running iterations +- per-iteration KPI +- aggregate KPI +- per-thread aggregate KPI +- final PASS/FAIL/SKIP summary + +## Interrupt behavior + +If the shared RT helper functions in `lib_rt.sh` are present and enabled, `Ctrl-C` can preserve partial output and mark the run as 
`SKIP` instead of `FAIL`. + +This depends on the common RT helper implementation already being present in your tree. + +## Dependencies + +The wrapper expects: + +- `cyclicdeadline` binary available either in `PATH` or via `--binary` +- `functestlib.sh` +- `lib_rt.sh` +- standard shell utilities such as `awk`, `sed`, `grep`, `tee`, `mkdir`, `cat`, `tr`, and `date` + +## Notes + +- Keep changes aligned with existing qcom-linux-testkit conventions. +- For CI, use the `.res` file as the authoritative result. diff --git a/Runner/suites/Kernel/RT-tests/CyclicDeadline/run.sh b/Runner/suites/Kernel/RT-tests/CyclicDeadline/run.sh new file mode 100755 index 00000000..19578870 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/CyclicDeadline/run.sh @@ -0,0 +1,294 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# CyclicDeadline wrapper for qcom-linux-testkit +# - Runs rt-tests cyclicdeadline ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to CyclicDeadline.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use CyclicDeadline.res for gating. + +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. 
"$TOOLS/lib_rt.sh" + +TESTNAME="CyclicDeadline" +test_path=$(find_test_case_by_name "$TESTNAME") +if [ -n "$test_path" ]; then + : +else + test_path="$SCRIPT_DIR" +fi + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +INTERVAL="${INTERVAL:-1000}" +STEP="${STEP:-500}" +THREADS="${THREADS:-1}" +DURATION="${DURATION:-5m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +USER_BASELINE="${USER_BASELINE:-}" +QUIET="${QUIET:-true}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose + --interval N + --step N + --threads N + --duration STR + --iterations N + --user-baseline N Max-latency baseline in us for majority gate (optional) + --quiet BOOL + +Notes: + When --user-baseline is not provided, baseline gating is skipped and + latency KPIs are reported only. 
+EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + --interval) + shift + INTERVAL="$1" + ;; + --step) + shift + STEP="$1" + ;; + --threads) + shift + THREADS="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --user-baseline) + shift + USER_BASELINE="$1" + ;; + --quiet) + shift + QUIET="$1" + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/cyclicdeadline" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" +THREAD_AGG_KPI="$OUT_DIR/thread_agg_kpi.txt" +MAX_LAT_FILE="$OUT_DIR/max_latencies.txt" +GATE_KPI="$OUT_DIR/gate_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" \ + "$THREAD_AGG_KPI" \ + "$MAX_LAT_FILE" \ + "$GATE_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run cyclicdeadline" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +CDL_BIN=$(rt_resolve_binary cyclicdeadline "$BINARY" 2>/dev/null || echo "") +if [ -z "$CDL_BIN" ] || [ ! 
-x "$CDL_BIN" ]; then + log_skip "$TESTNAME: cyclicdeadline binary not found/executable (${CDL_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$CDL_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION interval=$INTERVAL step=$STEP threads=$THREADS" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/cyclicdeadline_stdout_iter${i}.log" + + set -- "$CDL_BIN" + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + set -- "$@" -i "$INTERVAL" -s "$STEP" -t "$THREADS" -D "$DURATION" --json="$jsonfile" + + if rt_run_json_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: cyclicdeadline exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! 
rt_parse_and_append_iteration_kpi "cyclicdeadline" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "cyclicdeadline" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true +rt_emit_thread_aggregate_kpi "$TESTNAME" "cyclicdeadline" "$ITER_KPI" "$THREAD_AGG_KPI" "$RESULT_TXT" || true + +if [ "${RT_INTERRUPTED:-0}" -ne 1 ] 2>/dev/null && [ "$ITERATIONS" -gt 2 ] 2>/dev/null; then + if rt_collect_named_metric_values "$RESULT_TXT" "max-latency" "$MAX_LAT_FILE"; then + if [ -n "$USER_BASELINE" ]; then + if ! rt_evaluate_majority_threshold_gate "$TESTNAME" "$ITERATIONS" "$MAX_LAT_FILE" "$GATE_KPI" "$RESULT_TXT" "$USER_BASELINE" "max-latency" "us"; then + log_fail "$TESTNAME: baseline gate failed (${RT_BASELINE_FAIL_COUNT} >= ${RT_BASELINE_FAIL_LIMIT})" + overall_fail=1 + fi + else + log_info "$TESTNAME: no user baseline provided; skipping baseline gate" + fi + else + log_warn "$TESTNAME: no max-latency values found for baseline comparison" + overall_fail=1 + fi +fi + +if rt_kpi_file_has_fail "cyclicdeadline" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/Cyclictest/README_RT_Cyclictest.md b/Runner/suites/Kernel/RT-tests/Cyclictest/README_RT_Cyclictest.md new file mode 100644 index 00000000..e921d79f --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/Cyclictest/README_RT_Cyclictest.md @@ -0,0 +1,238 @@ +# Cyclictest (rt-tests / cyclictest) + +This test is part of **qcom-linux-testkit** and wraps `rt-tests` **cyclictest** to measure timer wakeup latency and report KPIs (min/avg/max) in a LAVA-friendly way. 
+ +It is designed to work on minimal images **without Python modules** by parsing cyclictest JSON output using POSIX shell helpers in `Runner/utils/lib_rt.sh`. + +--- + +## What this test does + +For each iteration, the wrapper: + +1. Validates required tools and privileges (must run as **root**). +2. Optionally starts a background load command (`BACKGROUND_CMD`) to stress the system. +3. Runs `cyclictest` with `--json=<file>` and captures console output into a per-iteration `.out`. +4. Parses the JSON and emits KPI lines like: + - `t0-min-latency pass <us> us` + - `t0-avg-latency pass <us> us` + - `t0-max-latency pass <us> us` + - … (for additional threads, `t1-…`, `t2-…`, etc., if present in JSON) +5. Aggregates **t0** latency KPIs across iterations and reports averages. +6. Emits `<TESTNAME>.res` as PASS/FAIL/SKIP for CI/LAVA gating. + +> Note: The test is **warn-only** if the kernel is not RT-enabled. It will still run and report latencies. + +--- + +## Location + +- Test: `Runner/suites/Kernel/RT-tests/Cyclictest/run.sh` +- Helpers: `Runner/utils/lib_rt.sh` (JSON parsing, progress logging, background load helpers) + +--- + +## Prerequisites + +### Permissions +- Must run as **root** (`id -u == 0`), since cyclictest typically uses RT scheduling and `mlockall()`. + +### Tools required +The wrapper expects the following tools to exist (or it will SKIP): +- `uname`, `awk`, `sed`, `grep`, `tr`, `head`, `tail`, `mkdir`, `cat`, `sh`, `tee`, `sleep`, `kill`, `date` +- `cyclictest` executable (either in `$PATH` or provided via `--binary` / `BINARY`) + +### Kernel considerations (recommended) +- RT kernel (PREEMPT_RT) is recommended for meaningful RT KPIs. +- The script prints a warning if `uname -r` / `uname -v` doesn’t look RT-enabled. + +Useful runtime knobs (optional): +- `/proc/sys/kernel/sched_rt_runtime_us` (RT bandwidth) +- system frequency governor / CPU online state can affect results. 
+
+---
+
+## Basic usage
+
+From the test directory:
+
+```sh
+cd Runner/suites/Kernel/RT-tests/Cyclictest
+sudo ./run.sh
+```
+
+If cyclictest is not in PATH:
+
+```sh
+sudo ./run.sh --binary /tmp/cyclictest
+```
+
+Run multiple iterations (example: 5 runs):
+
+```sh
+sudo ./run.sh --binary /tmp/cyclictest --iterations 5
+```
+
+Use more threads (example: 8):
+
+```sh
+sudo ./run.sh --binary /tmp/cyclictest --threads 8
+```
+
+Change duration (example: 30 seconds):
+
+```sh
+sudo ./run.sh --binary /tmp/cyclictest --duration 30s
+```
+
+> Note: `THREADS=0` is normalized by the shared RT helpers (typically expanded to `nproc`).
+
+---
+
+## Parameters (Environment / LAVA params)
+
+All options can be provided as environment variables (LAVA `params:`) or as CLI options.
+
+### Output / logging
+- `OUT_DIR` (default: `<test_path>/logs_CyclicTest`)
+- `RESULT_TXT` (default: `$OUT_DIR/result.txt`)
+- `VERBOSE` (default: `0`)
+
+### cyclictest control
+- `PRIO` (default: `98`) → cyclictest `-p`
+- `INTERVAL` (default: `1000`) microseconds → cyclictest `-i`
+- `THREADS` (default: `1`) → cyclictest `-t`
+- `DURATION` (default: `5m`) → cyclictest `-D`
+- `QUIET` (default: `true`) adds `-q`; `MLOCKALL` (default: `true`) adds `-m`
+- `SMP` (default: `false`) adds `-S`
+
+### Iteration / progress
+- `ITERATIONS` (default: `1`) number of iterations
+- `PROGRESS_EVERY` (default: `1`) iteration cadence for progress logs; `HEARTBEAT_SEC` (default: `10`) seconds between heartbeats
+
+### Background load
+- `BACKGROUND_CMD` (default: empty) command to run during test (stopped afterward)
+
+### Binary override
+- `BINARY` (default: empty) explicit path to cyclictest executable
+
+---
+
+## CLI options
+
+`run.sh` supports:
+
+- `--out DIR`
+- `--result FILE`
+- `--interval USEC`
+- `--threads N`
+- `--duration DUR`
+- `--iterations N`
+- `--progress-every N`
+- `--heartbeat-sec N`
+- `--background-cmd CMD`
+- `--binary PATH`
+- `--verbose`
+- `-h, --help`
+
+`PRIO`, `QUIET`, `MLOCKALL`, and `SMP` are environment-only (no CLI flag).
+
+---
+
+## Output files
+
+Within `OUT_DIR` 
(default: `logs_Cyclictest`): + +- `cyclictest_iterN.json` : cyclictest JSON output per iteration +- `cyclictest_iterN.out` : cyclictest stdout/stderr per iteration +- `parsed_iterN.txt` : parsed KPI lines per iteration (from JSON) +- `metrics_all.txt` : KPI lines used for averaging (t0 only by default) +- `average_summary.txt` : computed averages across iterations (t0 min/avg/max) +- `result.txt` : concatenated per-iteration KPI lines + averages + final verdict + +At the test root: +- `Cyclictest.res` : single-line PASS/FAIL/SKIP result for LAVA + +--- + +## Console output (what to expect) + +You’ll see: + +- Start banner +- Tool checks +- RT kernel status (INFO or WARN) +- System context (uname, nproc, cpu online, governor, etc.) +- Progress logs every `PROGRESS_STEP` seconds while cyclictest runs +- cyclictest’s own output (the `T:` lines) +- Parsed KPI lines per iteration: + - `t0-min-latency pass ... us` + - `t0-avg-latency pass ... us` + - `t0-max-latency pass ... us` +- Final averages across iterations (t0) +- PASS/FAIL + +--- + +## LAVA integration + +### Do we need to pass variables in the `run:` step? + +Usually **no**. LAVA exports `params:` as environment variables before executing `run.steps`. +So this is typically enough: + +```yaml +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/Cyclictest + - ./run.sh || true + - $REPO_PATH/Runner/utils/send-to-lava.sh Cyclictest.res +``` + +If you want to override a param only for one step, you can still prefix env vars inline. + +--- + +## Troubleshooting + +### 1) “must run as root” / SKIP +Run via `sudo` or ensure the test is executed as root in LAVA. + +### 2) “cyclictest binary not found” +- Install `rt-tests`, or +- Provide explicit path: `--binary /path/to/cyclictest` or `BINARY=/path/to/cyclictest`. + +### 3) Latency lines not printed +Ensure JSON parsing is working: +- Check `OUT_DIR/parsed_iterN.txt` exists and contains `t*-min/avg/max-latency` lines. 
+- If the JSON format changed, update the parser in `Runner/utils/lib_rt.sh`. + +### 4) Very large max latency spikes (e.g., tens of ms) +Common causes: +- Non-RT kernel or RT throttling (`sched_rt_runtime_us`) +- CPU frequency scaling / idle states +- Interrupt storms / background load +- Thermal throttling +Try: +- Use RT kernel +- Pin affinity (`AFFINITY=0` or isolated CPU) +- Reduce system activity / background load +- Increase priority cautiously + +--- + +## Notes / Design choices + +- This wrapper is POSIX shell and ShellCheck-friendly (avoid python dependency). +- It produces `.res` and always exits `0` (LAVA-friendly), while still gating via `.res`. +- KPI lines are intended to be easy to post-process and trend. + +--- + +## Maintainers / Contribution + +If you update `lib_rt.sh`, please keep: +- POSIX compatibility +- ShellCheck cleanliness (avoid `A && B || C` for control flow, avoid unused vars) +- Robustness on minimal images diff --git a/Runner/suites/Kernel/RT-tests/Cyclictest/RT_Cyclictest.yaml b/Runner/suites/Kernel/RT-tests/Cyclictest/RT_Cyclictest.yaml new file mode 100755 index 00000000..641e9f24 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/Cyclictest/RT_Cyclictest.yaml @@ -0,0 +1,33 @@ +metadata: + name: Cyclictest + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests cyclictest and collect latency KPI in JSON; parse results without requiring python3." 
+ os: + - linux + scope: + - performance + - preempt-rt + +params: + INTERVAL: "1000" + THREADS: "1" + DURATION: "2m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + PRIO: "98" + QUIET: "true" + MLOCKALL: "true" + SMP: "false" + BINARY: "" + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + OUT_DIR: "./logs_Cyclictest" + +run: + steps: + - 'REPO_PATH="$PWD"' + - 'cd Runner/suites/Kernel/RT-tests/Cyclictest' + - './run.sh --out "${OUT_DIR}" --interval "${INTERVAL}" --threads "${THREADS}" --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --binary "${BINARY}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true' + - 'if [ -f CyclicTest.res ]; then sed "s/^CyclicTest /Cyclictest /" CyclicTest.res > Cyclictest.res; fi' + - '$REPO_PATH/Runner/utils/send-to-lava.sh Cyclictest.res' diff --git a/Runner/suites/Kernel/RT-tests/Cyclictest/run.sh b/Runner/suites/Kernel/RT-tests/Cyclictest/run.sh new file mode 100755 index 00000000..f3287bd8 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/Cyclictest/run.sh @@ -0,0 +1,262 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# CyclicTest wrapper for qcom-linux-testkit +# - Runs rt-tests cyclictest ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to CyclicTest.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use CyclicTest.res for gating. 
+ +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. "$TOOLS/lib_rt.sh" + +TESTNAME="CyclicTest" +test_path=$(find_test_case_by_name "$TESTNAME") +[ -n "$test_path" ] || test_path="$SCRIPT_DIR" + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +INTERVAL="${INTERVAL:-1000}" +THREADS="${THREADS:-1}" +DURATION="${DURATION:-5m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +PRIO="${PRIO:-98}" +QUIET="${QUIET:-true}" +MLOCKALL="${MLOCKALL:-true}" +SMP="${SMP:-false}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --interval N + --threads N + --duration STR + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --interval) + shift + INTERVAL="$1" + ;; + --threads) + shift + THREADS="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + 
--verbose) + VERBOSE=1 + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/cyclictest" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" +THREAD_AGG_KPI="$OUT_DIR/thread_agg_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" \ + "$THREAD_AGG_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run cyclictest" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +CYCLICTEST_BIN=$(rt_resolve_binary cyclictest "$BINARY" 2>/dev/null || echo "") +if [ -z "$CYCLICTEST_BIN" ] || [ ! 
-x "$CYCLICTEST_BIN" ]; then + log_skip "$TESTNAME: cyclictest binary not found/executable (${CYCLICTEST_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$CYCLICTEST_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION interval=$INTERVAL threads=$THREADS prio=$PRIO" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/cyclictest_stdout_iter${i}.log" + + set -- "$CYCLICTEST_BIN" + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + case "$MLOCKALL" in + true|TRUE|1|yes|YES) + set -- "$@" -m + ;; + esac + case "$SMP" in + true|TRUE|1|yes|YES) + set -- "$@" -S + ;; + esac + set -- "$@" -p "$PRIO" -i "$INTERVAL" -t "$THREADS" -D "$DURATION" --json="$jsonfile" + + if rt_run_json_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: cyclictest exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! 
rt_parse_and_append_iteration_kpi "cyclictest" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "cyclictest" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true +rt_emit_thread_aggregate_kpi "$TESTNAME" "cyclictest" "$ITER_KPI" "$THREAD_AGG_KPI" "$RESULT_TXT" || true + +if rt_kpi_file_has_fail "cyclictest" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/PMQTest/PMQTest_README.md b/Runner/suites/Kernel/RT-tests/PMQTest/PMQTest_README.md new file mode 100644 index 00000000..03a9418c --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PMQTest/PMQTest_README.md @@ -0,0 +1,217 @@ +<!-- +Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +SPDX-License-Identifier: BSD-3-Clause-Clear +--> + +# PMQTest (rt-tests) — Runner integration + +This test runs **pmqtest** (from the `rt-tests` suite) and reports latency KPIs per-thread and as aggregates, following the same conventions used by other RT-tests in `qcom-linux-testkit`. + +The runner is designed to be: +- **POSIX + ShellCheck clean** +- **LAVA-friendly** (writes a `.res` summary, logs to `logs_<TESTNAME>/`, exits 0) +- **Deterministic KPIs** (parses JSON output from pmqtest) + +--- + +## What PMQTest measures + +`pmqtest` is a real-time scheduling latency workload from `rt-tests`. 
It creates multiple sender/receiver threads and records receiver latencies: + +- **min** latency (µs) +- **avg** latency (µs) +- **max** latency (µs) + +In this runner, KPIs are emitted: +- **Per-thread** (`t0..tN`) for each iteration +- **Aggregated across all threads and iterations** +- **Worst-thread** (thread id with the highest observed `max` latency) + +--- + +## Prerequisites + +### On DUT +- `pmqtest` binary available on the DUT (e.g. from `rt-tests`) +- Root privileges recommended (RT priority / memlock / scheduling) +- Tools (typical): `awk`, `sed`, `grep`, `tr`, `head`, `date`, `uname`, `nproc` + +### Recommended kernel/runtime settings +- PREEMPT / RT kernel for meaningful comparison +- `sched_rt_runtime_us` configured appropriately +- `ulimit -l` (memlock) sufficiently high (runner prints current values) + +> Note: The runner may warn if the kernel does not look RT-enabled. This warning does **not** fail the test. + +--- + +## How it runs + +For each iteration, the runner executes: + +1. `pmqtest` with JSON output enabled (one JSON per iteration) +2. Parses thread metrics from the JSON and emits standardized KPI lines +3. Computes aggregates (min/mean/max) across all observed threads and iterations + +--- + +## Usage + +From the PMQTest directory: + +```sh +./run.sh --binary /tmp/pmqtest --duration 1m --iterations 3 +``` + +### Arguments + +| Option | Description | Default | +|---|---|---| +| `--binary <path>` | Path to pmqtest binary | required (or runner default if set) | +| `--duration <time>` | Test duration per iteration (e.g. `10s`, `1m`) | `1m` | +| `--iterations <N>` | Number of iterations | `1` | +| `--prio <prio>` | RT priority passed to pmqtest | `98` | +| `--extra "<opts>"` | Extra pmqtest args appended (optional) | empty | + +> The runner prints the effective `pmqtest opts:` line before running. 
+ +--- + +## Output files + +All outputs are stored under: + +- `logs_PMQTest/` (or `logs_<TESTNAME>/`) + +Typical files: + +- `pmqtest-<iter>.json` + Raw JSON generated by pmqtest for each iteration. + +- `pmqtest_stdout_iter<iter>.log` + Raw stdout captured from pmqtest. + +- `result.txt` + Standardized KPI lines emitted by the runner (per-iteration + aggregate). + +- `iter_kpi.txt`, `agg_kpi.txt` (if present) + Split KPI files for CI consumption. + +- `<TESTNAME>.res` + Single-line PASS/FAIL/SKIP summary for LAVA gating. + +--- + +## KPI format + +### Per-iteration, per-thread KPIs +The runner emits (example for iteration 1): + +```text +iteration-1-t0-min-latency pass 4 us +iteration-1-t0-avg-latency pass 7.80 us +iteration-1-t0-max-latency pass 268 us +... +iteration-1-t7-max-latency pass 71 us +iteration-1-pmqtest-ok pass 1 ok +iteration-1-pmqtest-rc pass 0 rc +iteration-1-pmqtest pass +``` + +Thread IDs (`t0..tN`) come from the pmqtest JSON `"thread"` map. On an 8-CPU DUT you typically see `t0..t7`, but the runner will emit **whatever thread count** appears in JSON. + +### Aggregate KPIs (across all threads and iterations) +After all iterations, the runner emits: + +```text +pmqtest-all-min-latency-min pass <val> us +pmqtest-all-min-latency-mean pass <val> us +pmqtest-all-min-latency-max pass <val> us + +pmqtest-all-avg-latency-min pass <val> us +pmqtest-all-avg-latency-mean pass <val> us +pmqtest-all-avg-latency-max pass <val> us + +pmqtest-all-max-latency-min pass <val> us +pmqtest-all-max-latency-mean pass <val> us +pmqtest-all-max-latency-max pass <val> us + +pmqtest-worst-thread-max-latency pass <val> us +pmqtest-worst-thread-id pass <tid> id +``` + +**Important:** These aggregates are computed from the parsed per-thread lines (`t0..tN`) across all iterations, so `worst-thread-*` truly reflects the maximum `tN-max-latency` observed across **all** threads and iterations. 
+ +--- + +## PASS / FAIL / SKIP semantics + +- **PASS** + - `pmqtest` return code is `0` for all iterations + - Parsing succeeded and KPIs were emitted + +- **FAIL** + - Any iteration return code is non-zero + - Or required artifacts are missing + +- **SKIP** + - Missing dependencies or binary not found/usable + +The runner always writes: +- `.res` summary +- detailed logs under `logs_<TESTNAME>/` + +--- + +## LAVA integration notes + +Typical LAVA test step (example): + +```yaml +- test: + timeout: + minutes: 10 + definitions: + - repository: https://github.com/qualcomm-linux/qcom-linux-testkit.git + from: git + path: Runner/suites/Kernel/RT-tests/PMQTest + name: pmqtest + parameters: + binary: /tmp/pmqtest + duration: 1m + iterations: 3 +``` + +(Adjust to your lab conventions for deploying `pmqtest` and selecting args.) + +--- + +## Troubleshooting + +### Verify aggregates are based on t0..tN +If your per-iteration section shows `t0..t7` (or `t0..tN`) lines, then aggregates and `worst-thread-*` are derived from them. + +Quick check: +- Identify the largest `iteration-*-tX-max-latency` and confirm it matches: + - `pmqtest-all-max-latency-max` + - `pmqtest-worst-thread-max-latency` + - and `pmqtest-worst-thread-id` equals that `tX`. + +Example (from your sample): +- `iteration-1-t2-max-latency ... 364 us` +- aggregates show `pmqtest-all-max-latency-max ... 364 us` +- worst thread shows `... 364 us` and `id ... 2` + +This confirms aggregates are computed across all threads. + +### Kernel not RT-enabled warning +This indicates the kernel does not appear fully RT. It does not fail the test, but latencies may be worse. Use it as a signal when comparing platforms/kernels. + +### Interpreting "worst thread" +Worst thread id is the thread with the maximum observed `tN-max-latency` across all iterations. 
+ +--- + +## Maintainers +- Qualcomm Linux Testkit team +- RT-tests suite: upstream `rt-tests` project (pmqtest) diff --git a/Runner/suites/Kernel/RT-tests/PMQTest/pmqtest.yaml b/Runner/suites/Kernel/RT-tests/PMQTest/pmqtest.yaml new file mode 100755 index 00000000..752938e9 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PMQTest/pmqtest.yaml @@ -0,0 +1,31 @@ +metadata: + name: PMQTest + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests pmqtest (POSIX message queue latency) in JSON mode and parse results without requiring python3." + os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "2m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + PRIO: "98" + MODE_S: "true" + QUIET: "true" + + BINARY: "" + OUT_DIR: "./logs_PMQTest" + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/PMQTest + - PRIO="${PRIO}" MODE_S="${MODE_S}" QUIET="${QUIET}" ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PMQTest.res diff --git a/Runner/suites/Kernel/RT-tests/PMQTest/run.sh b/Runner/suites/Kernel/RT-tests/PMQTest/run.sh new file mode 100755 index 00000000..b0ae2471 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PMQTest/run.sh @@ -0,0 +1,249 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# PMQTest wrapper for qcom-linux-testkit +# - Runs rt-tests pmqtest ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to PMQTest.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use PMQTest.res for gating. 
+ +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. "$TOOLS/lib_rt.sh" + +TESTNAME="PMQTest" +test_path=$(find_test_case_by_name "$TESTNAME") +[ -n "$test_path" ] || test_path="$SCRIPT_DIR" + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +DURATION="${DURATION:-5m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +PRIO="${PRIO:-98}" +MODE_S="${MODE_S:-true}" +QUIET="${QUIET:-true}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + +Options: + --out DIR + --result FILE + --duration STR + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/pmqtest" 
+TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" +THREAD_AGG_KPI="$OUT_DIR/thread_agg_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" \ + "$THREAD_AGG_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run pmqtest" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +PMQ_BIN=$(rt_resolve_binary pmqtest "$BINARY" 2>/dev/null || echo "") +if [ -z "$PMQ_BIN" ] || [ ! -x "$PMQ_BIN" ]; then + log_skip "$TESTNAME: pmqtest binary not found/executable (${PMQ_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$PMQ_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION prio=$PRIO" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/pmqtest_stdout_iter${i}.log" + + set -- "$PMQ_BIN" + + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + + case "$MODE_S" in + true|TRUE|1|yes|YES) + set -- "$@" -S + ;; + esac + + set -- "$@" -p "$PRIO" -D "$DURATION" --json="$jsonfile" + + if rt_run_json_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" 
"$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: pmqtest exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! rt_parse_and_append_iteration_kpi "pmqtest" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "pmqtest" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true +rt_emit_thread_aggregate_kpi "$TESTNAME" "pmqtest" "$ITER_KPI" "$THREAD_AGG_KPI" "$RESULT_TXT" || true + +if rt_kpi_file_has_fail "pmqtest" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/PTSEMATest/PTSEMATest_README.md b/Runner/suites/Kernel/RT-tests/PTSEMATest/PTSEMATest_README.md new file mode 100644 index 00000000..ef44bcd5 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PTSEMATest/PTSEMATest_README.md @@ -0,0 +1,196 @@ +# PTSEMATest (rt-tests) — qcom-linux-testkit + +PTSEMATest is a wrapper around **rt-tests `ptsematest`** to measure POSIX threads synchronization latency (threads synchronized using POSIX primitives, reported per-thread min/avg/max latencies). 
+ +This test case is integrated into **qcom-linux-testkit** with a consistent flow used across other RT-tests: + +- Runs `ptsematest` in **JSON** mode (`--json=<file>`) +- Parses KPI using `Runner/utils/lib_rt.sh` (**no python required**) +- Writes detailed KPI lines to `result.txt` +- Writes a **PASS/FAIL/SKIP** summary to `PTSEMATest.res` +- Always exits `0` (LAVA-friendly). Use `PTSEMATest.res` for gating. + +--- + +## Location + +``` +Runner/suites/Kernel/RT-tests/PTSEMATest/ +``` + +--- + +## Prerequisites + +- Must run as **root** +- `ptsematest` binary must be present and executable: + - Either available in `PATH` (e.g. provided by rt-tests package), or + - Provided explicitly via `--binary <path>` +- `init_env` must exist somewhere above the test directory so the runner can load: + - `Runner/utils/functestlib.sh` + - `Runner/utils/lib_rt.sh` + +The script performs dependency checks for basic tools (e.g. `uname`, `awk`, `grep`, `tr`, `tee`, etc.). If required pieces are missing, the test will **SKIP**. + +--- + +## What gets measured + +For each iteration, the test produces per-thread latency KPIs emitted by `perf_parse_rt_tests_json` (from `lib_rt.sh`): + +- `t<tid>-min-latency pass <N> us` +- `t<tid>-avg-latency pass <N> us` +- `t<tid>-max-latency pass <N> us` +- `ptsematest-ok pass 1 ok` (or `0 ok` on failure) +- `ptsematest-rc pass <rc> rc` +- `ptsematest pass|fail` + +The wrapper prefixes iteration KPI lines like: + +- `iteration-1-t0-max-latency pass 268 us` +- `iteration-2-t7-avg-latency pass 2.47 us` +- ... + +Aggregate KPIs are computed from **ALL threads across ALL iterations** using `rt_aggregate_iter_latencies` (from `lib_rt.sh`), for example: + +- `ptsematest-all-max-latency-min pass ... us` +- `ptsematest-all-max-latency-mean pass ... us` +- `ptsematest-all-max-latency-max pass ... us` +- `ptsematest-worst-thread-max-latency pass ... us` +- `ptsematest-worst-thread-id pass ... 
id` + +--- + +## Running locally + +### Examples + +Run `ptsematest` for 1 minute, 2 iterations, with explicit binary: + +```sh +cd Runner/suites/Kernel/RT-tests/PTSEMATest +./run.sh --binary /tmp/ptsematest --duration 1m --iterations 2 +``` + +Add an optional background workload (example only): + +```sh +./run.sh --duration 2m --iterations 3 --background-cmd "stress-ng --cpu 4 --timeout 2m" +``` + +### CLI options + +`run.sh` supports the same parameter flow as other RT-tests wrappers: + +- `--duration <STR>` : Duration passed to `ptsematest -D` (default: `5m`) +- `--iterations <N>` : Number of iterations (default: `1`) +- `--background-cmd <CMD>` : Optional background workload during measurement +- `--binary <PATH>` : Explicit `ptsematest` binary path +- `--out <DIR>` : Output directory (default: `./logs_PTSEMATest` under the test directory) +- `--result <FILE>` : Output KPI file (default: `<OUT_DIR>/result.txt`) +- `--prio <N>` : RT priority (default: `98`) +- `--mode-s <true|false>` : Include `-S` (default: `true`) +- `--quiet <true|false>` : Include `-q` (default: `true`) +- `--verbose` : Additional logs +- `-h|--help` : Help + +> Note: The wrapper is designed to be POSIX/ShellCheck-friendly and consistent with other RT-tests in this repo. + +--- + +## Output files + +By default outputs are written under: + +``` +Runner/suites/Kernel/RT-tests/PTSEMATest/logs_PTSEMATest/ +``` + +Typical outputs: + +- `PTSEMATest.res` + Summary line for gating: + - `PTSEMATest PASS` + - `PTSEMATest FAIL` + - `PTSEMATest SKIP` + +- `logs_PTSEMATest/result.txt` + KPI lines for LAVA artifact capture / debugging. + +- `logs_PTSEMATest/ptsematest-<iter>.json` + Raw JSON from `ptsematest` for each iteration. + +- `logs_PTSEMATest/ptsematest_stdout_iter<iter>.log` + Console/stdout log captured per iteration. 
+ +--- + +## LAVA YAML (test definition) + +Use the standardized RT-tests YAML flow used across this repo: + +```yaml +metadata: + name: ptsematest + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests ptsematest in JSON mode and parse results without requiring python3." + os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "5m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + PRIO: "98" + MODE_S: "true" + QUIET: "true" + + BINARY: "" + OUT_DIR: "./logs_PTSEMATest" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/PTSEMATest + - ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --prio "${PRIO}" --mode-s "${MODE_S}" --quiet "${QUIET}" --binary "${BINARY}" --out "${OUT_DIR}" || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PTSEMATest.res +``` + +--- + +## Troubleshooting + +### Clock looks invalid (1970) + +RT latency numbers can be misleading if the system time starts at epoch. The runner uses `ensure_reasonable_clock` (if available) and may seed time from kernel build time when no network is available. + +If your environment has NTP or RTC issues, fix them before relying on performance baselines. + +### Binary not found + +- Ensure `ptsematest` is installed and in `PATH`, or +- Provide an explicit path: `--binary /tmp/ptsematest` + +### Result says SKIP + +Common causes: +- Missing dependencies (basic shell tools) +- `lib_rt.sh` not loaded via `init_env` +- Binary not executable + +Check console logs for the SKIP reason and confirm `init_env` + `Runner/utils` are present. + +--- + +## Notes for CI + +- The runner always exits `0`. 
CI/LAVA should gate on: + - `Runner/suites/Kernel/RT-tests/PTSEMATest/PTSEMATest.res` +- For deeper analysis, archive: + - `logs_PTSEMATest/result.txt` + - `logs_PTSEMATest/*.json` + - `logs_PTSEMATest/*stdout*` diff --git a/Runner/suites/Kernel/RT-tests/PTSEMATest/ptsematest.yaml b/Runner/suites/Kernel/RT-tests/PTSEMATest/ptsematest.yaml new file mode 100644 index 00000000..85c93edc --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PTSEMATest/ptsematest.yaml @@ -0,0 +1,31 @@ +metadata: + name: PTSEMATest + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests ptsematest (POSIX threads semaphore/mutex latency) in JSON mode and parse results without requiring python3." + os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "2m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + PRIO: "98" + MODE_S: "true" + QUIET: "true" + + BINARY: "" + OUT_DIR: "./logs_PTSEMATest" + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/PTSEMATest + - PRIO="${PRIO}" MODE_S="${MODE_S}" QUIET="${QUIET}" ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PTSEMATest.res diff --git a/Runner/suites/Kernel/RT-tests/PTSEMATest/run.sh b/Runner/suites/Kernel/RT-tests/PTSEMATest/run.sh new file mode 100755 index 00000000..14878626 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PTSEMATest/run.sh @@ -0,0 +1,245 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# SPDX-License-Identifier: BSD-3-Clause +# +# PTSEMATest wrapper for qcom-linux-testkit +# - Runs rt-tests ptsematest ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to PTSEMATest.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use PTSEMATest.res for gating. + +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" + +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. "$TOOLS/lib_rt.sh" + +TESTNAME="PTSEMATest" +test_path=$(find_test_case_by_name "$TESTNAME") +[ -n "$test_path" ] || test_path="$SCRIPT_DIR" + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +DURATION="${DURATION:-5m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +PRIO="${PRIO:-98}" +MODE_S="${MODE_S:-true}" +QUIET="${QUIET:-true}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --duration STR + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + 
BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/ptsematest" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run ptsematest" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +PSEMA_BIN=$(rt_resolve_binary ptsematest "$BINARY" 2>/dev/null || echo "") +if [ -z "$PSEMA_BIN" ] || [ ! 
-x "$PSEMA_BIN" ]; then + log_skip "$TESTNAME: ptsematest binary not found/executable (${PSEMA_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$PSEMA_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION prio=$PRIO" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/ptsematest_stdout_iter${i}.log" + + set -- "$PSEMA_BIN" + + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + + case "$MODE_S" in + true|TRUE|1|yes|YES) + set -- "$@" -S + ;; + esac + + set -- "$@" -p "$PRIO" -D "$DURATION" --json="$jsonfile" + + if rt_run_json_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: ptsematest exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! 
rt_parse_and_append_iteration_kpi "ptsematest" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "ptsematest" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true + +if rt_kpi_file_has_fail "ptsematest" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/RTMigrateTest/RTMigrateTest_README.md b/Runner/suites/Kernel/RT-tests/RTMigrateTest/RTMigrateTest_README.md new file mode 100644 index 00000000..2eb34f5f --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/RTMigrateTest/RTMigrateTest_README.md @@ -0,0 +1,157 @@ +# RTMigrateTest + +`RTMigrateTest` wraps the **rt-tests** `rt-migrate-test` workload in the **qcom-linux-testkit** RT-tests framework. + +It follows the same single-flow style as other RT wrappers (e.g., **PMQTest**, **PTSEMATest**): + +- Locates `init_env` from the repository root +- Sources `functestlib.sh` and `lib_rt.sh` +- Runs `rt-migrate-test` in JSON mode for `ITERATIONS` +- Parses KPI lines via `perf_parse_rt_tests_json` (no python required) +- Produces: + - `logs_RTMigrateTest/result.txt` (all KPI lines, per-iteration + aggregate) + - `RTMigrateTest.res` (PASS/FAIL/SKIP summary for LAVA gating) +- Always exits `0` (LAVA-friendly). Use the `.res` file to gate. + +--- + +## Location + +``` +Runner/suites/Kernel/RT-tests/RTMigrateTest/ +├── run.sh +├── RTMigrateTest.yaml +└── README.md +``` + +--- + +## What it measures + +`rt-migrate-test` verifies **RT thread scheduler balancing** and migration behavior under RT scheduling. 
The wrapper extracts per-thread latency metrics (when present in the JSON) and aggregates them across threads and iterations. + +Typical KPIs (examples): + +- `t<id>-min-latency pass <val> us` +- `t<id>-avg-latency pass <val> us` +- `t<id>-max-latency pass <val> us` + +Aggregate KPIs: + +- `<prefix>-all-min-latency-{min,mean,max} pass <val> us` +- `<prefix>-all-avg-latency-{min,mean,max} pass <val> us` +- `<prefix>-all-max-latency-{min,mean,max} pass <val> us` +- `<prefix>-worst-thread-max-latency pass <val> us` +- `<prefix>-worst-thread-id pass <tid> id` + +> Note: The exact KPI set depends on the JSON schema emitted by your `rt-migrate-test` build. The wrapper is compatible with the same thread-style JSON format used by other rt-tests workloads. + +--- + +## Requirements + +### Runtime requirements + +- Root access (RT scheduling + priority usage) +- `rt-migrate-test` binary available on the target + +### Framework requirements + +- `Runner/init_env` +- `Runner/utils/functestlib.sh` +- `Runner/utils/lib_rt.sh` (must provide `perf_parse_rt_tests_json` and `rt_aggregate_iter_latencies`) + +--- + +## Running locally + +From the test directory: + +```sh +cd Runner/suites/Kernel/RT-tests/RTMigrateTest +./run.sh +``` + +### Common examples + +Run for 1 minute, 3 iterations: + +```sh +./run.sh --duration 1m --iterations 3 +``` + +Run with a background workload (example): + +```sh +./run.sh --duration 2m --iterations 2 --background-cmd "stress-ng --cpu 4 --timeout 120s" +``` + +Use an explicit binary path: + +```sh +./run.sh --binary /tmp/rt-migrate-test --duration 1m +``` + +Override output directory: + +```sh +./run.sh --out ./logs_RTMigrateTest --duration 1m --iterations 2 +``` + +--- + +## Parameters + +Parameters can be provided either via environment variables (LAVA `params`) or via CLI flags. 
+ +| Parameter | Default | Meaning | +|---|---:|---| +| `DURATION` / `--duration` | `5m` | How long each iteration runs (`rt-migrate-test -D`) | +| `ITERATIONS` / `--iterations` | `1` | Number of iterations | +| `BACKGROUND_CMD` / `--background-cmd` | empty | Background workload started during the test | +| `PRIO` / `--prio` | `51` | Lowest thread RT priority (`rt-migrate-test -p`) | +| `QUIET` / `--quiet` | `true` | Add `-q` | +| `MODE_S` / `--mode-s` | `true` | Add `-S` | +| `MODE_C` / `--mode-c` | `true` | Add `-c` | +| `BINARY` / `--binary` | empty | Explicit `rt-migrate-test` path | +| `OUT_DIR` / `--out` | `./logs_RTMigrateTest` | Output directory | + +--- + +## Output files + +- **`${OUT_DIR}/result.txt`** + - Contains all KPI lines: + - per-iteration KPIs (prefixed with `iteration-<N>-`) + - aggregate KPIs (computed across all iterations/threads) + +- **`${OUT_DIR}/rt-migrate-test-<N>.json`** + - JSON output from each iteration + +- **`RTMigrateTest.res`** + - One line summary for LAVA gating: + - `RTMigrateTest PASS` + - `RTMigrateTest FAIL` + - `RTMigrateTest SKIP` + +--- + +## LAVA usage + +Use `RTMigrateTest.yaml` as a test definition (or include its steps in your job). + +The YAML follows the standardized format used by other RT-tests in this repo. + +Key steps: + +1. `cd Runner/suites/Kernel/RT-tests/RTMigrateTest` +2. Run `./run.sh` with parameters +3. Send the `.res` file via `Runner/utils/send-to-lava.sh` + +--- + +## Notes / Troubleshooting + +- If the kernel is not PREEMPT_RT-enabled, results may be worse. The wrapper logs a warning via `rt_log_kernel_rt_status`. +- If `rt-migrate-test` is missing or not executable, the test is marked **SKIP**. +- If any iteration fails or parsing detects a `fail` verdict line, overall result becomes **FAIL** (written to `RTMigrateTest.res`). 
diff --git a/Runner/suites/Kernel/RT-tests/RTMigrateTest/rt-migrate-test-full.yaml b/Runner/suites/Kernel/RT-tests/RTMigrateTest/rt-migrate-test-full.yaml new file mode 100755 index 00000000..ae0725f7 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/RTMigrateTest/rt-migrate-test-full.yaml @@ -0,0 +1,38 @@ +metadata: + name: RTMigrateTest-full + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests rt-migrate-test with extended options in JSON mode and parse results without requiring python3." + os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "2m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + PRIO: "51" + QUIET: "true" + CHECK: "false" + EQUAL: "false" + LOOPS: "" + MAXERR_US: "" + RUN_TIME_MS: "" + SLEEP_TIME_MS: "" + NR_TASKS: "" + + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + VERBOSE: "0" + + BINARY: "" + OUT_DIR: "./logs_RTMigrateTest" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/RTMigrateTest + - ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --prio "${PRIO}" --quiet "${QUIET}" --check "${CHECK}" --equal "${EQUAL}" --loops "${LOOPS}" --maxerr-us "${MAXERR_US}" --run-time-ms "${RUN_TIME_MS}" --sleep-time-ms "${SLEEP_TIME_MS}" --nr-tasks "${NR_TASKS}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" --out "${OUT_DIR}" --binary "${BINARY}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh RTMigrateTest.res diff --git a/Runner/suites/Kernel/RT-tests/RTMigrateTest/rt-migrate-test.yaml b/Runner/suites/Kernel/RT-tests/RTMigrateTest/rt-migrate-test.yaml new file mode 100755 index 00000000..196788a5 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/RTMigrateTest/rt-migrate-test.yaml @@ -0,0 +1,32 @@ +metadata: + name: RTMigrateTest + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests rt-migrate-test in JSON mode and parse results without requiring python3." 
+ os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "2m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + PRIO: "51" + QUIET: "true" + CHECK: "false" + EQUAL: "false" + + BINARY: "" + OUT_DIR: "./logs_RTMigrateTest" + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/RTMigrateTest + - ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --prio "${PRIO}" --quiet "${QUIET}" --check "${CHECK}" --equal "${EQUAL}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh RTMigrateTest.res diff --git a/Runner/suites/Kernel/RT-tests/RTMigrateTest/run.sh b/Runner/suites/Kernel/RT-tests/RTMigrateTest/run.sh new file mode 100755 index 00000000..3fb98598 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/RTMigrateTest/run.sh @@ -0,0 +1,333 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# RTMigrateTest wrapper for qcom-linux-testkit +# - Runs rt-tests rt-migrate-test ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to RTMigrateTest.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use RTMigrateTest.res for gating. + +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . 
"$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. "$TOOLS/lib_rt.sh" + +TESTNAME="RTMigrateTest" +test_path=$(find_test_case_by_name "$TESTNAME") +[ -n "$test_path" ] || test_path="$SCRIPT_DIR" + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +DURATION="${DURATION:-1m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +PRIO="${PRIO:-51}" +QUIET="${QUIET:-true}" +CHECK="${CHECK:-false}" +EQUAL="${EQUAL:-false}" +LOOPS="${LOOPS:-}" +MAXERR_US="${MAXERR_US:-}" +RUN_TIME_MS="${RUN_TIME_MS:-}" +SLEEP_TIME_MS="${SLEEP_TIME_MS:-}" +NR_TASKS="${NR_TASKS:-}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --duration STR + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose + --prio N + --quiet BOOL + --check BOOL + --equal BOOL + --loops N + --maxerr-us N + --run-time-ms N + --sleep-time-ms N + --nr-tasks N +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + --prio) + shift + PRIO="$1" + ;; + --quiet) + shift + QUIET="$1" + ;; + --check) + shift + CHECK="$1" + ;; + --equal) + shift + EQUAL="$1" + ;; + --loops) + shift + LOOPS="$1" + ;; + --maxerr-us) + shift + MAXERR_US="$1" + ;; + --run-time-ms) + shift + RUN_TIME_MS="$1" + ;; + --sleep-time-ms) + 
shift + SLEEP_TIME_MS="$1" + ;; + --nr-tasks) + shift + NR_TASKS="$1" + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/rt-migrate-test" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" +THREAD_AGG_KPI="$OUT_DIR/thread_agg_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" \ + "$THREAD_AGG_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run rt-migrate-test" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +case "$PRIO" in ''|*[!0-9]*) PRIO=51 ;; esac +case "$LOOPS" in ''|*[!0-9]*) LOOPS="" ;; esac +case "$MAXERR_US" in ''|*[!0-9]*) MAXERR_US="" ;; esac +case "$RUN_TIME_MS" in ''|*[!0-9]*) RUN_TIME_MS="" ;; esac +case "$SLEEP_TIME_MS" in ''|*[!0-9]*) SLEEP_TIME_MS="" ;; esac +case "$NR_TASKS" in ''|*[!0-9]*) NR_TASKS="" ;; esac + +RTM_BIN=$(rt_resolve_binary rt-migrate-test "$BINARY" 2>/dev/null || echo "") +if [ -z "$RTM_BIN" ] || [ ! 
-x "$RTM_BIN" ]; then + log_skip "$TESTNAME: rt-migrate-test binary not found/executable (${RTM_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$RTM_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION prio=$PRIO" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/rt_migrate_test_stdout_iter${i}.log" + + set -- "$RTM_BIN" + + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + + case "$CHECK" in + true|TRUE|1|yes|YES) + set -- "$@" -c + ;; + esac + + case "$EQUAL" in + true|TRUE|1|yes|YES) + set -- "$@" -e + ;; + esac + + if [ -n "$LOOPS" ]; then + set -- "$@" -l "$LOOPS" + fi + + if [ -n "$MAXERR_US" ]; then + set -- "$@" -m "$MAXERR_US" + fi + + if [ -n "$RUN_TIME_MS" ]; then + set -- "$@" -r "$RUN_TIME_MS" + fi + + if [ -n "$SLEEP_TIME_MS" ]; then + set -- "$@" -s "$SLEEP_TIME_MS" + fi + + set -- "$@" -p "$PRIO" -D "$DURATION" + + if [ -n "$NR_TASKS" ]; then + set -- "$@" "$NR_TASKS" + fi + + set -- "$@" --json="$jsonfile" + + if rt_run_json_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: rt-migrate-test exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + log_fail "$TESTNAME: missing json output: 
$jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! rt_parse_and_append_iteration_kpi "rt-migrate-test" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "rt-migrate-test" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true +rt_emit_thread_aggregate_kpi "$TESTNAME" "rt-migrate-test" "$ITER_KPI" "$THREAD_AGG_KPI" "$RESULT_TXT" || true + +if rt_kpi_file_has_fail "rt-migrate-test" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/SignalTest/SignalTest_README.md b/Runner/suites/Kernel/RT-tests/SignalTest/SignalTest_README.md new file mode 100644 index 00000000..aaefb292 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SignalTest/SignalTest_README.md @@ -0,0 +1,232 @@ +# SignalTest (RT signal roundtrip latency) + +This test wraps **`signaltest`** from the *rt-tests* suite and integrates it into the **qcom-linux-testkit** runner style: + +- Runs `signaltest` for a configurable duration and number of iterations +- Captures per-iteration JSON output +- Parses KPIs via `lib_rt.sh` (no Python required) +- Produces: + - `SignalTest.res` (PASS/FAIL/SKIP summary for LAVA gating) + - `logs_SignalTest/result.txt` (detailed KPI lines for LAVA upload / human review) + - additional debug logs and aggregate KPI files + +> **LAVA behavior:** `run.sh` always exits `0` (LAVA-friendly). Use `SignalTest.res` as the gating signal. 
+ +--- + +## Location + +Typical path in repo: + +``` +Runner/suites/Kernel/RT-tests/SignalTest/ + run.sh + signaltest.yaml + SignalTest_README.md +``` + +--- + +## What this test measures + +`signaltest` measures **signal roundtrip latency** between real-time threads. It reports per-thread statistics such as: +- min latency (µs) +- average latency (µs) +- max latency (µs) + +The wrapper records these per iteration and also computes aggregates across: +- **all iterations + all threads** +- **per-thread across iterations** + +--- + +## Requirements + +### Runtime dependencies +- `signaltest` binary (from rt-tests) +- Common userland tools used by the wrapper: + - `uname awk sed grep tr head tail mkdir cat sh tee sleep kill date` +- The test is typically run as **root** (RT scheduling + mlockall, etc.) + +### qcom-linux-testkit dependencies +`run.sh` expects the standard testkit environment: + +- `init_env` available somewhere above this directory (auto-discovered) +- `${TOOLS}/functestlib.sh` available (loaded via init_env) +- `${TOOLS}/lib_rt.sh` available (loaded via init_env) + - Must provide JSON parsing helpers such as `perf_parse_rt_tests_json` + - Must provide aggregation helpers such as `rt_aggregate_iter_latencies` + - Optional: `rt_print_kpi_block` for pretty KPI blocks + +If any of the required components are missing, the test will **SKIP** and write `SignalTest SKIP` to `SignalTest.res`. 
+ +--- + +## Default behavior (matches Linaro test-definitions defaults) + +Defaults follow the *linaro test-definitions* signaltest baseline: + +| Parameter | Default | Meaning | +|---|---:|---| +| Duration | `1m` | `signaltest -D` runtime | +| Priority | `98` | `signaltest -p` | +| Threads | `2` | `signaltest -t` | +| Iterations | `1` | wrapper-level iterations | +| Background cmd | empty | optional stress/workload | +| Quiet | enabled | `-q` summary only | +| Mlockall | enabled | `-m` lock memory | +| Affinity | enabled | `-a` try to pin threads (best-effort) | + +> Note: Threads can be higher than `nproc`. `signaltest` will still run, but results may reflect oversubscription effects. + +--- + +## Usage + +### Run locally (recommended) + +From the test directory: + +```sh +./run.sh +``` + +### Typical override example + +```sh +./run.sh --binary /tmp/signaltest --duration 1m --iterations 3 --prio 98 --threads 24 +``` + +### Show help + +```sh +./run.sh --help +``` + +--- + +## `run.sh` options + +The wrapper accepts long options (testkit style). 
Common options: + +| Option | Example | Description | +|---|---|---| +| `--duration` | `--duration 5m` | `signaltest -D` runtime (`s/m/h/d` supported) | +| `--iterations` | `--iterations 3` | Number of iterations (wrapper loops) | +| `--background-cmd` | `--background-cmd "stress-ng --cpu 4"` | Optional background workload | +| `--binary` | `--binary /tmp/signaltest` | Explicit `signaltest` path | +| `--out` | `--out ./logs_SignalTest` | Output directory | +| `--result` | `--result ./logs_SignalTest/result.txt` | Result file path | +| `--progress-every` | `--progress-every 5` | Progress log frequency | +| `--verbose` | `--verbose` | Additional debug logs | +| `--prio` | `--prio 98` | `signaltest -p` priority | +| `--threads` | `--threads 2` | `signaltest -t` threads | +| `--quiet` | `--quiet true` | Enable `-q` | +| `--mlockall` | `--mlockall true` | Enable `-m` | +| `--affinity` | `--affinity true` | Enable `-a` | +| `--loops` | `--loops 1000` | `signaltest -l` loops | +| `--breaktrace-us` | `--breaktrace-us 50` | `signaltest -b USEC` | +| `--json` | *(internal)* | Wrapper always uses `--json=FILE` per iteration | + +> The wrapper uses `--json=FILENAME` (equals form), matching `signaltest` expectations. + +--- + +## Outputs + +After running, you should see: + +``` +SignalTest.res +logs_SignalTest/ + result.txt + iter_kpi.txt + agg_kpi.txt + thread_agg_kpi.txt + signaltest-1.json + signaltest-2.json + ... + signaltest_stdout_iter1.log + signaltest_stdout_iter2.log + ... 
+``` + +### Meaning of key files +- **`SignalTest.res`**: Single-line PASS/FAIL/SKIP (for CI/LAVA gating) +- **`logs_SignalTest/result.txt`**: Detailed KPI lines (per-iteration + aggregates) +- **`iter_kpi.txt`**: Parsed KPIs per iteration, prefixed with `iteration-N-...` +- **`agg_kpi.txt`**: Aggregates across all iterations + all threads +- **`thread_agg_kpi.txt`**: Aggregates per thread (t0..tN) across iterations +- **`signaltest_stdout_iterN.log`**: Raw stdout/stderr for debugging +- **`signaltest-N.json`**: Raw JSON output from `signaltest` + +--- + +## Interpreting results + +Example KPI lines (from `result.txt`): + +- Per-iteration: + - `iteration-1-t0-min-latency pass 187 us` + - `iteration-1-t0-avg-latency pass 4801.31 us` + - `iteration-1-t0-max-latency pass 7747 us` +- Aggregate (all threads/iterations): + - `signaltest-all-max-latency-max pass 18796 us` + - `signaltest-worst-thread-id pass 20 id` + +If any iteration run fails, JSON is missing, or parsing fails, the wrapper marks the test as **FAIL**. + +--- + +## LAVA integration + +A typical qcom-linux-testkit LAVA test definition will: +1. `cd` into the test directory +2. execute `./run.sh ... || true` +3. upload/send result file via `send-to-lava.sh SignalTest.res` + +Example `run` steps: + +```yaml +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/SignalTest + - ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --prio "${PRIO}" --threads "${THREADS}" --binary "${BINARY}" --out "${OUT_DIR}" || true + - $REPO_PATH/Runner/utils/send-to-lava.sh SignalTest.res +``` + +--- + +## Notes / best practices + +- Run on an RT-enabled kernel for meaningful RT latency characterization. +- Consider setting CPU governor to `performance` for tighter jitter bounds, if allowed by your lab policy. +- Oversubscribing threads (threads >> cores) can inflate average/max latency and jitter. 
+- Use `BACKGROUND_CMD` to reproduce realistic system load conditions. + +--- + +## Troubleshooting + +### Test is SKIP +Common causes: +- `signaltest` binary not found or not executable +- `lib_rt.sh` not loaded or missing parser/aggregator helpers +- missing basic tools + +### Test is FAIL +Common causes: +- `signaltest` non-zero exit code +- JSON output not created +- Parser failed (malformed JSON or unexpected schema) + +Check: +- `logs_SignalTest/signaltest_stdout_iterN.log` +- `logs_SignalTest/signaltest-N.json` +- `logs_SignalTest/result.txt` + +--- + +## Maintainers + +Qualcomm Linux Testkit team (internal). Update this README alongside any interface changes to `run.sh`. diff --git a/Runner/suites/Kernel/RT-tests/SignalTest/run.sh b/Runner/suites/Kernel/RT-tests/SignalTest/run.sh new file mode 100755 index 00000000..74da0ded --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SignalTest/run.sh @@ -0,0 +1,337 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# SignalTest wrapper for qcom-linux-testkit +# - Runs rt-tests signaltest ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to SignalTest.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use SignalTest.res for gating. + +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. 
"$TOOLS/lib_rt.sh" + +TESTNAME="SignalTest" +test_path=$(find_test_case_by_name "$TESTNAME") +[ -n "$test_path" ] || test_path="$SCRIPT_DIR" + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +DURATION="${DURATION:-1m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +PRIO="${PRIO:-98}" +THREADS="${THREADS:-2}" +QUIET="${QUIET:-true}" +AFFINITY="${AFFINITY:-true}" +AFFINITY_CPU="${AFFINITY_CPU:-}" +MLOCKALL="${MLOCKALL:-true}" +BREAKTRACE_US="${BREAKTRACE_US:-}" +LOOPS="${LOOPS:-}" +SIG_VERBOSE="${SIG_VERBOSE:-false}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --duration STR + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose + --prio N + --threads N + --quiet BOOL + --affinity BOOL + --affinity-cpu N + --mlockall BOOL + --breaktrace-us N + --loops N + --sig-verbose BOOL +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + --prio) + shift + PRIO="$1" + ;; + --threads) + shift + THREADS="$1" + ;; + --quiet) + shift + QUIET="$1" + ;; + --affinity) + shift + AFFINITY="$1" + ;; + --affinity-cpu) + shift + AFFINITY_CPU="$1" + ;; + --mlockall) + shift + MLOCKALL="$1" + ;; + --breaktrace-us) + shift + BREAKTRACE_US="$1" + ;; + --loops) + shift + LOOPS="$1" + ;; + --sig-verbose) + shift + SIG_VERBOSE="$1" + ;; + *) + 
log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/signaltest" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" +THREAD_AGG_KPI="$OUT_DIR/thread_agg_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" \ + "$THREAD_AGG_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run signaltest" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +case "$PRIO" in ''|*[!0-9]*) PRIO=98 ;; esac +case "$THREADS" in ''|*[!0-9]*) THREADS=2 ;; esac +case "$AFFINITY_CPU" in ''|*[!0-9]*) AFFINITY_CPU="" ;; esac +case "$BREAKTRACE_US" in ''|*[!0-9]*) BREAKTRACE_US="" ;; esac +case "$LOOPS" in ''|*[!0-9]*) LOOPS="" ;; esac + +SIG_BIN=$(rt_resolve_binary signaltest "$BINARY" 2>/dev/null || echo "") +if [ -z "$SIG_BIN" ] || [ ! 
-x "$SIG_BIN" ]; then + log_skip "$TESTNAME: signaltest binary not found/executable (${SIG_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if [ "$THREADS" -eq 0 ] 2>/dev/null; then + if command -v nproc >/dev/null 2>&1; then + THREADS=$(nproc 2>/dev/null || echo 0) + else + THREADS=0 + fi + case "$THREADS" in ''|*[!0-9]*|0) THREADS=1 ;; esac +fi + +rt_log_common_runtime_env "$TESTNAME" "$SIG_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION prio=$PRIO threads=$THREADS" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/signaltest_stdout_iter${i}.log" + + set -- "$SIG_BIN" + + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + + case "$MLOCKALL" in + true|TRUE|1|yes|YES) + set -- "$@" -m + ;; + esac + + case "$SIG_VERBOSE" in + true|TRUE|1|yes|YES) + set -- "$@" -v + ;; + esac + + case "$AFFINITY" in + true|TRUE|1|yes|YES) + if [ -n "$AFFINITY_CPU" ]; then + set -- "$@" -a "$AFFINITY_CPU" + else + set -- "$@" -a + fi + ;; + esac + + if [ -n "$BREAKTRACE_US" ]; then + set -- "$@" -b "$BREAKTRACE_US" + fi + + if [ -n "$LOOPS" ]; then + set -- "$@" -l "$LOOPS" + fi + + set -- "$@" -p "$PRIO" -t "$THREADS" -D "$DURATION" --json="$jsonfile" + + if rt_run_json_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: 
signaltest exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! rt_parse_and_append_iteration_kpi "signaltest" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "signaltest" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true +rt_emit_thread_aggregate_kpi "$TESTNAME" "signaltest" "$ITER_KPI" "$THREAD_AGG_KPI" "$RESULT_TXT" || true + +if rt_kpi_file_has_fail "signaltest" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/SignalTest/signaltest.yaml b/Runner/suites/Kernel/RT-tests/SignalTest/signaltest.yaml new file mode 100644 index 00000000..11509bf2 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SignalTest/signaltest.yaml @@ -0,0 +1,38 @@ +metadata: + name: SignalTest + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests signaltest (RT signal roundtrip) in JSON mode and parse results without requiring python3." 
+ os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "1m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + PRIO: "98" + THREADS: "2" + QUIET: "true" + AFFINITY: "true" + AFFINITY_CPU: "" + MLOCKALL: "true" + BREAKTRACE_US: "" + LOOPS: "" + SIG_VERBOSE: "false" + + BINARY: "" + OUT_DIR: "./logs_SignalTest" + + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/SignalTest + - ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --prio "${PRIO}" --threads "${THREADS}" --quiet "${QUIET}" --affinity "${AFFINITY}" --affinity-cpu "${AFFINITY_CPU}" --mlockall "${MLOCKALL}" --breaktrace-us "${BREAKTRACE_US}" --loops "${LOOPS}" --sig-verbose "${SIG_VERBOSE}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh SignalTest.res diff --git a/Runner/suites/Kernel/RT-tests/SigwaitTest/SigwaitTest_README.md b/Runner/suites/Kernel/RT-tests/SigwaitTest/SigwaitTest_README.md new file mode 100644 index 00000000..124803ba --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SigwaitTest/SigwaitTest_README.md @@ -0,0 +1,244 @@ +# SigwaitTest + +`SigwaitTest` is a qcom-linux-testkit wrapper around **rt-tests** `sigwaittest`, which measures the latency between sending a signal and returning from `sigwait()`. + +This test: +- Runs `sigwaittest` in **JSON output** mode (one JSON per iteration) +- Parses KPIs using **`lib_rt.sh`** helpers (no python dependency required in the wrapper) +- Produces a human-readable KPI log (`result.txt`) and a one-line LAVA gating file (`SigwaitTest.res`) + +> **LAVA note:** `run.sh` always exits `0` (LAVA-friendly). Gate on `SigwaitTest.res`. 
+ +--- + +## Location in repo + +``` +Runner/suites/Kernel/RT-tests/SigwaitTest/ + run.sh + sigwaittest.yaml + SigwaitTest_README.md +``` + +--- + +## Default behavior (aligned to Linaro test-definitions) + +Defaults are chosen to match Linaro’s `sigwaittest` test definition, with explicit thread defaulting for our wrapper/YAML alignment: + +- Duration: `5m` +- Priority: `98` +- Threads: `2` +- Quiet mode: enabled (`-q`) +- Affinity: enabled (`-a`) +- Iterations: `1` +- Background command: empty + +The wrapper supports additional `sigwaittest` options (see below) while keeping defaults conservative. + +--- + +## Prerequisites + +- Must run as **root** (RT scheduling + memory locking behavior can require elevated privileges). +- `sigwaittest` binary must be present and executable: + - Either in `$PATH` (preferred), or + - Provided explicitly via `--binary /path/to/sigwaittest` +- Testkit environment must be available: + - `init_env` must exist in a parent directory + - `functestlib.sh` and `lib_rt.sh` must load successfully via `init_env` + +--- + +## Quick start + +### Run with defaults +```sh +cd Runner/suites/Kernel/RT-tests/SigwaitTest +./run.sh +``` + +### Run for 1 minute, 3 iterations +```sh +./run.sh --duration 1m --iterations 3 +``` + +### Run with an explicit sub-thread count (passes -t N) +```sh +./run.sh --threads true --threads-num 4 +``` + +### Use an explicit binary path +```sh +./run.sh --binary /tmp/sigwaittest +``` + +### Run with background workload +```sh +./run.sh --background-cmd "stress-ng --cpu 4 --timeout 5m" +``` + +--- + +## `run.sh` usage + +```text +./run.sh [OPTIONS] + +Wrapper options: + -h, --help Show this help and exit + --out DIR Output directory + (default: ./logs_SigwaitTest under test folder) + --result FILE Result KPI file path + (default: <out>/result.txt) + --duration TIME sigwaittest duration (passes -D TIME) + Supports suffix: m/h/d (e.g., 30s, 1m, 2h) + (default: 5m) + --iterations N Number of iterations to run + (default: 1) + --background-cmd CMD Optional background 
workload command + (default: empty) + --binary PATH Explicit path to sigwaittest binary + --progress-every N Log progress every N iterations + (default: 1) + --verbose Extra wrapper logs + +sigwaittest passthrough options: + --prio N Priority (passes -p N) (default: 98) + --threads N Thread count (passes -t N) + If N=0, wrapper expands to nproc + (default: 2) + --quiet BOOL Enable/disable quiet mode (passes -q) + Values: true/false/1/0/yes/no + (default: true) + --affinity BOOL Enable/disable CPU affinity (passes -a) + Values: true/false/1/0/yes/no + (default: true) + --affinity-cpu NUM When affinity enabled, pass "-a NUM" + (optional) + --breaktrace-us USEC Breaktrace threshold in microseconds (passes -b USEC) + (optional) + --loops N Loop count (passes -l N) (optional) + --distance USEC Distance in microseconds (passes -d USEC) (optional) + --interval USEC Interval in microseconds (passes -i USEC) (optional) + --fork BOOL Enable/disable process mode (passes -f) + Values: true/false/1/0/yes/no + (default: false) + --fork-opt OPT Optional argument to -f (depends on rt-tests build) + (optional) +``` + +--- + +## How options map to `sigwaittest` + +The wrapper builds the `sigwaittest` command using the options above and always forces JSON output: + +- `--json=<file>` is always appended (one file per iteration) +- Quiet: `--quiet true` -> `-q` +- Threads: `--threads N` -> `-t N` (wrapper always supplies `-t`) +- Affinity: `--affinity true` -> `-a` (or `-a NUM` when `--affinity-cpu NUM`) +- Priority: `--prio N` -> `-p N` +- Duration: `--duration TIME` -> `-D TIME` +- Optional knobs: `-b`, `-l`, `-d`, `-i`, `-f` when set + +--- + +## Outputs + +By default, output goes to: + +``` +Runner/suites/Kernel/RT-tests/SigwaitTest/logs_SigwaitTest/ +``` + +Typical files: + +- `sigwaittest-<iter>.json` + JSON produced by `sigwaittest` for each iteration +- `sigwaittest_stdout_iter<iter>.log` + Captured stdout/stderr for that iteration +- `iter_kpi.txt` + Parsed KPI lines per 
iteration (prefixed with `iteration-<N>-`) +- `agg_kpi.txt` + Aggregate KPI across all iterations/threads (if supported by parser) +- `thread_agg_kpi.txt` + Per-thread aggregate KPIs (if supported by parser) +- `result.txt` + Combined KPI output (this file is sent to LAVA as test output) +- `SigwaitTest.res` + One-line summary used for gating: + - `SigwaitTest PASS` + - `SigwaitTest FAIL` + - `SigwaitTest SKIP` + +--- + +## LAVA YAML integration + +A typical YAML invokes the wrapper like: + +```yaml +- cd Runner/suites/Kernel/RT-tests/SigwaitTest +- ./run.sh + --duration "${DURATION}" + --iterations "${ITERATIONS}" + --background-cmd "${BACKGROUND_CMD}" + --prio "${PRIO}" + --threads "${THREADS}" + --quiet "${QUIET}" + --affinity "${AFFINITY}" + --affinity-cpu "${AFFINITY_CPU}" + --breaktrace-us "${BREAKTRACE_US}" + --loops "${LOOPS}" + --distance "${DISTANCE}" + --interval "${INTERVAL}" + --fork "${FORK}" + --fork-opt "${FORK_OPT}" + --binary "${BINARY}" + --out "${OUT_DIR}" + $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) + --progress-every "${PROGRESS_EVERY}" + || true +- $REPO_PATH/Runner/utils/send-to-lava.sh SigwaitTest.res +``` + +### YAML params (recommended) + +- `DURATION`: `"5m"` +- `BACKGROUND_CMD`: `""` +- `ITERATIONS`: `"1"` +- `PRIO`: `"98"` +- `THREADS`: `"2"` +- `QUIET`: `"true"` +- `AFFINITY`: `"true"` +- Optional advanced params: + - `AFFINITY_CPU`, `BREAKTRACE_US`, `LOOPS`, `DISTANCE`, `INTERVAL`, `FORK`, `FORK_OPT` +- Wrapper extras: + - `BINARY`, `OUT_DIR`, `VERBOSE`, `PROGRESS_EVERY` + +--- + +## Troubleshooting + +- **SKIP: binary not found** + - Ensure `sigwaittest` is installed and in `$PATH`, or pass `--binary`. +- **Non-RT kernel warning** + - The wrapper may warn if the kernel does not look RT-enabled. Results are still captured, but latencies may be worse. 
+- **No KPIs / parse failure** + - Ensure `lib_rt.sh` is present and exports: + - `perf_parse_rt_tests_json` + - `rt_aggregate_iter_latencies` + - `rt_aggregate_iter_latencies_per_thread` +- **Background command issues** + - Provide a single command string; wrapper starts/stops it using `perf_rt_bg_start/stop`. + +--- + +## Upstream tool reference + +`sigwaittest` comes from **rt-tests**. To see binary-level help on target: + +```sh +sigwaittest --help +``` diff --git a/Runner/suites/Kernel/RT-tests/SigwaitTest/run.sh b/Runner/suites/Kernel/RT-tests/SigwaitTest/run.sh new file mode 100755 index 00000000..38587457 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SigwaitTest/run.sh @@ -0,0 +1,364 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# SigwaitTest wrapper for qcom-linux-testkit +# - Runs rt-tests sigwaittest ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to SigwaitTest.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use SigwaitTest.res for gating. + +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. 
"$TOOLS/lib_rt.sh" + +TESTNAME="SigwaitTest" +test_path=$(find_test_case_by_name "$TESTNAME") +[ -n "$test_path" ] || test_path="$SCRIPT_DIR" + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +DURATION="${DURATION:-5m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +PRIO="${PRIO:-98}" +QUIET="${QUIET:-true}" +THREADS="${THREADS:-true}" +THREADS_NUM="${THREADS_NUM:-}" +AFFINITY="${AFFINITY:-true}" +AFFINITY_CPU="${AFFINITY_CPU:-}" +BREAKTRACE_US="${BREAKTRACE_US:-}" +LOOPS="${LOOPS:-}" +DISTANCE="${DISTANCE:-}" +INTERVAL="${INTERVAL:-}" +FORK="${FORK:-false}" +FORK_OPT="${FORK_OPT:-}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --duration STR + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose + --prio N + --quiet BOOL + --threads BOOL + --threads-num N + --affinity BOOL + --affinity-cpu N + --breaktrace-us N + --loops N + --distance N + --interval N + --fork BOOL + --fork-opt OPT +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + --prio) + shift + PRIO="$1" + ;; + --quiet) + shift + QUIET="$1" + ;; + --threads) + shift + THREADS="$1" + ;; + --threads-num) + shift + THREADS_NUM="$1" + ;; + --affinity) + shift + AFFINITY="$1" + ;; + --affinity-cpu) + shift + AFFINITY_CPU="$1" + ;; + --breaktrace-us) + shift + 
BREAKTRACE_US="$1" + ;; + --loops) + shift + LOOPS="$1" + ;; + --distance) + shift + DISTANCE="$1" + ;; + --interval) + shift + INTERVAL="$1" + ;; + --fork) + shift + FORK="$1" + ;; + --fork-opt) + shift + FORK_OPT="$1" + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/sigwaittest" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" +THREAD_AGG_KPI="$OUT_DIR/thread_agg_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" \ + "$THREAD_AGG_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run sigwaittest" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +case "$PRIO" in ''|*[!0-9]*) PRIO=98 ;; esac +case "$THREADS_NUM" in ''|*[!0-9]*) THREADS_NUM="" ;; esac +case "$AFFINITY_CPU" in ''|*[!0-9]*) AFFINITY_CPU="" ;; esac +case "$BREAKTRACE_US" in ''|*[!0-9]*) BREAKTRACE_US="" ;; esac +case "$LOOPS" in ''|*[!0-9]*) LOOPS="" ;; esac +case "$DISTANCE" in ''|*[!0-9]*) DISTANCE="" ;; esac +case "$INTERVAL" in ''|*[!0-9]*) INTERVAL="" ;; esac + +SIGWAIT_BIN=$(rt_resolve_binary sigwaittest "$BINARY" 2>/dev/null || echo "") +if [ -z "$SIGWAIT_BIN" ] || [ ! 
-x "$SIGWAIT_BIN" ]; then + log_skip "$TESTNAME: sigwaittest binary not found/executable (${SIGWAIT_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$SIGWAIT_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION prio=$PRIO" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/sigwaittest_stdout_iter${i}.log" + + set -- "$SIGWAIT_BIN" + + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + + case "$THREADS" in + true|TRUE|1|yes|YES) + if [ -n "$THREADS_NUM" ]; then + set -- "$@" -t "$THREADS_NUM" + else + set -- "$@" -t + fi + ;; + esac + + case "$AFFINITY" in + true|TRUE|1|yes|YES) + if [ -n "$AFFINITY_CPU" ]; then + set -- "$@" -a "$AFFINITY_CPU" + else + set -- "$@" -a + fi + ;; + esac + + case "$FORK" in + true|TRUE|1|yes|YES) + if [ -n "$FORK_OPT" ]; then + set -- "$@" -f "$FORK_OPT" + else + set -- "$@" -f + fi + ;; + esac + + if [ -n "$BREAKTRACE_US" ]; then + set -- "$@" -b "$BREAKTRACE_US" + fi + + if [ -n "$LOOPS" ]; then + set -- "$@" -l "$LOOPS" + fi + + if [ -n "$DISTANCE" ]; then + set -- "$@" -d "$DISTANCE" + fi + + if [ -n "$INTERVAL" ]; then + set -- "$@" -i "$INTERVAL" + fi + + set -- "$@" -p "$PRIO" -D "$DURATION" --json="$jsonfile" + + if rt_run_json_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" 
-ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: sigwaittest exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! rt_parse_and_append_iteration_kpi "sigwaittest" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "sigwaittest" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true +rt_emit_thread_aggregate_kpi "$TESTNAME" "sigwaittest" "$ITER_KPI" "$THREAD_AGG_KPI" "$RESULT_TXT" || true + +if rt_kpi_file_has_fail "sigwaittest" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/SigwaitTest/sigwaittest.yaml b/Runner/suites/Kernel/RT-tests/SigwaitTest/sigwaittest.yaml new file mode 100755 index 00000000..59a2dc32 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SigwaitTest/sigwaittest.yaml @@ -0,0 +1,41 @@ +metadata: + name: SigwaitTest + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests sigwaittest (sigwait() latency) in JSON mode and parse results without requiring python3." 
+ os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "2m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + PRIO: "98" + QUIET: "true" + THREADS: "true" + THREADS_NUM: "2" + AFFINITY: "true" + AFFINITY_CPU: "" + BREAKTRACE_US: "" + LOOPS: "" + DISTANCE: "" + INTERVAL: "" + FORK: "false" + FORK_OPT: "" + + BINARY: "" + OUT_DIR: "./logs_SigwaitTest" + + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/SigwaitTest + - ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --prio "${PRIO}" --quiet "${QUIET}" --threads "${THREADS}" --threads-num "${THREADS_NUM}" --affinity "${AFFINITY}" --affinity-cpu "${AFFINITY_CPU}" --breaktrace-us "${BREAKTRACE_US}" --loops "${LOOPS}" --distance "${DISTANCE}" --interval "${INTERVAL}" --fork "${FORK}" --fork-opt "${FORK_OPT}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh SigwaitTest.res From 75e0b728ecf601271a004cfae12c406f5b9b91d2 Mon Sep 17 00:00:00 2001 From: Srikanth Muppandam <smuppand@qti.qualcomm.com> Date: Mon, 27 Apr 2026 06:49:03 +0530 Subject: [PATCH 3/4] rt-tests: harden streaming RT wrappers Update streaming RT test wrappers to use the shared FIFO, heartbeat, cleanup, JSON parsing, and interrupt-aware result helpers. These tests stream output while running, so they need stronger cleanup for child processes, pipes, heartbeat state, and background workloads. This improves reliability when tests are interrupted or fail mid-run while preserving partial logs and KPI data for debugging. 
Signed-off-by: Srikanth Muppandam <smuppand@qti.qualcomm.com> --- .../Kernel/RT-tests/OSLat/OSLAT_README.md | 228 ++++++++++ .../suites/Kernel/RT-tests/OSLat/oslat.yaml | 40 ++ Runner/suites/Kernel/RT-tests/OSLat/run.sh | 389 +++++++++++++++++ .../Kernel/RT-tests/SSDD/SSDD_README.md | 202 +++++++++ Runner/suites/Kernel/RT-tests/SSDD/run.sh | 271 ++++++++++++ Runner/suites/Kernel/RT-tests/SSDD/ssdd.yaml | 31 ++ .../RT-tests/SVSematest/SVSematest_README.md | 301 +++++++++++++ .../suites/Kernel/RT-tests/SVSematest/run.sh | 400 ++++++++++++++++++ .../Kernel/RT-tests/SVSematest/svsemtest.yaml | 42 ++ 9 files changed, 1904 insertions(+) create mode 100644 Runner/suites/Kernel/RT-tests/OSLat/OSLAT_README.md create mode 100755 Runner/suites/Kernel/RT-tests/OSLat/oslat.yaml create mode 100755 Runner/suites/Kernel/RT-tests/OSLat/run.sh create mode 100644 Runner/suites/Kernel/RT-tests/SSDD/SSDD_README.md create mode 100755 Runner/suites/Kernel/RT-tests/SSDD/run.sh create mode 100755 Runner/suites/Kernel/RT-tests/SSDD/ssdd.yaml create mode 100644 Runner/suites/Kernel/RT-tests/SVSematest/SVSematest_README.md create mode 100755 Runner/suites/Kernel/RT-tests/SVSematest/run.sh create mode 100755 Runner/suites/Kernel/RT-tests/SVSematest/svsemtest.yaml diff --git a/Runner/suites/Kernel/RT-tests/OSLat/OSLAT_README.md b/Runner/suites/Kernel/RT-tests/OSLat/OSLAT_README.md new file mode 100644 index 00000000..186cec9d --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/OSLat/OSLAT_README.md @@ -0,0 +1,228 @@ +# OSLAT + +## Overview + +OSLAT is an OS latency detector from rt-tests. It runs busy loops on selected CPUs and measures operating system induced latency while optionally applying workloads such as `memmove`. In the qcom-linux-testkit wrapper, OSLAT is executed in JSON mode, parsed through `lib_rt.sh`, and summarized into machine-friendly and human-readable result files. 
+
+This wrapper follows the same structure used for the other RT tests in `Runner/suites/Kernel/RT-tests`, including:
+
+- standard `run.sh` flow
+- `PASS` / `FAIL` / `SKIP` summary in `OSLat.res`
+- detailed KPI in `logs_OSLat/result.txt`
+- aggregate KPI files under `logs_OSLat/`
+- LAVA-friendly behavior with exit code `0`
+- heartbeat logging for long-running executions
+- partial-result preservation on user interrupt
+
+## Default behavior
+
+The wrapper defaults are chosen to be safe and practical for embedded boards while still matching the oslat binary options.
+
+Default wrapper values:
+
+- `DURATION=1m`
+- `ITERATIONS=1`
+- `BACKGROUND_CMD=""`
+- `QUIET=true`
+- `WORKLOAD=""` (option not passed; oslat itself defaults to `no`)
+- `CPU_MAIN_THREAD=""` (option not passed; oslat itself defaults to CPU `0`)
+- `PROGRESS_EVERY=1`
+- `HEARTBEAT_SEC=10`
+
+Unset binary options are passed only when explicitly requested.
+
+## Files generated
+
+Typical output directory:
+
+`logs_OSLat/`
+
+Generated files include:
+
+- `result.txt` - all parsed KPI lines and summary data
+- `iter_kpi.txt` - per-iteration KPI lines
+- `agg_kpi.txt` - aggregate KPI across iterations
+- `thread_agg_kpi.txt` - per-thread aggregate KPI
+- `oslat-<N>.json` - raw JSON output from each iteration
+- `oslat_stdout_iter<N>.log` - captured console output for each iteration
+- `tmp_result_one.txt` - temporary per-iteration parsed result file
+- `OSLat.res` - final summary result used by LAVA gating
+
+## Supported wrapper options
+
+### Wrapper options
+
+- `--out DIR`
+  Override output directory.
+
+- `--result FILE`
+  Override result file path.
+
+- `--duration TIME`
+  Test duration passed to oslat via `-D`.
+
+- `--iterations N`
+  Number of iterations to run.
+
+- `--background-cmd CMD`
+  Background workload command launched alongside the test.
+
+- `--binary PATH`
+  Explicit path to `oslat` binary.
+
+- `--progress-every N`
+  Iteration start progress cadence.
+
+- `--heartbeat-sec N`
+  Emit periodic "still running" messages while the binary is executing.
+ +- `--verbose` + Enable extra wrapper debug output. + +## Supported oslat options in run.sh + +The wrapper is expected to support the full set of useful oslat runtime options. + +- `--bucket-size N` + Pass `-b N`. + +- `--bias BOOL` + Pass `-B` when enabled. + +- `--cpu-list LIST` + Pass `-c LIST`, for example `1,3,5,7-15`. + +- `--cpu-main-thread CPU` + Pass `-C CPU`. Default is `0`. + +- `--rtprio N` + Pass `-f N`. + +- `--workload-mem SIZE` + Pass `-m SIZE`, for example `4K`, `1M`. + +- `--quiet BOOL` + Pass `-q` when enabled. + +- `--single-preheat BOOL` + Pass `-s` when enabled. + +- `--trace-threshold USEC` + Pass `-T USEC`. + +- `--workload TYPE` + Pass `-w TYPE`. Supported by oslat: `no`, `memmove`. + +- `--bucket-width NS` + Pass `-W NS`. + +- `--zero-omit BOOL` + Pass `-z` when enabled. + +## Example commands + +Run with defaults using an explicit binary: + +```sh +./run.sh --binary /tmp/oslat +``` + +Run on selected CPUs for 1 minute with memmove workload: + +```sh +./run.sh \ + --binary /tmp/oslat \ + --duration 1m \ + --cpu-list 0-3 \ + --cpu-main-thread 0 \ + --workload memmove \ + --workload-mem 1M +``` + +Run 3 iterations with heartbeat and FIFO priority: + +```sh +./run.sh \ + --binary /tmp/oslat \ + --duration 60s \ + --iterations 3 \ + --rtprio 95 \ + --heartbeat-sec 10 +``` + +Run with histogram tuning: + +```sh +./run.sh \ + --binary /tmp/oslat \ + --bucket-size 128 \ + --bucket-width 1000 \ + --bias true \ + --zero-omit true +``` + +## Result interpretation + +The parser extracts latency KPI from the JSON output and emits standard lines such as: + +- per-thread minimum latency +- per-thread average latency +- per-thread maximum latency +- test return code and verdict + +Aggregate summaries typically include: + +- all-thread minimum latency min / mean / max +- all-thread average latency min / mean / max +- all-thread maximum latency min / mean / max +- worst thread maximum latency +- worst thread id +- per-thread aggregate summaries across 
iterations + +These results are appended to `logs_OSLAT/result.txt` and also echoed to stdout in the standard qcom-linux-testkit format. + +## Interrupt behavior + +If the user presses `Ctrl-C` during execution: + +- the wrapper asks the running binary to exit cleanly +- partial stdout and any flushed JSON are preserved +- parsed results collected so far are still printed +- final status is marked as `SKIP` instead of `FAIL` + +This matches the improved handling used in the recent RT test wrappers. + +## Expected repository layout + +Typical placement inside qcom-linux-testkit: + +`Runner/suites/Kernel/RT-tests/OSLAT/` + +Expected files: + +- `run.sh` +- `oslat.yaml` +- `README.md` + +And supporting utilities: + +- `Runner/utils/functestlib.sh` +- `Runner/utils/lib_rt.sh` + +## LAVA integration notes + +The wrapper is designed to integrate with the existing RT test YAML style used in your repository: + +- repository-relative `cd` into the test directory +- invoke `./run.sh` with YAML params +- always call `send-to-lava.sh OSLAT.res` + +The `.res` file is the gating artifact. The detailed KPI remains under `logs_OSLAT/`. + +## Notes + +- OSLAT should be run as root. +- CPU list and main-thread CPU should be chosen carefully on small embedded systems. +- `--single-preheat` should only be used when CPU frequency behavior is understood and controlled. +- `--trace-threshold` is useful only if ftrace is configured and available on the target. +- `--workload memmove` plus large `--workload-mem` can significantly increase system pressure. diff --git a/Runner/suites/Kernel/RT-tests/OSLat/oslat.yaml b/Runner/suites/Kernel/RT-tests/OSLat/oslat.yaml new file mode 100755 index 00000000..26c985c1 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/OSLat/oslat.yaml @@ -0,0 +1,40 @@ +metadata: + name: OSLat + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests oslat (OS latency detector) in JSON mode and parse results without requiring python3." 
+ os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "1m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + BUCKET_SIZE: "" + BIAS: "false" + CPU_LIST: "" + CPU_MAIN_THREAD: "" + RTPRIO: "" + WORKLOAD_MEM: "" + QUIET: "true" + SINGLE_PREHEAT: "false" + TRACE_THRESHOLD_US: "" + WORKLOAD: "" + BUCKET_WIDTH_NS: "" + ZERO_OMIT: "false" + + BINARY: "" + OUT_DIR: "./logs_OSLat" + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/OSLat + - ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --bucket-size "${BUCKET_SIZE}" --bias "${BIAS}" --cpu-list "${CPU_LIST}" --cpu-main-thread "${CPU_MAIN_THREAD}" --rtprio "${RTPRIO}" --workload-mem "${WORKLOAD_MEM}" --quiet "${QUIET}" --single-preheat "${SINGLE_PREHEAT}" --trace-threshold-us "${TRACE_THRESHOLD_US}" --workload "${WORKLOAD}" --bucket-width-ns "${BUCKET_WIDTH_NS}" --zero-omit "${ZERO_OMIT}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh OSLat.res diff --git a/Runner/suites/Kernel/RT-tests/OSLat/run.sh b/Runner/suites/Kernel/RT-tests/OSLat/run.sh new file mode 100755 index 00000000..25151de3 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/OSLat/run.sh @@ -0,0 +1,389 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# OSLat wrapper for qcom-linux-testkit +# - Runs rt-tests oslat ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to OSLat.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use OSLat.res for gating. +# - Ctrl-C/user interrupt is treated as SKIP and partial results are preserved. +# - Heartbeat is enabled by default. 
+ +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. "$TOOLS/lib_rt.sh" + +TESTNAME="OSLat" +RT_CUR_TESTNAME="$TESTNAME" +export RT_CUR_TESTNAME + +test_path=$(find_test_case_by_name "$TESTNAME") +if [ -z "$test_path" ]; then + test_path="$SCRIPT_DIR" +fi + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +DURATION="${DURATION:-1m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +BUCKET_SIZE="${BUCKET_SIZE:-}" +BIAS="${BIAS:-false}" +CPU_LIST="${CPU_LIST:-}" +CPU_MAIN_THREAD="${CPU_MAIN_THREAD:-}" +RTPRIO="${RTPRIO:-}" +WORKLOAD_MEM="${WORKLOAD_MEM:-}" +QUIET="${QUIET:-true}" +SINGLE_PREHEAT="${SINGLE_PREHEAT:-false}" +TRACE_THRESHOLD_US="${TRACE_THRESHOLD_US:-}" +WORKLOAD="${WORKLOAD:-}" +BUCKET_WIDTH_NS="${BUCKET_WIDTH_NS:-}" +ZERO_OMIT="${ZERO_OMIT:-false}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --duration TIME + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose + --bucket-size N + --bias BOOL + --cpu-list LIST + --cpu-main-thread CPU + --rtprio N + --workload-mem SIZE + --quiet BOOL + --single-preheat BOOL + --trace-threshold-us N + --workload KIND + --bucket-width-ns N + --zero-omit BOOL +EOF +} + +while [ "$#" -gt 0 ]; do + case 
"$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + --bucket-size) + shift + BUCKET_SIZE="$1" + ;; + --bias) + shift + BIAS="$1" + ;; + --cpu-list) + shift + CPU_LIST="$1" + ;; + --cpu-main-thread) + shift + CPU_MAIN_THREAD="$1" + ;; + --rtprio) + shift + RTPRIO="$1" + ;; + --workload-mem) + shift + WORKLOAD_MEM="$1" + ;; + --quiet) + shift + QUIET="$1" + ;; + --single-preheat) + shift + SINGLE_PREHEAT="$1" + ;; + --trace-threshold-us) + shift + TRACE_THRESHOLD_US="$1" + ;; + --workload) + shift + WORKLOAD="$1" + ;; + --bucket-width-ns) + shift + BUCKET_WIDTH_NS="$1" + ;; + --zero-omit) + shift + ZERO_OMIT="$1" + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/oslat" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" +THREAD_AGG_KPI="$OUT_DIR/thread_agg_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" \ + "$THREAD_AGG_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run oslat" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date mkfifo rm tee sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! 
rt_require_stream_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +case "$BUCKET_SIZE" in ''|*[!0-9]*) BUCKET_SIZE="" ;; esac +case "$CPU_MAIN_THREAD" in ''|*[!0-9]*) CPU_MAIN_THREAD="" ;; esac +case "$RTPRIO" in ''|*[!0-9]*) RTPRIO="" ;; esac +case "$TRACE_THRESHOLD_US" in ''|*[!0-9]*) TRACE_THRESHOLD_US="" ;; esac +case "$BUCKET_WIDTH_NS" in ''|*[!0-9]*) BUCKET_WIDTH_NS="" ;; esac + +OSLAT_BIN=$(rt_resolve_binary oslat "$BINARY" 2>/dev/null || echo "") +if [ -z "$OSLAT_BIN" ] || [ ! -x "$OSLAT_BIN" ]; then + log_skip "$TESTNAME: oslat binary not found/executable (${OSLAT_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$OSLAT_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; rt_cleanup_pipes; rt_stop_heartbeat; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'rt_cleanup_pipes; rt_stop_heartbeat; perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/oslat_stdout_iter${i}.log" + + set -- "$OSLAT_BIN" + + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + + case "$BIAS" in + true|TRUE|1|yes|YES) + set -- "$@" -B + ;; + esac + + case "$SINGLE_PREHEAT" in + true|TRUE|1|yes|YES) + set -- "$@" -s + ;; + esac + + case "$ZERO_OMIT" in + true|TRUE|1|yes|YES) + set -- "$@" -z + ;; + esac + + if [ -n "$BUCKET_SIZE" ]; then + set -- "$@" -b "$BUCKET_SIZE" + fi + + if [ -n "$CPU_LIST" ]; then + set -- "$@" -c "$CPU_LIST" + fi + + if [ -n "$CPU_MAIN_THREAD" ]; then + set -- "$@" -C "$CPU_MAIN_THREAD" + fi + + if [ -n "$RTPRIO" ]; then + set -- "$@" -f 
"$RTPRIO" + fi + + if [ -n "$WORKLOAD_MEM" ]; then + set -- "$@" -m "$WORKLOAD_MEM" + fi + + if [ -n "$TRACE_THRESHOLD_US" ]; then + set -- "$@" -T "$TRACE_THRESHOLD_US" + fi + + if [ -n "$WORKLOAD" ]; then + set -- "$@" -w "$WORKLOAD" + fi + + if [ -n "$BUCKET_WIDTH_NS" ]; then + set -- "$@" -W "$BUCKET_WIDTH_NS" + fi + + set -- "$@" -D "$DURATION" --json="$jsonfile" + + if rt_run_streaming_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null && [ "$rc" -eq 130 ] 2>/dev/null; then + log_warn "$TESTNAME: oslat interrupted by user (rc=$rc); reporting partial results" + else + log_fail "$TESTNAME: oslat exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: json output not available after interrupt: $jsonfile" + break + fi + + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! 
rt_parse_and_append_iteration_kpi "oslat" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: parse incomplete after interrupt (iter $i/$ITERATIONS): $jsonfile" + else + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + break + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "oslat" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true +rt_emit_thread_aggregate_kpi "$TESTNAME" "oslat" "$ITER_KPI" "$THREAD_AGG_KPI" "$RESULT_TXT" || true + +if rt_kpi_file_has_fail "oslat" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/SSDD/SSDD_README.md b/Runner/suites/Kernel/RT-tests/SSDD/SSDD_README.md new file mode 100644 index 00000000..04c5d6cf --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SSDD/SSDD_README.md @@ -0,0 +1,202 @@ +# SSDD + +## Overview + +**SSDD** is an rt-tests utility that stresses `ptrace` single-step behavior by creating multiple tracer/tracee pairs and repeatedly issuing `PTRACE_SINGLESTEP` operations. It is useful for checking scheduler behavior and interference when many tracer/tracee pairs are running concurrently. 
+ +In the qcom-linux-testkit flow, the **SSDD** wrapper follows the same style as the other RT tests: +- structured logging using `functestlib.sh` +- JSON based result capture +- parsed KPI output without requiring Python at runtime +- `.res` summary file for LAVA gating +- `result.txt` with detailed per-iteration and aggregate KPI +- heartbeat/progress logs for long runs + +## What the test does + +The test: +- launches a configurable number of tracer/tracee pairs +- performs a configurable number of `PTRACE_SINGLESTEP` iterations per pair +- verifies `waitpid(2)` return behavior during stepping +- records final test results in JSON format +- emits pass/fail style KPI lines consumable by the testkit + +## Defaults + +The wrapper should keep defaults aligned with the tool defaults unless you intentionally override them: + +- **forks**: `10` +- **iters**: `10000` +- **iterations** (wrapper level): `1` +- **background workload**: empty +- **quiet**: enabled when supported by wrapper design + +## Binary usage reference + +`ssdd` supports the following options: + +- `-f`, `--forks=NUM` + Number of tracer/tracee pairs to fork. Default is `10`. + +- `-h`, `--help` + Display usage. + +- `-i`, `--iters=NUM` + Number of `PTRACE_SINGLESTEP` iterations per tracer/tracee pair. Default is `10000`. Must be at least `1`. + +- `--json=FILENAME` + Write final results into `FILENAME` in JSON format. 
+
+## Expected wrapper behavior
+
+The qcom-linux-testkit `run.sh` for **SSDD** should follow the same conventions used in the earlier RT test wrappers:
+
+- detect and source `init_env`
+- load `functestlib.sh` and `lib_rt.sh`
+- resolve `TESTNAME="SSDD"`
+- write summary result to `SSDD.res`
+- write detailed logs under `logs_SSDD/`
+- support explicit `--binary PATH`
+- support wrapper iteration count separate from `ssdd --iters`
+- preserve partial results on interrupt when practical
+- always exit `0` for LAVA friendliness, with gating based on `SSDD.res`
+
+## Suggested wrapper options
+
+The wrapper can expose these arguments in the same style as the other RT tests:
+
+### Wrapper level options
+
+- `--out DIR`
+  Output directory.
+
+- `--result FILE`
+  Result text file path.
+
+- `--iterations N`
+  Number of wrapper iterations.
+
+- `--background-cmd CMD`
+  Optional background workload.
+
+- `--binary PATH`
+  Explicit path to the `ssdd` binary.
+
+- `--progress-every N`
+  Print iteration progress every N iterations.
+
+- `--heartbeat-sec N`
+  Periodic liveness logging while the test is running.
+
+- `--verbose`
+  Enable extra wrapper logs.
+
+### SSDD specific options
+
+- `--forks NUM`
+  Maps to `-f` / `--forks=NUM`.
+
+- `--ssdd-iters NUM`
+  Maps to `-i` / `--iters=NUM`.
+
+## Example commands
+
+Run with defaults from PATH:
+
+```sh
+./run.sh
+```
+
+Run a specific binary with more tracer/tracee pairs:
+
+```sh
+./run.sh --binary /tmp/ssdd --forks 16 --ssdd-iters 20000
+```
+
+Run multiple wrapper iterations:
+
+```sh
+./run.sh --binary /tmp/ssdd --forks 8 --ssdd-iters 5000 --iterations 3
+```
+
+Run with a background workload:
+
+```sh
+./run.sh --binary /tmp/ssdd --forks 12 --ssdd-iters 10000 --background-cmd "stress-ng --cpu 4 --timeout 60"
+```
+
+## Output files
+
+A typical wrapper layout should look like this:
+
+```text
+SSDD/
+|-- SSDD.res
+|-- run.sh
+|-- ssdd.yaml
+|-- SSDD_README.md
+`-- logs_SSDD/
+    |-- result.txt
+    |-- iter_kpi.txt
+    |-- agg_kpi.txt
+    |-- ssdd-1.json
+    |-- ssdd_stdout_iter1.log
+    `-- tmp_result_one.txt
+```
+
+### Important files
+
+- **SSDD.res**
+  Final summary for LAVA. Example:
+  - `SSDD PASS`
+  - `SSDD FAIL`
+  - `SSDD SKIP`
+
+- **logs_SSDD/result.txt**
+  Detailed parsed KPI for all iterations.
+
+- **logs_SSDD/iter_kpi.txt**
+  Per-iteration KPI lines.
+
+- **logs_SSDD/agg_kpi.txt**
+  Aggregate KPI lines across iterations.
+
+- **logs_SSDD/ssdd-<n>.json**
+  Raw JSON result emitted by the binary.
+
+- **logs_SSDD/ssdd_stdout_iter<n>.log**
+  Captured stdout/stderr for each iteration.
+
+## Pass, fail, and skip model
+
+Typical wrapper result handling should be:
+
+- **PASS** when all iterations run successfully, JSON is generated, and parsed verdicts indicate success
+- **FAIL** when the binary exits unexpectedly, JSON is missing, or parsing indicates failure
+- **SKIP** when required tools are missing, the binary is unavailable, or the run is intentionally interrupted and the wrapper is designed to preserve partial results
+
+## Notes for integration
+
+- Keep the script POSIX compliant and ShellCheck clean.
+- Avoid `A && B || C` style patterns.
+- Reuse existing helpers from `functestlib.sh` and `lib_rt.sh` instead of inventing new wrapper-local helper names unless necessary.
+- Preserve naming consistency with earlier RT tests. +- Use the same logging style and result file conventions as `PMQTest`, `PTSEMATest`, `SignalTest`, `SVSematest`, and `CyclicDeadline`. + +## Suggested YAML parameters + +A matching YAML would typically expose: + +- `FORKS` +- `ITERS` +- `ITERATIONS` +- `BACKGROUND_CMD` +- `BINARY` +- `OUT_DIR` +- `VERBOSE` +- `PROGRESS_EVERY` +- optional heartbeat parameter if your wrapper supports it + +## Summary + +**SSDD** is a useful RT stress test for validating `ptrace` single-step behavior under concurrent tracer/tracee activity. In qcom-linux-testkit it should be wrapped exactly like the other RT tests: structured logs, JSON parsing, deterministic result files, and LAVA-friendly summary handling. diff --git a/Runner/suites/Kernel/RT-tests/SSDD/run.sh b/Runner/suites/Kernel/RT-tests/SSDD/run.sh new file mode 100755 index 00000000..fe4fc11b --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SSDD/run.sh @@ -0,0 +1,271 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# SSDD wrapper for qcom-linux-testkit +# - Runs rt-tests ssdd ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to SSDD.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use SSDD.res for gating. +# - Ctrl-C/user interrupt is treated as SKIP and partial results are preserved. + +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. 
"$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. "$TOOLS/lib_rt.sh" + +TESTNAME="SSDD" +RT_CUR_TESTNAME="$TESTNAME" +export RT_CUR_TESTNAME + +test_path=$(find_test_case_by_name "$TESTNAME") +if [ -z "$test_path" ]; then + test_path="$SCRIPT_DIR" +fi + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +FORKS="${FORKS:-10}" +SSDD_ITERS="${SSDD_ITERS:-10000}" +BINARY="${BINARY:-}" +QUIET="${QUIET:-true}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose + --forks NUM + --ssdd-iters NUM + --quiet BOOL +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + --forks) + shift + FORKS="$1" + ;; + --ssdd-iters) + shift + SSDD_ITERS="$1" + ;; + --quiet) + shift + QUIET="$1" + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/ssdd" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run ssdd" + +if ! 
rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh tee sleep kill date mkfifo rm sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_stream_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +case "$FORKS" in + ''|*[!0-9]*|0) + FORKS=10 + ;; +esac + +case "$SSDD_ITERS" in + ''|*[!0-9]*|0) + SSDD_ITERS=10000 + ;; +esac + +SSDD_BIN=$(rt_resolve_binary ssdd "$BINARY" 2>/dev/null || echo "") +if [ -z "$SSDD_BIN" ] || [ ! -x "$SSDD_BIN" ]; then + log_skip "$TESTNAME: ssdd binary not found/executable (${SSDD_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$SSDD_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS forks=$FORKS ssdd-iters=$SSDD_ITERS" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; rt_cleanup_pipes; rt_stop_heartbeat; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'rt_cleanup_pipes; rt_stop_heartbeat; perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/ssdd_stdout_iter${i}.log" + + set -- "$SSDD_BIN" "--forks=$FORKS" "--iters=$SSDD_ITERS" "--json=$jsonfile" + + if rt_run_streaming_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null && [ "$rc" -eq 130 ] 2>/dev/null; then + log_warn "$TESTNAME: ssdd interrupted by user (rc=$rc); reporting partial results" + else + log_fail "$TESTNAME: ssdd 
exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: json output not available after interrupt: $jsonfile" + break + fi + + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! rt_parse_and_append_iteration_kpi "ssdd" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: parse incomplete after interrupt (iter $i/$ITERATIONS): $jsonfile" + else + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + break + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" + +if rt_kpi_file_has_fail "ssdd" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/SSDD/ssdd.yaml b/Runner/suites/Kernel/RT-tests/SSDD/ssdd.yaml new file mode 100755 index 00000000..832378dd --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SSDD/ssdd.yaml @@ -0,0 +1,31 @@ +metadata: + name: SSDD + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests ssdd (PTRACE_SINGLESTEP interference test) in JSON mode and parse results without requiring python3." 
+ os: + - linux + scope: + - performance + - preempt-rt + +params: + BACKGROUND_CMD: "" + ITERATIONS: "1" + + FORKS: "10" + SSDD_ITERS: "10000" + QUIET: "true" + + BINARY: "" + OUT_DIR: "./logs_SSDD" + + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/SSDD + - ./run.sh --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --forks "${FORKS}" --ssdd-iters "${SSDD_ITERS}" --quiet "${QUIET}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh SSDD.res diff --git a/Runner/suites/Kernel/RT-tests/SVSematest/SVSematest_README.md b/Runner/suites/Kernel/RT-tests/SVSematest/SVSematest_README.md new file mode 100644 index 00000000..ac84306e --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SVSematest/SVSematest_README.md @@ -0,0 +1,301 @@ +# SVSematest + +## Overview + +`SVSematest` is the qcom-linux-testkit wrapper for the `rt-tests` `svsematest` binary. It runs the SYSV semaphore latency test in JSON mode, parses results using `lib_rt.sh`, prints KPIs to the console, writes detailed results to log files, and emits a summary result file for LAVA. + +This test follows the same style as the earlier RT tests in the suite, such as `PTSEMATest`, `PMQTest`, `RTMigrateTest`, `SignalTest`, and `SigwaitTest`. + +## What the test validates + +`svsematest` measures the latency between releasing a SYSV semaphore on one side and acquiring it on the other side. It is useful for validating RT scheduling behavior and semaphore wakeup latency under PREEMPT/RT-capable kernels or RT-focused workloads. 
+ +## Default behavior + +The wrapper defaults are aligned with the Linaro test-definitions flow: + +- `DURATION=5m` +- `BACKGROUND_CMD=""` +- `ITERATIONS=1` +- `PRIO=98` +- `QUIET=true` +- `THREADS=true` +- `AFFINITY=true` +- `SMP=false` +- `FORK_MODE=false` + +By default, the effective command shape is equivalent to: + +- `svsematest -q -t -a -p 98 -D 5m --json=<file>` + +## Files produced + +The test creates a log directory like: + +- `logs_SVSematest/` + +Typical generated files: + +- `result.txt` - detailed KPI output used for review +- `iter_kpi.txt` - per-iteration KPI lines +- `agg_kpi.txt` - aggregate KPI across all iterations/threads +- `thread_agg_kpi.txt` - per-thread aggregate KPI summary +- `svsematest-<N>.json` - raw JSON output from `svsematest` +- `svsematest_stdout_iter<N>.log` - stdout/stderr captured from the binary +- `tmp_result_one.txt` - temporary parsed KPI file +- `SVSematest.res` - final PASS/FAIL/SKIP summary for LAVA gating + +## Console behavior + +The wrapper is designed to be operator-friendly: + +- prints environment and runtime context before execution +- streams `svsematest` stdout to the console +- supports heartbeat progress messages while the binary is running +- prints per-iteration KPI summary at the end +- prints aggregate KPI summary at the end +- prints per-thread aggregate summary at the end + +For long runs, a heartbeat message is shown by default so the user knows the test is still active. + +Example heartbeat lines: + +- `SVSematest: still running... 10s elapsed` +- `SVSematest: still running... 20s elapsed` + +## Interrupt behavior + +If the user presses `Ctrl-C` during execution: + +- the wrapper requests the running `svsematest` process to exit cleanly +- any partial stdout/JSON data already produced is preserved +- collected KPI is still parsed if possible +- final result is marked as `SKIP` + +This is intentional so partially collected data is not lost. 
+ +## Prerequisites + +- root access +- `svsematest` binary available either in `PATH` or provided explicitly with `--binary` +- qcom-linux-testkit environment initialized through `init_env` +- `functestlib.sh` +- `lib_rt.sh` +- basic user-space tools such as: + - `uname` + - `awk` + - `sed` + - `grep` + - `tr` + - `head` + - `tail` + - `mkdir` + - `cat` + - `sh` + - `tee` + - `sleep` + - `kill` + - `date` + - `mkfifo` + - `rm` + +## Command-line usage + +```sh +./run.sh [OPTIONS] +``` + +### Wrapper options + +- `--out DIR` + Output directory. Default: `./logs_SVSematest` + +- `--result FILE` + Result file path. Default: `<OUT_DIR>/result.txt` + +- `--duration TIME` + Passes `-D TIME` to `svsematest`. Default: `5m` + +- `--iterations N` + Number of iterations to run. Default: `1` + +- `--background-cmd CMD` + Optional background workload command + +- `--binary PATH` + Explicit path to `svsematest` + +- `--progress-every N` + Iteration progress print interval. Default: `1` + +- `--verbose` + Enable extra wrapper-side debug logging + +### Supported svsematest options + +The wrapper supports the full practical set used by the binary. + +- `--affinity BOOL` + Enable or disable `-a` + +- `--affinity-cpu NUM` + Use `-a NUM` + +- `--breaktrace-us USEC` + Use `-b USEC` + +- `--distance-us USEC` + Use `-d DIST` + +- `--fork BOOL` + Enable or disable `-f` + +- `--fork-opt OPT` + Use `-f OPT` + +- `--interval-us USEC` + Use `-i INTV` + +- `--loops N` + Use `-l LOOPS` + +- `--prio N` + Use `-p PRIO`. 
Default: `98` + +- `--quiet BOOL` + Enable or disable `-q` + +- `--smp BOOL` + Enable or disable `-S` + +- `--threads BOOL` + Enable or disable `-t` + +- `--threads-num NUM` + Use `-t NUM` + +## Binary help reference + +The wrapper is designed around the `svsematest` usage below: + +```text +svsematest V 2.20 +Usage: +svsematest <options> + +Function: test SYSV semaphore latency + +Available options: +-a [NUM] --affinity run thread #N on processor #N, if possible + with NUM pin all threads to the processor NUM +-b USEC --breaktrace=USEC send break trace command when latency > USEC +-d DIST --distance=DIST distance of thread intervals in us default=500 +-D --duration=TIME specify a length for the test run +-f [OPT] --fork[=OPT] fork new processes instead of creating threads +-i INTV --interval=INTV base interval of thread in us default=1000 + --json=FILENAME write final results into FILENAME, JSON formatted +-l LOOPS --loops=LOOPS number of loops: default=0 (endless) +-p PRIO --prio=PRIO priority +-S --smp SMP testing: options -a -t and same priority + of all threads +-t --threads one thread per available processor +-t [NUM] --threads[=NUM] number of threads + without NUM, threads = max_cpus + without -t default = 1 +``` + +## Example commands + +Run with defaults aligned to Linaro behavior: + +```sh +./run.sh +``` + +Run with an explicit binary path: + +```sh +./run.sh --binary /tmp/svsematest +``` + +Run with one thread per CPU and explicit affinity CPU selection: + +```sh +./run.sh --binary /tmp/svsematest --threads true --threads-num 8 --affinity true --affinity-cpu 0 +``` + +Run for 60 seconds with explicit interval: + +```sh +./run.sh --binary /tmp/svsematest --threads true --threads-num 8 --affinity true --affinity-cpu 0 --interval-us 1000 --duration 60s +``` + +Run in fork mode: + +```sh +./run.sh --binary /tmp/svsematest --fork true --fork-opt 2 --distance-us 500 --interval-us 1000 +``` + +Run multiple iterations: + +```sh +./run.sh --binary /tmp/svsematest 
--iterations 3 +``` + +## Result interpretation + +The wrapper emits parsed KPI in a normalized format such as: + +- `t0-min-latency pass 5 us` +- `t0-avg-latency pass 9.25 us` +- `t0-max-latency pass 66 us` +- `svsematest pass` + +It also generates aggregate KPIs such as: + +- `svsematest-all-min-latency-min` +- `svsematest-all-avg-latency-mean` +- `svsematest-all-max-latency-max` +- `svsematest-worst-thread-max-latency` +- `svsematest-worst-thread-id` + +And per-thread aggregate KPIs such as: + +- `svsematest-t0-min-latency-mean` +- `svsematest-t0-avg-latency-mean` +- `svsematest-t0-max-latency-max` + +## LAVA integration + +The YAML for this test should invoke `run.sh` and then publish: + +- `SVSematest.res` through `send-to-lava.sh` + +The wrapper always exits `0` for LAVA friendliness. Gating should be based on: + +- `SVSematest.res` + +Possible final states: + +- `SVSematest PASS` +- `SVSematest FAIL` +- `SVSematest SKIP` + +## Notes + +- `--duration` controls runtime, not `--interval-us` +- `--interval-us` is the base thread interval in microseconds +- `--threads-num` is only meaningful when `--threads true` +- `--affinity-cpu` is only meaningful when `--affinity true` +- if the user deletes the log directory and reruns the test, the wrapper recreates it automatically +- heartbeat logging is expected during long-running test execution + +## Maintainer intent + +This wrapper is intentionally consistent with the Qualcomm RT test wrappers already present in the suite. Future changes should preserve: + +- naming consistency +- POSIX shell compatibility +- ShellCheck cleanliness +- LAVA-friendly behavior +- reuse of shared helpers from `functestlib.sh` and `lib_rt.sh` diff --git a/Runner/suites/Kernel/RT-tests/SVSematest/run.sh b/Runner/suites/Kernel/RT-tests/SVSematest/run.sh new file mode 100755 index 00000000..02ae8e97 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SVSematest/run.sh @@ -0,0 +1,400 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. 
and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# SVSematest wrapper for qcom-linux-testkit +# - Runs rt-tests svsematest ITERATIONS times (JSON output) +# - Parses KPI using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to SVSematest.res +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use SVSematest.res for gating. +# - Ctrl-C/user interrupt is treated as SKIP and partial results are preserved. +# - Streaming progress from svsematest is shown live on stdout. +# - Heartbeat is shown by default while the test is running. + +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. 
"$TOOLS/lib_rt.sh" + +TESTNAME="SVSematest" +RT_CUR_TESTNAME="$TESTNAME" +export RT_CUR_TESTNAME + +test_path=$(find_test_case_by_name "$TESTNAME") +if [ -z "$test_path" ]; then + test_path="$SCRIPT_DIR" +fi + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +DURATION="${DURATION:-5m}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +PRIO="${PRIO:-98}" +QUIET="${QUIET:-true}" +THREADS="${THREADS:-true}" +THREADS_NUM="${THREADS_NUM:-}" +AFFINITY="${AFFINITY:-true}" +AFFINITY_CPU="${AFFINITY_CPU:-}" +BREAKTRACE_US="${BREAKTRACE_US:-}" +DISTANCE_US="${DISTANCE_US:-}" +FORK_MODE="${FORK_MODE:-false}" +FORK_OPT="${FORK_OPT:-}" +INTERVAL_US="${INTERVAL_US:-}" +LOOPS="${LOOPS:-}" +SMP="${SMP:-false}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --duration TIME + --iterations N + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose + --prio N + --quiet BOOL + --threads BOOL + --threads-num NUM + --affinity BOOL + --affinity-cpu NUM + --breaktrace-us USEC + --distance-us USEC + --fork BOOL + --fork-opt OPT + --interval-us USEC + --loops N + --smp BOOL +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + --prio) + shift + PRIO="$1" + ;; + --quiet) + shift + QUIET="$1" + ;; + --threads) + shift + THREADS="$1" + ;; + --threads-num) + shift + 
THREADS_NUM="$1" + ;; + --affinity) + shift + AFFINITY="$1" + ;; + --affinity-cpu) + shift + AFFINITY_CPU="$1" + ;; + --breaktrace-us) + shift + BREAKTRACE_US="$1" + ;; + --distance-us) + shift + DISTANCE_US="$1" + ;; + --fork) + shift + FORK_MODE="$1" + ;; + --fork-opt) + shift + FORK_OPT="$1" + ;; + --interval-us) + shift + INTERVAL_US="$1" + ;; + --loops) + shift + LOOPS="$1" + ;; + --smp) + shift + SMP="$1" + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/svsematest" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +AGG_KPI="$OUT_DIR/agg_kpi.txt" +THREAD_AGG_KPI="$OUT_DIR/thread_agg_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$AGG_KPI" \ + "$THREAD_AGG_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run svsematest" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh tee sleep kill date mkfifo rm sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_json_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! rt_require_stream_helpers; then + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_normalize_common_params + +case "$PRIO" in ''|*[!0-9]*) PRIO=98 ;; esac +case "$THREADS_NUM" in ''|*[!0-9]*) THREADS_NUM="" ;; esac +case "$AFFINITY_CPU" in ''|*[!0-9]*) AFFINITY_CPU="" ;; esac +case "$BREAKTRACE_US" in ''|*[!0-9]*) BREAKTRACE_US="" ;; esac +case "$DISTANCE_US" in ''|*[!0-9]*) DISTANCE_US="" ;; esac +case "$INTERVAL_US" in ''|*[!0-9]*) INTERVAL_US="" ;; esac +case "$LOOPS" in ''|*[!0-9]*) LOOPS="" ;; esac + +SVSEMA_BIN=$(rt_resolve_binary svsematest "$BINARY" 2>/dev/null || echo "") +if [ -z "$SVSEMA_BIN" ] || [ ! 
-x "$SVSEMA_BIN" ]; then + log_skip "$TESTNAME: svsematest binary not found/executable (${SVSEMA_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$SVSEMA_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION prio=$PRIO" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; rt_cleanup_pipes; rt_stop_heartbeat; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'rt_cleanup_pipes; rt_stop_heartbeat; perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/svsematest_stdout_iter${i}.log" + + set -- "$SVSEMA_BIN" + + case "$QUIET" in + true|TRUE|1|yes|YES) + set -- "$@" -q + ;; + esac + + case "$SMP" in + true|TRUE|1|yes|YES) + set -- "$@" -S + ;; + esac + + case "$AFFINITY" in + true|TRUE|1|yes|YES) + if [ -n "$AFFINITY_CPU" ]; then + set -- "$@" -a "$AFFINITY_CPU" + else + set -- "$@" -a + fi + ;; + esac + + case "$THREADS" in + true|TRUE|1|yes|YES) + if [ -n "$THREADS_NUM" ]; then + set -- "$@" -t "$THREADS_NUM" + else + set -- "$@" -t + fi + ;; + esac + + case "$FORK_MODE" in + true|TRUE|1|yes|YES) + if [ -n "$FORK_OPT" ]; then + set -- "$@" -f "$FORK_OPT" + else + set -- "$@" -f + fi + ;; + esac + + if [ -n "$BREAKTRACE_US" ]; then + set -- "$@" -b "$BREAKTRACE_US" + fi + + if [ -n "$DISTANCE_US" ]; then + set -- "$@" -d "$DISTANCE_US" + fi + + if [ -n "$INTERVAL_US" ]; then + set -- "$@" -i "$INTERVAL_US" + fi + + if [ -n "$LOOPS" ]; then + set -- "$@" -l "$LOOPS" + fi + + set -- "$@" -p "$PRIO" -D "$DURATION" --json="$jsonfile" + + if rt_run_streaming_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + 
if [ "$rc" -ne 0 ] 2>/dev/null; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null && [ "$rc" -eq 130 ] 2>/dev/null; then + log_warn "$TESTNAME: svsematest interrupted by user (rc=$rc); reporting partial results" + else + log_fail "$TESTNAME: svsematest exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + fi + + if [ "${RT_RUN_JSON_OK:-0}" -ne 1 ] 2>/dev/null; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: json output not available after interrupt: $jsonfile" + break + fi + + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + if ! rt_parse_and_append_iteration_kpi "svsematest" "$jsonfile" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" "$i"; then + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: parse incomplete after interrupt (iter $i/$ITERATIONS): $jsonfile" + else + log_fail "$TESTNAME: failed to parse/store KPI (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + break + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true + +rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +rt_emit_aggregate_kpi "$TESTNAME" "svsematest" "$ITER_KPI" "$AGG_KPI" "$RESULT_TXT" || true +rt_emit_thread_aggregate_kpi "$TESTNAME" "svsematest" "$ITER_KPI" "$THREAD_AGG_KPI" "$RESULT_TXT" || true + +if rt_kpi_file_has_fail "svsematest" "$ITER_KPI"; then + overall_fail=1 +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/SVSematest/svsemtest.yaml b/Runner/suites/Kernel/RT-tests/SVSematest/svsemtest.yaml new file mode 100755 index 00000000..f5452f22 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/SVSematest/svsemtest.yaml @@ -0,0 +1,42 @@ +metadata: + name: SVSematest + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests 
svsematest (SYSV semaphore latency) in JSON mode and parse results without requiring python3." + os: + - linux + scope: + - performance + - preempt-rt + +params: + DURATION: "1m" + BACKGROUND_CMD: "" + ITERATIONS: "1" + + PRIO: "98" + QUIET: "true" + THREADS: "true" + THREADS_NUM: "" + AFFINITY: "true" + AFFINITY_CPU: "" + BREAKTRACE_US: "" + DISTANCE_US: "" + FORK_MODE: "false" + FORK_OPT: "" + INTERVAL_US: "" + LOOPS: "" + SMP: "false" + + BINARY: "" + OUT_DIR: "./logs_SVSematest" + + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/SVSematest + - ./run.sh --duration "${DURATION}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --prio "${PRIO}" --quiet "${QUIET}" --threads "${THREADS}" --threads-num "${THREADS_NUM}" --affinity "${AFFINITY}" --affinity-cpu "${AFFINITY_CPU}" --breaktrace-us "${BREAKTRACE_US}" --distance-us "${DISTANCE_US}" --fork "${FORK_MODE}" --fork-opt "${FORK_OPT}" --interval-us "${INTERVAL_US}" --loops "${LOOPS}" --smp "${SMP}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh SVSematest.res From 14b85a848f43d4c0347bafb1e176a2ae5225b450 Mon Sep 17 00:00:00 2001 From: Srikanth Muppandam <smuppand@qti.qualcomm.com> Date: Mon, 27 Apr 2026 06:49:17 +0530 Subject: [PATCH 4/4] rt-tests: improve special-case workload wrappers Update Hackbench and PI_Stress wrappers while preserving their test-specific behavior. Hackbench keeps its Time: log parsing flow but uses shared command execution, heartbeat, interrupt handling, and result emission helpers. PI_Stress keeps its special TERM handling and uses the validated duration flow with -D seconds, while preserving inversion count aggregation, optional baseline gating, and partial result reporting on user interrupt. 
Signed-off-by: Srikanth Muppandam <smuppand@qti.qualcomm.com> --- .../RT-tests/Hackbench/README_Hackbench.md | 229 ++++++++++ .../Kernel/RT-tests/Hackbench/hackbench.yaml | 38 ++ .../suites/Kernel/RT-tests/Hackbench/run.sh | 302 ++++++++++++ .../RT-tests/PI_Stress/README_PI_Stress.md | 176 +++++++ .../Kernel/RT-tests/PI_Stress/pi_stress.yaml | 31 ++ .../suites/Kernel/RT-tests/PI_Stress/run.sh | 432 ++++++++++++++++++ 6 files changed, 1208 insertions(+) create mode 100644 Runner/suites/Kernel/RT-tests/Hackbench/README_Hackbench.md create mode 100755 Runner/suites/Kernel/RT-tests/Hackbench/hackbench.yaml create mode 100755 Runner/suites/Kernel/RT-tests/Hackbench/run.sh create mode 100644 Runner/suites/Kernel/RT-tests/PI_Stress/README_PI_Stress.md create mode 100755 Runner/suites/Kernel/RT-tests/PI_Stress/pi_stress.yaml create mode 100755 Runner/suites/Kernel/RT-tests/PI_Stress/run.sh diff --git a/Runner/suites/Kernel/RT-tests/Hackbench/README_Hackbench.md b/Runner/suites/Kernel/RT-tests/Hackbench/README_Hackbench.md new file mode 100644 index 00000000..ec4910a9 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/Hackbench/README_Hackbench.md @@ -0,0 +1,229 @@ +# Hackbench (qcom-linux-testkit) + +Hackbench is both a benchmark and a stress test for the Linux kernel scheduler. It creates groups of communicating tasks (threads or processes) via sockets or pipes and measures how long they take to exchange data. + +This test wrapper runs `hackbench` for **N iterations**, captures all output, parses `Time:` samples, and emits KPI lines (mean/min/max and worst-sample), plus a LAVA-friendly `.res` verdict. 
+ +--- + +## Location + +- Test: `Runner/suites/Kernel/RT-tests/Hackbench/run.sh` +- Shared helpers: `Runner/utils/lib_rt.sh` +- Logging/helpers: `Runner/utils/functestlib.sh` + +--- + +## What this test produces + +### Console (examples) + +You will see high-signal context and KPI lines, for example: + +- `Hackbench: uname -a: ...` +- `Hackbench: sched_rt_runtime_us=...` +- `Hackbench: hackbench opts: -s 100 -l 100 -g 10 -f 20 -T` +- `hackbench-mean pass 0.220660 s` +- `hackbench-min pass 0.185000 s` +- `hackbench-max pass 0.272000 s` +- `hackbench-worst pass 0.272000 s` *(worst-sample = max for the run)* + +> Note: On some hackbench versions, the output lines are `Time: <seconds>`. + +### Files + +By default, output is written under: + +- `logs_Hackbench/` (or `OUT_DIR` if overridden) + - `hackbench-output-host.txt` – raw log with all `Time:` samples + - `parsed_hackbench.txt` – parsed KPI lines + - `result.txt` – same KPI lines used for LAVA result submission +- `Hackbench.res` – single-line verdict (`Hackbench PASS|FAIL|SKIP`) + +--- + +## Requirements + +### Mandatory +- `hackbench` binary (either in `PATH` or provided via `--binary`) +- Standard tools: `uname`, `awk`, `sed`, `grep`, `tr`, `head`, `tail`, `mkdir`, `cat`, `sh`, `tee`, `sleep`, `kill`, `date` + +The script uses your testkit’s `check_dependencies` to validate the above. + +### Optional (nice-to-have) +- `ensure_reasonable_clock()` from `functestlib.sh` + If available, it will be used to avoid epoch timestamps (e.g., 1970) in logs. +- Background workload command (`--background-cmd`) to apply system load while measuring. 
+
+---
+
+## Usage
+
+Run from the test folder:
+
+```sh
+cd Runner/suites/Kernel/RT-tests/Hackbench
+./run.sh
+```
+
+### Common examples
+
+Run 200 iterations with threaded mode:
+
+```sh
+./run.sh --iterations 200 --threads true
+```
+
+Run with pipes (instead of sockets):
+
+```sh
+./run.sh --iterations 200 --pipe true
+```
+
+Explicit hackbench binary path:
+
+```sh
+./run.sh --binary /tmp/hackbench --iterations 200
+```
+
+Increase message size / loops / groups:
+
+```sh
+./run.sh --datasize 1024 --loops 200 --grps 20 --fds 20 --iterations 100
+```
+
+Add background workload:
+
+```sh
+./run.sh --background-cmd "sh -c 'while :; do :; done'" --iterations 200
+```
+
+Control progress logging (default: every 50 iterations):
+
+```sh
+./run.sh --iterations 500 --progress-every 25
+```
+
+Verbose mode:
+
+```sh
+./run.sh --verbose
+```
+
+---
+
+## Parameters
+
+The wrapper accepts both **CLI arguments** and **environment variables**.
+If both are set, the CLI argument wins.
+
+### Output control
+- `--out DIR` / `OUT_DIR`
+  Output directory (default: `./logs_Hackbench` under the test path).
+- `--result FILE` / `RESULT_TXT`
+  KPI output file (default: `${OUT_DIR}/result.txt`).
+- `--log FILE` / `TEST_LOG`
+  Raw hackbench log (default: `${OUT_DIR}/hackbench-output-host.txt`).
+
+### Hackbench workload knobs (Linaro-style)
+- `--iterations N` / `ITERATIONS` (default: `1000`; `--iteration` is kept as a deprecated alias)
+- `--target host|kvm` / `TARGET` *(informational label only)*
+- `--datasize BYTES` / `DATASIZE` → `-s`
+- `--loops N` / `LOOPS` → `-l`
+- `--grps N` / `GRPS` → `-g`
+- `--fds N` / `FDS` → `-f`
+- `--pipe true|false` / `PIPE`
+  Adds `-p` when true.
+- `--threads true|false` / `THREADS`
+  Adds `-T` when true. (Default is process mode.)
+
+### Testkit extras
+- `--background-cmd CMD` / `BACKGROUND_CMD`
+  Runs a background workload during the benchmark (best-effort stop on exit).
+- `--binary PATH` / `BINARY`
+  Explicit `hackbench` path.
+- `--progress-every N` / `PROGRESS_EVERY`
+  Progress log cadence (default: `50`).
+- `--verbose` / `VERBOSE=1`
+
+---
+
+## Result parsing and KPIs
+
+The parsing is done by `rt_parse_token_numeric_samples` from `Runner/utils/lib_rt.sh` (with the worst sample emitted via `rt_emit_worst_sample_from_log`).
+
+It extracts all lines like:
+
+```
+Time: 0.210
+```
+
+…and computes:
+
+- `hackbench-mean pass <seconds> s`
+- `hackbench-min pass <seconds> s`
+- `hackbench-max pass <seconds> s`
+- `hackbench-worst pass <seconds> s` *(worst-sample = max)*
+
+These are written to:
+- `${OUT_DIR}/parsed_hackbench.txt`
+- `${OUT_DIR}/result.txt`
+
+---
+
+## LAVA integration
+
+A typical test definition YAML can run this via CLI args:
+
+```yaml
+run:
+  steps:
+    - cd Runner/suites/Kernel/RT-tests/Hackbench
+    - >-
+      ./run.sh
+      --out "${OUT_DIR}"
+      --iterations "${ITERATIONS}"
+      --datasize "${DATASIZE}"
+      --loops "${LOOPS}"
+      --grps "${GRPS}"
+      --fds "${FDS}"
+      --pipe "${PIPE}"
+      --threads "${THREADS}"
+      --background-cmd "${BACKGROUND_CMD}"
+      --binary "${BINARY}"
+      --progress-every "${PROGRESS_EVERY}"
+      $( [ "${VERBOSE}" = "1" ] && echo "--verbose" )
+      || true
+    - ../../../../utils/send-to-lava.sh Hackbench.res
+```
+
+> LAVA exports `params:` variables automatically into the test shell environment.
+> Using CLI args makes the command line explicit and reproducible, matching the Linaro style.
+
+---
+
+## Troubleshooting
+
+### 1) Timestamps show 1970-01-01
+- Your board clock is likely not set.
+- If `ensure_reasonable_clock()` exists in `functestlib.sh`, the script can call it before running.
+- Otherwise, set time via NTP / RTC / manual `date`.
+
+### 2) No KPI lines (mean/min/max)
+- Check that `${OUT_DIR}/hackbench-output-host.txt` contains `Time:` lines.
+- If the hackbench output format differs (some variants use `Time:` with different formatting), update the parser in `lib_rt.sh` accordingly.
+
+### 3) Hackbench not found
+- Provide `--binary /path/to/hackbench`, or ensure `hackbench` is in `PATH`.
+ +### 4) High variance / outliers +- Run with a background workload to characterize worst-case scheduling. +- Increase iterations to stabilize mean. +- Pin CPU frequency governor if needed (platform policy dependent). + +--- + +## Notes + +- The `.res` file is always created for LAVA. +- The script is intended to be POSIX `sh` compatible and CI-friendly. diff --git a/Runner/suites/Kernel/RT-tests/Hackbench/hackbench.yaml b/Runner/suites/Kernel/RT-tests/Hackbench/hackbench.yaml new file mode 100755 index 00000000..82d2aa3f --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/Hackbench/hackbench.yaml @@ -0,0 +1,38 @@ +metadata: + name: Hackbench + format: "Lava-Test Test Definition 1.0" + description: > + Hackbench is both a benchmark and a stress test for the Linux kernel scheduler. + It creates groups of communicating tasks via sockets or pipes and measures the + time taken. This wrapper runs hackbench for N iterations, parses Time samples + into mean/min/max and worst-sample, and emits Hackbench.res. 
+ os: + - linux + scope: + - performance + - preempt-rt + +params: + OUT_DIR: "./logs_Hackbench" + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + + ITERATIONS: "5" + TARGET: "host" + DATASIZE: "100" + LOOPS: "100" + GRPS: "10" + FDS: "20" + PIPE: "false" + THREADS: "false" + + BACKGROUND_CMD: "" + BINARY: "" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/Hackbench + - ./run.sh --out "${OUT_DIR}" --iterations "${ITERATIONS}" --target "${TARGET}" --datasize "${DATASIZE}" --loops "${LOOPS}" --grps "${GRPS}" --fds "${FDS}" --pipe "${PIPE}" --threads "${THREADS}" --background-cmd "${BACKGROUND_CMD}" --binary "${BINARY}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh Hackbench.res diff --git a/Runner/suites/Kernel/RT-tests/Hackbench/run.sh b/Runner/suites/Kernel/RT-tests/Hackbench/run.sh new file mode 100755 index 00000000..6bad8930 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/Hackbench/run.sh @@ -0,0 +1,302 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# Hackbench wrapper for qcom-linux-testkit +# - Runs hackbench ITERATIONS times +# - Captures output to a log file +# - Parses Time lines -> min/mean/max via lib_rt.sh +# - Adds worst-sample Time for quick debug visibility +# - Emits Hackbench.res PASS/FAIL/SKIP +# +# Notes: +# - Always exits 0 (LAVA-friendly). Use Hackbench.res for gating. +# - --iteration is kept as a compatibility alias for --iterations. 
+ +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. "$TOOLS/lib_rt.sh" + +TESTNAME="Hackbench" +test_path=$(find_test_case_by_name "$TESTNAME") +[ -n "$test_path" ] || test_path="$SCRIPT_DIR" + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" +TEST_LOG="${TEST_LOG:-$OUT_DIR/hackbench-output-host.txt}" + +ITERATIONS="${ITERATIONS:-1000}" +TARGET="${TARGET:-host}" +DATASIZE="${DATASIZE:-100}" +LOOPS="${LOOPS:-100}" +GRPS="${GRPS:-10}" +FDS="${FDS:-20}" +PIPE="${PIPE:-false}" +THREADS="${THREADS:-false}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-50}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + --out DIR + --result FILE + --log FILE + --iterations N + --iteration N Deprecated compatibility alias for --iterations + --target host|kvm + --datasize BYTES + --loops N + --grps N + --fds N + --pipe true|false + --threads true|false + --background-cmd CMD + --binary PATH + --progress-every N + --heartbeat-sec N + --verbose +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --log) + shift + TEST_LOG="$1" + ;; + --iterations|--iteration) + shift + ITERATIONS="$1" + ;; + --target) + shift + TARGET="$1" + ;; + --datasize) + shift + DATASIZE="$1" + ;; + 
--loops) + shift + LOOPS="$1" + ;; + --grps) + shift + GRPS="$1" + ;; + --fds) + shift + FDS="$1" + ;; + --pipe) + shift + PIPE="$1" + ;; + --threads) + shift + THREADS="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 + ;; + esac + shift +done + +case "$ITERATIONS" in ''|*[!0-9]*|0) ITERATIONS=1 ;; esac +case "$PROGRESS_EVERY" in ''|*[!0-9]*|0) PROGRESS_EVERY=50 ;; esac +case "$HEARTBEAT_SEC" in ''|*[!0-9]*|0) HEARTBEAT_SEC=10 ;; esac +case "$DATASIZE" in ''|*[!0-9]*) DATASIZE=100 ;; esac +case "$LOOPS" in ''|*[!0-9]*) LOOPS=100 ;; esac +case "$GRPS" in ''|*[!0-9]*) GRPS=10 ;; esac +case "$FDS" in ''|*[!0-9]*) FDS=20 ;; esac + +PARSED="$OUT_DIR/parsed_hackbench.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TEST_LOG" \ + "$PARSED" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run hackbench" + +if ! rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! command -v rt_parse_token_numeric_samples >/dev/null 2>&1; then + log_skip "$TESTNAME: rt_parse_token_numeric_samples missing (lib_rt.sh not loaded?)" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +HB_BIN=$(rt_resolve_binary hackbench "$BINARY" 2>/dev/null || echo "") +if [ -z "$HB_BIN" ] || [ ! 
-x "$HB_BIN" ]; then + log_skip "$TESTNAME: hackbench binary not found/executable (${HB_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +rt_log_common_runtime_env "$TESTNAME" "$HB_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS target=$TARGET" +log_info "$TESTNAME: datasize=$DATASIZE loops=$LOOPS grps=$GRPS fds=$FDS pipe=$PIPE threads=$THREADS" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT TERM +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" "running" + + iter_log="${TEST_LOG}.iter${i}" + + set -- "$HB_BIN" -s "$DATASIZE" -l "$LOOPS" -g "$GRPS" -f "$FDS" + case "$PIPE" in + true|TRUE|1|yes|YES) + set -- "$@" -p + ;; + esac + case "$THREADS" in + true|TRUE|1|yes|YES) + set -- "$@" -T + ;; + esac + + if rt_run_and_capture "$TESTNAME" "$HEARTBEAT_SEC" "$iter_log" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ -r "$iter_log" ]; then + cat "$iter_log" >>"$TEST_LOG" 2>/dev/null || true + rm -f "$iter_log" 2>/dev/null || true + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: hackbench failed rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + break + fi + + i=$((i + 1)) +done + +perf_rt_bg_stop >/dev/null 2>&1 || true +: >"$PARSED" 2>/dev/null || true + +if [ "$overall_fail" -eq 0 ] 2>/dev/null; then + if [ -s "$TEST_LOG" ]; then + if rt_parse_token_numeric_samples "hackbench-time" "$TEST_LOG" "Time:" "s" >"$PARSED" 2>/dev/null; then + cat "$PARSED" >>"$RESULT_TXT" 2>/dev/null || true + rt_emit_worst_sample_from_log "hackbench-worst-sample" "$TEST_LOG" 
"Time:" "s" "$PARSED" "$RESULT_TXT" "$TESTNAME" || true + + while IFS= read -r line; do + [ -n "$line" ] || continue + log_info "$TESTNAME: $line" + done <"$PARSED" + else + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: no complete Time samples collected before interrupt" + else + log_fail "$TESTNAME: unable to parse any Time lines from $TEST_LOG" + overall_fail=1 + fi + fi + else + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: no output collected before interrupt" + else + log_fail "$TESTNAME: hackbench output log is empty: $TEST_LOG" + overall_fail=1 + fi + fi +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0 diff --git a/Runner/suites/Kernel/RT-tests/PI_Stress/README_PI_Stress.md b/Runner/suites/Kernel/RT-tests/PI_Stress/README_PI_Stress.md new file mode 100644 index 00000000..8f452b4a --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PI_Stress/README_PI_Stress.md @@ -0,0 +1,176 @@ +# PI_Stress (rt-tests pi_stress) — qcom-linux-testkit + +This test wraps **rt-tests `pi_stress`** (Priority Inheritance stress) for **qcom-linux-testkit** and LAVA. +It runs one or more `pi_stress` iterations, collects JSON output, parses KPIs **without requiring Python**, and emits a `.res` summary for LAVA gating. + +> **What it measures** +> +> `pi_stress` exercises PI mutexes (priority inheritance) by creating intentional priority-inversion scenarios. +> The JSON output includes an **`inversion` counter** (total inversions observed/generated in that run). With `--iterations 1`, +> you’ll often see min/mean/max all equal (one sample). 
+ +--- + +## Location + +``` +Runner/suites/Kernel/RT-tests/PI_Stress/ +├── run.sh +├── PI_Stress.res # created at runtime +└── logs_PI_Stress/ # created at runtime (default) + ├── pi_stress_iter1.json + ├── parsed_pi_stress.txt + └── result.txt +``` + +--- + +## Requirements + +- Run as **root** (recommended/required for best behavior; `--mlockall` especially). +- `pi_stress` binary available on target, either: + - in `$PATH` (`command -v pi_stress`), or + - provided via `--binary /path/to/pi_stress` +- Common tools: `uname`, `awk`, `sed`, `grep`, `tr`, `head`, `tail`, `mkdir`, `cat`, `sh`, `tee`, `sleep`, `kill`, `date` + +This test uses helpers from: + +- `Runner/utils/functestlib.sh` (logging, deps, background workload helper, clock sanity helper if available) +- `Runner/utils/lib_rt.sh` (rt-tests JSON parsing helpers) + +--- + +## Quick start + +### Run with a custom binary +```sh +cd Runner/suites/Kernel/RT-tests/PI_Stress +./run.sh --binary /tmp/pi_stress --duration 1m +``` + +### Enable mlockall and SCHED_RR threads +```sh +./run.sh --binary /tmp/pi_stress --duration 1m --mlockall true --rr true +``` + +### Multiple iterations +```sh +./run.sh --binary /tmp/pi_stress --duration 1m --iterations 5 +``` + +### Optional: add background workload +```sh +./run.sh --binary /tmp/pi_stress --duration 1m --background-cmd "stress-ng --cpu 4 --timeout 60s" +``` + +--- + +## Command line options (run.sh) + +```text +--out DIR Output directory (default: ./logs_PI_Stress) +--result FILE Result file path (default: <out>/result.txt) + +--duration D pi_stress runtime per iteration (default: 5m) +--iterations N Number of iterations (default: 1) + +--mlockall true|false Enable --mlockall (default: false) +--rr true|false Enable --rr (SCHED_RR) (default: false) + +--background-cmd CMD Optional background workload command (default: empty) +--binary PATH Explicit pi_stress binary path (default: auto-detect) +--verbose Extra logs +-h, --help Show help +``` + +**Notes** +- 
`--mlockall true` may fail if memlock limits are too low; your scripts print memlock(soft/hard) from `/proc/self/limits`. +- `--rr true` switches to SCHED_RR; default is SCHED_FIFO in `pi_stress`. + +--- + +## Outputs + +### Result files +- `PI_Stress.res` + Contains only the **PASS/FAIL/SKIP** summary for LAVA. +- `logs_PI_Stress/result.txt` + Contains parsed KPI lines for LAVA test parsing and artifact collection. +- `logs_PI_Stress/parsed_pi_stress.txt` + Same KPI lines (intermediate), helpful for debugging. +- `logs_PI_Stress/pi_stress_iterN.json` + Raw JSON output from `pi_stress`. + +### Example KPI lines +```text +pi-stress-inversion-min pass 13630990 count +pi-stress-inversion-mean pass 13630990 count +pi-stress-inversion-max pass 13630990 count +pi-stress pass +``` + +> With only one iteration, min/mean/max are identical because there’s one inversion value sample. + +--- + +## LAVA integration + +1) Ensure the repository is available on the DUT (or fetched by your job). +2) Use a LAVA test definition YAML to call `run.sh` and then send `.res` to LAVA. + +Minimal example (Linaro-style CLI mapping): + +```yaml +metadata: + name: pi-stress + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests pi_stress and collect inversion KPI in JSON; parse results without requiring python3." + os: + - linux + scope: + - functional + - preempt-rt + +params: + OUT_DIR: "./logs_PI_Stress" + DURATION: "5m" + ITERATIONS: "1" + MLOCKALL: "false" + RR: "false" + BACKGROUND_CMD: "" + BINARY: "" + +run: + steps: + - cd Runner/suites/Kernel/RT-tests/PI_Stress + - ./run.sh --out "${OUT_DIR}" --duration "${DURATION}" --iterations "${ITERATIONS}" --mlockall "${MLOCKALL}" --rr "${RR}" --background-cmd "${BACKGROUND_CMD}" --binary "${BINARY}" || true + - ../../../utils/send-to-lava.sh PI_Stress.res +``` + +--- + +## Troubleshooting + +### Timestamps show 1970-01-01 +If the system clock is invalid at boot, logs may show epoch time. 
If `functestlib.sh` provides `ensure_reasonable_clock()`, +the script attempts a **local-only** clock sanity step (RTC / kernel build time) before running. + +### pi_stress prints large inversion counts +The `inversion` KPI is a **total counter** per run; large values can be normal depending on CPU and load. +Use multiple iterations to compare distribution across runs. + +### Missing binary +Provide `--binary /path/to/pi_stress` or ensure `pi_stress` is in `$PATH`. + +--- + +## Exit codes + +- The script always exits `0` (LAVA-friendly). PASS/FAIL/SKIP is communicated via `PI_Stress.res`. + +--- + +## Maintainers / notes + +- Keep the implementation POSIX `sh` compatible and ShellCheck-clean. +- Prefer existing helpers in `functestlib.sh` and `lib_rt.sh` instead of adding new ones, unless necessary. diff --git a/Runner/suites/Kernel/RT-tests/PI_Stress/pi_stress.yaml b/Runner/suites/Kernel/RT-tests/PI_Stress/pi_stress.yaml new file mode 100755 index 00000000..3e0c9b83 --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PI_Stress/pi_stress.yaml @@ -0,0 +1,31 @@ +metadata: + name: PI_Stress + format: "Lava-Test Test Definition 1.0" + description: "Run rt-tests pi_stress priority inversion stress test and collect inversion KPIs from JSON without requiring python3." 
+ os: + - linux + scope: + - functional + - preempt-rt + +params: + DURATION: "1m" + MLOCKALL: "false" + RR: "false" + BACKGROUND_CMD: "" + ITERATIONS: "1" + USER_BASELINE: "" + + BINARY: "" + OUT_DIR: "./logs_PI_Stress" + + VERBOSE: "0" + PROGRESS_EVERY: "1" + HEARTBEAT_SEC: "10" + +run: + steps: + - REPO_PATH=$PWD + - cd Runner/suites/Kernel/RT-tests/PI_Stress + - ./run.sh --duration "${DURATION}" --mlockall "${MLOCKALL}" --rr "${RR}" --iterations "${ITERATIONS}" --background-cmd "${BACKGROUND_CMD}" --user-baseline "${USER_BASELINE}" --binary "${BINARY}" --out "${OUT_DIR}" --progress-every "${PROGRESS_EVERY}" --heartbeat-sec "${HEARTBEAT_SEC}" $( [ "${VERBOSE}" = "1" ] && echo "--verbose" ) || true + - $REPO_PATH/Runner/utils/send-to-lava.sh PI_Stress.res diff --git a/Runner/suites/Kernel/RT-tests/PI_Stress/run.sh b/Runner/suites/Kernel/RT-tests/PI_Stress/run.sh new file mode 100755 index 00000000..3f85566b --- /dev/null +++ b/Runner/suites/Kernel/RT-tests/PI_Stress/run.sh @@ -0,0 +1,432 @@ +#!/bin/sh +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. +# SPDX-License-Identifier: BSD-3-Clause +# +# PI_Stress wrapper for qcom-linux-testkit +# - Runs rt-tests pi_stress ITERATIONS times (JSON output) +# - Parses inversion count + pass/fail using lib_rt.sh (no python required) +# - Emits KPI lines to result.txt and summary PASS/FAIL/SKIP to PI_Stress.res +# +# Notes: +# - pi_stress may send SIGTERM when it detects failures; we ignore TERM so the +# wrapper can continue and still collect logs. +# - Always exits 0 (LAVA-friendly). Use PI_Stress.res for gating. 
+ +SCRIPT_DIR="$( + cd "$(dirname "$0")" || exit 1 + pwd +)" + +INIT_ENV="" +SEARCH="$SCRIPT_DIR" +while [ "$SEARCH" != "/" ]; do + if [ -f "$SEARCH/init_env" ]; then + INIT_ENV="$SEARCH/init_env" + break + fi + SEARCH=$(dirname "$SEARCH") +done + +if [ -z "$INIT_ENV" ]; then + echo "[ERROR] Could not find init_env (starting at $SCRIPT_DIR)" >&2 + exit 1 +fi + +if [ -z "${__INIT_ENV_LOADED:-}" ]; then + # shellcheck disable=SC1090 + . "$INIT_ENV" + __INIT_ENV_LOADED=1 +fi + +# shellcheck disable=SC1091 +. "$TOOLS/functestlib.sh" +# shellcheck disable=SC1091 +. "$TOOLS/lib_rt.sh" + +TESTNAME="PI_Stress" +test_path=$(find_test_case_by_name "$TESTNAME") +[ -n "$test_path" ] || test_path="$SCRIPT_DIR" + +RES_FILE="$test_path/${TESTNAME}.res" +OUT_DIR="${OUT_DIR:-$test_path/logs_${TESTNAME}}" +RESULT_TXT="${RESULT_TXT:-$OUT_DIR/result.txt}" + +# params (env/LAVA can override) +DURATION="${DURATION:-5m}" +MLOCKALL="${MLOCKALL:-false}" +RR="${RR:-false}" +BACKGROUND_CMD="${BACKGROUND_CMD:-}" +ITERATIONS="${ITERATIONS:-1}" +USER_BASELINE="${USER_BASELINE:-}" + +# Optional extras +BINARY="${BINARY:-}" +VERBOSE="${VERBOSE:-0}" +PROGRESS_EVERY="${PROGRESS_EVERY:-1}" +HEARTBEAT_SEC="${HEARTBEAT_SEC:-10}" + +usage() { + cat <<EOF +Usage: $0 [OPTIONS] + +Options: + --out DIR Output directory (default: $OUT_DIR) + --result FILE Result file path (default: $RESULT_TXT) + + --duration STR Requested duration (e.g. 10, 1m, 2m30s) + --mlockall BOOL true|false -> add --mlockall (default: $MLOCKALL) + --rr BOOL true|false -> add --rr (default: $RR) + --iterations N Number of iterations (default: $ITERATIONS) + --user-baseline N Optional inversion baseline (count). 
If set, FAIL when + a majority of iterations exceed this baseline + (requires ITERATIONS >= 3) + + --background-cmd CMD Optional background workload + --binary PATH Explicit pi_stress binary path + --progress-every N Log progress every N iterations (default: $PROGRESS_EVERY) + --heartbeat-sec N Heartbeat interval in seconds (default: $HEARTBEAT_SEC) + --verbose Extra logs + -h, --help Help + +Examples: + $0 --binary /tmp/pi_stress --duration 1m --mlockall true --rr false --iterations 3 + $0 --duration 10 --iterations 3 --heartbeat-sec 1 + $0 --iterations 5 --user-baseline 10 +EOF +} + +while [ "$#" -gt 0 ]; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + --out) + shift + OUT_DIR="$1" + ;; + --result) + shift + RESULT_TXT="$1" + ;; + --duration) + shift + DURATION="$1" + ;; + --mlockall) + shift + MLOCKALL="$1" + ;; + --rr) + shift + RR="$1" + ;; + --iterations) + shift + ITERATIONS="$1" + ;; + --user-baseline) + shift + USER_BASELINE="$1" + ;; + --background-cmd) + shift + BACKGROUND_CMD="$1" + ;; + --binary) + shift + BINARY="$1" + ;; + --progress-every) + shift + PROGRESS_EVERY="$1" + ;; + --heartbeat-sec) + shift + HEARTBEAT_SEC="$1" + ;; + --verbose) + VERBOSE=1 + ;; + *) + log_warn "Unknown option: $1" + usage + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 1 + ;; + esac + shift +done + +LOG_PREFIX="$OUT_DIR/pi-stress" +TMP_ONE="$OUT_DIR/tmp_result_one.txt" +ITER_KPI="$OUT_DIR/iter_kpi.txt" +INV_VALUES="$OUT_DIR/inversion_values.txt" +GATE_KPI="$OUT_DIR/gate_kpi.txt" + +rt_prepare_output_layout \ + "$OUT_DIR" \ + "$RESULT_TXT" \ + "$TMP_ONE" \ + "$ITER_KPI" \ + "$INV_VALUES" \ + "$GATE_KPI" + +rt_check_clock_sanity "$TESTNAME" || true + +log_info "------------------- Starting $TESTNAME -------------------" +log_info "$TESTNAME: Checking for the tools required to run pi_stress" + +if ! 
rt_require_common_tools uname awk sed grep tr head tail mkdir cat sh tee sleep kill date sort wc; then + log_skip "$TESTNAME: basic tools missing" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! command -v perf_parse_rt_tests_json >/dev/null 2>&1; then + log_skip "$TESTNAME: perf_parse_rt_tests_json missing (lib_rt.sh not loaded?)" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +if ! command -v rt_require_duration_seconds >/dev/null 2>&1; then + log_skip "$TESTNAME: rt_require_duration_seconds missing (lib_rt.sh not updated/loaded?)" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +case "$ITERATIONS" in + ''|*[!0-9]*|0) + ITERATIONS=1 + ;; +esac + +case "$PROGRESS_EVERY" in + ''|*[!0-9]*|0) + PROGRESS_EVERY=1 + ;; +esac + +case "$HEARTBEAT_SEC" in + ''|*[!0-9]*|0) + HEARTBEAT_SEC=10 + ;; +esac + +PI_BIN=$(rt_resolve_binary pi_stress "$BINARY" 2>/dev/null || echo "") +if [ -z "$PI_BIN" ] || [ ! -x "$PI_BIN" ]; then + log_skip "$TESTNAME: pi_stress binary not found/executable (${PI_BIN:-none})" + echo "$TESTNAME SKIP" >"$RES_FILE" + exit 0 +fi + +PI_DURATION_SECS=$(rt_require_duration_seconds "$TESTNAME" "$DURATION") || { + echo "$TESTNAME FAIL" >"$RES_FILE" + exit 0 +} + +rt_log_common_runtime_env "$TESTNAME" "$PI_BIN" +log_info "$TESTNAME: iterations=$ITERATIONS duration=$DURATION (${PI_DURATION_SECS}s) mlockall=$MLOCKALL rr=$RR" +log_info "$TESTNAME: heartbeat=$HEARTBEAT_SEC seconds" + +if [ "$VERBOSE" -eq 1 ] 2>/dev/null; then + log_info "$TESTNAME: OUT_DIR=$OUT_DIR" + log_info "$TESTNAME: RESULT_TXT=$RESULT_TXT" + log_info "$TESTNAME: BACKGROUND_CMD=${BACKGROUND_CMD:-none}" + log_info "$TESTNAME: USER_BASELINE=${USER_BASELINE:-none}" +fi + +RT_INTERRUPTED=0 +export RT_INTERRUPTED + +trap '' TERM +trap 'rt_handle_int; perf_rt_bg_stop >/dev/null 2>&1 || true' INT +trap 'perf_rt_bg_stop >/dev/null 2>&1 || true' EXIT + +perf_rt_bg_start "$TESTNAME" "$BACKGROUND_CMD" + +overall_fail=0 +fail_count=0 + +baseline_ok=0 +case "$USER_BASELINE" in + '') + 
baseline_ok=0 + ;; + *[!0-9]*) + baseline_ok=0 + ;; + *) + baseline_ok=1 + ;; +esac + +RT_RUN_TARGET_DURATION_SECS="$PI_DURATION_SECS" +export RT_RUN_TARGET_DURATION_SECS + +i=1 +while [ "$i" -le "$ITERATIONS" ] 2>/dev/null; do + rt_log_iteration_progress "$TESTNAME" "$i" "$ITERATIONS" "$PROGRESS_EVERY" + + jsonfile="${LOG_PREFIX}-${i}.json" + stdoutlog="${OUT_DIR}/pi_stress_stdout_iter${i}.log" + + set -- "$PI_BIN" "-q" "-D" "$PI_DURATION_SECS" + + case "$MLOCKALL" in + true|TRUE|1|yes|YES) + set -- "$@" "--mlockall" + ;; + esac + + case "$RR" in + true|TRUE|1|yes|YES) + set -- "$@" "--rr" + ;; + esac + + set -- "$@" "--json=$jsonfile" + + if rt_run_json_iteration "$TESTNAME" "$HEARTBEAT_SEC" "$stdoutlog" "$jsonfile" "$@"; then + rc=$RT_RUN_RC + else + rc=$RT_RUN_RC + fi + + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + if [ -r "$jsonfile" ]; then + : >"$TMP_ONE" 2>/dev/null || true + if perf_parse_rt_tests_json "pi-stress" "$jsonfile" >"$TMP_ONE" 2>/dev/null; then + rt_append_iteration_kpi "$i" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" || true + + inv=$(awk '/^inversion[[:space:]]+pass[[:space:]]+[0-9]+/ { print $3; exit }' "$TMP_ONE" 2>/dev/null) + if [ -n "$inv" ]; then + printf '%s\n' "$inv" >>"$INV_VALUES" 2>/dev/null || true + fi + fi + fi + + log_warn "$TESTNAME: interrupted by user during iteration $i/$ITERATIONS" + break + fi + + if [ "$rc" -ne 0 ] 2>/dev/null; then + log_fail "$TESTNAME: pi_stress exited rc=$rc (iter $i/$ITERATIONS)" + overall_fail=1 + fi + + if [ ! 
-r "$jsonfile" ]; then + log_fail "$TESTNAME: missing json output: $jsonfile" + overall_fail=1 + i=$((i + 1)) + continue + fi + + : >"$TMP_ONE" 2>/dev/null || true + if perf_parse_rt_tests_json "pi-stress" "$jsonfile" >"$TMP_ONE" 2>/dev/null; then + rt_append_iteration_kpi "$i" "$TMP_ONE" "$ITER_KPI" "$RESULT_TXT" || true + + inv=$(awk '/^inversion[[:space:]]+pass[[:space:]]+[0-9]+/ { print $3; exit }' "$TMP_ONE" 2>/dev/null) + if [ -n "$inv" ]; then + printf '%s\n' "$inv" >>"$INV_VALUES" 2>/dev/null || true + fi + + if [ "$baseline_ok" -eq 1 ] 2>/dev/null && [ -n "$inv" ]; then + if [ "$inv" -gt "$USER_BASELINE" ] 2>/dev/null; then + fail_count=$((fail_count + 1)) + fi + fi + else + log_fail "$TESTNAME: failed to parse json (iter $i/$ITERATIONS): $jsonfile" + overall_fail=1 + fi + + i=$((i + 1)) +done + +RT_RUN_TARGET_DURATION_SECS="" +export RT_RUN_TARGET_DURATION_SECS + +perf_rt_bg_stop >/dev/null 2>&1 || true + +if [ -s "$ITER_KPI" ]; then + rt_emit_kpi_block "$TESTNAME" "per-iteration results" "$ITER_KPI" +else + if [ "${RT_INTERRUPTED:-0}" -eq 1 ] 2>/dev/null; then + log_warn "$TESTNAME: no completed iteration data collected before interrupt" + fi +fi + +if [ -s "$INV_VALUES" ]; then + agg=$( + awk ' + BEGIN { min=""; max=""; sum=0; n=0 } + /^[0-9]+$/ { + v=$1 + if (min=="" || v<min) min=v + if (max=="" || v>max) max=v + sum+=v + n++ + } + END { + if (n>0) { + mean=sum/n + if (mean==int(mean)) printf("%d|%d|%d|%d\n", min, int(mean), max, n) + else printf("%d|%.3f|%d|%d\n", min, mean, max, n) + } + } + ' "$INV_VALUES" 2>/dev/null + ) + + if [ -n "$agg" ]; then + inv_min=$(printf '%s' "$agg" | awk -F'|' '{print $1}') + inv_mean=$(printf '%s' "$agg" | awk -F'|' '{print $2}') + inv_max=$(printf '%s' "$agg" | awk -F'|' '{print $3}') + inv_n=$(printf '%s' "$agg" | awk -F'|' '{print $4}') + + echo "pi-stress-inversion-min pass ${inv_min} count" >>"$RESULT_TXT" 2>/dev/null || true + echo "pi-stress-inversion-mean pass ${inv_mean} count" >>"$RESULT_TXT" 2>/dev/null || 
true + echo "pi-stress-inversion-max pass ${inv_max} count" >>"$RESULT_TXT" 2>/dev/null || true + + log_info "$TESTNAME: pi-stress-inversion-min pass ${inv_min} count" + log_info "$TESTNAME: pi-stress-inversion-mean pass ${inv_mean} count" + log_info "$TESTNAME: pi-stress-inversion-max pass ${inv_max} count" + + if [ "$PI_DURATION_SECS" -gt 0 ] 2>/dev/null; then + inv_rate=$( + awk -v inv="$inv_mean" -v sec="$PI_DURATION_SECS" 'BEGIN { + if (sec > 0) printf("%.6f", inv/sec) + else printf("0.000000") + }' 2>/dev/null + ) + + echo "pi-stress-inversion-rate pass ${inv_rate} inv/s" >>"$RESULT_TXT" 2>/dev/null || true + log_info "$TESTNAME: pi-stress-inversion-rate pass ${inv_rate} inv/s" + fi + + if [ "$baseline_ok" -eq 1 ] 2>/dev/null; then + log_info "$TESTNAME: USER_BASELINE=$USER_BASELINE (fail_count=$fail_count over $inv_n runs)" + fi + fi +fi + +if [ "${RT_INTERRUPTED:-0}" -ne 1 ] 2>/dev/null && \ + [ "$baseline_ok" -eq 1 ] 2>/dev/null && \ + [ "$ITERATIONS" -ge 3 ] 2>/dev/null; then + fail_limit=$(((ITERATIONS + 1) / 2)) + : >"$GATE_KPI" 2>/dev/null || true + + echo "inversion-baseline pass ${USER_BASELINE} count" >"$GATE_KPI" + echo "inversion-fail-limit pass ${fail_limit} count" >>"$GATE_KPI" + echo "inversion-fail-count pass ${fail_count} count" >>"$GATE_KPI" + + cat "$GATE_KPI" >>"$RESULT_TXT" 2>/dev/null || true + rt_emit_kpi_block "$TESTNAME" "baseline comparison results" "$GATE_KPI" + + if [ "$fail_count" -ge "$fail_limit" ] 2>/dev/null; then + overall_fail=1 + fi +fi + +rt_emit_interrupt_aware_result "$TESTNAME" "$RES_FILE" "$RESULT_TXT" "$OUT_DIR" "${RT_INTERRUPTED:-0}" "$overall_fail" +exit 0