diff --git a/.github/workflows/sycl-linux-run-tests.yml b/.github/workflows/sycl-linux-run-tests.yml
index 089b6020d257..3a691edb9858 100644
--- a/.github/workflows/sycl-linux-run-tests.yml
+++ b/.github/workflows/sycl-linux-run-tests.yml
@@ -364,14 +364,14 @@ jobs:
         exit $ret
 
-    - name: Run sycl-bench microbenchmarks
+    - name: Run compute-benchmarks
      id: run_benchmarks
      if: inputs.tests_selector == 'benchmark'
      run: ./devops/scripts/benchmarking/benchmark.sh
 
-    - name: Upload sycl-bench microbenchmark results
-      if: inputs.tests_selector == 'benchmark' && steps.run_benchmarks.outcome == 'success'
-      uses: actions/upload-artifact@v4
-      with:
-        name: sycl_benchmark_res_${{ steps.run_benchmarks.outputs.TIMESTAMP }}
-        path: ${{ steps.run_benchmarks.outputs.BENCHMARK_RESULTS }}
-        retention-days: 7
+    # - name: Upload sycl-bench microbenchmark results
+    #   if: inputs.tests_selector == 'benchmark' && steps.run_benchmarks.outcome == 'success'
+    #   uses: actions/upload-artifact@v4
+    #   with:
+    #     name: sycl_benchmark_res_${{ steps.run_benchmarks.outputs.TIMESTAMP }}
+    #     path: ${{ steps.run_benchmarks.outputs.BENCHMARK_RESULTS }}
+    #     retention-days: 7
diff --git a/devops/scripts/benchmarking/benchmark.sh b/devops/scripts/benchmarking/benchmark.sh
index 66c75fcdd8d8..e550fe6892ae 100755
--- a/devops/scripts/benchmarking/benchmark.sh
+++ b/devops/scripts/benchmarking/benchmark.sh
@@ -4,10 +4,15 @@
 #
 # benchmark.sh: Benchmark dpcpp using compute-benchmarks
 #
-# TODO fix
 
 usage () {
     >&2 echo "Usage: $0 [-B ]
   -B  Path to clone and build compute-benchmarks on
+  -p  Path to compute-benchmarks (or directory to build compute-benchmarks in)
+  -r  Git repo url to use for compute-benchmarks origin
+  -b  Git branch to use within compute-benchmarks
+  -f  Compile flags passed into building compute-benchmarks
+  -c  Remove the existing compute-benchmarks build before running
+  -C  Remove the existing compute-benchmarks build and exit
 This script builds and runs benchmarks from compute-benchmarks."
     exit 1
@@ -32,38 +37,38 @@ clone_compute_bench() {
 build_compute_bench() {
     echo "### Building compute-benchmarks ($COMPUTE_BENCH_GIT_REPO:$COMPUTE_BENCH_BRANCH) ###"
     mkdir $COMPUTE_BENCH_PATH/build && cd $COMPUTE_BENCH_PATH/build &&
-    cmake .. -DBUILD_SYCL=ON && cmake --build .
-    compute_bench_build_stat=$?
+    cmake .. -DBUILD_SYCL=ON && cmake --build . $COMPUTE_BENCH_COMPILE_FLAGS
+    #compute_bench_build_stat=$?
     cd -
-    [ "$compute_bench_build_stat" -ne 0 ] && exit $compute_bench_build_stat
+    #[ "$compute_bench_build_stat" -ne 0 ] && exit $compute_bench_build_stat
 }
 
-print_bench_res() {
-    # Usage: print_bench_res
-    if [ ! -s $1 ]; then
-        printf "NO OUTPUT! (Status $2)\n" | tee -a $3
-        return # Do not proceed if file is empty
-    fi
-
-    get_csv_col_index $1 run-time-mean
-    tmp_run_time_mean_i=$tmp_csv_col_i
-    get_csv_col_index $1 run-time-median
-    tmp_run_time_median_i=$tmp_csv_col_i
-    get_csv_col_index $1 run-time-throughput
-    tmp_run_time_throughput_i=$tmp_csv_col_i
-
-    # `sycl-bench` output seems to like inserting the header multiple times.
-    # Here we cache the header to make sure it prints only once:
-    tmp_header_title="$(cat $1 | head -n 1 | sed 's/^\# Benchmark name/benchmark/')"
-    tmp_result="$(cat $1 | grep '^[^\#]')"
-
-    printf "%s\n%s" "$tmp_header_title" "$tmp_result" \
-    | awk -F',' -v me="$tmp_run_time_mean_i" \
-                -v md="$tmp_run_time_median_i" \
-                -v th="$tmp_run_time_throughput_i" \
-        '{printf "%-57s %-13s %-15s %-20s\n", $1, $me, $md, $th }' \
-    | tee -a $3 # Print to summary file
-}
+# print_bench_res() {
+#     # Usage: print_bench_res
+#     if [ ! -s $1 ]; then
+#         printf "NO OUTPUT! (Status $2)\n" | tee -a $3
+#         return # Do not proceed if file is empty
+#     fi
+#
+#     get_csv_col_index $1 run-time-mean
+#     tmp_run_time_mean_i=$tmp_csv_col_i
+#     get_csv_col_index $1 run-time-median
+#     tmp_run_time_median_i=$tmp_csv_col_i
+#     get_csv_col_index $1 run-time-throughput
+#     tmp_run_time_throughput_i=$tmp_csv_col_i
+#
+#     # `sycl-bench` output seems to like inserting the header multiple times.
+#     # Here we cache the header to make sure it prints only once:
+#     tmp_header_title="$(cat $1 | head -n 1 | sed 's/^\# Benchmark name/benchmark/')"
+#     tmp_result="$(cat $1 | grep '^[^\#]')"
+#
+#     printf "%s\n%s" "$tmp_header_title" "$tmp_result" \
+#     | awk -F',' -v me="$tmp_run_time_mean_i" \
+#                 -v md="$tmp_run_time_median_i" \
+#                 -v th="$tmp_run_time_throughput_i" \
+#         '{printf "%-57s %-13s %-15s %-20s\n", $1, $me, $md, $th }' \
+#     | tee -a $3 # Print to summary file
+# }
 
 ###
 STATUS_SUCCESS=0
@@ -102,7 +107,6 @@ check_and_cache() {
 }
 
 process_benchmarks() {
-    TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
     mkdir -p "$PERF_RES_PATH"
 
     echo "### Running and processing selected benchmarks ###"
@@ -126,7 +130,10 @@ process_benchmarks() {
 }
 
 cleanup() {
-    rm -r $COMPUTE_BENCH_PATH
+    echo "### Cleaning up compute-benchmark builds from prior runs ###"
+    rm -rf $COMPUTE_BENCH_PATH
+    #rm -rf $PERF_RES_PATH
+    [ ! -z "$_exit_after_cleanup" ] && exit
 }
 
 load_configs() {
@@ -152,18 +159,32 @@ load_configs() {
     . $BENCHMARK_CI_CONFIG
 }
 
+COMPUTE_BENCH_COMPILE_FLAGS=""
+TIMESTAMP="$(date '+%Y%m%d_%H%M%S')"
+
 load_configs
 
 # CLI overrides to configuration options
-while getopts "p:b:r:" opt; do
+while getopts "p:b:r:f:cC" opt; do
     case $opt in
         p) COMPUTE_BENCH_PATH=$OPTARG ;;
         r) COMPUTE_BENCH_GIT_REPO=$OPTARG ;;
         b) COMPUTE_BENCH_BRANCH=$OPTARG ;;
+        f) COMPUTE_BENCH_COMPILE_FLAGS=$OPTARG ;;
+        # Cleanup status is saved in a var to ensure all arguments are processed before
+        # performing cleanup
+        c) _cleanup=1 ;;
+        C) _cleanup=1 && _exit_after_cleanup=1 ;;
        \?) usage ;;
     esac
 done
 
+if [ -z "$CMPLR_ROOT" ]; then
+    echo "Please set \$CMPLR_ROOT first; it is needed by compute-benchmarks to build."
+    exit 1
+fi
+
+[ ! -z "$_cleanup" ] && cleanup
 [ ! -d "$PERF_RES_PATH" ] && clone_perf_res
 [ ! -d "$COMPUTE_BENCH_PATH" ] && clone_compute_bench
 [ ! -d "$COMPUTE_BENCH_PATH/build" ] && build_compute_bench
diff --git a/devops/scripts/benchmarking/compare.py b/devops/scripts/benchmarking/compare.py
index 998793825633..e2e9b12b0a8e 100644
--- a/devops/scripts/benchmarking/compare.py
+++ b/devops/scripts/benchmarking/compare.py
@@ -1,3 +1,4 @@
+import os
 import csv
 import sys
 from pathlib import Path
@@ -6,8 +7,18 @@
 
 # TODO compare_to(metric) instead?
 def compare_to_median(test_name: str, test_csv_path: str):
+    median_path = f"{common.PERF_RES_PATH}/{test_name}/{test_name}-median.csv"
+
+    if not os.path.isfile(test_csv_path):
+        print("Invalid test file provided: " + test_csv_path)
+        exit(-1)
+    if not os.path.isfile(median_path):
+        print(f"Median file for test {test_name} not found at {median_path}.\n" +
+              "Please build the median using the aggregate workflow.")
+        exit(-1)
+
     median = dict()
-    with open(f"{common.PERF_RES_PATH}/{test_name}/{test_name}-median.csv", mode='r') as median_csv:
+    with open(median_path, mode='r') as median_csv:
         for stat in csv.DictReader(median_csv):
             median[stat["TestCase"]] = \
                 { metric: float(stat[metric]) for metric in common.metrics_variance }
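
Note: a minimal invocation sketch for the updated benchmark.sh, based on the options added above. The CMPLR_ROOT value, clone path, repo URL, and branch below are illustrative placeholders, not values taken from this patch; the script itself exits early when CMPLR_ROOT is unset.

    # Placeholder DPC++/oneAPI environment; benchmark.sh refuses to run without CMPLR_ROOT.
    export CMPLR_ROOT=/opt/intel/oneapi/compiler/latest

    # -p: where to clone and build compute-benchmarks (placeholder path)
    # -r / -b: override the compute-benchmarks origin URL and branch (placeholders)
    # -f: extra flags appended to `cmake --build .`
    # -c: remove a stale compute-benchmarks build first (-C would remove it and exit)
    ./devops/scripts/benchmarking/benchmark.sh \
        -p "$HOME/compute-benchmarks" \
        -r https://github.com/intel/compute-benchmarks.git \
        -b master \
        -f "-j 8" \
        -c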