From ced5b696a8d116339dc56259147f43925d037bb7 Mon Sep 17 00:00:00 2001 From: Nathaniel Ruiz Nowell Date: Wed, 9 Dec 2020 17:40:04 -0800 Subject: [PATCH] Add performance tests to AWS SDK Extension Co-authored-by: Aaron Abbott --- .github/workflows/test.yml | 29 ++++++++++++++- CONTRIBUTING.md | 25 +++++++++++++ .../test_benchmark_aws_xray_format.py | 37 +++++++++++++++++++ .../test_benchmark_aws_xray_ids_generator.py | 25 +++++++++++++ tox.ini | 1 + 5 files changed, 115 insertions(+), 2 deletions(-) create mode 100644 sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/propagation/test_benchmark_aws_xray_format.py create mode 100644 sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/test_benchmark_aws_xray_ids_generator.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index aa3059dfa8..348914c92f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,6 +17,7 @@ jobs: py37: 3.7 py38: 3.8 pypy3: pypy3 + RUN_MATRIX_COMBINATION: ${{ matrix.python-version }}-${{ matrix.package }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false # ensures the entire test matrix is run, even if one permutation fails @@ -53,9 +54,33 @@ jobs: uses: actions/cache@v2 with: path: .tox - key: tox-cache-${{ matrix.python-version }}-${{ matrix.package }}-${{ matrix.os }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }} + key: tox-cache-${{ env.RUN_MATRIX_COMBINATION }}-${{ hashFiles('tox.ini', 'dev-requirements.txt') }} - name: run tox - run: tox -f ${{ matrix.python-version }}-${{ matrix.package }} + run: tox -f ${{ matrix.python-version }}-${{ matrix.package }} -- --benchmark-json=${{ env.RUN_MATRIX_COMBINATION }}-benchmark.json + - name: Find and merge benchmarks + # TODO: Add at least one benchmark to every package type to remove this + if: matrix.package == 'sdkextension' + run: >- + jq -s '.[0].benchmarks = ([.[].benchmarks] | add) + | if .[0].benchmarks == null 
then null else .[0] end' + **/**/tests/*${{ matrix.package }}*-benchmark.json > output.json + - name: Report on benchmark results + # TODO: Add at least one benchmark to every package type to remove this + if: matrix.package == 'sdkextension' + uses: rhysd/github-action-benchmark@v1 + with: + name: OpenTelemetry Python Benchmarks - Python ${{ env[matrix.python-version] }} - ${{ matrix.package }} + tool: pytest + output-file-path: output.json + github-token: ${{ secrets.GITHUB_TOKEN }} + # Alert with a commit comment on possible performance regression + alert-threshold: 200% + comment-always: true + fail-on-alert: true + # Make a commit on `gh-pages` with benchmarks from previous step + auto-push: ${{ github.ref == 'refs/heads/master' }} + gh-pages-branch: master + benchmark-data-dir-path: benchmarks misc: strategy: fail-fast: false diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c3176b51d3..322631c577 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,6 +49,31 @@ See [`tox.ini`](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/master/tox.ini) for more detail on available tox commands. +### Benchmarks + +Performance progression of benchmarks for packages distributed by OpenTelemetry Python can be viewed as a [graph of throughput vs commit history](https://open-telemetry.github.io/opentelemetry-python-contrib/benchmarks/index.html). From this page, you can download a JSON file with the performance results. + +Running the `tox` tests also runs the performance tests if any are available. Benchmarking tests are done with `pytest-benchmark` and they output a table with results to the console.
+ +To write benchmarks, simply use the [pytest benchmark fixture](https://pytest-benchmark.readthedocs.io/en/latest/usage.html#usage) like the following: + +```python +def test_simple_start_span(benchmark): + def benchmark_start_as_current_span(span_name, attribute_num): + span = tracer.start_span( + span_name, + attributes={"count": attribute_num}, + ) + span.end() + + benchmark(benchmark_start_as_current_span, "benchmarkedSpan", 42) +``` + +Make sure the test file is under the `tests/performance/benchmarks/` folder of +the package it is benchmarking and further has a path that corresponds to the +file in the package it is testing. Make sure that the file name begins with +`test_benchmark_`. (e.g. `sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/propagation/test_benchmark_aws_xray_format.py`) + ## Pull Requests ### How to Send Pull Requests diff --git a/sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/propagation/test_benchmark_aws_xray_format.py b/sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/propagation/test_benchmark_aws_xray_format.py new file mode 100644 index 0000000000..cf4c42dcf7 --- /dev/null +++ b/sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/propagation/test_benchmark_aws_xray_format.py @@ -0,0 +1,37 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from requests.structures import CaseInsensitiveDict + +from opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format import ( + TRACE_HEADER_KEY, + AwsXRayFormat, +) +from opentelemetry.trace.propagation.textmap import DictGetter + +XRAY_PROPAGATOR = AwsXRayFormat() + + +def test_extract_single_header(benchmark): + benchmark( + XRAY_PROPAGATOR.extract, + DictGetter(), + { + TRACE_HEADER_KEY: "bdb5b63237ed38aea578af665aa5aa60-00000000000000000c32d953d73ad225" + }, + ) + + +def test_inject_empty_context(benchmark): + benchmark(XRAY_PROPAGATOR.inject, CaseInsensitiveDict.__setitem__, {}) diff --git a/sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/test_benchmark_aws_xray_ids_generator.py b/sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/test_benchmark_aws_xray_ids_generator.py new file mode 100644 index 0000000000..3ac091facc --- /dev/null +++ b/sdk-extension/opentelemetry-sdk-extension-aws/tests/performance/benchmarks/trace/test_benchmark_aws_xray_ids_generator.py @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from opentelemetry.sdk.extension.aws.trace import AwsXRayIdsGenerator + +ids_generator = AwsXRayIdsGenerator() + + +def test_generate_xray_trace_id(benchmark): + benchmark(ids_generator.generate_trace_id) + + +def test_generate_xray_span_id(benchmark): + benchmark(ids_generator.generate_span_id) diff --git a/tox.ini b/tox.ini index 96208d7f54..3b1516a073 100644 --- a/tox.ini +++ b/tox.ini @@ -141,6 +141,7 @@ envlist = deps = -c dev-requirements.txt test: pytest + test: pytest-benchmark coverage: pytest coverage: pytest-cov elasticsearch2: elasticsearch-dsl>=2.0,<3.0