From 7997e79bb9dd352c6dafbbd1a92a3634f43500de Mon Sep 17 00:00:00 2001
From: Tao Chen
Date: Wed, 31 Jul 2024 09:04:10 -0700
Subject: [PATCH 1/5] Python: Vertex AI Connector (#7481)

### Motivation and Context

We are adding a new AI connector for talking to the Gemini API on Google Vertex AI: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/overview

Note that this is for Gemini hosted on Vertex AI. Google also offers Gemini access on their Google AI platform: https://cloud.google.com/vertex-ai/generative-ai/docs/migrate/migrate-google-ai. We will have another connector alongside this one for accessing Gemini on Google AI.

### Description

The new connector contains 3 AI services:
- Chat completion
- Text completion
- Text embedding

TODO: Function calling.

> Function calling is not included in this PR to reduce the size of the PR.

### Contribution Checklist

- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone :smile:

---------

Co-authored-by: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
---
 .../workflows/python-integration-tests.yml | 22 ++
 python/.cspell.json | 4 +-
 python/poetry.lock | 321 +++++++++++++++++-
 python/pyproject.toml | 5 +-
 .../connectors/ai/google/README.md | 50 +++
 .../services/google_ai_chat_completion.py | 2 +-
 .../ai/google/google_ai/services/utils.py | 22 +-
 .../connectors/ai/google/shared_utils.py | 23 ++
 .../ai/google/vertex_ai/__init__.py | 0
 .../ai/google/vertex_ai/services/__init__.py | 0
 .../ai/google/vertex_ai/services/utils.py | 74 ++++
 .../vertex_ai/services/vertex_ai_base.py | 12 +
 .../services/vertex_ai_chat_completion.py | 263 ++++++++++++++
 .../services/vertex_ai_text_completion.py | 206 +++++++++++
 .../services/vertex_ai_text_embedding.py | 106 ++++++
 .../vertex_ai_prompt_execution_settings.py | 59 ++++
 .../ai/google/vertex_ai/vertex_ai_settings.py | 34 ++
 .../completions/test_chat_completions.py | 41 ++-
 .../completions/test_text_completion.py | 12 +
 .../test_vertex_ai_embedding_service.py | 28 ++
 .../tests/unit/connectors/google/conftest.py | 22 ++
 .../connectors/google/google_ai/conftest.py | 19 --
 .../test_google_ai_text_completion.py | 6 +-
 .../services/test_google_ai_text_embedding.py | 2 +-
 .../services/test_google_ai_utils.py | 23 --
 .../connectors/google/test_shared_utils.py | 29 ++
 .../connectors/google/vertex_ai/conftest.py | 127 +++++++
 .../test_vertex_ai_chat_completion.py | 169 +++++++++
 .../test_vertex_ai_text_completion.py | 128 +++++++
 .../services/test_vertex_ai_text_embedding.py | 165 +++++++++
 .../services/test_vertex_ai_utils.py | 78 +++++
 31 files changed, 1978 insertions(+), 74 deletions(-)
 create mode 100644 python/semantic_kernel/connectors/ai/google/README.md
 create mode 100644 python/semantic_kernel/connectors/ai/google/shared_utils.py
 create mode 100644 python/semantic_kernel/connectors/ai/google/vertex_ai/__init__.py
 create mode 100644 python/semantic_kernel/connectors/ai/google/vertex_ai/services/__init__.py
 create mode 100644 python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py
 create mode 100644 python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_base.py
 create mode 100644
python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py create mode 100644 python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_completion.py create mode 100644 python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_embedding.py create mode 100644 python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py create mode 100644 python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_settings.py create mode 100644 python/tests/integration/embeddings/test_vertex_ai_embedding_service.py create mode 100644 python/tests/unit/connectors/google/conftest.py create mode 100644 python/tests/unit/connectors/google/test_shared_utils.py create mode 100644 python/tests/unit/connectors/google/vertex_ai/conftest.py create mode 100644 python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_chat_completion.py create mode 100644 python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_completion.py create mode 100644 python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_embedding.py create mode 100644 python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_utils.py diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index 1189ed8098af..c2b3b34ca376 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -81,6 +81,13 @@ jobs: run: | ollama pull ${{ vars.OLLAMA_MODEL }} ollama list + - name: Google auth + uses: google-github-actions/auth@v2 + with: + project_id: ${{ vars.VERTEX_AI_PROJECT_ID }} + credentials_json: ${{ secrets.VERTEX_AI_SERVICE_ACCOUNT_KEY }} + - name: Set up gcloud + uses: google-github-actions/setup-gcloud@v2 - name: Run Integration Tests id: run_tests shell: bash @@ -113,6 +120,9 @@ jobs: GOOGLE_AI_GEMINI_MODEL_ID: ${{ vars.GOOGLE_AI_GEMINI_MODEL_ID }} GOOGLE_AI_EMBEDDING_MODEL_ID: ${{ vars.GOOGLE_AI_EMBEDDING_MODEL_ID }} GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} + VERTEX_AI_PROJECT_ID: ${{ vars.VERTEX_AI_PROJECT_ID }} + VERTEX_AI_GEMINI_MODEL_ID: ${{ vars.VERTEX_AI_GEMINI_MODEL_ID }} + VERTEX_AI_EMBEDDING_MODEL_ID: ${{ vars.VERTEX_AI_EMBEDDING_MODEL_ID }} run: | if ${{ matrix.os == 'ubuntu-latest' }}; then docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest @@ -167,6 +177,15 @@ jobs: ollama pull ${{ vars.OLLAMA_MODEL }} ollama list + - name: Google auth + uses: google-github-actions/auth@v2 + with: + project_id: ${{ vars.VERTEX_AI_PROJECT_ID }} + credentials_json: ${{ secrets.VERTEX_AI_SERVICE_ACCOUNT_KEY }} + + - name: Set up gcloud + uses: google-github-actions/setup-gcloud@v2 + - name: Run Integration Tests id: run_tests shell: bash @@ -199,6 +218,9 @@ jobs: GOOGLE_AI_GEMINI_MODEL_ID: ${{ vars.GOOGLE_AI_GEMINI_MODEL_ID }} GOOGLE_AI_EMBEDDING_MODEL_ID: ${{ vars.GOOGLE_AI_EMBEDDING_MODEL_ID }} GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} + VERTEX_AI_PROJECT_ID: ${{ vars.VERTEX_AI_PROJECT_ID }} + VERTEX_AI_GEMINI_MODEL_ID: ${{ vars.VERTEX_AI_GEMINI_MODEL_ID }} + VERTEX_AI_EMBEDDING_MODEL_ID: ${{ vars.VERTEX_AI_EMBEDDING_MODEL_ID }} run: | if ${{ matrix.os == 'ubuntu-latest' }}; then docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest diff --git a/python/.cspell.json b/python/.cspell.json index 00961beae80c..3b3b3c06d526 100644 --- a/python/.cspell.json +++ b/python/.cspell.json @@ -45,6 +45,8 @@ 
"generativeai", "genai", "protos", - "endregion" + "endregion", + "vertexai", + "aiplatform" ] } \ No newline at end of file diff --git a/python/poetry.lock b/python/poetry.lock index 979b5a2cb51f..5f2acbb0f0b4 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1068,6 +1068,17 @@ idna = ["idna (>=3.6)"] trio = ["trio (>=0.23)"] wmi = ["wmi (>=1.5.1)"] +[[package]] +name = "docstring-parser" +version = "0.16" +description = "Parse Python docstrings in reST, Google and Numpydoc format" +optional = false +python-versions = ">=3.6,<4.0" +files = [ + {file = "docstring_parser-0.16-py3-none-any.whl", hash = "sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637"}, + {file = "docstring_parser-0.16.tar.gz", hash = "sha256:538beabd0af1e2db0146b6bd3caa526c35a34d61af9fd2887f3a8a27a739aa6e"}, +] + [[package]] name = "email-validator" version = "2.1.1" @@ -1445,6 +1456,224 @@ files = [ google-auth = "*" httplib2 = ">=0.19.0" +[[package]] +name = "google-cloud-aiplatform" +version = "1.60.0" +description = "Vertex AI API client library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "google-cloud-aiplatform-1.60.0.tar.gz", hash = "sha256:782c7f1ec0e77a7c7daabef3b65bfd506ed2b4b1dc2186753c43cd6faf8dd04e"}, + {file = "google_cloud_aiplatform-1.60.0-py2.py3-none-any.whl", hash = "sha256:5f14159c9575f4b46335027e3ceb8fa57bd5eaa76a07f858105b8c6c034ec0d6"}, +] + +[package.dependencies] +docstring-parser = "<1" +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.8.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<3.0.0dev" +google-cloud-bigquery = ">=1.15.0,<3.20.0 || >3.20.0,<4.0.0dev" +google-cloud-resource-manager = ">=1.3.3,<3.0.0dev" +google-cloud-storage = ">=1.32.0,<3.0.0dev" +packaging = ">=14.3" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" +pydantic = "<3" +shapely = "<3.0.0dev" + +[package.extras] +autologging = ["mlflow (>=1.27.0,<=2.1.1)"] +cloud-profiler = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"] +datasets = ["pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)"] +endpoint = ["requests (>=2.28.1)"] +full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "tqdm (>=4.23.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)"] +langchain = ["langchain (>=0.1.16,<0.3)", "langchain-core (<0.3)", "langchain-google-vertexai (<2)", "openinference-instrumentation-langchain (>=0.1.19,<0.2)", "tenacity (<=8.3)"] +langchain-testing = ["absl-py", "cloudpickle 
(>=3.0,<4.0)", "google-cloud-trace (<2)", "langchain (>=0.1.16,<0.3)", "langchain-core (<0.3)", "langchain-google-vertexai (<2)", "openinference-instrumentation-langchain (>=0.1.19,<0.2)", "opentelemetry-exporter-gcp-trace (<2)", "opentelemetry-sdk (<2)", "pydantic (>=2.6.3,<3)", "pytest-xdist", "tenacity (<=8.3)"] +lit = ["explainable-ai-sdk (>=1.0.0)", "lit-nlp (==0.4.0)", "pandas (>=1.0.0)", "tensorflow (>=2.3.0,<3.0.0dev)"] +metadata = ["numpy (>=1.15.0)", "pandas (>=1.0.0)"] +pipelines = ["pyyaml (>=5.3.1,<7)"] +prediction = ["docker (>=5.0.3)", "fastapi (>=0.71.0,<=0.109.1)", "httpx (>=0.23.0,<0.25.0)", "starlette (>=0.17.1)", "uvicorn[standard] (>=0.16.0)"] +preview = ["cloudpickle (<3.0)", "google-cloud-logging (<4.0)"] +private-endpoints = ["requests (>=2.28.1)", "urllib3 (>=1.21.1,<1.27)"] +rapid-evaluation = ["pandas (>=1.0.0,<2.2.0)", "tqdm (>=4.23.0)"] +ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "setuptools (<70.0.0)"] +ray-testing = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "ray[train] (==2.9.3)", "scikit-learn", "setuptools (<70.0.0)", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"] +reasoningengine = ["cloudpickle (>=3.0,<4.0)", "google-cloud-trace (<2)", "opentelemetry-exporter-gcp-trace (<2)", "opentelemetry-sdk (<2)", "pydantic (>=2.6.3,<3)"] +tensorboard = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"] +testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nltk", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "sentencepiece (>=0.2.0)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "tqdm (>=4.23.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"] +tokenization = ["sentencepiece (>=0.2.0)"] +vizier = ["google-vizier (>=0.1.6)"] +xai = ["tensorflow (>=2.3.0,<3.0.0dev)"] + +[[package]] +name = "google-cloud-bigquery" +version = "3.25.0" +description = "Google BigQuery API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-bigquery-3.25.0.tar.gz", hash = 
"sha256:5b2aff3205a854481117436836ae1403f11f2594e6810a98886afd57eda28509"}, + {file = "google_cloud_bigquery-3.25.0-py2.py3-none-any.whl", hash = "sha256:7f0c371bc74d2a7fb74dacbc00ac0f90c8c2bec2289b51dd6685a275873b1ce9"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<3.0.0dev" +google-cloud-core = ">=1.6.0,<3.0.0dev" +google-resumable-media = ">=0.6.0,<3.0dev" +packaging = ">=20.0.0" +python-dateutil = ">=2.7.2,<3.0dev" +requests = ">=2.21.0,<3.0.0dev" + +[package.extras] +all = ["Shapely (>=1.8.4,<3.0.0dev)", "db-dtypes (>=0.3.0,<2.0.0dev)", "geopandas (>=0.9.0,<1.0dev)", "google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "importlib-metadata (>=1.0.0)", "ipykernel (>=6.0.0)", "ipython (>=7.23.1,!=8.1.0)", "ipywidgets (>=7.7.0)", "opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)", "pandas (>=1.1.0)", "proto-plus (>=1.15.0,<2.0.0dev)", "protobuf (>=3.19.5,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev)", "pyarrow (>=3.0.0)", "tqdm (>=4.7.4,<5.0.0dev)"] +bigquery-v2 = ["proto-plus (>=1.15.0,<2.0.0dev)", "protobuf (>=3.19.5,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev)"] +bqstorage = ["google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "pyarrow (>=3.0.0)"] +geopandas = ["Shapely (>=1.8.4,<3.0.0dev)", "geopandas (>=0.9.0,<1.0dev)"] +ipython = ["ipykernel (>=6.0.0)", "ipython (>=7.23.1,!=8.1.0)"] +ipywidgets = ["ipykernel (>=6.0.0)", "ipywidgets (>=7.7.0)"] +opentelemetry = ["opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)"] +pandas = ["db-dtypes (>=0.3.0,<2.0.0dev)", "importlib-metadata (>=1.0.0)", "pandas (>=1.1.0)", "pyarrow (>=3.0.0)"] +tqdm = ["tqdm (>=4.7.4,<5.0.0dev)"] + +[[package]] +name = "google-cloud-core" +version = "2.4.1" +description = "Google Cloud API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"}, + {file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"}, +] + +[package.dependencies] +google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev" +google-auth = ">=1.25.0,<3.0dev" + +[package.extras] +grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"] + +[[package]] +name = "google-cloud-resource-manager" +version = "1.12.4" +description = "Google Cloud Resource Manager API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-cloud-resource-manager-1.12.4.tar.gz", hash = "sha256:3eda914a925e92465ef80faaab7e0f7a9312d486dd4e123d2c76e04bac688ff0"}, + {file = "google_cloud_resource_manager-1.12.4-py2.py3-none-any.whl", hash = "sha256:0b6663585f7f862166c0fb4c55fdda721fce4dc2dc1d5b52d03ee4bf2653a85f"}, +] + +[package.dependencies] +google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev" +grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 
|| >4.21.5,<6.0.0dev" + +[[package]] +name = "google-cloud-storage" +version = "2.18.0" +description = "Google Cloud Storage API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_cloud_storage-2.18.0-py2.py3-none-any.whl", hash = "sha256:e8e1a9577952143c3fca8163005ecfadd2d70ec080fa158a8b305000e2c22fbb"}, + {file = "google_cloud_storage-2.18.0.tar.gz", hash = "sha256:0aa3f7c57f3632f81b455d91558d2b27ada96eee2de3aaa17f689db1470d9578"}, +] + +[package.dependencies] +google-api-core = ">=2.15.0,<3.0.0dev" +google-auth = ">=2.26.1,<3.0dev" +google-cloud-core = ">=2.3.0,<3.0dev" +google-crc32c = ">=1.0,<2.0dev" +google-resumable-media = ">=2.6.0" +requests = ">=2.18.0,<3.0.0dev" + +[package.extras] +protobuf = ["protobuf (<6.0.0dev)"] +tracing = ["opentelemetry-api (>=1.1.0)"] + +[[package]] +name = "google-crc32c" +version = "1.5.0" +description = "A python wrapper of the C library 'Google CRC32C'" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-crc32c-1.5.0.tar.gz", hash = "sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7"}, + {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13"}, + {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b"}, + {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e"}, + {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c"}, + {file = "google_crc32c-1.5.0-cp310-cp310-win32.whl", hash = "sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee"}, + {file = "google_crc32c-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289"}, + {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273"}, + {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438"}, + {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906"}, + {file = 
"google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183"}, + {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd"}, + {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c"}, + {file = "google_crc32c-1.5.0-cp311-cp311-win32.whl", hash = "sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709"}, + {file = "google_crc32c-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-win32.whl", hash = "sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94"}, + {file = "google_crc32c-1.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740"}, + {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8"}, + {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d"}, + {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894"}, + {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a"}, + {file = "google_crc32c-1.5.0-cp38-cp38-win32.whl", hash = "sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4"}, + {file = "google_crc32c-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c"}, + {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7"}, + {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57"}, + {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210"}, + {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd"}, + {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96"}, + {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61"}, + {file = "google_crc32c-1.5.0-cp39-cp39-win32.whl", hash = "sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c"}, + {file = "google_crc32c-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178"}, + {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462"}, + {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31"}, + {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93"}, +] + +[package.extras] +testing = ["pytest"] + [[package]] name = "google-generativeai" version = "0.7.2" @@ -1468,6 +1697,24 @@ typing-extensions = "*" [package.extras] dev = ["Pillow", "absl-py", "black", "ipython", "nose2", "pandas", "pytype", "pyyaml"] +[[package]] +name = "google-resumable-media" +version = "2.7.1" +description = "Utilities for Google Media Downloads and Resumable Uploads" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-resumable-media-2.7.1.tar.gz", hash = "sha256:eae451a7b2e2cdbaaa0fd2eb00cc8a1ee5e95e16b55597359cbc3d27d7d90e33"}, + {file = "google_resumable_media-2.7.1-py2.py3-none-any.whl", hash = "sha256:103ebc4ba331ab1bfdac0250f8033627a2cd7cde09e7ccff9181e31ba4315b2c"}, +] + +[package.dependencies] +google-crc32c = ">=1.0,<2.0dev" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "google-auth (>=1.22.0,<2.0dev)"] +requests = ["requests (>=2.18.0,<3.0.0dev)"] + [[package]] name = "googleapis-common-protos" version = "1.63.0" @@ -1480,11 +1727,28 @@ files = [ ] [package.dependencies] +grpcio = {version = ">=1.44.0,<2.0.0.dev0", optional = true, markers = "extra == \"grpc\""} protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" [package.extras] grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] +[[package]] +name = "grpc-google-iam-v1" +version = "0.13.1" +description = "IAM API client library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpc-google-iam-v1-0.13.1.tar.gz", hash = "sha256:3ff4b2fd9d990965e410965253c0da6f66205d5a8291c4c31c6ebecca18a9001"}, + {file = "grpc_google_iam_v1-0.13.1-py2.py3-none-any.whl", hash = "sha256:c3e86151a981811f30d5e7330f271cee53e73bb87755e88cc3b6f0c7b5fe374e"}, +] + +[package.dependencies] +googleapis-common-protos = {version = ">=1.56.0,<2.0.0dev", extras = ["grpc"]} +grpcio = ">=1.44.0,<2.0.0dev" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev" + [[package]] name = "grpcio" version = "1.63.0" @@ -3248,7 +3512,6 @@ description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = 
"nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_aarch64.whl", hash = "sha256:004186d5ea6a57758fd6d57052a123c73a4815adf365eb8dd6a85c9eaa7535ff"}, {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d9714f27c1d0f0895cd8915c07a87a1d0029a0aa36acaf9156952ec2a8a12189"}, {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-win_amd64.whl", hash = "sha256:c3401dc8543b52d3a8158007a0c1ab4e9c768fcbd24153a48c86972102197ddd"}, ] @@ -5756,6 +6019,58 @@ files = [ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +[[package]] +name = "shapely" +version = "2.0.5" +description = "Manipulation and analysis of geometric objects" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shapely-2.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89d34787c44f77a7d37d55ae821f3a784fa33592b9d217a45053a93ade899375"}, + {file = "shapely-2.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:798090b426142df2c5258779c1d8d5734ec6942f778dab6c6c30cfe7f3bf64ff"}, + {file = "shapely-2.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45211276900c4790d6bfc6105cbf1030742da67594ea4161a9ce6812a6721e68"}, + {file = "shapely-2.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e119444bc27ca33e786772b81760f2028d930ac55dafe9bc50ef538b794a8e1"}, + {file = "shapely-2.0.5-cp310-cp310-win32.whl", hash = "sha256:9a4492a2b2ccbeaebf181e7310d2dfff4fdd505aef59d6cb0f217607cb042fb3"}, + {file = "shapely-2.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:1e5cb5ee72f1bc7ace737c9ecd30dc174a5295fae412972d3879bac2e82c8fae"}, + {file = "shapely-2.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5bbfb048a74cf273db9091ff3155d373020852805a37dfc846ab71dde4be93ec"}, + {file = "shapely-2.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93be600cbe2fbaa86c8eb70656369f2f7104cd231f0d6585c7d0aa555d6878b8"}, + {file = "shapely-2.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f8e71bb9a46814019f6644c4e2560a09d44b80100e46e371578f35eaaa9da1c"}, + {file = "shapely-2.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5251c28a29012e92de01d2e84f11637eb1d48184ee8f22e2df6c8c578d26760"}, + {file = "shapely-2.0.5-cp311-cp311-win32.whl", hash = "sha256:35110e80070d664781ec7955c7de557456b25727a0257b354830abb759bf8311"}, + {file = "shapely-2.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c6b78c0007a34ce7144f98b7418800e0a6a5d9a762f2244b00ea560525290c9"}, + {file = "shapely-2.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:03bd7b5fa5deb44795cc0a503999d10ae9d8a22df54ae8d4a4cd2e8a93466195"}, + {file = "shapely-2.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:2ff9521991ed9e201c2e923da014e766c1aa04771bc93e6fe97c27dcf0d40ace"}, + {file = "shapely-2.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b65365cfbf657604e50d15161ffcc68de5cdb22a601bbf7823540ab4918a98d"}, + {file = "shapely-2.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21f64e647a025b61b19585d2247137b3a38a35314ea68c66aaf507a1c03ef6fe"}, + {file = "shapely-2.0.5-cp312-cp312-win32.whl", hash = "sha256:3ac7dc1350700c139c956b03d9c3df49a5b34aaf91d024d1510a09717ea39199"}, + {file = "shapely-2.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:30e8737983c9d954cd17feb49eb169f02f1da49e24e5171122cf2c2b62d65c95"}, + {file = "shapely-2.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ff7731fea5face9ec08a861ed351734a79475631b7540ceb0b66fb9732a5f529"}, + {file = "shapely-2.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff9e520af0c5a578e174bca3c18713cd47a6c6a15b6cf1f50ac17dc8bb8db6a2"}, + {file = "shapely-2.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49b299b91557b04acb75e9732645428470825061f871a2edc36b9417d66c1fc5"}, + {file = "shapely-2.0.5-cp37-cp37m-win32.whl", hash = "sha256:b5870633f8e684bf6d1ae4df527ddcb6f3895f7b12bced5c13266ac04f47d231"}, + {file = "shapely-2.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:401cb794c5067598f50518e5a997e270cd7642c4992645479b915c503866abed"}, + {file = "shapely-2.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e91ee179af539100eb520281ba5394919067c6b51824e6ab132ad4b3b3e76dd0"}, + {file = "shapely-2.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8af6f7260f809c0862741ad08b1b89cb60c130ae30efab62320bbf4ee9cc71fa"}, + {file = "shapely-2.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5456dd522800306ba3faef77c5ba847ec30a0bd73ab087a25e0acdd4db2514f"}, + {file = "shapely-2.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b714a840402cde66fd7b663bb08cacb7211fa4412ea2a209688f671e0d0631fd"}, + {file = "shapely-2.0.5-cp38-cp38-win32.whl", hash = "sha256:7e8cf5c252fac1ea51b3162be2ec3faddedc82c256a1160fc0e8ddbec81b06d2"}, + {file = "shapely-2.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:4461509afdb15051e73ab178fae79974387f39c47ab635a7330d7fee02c68a3f"}, + {file = "shapely-2.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7545a39c55cad1562be302d74c74586f79e07b592df8ada56b79a209731c0219"}, + {file = "shapely-2.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4c83a36f12ec8dee2066946d98d4d841ab6512a6ed7eb742e026a64854019b5f"}, + {file = "shapely-2.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89e640c2cd37378480caf2eeda9a51be64201f01f786d127e78eaeff091ec897"}, + {file = "shapely-2.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06efe39beafde3a18a21dde169d32f315c57da962826a6d7d22630025200c5e6"}, + {file = "shapely-2.0.5-cp39-cp39-win32.whl", hash = "sha256:8203a8b2d44dcb366becbc8c3d553670320e4acf0616c39e218c9561dd738d92"}, + {file = "shapely-2.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:7fed9dbfbcfec2682d9a047b9699db8dcc890dfca857ecba872c42185fc9e64e"}, + {file = "shapely-2.0.5.tar.gz", hash = "sha256:bff2366bc786bfa6cb353d6b47d0443c570c32776612e527ee47b6df63fcfe32"}, +] + +[package.dependencies] +numpy = ">=1.14,<3" + +[package.extras] +docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +test = ["pytest", "pytest-cov"] + [[package]] name = 
"shellingham" version = "1.5.4" @@ -7043,7 +7358,7 @@ test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-it all = ["azure-ai-inference", "azure-core", "azure-cosmos", "azure-identity", "azure-search-documents", "chromadb", "ipykernel", "milvus", "mistralai", "motor", "ollama", "pinecone-client", "psycopg", "pyarrow", "pymilvus", "qdrant-client", "redis", "sentence-transformers", "transformers", "usearch", "weaviate-client"] azure = ["azure-ai-inference", "azure-core", "azure-cosmos", "azure-identity", "azure-search-documents"] chromadb = ["chromadb"] -google = ["google-generativeai"] +google = ["google-cloud-aiplatform", "google-generativeai"] hugging-face = ["sentence-transformers", "transformers"] milvus = ["milvus", "pymilvus"] mistralai = ["mistralai"] @@ -7060,4 +7375,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = "^3.10,<3.13" -content-hash = "c29fb1fca8d1da50daf3538331cce8f45bbdc9949d0699feaced0fe049787251" +content-hash = "b06f07ed9eb865636ad02f5e67be234042aba91da5090093248b45039369094a" diff --git a/python/pyproject.toml b/python/pyproject.toml index 96557cf8aa7a..b8490b0dedf9 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -48,6 +48,7 @@ azure-cosmos = { version = "^4.7.0", optional = true} # chroma chromadb = { version = ">=0.4.13,<0.6.0", optional = true} # google +google-cloud-aiplatform = { version = "^1.60.0", optional = true} google-generativeai = { version = "^0.7.2", optional = true} # hugging face transformers = { version = "^4.28.1", extras=["torch"], optional = true} @@ -99,6 +100,7 @@ azure-core = "^1.28.0" azure-cosmos = "^4.7.0" mistralai = "^0.4.1" ollama = "^0.2.1" +google-cloud-aiplatform = "^1.60.0" google-generativeai = "^0.7.2" transformers = { version = "^4.28.1", extras=["torch"]} sentence-transformers = "^2.2.2" @@ -117,6 +119,7 @@ msgraph-sdk = "^1.2.0" # chroma chromadb = ">=0.4.13,<0.6.0" # google +google-cloud-aiplatform = "^1.60.0" google-generativeai = "^0.7.2" # hugging face transformers = { version = "^4.28.1", extras=["torch"]} @@ -150,7 +153,7 @@ all = ["transformers", "sentence-transformers", "qdrant-client", "chromadb", "py azure = ["azure-ai-inference", "azure-search-documents", "azure-core", "azure-identity", "azure-cosmos", "msgraph-sdk"] chromadb = ["chromadb"] -google = ["google-generativeai"] +google = ["google-cloud-aiplatform", "google-generativeai"] hugging_face = ["transformers", "sentence-transformers"] milvus = ["pymilvus", "milvus"] mistralai = ["mistralai"] diff --git a/python/semantic_kernel/connectors/ai/google/README.md b/python/semantic_kernel/connectors/ai/google/README.md new file mode 100644 index 000000000000..febfe59ff94f --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/README.md @@ -0,0 +1,50 @@ +# Google - Gemini + +Gemini models are Google's large language models. Semantic Kernel provides two connectors to access these models from Google Cloud. + +## Google AI + +You can access the Gemini API from Google AI Studio. This mode of access is for quick prototyping as it relies on API keys. + +Follow [these instructions](https://cloud.google.com/docs/authentication/api-keys) to create an API key. + +Once you have an API key, you can start using Gemini models in SK using the `google_ai` connector. Example: + +```Python +kernel = Kernel() +kernel.add_service( + GoogleAIChatCompletion( + gemini_model_id="gemini-1.5-flash", + api_key="...", + ) +) +... 
+```
+
+> Alternatively, you can use a .env file to store the model ID and API key.
+
+## Vertex AI
+
+Google also offers access to Gemini through its Vertex AI platform. Vertex AI provides a more complete, end-to-end solution for building enterprise AI applications. You can read more about it [here](https://cloud.google.com/vertex-ai/generative-ai/docs/migrate/migrate-google-ai).
+
+This mode of access requires a Google Cloud service account. Follow these [instructions](https://cloud.google.com/vertex-ai/generative-ai/docs/migrate/migrate-google-ai) to create a Google Cloud project if you don't have one already. Remember the `project id`, as it is required to access the models.
+
+Follow the steps below to set up your environment to use the Vertex AI API:
+
+- [Install the gcloud CLI](https://cloud.google.com/sdk/docs/install)
+- [Initialize the gcloud CLI](https://cloud.google.com/sdk/docs/initializing)
+
+Once your project is created and your environment is set up, you can start using Gemini models in SK with the `vertex_ai` connector. Example:
+
+```Python
+kernel = Kernel()
+kernel.add_service(
+    VertexAIChatCompletion(
+        project_id="...",
+        gemini_model_id="gemini-1.5-flash",
+    )
+)
+...
+```
+
+> Alternatively, you can use a .env file to store the model ID and project ID.
diff --git a/python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py b/python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py
index 8f928e05059a..8ebb106a4b99 100644
--- a/python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py
+++ b/python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py
@@ -16,11 +16,11 @@
 )
 from semantic_kernel.connectors.ai.google.google_ai.services.google_ai_base import GoogleAIBase
 from semantic_kernel.connectors.ai.google.google_ai.services.utils import (
-    filter_system_message,
     finish_reason_from_google_ai_to_semantic_kernel,
     format_assistant_message,
     format_user_message,
 )
+from semantic_kernel.connectors.ai.google.shared_utils import filter_system_message
 from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent
 from semantic_kernel.contents.utils.author_role import AuthorRole
 from semantic_kernel.contents.utils.finish_reason import FinishReason
diff --git a/python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py b/python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py
index b1eb6aa1bc57..7c7d3517d515 100644
--- a/python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py
+++ b/python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py
@@ -4,11 +4,9 @@
 
 from google.generativeai.protos import Blob, Candidate, Part
 
-from semantic_kernel.contents.chat_history import ChatHistory
 from semantic_kernel.contents.chat_message_content import ChatMessageContent
 from semantic_kernel.contents.image_content import ImageContent
 from semantic_kernel.contents.text_content import TextContent
-from semantic_kernel.contents.utils.author_role import AuthorRole
 from semantic_kernel.contents.utils.finish_reason import FinishReason as SemanticKernelFinishReason
 from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError
 
@@ -34,24 +32,6 @@ def finish_reason_from_google_ai_to_semantic_kernel(
     return None
 
-
-def filter_system_message(chat_history: ChatHistory) -> str | None:
-    """Filter the first system message from the chat history.
- - If there are multiple system messages, raise an error. - If there are no system messages, return None. - """ - if len([message for message in chat_history if message.role == AuthorRole.SYSTEM]) > 1: - raise ServiceInvalidRequestError( - "Multiple system messages in chat history. Only one system message is expected." - ) - - for message in chat_history: - if message.role == AuthorRole.SYSTEM: - return message.content - - return None - - def format_user_message(message: ChatMessageContent) -> list[Part]: """Format a user message to the expected object for the client. @@ -72,7 +52,7 @@ def format_user_message(message: ChatMessageContent) -> list[Part]: if item.data_uri: parts.append(Part(inline_data=Blob(mime_type=item.mime_type, data=item.data))) else: - # The Google AI API doesn't support image from an arbitrary URI: + # The Google AI API doesn't support images from arbitrary URIs: # https://github.com/google-gemini/generative-ai-python/issues/357 raise ServiceInvalidRequestError( "ImageContent without data_uri in User message while formatting chat history for Google AI" diff --git a/python/semantic_kernel/connectors/ai/google/shared_utils.py b/python/semantic_kernel/connectors/ai/google/shared_utils.py new file mode 100644 index 000000000000..e898c9a8f3e1 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/shared_utils.py @@ -0,0 +1,23 @@ +# Copyright (c) Microsoft. All rights reserved. + +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError + + +def filter_system_message(chat_history: ChatHistory) -> str | None: + """Filter the first system message from the chat history. + + If there are multiple system messages, raise an error. + If there are no system messages, return None. + """ + if len([message for message in chat_history if message.role == AuthorRole.SYSTEM]) > 1: + raise ServiceInvalidRequestError( + "Multiple system messages in chat history. Only one system message is expected." + ) + + for message in chat_history: + if message.role == AuthorRole.SYSTEM: + return message.content + + return None diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/__init__.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/__init__.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py new file mode 100644 index 000000000000..5ff800986115 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py @@ -0,0 +1,74 @@ +# Copyright (c) Microsoft. All rights reserved. 
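+"""Helper functions for the Vertex AI services.
+
+These helpers format Semantic Kernel chat messages into ``Part`` objects for the
+Vertex AI client and map Vertex AI finish reasons to Semantic Kernel finish
+reasons (best effort, as the enums are not identical).
+"""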
+
+from google.cloud.aiplatform_v1beta1.types.content import Blob, Candidate, Part
+
+from semantic_kernel.contents.chat_message_content import ChatMessageContent
+from semantic_kernel.contents.image_content import ImageContent
+from semantic_kernel.contents.text_content import TextContent
+from semantic_kernel.contents.utils.finish_reason import FinishReason as SemanticKernelFinishReason
+from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError
+
+
+def finish_reason_from_vertex_ai_to_semantic_kernel(
+    finish_reason: Candidate.FinishReason,
+) -> SemanticKernelFinishReason | None:
+    """Convert a Vertex AI FinishReason to a Semantic Kernel FinishReason.
+
+    This is best effort and may not cover all cases as the enums are not identical.
+    """
+    if finish_reason == Candidate.FinishReason.STOP:
+        return SemanticKernelFinishReason.STOP
+
+    if finish_reason == Candidate.FinishReason.MAX_TOKENS:
+        return SemanticKernelFinishReason.LENGTH
+
+    if finish_reason == Candidate.FinishReason.SAFETY:
+        return SemanticKernelFinishReason.CONTENT_FILTER
+
+    return None
+
+
+def format_user_message(message: ChatMessageContent) -> list[Part]:
+    """Format a user message to the expected object for the client.
+
+    Args:
+        message: The user message.
+
+    Returns:
+        The formatted user message as a list of parts.
+    """
+    if not any(isinstance(item, ImageContent) for item in message.items):
+        return [Part(text=message.content)]
+
+    parts: list[Part] = []
+    for item in message.items:
+        if isinstance(item, TextContent):
+            parts.append(Part(text=item.text))
+        elif isinstance(item, ImageContent):
+            if item.data_uri:
+                parts.append(Part(inline_data=Blob(mime_type=item.mime_type, data=item.data)))
+            else:
+                # Only images provided as inline data (a data URI) are supported here.
+                raise ServiceInvalidRequestError(
+                    "ImageContent without data_uri in User message while formatting chat history for Vertex AI"
+                )
+        else:
+            raise ServiceInvalidRequestError(
+                "Unsupported item type in User message while formatting chat history for Vertex AI"
+                f" Inference: {type(item)}"
+            )
+
+    return parts
+
+
+def format_assistant_message(message: ChatMessageContent) -> list[Part]:
+    """Format an assistant message to the expected object for the client.
+
+    Args:
+        message: The assistant message.
+
+    Returns:
+        The formatted assistant message as a list of parts.
+    """
+    return [Part(text=message.content)]
diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_base.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_base.py
new file mode 100644
index 000000000000..e17b1994424d
--- /dev/null
+++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_base.py
@@ -0,0 +1,12 @@
+# Copyright (c) Microsoft. All rights reserved.
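+"""Shared base class for the Vertex AI services.
+
+It holds the validated ``VertexAISettings`` so the chat completion, text
+completion, and text embedding services can share one settings object.
+"""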
+ +from abc import ABC + +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.kernel_pydantic import KernelBaseModel + + +class VertexAIBase(KernelBaseModel, ABC): + """Vertex AI Service.""" + + service_settings: VertexAISettings diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py new file mode 100644 index 000000000000..00dd251a0a9d --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py @@ -0,0 +1,263 @@ +# Copyright (c) Microsoft. All rights reserved. + +import sys +from collections.abc import AsyncGenerator, AsyncIterable +from typing import Any + +import vertexai +from google.cloud.aiplatform_v1beta1.types.content import Content +from pydantic import ValidationError +from vertexai.generative_models import Candidate, GenerationResponse, GenerativeModel + +from semantic_kernel.connectors.ai.google.shared_utils import filter_system_message +from semantic_kernel.connectors.ai.google.vertex_ai.services.utils import ( + finish_reason_from_vertex_ai_to_semantic_kernel, + format_assistant_message, + format_user_message, +) +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_base import VertexAIBase +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + +from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase + + +class VertexAIChatCompletion(VertexAIBase, ChatCompletionClientBase): + """Google Vertex AI Chat Completion Service.""" + + def __init__( + self, + project_id: str | None = None, + gemini_model_id: str | None = None, + service_id: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initialize the Google Vertex AI Chat Completion Service. + + If no arguments are provided, the service will attempt to load the settings from the environment. + The following environment variables are used: + - VERTEX_AI_GEMINI_MODEL_ID + - VERTEX_AI_PROJECT_ID + + Args: + project_id (str): The Google Cloud project ID. + gemini_model_id (str): The Gemini model ID. + service_id (str): The Vertex AI service ID. + env_file_path (str): The path to the environment file. + env_file_encoding (str): The encoding of the environment file. 
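+
+        Raises:
+            ServiceInitializationError: If the settings fail validation or the
+                Gemini model ID is missing.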
+ """ + try: + vertex_ai_settings = VertexAISettings.create( + project_id=project_id, + gemini_model_id=gemini_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as e: + raise ServiceInitializationError(f"Failed to validate Vertex AI settings: {e}") from e + if not vertex_ai_settings.gemini_model_id: + raise ServiceInitializationError("The Vertex AI Gemini model ID is required.") + + super().__init__( + ai_model_id=vertex_ai_settings.gemini_model_id, + service_id=service_id or vertex_ai_settings.gemini_model_id, + service_settings=vertex_ai_settings, + ) + + # region Non-streaming + @override + async def get_chat_message_contents( + self, + chat_history: ChatHistory, + settings: "PromptExecutionSettings", + **kwargs: Any, + ) -> list[ChatMessageContent]: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAIChatPromptExecutionSettings) # nosec + + return await self._send_chat_request(chat_history, settings) + + async def _send_chat_request( + self, chat_history: ChatHistory, settings: VertexAIChatPromptExecutionSettings + ) -> list[ChatMessageContent]: + """Send a chat request to the Vertex AI service.""" + vertexai.init(project=self.service_settings.project_id) + model = GenerativeModel( + self.service_settings.gemini_model_id, + system_instruction=filter_system_message(chat_history), + ) + + response: GenerationResponse = await model.generate_content_async( + contents=self._prepare_chat_history_for_request(chat_history), + generation_config=settings.prepare_settings_dict(), + ) + + return [self._create_chat_message_content(response, candidate) for candidate in response.candidates] + + def _create_chat_message_content(self, response: GenerationResponse, candidate: Candidate) -> ChatMessageContent: + """Create a chat message content object. + + Args: + response: The response from the service. + candidate: The candidate from the response. + + Returns: + A chat message content object. + """ + # Best effort conversion of finish reason. The raw value will be available in metadata. 
+ finish_reason: FinishReason | None = finish_reason_from_vertex_ai_to_semantic_kernel(candidate.finish_reason) + response_metadata = self._get_metadata_from_response(response) + response_metadata.update(self._get_metadata_from_candidate(candidate)) + + return ChatMessageContent( + ai_model_id=self.ai_model_id, + role=AuthorRole.ASSISTANT, + content=candidate.content.parts[0].text, + inner_content=response, + finish_reason=finish_reason, + metadata=response_metadata, + ) + + # endregion + + # region Streaming + @override + async def get_streaming_chat_message_contents( + self, + chat_history: ChatHistory, + settings: "PromptExecutionSettings", + **kwargs: Any, + ) -> AsyncGenerator[list[StreamingChatMessageContent], Any]: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAIChatPromptExecutionSettings) # nosec + + async_generator = self._send_chat_streaming_request(chat_history, settings) + + async for messages in async_generator: + yield messages + + async def _send_chat_streaming_request( + self, + chat_history: ChatHistory, + settings: VertexAIChatPromptExecutionSettings, + ) -> AsyncGenerator[list[StreamingChatMessageContent], Any]: + """Send a streaming chat request to the Vertex AI service.""" + vertexai.init(project=self.service_settings.project_id) + model = GenerativeModel( + self.service_settings.gemini_model_id, + system_instruction=filter_system_message(chat_history), + ) + + response: AsyncIterable[GenerationResponse] = await model.generate_content_async( + contents=self._prepare_chat_history_for_request(chat_history), + generation_config=settings.prepare_settings_dict(), + stream=True, + ) + + async for chunk in response: + yield [self._create_streaming_chat_message_content(chunk, candidate) for candidate in chunk.candidates] + + def _create_streaming_chat_message_content( + self, + chunk: GenerationResponse, + candidate: Candidate, + ) -> StreamingChatMessageContent: + """Create a streaming chat message content object. + + Args: + chunk: The response from the service. + candidate: The candidate from the response. + + Returns: + A streaming chat message content object. + """ + # Best effort conversion of finish reason. The raw value will be available in metadata. + finish_reason: FinishReason | None = finish_reason_from_vertex_ai_to_semantic_kernel(candidate.finish_reason) + response_metadata = self._get_metadata_from_response(chunk) + response_metadata.update(self._get_metadata_from_candidate(candidate)) + + return StreamingChatMessageContent( + ai_model_id=self.ai_model_id, + role=AuthorRole.ASSISTANT, + choice_index=candidate.index, + content=candidate.content.parts[0].text, + inner_content=chunk, + finish_reason=finish_reason, + metadata=response_metadata, + ) + + # endregion + + @override + def _prepare_chat_history_for_request( + self, + chat_history: ChatHistory, + role_key: str = "role", + content_key: str = "content", + ) -> list[Content]: + chat_request_messages: list[Content] = [] + + for message in chat_history.messages: + if message.role == AuthorRole.SYSTEM: + # Skip system messages since they are not part of the chat request. + # System message will be provided as system_instruction in the model. 
+ continue + if message.role == AuthorRole.USER: + chat_request_messages.append(Content(role="user", parts=format_user_message(message))) + elif message.role == AuthorRole.ASSISTANT: + chat_request_messages.append(Content(role="model", parts=format_assistant_message(message))) + else: + raise ValueError(f"Unsupported role: {message.role}") + + return chat_request_messages + + def _get_metadata_from_response(self, response: GenerationResponse) -> dict[str, Any]: + """Get metadata from the response. + + Args: + response: The response from the service. + + Returns: + A dictionary containing metadata. + """ + return { + "prompt_feedback": response.prompt_feedback, + "usage": response.usage_metadata, + } + + def _get_metadata_from_candidate(self, candidate: Candidate) -> dict[str, Any]: + """Get metadata from the candidate. + + Args: + candidate: The candidate from the response. + + Returns: + A dictionary containing metadata. + """ + return { + "index": candidate.index, + "finish_reason": candidate.finish_reason, + "safety_ratings": candidate.safety_ratings, + } + + @override + def get_prompt_execution_settings_class( + self, + ) -> type["PromptExecutionSettings"]: + """Get the request settings class.""" + return VertexAIChatPromptExecutionSettings diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_completion.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_completion.py new file mode 100644 index 000000000000..4cc9ba8da8a8 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_completion.py @@ -0,0 +1,206 @@ +# Copyright (c) Microsoft. All rights reserved. + + +import sys +from collections.abc import AsyncGenerator, AsyncIterable +from typing import Any + +import vertexai +from pydantic import ValidationError +from vertexai.generative_models import Candidate, GenerationResponse, GenerativeModel + +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_base import VertexAIBase +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAITextPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase +from semantic_kernel.contents.streaming_text_content import StreamingTextContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + + +class VertexAITextCompletion(VertexAIBase, TextCompletionClientBase): + """Vertex AI Text Completion Client.""" + + def __init__( + self, + project_id: str | None = None, + gemini_model_id: str | None = None, + service_id: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initialize the Google Vertex AI Text Completion Service. + + If no arguments are provided, the service will attempt to load the settings from the environment. + The following environment variables are used: + - VERTEX_AI_GEMINI_MODEL_ID + - VERTEX_AI_PROJECT_ID + + Args: + project_id (str): The Google Cloud project ID. + gemini_model_id (str): The Gemini model ID. 
+ service_id (str): The Vertex AI service ID. + env_file_path (str): The path to the environment file. + env_file_encoding (str): The encoding of the environment file. + """ + try: + vertex_ai_settings = VertexAISettings.create( + project_id=project_id, + gemini_model_id=gemini_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as e: + raise ServiceInitializationError(f"Failed to validate Vertex AI settings: {e}") from e + if not vertex_ai_settings.gemini_model_id: + raise ServiceInitializationError("The Vertex AI Gemini model ID is required.") + + super().__init__( + ai_model_id=vertex_ai_settings.gemini_model_id, + service_id=service_id or vertex_ai_settings.gemini_model_id, + service_settings=vertex_ai_settings, + ) + + # region Non-streaming + @override + async def get_text_contents( + self, + prompt: str, + settings: "PromptExecutionSettings", + ) -> list[TextContent]: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAITextPromptExecutionSettings) # nosec + + return await self._send_request(prompt, settings) + + async def _send_request(self, prompt: str, settings: VertexAITextPromptExecutionSettings) -> list[TextContent]: + """Send a text generation request to the Vertex AI service.""" + vertexai.init(project=self.service_settings.project_id) + model = GenerativeModel(self.service_settings.gemini_model_id) + + response: GenerationResponse = await model.generate_content_async( + contents=prompt, + generation_config=settings.prepare_settings_dict(), + ) + + return [self._create_text_content(response, candidate) for candidate in response.candidates] + + def _create_text_content(self, response: GenerationResponse, candidate: Candidate) -> TextContent: + """Create a text content object. + + Args: + response: The response from the service. + candidate: The candidate from the response. + + Returns: + A text content object. 
+ """ + response_metadata = self._get_metadata_from_response(response) + response_metadata.update(self._get_metadata_from_candidate(candidate)) + + return TextContent( + ai_model_id=self.ai_model_id, + text=candidate.content.parts[0].text, + inner_content=response, + metadata=response_metadata, + ) + + # endregion + + # region Streaming + @override + async def get_streaming_text_contents( + self, + prompt: str, + settings: "PromptExecutionSettings", + ) -> AsyncGenerator[list["StreamingTextContent"], Any]: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAITextPromptExecutionSettings) # nosec + + async_generator = self._send_streaming_request(prompt, settings) + + async for text_contents in async_generator: + yield text_contents + + async def _send_streaming_request( + self, prompt: str, settings: VertexAITextPromptExecutionSettings + ) -> AsyncGenerator[list[StreamingTextContent], Any]: + """Send a text generation request to the Vertex AI service.""" + vertexai.init(project=self.service_settings.project_id) + model = GenerativeModel(self.service_settings.gemini_model_id) + + response: AsyncIterable[GenerationResponse] = await model.generate_content_async( + contents=prompt, + generation_config=settings.prepare_settings_dict(), + stream=True, + ) + + async for chunk in response: + yield [self._create_streaming_text_content(chunk, candidate) for candidate in chunk.candidates] + + def _create_streaming_text_content(self, chunk: GenerationResponse, candidate: Candidate) -> StreamingTextContent: + """Create a streaming text content object. + + Args: + chunk: The response from the service. + candidate: The candidate from the response. + + Returns: + A streaming text content object. + """ + response_metadata = self._get_metadata_from_response(chunk) + response_metadata.update(self._get_metadata_from_candidate(candidate)) + + return StreamingTextContent( + ai_model_id=self.ai_model_id, + choice_index=candidate.index, + text=candidate.content.parts[0].text, + inner_content=chunk, + metadata=response_metadata, + ) + + # endregion + + def _get_metadata_from_response(self, response: GenerationResponse) -> dict[str, Any]: + """Get metadata from the response. + + Args: + response: The response from the service. + + Returns: + A dictionary containing metadata. + """ + return { + "prompt_feedback": response.prompt_feedback, + "usage": response.usage_metadata, + } + + def _get_metadata_from_candidate(self, candidate: Candidate) -> dict[str, Any]: + """Get metadata from the candidate. + + Args: + candidate: The candidate from the response. + + Returns: + A dictionary containing metadata. + """ + return { + "index": candidate.index, + "finish_reason": candidate.finish_reason, + "safety_ratings": candidate.safety_ratings, + } + + @override + def get_prompt_execution_settings_class( + self, + ) -> type["PromptExecutionSettings"]: + """Get the request settings class.""" + return VertexAITextPromptExecutionSettings diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_embedding.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_embedding.py new file mode 100644 index 000000000000..71d7c649b1bf --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_embedding.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft. All rights reserved. 
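Reviewer note, illustrative only and not part of the patch: a minimal usage sketch of the `VertexAITextCompletion` service defined above. It assumes `VERTEX_AI_PROJECT_ID` and `VERTEX_AI_GEMINI_MODEL_ID` are set in the environment (or a .env file) and that Google Cloud application default credentials are available.

```python
# Illustrative sketch, not part of this patch. Assumes the VERTEX_AI_* env vars
# and gcloud application default credentials are configured.
import asyncio

from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_completion import (
    VertexAITextCompletion,
)
from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
    VertexAITextPromptExecutionSettings,
)


async def main() -> None:
    service = VertexAITextCompletion()
    settings = VertexAITextPromptExecutionSettings(max_output_tokens=128, temperature=0.2)

    # Non-streaming: one TextContent per returned candidate.
    results = await service.get_text_contents("Write a haiku about the ocean.", settings)
    print(results[0].text)

    # Streaming: each iteration yields a list of StreamingTextContent chunks.
    async for chunks in service.get_streaming_text_contents("Count to five.", settings):
        print(chunks[0].text, end="")


asyncio.run(main())
```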
+
+import sys
+from typing import Any
+
+import vertexai
+from numpy import array, ndarray
+from pydantic import ValidationError
+from vertexai.language_models import TextEmbedding, TextEmbeddingModel
+
+from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import EmbeddingGeneratorBase
+from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_base import VertexAIBase
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
+    VertexAIEmbeddingPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
+
+if sys.version_info >= (3, 12):
+    from typing import override  # pragma: no cover
+else:
+    from typing_extensions import override  # pragma: no cover
+
+
+class VertexAITextEmbedding(VertexAIBase, EmbeddingGeneratorBase):
+    """Vertex AI Text Embedding Service."""
+
+    def __init__(
+        self,
+        project_id: str | None = None,
+        embedding_model_id: str | None = None,
+        service_id: str | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+    ) -> None:
+        """Initialize the Google Vertex AI Text Embedding Service.
+
+        If no arguments are provided, the service will attempt to load the settings from the environment.
+        The following environment variables are used:
+        - VERTEX_AI_EMBEDDING_MODEL_ID
+        - VERTEX_AI_PROJECT_ID
+
+        Args:
+            project_id (str): The Google Cloud project ID.
+            embedding_model_id (str): The embedding model ID.
+            service_id (str): The Vertex AI service ID.
+            env_file_path (str): The path to the environment file.
+            env_file_encoding (str): The encoding of the environment file.
+ """ + try: + vertex_ai_settings = VertexAISettings.create( + project_id=project_id, + embedding_model_id=embedding_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as e: + raise ServiceInitializationError(f"Failed to validate Vertex AI settings: {e}") from e + if not vertex_ai_settings.embedding_model_id: + raise ServiceInitializationError("The Vertex AI embedding model ID is required.") + + super().__init__( + ai_model_id=vertex_ai_settings.embedding_model_id, + service_id=service_id or vertex_ai_settings.embedding_model_id, + service_settings=vertex_ai_settings, + ) + + @override + async def generate_embeddings( + self, + texts: list[str], + settings: "PromptExecutionSettings | None" = None, + **kwargs: Any, + ) -> ndarray: + raw_embeddings = await self.generate_raw_embeddings(texts, settings, **kwargs) + return array(raw_embeddings) + + @override + async def generate_raw_embeddings( + self, + texts: list[str], + settings: "PromptExecutionSettings | None" = None, + **kwargs: Any, + ) -> list[list[float]]: + if not settings: + settings = VertexAIEmbeddingPromptExecutionSettings() + else: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAIEmbeddingPromptExecutionSettings) # nosec + + vertexai.init(project=self.service_settings.project_id) + model = TextEmbeddingModel.from_pretrained(self.service_settings.embedding_model_id) + response: list[TextEmbedding] = await model.get_embeddings_async( + texts, + **settings.prepare_settings_dict(), + ) + + return [text_embedding.values for text_embedding in response] + + @override + def get_prompt_execution_settings_class( + self, + ) -> type["PromptExecutionSettings"]: + """Get the request settings class.""" + return VertexAIEmbeddingPromptExecutionSettings diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py new file mode 100644 index 000000000000..fb2501079666 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft. All rights reserved. 
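Reviewer note, illustrative only and not part of the patch: a usage sketch of the `VertexAITextEmbedding` service above, assuming `VERTEX_AI_PROJECT_ID` and `VERTEX_AI_EMBEDDING_MODEL_ID` are configured.

```python
# Illustrative sketch, not part of this patch. Assumes the VERTEX_AI_* env vars
# are configured.
import asyncio

from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_embedding import (
    VertexAITextEmbedding,
)
from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
    VertexAIEmbeddingPromptExecutionSettings,
)


async def main() -> None:
    service = VertexAITextEmbedding()
    settings = VertexAIEmbeddingPromptExecutionSettings(output_dimensionality=256)

    # generate_embeddings returns a numpy ndarray; generate_raw_embeddings
    # returns the underlying list[list[float]] instead.
    embeddings = await service.generate_embeddings(["hello", "world"], settings=settings)
    print(embeddings.shape)  # e.g. (2, 256)


asyncio.run(main())
```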
+
+import sys
+from typing import Any, Literal
+
+from pydantic import Field
+
+if sys.version_info >= (3, 12):
+    from typing import override  # pragma: no cover
+else:
+    from typing_extensions import override  # pragma: no cover
+
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+
+
+class VertexAIPromptExecutionSettings(PromptExecutionSettings):
+    """Vertex AI Prompt Execution Settings."""
+
+    stop_sequences: list[str] | None = Field(None, max_length=5)
+    response_mime_type: Literal["text/plain", "application/json"] | None = None
+    response_schema: Any | None = None
+    candidate_count: int | None = Field(None, ge=1)
+    max_output_tokens: int | None = Field(None, ge=1)
+    temperature: float | None = Field(None, ge=0.0, le=2.0)
+    top_p: float | None = None
+    top_k: int | None = None
+
+
+class VertexAITextPromptExecutionSettings(VertexAIPromptExecutionSettings):
+    """Vertex AI Text Prompt Execution Settings."""
+
+    pass
+
+
+class VertexAIChatPromptExecutionSettings(VertexAIPromptExecutionSettings):
+    """Vertex AI Chat Prompt Execution Settings."""
+
+    tools: list[dict[str, Any]] | None = Field(None, max_length=64)
+    tool_choice: str | None = None
+
+    @override
+    def prepare_settings_dict(self, **kwargs) -> dict[str, Any]:
+        """Prepare the settings as a dictionary for sending to the AI service.
+
+        This method removes the tools and tool_choice keys from the settings dictionary, as
+        the Vertex AI service requires these two settings to be sent as separate parameters.
+        """
+        settings_dict = super().prepare_settings_dict(**kwargs)
+        settings_dict.pop("tools", None)
+        settings_dict.pop("tool_choice", None)
+
+        return settings_dict
+
+
+class VertexAIEmbeddingPromptExecutionSettings(PromptExecutionSettings):
+    """Vertex AI Embedding Prompt Execution Settings."""
+
+    auto_truncate: bool | None = None
+    output_dimensionality: int | None = None
diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_settings.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_settings.py
new file mode 100644
index 000000000000..698d06f5ea67
--- /dev/null
+++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_settings.py
@@ -0,0 +1,34 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from typing import ClassVar
+
+from semantic_kernel.kernel_pydantic import KernelBaseSettings
+
+
+class VertexAISettings(KernelBaseSettings):
+    """Vertex AI settings.
+
+    The settings are first loaded from environment variables with
+    the prefix 'VERTEX_AI_'.
+    If the environment variables are not found, the settings can
+    be loaded from a .env file with the encoding 'utf-8'.
+    If the settings are not found in the .env file either, they
+    are left unset; validation will then fail, alerting you that
+    the settings are missing.
+
+    Required settings for prefix 'VERTEX_AI_' are:
+    - gemini_model_id: str - The Gemini model ID for the Vertex AI service, e.g. gemini-1.5-pro
+        This value can be found in the Vertex AI service deployment.
+        (Env var VERTEX_AI_GEMINI_MODEL_ID)
+    - embedding_model_id: str - The embedding model ID for the Vertex AI service, e.g. text-embedding-004
+        This value can be found in the Vertex AI service deployment.
+        (Env var VERTEX_AI_EMBEDDING_MODEL_ID)
+    - project_id: str - The Google Cloud project ID.
+ (Env var VERTEX_AI_PROJECT_ID) + """ + + env_prefix: ClassVar[str] = "VERTEX_AI_" + + gemini_model_id: str | None = None + embedding_model_id: str | None = None + project_id: str diff --git a/python/tests/integration/completions/test_chat_completions.py b/python/tests/integration/completions/test_chat_completions.py index 9f16b22bd1a3..f77a761c5a1e 100644 --- a/python/tests/integration/completions/test_chat_completions.py +++ b/python/tests/integration/completions/test_chat_completions.py @@ -22,6 +22,10 @@ GoogleAIChatPromptExecutionSettings, ) from semantic_kernel.connectors.ai.google.google_ai.services.google_ai_chat_completion import GoogleAIChatCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_chat_completion import VertexAIChatCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, +) from semantic_kernel.connectors.ai.mistral_ai.prompt_execution_settings.mistral_ai_prompt_execution_settings import ( MistralAIChatPromptExecutionSettings, ) @@ -116,6 +120,7 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution "mistral_ai": (MistralAIChatCompletion() if mistral_ai_setup else None, MistralAIChatPromptExecutionSettings), "ollama": (OllamaChatCompletion(), OllamaChatPromptExecutionSettings), "google_ai": (GoogleAIChatCompletion(), GoogleAIChatPromptExecutionSettings), + "vertex_ai": (VertexAIChatCompletion(), VertexAIChatPromptExecutionSettings), } @@ -459,7 +464,8 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), ], ["Hello", "well"], - marks=pytest.mark.skipif(not ollama_setup, reason="Need local Ollama setup"), + # marks=pytest.mark.skipif(not ollama_setup, reason="Need local Ollama setup"), + marks=pytest.mark.skip(reason="Flaky test"), id="ollama_text_input", ), pytest.param( @@ -495,6 +501,39 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ["house", "germany"], id="google_ai_image_input_file", ), + pytest.param( + "vertex_ai", + {}, + [ + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), + ], + ["Hello", "well"], + id="vertex_ai_text_input", + ), + pytest.param( + "vertex_ai", + { + "max_tokens": 256, + }, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + ], + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="Where was it made? 
Make a guess if you are not sure.")], + ), + ], + ["house", "germany"], + id="vertex_ai_image_input_file", + ), ], ) diff --git a/python/tests/integration/completions/test_text_completion.py b/python/tests/integration/completions/test_text_completion.py index 506ea5b9c301..047acce9fe11 100644 --- a/python/tests/integration/completions/test_text_completion.py +++ b/python/tests/integration/completions/test_text_completion.py @@ -11,6 +11,10 @@ GoogleAITextPromptExecutionSettings, ) from semantic_kernel.connectors.ai.google.google_ai.services.google_ai_text_completion import GoogleAITextCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_completion import VertexAITextCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAITextPromptExecutionSettings, +) from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import HuggingFacePromptExecutionSettings from semantic_kernel.connectors.ai.hugging_face.services.hf_text_completion import HuggingFaceTextCompletion from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( @@ -70,6 +74,7 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution HuggingFacePromptExecutionSettings, ), "google_ai": (GoogleAITextCompletion(), GoogleAITextPromptExecutionSettings), + "vertex_ai": (VertexAITextCompletion(), VertexAITextPromptExecutionSettings), } @@ -126,6 +131,13 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ["Hello"], id="google_ai_text_input", ), + pytest.param( + "vertex_ai", + {}, + ["Repeat the word Hello"], + ["Hello"], + id="vertex_ai_text_input", + ), ], ) diff --git a/python/tests/integration/embeddings/test_vertex_ai_embedding_service.py b/python/tests/integration/embeddings/test_vertex_ai_embedding_service.py new file mode 100644 index 000000000000..c4226d6d713e --- /dev/null +++ b/python/tests/integration/embeddings/test_vertex_ai_embedding_service.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft. All rights reserved. + + +import pytest + +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_embedding import VertexAITextEmbedding +from semantic_kernel.core_plugins.text_memory_plugin import TextMemoryPlugin +from semantic_kernel.kernel import Kernel +from semantic_kernel.memory.semantic_text_memory import SemanticTextMemory +from semantic_kernel.memory.volatile_memory_store import VolatileMemoryStore + + +@pytest.mark.asyncio +async def test_vertex_ai_embedding_service(kernel: Kernel): + embeddings_gen = VertexAITextEmbedding() + + kernel.add_service(embeddings_gen) + + memory = SemanticTextMemory(storage=VolatileMemoryStore(), embeddings_generator=embeddings_gen) + kernel.add_plugin(TextMemoryPlugin(memory), "TextMemoryPlugin") + + await memory.save_information(collection="generic", id="info1", text="My budget for 2024 is $100,000") + await memory.save_reference( + "test", + external_id="info1", + text="this is a test", + external_source_name="external source", + ) diff --git a/python/tests/unit/connectors/google/conftest.py b/python/tests/unit/connectors/google/conftest.py new file mode 100644 index 000000000000..65f2888f4280 --- /dev/null +++ b/python/tests/unit/connectors/google/conftest.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft. All rights reserved. 
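Reviewer note, illustrative only and not part of the patch: a minimal local equivalent of what the chat completion integration tests above exercise, assuming `VERTEX_AI_PROJECT_ID` and `VERTEX_AI_GEMINI_MODEL_ID` are configured.

```python
# Illustrative sketch, not part of this patch. Assumes the VERTEX_AI_* env vars
# are configured.
import asyncio

from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_chat_completion import (
    VertexAIChatCompletion,
)
from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
    VertexAIChatPromptExecutionSettings,
)
from semantic_kernel.contents.chat_history import ChatHistory


async def main() -> None:
    service = VertexAIChatCompletion()
    settings = VertexAIChatPromptExecutionSettings(temperature=0.0)

    # The system message is not sent as a chat turn; the connector extracts it
    # and passes it to GenerativeModel as system_instruction.
    history = ChatHistory()
    history.add_system_message("Answer in one sentence.")
    history.add_user_message("What is Vertex AI?")

    responses = await service.get_chat_message_contents(history, settings)
    print(responses[0].content)


asyncio.run(main())
```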
+ +import pytest + +from semantic_kernel.contents.chat_history import ChatHistory + + +@pytest.fixture() +def service_id() -> str: + return "test_service_id" + + +@pytest.fixture() +def chat_history() -> ChatHistory: + chat_history = ChatHistory() + chat_history.add_user_message("test_prompt") + return chat_history + + +@pytest.fixture() +def prompt() -> str: + return "test_prompt" diff --git a/python/tests/unit/connectors/google/google_ai/conftest.py b/python/tests/unit/connectors/google/google_ai/conftest.py index 40abd9c169cd..1318528d0943 100644 --- a/python/tests/unit/connectors/google/google_ai/conftest.py +++ b/python/tests/unit/connectors/google/google_ai/conftest.py @@ -8,25 +8,6 @@ from google.generativeai import protos from google.generativeai.types import AsyncGenerateContentResponse -from semantic_kernel.contents.chat_history import ChatHistory - - -@pytest.fixture() -def service_id() -> str: - return "test_service_id" - - -@pytest.fixture() -def chat_history() -> ChatHistory: - chat_history = ChatHistory() - chat_history.add_user_message("test_prompt") - return chat_history - - -@pytest.fixture() -def prompt() -> str: - return "test_prompt" - @pytest.fixture() def google_ai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): diff --git a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_completion.py b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_completion.py index 9f5f5936fb5c..10c9967e046d 100644 --- a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_completion.py +++ b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_completion.py @@ -39,10 +39,10 @@ def test_google_ai_text_completion_init_with_service_id(google_ai_unit_test_env, def test_google_ai_text_completion_init_with_model_id_in_argument(google_ai_unit_test_env) -> None: """Test initialization of GoogleAIChatCompletion with model_id in argument""" - google_ai_chat_completion = GoogleAITextCompletion(gemini_model_id="custom_model_id") + google_ai_text_completion = GoogleAITextCompletion(gemini_model_id="custom_model_id") - assert google_ai_chat_completion.ai_model_id == "custom_model_id" - assert google_ai_chat_completion.service_id == "custom_model_id" + assert google_ai_text_completion.ai_model_id == "custom_model_id" + assert google_ai_text_completion.service_id == "custom_model_id" @pytest.mark.parametrize("exclude_list", [["GOOGLE_AI_GEMINI_MODEL_ID"]], indirect=True) diff --git a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_embedding.py b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_embedding.py index e8fb4a36d349..6870b95d4b23 100644 --- a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_embedding.py +++ b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_embedding.py @@ -36,7 +36,7 @@ def test_google_ai_text_embedding_init_with_service_id(google_ai_unit_test_env, def test_google_ai_text_embedding_init_with_model_id_in_argument(google_ai_unit_test_env) -> None: - """Test initialization of GoogleAIChatCompletion with model_id in argument""" + """Test initialization of GoogleAITextEmbedding with model_id in argument""" google_ai_chat_completion = GoogleAITextEmbedding(embedding_model_id="custom_model_id") assert google_ai_chat_completion.ai_model_id == "custom_model_id" diff --git a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_utils.py 
b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_utils.py index 76a0f5b4a2e7..25619d9d4a07 100644 --- a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_utils.py +++ b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_utils.py @@ -4,11 +4,9 @@ from google.generativeai.protos import Candidate, Part from semantic_kernel.connectors.ai.google.google_ai.services.utils import ( - filter_system_message, finish_reason_from_google_ai_to_semantic_kernel, format_user_message, ) -from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.image_content import ImageContent @@ -26,27 +24,6 @@ def test_finish_reason_from_google_ai_to_semantic_kernel(): assert finish_reason_from_google_ai_to_semantic_kernel(Candidate.FinishReason.OTHER) is None -def test_first_system_message(): - """Test filter_system_message.""" - # Test with a single system message - chat_history = ChatHistory() - chat_history.add_system_message("System message") - chat_history.add_user_message("User message") - assert filter_system_message(chat_history) == "System message" - - # Test with no system message - chat_history = ChatHistory() - chat_history.add_user_message("User message") - assert filter_system_message(chat_history) is None - - # Test with multiple system messages - chat_history = ChatHistory() - chat_history.add_system_message("System message 1") - chat_history.add_system_message("System message 2") - with pytest.raises(ServiceInvalidRequestError): - filter_system_message(chat_history) - - def test_format_user_message(): """Test format_user_message.""" user_message = ChatMessageContent(role=AuthorRole.USER, content="User message") diff --git a/python/tests/unit/connectors/google/test_shared_utils.py b/python/tests/unit/connectors/google/test_shared_utils.py new file mode 100644 index 000000000000..5914e01fac63 --- /dev/null +++ b/python/tests/unit/connectors/google/test_shared_utils.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft. All rights reserved. + + +import pytest + +from semantic_kernel.connectors.ai.google.shared_utils import filter_system_message +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError + + +def test_first_system_message(): + """Test filter_system_message.""" + # Test with a single system message + chat_history = ChatHistory() + chat_history.add_system_message("System message") + chat_history.add_user_message("User message") + assert filter_system_message(chat_history) == "System message" + + # Test with no system message + chat_history = ChatHistory() + chat_history.add_user_message("User message") + assert filter_system_message(chat_history) is None + + # Test with multiple system messages + chat_history = ChatHistory() + chat_history.add_system_message("System message 1") + chat_history.add_system_message("System message 2") + with pytest.raises(ServiceInvalidRequestError): + filter_system_message(chat_history) diff --git a/python/tests/unit/connectors/google/vertex_ai/conftest.py b/python/tests/unit/connectors/google/vertex_ai/conftest.py new file mode 100644 index 000000000000..3d999ae3f7b4 --- /dev/null +++ b/python/tests/unit/connectors/google/vertex_ai/conftest.py @@ -0,0 +1,127 @@ +# Copyright (c) Microsoft. All rights reserved. 
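Reviewer note, illustrative only and not part of the patch: the contract of the shared `filter_system_message` helper that the tests above pin down.

```python
# Illustrative sketch, not part of this patch: at most one system message is
# allowed; it is extracted so it can become the model's system_instruction.
from semantic_kernel.connectors.ai.google.shared_utils import filter_system_message
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError

history = ChatHistory()
history.add_system_message("You are terse.")
history.add_user_message("Hi")

# A single system message is returned.
assert filter_system_message(history) == "You are terse."

# More than one system message is rejected.
history.add_system_message("Another system message")
try:
    filter_system_message(history)
except ServiceInvalidRequestError:
    print("multiple system messages are not supported")
```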
+ +from collections.abc import AsyncGenerator, AsyncIterable +from unittest.mock import MagicMock + +import pytest +from google.cloud.aiplatform_v1beta1.types.content import Candidate, Content, Part +from google.cloud.aiplatform_v1beta1.types.prediction_service import GenerateContentResponse +from vertexai.generative_models import GenerationResponse +from vertexai.language_models import TextEmbedding + + +@pytest.fixture() +def vertex_ai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): + """Fixture to set environment variables for Vertex AI Unit Tests.""" + if exclude_list is None: + exclude_list = [] + + if override_env_param_dict is None: + override_env_param_dict = {} + + env_vars = { + "VERTEX_AI_GEMINI_MODEL_ID": "test-gemini-model-id", + "VERTEX_AI_EMBEDDING_MODEL_ID": "test-embedding-model-id", + "VERTEX_AI_PROJECT_ID": "test-project-id", + } + + env_vars.update(override_env_param_dict) + + for key, value in env_vars.items(): + if key not in exclude_list: + monkeypatch.setenv(key, value) + else: + monkeypatch.delenv(key, raising=False) + + return env_vars + + +@pytest.fixture() +def mock_vertex_ai_chat_completion_response() -> GenerationResponse: + """Mock Vertex AI Chat Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content(role="user", parts=[Part(text="Test content")]) + candidate.finish_reason = Candidate.FinishReason.STOP + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + return GenerationResponse._from_gapic(response) + + +@pytest.fixture() +def mock_vertex_ai_streaming_chat_completion_response() -> AsyncIterable[GenerationResponse]: + """Mock Vertex AI streaming Chat Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content(role="user", parts=[Part(text="Test content")]) + candidate.finish_reason = Candidate.FinishReason.STOP + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + iterable = MagicMock(spec=AsyncGenerator) + iterable.__aiter__.return_value = [GenerationResponse._from_gapic(response)] + + return iterable + + +@pytest.fixture() +def mock_vertex_ai_text_completion_response() -> GenerationResponse: + """Mock Vertex AI Text Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content(parts=[Part(text="Test content")]) + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + return GenerationResponse._from_gapic(response) + + +@pytest.fixture() +def mock_vertex_ai_streaming_text_completion_response() -> AsyncIterable[GenerationResponse]: + """Mock Vertex AI streaming Text Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content(parts=[Part(text="Test content")]) + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + iterable = MagicMock(spec=AsyncGenerator) + 
iterable.__aiter__.return_value = [GenerationResponse._from_gapic(response)] + + return iterable + + +class MockTextEmbeddingModel: + async def get_embeddings_async( + self, + texts: list[str], + *, + auto_truncate: bool = True, + output_dimensionality: int | None = None, + ) -> list[TextEmbedding]: + pass diff --git a/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_chat_completion.py b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_chat_completion.py new file mode 100644 index 000000000000..113ee470b2d1 --- /dev/null +++ b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_chat_completion.py @@ -0,0 +1,169 @@ +# Copyright (c) Microsoft. All rights reserved. + + +from unittest.mock import AsyncMock, patch + +import pytest +from google.cloud.aiplatform_v1beta1.types.content import Content +from vertexai.generative_models import GenerativeModel + +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_chat_completion import VertexAIChatCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.finish_reason import FinishReason +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + + +# region init +def test_vertex_ai_chat_completion_init(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAIChatCompletion""" + model_id = vertex_ai_unit_test_env["VERTEX_AI_GEMINI_MODEL_ID"] + project_id = vertex_ai_unit_test_env["VERTEX_AI_PROJECT_ID"] + vertex_ai_chat_completion = VertexAIChatCompletion() + + assert vertex_ai_chat_completion.ai_model_id == model_id + assert vertex_ai_chat_completion.service_id == model_id + + assert isinstance(vertex_ai_chat_completion.service_settings, VertexAISettings) + assert vertex_ai_chat_completion.service_settings.gemini_model_id == model_id + assert vertex_ai_chat_completion.service_settings.project_id == project_id + + +def test_vertex_ai_chat_completion_init_with_service_id(vertex_ai_unit_test_env, service_id) -> None: + """Test initialization of VertexAIChatCompletion with a service id that is not the model id""" + vertex_ai_chat_completion = VertexAIChatCompletion(service_id=service_id) + + assert vertex_ai_chat_completion.service_id == service_id + + +def test_vertex_ai_chat_completion_init_with_model_id_in_argument(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAIChatCompletion with model id in argument""" + vertex_ai_chat_completion = VertexAIChatCompletion(gemini_model_id="custom_model_id") + + assert vertex_ai_chat_completion.ai_model_id == "custom_model_id" + assert vertex_ai_chat_completion.service_id == "custom_model_id" + + +@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_GEMINI_MODEL_ID"]], indirect=True) +def test_vertex_ai_chat_completion_init_with_empty_model_id(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAIChatCompletion with an empty model id""" + with pytest.raises(ServiceInitializationError): + VertexAIChatCompletion(env_file_path="fake_env_file_path.env") + + +@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_PROJECT_ID"]], indirect=True) +def 
test_vertex_ai_chat_completion_init_with_empty_project_id(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAIChatCompletion with an empty project id"""
+    with pytest.raises(ServiceInitializationError):
+        VertexAIChatCompletion(env_file_path="fake_env_file_path.env")
+
+
+def test_prompt_execution_settings_class(vertex_ai_unit_test_env) -> None:
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+    assert vertex_ai_chat_completion.get_prompt_execution_settings_class() == VertexAIChatPromptExecutionSettings
+
+
+# endregion init
+
+
+# region chat completion
+@pytest.mark.asyncio
+@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
+async def test_vertex_ai_chat_completion(
+    mock_vertex_ai_model_generate_content_async,
+    vertex_ai_unit_test_env,
+    chat_history: ChatHistory,
+    mock_vertex_ai_chat_completion_response,
+) -> None:
+    """Test chat completion with VertexAIChatCompletion"""
+    settings = VertexAIChatPromptExecutionSettings()
+
+    mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_chat_completion_response
+
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+    responses: list[ChatMessageContent] = await vertex_ai_chat_completion.get_chat_message_contents(
+        chat_history, settings
+    )
+
+    mock_vertex_ai_model_generate_content_async.assert_called_once_with(
+        contents=vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history),
+        generation_config=settings.prepare_settings_dict(),
+    )
+    assert len(responses) == 1
+    assert responses[0].role == "assistant"
+    assert responses[0].content == mock_vertex_ai_chat_completion_response.candidates[0].content.parts[0].text
+    assert responses[0].finish_reason == FinishReason.STOP
+    assert "usage" in responses[0].metadata
+    assert "prompt_feedback" in responses[0].metadata
+    assert responses[0].inner_content == mock_vertex_ai_chat_completion_response
+
+
+# endregion chat completion
+
+
+# region streaming chat completion
+@pytest.mark.asyncio
+@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
+async def test_vertex_ai_streaming_chat_completion(
+    mock_vertex_ai_model_generate_content_async,
+    vertex_ai_unit_test_env,
+    chat_history: ChatHistory,
+    mock_vertex_ai_streaming_chat_completion_response,
+) -> None:
+    """Test streaming chat completion with VertexAIChatCompletion"""
+    settings = VertexAIChatPromptExecutionSettings()
+
+    mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_streaming_chat_completion_response
+
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+    async for messages in vertex_ai_chat_completion.get_streaming_chat_message_contents(chat_history, settings):
+        assert len(messages) == 1
+        assert messages[0].role == "assistant"
+        assert messages[0].finish_reason == FinishReason.STOP
+        assert "usage" in messages[0].metadata
+        assert "prompt_feedback" in messages[0].metadata
+
+    mock_vertex_ai_model_generate_content_async.assert_called_once_with(
+        contents=vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history),
+        generation_config=settings.prepare_settings_dict(),
+        stream=True,
+    )
+
+
+# endregion streaming chat completion
+
+
+def test_vertex_ai_chat_completion_parse_chat_history_correctly(vertex_ai_unit_test_env) -> None:
+    """Test _prepare_chat_history_for_request method"""
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+
+    chat_history = ChatHistory()
+    chat_history.add_system_message("test_system_message")
+    chat_history.add_user_message("test_user_message")
+    chat_history.add_assistant_message("test_assistant_message")
+
+    parsed_chat_history = vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history)
+
+    assert isinstance(parsed_chat_history, list)
+    # System message should be ignored
+    assert len(parsed_chat_history) == 2
+    assert all(isinstance(message, Content) for message in parsed_chat_history)
+    assert parsed_chat_history[0].role == "user"
+    assert parsed_chat_history[0].parts[0].text == "test_user_message"
+    assert parsed_chat_history[1].role == "model"
+    assert parsed_chat_history[1].parts[0].text == "test_assistant_message"
+
+
+def test_vertex_ai_chat_completion_parse_chat_history_throw_unsupported_message(vertex_ai_unit_test_env) -> None:
+    """Test _prepare_chat_history_for_request method with unsupported message type"""
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+
+    chat_history = ChatHistory()
+    chat_history.add_tool_message("test_tool_message")
+
+    with pytest.raises(ValueError):
+        _ = vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history)
diff --git a/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_completion.py b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_completion.py
new file mode 100644
index 000000000000..0ad9c61f3444
--- /dev/null
+++ b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_completion.py
@@ -0,0 +1,128 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from unittest.mock import AsyncMock, patch
+
+import pytest
+from vertexai.generative_models import GenerativeModel
+
+from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_completion import VertexAITextCompletion
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
+    VertexAITextPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings
+from semantic_kernel.contents.text_content import TextContent
+from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
+
+
+# region init
+def test_vertex_ai_text_completion_init(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextCompletion"""
+    model_id = vertex_ai_unit_test_env["VERTEX_AI_GEMINI_MODEL_ID"]
+    project_id = vertex_ai_unit_test_env["VERTEX_AI_PROJECT_ID"]
+    vertex_ai_text_completion = VertexAITextCompletion()
+
+    assert vertex_ai_text_completion.ai_model_id == model_id
+    assert vertex_ai_text_completion.service_id == model_id
+
+    assert isinstance(vertex_ai_text_completion.service_settings, VertexAISettings)
+    assert vertex_ai_text_completion.service_settings.gemini_model_id == model_id
+    assert vertex_ai_text_completion.service_settings.project_id == project_id
+
+
+def test_vertex_ai_text_completion_init_with_service_id(vertex_ai_unit_test_env, service_id) -> None:
+    """Test initialization of VertexAITextCompletion with a service id that is not the model id"""
+    vertex_ai_text_completion = VertexAITextCompletion(service_id=service_id)
+
+    assert vertex_ai_text_completion.service_id == service_id
+
+
+def test_vertex_ai_text_completion_init_with_model_id_in_argument(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextCompletion with model id in argument"""
+    vertex_ai_text_completion = VertexAITextCompletion(gemini_model_id="custom_model_id")
+
+    assert vertex_ai_text_completion.ai_model_id == "custom_model_id"
+    assert vertex_ai_text_completion.service_id == "custom_model_id"
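Reviewer note, illustrative only and not part of the patch: the mocking pattern the unit tests in these files rely on. `GenerativeModel.generate_content_async` is patched with an `AsyncMock`, so the tests need no Google Cloud credentials or network access.

```python
# Illustrative sketch, not part of this patch: patching the async Vertex AI
# generation call the way the unit tests above do.
from unittest.mock import AsyncMock, patch

from vertexai.generative_models import GenerativeModel

with patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock) as mock_generate:
    # The real tests return a GenerationResponse fixture here.
    mock_generate.return_value = "canned response"
    # Any code that constructs a GenerativeModel and awaits
    # generate_content_async now receives the canned response
    # instead of calling Vertex AI.
```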
+ + +@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_GEMINI_MODEL_ID"]], indirect=True) +def test_vertex_ai_text_completion_init_with_empty_model_id(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAITextCompletion with an empty model id""" + with pytest.raises(ServiceInitializationError): + VertexAITextCompletion(env_file_path="fake_env_file_path.env") + + +@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_PROJECT_ID"]], indirect=True) +def test_vertex_ai_text_completion_init_with_empty_project_id(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAITextCompletion with an empty project id""" + with pytest.raises(ServiceInitializationError): + VertexAITextCompletion(env_file_path="fake_env_file_path.env") + + +def test_prompt_execution_settings_class(vertex_ai_unit_test_env) -> None: + vertex_ai_text_completion = VertexAITextCompletion() + assert vertex_ai_text_completion.get_prompt_execution_settings_class() == VertexAITextPromptExecutionSettings + + +# endregion init + + +# region text completion +@pytest.mark.asyncio +@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock) +async def test_vertex_ai_text_completion( + mock_vertex_ai_model_generate_content_async, + vertex_ai_unit_test_env, + prompt: str, + mock_vertex_ai_text_completion_response, +) -> None: + """Test text completion with VertexAITextCompletion""" + settings = VertexAITextPromptExecutionSettings() + + mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_text_completion_response + + vertex_ai_text_completion = VertexAITextCompletion() + responses: list[TextContent] = await vertex_ai_text_completion.get_text_contents(prompt, settings) + + mock_vertex_ai_model_generate_content_async.assert_called_once_with( + contents=prompt, + generation_config=settings.prepare_settings_dict(), + ) + assert len(responses) == 1 + assert responses[0].text == mock_vertex_ai_text_completion_response.candidates[0].content.parts[0].text + assert "usage" in responses[0].metadata + assert "prompt_feedback" in responses[0].metadata + assert responses[0].inner_content == mock_vertex_ai_text_completion_response + + +# endregion text completion + + +# region streaming text completion +@pytest.mark.asyncio +@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock) +async def test_vertex_ai_streaming_text_completion( + mock_vertex_ai_model_generate_content_async, + vertex_ai_unit_test_env, + prompt: str, + mock_vertex_ai_streaming_text_completion_response, +) -> None: + """Test streaming text completion with VertexAITextCompletion""" + settings = VertexAITextPromptExecutionSettings() + + mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_streaming_text_completion_response + + vertex_ai_text_completion = VertexAITextCompletion() + async for chunks in vertex_ai_text_completion.get_streaming_text_contents(prompt, settings): + assert len(chunks) == 1 + assert "usage" in chunks[0].metadata + assert "prompt_feedback" in chunks[0].metadata + + mock_vertex_ai_model_generate_content_async.assert_called_once_with( + contents=prompt, + generation_config=settings.prepare_settings_dict(), + stream=True, + ) + + +# endregion streaming text completion diff --git a/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_embedding.py b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_embedding.py new file mode 100644 index 000000000000..a11ad1c5bb59 --- /dev/null +++ 
b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_embedding.py
@@ -0,0 +1,165 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from unittest.mock import AsyncMock, patch
+
+import pytest
+from numpy import array, ndarray
+from vertexai.language_models import TextEmbedding, TextEmbeddingModel
+
+from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_embedding import VertexAITextEmbedding
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
+    VertexAIEmbeddingPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings
+from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
+from tests.unit.connectors.google.vertex_ai.conftest import MockTextEmbeddingModel
+
+
+# region init
+def test_vertex_ai_text_embedding_init(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextEmbedding"""
+    model_id = vertex_ai_unit_test_env["VERTEX_AI_EMBEDDING_MODEL_ID"]
+    project_id = vertex_ai_unit_test_env["VERTEX_AI_PROJECT_ID"]
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+
+    assert vertex_ai_text_embedding.ai_model_id == model_id
+    assert vertex_ai_text_embedding.service_id == model_id
+
+    assert isinstance(vertex_ai_text_embedding.service_settings, VertexAISettings)
+    assert vertex_ai_text_embedding.service_settings.embedding_model_id == model_id
+    assert vertex_ai_text_embedding.service_settings.project_id == project_id
+
+
+def test_vertex_ai_text_embedding_init_with_service_id(vertex_ai_unit_test_env, service_id) -> None:
+    """Test initialization of VertexAITextEmbedding with a service id that is not the model id"""
+    vertex_ai_text_embedding = VertexAITextEmbedding(service_id=service_id)
+
+    assert vertex_ai_text_embedding.service_id == service_id
+
+
+def test_vertex_ai_text_embedding_init_with_model_id_in_argument(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextEmbedding with model id in argument"""
+    vertex_ai_text_embedding = VertexAITextEmbedding(embedding_model_id="custom_model_id")
+
+    assert vertex_ai_text_embedding.ai_model_id == "custom_model_id"
+    assert vertex_ai_text_embedding.service_id == "custom_model_id"
+
+
+@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_EMBEDDING_MODEL_ID"]], indirect=True)
+def test_vertex_ai_text_embedding_init_with_empty_model_id(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextEmbedding with an empty model id"""
+    with pytest.raises(ServiceInitializationError):
+        VertexAITextEmbedding(env_file_path="fake_env_file_path.env")
+
+
+@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_PROJECT_ID"]], indirect=True)
+def test_vertex_ai_text_embedding_init_with_empty_project_id(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextEmbedding with an empty project id"""
+    with pytest.raises(ServiceInitializationError):
+        VertexAITextEmbedding(env_file_path="fake_env_file_path.env")
+
+
+def test_prompt_execution_settings_class(vertex_ai_unit_test_env) -> None:
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    assert vertex_ai_text_embedding.get_prompt_execution_settings_class() == VertexAIEmbeddingPromptExecutionSettings
+
+
+# endregion init
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_embedding(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service initializes and generates embeddings correctly."""
+    mock_from_pretrained.return_value = MockTextEmbeddingModel()
+    mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])]
+
+    settings = VertexAIEmbeddingPromptExecutionSettings()
+
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    response: ndarray = await vertex_ai_text_embedding.generate_embeddings(
+        [prompt],
+        settings=settings,
+    )
+
+    assert len(response) == 1
+    assert (response[0] == array([0.1, 0.2, 0.3])).all()
+    mock_embedding_client.assert_called_once_with([prompt])
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_embedding_with_settings(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service initializes and generates embeddings correctly with settings."""
+    mock_from_pretrained.return_value = MockTextEmbeddingModel()
+    mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])]
+
+    settings = VertexAIEmbeddingPromptExecutionSettings()
+    settings.output_dimensionality = 3
+    settings.auto_truncate = True
+
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    response: ndarray = await vertex_ai_text_embedding.generate_embeddings(
+        [prompt],
+        settings=settings,
+    )
+
+    assert len(response) == 1
+    assert (response[0] == array([0.1, 0.2, 0.3])).all()
+    mock_embedding_client.assert_called_once_with(
+        [prompt],
+        **settings.prepare_settings_dict(),
+    )
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_embedding_without_settings(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service initializes and generates embeddings correctly without settings."""
+    mock_from_pretrained.return_value = MockTextEmbeddingModel()
+    mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])]
+
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    response: ndarray = await vertex_ai_text_embedding.generate_embeddings([prompt])
+
+    assert len(response) == 1
+    assert (response[0] == array([0.1, 0.2, 0.3])).all()
+    mock_embedding_client.assert_called_once_with([prompt])
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_embedding_list_input(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service initializes and generates embeddings correctly with a list of prompts."""
+    mock_from_pretrained.return_value = MockTextEmbeddingModel()
+    mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3]), TextEmbedding(values=[0.1, 0.2, 0.3])]
+
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    response: ndarray = await vertex_ai_text_embedding.generate_embeddings([prompt, prompt])
+
+    assert len(response) == 2
+    assert (response == array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])).all()
+    mock_embedding_client.assert_called_once_with([prompt, prompt])
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_raw_embedding(mock_embedding_client,
mock_from_pretrained, vertex_ai_unit_test_env, prompt): + """Test that the service initializes and generates embeddings correctly.""" + mock_from_pretrained.return_value = MockTextEmbeddingModel() + mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])] + + settings = VertexAIEmbeddingPromptExecutionSettings() + + vertex_ai_text_embedding = VertexAITextEmbedding() + response: ndarray = await vertex_ai_text_embedding.generate_raw_embeddings([prompt], settings) + + assert len(response) == 1 + assert response[0] == [0.1, 0.2, 0.3] + mock_embedding_client.assert_called_once_with([prompt]) diff --git a/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_utils.py b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_utils.py new file mode 100644 index 000000000000..d519db2463c7 --- /dev/null +++ b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_utils.py @@ -0,0 +1,78 @@ +# Copyright (c) Microsoft. All rights reserved. + +import pytest +from google.cloud.aiplatform_v1beta1.types.content import Candidate, Part + +from semantic_kernel.connectors.ai.google.vertex_ai.services.utils import ( + finish_reason_from_vertex_ai_to_semantic_kernel, + format_user_message, +) +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason +from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError + + +def test_finish_reason_from_vertex_ai_to_semantic_kernel(): + """Test finish_reason_from_vertex_ai_to_semantic_kernel.""" + assert finish_reason_from_vertex_ai_to_semantic_kernel(Candidate.FinishReason.STOP) == FinishReason.STOP + assert finish_reason_from_vertex_ai_to_semantic_kernel(Candidate.FinishReason.MAX_TOKENS) == FinishReason.LENGTH + assert finish_reason_from_vertex_ai_to_semantic_kernel(Candidate.FinishReason.SAFETY) == FinishReason.CONTENT_FILTER + assert finish_reason_from_vertex_ai_to_semantic_kernel(Candidate.FinishReason.OTHER) is None + + +def test_format_user_message(): + """Test format_user_message.""" + user_message = ChatMessageContent(role=AuthorRole.USER, content="User message") + formatted_user_message = format_user_message(user_message) + + assert len(formatted_user_message) == 1 + assert isinstance(formatted_user_message[0], Part) + assert formatted_user_message[0].text == "User message" + + # Test with an image content + image_content = ImageContent(data="image data", mime_type="image/png") + user_message = ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="Text content"), + image_content, + ], + ) + formatted_user_message = format_user_message(user_message) + + assert len(formatted_user_message) == 2 + assert isinstance(formatted_user_message[0], Part) + assert formatted_user_message[0].text == "Text content" + assert isinstance(formatted_user_message[1], Part) + assert formatted_user_message[1].inline_data.mime_type == "image/png" + assert formatted_user_message[1].inline_data.data == image_content.data + + +def test_format_user_message_throws_with_unsupported_items() -> None: + """Test format_user_message with unsupported items.""" + # Test with unsupported items, any item other than TextContent and 
ImageContent should raise an error + # Note that method format_user_message will use the content of the message if no ImageContent is found, + # so we need to add an ImageContent to the message to trigger the error + user_message = ChatMessageContent( + role=AuthorRole.USER, + items=[ + FunctionCallContent(), + ImageContent(data="image data", mime_type="image/png"), + ], + ) + with pytest.raises(ServiceInvalidRequestError): + format_user_message(user_message) + + # Test with an ImageContent that has no data_uri + user_message = ChatMessageContent( + role=AuthorRole.USER, + items=[ + ImageContent(data_uri=""), + ], + ) + with pytest.raises(ServiceInvalidRequestError): + format_user_message(user_message) From b1690ce6f20282fd102fd10063903fbc4d4737dc Mon Sep 17 00:00:00 2001 From: Eduard van Valkenburg Date: Thu, 1 Aug 2024 14:18:10 +0200 Subject: [PATCH 2/5] Python: small improvements in test running (#7576) ### Motivation and Context Adds pytest-xdist to parallelize tests, defaults to logical cores Adds single test for Kernel to validate. ### Description ### Contribution Checklist - [x] The code builds clean without any errors or warnings - [x] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations - [x] All unit tests pass, and I have added new tests where possible - [x] I didn't break anyone :smile: --- .../workflows/python-integration-tests.yml | 41 +++++++++++----- .github/workflows/python-test-coverage.yml | 16 +++--- .github/workflows/python-unit-tests.yml | 49 +++++++++++++++++-- python/.vscode/tasks.json | 18 +++++++ python/poetry.lock | 41 ++++++++++++++-- python/pyproject.toml | 6 ++- .../completions/test_chat_completions.py | 7 ++- python/tests/unit/kernel/test_kernel.py | 10 ++++ 8 files changed, 155 insertions(+), 33 deletions(-) diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index c2b3b34ca376..f06e527d539b 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -73,14 +73,18 @@ jobs: - name: Install Ollama if: matrix.os == 'ubuntu-latest' run: | - curl -fsSL https://ollama.com/install.sh | sh - ollama serve & - sleep 5 + if ${{ vars.OLLAMA_MODEL != '' }}; then + curl -fsSL https://ollama.com/install.sh | sh + ollama serve & + sleep 5 + fi - name: Pull model in Ollama if: matrix.os == 'ubuntu-latest' run: | - ollama pull ${{ vars.OLLAMA_MODEL }} - ollama list + if ${{ vars.OLLAMA_MODEL != '' }}; then + ollama pull ${{ vars.OLLAMA_MODEL }} + ollama list + fi - name: Google auth uses: google-github-actions/auth@v2 with: @@ -88,6 +92,9 @@ jobs: credentials_json: ${{ secrets.VERTEX_AI_SERVICE_ACCOUNT_KEY }} - name: Set up gcloud uses: google-github-actions/setup-gcloud@v2 + - name: Setup Redis Stack Server + if: matrix.os == 'ubuntu-latest' + run: docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest - name: Run Integration Tests id: run_tests shell: bash @@ -124,13 +131,25 @@ jobs: VERTEX_AI_GEMINI_MODEL_ID: ${{ vars.VERTEX_AI_GEMINI_MODEL_ID }} VERTEX_AI_EMBEDDING_MODEL_ID: ${{ vars.VERTEX_AI_EMBEDDING_MODEL_ID }} run: | - if ${{ matrix.os == 'ubuntu-latest' }}; then - docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest - fi - cd python - poetry run pytest ./tests/integration -v 
- poetry run pytest ./tests/samples -v + poetry run pytest ./tests/integration ./tests/samples -v --junitxml=pytest.xml + - name: Surface failing tests + if: always() + uses: pmeier/pytest-results-action@main + with: + # A list of JUnit XML files, directories containing the former, and wildcard + # patterns to process. + # See @actions/glob for supported patterns. + path: python/pytest.xml + # (Optional) Add a summary of the results at the top of the report + summary: true + # (Optional) Select which results should be included in the report. + # Follows the same syntax as `pytest -r` + display-options: fEX + # (Optional) Fail the workflow if no JUnit XML was found. + fail-on-empty: true + # (Optional) Title of the test results section in the workflow summary + title: Test results python-integration-tests: needs: paths-filter diff --git a/.github/workflows/python-test-coverage.yml b/.github/workflows/python-test-coverage.yml index d61da4f022a2..7d3c14ce783b 100644 --- a/.github/workflows/python-test-coverage.yml +++ b/.github/workflows/python-test-coverage.yml @@ -10,10 +10,6 @@ on: types: - in_progress -env: - PYTHON_VERSION: "3.10" - RUN_OS: ubuntu-latest - jobs: python-tests-coverage: runs-on: ubuntu-latest @@ -27,13 +23,13 @@ jobs: uses: lewagon/wait-on-check-action@v1.3.4 with: ref: ${{ github.event.pull_request.head.sha }} - check-name: 'Python Unit Tests (${{ env.PYTHON_VERSION }}, ${{ env.RUN_OS }}, false)' + check-name: 'Python Test Coverage' repo-token: ${{ secrets.GH_ACTIONS_PR_WRITE }} wait-interval: 90 allowed-conclusions: success - uses: actions/checkout@v4 - name: Setup filename variables - run: echo "FILE_ID=${{ github.event.number }}-${{ env.RUN_OS }}-${{ env.PYTHON_VERSION }}" >> $GITHUB_ENV + run: echo "FILE_ID=${{ github.event.number }}" >> $GITHUB_ENV - name: Download coverage uses: dawidd6/action-download-artifact@v3 with: @@ -57,9 +53,9 @@ jobs: github-token: ${{ secrets.GH_ACTIONS_PR_WRITE }} pytest-coverage-path: python-coverage.txt coverage-path-prefix: "python/" - title: "Python ${{ env.PYTHON_VERSION }} Test Coverage Report" - badge-title: "Py${{ env.PYTHON_VERSION }} Test Coverage" - junitxml-title: "Python ${{ env.PYTHON_VERSION }} Unit Test Overview" + title: "Python Test Coverage Report" + badge-title: "Python Test Coverage" + junitxml-title: "Python Unit Test Overview" junitxml-path: pytest.xml default-branch: "main" - unique-id-for-comment: python-${{ env.PYTHON_VERSION }} + unique-id-for-comment: python-test-coverage diff --git a/.github/workflows/python-unit-tests.yml b/.github/workflows/python-unit-tests.yml index 8e34ad0e9b5f..4137270c3796 100644 --- a/.github/workflows/python-unit-tests.yml +++ b/.github/workflows/python-unit-tests.yml @@ -18,7 +18,7 @@ jobs: os: [ubuntu-latest, windows-latest, macos-latest] experimental: [false] include: - - python-version: "3.13.0-beta.3" + - python-version: "3.13.0-beta.4" os: "ubuntu-latest" experimental: true permissions: @@ -28,8 +28,6 @@ jobs: working-directory: python steps: - uses: actions/checkout@v4 - - name: Setup filename variables - run: echo "FILE_ID=${{ github.event.number }}-${{ matrix.os }}-${{ matrix.python-version }}" >> $GITHUB_ENV - name: Install poetry run: pipx install poetry - name: Set up Python ${{ matrix.python-version }} @@ -40,8 +38,50 @@ jobs: - name: Install dependencies run: poetry install --with unit-tests - name: Test with pytest - run: poetry run pytest -q --junitxml=pytest.xml --cov=semantic_kernel --cov-report=term-missing:skip-covered ./tests/unit | tee python-coverage.txt + run: 
poetry run pytest --junitxml=pytest.xml ./tests/unit + - name: Surface failing tests + if: always() + uses: pmeier/pytest-results-action@main + with: + # A list of JUnit XML files, directories containing the former, and wildcard + # patterns to process. + # See @actions/glob for supported patterns. + path: python/pytest.xml + # (Optional) Add a summary of the results at the top of the report + summary: true + # (Optional) Select which results should be included in the report. + # Follows the same syntax as `pytest -r` + display-options: fEX + # (Optional) Fail the workflow if no JUnit XML was found. + fail-on-empty: true + # (Optional) Title of the test results section in the workflow summary + title: Test results + python-test-coverage: + name: Python Test Coverage + runs-on: [ubuntu-latest] + continue-on-error: true + permissions: + contents: write + defaults: + run: + working-directory: python + steps: + - uses: actions/checkout@v4 + - name: Setup filename variables + run: echo "FILE_ID=${{ github.event.number }}" >> $GITHUB_ENV + - name: Install poetry + run: pipx install poetry + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + cache: "poetry" + - name: Install dependencies + run: poetry install --with unit-tests + - name: Test with pytest + run: poetry run pytest -q --junitxml=pytest.xml --cov=semantic_kernel --cov-report=term-missing:skip-covered ./tests/unit | tee python-coverage.txt - name: Upload coverage + if: always() uses: actions/upload-artifact@v4 with: name: python-coverage-${{ env.FILE_ID }}.txt @@ -49,6 +89,7 @@ jobs: overwrite: true retention-days: 1 - name: Upload pytest.xml + if: always() uses: actions/upload-artifact@v4 with: name: pytest-${{ env.FILE_ID }}.xml diff --git a/python/.vscode/tasks.json b/python/.vscode/tasks.json index 3d7c72c4036e..b5e7b5e78a75 100644 --- a/python/.vscode/tasks.json +++ b/python/.vscode/tasks.json @@ -117,6 +117,24 @@ }, "problemMatcher": [] }, + { + "label": "Python: Tests - Unit - Failed Only", + "type": "shell", + "command": "poetry", + "args": [ + "run", + "pytest", + "tests/unit/", + "--last-failed", + "-v" + ], + "group": "test", + "presentation": { + "reveal": "always", + "panel": "shared" + }, + "problemMatcher": [] + }, { "label": "Python: Tests - Code Coverage", "type": "shell", diff --git a/python/poetry.lock b/python/poetry.lock index 5f2acbb0f0b4..434098bb25cd 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. 
[[package]] name = "accelerate" @@ -1129,6 +1129,20 @@ files = [ [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "execnet" +version = "2.1.1" +description = "execnet: rapid multi-Python deployment" +optional = false +python-versions = ">=3.8" +files = [ + {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, + {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, +] + +[package.extras] +testing = ["hatch", "pre-commit", "pytest", "tox"] + [[package]] name = "executing" version = "2.0.1" @@ -2774,6 +2788,7 @@ python-versions = ">=3.7" files = [ {file = "milvus_lite-2.4.7-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:c828190118b104b05b8c8e0b5a4147811c86b54b8fb67bc2e726ad10fc0b544e"}, {file = "milvus_lite-2.4.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1537633c39879714fb15082be56a4b97f74c905a6e98e302ec01320561081af"}, + {file = "milvus_lite-2.4.7-py3-none-manylinux2014_aarch64.whl", hash = "sha256:fcb909d38c83f21478ca9cb500c84264f988c69f62715ae9462e966767fb76dd"}, {file = "milvus_lite-2.4.7-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f016474d663045787dddf1c3aad13b7d8b61fd329220318f858184918143dcbf"}, ] @@ -5090,6 +5105,27 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] +[[package]] +name = "pytest-xdist" +version = "3.6.1" +description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, + {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, +] + +[package.dependencies] +execnet = ">=2.1" +psutil = {version = ">=3.0", optional = true, markers = "extra == \"psutil\""} +pytest = ">=7.0.0" + +[package.extras] +psutil = ["psutil (>=3.0)"] +setproctitle = ["setproctitle"] +testing = ["filelock"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -5191,7 +5227,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -7375,4 +7410,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = "^3.10,<3.13" -content-hash = "b06f07ed9eb865636ad02f5e67be234042aba91da5090093248b45039369094a" +content-hash = "33a9221211ea37b3ab4586c0876b209cbce4ac0bb7173b445484a74407212b30" diff --git 
a/python/pyproject.toml b/python/pyproject.toml index b8490b0dedf9..90410539ed2c 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -84,9 +84,10 @@ ruff = ">=0.4.5" ipykernel = "^6.29.4" nbconvert = "^7.16.4" pytest = "^8.2.1" +pytest-xdist = { version="^3.6.1", extras=["psutil"]} +pytest-cov = ">=5.0.0" pytest-asyncio = "^0.23.7" snoop = "^0.4.3" -pytest-cov = ">=5.0.0" mypy = ">=1.10.0" types-PyYAML = "^6.0.12.20240311" @@ -167,6 +168,9 @@ redis = ["redis"] usearch = ["usearch", "pyarrow"] weaviate = ["weaviate-client"] +[tool.pytest.ini_options] +addopts = "-ra -q -r fEX -n logical --dist loadfile --dist worksteal" + [tool.ruff] line-length = 120 target-version = "py310" diff --git a/python/tests/integration/completions/test_chat_completions.py b/python/tests/integration/completions/test_chat_completions.py index f77a761c5a1e..e739250cfd29 100644 --- a/python/tests/integration/completions/test_chat_completions.py +++ b/python/tests/integration/completions/test_chat_completions.py @@ -88,7 +88,7 @@ def history() -> ChatHistory: @pytest.fixture(scope="module") -def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]]: +def services() -> dict[str, tuple[ChatCompletionClientBase | None, type[PromptExecutionSettings]]]: azure_openai_settings = AzureOpenAISettings.create() endpoint = azure_openai_settings.endpoint deployment_name = azure_openai_settings.chat_deployment_name @@ -118,7 +118,7 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution "azure_custom_client": (azure_custom_client, AzureChatPromptExecutionSettings), "azure_ai_inference": (azure_ai_inference_client, AzureAIInferenceChatPromptExecutionSettings), "mistral_ai": (MistralAIChatCompletion() if mistral_ai_setup else None, MistralAIChatPromptExecutionSettings), - "ollama": (OllamaChatCompletion(), OllamaChatPromptExecutionSettings), + "ollama": (OllamaChatCompletion() if ollama_setup else None, OllamaChatPromptExecutionSettings), "google_ai": (GoogleAIChatCompletion(), GoogleAIChatPromptExecutionSettings), "vertex_ai": (VertexAIChatCompletion(), VertexAIChatPromptExecutionSettings), } @@ -464,8 +464,7 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), ], ["Hello", "well"], - # marks=pytest.mark.skipif(not ollama_setup, reason="Need local Ollama setup"), - marks=pytest.mark.skip(reason="Flaky test"), + marks=pytest.mark.skipif(not ollama_setup, reason="Need local Ollama setup"), id="ollama_text_input", ), pytest.param( diff --git a/python/tests/unit/kernel/test_kernel.py b/python/tests/unit/kernel/test_kernel.py index 3f504035dd00..305ab8b7288d 100644 --- a/python/tests/unit/kernel/test_kernel.py +++ b/python/tests/unit/kernel/test_kernel.py @@ -166,6 +166,16 @@ async def test_invoke_function_fail(kernel: Kernel, create_mock_function): pass +@pytest.mark.asyncio +async def test_invoke_function_cancelled(kernel: Kernel, create_mock_function): + mock_function = create_mock_function(name="test_function") + mock_function._invoke_internal = AsyncMock(side_effect=OperationCancelledException("Operation cancelled")) + kernel.add_plugin(KernelPlugin(name="test", functions=[mock_function])) + + result = await kernel.invoke(mock_function, arguments=KernelArguments()) + assert result is None + + @pytest.mark.asyncio async def test_invoke_stream_function(kernel: Kernel, create_mock_function): mock_function = 
create_mock_function(name="test_function") From 057b04f2cd2bdb83361fa51722382a46f776b71d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 09:25:52 -0400 Subject: [PATCH 3/5] Python: Bump pyarrow from 16.1.0 to 17.0.0 in /python (#7396) Bumps [pyarrow](https://github.com/apache/arrow) from 16.1.0 to 17.0.0.
Release notes

Sourced from pyarrow's releases.

Apache Arrow 17.0.0

Release Notes URL: https://arrow.apache.org/release/17.0.0.html

Apache Arrow 17.0.0 RC2

Release Notes: Release Candidate: 17.0.0 RC2

Apache Arrow 17.0.0 RC1

Release Notes: Release Candidate: 17.0.0 RC1

Apache Arrow 17.0.0 RC0

Release Notes: Release Candidate: 17.0.0 RC0

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pyarrow&package-manager=pip&previous-version=16.1.0&new-version=17.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- python/poetry.lock | 83 +++++++++++++++++++++++-------------------- python/pyproject.toml | 4 +-- 2 files changed, 46 insertions(+), 41 deletions(-) diff --git a/python/poetry.lock b/python/poetry.lock index 434098bb25cd..79d25adaa28a 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "accelerate" @@ -3527,6 +3527,7 @@ description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ + {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_aarch64.whl", hash = "sha256:004186d5ea6a57758fd6d57052a123c73a4815adf365eb8dd6a85c9eaa7535ff"}, {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d9714f27c1d0f0895cd8915c07a87a1d0029a0aa36acaf9156952ec2a8a12189"}, {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-win_amd64.whl", hash = "sha256:c3401dc8543b52d3a8158007a0c1ab4e9c768fcbd24153a48c86972102197ddd"}, ] @@ -4618,52 +4619,55 @@ tests = ["pytest"] [[package]] name = "pyarrow" -version = "16.1.0" +version = "17.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" files = [ - {file = "pyarrow-16.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:17e23b9a65a70cc733d8b738baa6ad3722298fa0c81d88f63ff94bf25eaa77b9"}, - {file = "pyarrow-16.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4740cc41e2ba5d641071d0ab5e9ef9b5e6e8c7611351a5cb7c1d175eaf43674a"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98100e0268d04e0eec47b73f20b39c45b4006f3c4233719c3848aa27a03c1aef"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f68f409e7b283c085f2da014f9ef81e885d90dcd733bd648cfba3ef265961848"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a8914cd176f448e09746037b0c6b3a9d7688cef451ec5735094055116857580c"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:48be160782c0556156d91adbdd5a4a7e719f8d407cb46ae3bb4eaee09b3111bd"}, - {file = "pyarrow-16.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9cf389d444b0f41d9fe1444b70650fea31e9d52cfcb5f818b7888b91b586efff"}, - {file = "pyarrow-16.1.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:d0ebea336b535b37eee9eee31761813086d33ed06de9ab6fc6aaa0bace7b250c"}, - {file = "pyarrow-16.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e73cfc4a99e796727919c5541c65bb88b973377501e39b9842ea71401ca6c1c"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf9251264247ecfe93e5f5a0cd43b8ae834f1e61d1abca22da55b20c788417f6"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddf5aace92d520d3d2a20031d8b0ec27b4395cab9f74e07cc95edf42a5cc0147"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:25233642583bf658f629eb230b9bb79d9af4d9f9229890b3c878699c82f7d11e"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a33a64576fddfbec0a44112eaf844c20853647ca833e9a647bfae0582b2ff94b"}, - {file = "pyarrow-16.1.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:185d121b50836379fe012753cf15c4ba9638bda9645183ab36246923875f8d1b"}, - {file = "pyarrow-16.1.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:2e51ca1d6ed7f2e9d5c3c83decf27b0d17bb207a7dea986e8dc3e24f80ff7d6f"}, - {file = "pyarrow-16.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06ebccb6f8cb7357de85f60d5da50e83507954af617d7b05f48af1621d331c9a"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b04707f1979815f5e49824ce52d1dceb46e2f12909a48a6a753fe7cafbc44a0c"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d32000693deff8dc5df444b032b5985a48592c0697cb6e3071a5d59888714e2"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:8785bb10d5d6fd5e15d718ee1d1f914fe768bf8b4d1e5e9bf253de8a26cb1628"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e1369af39587b794873b8a307cc6623a3b1194e69399af0efd05bb202195a5a7"}, - {file = "pyarrow-16.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:febde33305f1498f6df85e8020bca496d0e9ebf2093bab9e0f65e2b4ae2b3444"}, - {file = "pyarrow-16.1.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b5f5705ab977947a43ac83b52ade3b881eb6e95fcc02d76f501d549a210ba77f"}, - {file = "pyarrow-16.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0d27bf89dfc2576f6206e9cd6cf7a107c9c06dc13d53bbc25b0bd4556f19cf5f"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d07de3ee730647a600037bc1d7b7994067ed64d0eba797ac74b2bc77384f4c2"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbef391b63f708e103df99fbaa3acf9f671d77a183a07546ba2f2c297b361e83"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19741c4dbbbc986d38856ee7ddfdd6a00fc3b0fc2d928795b95410d38bb97d15"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f2c5fb249caa17b94e2b9278b36a05ce03d3180e6da0c4c3b3ce5b2788f30eed"}, - {file = "pyarrow-16.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:e6b6d3cd35fbb93b70ade1336022cc1147b95ec6af7d36906ca7fe432eb09710"}, - {file = "pyarrow-16.1.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:18da9b76a36a954665ccca8aa6bd9f46c1145f79c0bb8f4f244f5f8e799bca55"}, - {file = "pyarrow-16.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:99f7549779b6e434467d2aa43ab2b7224dd9e41bdde486020bae198978c9e05e"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f07fdffe4fd5b15f5ec15c8b64584868d063bc22b86b46c9695624ca3505b7b4"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddfe389a08ea374972bd4065d5f25d14e36b43ebc22fc75f7b951f24378bf0b5"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3b20bd67c94b3a2ea0a749d2a5712fc845a69cb5d52e78e6449bbd295611f3aa"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ba8ac20693c0bb0bf4b238751d4409e62852004a8cf031c73b0e0962b03e45e3"}, - {file = "pyarrow-16.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:31a1851751433d89a986616015841977e0a188662fcffd1a5677453f1df2de0a"}, - {file = "pyarrow-16.1.0.tar.gz", hash = "sha256:15fbb22ea96d11f0b5768504a3f961edab25eaf4197c341720c4a387f6c60315"}, + {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, + {file = 
"pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"}, + {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"}, + {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"}, + {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, + {file = 
"pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, + {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, + {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, + {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, ] [package.dependencies] numpy = ">=1.16.6" +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + [[package]] name = "pyasn1" version = "0.6.0" @@ -5227,6 +5231,7 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -7410,4 +7415,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = "^3.10,<3.13" -content-hash = "33a9221211ea37b3ab4586c0876b209cbce4ac0bb7173b445484a74407212b30" +content-hash = "e9b70ba0939d17a7a599686f52ad87498dbbfc83e6d200b70837640063b6c387" diff --git a/python/pyproject.toml b/python/pyproject.toml index 90410539ed2c..a2f46fd1adbb 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -74,7 +74,7 @@ qdrant-client = { version = '^1.9', optional = true} redis = { version = "^4.6.0", optional = true} # usearch 
usearch = { version = "^2.9", optional = true} -pyarrow = { version = ">=12.0.1,<17.0.0", optional = true} +pyarrow = { version = ">=12.0.1,<18.0.0", optional = true} weaviate-client = { version = ">=3.18,<5.0", optional = true} ruff = "0.5.2" @@ -144,7 +144,7 @@ qdrant-client = '^1.9' redis = "^4.6.0" # usearch usearch = "^2.9" -pyarrow = ">=12.0.1,<17.0.0" +pyarrow = ">=12.0.1,<18.0.0" # weaviate weaviate-client = ">=3.18,<5.0" From 9480d807cfcda6586784aa053cf6e34f942bec44 Mon Sep 17 00:00:00 2001 From: Evan Mattson <35585003+moonbox3@users.noreply.github.com> Date: Thu, 1 Aug 2024 09:45:06 -0400 Subject: [PATCH 4/5] Python: Introduce the non-chat, non-streaming OpenAIAssistantAgent, samples, and tests (#7477) ### Motivation and Context To align with SK dotnet and to allow SK Python devs to leverage the OpenAI/Azure OpenAI assistants, this code is a required component for SK Python. ### Description We're introducing the code to build non-chat, non-streaming OpenAIAssistantAgents for both the OpenAI and Azure OpenAI styles. This PR introduces: - The required code to create OpenAI/AzureOpenAI assistants using the v2 library. - Samples showing how to create the agent, and how to use the tool resources like the code interpreter and file search. - Unit test coverage at 100% - Full mypy coverage - Closes #7080 ![image](https://github.com/user-attachments/assets/1c874710-067d-435e-9382-975d3697a16a) One to-do item: - introduce Agent integration tests Note: streaming assistant agent support will come in the next PR. ### Contribution Checklist - [X] The code builds clean without any errors or warnings - [X] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations - [X] All unit tests pass, and I have added new tests where possible - [X] I didn't break anyone :smile: --- python/samples/concepts/README.md | 2 +- .../openai_assistant_agent_chart_maker.py | 92 ++ ...openai_assistant_agent_code_interpreter.py | 70 ++ ...penai_assistant_agent_file_manipulation.py | 96 ++ .../openai_assistant_agent_file_search.py | 89 ++ .../chat_gpt_api_function_calling.py | 3 +- .../sales.csv | 701 +++++++++++ .../travelinfo.txt | 217 ++++ .../README.md | 16 +- .../step1_agent.py | 0 .../step2_plugins.py | 0 .../step8_openai_assistant_agent.py | 103 ++ python/semantic_kernel/agents/agent.py | 6 +- .../agents/chat_completion_agent.py | 3 +- .../agents/open_ai/__init__.py | 6 + .../agents/open_ai/azure_assistant_agent.py | 356 ++++++ .../agents/open_ai/open_ai_assistant_agent.py | 310 +++++ .../agents/open_ai/open_ai_assistant_base.py | 1021 +++++++++++++++++ .../agents/open_ai/run_polling_options.py | 30 + python/semantic_kernel/contents/__init__.py | 2 + .../contents/annotation_content.py | 61 + .../contents/chat_message_content.py | 8 + python/semantic_kernel/contents/const.py | 4 + .../contents/file_reference_content.py | 46 + python/semantic_kernel/exceptions/__init__.py | 1 + .../exceptions/agent_exceptions.py | 24 + python/tests/samples/test_concepts.py | 4 +- .../unit/agents/test_azure_assistant_agent.py | 291 +++++ .../agents/test_open_ai_assistant_agent.py | 428 +++++++ .../agents/test_open_ai_assistant_base.py | 972 ++++++++++++++++ .../unit/agents/test_run_polling_options.py | 50 + .../unit/contents/test_annotation_content.py | 124 ++ .../contents/test_file_reference_content.py | 76 ++ 33 
files changed, 5197 insertions(+), 15 deletions(-) create mode 100644 python/samples/concepts/agents/openai_assistant_agent_chart_maker.py create mode 100644 python/samples/concepts/agents/openai_assistant_agent_code_interpreter.py create mode 100644 python/samples/concepts/agents/openai_assistant_agent_file_manipulation.py create mode 100644 python/samples/concepts/agents/openai_assistant_agent_file_search.py create mode 100644 python/samples/concepts/resources/agent_assistant_file_manipulation/sales.csv create mode 100644 python/samples/concepts/resources/agent_assistant_file_search/travelinfo.txt rename python/samples/{concepts/agents => getting_started_with_agents}/README.md (51%) rename python/samples/{concepts/agents => getting_started_with_agents}/step1_agent.py (100%) rename python/samples/{concepts/agents => getting_started_with_agents}/step2_plugins.py (100%) create mode 100644 python/samples/getting_started_with_agents/step8_openai_assistant_agent.py create mode 100644 python/semantic_kernel/agents/open_ai/__init__.py create mode 100644 python/semantic_kernel/agents/open_ai/azure_assistant_agent.py create mode 100644 python/semantic_kernel/agents/open_ai/open_ai_assistant_agent.py create mode 100644 python/semantic_kernel/agents/open_ai/open_ai_assistant_base.py create mode 100644 python/semantic_kernel/agents/open_ai/run_polling_options.py create mode 100644 python/semantic_kernel/contents/annotation_content.py create mode 100644 python/semantic_kernel/contents/file_reference_content.py create mode 100644 python/semantic_kernel/exceptions/agent_exceptions.py create mode 100644 python/tests/unit/agents/test_azure_assistant_agent.py create mode 100644 python/tests/unit/agents/test_open_ai_assistant_agent.py create mode 100644 python/tests/unit/agents/test_open_ai_assistant_base.py create mode 100644 python/tests/unit/agents/test_run_polling_options.py create mode 100644 python/tests/unit/contents/test_annotation_content.py create mode 100644 python/tests/unit/contents/test_file_reference_content.py diff --git a/python/samples/concepts/README.md b/python/samples/concepts/README.md index ac7e6350c714..0ef6120ad285 100644 --- a/python/samples/concepts/README.md +++ b/python/samples/concepts/README.md @@ -4,7 +4,7 @@ This section contains code snippets that demonstrate the usage of Semantic Kerne | Features | Description | | -------- | ----------- | -| Agents | Creating and using agents in Semantic Kernel | +| Agents | Creating and using [agents](../../semantic_kernel/agents/) in Semantic Kernel | | AutoFunctionCalling | Using `Auto Function Calling` to allow function call capable models to invoke Kernel Functions automatically | | ChatCompletion | Using [`ChatCompletion`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/connectors/ai/chat_completion_client_base.py) messaging capable service with models | | Filtering | Creating and using Filters | diff --git a/python/samples/concepts/agents/openai_assistant_agent_chart_maker.py b/python/samples/concepts/agents/openai_assistant_agent_chart_maker.py new file mode 100644 index 000000000000..246251238b40 --- /dev/null +++ b/python/samples/concepts/agents/openai_assistant_agent_chart_maker.py @@ -0,0 +1,92 @@ +# Copyright (c) Microsoft. All rights reserved. 
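+# Sample overview: an Assistant agent with the code interpreter tool enabled renders
+# the supplied tabular data as a bar chart; generated images are surfaced as
+# FileReferenceContent items carrying the file_id of each produced file.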
+import asyncio + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.file_reference_content import FileReferenceContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +AGENT_NAME = "ChartMaker" +AGENT_INSTRUCTIONS = "Create charts as requested without explanation." + +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = False + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for message in agent.invoke(thread_id=thread_id): + if message.content: + print(f"# {message.role}: {message.content}") + + if len(message.items) > 0: + for item in message.items: + if isinstance(item, FileReferenceContent): + print(f"\n`{message.role}` => {item.file_id}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + # Define a service_id for the sample + service_id = "agent" + + # Create the agent configuration + if use_azure_openai: + agent = AzureAssistantAgent( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, + ) + else: + agent = OpenAIAssistantAgent( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, + ) + + # Create the assistant + await agent.create_assistant() + + # Define a thread and invoke the agent with the user input + thread_id = await agent.create_thread() + + try: + await invoke_agent( + agent, + thread_id=thread_id, + input=""" + Display this data using a bar-chart: + + Banding Brown Pink Yellow Sum + X00000 339 433 126 898 + X00300 48 421 222 691 + X12345 16 395 352 763 + Others 23 373 156 552 + Sum 426 1622 856 2904 + """, + ) + await invoke_agent( + agent, + thread_id=thread_id, + input="Can you regenerate this same chart using the category names as the bar colors?", + ) + finally: + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/agents/openai_assistant_agent_code_interpreter.py b/python/samples/concepts/agents/openai_assistant_agent_code_interpreter.py new file mode 100644 index 000000000000..b067992ad950 --- /dev/null +++ b/python/samples/concepts/agents/openai_assistant_agent_code_interpreter.py @@ -0,0 +1,70 @@ +# Copyright (c) Microsoft. All rights reserved. +import asyncio + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +AGENT_NAME = "CodeRunner" +AGENT_INSTRUCTIONS = "Run the provided code file and return the result."
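+# Sample overview: the agent runs model-written Python through the code interpreter
+# tool, here to compute the Fibonacci values less than 101.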
+ +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = True + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in agent.invoke(thread_id=thread_id): + if content.role != AuthorRole.TOOL: + print(f"# {content.role}: {content.content}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + # Define a service_id for the sample + service_id = "agent" + + # Create the agent + if use_azure_openai: + agent = AzureAssistantAgent( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, + ) + else: + agent = OpenAIAssistantAgent( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, + ) + + await agent.create_assistant() + + thread_id = await agent.create_thread() + + try: + await invoke_agent( + agent, + thread_id=thread_id, + input="Use code to determine the values in the Fibonacci sequence that are less than the value of 101.", # noqa: E501 + ) + finally: + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/agents/openai_assistant_agent_file_manipulation.py b/python/samples/concepts/agents/openai_assistant_agent_file_manipulation.py new file mode 100644 index 000000000000..9ab35c3460eb --- /dev/null +++ b/python/samples/concepts/agents/openai_assistant_agent_file_manipulation.py @@ -0,0 +1,96 @@ +# Copyright (c) Microsoft. All rights reserved. +import asyncio +import os + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent +from semantic_kernel.contents.annotation_content import AnnotationContent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +AGENT_NAME = "FileManipulation" +AGENT_INSTRUCTIONS = "Find answers to the user's questions in the provided file."
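+# Sample overview: sales.csv is uploaded with purpose="assistants" and attached to the
+# thread for the code interpreter; responses are scanned for AnnotationContent so the
+# generated report files can be fetched via agent.client.files.content.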
+ +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = False + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in agent.invoke(thread_id=thread_id): + print(f"# {content.role}: {content.content}") + + if len(content.items) > 0: + for item in content.items: + if isinstance(item, AnnotationContent): + print(f"\n`{item.quote}` => {item.file_id}") + response_content = await agent.client.files.content(item.file_id) + print(response_content.text) + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + # Define a service_id for the sample + service_id = "agent" + + # Create the agent configuration + if use_azure_openai: + agent = AzureAssistantAgent( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_file_search=True, + enable_code_interpreter=True, + ) + else: + agent = OpenAIAssistantAgent( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_file_search=True, + enable_code_interpreter=True, + ) + + # Create the OpenAI assistant + await agent.create_assistant() + + # Get the path to the sales.csv file + csv_file_path = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + "agent_assistant_file_manipulation", + "sales.csv", + ) + + # Upload the file for use with the assistant + file_id = await agent.add_file(csv_file_path, purpose="assistants") + + # Create a thread and specify the file to use for code interpretation + thread_id = await agent.create_thread(code_interpreter_file_ids=[file_id]) + + try: + await invoke_agent(agent, thread_id=thread_id, input="Which segment had the most sales?") + await invoke_agent(agent, thread_id=thread_id, input="List the top 5 countries that generated the most profit.") + await invoke_agent( + agent, + thread_id=thread_id, + input="Create a tab delimited file report of profit by each country per month.", + ) + finally: + await agent.client.files.delete(file_id) + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/agents/openai_assistant_agent_file_search.py b/python/samples/concepts/agents/openai_assistant_agent_file_search.py new file mode 100644 index 000000000000..f6b599cd490c --- /dev/null +++ b/python/samples/concepts/agents/openai_assistant_agent_file_search.py @@ -0,0 +1,89 @@ +# Copyright (c) Microsoft. All rights reserved. +import asyncio +import os + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +AGENT_NAME = "FileSearch" +AGENT_INSTRUCTIONS = "Find answers to the user's questions in the provided file." 
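+# Sample overview: travelinfo.txt is uploaded into a vector store and the assistant is
+# created with that vector_store_id, letting it answer questions via file search.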
+ +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = True + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in agent.invoke(thread_id=thread_id): + if content.role != AuthorRole.TOOL: + print(f"# {content.role}: {content.content}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + # Define a service_id for the sample + service_id = "agent" + + # Create the agent configuration + if use_azure_openai: + agent = AzureAssistantAgent( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_file_search=True, + ) + else: + agent = OpenAIAssistantAgent( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_file_search=True, + ) + + # Get the path to the travelinfo.txt file + txt_file_path = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + "agent_assistant_file_search", + "travelinfo.txt", + ) + + # Open the file and create the assistant with a vector store ID + with open(txt_file_path, "rb") as file: + # Create a file with the travelinfo.txt content + file = await agent.client.files.create(file=file, purpose="assistants") # type: ignore + + # Create a vector store with the file ID + vector_store = await agent.client.beta.vector_stores.create(file_ids=[file.id]) + + # Create an assistant with the vector store ID + await agent.create_assistant(vector_store_id=vector_store.id) + + # Define a thread and invoke the agent with the user input + thread_id = await agent.create_thread() + + try: + await invoke_agent(agent, thread_id=thread_id, input="Where did Sam go?") + await invoke_agent(agent, thread_id=thread_id, input="When does the flight leave Seattle?") + await invoke_agent(agent, thread_id=thread_id, input="What is the hotel contact info at the destination?") + finally: + await agent.client.beta.vector_stores.delete(vector_store.id) + await agent.client.files.delete(file.id) + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py b/python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py index a8ab7d64e290..f0381c1048ac 100644 --- a/python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py +++ b/python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py @@ -6,6 +6,7 @@ from typing import TYPE_CHECKING from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings from semantic_kernel.contents import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent @@ -82,7 +83,7 @@ max_tokens=2000, temperature=0.7, top_p=0.8, - function_choice_behavior="auto", + function_choice_behavior=FunctionChoiceBehavior.Auto(auto_invoke=True), ) history = ChatHistory() diff --git a/python/samples/concepts/resources/agent_assistant_file_manipulation/sales.csv 
b/python/samples/concepts/resources/agent_assistant_file_manipulation/sales.csv new file mode 100644 index 000000000000..da217c62db3e --- /dev/null +++ b/python/samples/concepts/resources/agent_assistant_file_manipulation/sales.csv @@ -0,0 +1,701 @@ +Segment,Country,Product,Units Sold,Sale Price,Gross Sales,Discounts,Sales,COGS,Profit,Date,Month Number,Month Name,Year +Government,Canada,Carretera,1618.5,20.00,32370.00,0.00,32370.00,16185.00,16185.00,1/1/2014,1,January,2014 +Government,Germany,Carretera,1321,20.00,26420.00,0.00,26420.00,13210.00,13210.00,1/1/2014,1,January,2014 +Midmarket,France,Carretera,2178,15.00,32670.00,0.00,32670.00,21780.00,10890.00,6/1/2014,6,June,2014 +Midmarket,Germany,Carretera,888,15.00,13320.00,0.00,13320.00,8880.00,4440.00,6/1/2014,6,June,2014 +Midmarket,Mexico,Carretera,2470,15.00,37050.00,0.00,37050.00,24700.00,12350.00,6/1/2014,6,June,2014 +Government,Germany,Carretera,1513,350.00,529550.00,0.00,529550.00,393380.00,136170.00,12/1/2014,12,December,2014 +Midmarket,Germany,Montana,921,15.00,13815.00,0.00,13815.00,9210.00,4605.00,3/1/2014,3,March,2014 +Channel Partners,Canada,Montana,2518,12.00,30216.00,0.00,30216.00,7554.00,22662.00,6/1/2014,6,June,2014 +Government,France,Montana,1899,20.00,37980.00,0.00,37980.00,18990.00,18990.00,6/1/2014,6,June,2014 +Channel Partners,Germany,Montana,1545,12.00,18540.00,0.00,18540.00,4635.00,13905.00,6/1/2014,6,June,2014 +Midmarket,Mexico,Montana,2470,15.00,37050.00,0.00,37050.00,24700.00,12350.00,6/1/2014,6,June,2014 +Enterprise,Canada,Montana,2665.5,125.00,333187.50,0.00,333187.50,319860.00,13327.50,7/1/2014,7,July,2014 +Small Business,Mexico,Montana,958,300.00,287400.00,0.00,287400.00,239500.00,47900.00,8/1/2014,8,August,2014 +Government,Germany,Montana,2146,7.00,15022.00,0.00,15022.00,10730.00,4292.00,9/1/2014,9,September,2014 +Enterprise,Canada,Montana,345,125.00,43125.00,0.00,43125.00,41400.00,1725.00,10/1/2013,10,October,2013 +Midmarket,United States of America,Montana,615,15.00,9225.00,0.00,9225.00,6150.00,3075.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,292,20.00,5840.00,0.00,5840.00,2920.00,2920.00,2/1/2014,2,February,2014 +Midmarket,Mexico,Paseo,974,15.00,14610.00,0.00,14610.00,9740.00,4870.00,2/1/2014,2,February,2014 +Channel Partners,Canada,Paseo,2518,12.00,30216.00,0.00,30216.00,7554.00,22662.00,6/1/2014,6,June,2014 +Government,Germany,Paseo,1006,350.00,352100.00,0.00,352100.00,261560.00,90540.00,6/1/2014,6,June,2014 +Channel Partners,Germany,Paseo,367,12.00,4404.00,0.00,4404.00,1101.00,3303.00,7/1/2014,7,July,2014 +Government,Mexico,Paseo,883,7.00,6181.00,0.00,6181.00,4415.00,1766.00,8/1/2014,8,August,2014 +Midmarket,France,Paseo,549,15.00,8235.00,0.00,8235.00,5490.00,2745.00,9/1/2013,9,September,2013 +Small Business,Mexico,Paseo,788,300.00,236400.00,0.00,236400.00,197000.00,39400.00,9/1/2013,9,September,2013 +Midmarket,Mexico,Paseo,2472,15.00,37080.00,0.00,37080.00,24720.00,12360.00,9/1/2014,9,September,2014 +Government,United States of America,Paseo,1143,7.00,8001.00,0.00,8001.00,5715.00,2286.00,10/1/2014,10,October,2014 +Government,Canada,Paseo,1725,350.00,603750.00,0.00,603750.00,448500.00,155250.00,11/1/2013,11,November,2013 +Channel Partners,United States of America,Paseo,912,12.00,10944.00,0.00,10944.00,2736.00,8208.00,11/1/2013,11,November,2013 +Midmarket,Canada,Paseo,2152,15.00,32280.00,0.00,32280.00,21520.00,10760.00,12/1/2013,12,December,2013 +Government,Canada,Paseo,1817,20.00,36340.00,0.00,36340.00,18170.00,18170.00,12/1/2014,12,December,2014 
+Government,Germany,Paseo,1513,350.00,529550.00,0.00,529550.00,393380.00,136170.00,12/1/2014,12,December,2014 +Government,Mexico,Velo,1493,7.00,10451.00,0.00,10451.00,7465.00,2986.00,1/1/2014,1,January,2014 +Enterprise,France,Velo,1804,125.00,225500.00,0.00,225500.00,216480.00,9020.00,2/1/2014,2,February,2014 +Channel Partners,Germany,Velo,2161,12.00,25932.00,0.00,25932.00,6483.00,19449.00,3/1/2014,3,March,2014 +Government,Germany,Velo,1006,350.00,352100.00,0.00,352100.00,261560.00,90540.00,6/1/2014,6,June,2014 +Channel Partners,Germany,Velo,1545,12.00,18540.00,0.00,18540.00,4635.00,13905.00,6/1/2014,6,June,2014 +Enterprise,United States of America,Velo,2821,125.00,352625.00,0.00,352625.00,338520.00,14105.00,8/1/2014,8,August,2014 +Enterprise,Canada,Velo,345,125.00,43125.00,0.00,43125.00,41400.00,1725.00,10/1/2013,10,October,2013 +Small Business,Canada,VTT,2001,300.00,600300.00,0.00,600300.00,500250.00,100050.00,2/1/2014,2,February,2014 +Channel Partners,Germany,VTT,2838,12.00,34056.00,0.00,34056.00,8514.00,25542.00,4/1/2014,4,April,2014 +Midmarket,France,VTT,2178,15.00,32670.00,0.00,32670.00,21780.00,10890.00,6/1/2014,6,June,2014 +Midmarket,Germany,VTT,888,15.00,13320.00,0.00,13320.00,8880.00,4440.00,6/1/2014,6,June,2014 +Government,France,VTT,1527,350.00,534450.00,0.00,534450.00,397020.00,137430.00,9/1/2013,9,September,2013 +Small Business,France,VTT,2151,300.00,645300.00,0.00,645300.00,537750.00,107550.00,9/1/2014,9,September,2014 +Government,Canada,VTT,1817,20.00,36340.00,0.00,36340.00,18170.00,18170.00,12/1/2014,12,December,2014 +Government,France,Amarilla,2750,350.00,962500.00,0.00,962500.00,715000.00,247500.00,2/1/2014,2,February,2014 +Channel Partners,United States of America,Amarilla,1953,12.00,23436.00,0.00,23436.00,5859.00,17577.00,4/1/2014,4,April,2014 +Enterprise,Germany,Amarilla,4219.5,125.00,527437.50,0.00,527437.50,506340.00,21097.50,4/1/2014,4,April,2014 +Government,France,Amarilla,1899,20.00,37980.00,0.00,37980.00,18990.00,18990.00,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1686,7.00,11802.00,0.00,11802.00,8430.00,3372.00,7/1/2014,7,July,2014 +Channel Partners,United States of America,Amarilla,2141,12.00,25692.00,0.00,25692.00,6423.00,19269.00,8/1/2014,8,August,2014 +Government,United States of America,Amarilla,1143,7.00,8001.00,0.00,8001.00,5715.00,2286.00,10/1/2014,10,October,2014 +Midmarket,United States of America,Amarilla,615,15.00,9225.00,0.00,9225.00,6150.00,3075.00,12/1/2014,12,December,2014 +Government,France,Paseo,3945,7.00,27615.00,276.15,27338.85,19725.00,7613.85,1/1/2014,1,January,2014 +Midmarket,France,Paseo,2296,15.00,34440.00,344.40,34095.60,22960.00,11135.60,2/1/2014,2,February,2014 +Government,France,Paseo,1030,7.00,7210.00,72.10,7137.90,5150.00,1987.90,5/1/2014,5,May,2014 +Government,France,Velo,639,7.00,4473.00,44.73,4428.27,3195.00,1233.27,11/1/2014,11,November,2014 +Government,Canada,VTT,1326,7.00,9282.00,92.82,9189.18,6630.00,2559.18,3/1/2014,3,March,2014 +Channel Partners,United States of America,Carretera,1858,12.00,22296.00,222.96,22073.04,5574.00,16499.04,2/1/2014,2,February,2014 +Government,Mexico,Carretera,1210,350.00,423500.00,4235.00,419265.00,314600.00,104665.00,3/1/2014,3,March,2014 +Government,United States of America,Carretera,2529,7.00,17703.00,177.03,17525.97,12645.00,4880.97,7/1/2014,7,July,2014 +Channel Partners,Canada,Carretera,1445,12.00,17340.00,173.40,17166.60,4335.00,12831.60,9/1/2014,9,September,2014 +Enterprise,United States of 
America,Carretera,330,125.00,41250.00,412.50,40837.50,39600.00,1237.50,9/1/2013,9,September,2013 +Channel Partners,France,Carretera,2671,12.00,32052.00,320.52,31731.48,8013.00,23718.48,9/1/2014,9,September,2014 +Channel Partners,Germany,Carretera,766,12.00,9192.00,91.92,9100.08,2298.00,6802.08,10/1/2013,10,October,2013 +Small Business,Mexico,Carretera,494,300.00,148200.00,1482.00,146718.00,123500.00,23218.00,10/1/2013,10,October,2013 +Government,Mexico,Carretera,1397,350.00,488950.00,4889.50,484060.50,363220.00,120840.50,10/1/2014,10,October,2014 +Government,France,Carretera,2155,350.00,754250.00,7542.50,746707.50,560300.00,186407.50,12/1/2014,12,December,2014 +Midmarket,Mexico,Montana,2214,15.00,33210.00,332.10,32877.90,22140.00,10737.90,3/1/2014,3,March,2014 +Small Business,United States of America,Montana,2301,300.00,690300.00,6903.00,683397.00,575250.00,108147.00,4/1/2014,4,April,2014 +Government,France,Montana,1375.5,20.00,27510.00,275.10,27234.90,13755.00,13479.90,7/1/2014,7,July,2014 +Government,Canada,Montana,1830,7.00,12810.00,128.10,12681.90,9150.00,3531.90,8/1/2014,8,August,2014 +Small Business,United States of America,Montana,2498,300.00,749400.00,7494.00,741906.00,624500.00,117406.00,9/1/2013,9,September,2013 +Enterprise,United States of America,Montana,663,125.00,82875.00,828.75,82046.25,79560.00,2486.25,10/1/2013,10,October,2013 +Midmarket,United States of America,Paseo,1514,15.00,22710.00,227.10,22482.90,15140.00,7342.90,2/1/2014,2,February,2014 +Government,United States of America,Paseo,4492.5,7.00,31447.50,314.48,31133.03,22462.50,8670.53,4/1/2014,4,April,2014 +Enterprise,United States of America,Paseo,727,125.00,90875.00,908.75,89966.25,87240.00,2726.25,6/1/2014,6,June,2014 +Enterprise,France,Paseo,787,125.00,98375.00,983.75,97391.25,94440.00,2951.25,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,1823,125.00,227875.00,2278.75,225596.25,218760.00,6836.25,7/1/2014,7,July,2014 +Midmarket,Germany,Paseo,747,15.00,11205.00,112.05,11092.95,7470.00,3622.95,9/1/2014,9,September,2014 +Channel Partners,Germany,Paseo,766,12.00,9192.00,91.92,9100.08,2298.00,6802.08,10/1/2013,10,October,2013 +Small Business,United States of America,Paseo,2905,300.00,871500.00,8715.00,862785.00,726250.00,136535.00,11/1/2014,11,November,2014 +Government,France,Paseo,2155,350.00,754250.00,7542.50,746707.50,560300.00,186407.50,12/1/2014,12,December,2014 +Government,France,Velo,3864,20.00,77280.00,772.80,76507.20,38640.00,37867.20,4/1/2014,4,April,2014 +Government,Mexico,Velo,362,7.00,2534.00,25.34,2508.66,1810.00,698.66,5/1/2014,5,May,2014 +Enterprise,Canada,Velo,923,125.00,115375.00,1153.75,114221.25,110760.00,3461.25,8/1/2014,8,August,2014 +Enterprise,United States of America,Velo,663,125.00,82875.00,828.75,82046.25,79560.00,2486.25,10/1/2013,10,October,2013 +Government,Canada,Velo,2092,7.00,14644.00,146.44,14497.56,10460.00,4037.56,11/1/2013,11,November,2013 +Government,Germany,VTT,263,7.00,1841.00,18.41,1822.59,1315.00,507.59,3/1/2014,3,March,2014 +Government,Canada,VTT,943.5,350.00,330225.00,3302.25,326922.75,245310.00,81612.75,4/1/2014,4,April,2014 +Enterprise,United States of America,VTT,727,125.00,90875.00,908.75,89966.25,87240.00,2726.25,6/1/2014,6,June,2014 +Enterprise,France,VTT,787,125.00,98375.00,983.75,97391.25,94440.00,2951.25,6/1/2014,6,June,2014 +Small Business,Germany,VTT,986,300.00,295800.00,2958.00,292842.00,246500.00,46342.00,9/1/2014,9,September,2014 +Small Business,Mexico,VTT,494,300.00,148200.00,1482.00,146718.00,123500.00,23218.00,10/1/2013,10,October,2013 
+Government,Mexico,VTT,1397,350.00,488950.00,4889.50,484060.50,363220.00,120840.50,10/1/2014,10,October,2014 +Enterprise,France,VTT,1744,125.00,218000.00,2180.00,215820.00,209280.00,6540.00,11/1/2014,11,November,2014 +Channel Partners,United States of America,Amarilla,1989,12.00,23868.00,238.68,23629.32,5967.00,17662.32,9/1/2013,9,September,2013 +Midmarket,France,Amarilla,321,15.00,4815.00,48.15,4766.85,3210.00,1556.85,11/1/2013,11,November,2013 +Enterprise,Canada,Carretera,742.5,125.00,92812.50,1856.25,90956.25,89100.00,1856.25,4/1/2014,4,April,2014 +Channel Partners,Canada,Carretera,1295,12.00,15540.00,310.80,15229.20,3885.00,11344.20,10/1/2014,10,October,2014 +Small Business,Germany,Carretera,214,300.00,64200.00,1284.00,62916.00,53500.00,9416.00,10/1/2013,10,October,2013 +Government,France,Carretera,2145,7.00,15015.00,300.30,14714.70,10725.00,3989.70,11/1/2013,11,November,2013 +Government,Canada,Carretera,2852,350.00,998200.00,19964.00,978236.00,741520.00,236716.00,12/1/2014,12,December,2014 +Channel Partners,United States of America,Montana,1142,12.00,13704.00,274.08,13429.92,3426.00,10003.92,6/1/2014,6,June,2014 +Government,United States of America,Montana,1566,20.00,31320.00,626.40,30693.60,15660.00,15033.60,10/1/2014,10,October,2014 +Channel Partners,Mexico,Montana,690,12.00,8280.00,165.60,8114.40,2070.00,6044.40,11/1/2014,11,November,2014 +Enterprise,Mexico,Montana,1660,125.00,207500.00,4150.00,203350.00,199200.00,4150.00,11/1/2013,11,November,2013 +Midmarket,Canada,Paseo,2363,15.00,35445.00,708.90,34736.10,23630.00,11106.10,2/1/2014,2,February,2014 +Small Business,France,Paseo,918,300.00,275400.00,5508.00,269892.00,229500.00,40392.00,5/1/2014,5,May,2014 +Small Business,Germany,Paseo,1728,300.00,518400.00,10368.00,508032.00,432000.00,76032.00,5/1/2014,5,May,2014 +Channel Partners,United States of America,Paseo,1142,12.00,13704.00,274.08,13429.92,3426.00,10003.92,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,662,125.00,82750.00,1655.00,81095.00,79440.00,1655.00,6/1/2014,6,June,2014 +Channel Partners,Canada,Paseo,1295,12.00,15540.00,310.80,15229.20,3885.00,11344.20,10/1/2014,10,October,2014 +Enterprise,Germany,Paseo,809,125.00,101125.00,2022.50,99102.50,97080.00,2022.50,10/1/2013,10,October,2013 +Enterprise,Mexico,Paseo,2145,125.00,268125.00,5362.50,262762.50,257400.00,5362.50,10/1/2013,10,October,2013 +Channel Partners,France,Paseo,1785,12.00,21420.00,428.40,20991.60,5355.00,15636.60,11/1/2013,11,November,2013 +Small Business,Canada,Paseo,1916,300.00,574800.00,11496.00,563304.00,479000.00,84304.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,2852,350.00,998200.00,19964.00,978236.00,741520.00,236716.00,12/1/2014,12,December,2014 +Enterprise,Canada,Paseo,2729,125.00,341125.00,6822.50,334302.50,327480.00,6822.50,12/1/2014,12,December,2014 +Midmarket,United States of America,Paseo,1925,15.00,28875.00,577.50,28297.50,19250.00,9047.50,12/1/2013,12,December,2013 +Government,United States of America,Paseo,2013,7.00,14091.00,281.82,13809.18,10065.00,3744.18,12/1/2013,12,December,2013 +Channel Partners,France,Paseo,1055,12.00,12660.00,253.20,12406.80,3165.00,9241.80,12/1/2014,12,December,2014 +Channel Partners,Mexico,Paseo,1084,12.00,13008.00,260.16,12747.84,3252.00,9495.84,12/1/2014,12,December,2014 +Government,United States of America,Velo,1566,20.00,31320.00,626.40,30693.60,15660.00,15033.60,10/1/2014,10,October,2014 +Government,Germany,Velo,2966,350.00,1038100.00,20762.00,1017338.00,771160.00,246178.00,10/1/2013,10,October,2013 
+Government,Germany,Velo,2877,350.00,1006950.00,20139.00,986811.00,748020.00,238791.00,10/1/2014,10,October,2014 +Enterprise,Germany,Velo,809,125.00,101125.00,2022.50,99102.50,97080.00,2022.50,10/1/2013,10,October,2013 +Enterprise,Mexico,Velo,2145,125.00,268125.00,5362.50,262762.50,257400.00,5362.50,10/1/2013,10,October,2013 +Channel Partners,France,Velo,1055,12.00,12660.00,253.20,12406.80,3165.00,9241.80,12/1/2014,12,December,2014 +Government,Mexico,Velo,544,20.00,10880.00,217.60,10662.40,5440.00,5222.40,12/1/2013,12,December,2013 +Channel Partners,Mexico,Velo,1084,12.00,13008.00,260.16,12747.84,3252.00,9495.84,12/1/2014,12,December,2014 +Enterprise,Mexico,VTT,662,125.00,82750.00,1655.00,81095.00,79440.00,1655.00,6/1/2014,6,June,2014 +Small Business,Germany,VTT,214,300.00,64200.00,1284.00,62916.00,53500.00,9416.00,10/1/2013,10,October,2013 +Government,Germany,VTT,2877,350.00,1006950.00,20139.00,986811.00,748020.00,238791.00,10/1/2014,10,October,2014 +Enterprise,Canada,VTT,2729,125.00,341125.00,6822.50,334302.50,327480.00,6822.50,12/1/2014,12,December,2014 +Government,United States of America,VTT,266,350.00,93100.00,1862.00,91238.00,69160.00,22078.00,12/1/2013,12,December,2013 +Government,Mexico,VTT,1940,350.00,679000.00,13580.00,665420.00,504400.00,161020.00,12/1/2013,12,December,2013 +Small Business,Germany,Amarilla,259,300.00,77700.00,1554.00,76146.00,64750.00,11396.00,3/1/2014,3,March,2014 +Small Business,Mexico,Amarilla,1101,300.00,330300.00,6606.00,323694.00,275250.00,48444.00,3/1/2014,3,March,2014 +Enterprise,Germany,Amarilla,2276,125.00,284500.00,5690.00,278810.00,273120.00,5690.00,5/1/2014,5,May,2014 +Government,Germany,Amarilla,2966,350.00,1038100.00,20762.00,1017338.00,771160.00,246178.00,10/1/2013,10,October,2013 +Government,United States of America,Amarilla,1236,20.00,24720.00,494.40,24225.60,12360.00,11865.60,11/1/2014,11,November,2014 +Government,France,Amarilla,941,20.00,18820.00,376.40,18443.60,9410.00,9033.60,11/1/2014,11,November,2014 +Small Business,Canada,Amarilla,1916,300.00,574800.00,11496.00,563304.00,479000.00,84304.00,12/1/2014,12,December,2014 +Enterprise,France,Carretera,4243.5,125.00,530437.50,15913.13,514524.38,509220.00,5304.38,4/1/2014,4,April,2014 +Government,Germany,Carretera,2580,20.00,51600.00,1548.00,50052.00,25800.00,24252.00,4/1/2014,4,April,2014 +Small Business,Germany,Carretera,689,300.00,206700.00,6201.00,200499.00,172250.00,28249.00,6/1/2014,6,June,2014 +Channel Partners,United States of America,Carretera,1947,12.00,23364.00,700.92,22663.08,5841.00,16822.08,9/1/2014,9,September,2014 +Channel Partners,Canada,Carretera,908,12.00,10896.00,326.88,10569.12,2724.00,7845.12,12/1/2013,12,December,2013 +Government,Germany,Montana,1958,7.00,13706.00,411.18,13294.82,9790.00,3504.82,2/1/2014,2,February,2014 +Channel Partners,France,Montana,1901,12.00,22812.00,684.36,22127.64,5703.00,16424.64,6/1/2014,6,June,2014 +Government,France,Montana,544,7.00,3808.00,114.24,3693.76,2720.00,973.76,9/1/2014,9,September,2014 +Government,Germany,Montana,1797,350.00,628950.00,18868.50,610081.50,467220.00,142861.50,9/1/2013,9,September,2013 +Enterprise,France,Montana,1287,125.00,160875.00,4826.25,156048.75,154440.00,1608.75,12/1/2014,12,December,2014 +Enterprise,Germany,Montana,1706,125.00,213250.00,6397.50,206852.50,204720.00,2132.50,12/1/2014,12,December,2014 +Small Business,France,Paseo,2434.5,300.00,730350.00,21910.50,708439.50,608625.00,99814.50,1/1/2014,1,January,2014 +Enterprise,Canada,Paseo,1774,125.00,221750.00,6652.50,215097.50,212880.00,2217.50,3/1/2014,3,March,2014 
+Channel Partners,France,Paseo,1901,12.00,22812.00,684.36,22127.64,5703.00,16424.64,6/1/2014,6,June,2014 +Small Business,Germany,Paseo,689,300.00,206700.00,6201.00,200499.00,172250.00,28249.00,6/1/2014,6,June,2014 +Enterprise,Germany,Paseo,1570,125.00,196250.00,5887.50,190362.50,188400.00,1962.50,6/1/2014,6,June,2014 +Channel Partners,United States of America,Paseo,1369.5,12.00,16434.00,493.02,15940.98,4108.50,11832.48,7/1/2014,7,July,2014 +Enterprise,Canada,Paseo,2009,125.00,251125.00,7533.75,243591.25,241080.00,2511.25,10/1/2014,10,October,2014 +Midmarket,Germany,Paseo,1945,15.00,29175.00,875.25,28299.75,19450.00,8849.75,10/1/2013,10,October,2013 +Enterprise,France,Paseo,1287,125.00,160875.00,4826.25,156048.75,154440.00,1608.75,12/1/2014,12,December,2014 +Enterprise,Germany,Paseo,1706,125.00,213250.00,6397.50,206852.50,204720.00,2132.50,12/1/2014,12,December,2014 +Enterprise,Canada,Velo,2009,125.00,251125.00,7533.75,243591.25,241080.00,2511.25,10/1/2014,10,October,2014 +Small Business,United States of America,VTT,2844,300.00,853200.00,25596.00,827604.00,711000.00,116604.00,2/1/2014,2,February,2014 +Channel Partners,Mexico,VTT,1916,12.00,22992.00,689.76,22302.24,5748.00,16554.24,4/1/2014,4,April,2014 +Enterprise,Germany,VTT,1570,125.00,196250.00,5887.50,190362.50,188400.00,1962.50,6/1/2014,6,June,2014 +Small Business,Canada,VTT,1874,300.00,562200.00,16866.00,545334.00,468500.00,76834.00,8/1/2014,8,August,2014 +Government,Mexico,VTT,1642,350.00,574700.00,17241.00,557459.00,426920.00,130539.00,8/1/2014,8,August,2014 +Midmarket,Germany,VTT,1945,15.00,29175.00,875.25,28299.75,19450.00,8849.75,10/1/2013,10,October,2013 +Government,Canada,Carretera,831,20.00,16620.00,498.60,16121.40,8310.00,7811.40,5/1/2014,5,May,2014 +Government,Mexico,Paseo,1760,7.00,12320.00,369.60,11950.40,8800.00,3150.40,9/1/2013,9,September,2013 +Government,Canada,Velo,3850.5,20.00,77010.00,2310.30,74699.70,38505.00,36194.70,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,2479,12.00,29748.00,892.44,28855.56,7437.00,21418.56,1/1/2014,1,January,2014 +Midmarket,Mexico,Montana,2031,15.00,30465.00,1218.60,29246.40,20310.00,8936.40,10/1/2014,10,October,2014 +Midmarket,Mexico,Paseo,2031,15.00,30465.00,1218.60,29246.40,20310.00,8936.40,10/1/2014,10,October,2014 +Midmarket,France,Paseo,2261,15.00,33915.00,1356.60,32558.40,22610.00,9948.40,12/1/2013,12,December,2013 +Government,United States of America,Velo,736,20.00,14720.00,588.80,14131.20,7360.00,6771.20,9/1/2013,9,September,2013 +Government,Canada,Carretera,2851,7.00,19957.00,798.28,19158.72,14255.00,4903.72,10/1/2013,10,October,2013 +Small Business,Germany,Carretera,2021,300.00,606300.00,24252.00,582048.00,505250.00,76798.00,10/1/2014,10,October,2014 +Government,United States of America,Carretera,274,350.00,95900.00,3836.00,92064.00,71240.00,20824.00,12/1/2014,12,December,2014 +Midmarket,Canada,Montana,1967,15.00,29505.00,1180.20,28324.80,19670.00,8654.80,3/1/2014,3,March,2014 +Small Business,Germany,Montana,1859,300.00,557700.00,22308.00,535392.00,464750.00,70642.00,8/1/2014,8,August,2014 +Government,Canada,Montana,2851,7.00,19957.00,798.28,19158.72,14255.00,4903.72,10/1/2013,10,October,2013 +Small Business,Germany,Montana,2021,300.00,606300.00,24252.00,582048.00,505250.00,76798.00,10/1/2014,10,October,2014 +Enterprise,Mexico,Montana,1138,125.00,142250.00,5690.00,136560.00,136560.00,0.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,4251,7.00,29757.00,1190.28,28566.72,21255.00,7311.72,1/1/2014,1,January,2014 
+Enterprise,Germany,Paseo,795,125.00,99375.00,3975.00,95400.00,95400.00,0.00,3/1/2014,3,March,2014 +Small Business,Germany,Paseo,1414.5,300.00,424350.00,16974.00,407376.00,353625.00,53751.00,4/1/2014,4,April,2014 +Small Business,United States of America,Paseo,2918,300.00,875400.00,35016.00,840384.00,729500.00,110884.00,5/1/2014,5,May,2014 +Government,United States of America,Paseo,3450,350.00,1207500.00,48300.00,1159200.00,897000.00,262200.00,7/1/2014,7,July,2014 +Enterprise,France,Paseo,2988,125.00,373500.00,14940.00,358560.00,358560.00,0.00,7/1/2014,7,July,2014 +Midmarket,Canada,Paseo,218,15.00,3270.00,130.80,3139.20,2180.00,959.20,9/1/2014,9,September,2014 +Government,Canada,Paseo,2074,20.00,41480.00,1659.20,39820.80,20740.00,19080.80,9/1/2014,9,September,2014 +Government,United States of America,Paseo,1056,20.00,21120.00,844.80,20275.20,10560.00,9715.20,9/1/2014,9,September,2014 +Midmarket,United States of America,Paseo,671,15.00,10065.00,402.60,9662.40,6710.00,2952.40,10/1/2013,10,October,2013 +Midmarket,Mexico,Paseo,1514,15.00,22710.00,908.40,21801.60,15140.00,6661.60,10/1/2013,10,October,2013 +Government,United States of America,Paseo,274,350.00,95900.00,3836.00,92064.00,71240.00,20824.00,12/1/2014,12,December,2014 +Enterprise,Mexico,Paseo,1138,125.00,142250.00,5690.00,136560.00,136560.00,0.00,12/1/2014,12,December,2014 +Channel Partners,United States of America,Velo,1465,12.00,17580.00,703.20,16876.80,4395.00,12481.80,3/1/2014,3,March,2014 +Government,Canada,Velo,2646,20.00,52920.00,2116.80,50803.20,26460.00,24343.20,9/1/2013,9,September,2013 +Government,France,Velo,2177,350.00,761950.00,30478.00,731472.00,566020.00,165452.00,10/1/2014,10,October,2014 +Channel Partners,France,VTT,866,12.00,10392.00,415.68,9976.32,2598.00,7378.32,5/1/2014,5,May,2014 +Government,United States of America,VTT,349,350.00,122150.00,4886.00,117264.00,90740.00,26524.00,9/1/2013,9,September,2013 +Government,France,VTT,2177,350.00,761950.00,30478.00,731472.00,566020.00,165452.00,10/1/2014,10,October,2014 +Midmarket,Mexico,VTT,1514,15.00,22710.00,908.40,21801.60,15140.00,6661.60,10/1/2013,10,October,2013 +Government,Mexico,Amarilla,1865,350.00,652750.00,26110.00,626640.00,484900.00,141740.00,2/1/2014,2,February,2014 +Enterprise,Mexico,Amarilla,1074,125.00,134250.00,5370.00,128880.00,128880.00,0.00,4/1/2014,4,April,2014 +Government,Germany,Amarilla,1907,350.00,667450.00,26698.00,640752.00,495820.00,144932.00,9/1/2014,9,September,2014 +Midmarket,United States of America,Amarilla,671,15.00,10065.00,402.60,9662.40,6710.00,2952.40,10/1/2013,10,October,2013 +Government,Canada,Amarilla,1778,350.00,622300.00,24892.00,597408.00,462280.00,135128.00,12/1/2013,12,December,2013 +Government,Germany,Montana,1159,7.00,8113.00,405.65,7707.35,5795.00,1912.35,10/1/2013,10,October,2013 +Government,Germany,Paseo,1372,7.00,9604.00,480.20,9123.80,6860.00,2263.80,1/1/2014,1,January,2014 +Government,Canada,Paseo,2349,7.00,16443.00,822.15,15620.85,11745.00,3875.85,9/1/2013,9,September,2013 +Government,Mexico,Paseo,2689,7.00,18823.00,941.15,17881.85,13445.00,4436.85,10/1/2014,10,October,2014 +Channel Partners,Canada,Paseo,2431,12.00,29172.00,1458.60,27713.40,7293.00,20420.40,12/1/2014,12,December,2014 +Channel Partners,Canada,Velo,2431,12.00,29172.00,1458.60,27713.40,7293.00,20420.40,12/1/2014,12,December,2014 +Government,Mexico,VTT,2689,7.00,18823.00,941.15,17881.85,13445.00,4436.85,10/1/2014,10,October,2014 +Government,Mexico,Amarilla,1683,7.00,11781.00,589.05,11191.95,8415.00,2776.95,7/1/2014,7,July,2014 +Channel 
Partners,Mexico,Amarilla,1123,12.00,13476.00,673.80,12802.20,3369.00,9433.20,8/1/2014,8,August,2014 +Government,Germany,Amarilla,1159,7.00,8113.00,405.65,7707.35,5795.00,1912.35,10/1/2013,10,October,2013 +Channel Partners,France,Carretera,1865,12.00,22380.00,1119.00,21261.00,5595.00,15666.00,2/1/2014,2,February,2014 +Channel Partners,Germany,Carretera,1116,12.00,13392.00,669.60,12722.40,3348.00,9374.40,2/1/2014,2,February,2014 +Government,France,Carretera,1563,20.00,31260.00,1563.00,29697.00,15630.00,14067.00,5/1/2014,5,May,2014 +Small Business,United States of America,Carretera,991,300.00,297300.00,14865.00,282435.00,247750.00,34685.00,6/1/2014,6,June,2014 +Government,Germany,Carretera,1016,7.00,7112.00,355.60,6756.40,5080.00,1676.40,11/1/2013,11,November,2013 +Midmarket,Mexico,Carretera,2791,15.00,41865.00,2093.25,39771.75,27910.00,11861.75,11/1/2014,11,November,2014 +Government,United States of America,Carretera,570,7.00,3990.00,199.50,3790.50,2850.00,940.50,12/1/2014,12,December,2014 +Government,France,Carretera,2487,7.00,17409.00,870.45,16538.55,12435.00,4103.55,12/1/2014,12,December,2014 +Government,France,Montana,1384.5,350.00,484575.00,24228.75,460346.25,359970.00,100376.25,1/1/2014,1,January,2014 +Enterprise,United States of America,Montana,3627,125.00,453375.00,22668.75,430706.25,435240.00,-4533.75,7/1/2014,7,July,2014 +Government,Mexico,Montana,720,350.00,252000.00,12600.00,239400.00,187200.00,52200.00,9/1/2013,9,September,2013 +Channel Partners,Germany,Montana,2342,12.00,28104.00,1405.20,26698.80,7026.00,19672.80,11/1/2014,11,November,2014 +Small Business,Mexico,Montana,1100,300.00,330000.00,16500.00,313500.00,275000.00,38500.00,12/1/2013,12,December,2013 +Government,France,Paseo,1303,20.00,26060.00,1303.00,24757.00,13030.00,11727.00,2/1/2014,2,February,2014 +Enterprise,United States of America,Paseo,2992,125.00,374000.00,18700.00,355300.00,359040.00,-3740.00,3/1/2014,3,March,2014 +Enterprise,France,Paseo,2385,125.00,298125.00,14906.25,283218.75,286200.00,-2981.25,3/1/2014,3,March,2014 +Small Business,Mexico,Paseo,1607,300.00,482100.00,24105.00,457995.00,401750.00,56245.00,4/1/2014,4,April,2014 +Government,United States of America,Paseo,2327,7.00,16289.00,814.45,15474.55,11635.00,3839.55,5/1/2014,5,May,2014 +Small Business,United States of America,Paseo,991,300.00,297300.00,14865.00,282435.00,247750.00,34685.00,6/1/2014,6,June,2014 +Government,United States of America,Paseo,602,350.00,210700.00,10535.00,200165.00,156520.00,43645.00,6/1/2014,6,June,2014 +Midmarket,France,Paseo,2620,15.00,39300.00,1965.00,37335.00,26200.00,11135.00,9/1/2014,9,September,2014 +Government,Canada,Paseo,1228,350.00,429800.00,21490.00,408310.00,319280.00,89030.00,10/1/2013,10,October,2013 +Government,Canada,Paseo,1389,20.00,27780.00,1389.00,26391.00,13890.00,12501.00,10/1/2013,10,October,2013 +Enterprise,United States of America,Paseo,861,125.00,107625.00,5381.25,102243.75,103320.00,-1076.25,10/1/2014,10,October,2014 +Enterprise,France,Paseo,704,125.00,88000.00,4400.00,83600.00,84480.00,-880.00,10/1/2013,10,October,2013 +Government,Canada,Paseo,1802,20.00,36040.00,1802.00,34238.00,18020.00,16218.00,12/1/2013,12,December,2013 +Government,United States of America,Paseo,2663,20.00,53260.00,2663.00,50597.00,26630.00,23967.00,12/1/2014,12,December,2014 +Government,France,Paseo,2136,7.00,14952.00,747.60,14204.40,10680.00,3524.40,12/1/2013,12,December,2013 +Midmarket,Germany,Paseo,2116,15.00,31740.00,1587.00,30153.00,21160.00,8993.00,12/1/2013,12,December,2013 +Midmarket,United States of 
America,Velo,555,15.00,8325.00,416.25,7908.75,5550.00,2358.75,1/1/2014,1,January,2014 +Midmarket,Mexico,Velo,2861,15.00,42915.00,2145.75,40769.25,28610.00,12159.25,1/1/2014,1,January,2014 +Enterprise,Germany,Velo,807,125.00,100875.00,5043.75,95831.25,96840.00,-1008.75,2/1/2014,2,February,2014 +Government,United States of America,Velo,602,350.00,210700.00,10535.00,200165.00,156520.00,43645.00,6/1/2014,6,June,2014 +Government,United States of America,Velo,2832,20.00,56640.00,2832.00,53808.00,28320.00,25488.00,8/1/2014,8,August,2014 +Government,France,Velo,1579,20.00,31580.00,1579.00,30001.00,15790.00,14211.00,8/1/2014,8,August,2014 +Enterprise,United States of America,Velo,861,125.00,107625.00,5381.25,102243.75,103320.00,-1076.25,10/1/2014,10,October,2014 +Enterprise,France,Velo,704,125.00,88000.00,4400.00,83600.00,84480.00,-880.00,10/1/2013,10,October,2013 +Government,France,Velo,1033,20.00,20660.00,1033.00,19627.00,10330.00,9297.00,12/1/2013,12,December,2013 +Small Business,Germany,Velo,1250,300.00,375000.00,18750.00,356250.00,312500.00,43750.00,12/1/2014,12,December,2014 +Government,Canada,VTT,1389,20.00,27780.00,1389.00,26391.00,13890.00,12501.00,10/1/2013,10,October,2013 +Government,United States of America,VTT,1265,20.00,25300.00,1265.00,24035.00,12650.00,11385.00,11/1/2013,11,November,2013 +Government,Germany,VTT,2297,20.00,45940.00,2297.00,43643.00,22970.00,20673.00,11/1/2013,11,November,2013 +Government,United States of America,VTT,2663,20.00,53260.00,2663.00,50597.00,26630.00,23967.00,12/1/2014,12,December,2014 +Government,United States of America,VTT,570,7.00,3990.00,199.50,3790.50,2850.00,940.50,12/1/2014,12,December,2014 +Government,France,VTT,2487,7.00,17409.00,870.45,16538.55,12435.00,4103.55,12/1/2014,12,December,2014 +Government,Germany,Amarilla,1350,350.00,472500.00,23625.00,448875.00,351000.00,97875.00,2/1/2014,2,February,2014 +Government,Canada,Amarilla,552,350.00,193200.00,9660.00,183540.00,143520.00,40020.00,8/1/2014,8,August,2014 +Government,Canada,Amarilla,1228,350.00,429800.00,21490.00,408310.00,319280.00,89030.00,10/1/2013,10,October,2013 +Small Business,Germany,Amarilla,1250,300.00,375000.00,18750.00,356250.00,312500.00,43750.00,12/1/2014,12,December,2014 +Midmarket,France,Paseo,3801,15.00,57015.00,3420.90,53594.10,38010.00,15584.10,4/1/2014,4,April,2014 +Government,United States of America,Carretera,1117.5,20.00,22350.00,1341.00,21009.00,11175.00,9834.00,1/1/2014,1,January,2014 +Midmarket,Canada,Carretera,2844,15.00,42660.00,2559.60,40100.40,28440.00,11660.40,6/1/2014,6,June,2014 +Channel Partners,Mexico,Carretera,562,12.00,6744.00,404.64,6339.36,1686.00,4653.36,9/1/2014,9,September,2014 +Channel Partners,Canada,Carretera,2299,12.00,27588.00,1655.28,25932.72,6897.00,19035.72,10/1/2013,10,October,2013 +Midmarket,United States of America,Carretera,2030,15.00,30450.00,1827.00,28623.00,20300.00,8323.00,11/1/2014,11,November,2014 +Government,United States of America,Carretera,263,7.00,1841.00,110.46,1730.54,1315.00,415.54,11/1/2013,11,November,2013 +Enterprise,Germany,Carretera,887,125.00,110875.00,6652.50,104222.50,106440.00,-2217.50,12/1/2013,12,December,2013 +Government,Mexico,Montana,980,350.00,343000.00,20580.00,322420.00,254800.00,67620.00,4/1/2014,4,April,2014 +Government,Germany,Montana,1460,350.00,511000.00,30660.00,480340.00,379600.00,100740.00,5/1/2014,5,May,2014 +Government,France,Montana,1403,7.00,9821.00,589.26,9231.74,7015.00,2216.74,10/1/2013,10,October,2013 +Channel Partners,United States of 
America,Montana,2723,12.00,32676.00,1960.56,30715.44,8169.00,22546.44,11/1/2014,11,November,2014 +Government,France,Paseo,1496,350.00,523600.00,31416.00,492184.00,388960.00,103224.00,6/1/2014,6,June,2014 +Channel Partners,Canada,Paseo,2299,12.00,27588.00,1655.28,25932.72,6897.00,19035.72,10/1/2013,10,October,2013 +Government,United States of America,Paseo,727,350.00,254450.00,15267.00,239183.00,189020.00,50163.00,10/1/2013,10,October,2013 +Enterprise,Canada,Velo,952,125.00,119000.00,7140.00,111860.00,114240.00,-2380.00,2/1/2014,2,February,2014 +Enterprise,United States of America,Velo,2755,125.00,344375.00,20662.50,323712.50,330600.00,-6887.50,2/1/2014,2,February,2014 +Midmarket,Germany,Velo,1530,15.00,22950.00,1377.00,21573.00,15300.00,6273.00,5/1/2014,5,May,2014 +Government,France,Velo,1496,350.00,523600.00,31416.00,492184.00,388960.00,103224.00,6/1/2014,6,June,2014 +Government,Mexico,Velo,1498,7.00,10486.00,629.16,9856.84,7490.00,2366.84,6/1/2014,6,June,2014 +Small Business,France,Velo,1221,300.00,366300.00,21978.00,344322.00,305250.00,39072.00,10/1/2013,10,October,2013 +Government,France,Velo,2076,350.00,726600.00,43596.00,683004.00,539760.00,143244.00,10/1/2013,10,October,2013 +Midmarket,Canada,VTT,2844,15.00,42660.00,2559.60,40100.40,28440.00,11660.40,6/1/2014,6,June,2014 +Government,Mexico,VTT,1498,7.00,10486.00,629.16,9856.84,7490.00,2366.84,6/1/2014,6,June,2014 +Small Business,France,VTT,1221,300.00,366300.00,21978.00,344322.00,305250.00,39072.00,10/1/2013,10,October,2013 +Government,Mexico,VTT,1123,20.00,22460.00,1347.60,21112.40,11230.00,9882.40,11/1/2013,11,November,2013 +Small Business,Canada,VTT,2436,300.00,730800.00,43848.00,686952.00,609000.00,77952.00,12/1/2013,12,December,2013 +Enterprise,France,Amarilla,1987.5,125.00,248437.50,14906.25,233531.25,238500.00,-4968.75,1/1/2014,1,January,2014 +Government,Mexico,Amarilla,1679,350.00,587650.00,35259.00,552391.00,436540.00,115851.00,9/1/2014,9,September,2014 +Government,United States of America,Amarilla,727,350.00,254450.00,15267.00,239183.00,189020.00,50163.00,10/1/2013,10,October,2013 +Government,France,Amarilla,1403,7.00,9821.00,589.26,9231.74,7015.00,2216.74,10/1/2013,10,October,2013 +Government,France,Amarilla,2076,350.00,726600.00,43596.00,683004.00,539760.00,143244.00,10/1/2013,10,October,2013 +Government,France,Montana,1757,20.00,35140.00,2108.40,33031.60,17570.00,15461.60,10/1/2013,10,October,2013 +Midmarket,United States of America,Paseo,2198,15.00,32970.00,1978.20,30991.80,21980.00,9011.80,8/1/2014,8,August,2014 +Midmarket,Germany,Paseo,1743,15.00,26145.00,1568.70,24576.30,17430.00,7146.30,8/1/2014,8,August,2014 +Midmarket,United States of America,Paseo,1153,15.00,17295.00,1037.70,16257.30,11530.00,4727.30,10/1/2014,10,October,2014 +Government,France,Paseo,1757,20.00,35140.00,2108.40,33031.60,17570.00,15461.60,10/1/2013,10,October,2013 +Government,Germany,Velo,1001,20.00,20020.00,1201.20,18818.80,10010.00,8808.80,8/1/2014,8,August,2014 +Government,Mexico,Velo,1333,7.00,9331.00,559.86,8771.14,6665.00,2106.14,11/1/2014,11,November,2014 +Midmarket,United States of America,VTT,1153,15.00,17295.00,1037.70,16257.30,11530.00,4727.30,10/1/2014,10,October,2014 +Channel Partners,Mexico,Carretera,727,12.00,8724.00,610.68,8113.32,2181.00,5932.32,2/1/2014,2,February,2014 +Channel Partners,Canada,Carretera,1884,12.00,22608.00,1582.56,21025.44,5652.00,15373.44,8/1/2014,8,August,2014 +Government,Mexico,Carretera,1834,20.00,36680.00,2567.60,34112.40,18340.00,15772.40,9/1/2013,9,September,2013 +Channel 
Partners,Mexico,Montana,2340,12.00,28080.00,1965.60,26114.40,7020.00,19094.40,1/1/2014,1,January,2014 +Channel Partners,France,Montana,2342,12.00,28104.00,1967.28,26136.72,7026.00,19110.72,11/1/2014,11,November,2014 +Government,France,Paseo,1031,7.00,7217.00,505.19,6711.81,5155.00,1556.81,9/1/2013,9,September,2013 +Midmarket,Canada,Velo,1262,15.00,18930.00,1325.10,17604.90,12620.00,4984.90,5/1/2014,5,May,2014 +Government,Canada,Velo,1135,7.00,7945.00,556.15,7388.85,5675.00,1713.85,6/1/2014,6,June,2014 +Government,United States of America,Velo,547,7.00,3829.00,268.03,3560.97,2735.00,825.97,11/1/2014,11,November,2014 +Government,Canada,Velo,1582,7.00,11074.00,775.18,10298.82,7910.00,2388.82,12/1/2014,12,December,2014 +Channel Partners,France,VTT,1738.5,12.00,20862.00,1460.34,19401.66,5215.50,14186.16,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,2215,12.00,26580.00,1860.60,24719.40,6645.00,18074.40,9/1/2013,9,September,2013 +Government,Canada,VTT,1582,7.00,11074.00,775.18,10298.82,7910.00,2388.82,12/1/2014,12,December,2014 +Government,Canada,Amarilla,1135,7.00,7945.00,556.15,7388.85,5675.00,1713.85,6/1/2014,6,June,2014 +Government,United States of America,Carretera,1761,350.00,616350.00,43144.50,573205.50,457860.00,115345.50,3/1/2014,3,March,2014 +Small Business,France,Carretera,448,300.00,134400.00,9408.00,124992.00,112000.00,12992.00,6/1/2014,6,June,2014 +Small Business,France,Carretera,2181,300.00,654300.00,45801.00,608499.00,545250.00,63249.00,10/1/2014,10,October,2014 +Government,France,Montana,1976,20.00,39520.00,2766.40,36753.60,19760.00,16993.60,10/1/2014,10,October,2014 +Small Business,France,Montana,2181,300.00,654300.00,45801.00,608499.00,545250.00,63249.00,10/1/2014,10,October,2014 +Enterprise,Germany,Montana,2500,125.00,312500.00,21875.00,290625.00,300000.00,-9375.00,11/1/2013,11,November,2013 +Small Business,Canada,Paseo,1702,300.00,510600.00,35742.00,474858.00,425500.00,49358.00,5/1/2014,5,May,2014 +Small Business,France,Paseo,448,300.00,134400.00,9408.00,124992.00,112000.00,12992.00,6/1/2014,6,June,2014 +Enterprise,Germany,Paseo,3513,125.00,439125.00,30738.75,408386.25,421560.00,-13173.75,7/1/2014,7,July,2014 +Midmarket,France,Paseo,2101,15.00,31515.00,2206.05,29308.95,21010.00,8298.95,8/1/2014,8,August,2014 +Midmarket,United States of America,Paseo,2931,15.00,43965.00,3077.55,40887.45,29310.00,11577.45,9/1/2013,9,September,2013 +Government,France,Paseo,1535,20.00,30700.00,2149.00,28551.00,15350.00,13201.00,9/1/2014,9,September,2014 +Small Business,Germany,Paseo,1123,300.00,336900.00,23583.00,313317.00,280750.00,32567.00,9/1/2013,9,September,2013 +Small Business,Canada,Paseo,1404,300.00,421200.00,29484.00,391716.00,351000.00,40716.00,11/1/2013,11,November,2013 +Channel Partners,Mexico,Paseo,2763,12.00,33156.00,2320.92,30835.08,8289.00,22546.08,11/1/2013,11,November,2013 +Government,Germany,Paseo,2125,7.00,14875.00,1041.25,13833.75,10625.00,3208.75,12/1/2013,12,December,2013 +Small Business,France,Velo,1659,300.00,497700.00,34839.00,462861.00,414750.00,48111.00,7/1/2014,7,July,2014 +Government,Mexico,Velo,609,20.00,12180.00,852.60,11327.40,6090.00,5237.40,8/1/2014,8,August,2014 +Enterprise,Germany,Velo,2087,125.00,260875.00,18261.25,242613.75,250440.00,-7826.25,9/1/2014,9,September,2014 +Government,France,Velo,1976,20.00,39520.00,2766.40,36753.60,19760.00,16993.60,10/1/2014,10,October,2014 +Government,United States of America,Velo,1421,20.00,28420.00,1989.40,26430.60,14210.00,12220.60,12/1/2013,12,December,2013 +Small Business,United States of 
America,Velo,1372,300.00,411600.00,28812.00,382788.00,343000.00,39788.00,12/1/2014,12,December,2014 +Government,Germany,Velo,588,20.00,11760.00,823.20,10936.80,5880.00,5056.80,12/1/2013,12,December,2013 +Channel Partners,Canada,VTT,3244.5,12.00,38934.00,2725.38,36208.62,9733.50,26475.12,1/1/2014,1,January,2014 +Small Business,France,VTT,959,300.00,287700.00,20139.00,267561.00,239750.00,27811.00,2/1/2014,2,February,2014 +Small Business,Mexico,VTT,2747,300.00,824100.00,57687.00,766413.00,686750.00,79663.00,2/1/2014,2,February,2014 +Enterprise,Canada,Amarilla,1645,125.00,205625.00,14393.75,191231.25,197400.00,-6168.75,5/1/2014,5,May,2014 +Government,France,Amarilla,2876,350.00,1006600.00,70462.00,936138.00,747760.00,188378.00,9/1/2014,9,September,2014 +Enterprise,Germany,Amarilla,994,125.00,124250.00,8697.50,115552.50,119280.00,-3727.50,9/1/2013,9,September,2013 +Government,Canada,Amarilla,1118,20.00,22360.00,1565.20,20794.80,11180.00,9614.80,11/1/2014,11,November,2014 +Small Business,United States of America,Amarilla,1372,300.00,411600.00,28812.00,382788.00,343000.00,39788.00,12/1/2014,12,December,2014 +Government,Canada,Montana,488,7.00,3416.00,273.28,3142.72,2440.00,702.72,2/1/2014,2,February,2014 +Government,United States of America,Montana,1282,20.00,25640.00,2051.20,23588.80,12820.00,10768.80,6/1/2014,6,June,2014 +Government,Canada,Paseo,257,7.00,1799.00,143.92,1655.08,1285.00,370.08,5/1/2014,5,May,2014 +Government,United States of America,Amarilla,1282,20.00,25640.00,2051.20,23588.80,12820.00,10768.80,6/1/2014,6,June,2014 +Enterprise,Mexico,Carretera,1540,125.00,192500.00,15400.00,177100.00,184800.00,-7700.00,8/1/2014,8,August,2014 +Midmarket,France,Carretera,490,15.00,7350.00,588.00,6762.00,4900.00,1862.00,11/1/2014,11,November,2014 +Government,Mexico,Carretera,1362,350.00,476700.00,38136.00,438564.00,354120.00,84444.00,12/1/2014,12,December,2014 +Midmarket,France,Montana,2501,15.00,37515.00,3001.20,34513.80,25010.00,9503.80,3/1/2014,3,March,2014 +Government,Canada,Montana,708,20.00,14160.00,1132.80,13027.20,7080.00,5947.20,6/1/2014,6,June,2014 +Government,Germany,Montana,645,20.00,12900.00,1032.00,11868.00,6450.00,5418.00,7/1/2014,7,July,2014 +Small Business,France,Montana,1562,300.00,468600.00,37488.00,431112.00,390500.00,40612.00,8/1/2014,8,August,2014 +Small Business,Canada,Montana,1283,300.00,384900.00,30792.00,354108.00,320750.00,33358.00,9/1/2013,9,September,2013 +Midmarket,Germany,Montana,711,15.00,10665.00,853.20,9811.80,7110.00,2701.80,12/1/2014,12,December,2014 +Enterprise,Mexico,Paseo,1114,125.00,139250.00,11140.00,128110.00,133680.00,-5570.00,3/1/2014,3,March,2014 +Government,Germany,Paseo,1259,7.00,8813.00,705.04,8107.96,6295.00,1812.96,4/1/2014,4,April,2014 +Government,Germany,Paseo,1095,7.00,7665.00,613.20,7051.80,5475.00,1576.80,5/1/2014,5,May,2014 +Government,Germany,Paseo,1366,20.00,27320.00,2185.60,25134.40,13660.00,11474.40,6/1/2014,6,June,2014 +Small Business,Mexico,Paseo,2460,300.00,738000.00,59040.00,678960.00,615000.00,63960.00,6/1/2014,6,June,2014 +Government,United States of America,Paseo,678,7.00,4746.00,379.68,4366.32,3390.00,976.32,8/1/2014,8,August,2014 +Government,Germany,Paseo,1598,7.00,11186.00,894.88,10291.12,7990.00,2301.12,8/1/2014,8,August,2014 +Government,Germany,Paseo,2409,7.00,16863.00,1349.04,15513.96,12045.00,3468.96,9/1/2013,9,September,2013 +Government,Germany,Paseo,1934,20.00,38680.00,3094.40,35585.60,19340.00,16245.60,9/1/2014,9,September,2014 
+Government,Mexico,Paseo,2993,20.00,59860.00,4788.80,55071.20,29930.00,25141.20,9/1/2014,9,September,2014 +Government,Germany,Paseo,2146,350.00,751100.00,60088.00,691012.00,557960.00,133052.00,11/1/2013,11,November,2013 +Government,Mexico,Paseo,1946,7.00,13622.00,1089.76,12532.24,9730.00,2802.24,12/1/2013,12,December,2013 +Government,Mexico,Paseo,1362,350.00,476700.00,38136.00,438564.00,354120.00,84444.00,12/1/2014,12,December,2014 +Channel Partners,Canada,Velo,598,12.00,7176.00,574.08,6601.92,1794.00,4807.92,3/1/2014,3,March,2014 +Government,United States of America,Velo,2907,7.00,20349.00,1627.92,18721.08,14535.00,4186.08,6/1/2014,6,June,2014 +Government,Germany,Velo,2338,7.00,16366.00,1309.28,15056.72,11690.00,3366.72,6/1/2014,6,June,2014 +Small Business,France,Velo,386,300.00,115800.00,9264.00,106536.00,96500.00,10036.00,11/1/2013,11,November,2013 +Small Business,Mexico,Velo,635,300.00,190500.00,15240.00,175260.00,158750.00,16510.00,12/1/2014,12,December,2014 +Government,France,VTT,574.5,350.00,201075.00,16086.00,184989.00,149370.00,35619.00,4/1/2014,4,April,2014 +Government,Germany,VTT,2338,7.00,16366.00,1309.28,15056.72,11690.00,3366.72,6/1/2014,6,June,2014 +Government,France,VTT,381,350.00,133350.00,10668.00,122682.00,99060.00,23622.00,8/1/2014,8,August,2014 +Government,Germany,VTT,422,350.00,147700.00,11816.00,135884.00,109720.00,26164.00,8/1/2014,8,August,2014 +Small Business,Canada,VTT,2134,300.00,640200.00,51216.00,588984.00,533500.00,55484.00,9/1/2014,9,September,2014 +Small Business,United States of America,VTT,808,300.00,242400.00,19392.00,223008.00,202000.00,21008.00,12/1/2013,12,December,2013 +Government,Canada,Amarilla,708,20.00,14160.00,1132.80,13027.20,7080.00,5947.20,6/1/2014,6,June,2014 +Government,United States of America,Amarilla,2907,7.00,20349.00,1627.92,18721.08,14535.00,4186.08,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1366,20.00,27320.00,2185.60,25134.40,13660.00,11474.40,6/1/2014,6,June,2014 +Small Business,Mexico,Amarilla,2460,300.00,738000.00,59040.00,678960.00,615000.00,63960.00,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1520,20.00,30400.00,2432.00,27968.00,15200.00,12768.00,11/1/2014,11,November,2014 +Midmarket,Germany,Amarilla,711,15.00,10665.00,853.20,9811.80,7110.00,2701.80,12/1/2014,12,December,2014 +Channel Partners,Mexico,Amarilla,1375,12.00,16500.00,1320.00,15180.00,4125.00,11055.00,12/1/2013,12,December,2013 +Small Business,Mexico,Amarilla,635,300.00,190500.00,15240.00,175260.00,158750.00,16510.00,12/1/2014,12,December,2014 +Government,United States of America,VTT,436.5,20.00,8730.00,698.40,8031.60,4365.00,3666.60,7/1/2014,7,July,2014 +Small Business,Canada,Carretera,1094,300.00,328200.00,29538.00,298662.00,273500.00,25162.00,6/1/2014,6,June,2014 +Channel Partners,Mexico,Carretera,367,12.00,4404.00,396.36,4007.64,1101.00,2906.64,10/1/2013,10,October,2013 +Small Business,Canada,Montana,3802.5,300.00,1140750.00,102667.50,1038082.50,950625.00,87457.50,4/1/2014,4,April,2014 +Government,France,Montana,1666,350.00,583100.00,52479.00,530621.00,433160.00,97461.00,5/1/2014,5,May,2014 +Small Business,France,Montana,322,300.00,96600.00,8694.00,87906.00,80500.00,7406.00,9/1/2013,9,September,2013 +Channel Partners,Canada,Montana,2321,12.00,27852.00,2506.68,25345.32,6963.00,18382.32,11/1/2014,11,November,2014 +Enterprise,France,Montana,1857,125.00,232125.00,20891.25,211233.75,222840.00,-11606.25,11/1/2013,11,November,2013 +Government,Canada,Montana,1611,7.00,11277.00,1014.93,10262.07,8055.00,2207.07,12/1/2013,12,December,2013 +Enterprise,United 
States of America,Montana,2797,125.00,349625.00,31466.25,318158.75,335640.00,-17481.25,12/1/2014,12,December,2014 +Small Business,Germany,Montana,334,300.00,100200.00,9018.00,91182.00,83500.00,7682.00,12/1/2013,12,December,2013 +Small Business,Mexico,Paseo,2565,300.00,769500.00,69255.00,700245.00,641250.00,58995.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,2417,350.00,845950.00,76135.50,769814.50,628420.00,141394.50,1/1/2014,1,January,2014 +Midmarket,United States of America,Paseo,3675,15.00,55125.00,4961.25,50163.75,36750.00,13413.75,4/1/2014,4,April,2014 +Small Business,Canada,Paseo,1094,300.00,328200.00,29538.00,298662.00,273500.00,25162.00,6/1/2014,6,June,2014 +Midmarket,France,Paseo,1227,15.00,18405.00,1656.45,16748.55,12270.00,4478.55,10/1/2014,10,October,2014 +Channel Partners,Mexico,Paseo,367,12.00,4404.00,396.36,4007.64,1101.00,2906.64,10/1/2013,10,October,2013 +Small Business,France,Paseo,1324,300.00,397200.00,35748.00,361452.00,331000.00,30452.00,11/1/2014,11,November,2014 +Channel Partners,Germany,Paseo,1775,12.00,21300.00,1917.00,19383.00,5325.00,14058.00,11/1/2013,11,November,2013 +Enterprise,United States of America,Paseo,2797,125.00,349625.00,31466.25,318158.75,335640.00,-17481.25,12/1/2014,12,December,2014 +Midmarket,Mexico,Velo,245,15.00,3675.00,330.75,3344.25,2450.00,894.25,5/1/2014,5,May,2014 +Small Business,Canada,Velo,3793.5,300.00,1138050.00,102424.50,1035625.50,948375.00,87250.50,7/1/2014,7,July,2014 +Government,Germany,Velo,1307,350.00,457450.00,41170.50,416279.50,339820.00,76459.50,7/1/2014,7,July,2014 +Enterprise,Canada,Velo,567,125.00,70875.00,6378.75,64496.25,68040.00,-3543.75,9/1/2014,9,September,2014 +Enterprise,Mexico,Velo,2110,125.00,263750.00,23737.50,240012.50,253200.00,-13187.50,9/1/2014,9,September,2014 +Government,Canada,Velo,1269,350.00,444150.00,39973.50,404176.50,329940.00,74236.50,10/1/2014,10,October,2014 +Channel Partners,United States of America,VTT,1956,12.00,23472.00,2112.48,21359.52,5868.00,15491.52,1/1/2014,1,January,2014 +Small Business,Germany,VTT,2659,300.00,797700.00,71793.00,725907.00,664750.00,61157.00,2/1/2014,2,February,2014 +Government,United States of America,VTT,1351.5,350.00,473025.00,42572.25,430452.75,351390.00,79062.75,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,880,12.00,10560.00,950.40,9609.60,2640.00,6969.60,5/1/2014,5,May,2014 +Small Business,United States of America,VTT,1867,300.00,560100.00,50409.00,509691.00,466750.00,42941.00,9/1/2014,9,September,2014 +Channel Partners,France,VTT,2234,12.00,26808.00,2412.72,24395.28,6702.00,17693.28,9/1/2013,9,September,2013 +Midmarket,France,VTT,1227,15.00,18405.00,1656.45,16748.55,12270.00,4478.55,10/1/2014,10,October,2014 +Enterprise,Mexico,VTT,877,125.00,109625.00,9866.25,99758.75,105240.00,-5481.25,11/1/2014,11,November,2014 +Government,United States of America,Amarilla,2071,350.00,724850.00,65236.50,659613.50,538460.00,121153.50,9/1/2014,9,September,2014 +Government,Canada,Amarilla,1269,350.00,444150.00,39973.50,404176.50,329940.00,74236.50,10/1/2014,10,October,2014 +Midmarket,Germany,Amarilla,970,15.00,14550.00,1309.50,13240.50,9700.00,3540.50,11/1/2013,11,November,2013 +Government,Mexico,Amarilla,1694,20.00,33880.00,3049.20,30830.80,16940.00,13890.80,11/1/2014,11,November,2014 +Government,Germany,Carretera,663,20.00,13260.00,1193.40,12066.60,6630.00,5436.60,5/1/2014,5,May,2014 +Government,Canada,Carretera,819,7.00,5733.00,515.97,5217.03,4095.00,1122.03,7/1/2014,7,July,2014 +Channel 
Partners,Germany,Carretera,1580,12.00,18960.00,1706.40,17253.60,4740.00,12513.60,9/1/2014,9,September,2014 +Government,Mexico,Carretera,521,7.00,3647.00,328.23,3318.77,2605.00,713.77,12/1/2014,12,December,2014 +Government,United States of America,Paseo,973,20.00,19460.00,1751.40,17708.60,9730.00,7978.60,3/1/2014,3,March,2014 +Government,Mexico,Paseo,1038,20.00,20760.00,1868.40,18891.60,10380.00,8511.60,6/1/2014,6,June,2014 +Government,Germany,Paseo,360,7.00,2520.00,226.80,2293.20,1800.00,493.20,10/1/2014,10,October,2014 +Channel Partners,France,Velo,1967,12.00,23604.00,2124.36,21479.64,5901.00,15578.64,3/1/2014,3,March,2014 +Midmarket,Mexico,Velo,2628,15.00,39420.00,3547.80,35872.20,26280.00,9592.20,4/1/2014,4,April,2014 +Government,Germany,VTT,360,7.00,2520.00,226.80,2293.20,1800.00,493.20,10/1/2014,10,October,2014 +Government,France,VTT,2682,20.00,53640.00,4827.60,48812.40,26820.00,21992.40,11/1/2013,11,November,2013 +Government,Mexico,VTT,521,7.00,3647.00,328.23,3318.77,2605.00,713.77,12/1/2014,12,December,2014 +Government,Mexico,Amarilla,1038,20.00,20760.00,1868.40,18891.60,10380.00,8511.60,6/1/2014,6,June,2014 +Midmarket,Canada,Amarilla,1630.5,15.00,24457.50,2201.18,22256.33,16305.00,5951.33,7/1/2014,7,July,2014 +Channel Partners,France,Amarilla,306,12.00,3672.00,330.48,3341.52,918.00,2423.52,12/1/2013,12,December,2013 +Channel Partners,United States of America,Carretera,386,12.00,4632.00,463.20,4168.80,1158.00,3010.80,10/1/2013,10,October,2013 +Government,United States of America,Montana,2328,7.00,16296.00,1629.60,14666.40,11640.00,3026.40,9/1/2014,9,September,2014 +Channel Partners,United States of America,Paseo,386,12.00,4632.00,463.20,4168.80,1158.00,3010.80,10/1/2013,10,October,2013 +Enterprise,United States of America,Carretera,3445.5,125.00,430687.50,43068.75,387618.75,413460.00,-25841.25,4/1/2014,4,April,2014 +Enterprise,France,Carretera,1482,125.00,185250.00,18525.00,166725.00,177840.00,-11115.00,12/1/2013,12,December,2013 +Government,United States of America,Montana,2313,350.00,809550.00,80955.00,728595.00,601380.00,127215.00,5/1/2014,5,May,2014 +Enterprise,United States of America,Montana,1804,125.00,225500.00,22550.00,202950.00,216480.00,-13530.00,11/1/2013,11,November,2013 +Midmarket,France,Montana,2072,15.00,31080.00,3108.00,27972.00,20720.00,7252.00,12/1/2014,12,December,2014 +Government,France,Paseo,1954,20.00,39080.00,3908.00,35172.00,19540.00,15632.00,3/1/2014,3,March,2014 +Small Business,Mexico,Paseo,591,300.00,177300.00,17730.00,159570.00,147750.00,11820.00,5/1/2014,5,May,2014 +Midmarket,France,Paseo,2167,15.00,32505.00,3250.50,29254.50,21670.00,7584.50,10/1/2013,10,October,2013 +Government,Germany,Paseo,241,20.00,4820.00,482.00,4338.00,2410.00,1928.00,10/1/2014,10,October,2014 +Midmarket,Germany,Velo,681,15.00,10215.00,1021.50,9193.50,6810.00,2383.50,1/1/2014,1,January,2014 +Midmarket,Germany,Velo,510,15.00,7650.00,765.00,6885.00,5100.00,1785.00,4/1/2014,4,April,2014 +Midmarket,United States of America,Velo,790,15.00,11850.00,1185.00,10665.00,7900.00,2765.00,5/1/2014,5,May,2014 +Government,France,Velo,639,350.00,223650.00,22365.00,201285.00,166140.00,35145.00,7/1/2014,7,July,2014 +Enterprise,United States of America,Velo,1596,125.00,199500.00,19950.00,179550.00,191520.00,-11970.00,9/1/2014,9,September,2014 +Small Business,United States of America,Velo,2294,300.00,688200.00,68820.00,619380.00,573500.00,45880.00,10/1/2013,10,October,2013 +Government,Germany,Velo,241,20.00,4820.00,482.00,4338.00,2410.00,1928.00,10/1/2014,10,October,2014 
+Government,Germany,Velo,2665,7.00,18655.00,1865.50,16789.50,13325.00,3464.50,11/1/2014,11,November,2014 +Enterprise,Canada,Velo,1916,125.00,239500.00,23950.00,215550.00,229920.00,-14370.00,12/1/2013,12,December,2013 +Small Business,France,Velo,853,300.00,255900.00,25590.00,230310.00,213250.00,17060.00,12/1/2014,12,December,2014 +Enterprise,Mexico,VTT,341,125.00,42625.00,4262.50,38362.50,40920.00,-2557.50,5/1/2014,5,May,2014 +Midmarket,Mexico,VTT,641,15.00,9615.00,961.50,8653.50,6410.00,2243.50,7/1/2014,7,July,2014 +Government,United States of America,VTT,2807,350.00,982450.00,98245.00,884205.00,729820.00,154385.00,8/1/2014,8,August,2014 +Small Business,Mexico,VTT,432,300.00,129600.00,12960.00,116640.00,108000.00,8640.00,9/1/2014,9,September,2014 +Small Business,United States of America,VTT,2294,300.00,688200.00,68820.00,619380.00,573500.00,45880.00,10/1/2013,10,October,2013 +Midmarket,France,VTT,2167,15.00,32505.00,3250.50,29254.50,21670.00,7584.50,10/1/2013,10,October,2013 +Enterprise,Canada,VTT,2529,125.00,316125.00,31612.50,284512.50,303480.00,-18967.50,11/1/2014,11,November,2014 +Government,Germany,VTT,1870,350.00,654500.00,65450.00,589050.00,486200.00,102850.00,12/1/2013,12,December,2013 +Enterprise,United States of America,Amarilla,579,125.00,72375.00,7237.50,65137.50,69480.00,-4342.50,1/1/2014,1,January,2014 +Government,Canada,Amarilla,2240,350.00,784000.00,78400.00,705600.00,582400.00,123200.00,2/1/2014,2,February,2014 +Small Business,United States of America,Amarilla,2993,300.00,897900.00,89790.00,808110.00,748250.00,59860.00,3/1/2014,3,March,2014 +Channel Partners,Canada,Amarilla,3520.5,12.00,42246.00,4224.60,38021.40,10561.50,27459.90,4/1/2014,4,April,2014 +Government,Mexico,Amarilla,2039,20.00,40780.00,4078.00,36702.00,20390.00,16312.00,5/1/2014,5,May,2014 +Channel Partners,Germany,Amarilla,2574,12.00,30888.00,3088.80,27799.20,7722.00,20077.20,8/1/2014,8,August,2014 +Government,Canada,Amarilla,707,350.00,247450.00,24745.00,222705.00,183820.00,38885.00,9/1/2014,9,September,2014 +Midmarket,France,Amarilla,2072,15.00,31080.00,3108.00,27972.00,20720.00,7252.00,12/1/2014,12,December,2014 +Small Business,France,Amarilla,853,300.00,255900.00,25590.00,230310.00,213250.00,17060.00,12/1/2014,12,December,2014 +Channel Partners,France,Carretera,1198,12.00,14376.00,1581.36,12794.64,3594.00,9200.64,10/1/2013,10,October,2013 +Government,France,Paseo,2532,7.00,17724.00,1949.64,15774.36,12660.00,3114.36,4/1/2014,4,April,2014 +Channel Partners,France,Paseo,1198,12.00,14376.00,1581.36,12794.64,3594.00,9200.64,10/1/2013,10,October,2013 +Midmarket,Canada,Velo,384,15.00,5760.00,633.60,5126.40,3840.00,1286.40,1/1/2014,1,January,2014 +Channel Partners,Germany,Velo,472,12.00,5664.00,623.04,5040.96,1416.00,3624.96,10/1/2014,10,October,2014 +Government,United States of America,VTT,1579,7.00,11053.00,1215.83,9837.17,7895.00,1942.17,3/1/2014,3,March,2014 +Channel Partners,Mexico,VTT,1005,12.00,12060.00,1326.60,10733.40,3015.00,7718.40,9/1/2013,9,September,2013 +Midmarket,United States of America,Amarilla,3199.5,15.00,47992.50,5279.18,42713.33,31995.00,10718.33,7/1/2014,7,July,2014 +Channel Partners,Germany,Amarilla,472,12.00,5664.00,623.04,5040.96,1416.00,3624.96,10/1/2014,10,October,2014 +Channel Partners,Canada,Carretera,1937,12.00,23244.00,2556.84,20687.16,5811.00,14876.16,2/1/2014,2,February,2014 +Government,Germany,Carretera,792,350.00,277200.00,30492.00,246708.00,205920.00,40788.00,3/1/2014,3,March,2014 +Small 
Business,Germany,Carretera,2811,300.00,843300.00,92763.00,750537.00,702750.00,47787.00,7/1/2014,7,July,2014 +Enterprise,France,Carretera,2441,125.00,305125.00,33563.75,271561.25,292920.00,-21358.75,10/1/2014,10,October,2014 +Midmarket,Canada,Carretera,1560,15.00,23400.00,2574.00,20826.00,15600.00,5226.00,11/1/2013,11,November,2013 +Government,Mexico,Carretera,2706,7.00,18942.00,2083.62,16858.38,13530.00,3328.38,11/1/2013,11,November,2013 +Government,Germany,Montana,766,350.00,268100.00,29491.00,238609.00,199160.00,39449.00,1/1/2014,1,January,2014 +Government,Germany,Montana,2992,20.00,59840.00,6582.40,53257.60,29920.00,23337.60,10/1/2013,10,October,2013 +Midmarket,Mexico,Montana,2157,15.00,32355.00,3559.05,28795.95,21570.00,7225.95,12/1/2014,12,December,2014 +Small Business,Canada,Paseo,873,300.00,261900.00,28809.00,233091.00,218250.00,14841.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,1122,20.00,22440.00,2468.40,19971.60,11220.00,8751.60,3/1/2014,3,March,2014 +Government,Canada,Paseo,2104.5,350.00,736575.00,81023.25,655551.75,547170.00,108381.75,7/1/2014,7,July,2014 +Channel Partners,Canada,Paseo,4026,12.00,48312.00,5314.32,42997.68,12078.00,30919.68,7/1/2014,7,July,2014 +Channel Partners,France,Paseo,2425.5,12.00,29106.00,3201.66,25904.34,7276.50,18627.84,7/1/2014,7,July,2014 +Government,Canada,Paseo,2394,20.00,47880.00,5266.80,42613.20,23940.00,18673.20,8/1/2014,8,August,2014 +Midmarket,Mexico,Paseo,1984,15.00,29760.00,3273.60,26486.40,19840.00,6646.40,8/1/2014,8,August,2014 +Enterprise,France,Paseo,2441,125.00,305125.00,33563.75,271561.25,292920.00,-21358.75,10/1/2014,10,October,2014 +Government,Germany,Paseo,2992,20.00,59840.00,6582.40,53257.60,29920.00,23337.60,10/1/2013,10,October,2013 +Small Business,Canada,Paseo,1366,300.00,409800.00,45078.00,364722.00,341500.00,23222.00,11/1/2014,11,November,2014 +Government,France,Velo,2805,20.00,56100.00,6171.00,49929.00,28050.00,21879.00,9/1/2013,9,September,2013 +Midmarket,Mexico,Velo,655,15.00,9825.00,1080.75,8744.25,6550.00,2194.25,9/1/2013,9,September,2013 +Government,Mexico,Velo,344,350.00,120400.00,13244.00,107156.00,89440.00,17716.00,10/1/2013,10,October,2013 +Government,Canada,Velo,1808,7.00,12656.00,1392.16,11263.84,9040.00,2223.84,11/1/2014,11,November,2014 +Channel Partners,France,VTT,1734,12.00,20808.00,2288.88,18519.12,5202.00,13317.12,1/1/2014,1,January,2014 +Enterprise,Mexico,VTT,554,125.00,69250.00,7617.50,61632.50,66480.00,-4847.50,1/1/2014,1,January,2014 +Government,Canada,VTT,2935,20.00,58700.00,6457.00,52243.00,29350.00,22893.00,11/1/2013,11,November,2013 +Enterprise,Germany,Amarilla,3165,125.00,395625.00,43518.75,352106.25,379800.00,-27693.75,1/1/2014,1,January,2014 +Government,Mexico,Amarilla,2629,20.00,52580.00,5783.80,46796.20,26290.00,20506.20,1/1/2014,1,January,2014 +Enterprise,France,Amarilla,1433,125.00,179125.00,19703.75,159421.25,171960.00,-12538.75,5/1/2014,5,May,2014 +Enterprise,Mexico,Amarilla,947,125.00,118375.00,13021.25,105353.75,113640.00,-8286.25,9/1/2013,9,September,2013 +Government,Mexico,Amarilla,344,350.00,120400.00,13244.00,107156.00,89440.00,17716.00,10/1/2013,10,October,2013 +Midmarket,Mexico,Amarilla,2157,15.00,32355.00,3559.05,28795.95,21570.00,7225.95,12/1/2014,12,December,2014 +Government,United States of America,Paseo,380,7.00,2660.00,292.60,2367.40,1900.00,467.40,9/1/2013,9,September,2013 +Government,Mexico,Carretera,886,350.00,310100.00,37212.00,272888.00,230360.00,42528.00,6/1/2014,6,June,2014 
+Enterprise,Canada,Carretera,2416,125.00,302000.00,36240.00,265760.00,289920.00,-24160.00,9/1/2013,9,September,2013 +Enterprise,Mexico,Carretera,2156,125.00,269500.00,32340.00,237160.00,258720.00,-21560.00,10/1/2014,10,October,2014 +Midmarket,Canada,Carretera,2689,15.00,40335.00,4840.20,35494.80,26890.00,8604.80,11/1/2014,11,November,2014 +Midmarket,United States of America,Montana,677,15.00,10155.00,1218.60,8936.40,6770.00,2166.40,3/1/2014,3,March,2014 +Small Business,France,Montana,1773,300.00,531900.00,63828.00,468072.00,443250.00,24822.00,4/1/2014,4,April,2014 +Government,Mexico,Montana,2420,7.00,16940.00,2032.80,14907.20,12100.00,2807.20,9/1/2014,9,September,2014 +Government,Canada,Montana,2734,7.00,19138.00,2296.56,16841.44,13670.00,3171.44,10/1/2014,10,October,2014 +Government,Mexico,Montana,1715,20.00,34300.00,4116.00,30184.00,17150.00,13034.00,10/1/2013,10,October,2013 +Small Business,France,Montana,1186,300.00,355800.00,42696.00,313104.00,296500.00,16604.00,12/1/2013,12,December,2013 +Small Business,United States of America,Paseo,3495,300.00,1048500.00,125820.00,922680.00,873750.00,48930.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,886,350.00,310100.00,37212.00,272888.00,230360.00,42528.00,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,2156,125.00,269500.00,32340.00,237160.00,258720.00,-21560.00,10/1/2014,10,October,2014 +Government,Mexico,Paseo,905,20.00,18100.00,2172.00,15928.00,9050.00,6878.00,10/1/2014,10,October,2014 +Government,Mexico,Paseo,1715,20.00,34300.00,4116.00,30184.00,17150.00,13034.00,10/1/2013,10,October,2013 +Government,France,Paseo,1594,350.00,557900.00,66948.00,490952.00,414440.00,76512.00,11/1/2014,11,November,2014 +Small Business,Germany,Paseo,1359,300.00,407700.00,48924.00,358776.00,339750.00,19026.00,11/1/2014,11,November,2014 +Small Business,Mexico,Paseo,2150,300.00,645000.00,77400.00,567600.00,537500.00,30100.00,11/1/2014,11,November,2014 +Government,Mexico,Paseo,1197,350.00,418950.00,50274.00,368676.00,311220.00,57456.00,11/1/2014,11,November,2014 +Midmarket,Mexico,Paseo,380,15.00,5700.00,684.00,5016.00,3800.00,1216.00,12/1/2013,12,December,2013 +Government,Mexico,Paseo,1233,20.00,24660.00,2959.20,21700.80,12330.00,9370.80,12/1/2014,12,December,2014 +Government,Mexico,Velo,1395,350.00,488250.00,58590.00,429660.00,362700.00,66960.00,7/1/2014,7,July,2014 +Government,United States of America,Velo,986,350.00,345100.00,41412.00,303688.00,256360.00,47328.00,10/1/2014,10,October,2014 +Government,Mexico,Velo,905,20.00,18100.00,2172.00,15928.00,9050.00,6878.00,10/1/2014,10,October,2014 +Channel Partners,Canada,VTT,2109,12.00,25308.00,3036.96,22271.04,6327.00,15944.04,5/1/2014,5,May,2014 +Midmarket,France,VTT,3874.5,15.00,58117.50,6974.10,51143.40,38745.00,12398.40,7/1/2014,7,July,2014 +Government,Canada,VTT,623,350.00,218050.00,26166.00,191884.00,161980.00,29904.00,9/1/2013,9,September,2013 +Government,United States of America,VTT,986,350.00,345100.00,41412.00,303688.00,256360.00,47328.00,10/1/2014,10,October,2014 +Enterprise,United States of America,VTT,2387,125.00,298375.00,35805.00,262570.00,286440.00,-23870.00,11/1/2014,11,November,2014 +Government,Mexico,VTT,1233,20.00,24660.00,2959.20,21700.80,12330.00,9370.80,12/1/2014,12,December,2014 +Government,United States of America,Amarilla,270,350.00,94500.00,11340.00,83160.00,70200.00,12960.00,2/1/2014,2,February,2014 +Government,France,Amarilla,3421.5,7.00,23950.50,2874.06,21076.44,17107.50,3968.94,7/1/2014,7,July,2014 
+Government,Canada,Amarilla,2734,7.00,19138.00,2296.56,16841.44,13670.00,3171.44,10/1/2014,10,October,2014 +Midmarket,United States of America,Amarilla,2548,15.00,38220.00,4586.40,33633.60,25480.00,8153.60,11/1/2013,11,November,2013 +Government,France,Carretera,2521.5,20.00,50430.00,6051.60,44378.40,25215.00,19163.40,1/1/2014,1,January,2014 +Channel Partners,Mexico,Montana,2661,12.00,31932.00,3831.84,28100.16,7983.00,20117.16,5/1/2014,5,May,2014 +Government,Germany,Paseo,1531,20.00,30620.00,3674.40,26945.60,15310.00,11635.60,12/1/2014,12,December,2014 +Government,France,VTT,1491,7.00,10437.00,1252.44,9184.56,7455.00,1729.56,3/1/2014,3,March,2014 +Government,Germany,VTT,1531,20.00,30620.00,3674.40,26945.60,15310.00,11635.60,12/1/2014,12,December,2014 +Channel Partners,Canada,Amarilla,2761,12.00,33132.00,3975.84,29156.16,8283.00,20873.16,9/1/2013,9,September,2013 +Midmarket,United States of America,Carretera,2567,15.00,38505.00,5005.65,33499.35,25670.00,7829.35,6/1/2014,6,June,2014 +Midmarket,United States of America,VTT,2567,15.00,38505.00,5005.65,33499.35,25670.00,7829.35,6/1/2014,6,June,2014 +Government,Canada,Carretera,923,350.00,323050.00,41996.50,281053.50,239980.00,41073.50,3/1/2014,3,March,2014 +Government,France,Carretera,1790,350.00,626500.00,81445.00,545055.00,465400.00,79655.00,3/1/2014,3,March,2014 +Government,Germany,Carretera,442,20.00,8840.00,1149.20,7690.80,4420.00,3270.80,9/1/2013,9,September,2013 +Government,United States of America,Montana,982.5,350.00,343875.00,44703.75,299171.25,255450.00,43721.25,1/1/2014,1,January,2014 +Government,United States of America,Montana,1298,7.00,9086.00,1181.18,7904.82,6490.00,1414.82,2/1/2014,2,February,2014 +Channel Partners,Mexico,Montana,604,12.00,7248.00,942.24,6305.76,1812.00,4493.76,6/1/2014,6,June,2014 +Government,Mexico,Montana,2255,20.00,45100.00,5863.00,39237.00,22550.00,16687.00,7/1/2014,7,July,2014 +Government,Canada,Montana,1249,20.00,24980.00,3247.40,21732.60,12490.00,9242.60,10/1/2014,10,October,2014 +Government,United States of America,Paseo,1438.5,7.00,10069.50,1309.04,8760.47,7192.50,1567.97,1/1/2014,1,January,2014 +Small Business,Germany,Paseo,807,300.00,242100.00,31473.00,210627.00,201750.00,8877.00,1/1/2014,1,January,2014 +Government,United States of America,Paseo,2641,20.00,52820.00,6866.60,45953.40,26410.00,19543.40,2/1/2014,2,February,2014 +Government,Germany,Paseo,2708,20.00,54160.00,7040.80,47119.20,27080.00,20039.20,2/1/2014,2,February,2014 +Government,Canada,Paseo,2632,350.00,921200.00,119756.00,801444.00,684320.00,117124.00,6/1/2014,6,June,2014 +Enterprise,Canada,Paseo,1583,125.00,197875.00,25723.75,172151.25,189960.00,-17808.75,6/1/2014,6,June,2014 +Channel Partners,Mexico,Paseo,571,12.00,6852.00,890.76,5961.24,1713.00,4248.24,7/1/2014,7,July,2014 +Government,France,Paseo,2696,7.00,18872.00,2453.36,16418.64,13480.00,2938.64,8/1/2014,8,August,2014 +Midmarket,Canada,Paseo,1565,15.00,23475.00,3051.75,20423.25,15650.00,4773.25,10/1/2014,10,October,2014 +Government,Canada,Paseo,1249,20.00,24980.00,3247.40,21732.60,12490.00,9242.60,10/1/2014,10,October,2014 +Government,Germany,Paseo,357,350.00,124950.00,16243.50,108706.50,92820.00,15886.50,11/1/2014,11,November,2014 +Channel Partners,Germany,Paseo,1013,12.00,12156.00,1580.28,10575.72,3039.00,7536.72,12/1/2014,12,December,2014 +Midmarket,France,Velo,3997.5,15.00,59962.50,7795.13,52167.38,39975.00,12192.38,1/1/2014,1,January,2014 +Government,Canada,Velo,2632,350.00,921200.00,119756.00,801444.00,684320.00,117124.00,6/1/2014,6,June,2014 
+Government,France,Velo,1190,7.00,8330.00,1082.90,7247.10,5950.00,1297.10,6/1/2014,6,June,2014 +Channel Partners,Mexico,Velo,604,12.00,7248.00,942.24,6305.76,1812.00,4493.76,6/1/2014,6,June,2014 +Midmarket,Germany,Velo,660,15.00,9900.00,1287.00,8613.00,6600.00,2013.00,9/1/2013,9,September,2013 +Channel Partners,Mexico,Velo,410,12.00,4920.00,639.60,4280.40,1230.00,3050.40,10/1/2014,10,October,2014 +Small Business,Mexico,Velo,2605,300.00,781500.00,101595.00,679905.00,651250.00,28655.00,11/1/2013,11,November,2013 +Channel Partners,Germany,Velo,1013,12.00,12156.00,1580.28,10575.72,3039.00,7536.72,12/1/2014,12,December,2014 +Enterprise,Canada,VTT,1583,125.00,197875.00,25723.75,172151.25,189960.00,-17808.75,6/1/2014,6,June,2014 +Midmarket,Canada,VTT,1565,15.00,23475.00,3051.75,20423.25,15650.00,4773.25,10/1/2014,10,October,2014 +Enterprise,Canada,Amarilla,1659,125.00,207375.00,26958.75,180416.25,199080.00,-18663.75,1/1/2014,1,January,2014 +Government,France,Amarilla,1190,7.00,8330.00,1082.90,7247.10,5950.00,1297.10,6/1/2014,6,June,2014 +Channel Partners,Mexico,Amarilla,410,12.00,4920.00,639.60,4280.40,1230.00,3050.40,10/1/2014,10,October,2014 +Channel Partners,Germany,Amarilla,1770,12.00,21240.00,2761.20,18478.80,5310.00,13168.80,12/1/2013,12,December,2013 +Government,Mexico,Carretera,2579,20.00,51580.00,7221.20,44358.80,25790.00,18568.80,4/1/2014,4,April,2014 +Government,United States of America,Carretera,1743,20.00,34860.00,4880.40,29979.60,17430.00,12549.60,5/1/2014,5,May,2014 +Government,United States of America,Carretera,2996,7.00,20972.00,2936.08,18035.92,14980.00,3055.92,10/1/2013,10,October,2013 +Government,Germany,Carretera,280,7.00,1960.00,274.40,1685.60,1400.00,285.60,12/1/2014,12,December,2014 +Government,France,Montana,293,7.00,2051.00,287.14,1763.86,1465.00,298.86,2/1/2014,2,February,2014 +Government,United States of America,Montana,2996,7.00,20972.00,2936.08,18035.92,14980.00,3055.92,10/1/2013,10,October,2013 +Midmarket,Germany,Paseo,278,15.00,4170.00,583.80,3586.20,2780.00,806.20,2/1/2014,2,February,2014 +Government,Canada,Paseo,2428,20.00,48560.00,6798.40,41761.60,24280.00,17481.60,3/1/2014,3,March,2014 +Midmarket,United States of America,Paseo,1767,15.00,26505.00,3710.70,22794.30,17670.00,5124.30,9/1/2014,9,September,2014 +Channel Partners,France,Paseo,1393,12.00,16716.00,2340.24,14375.76,4179.00,10196.76,10/1/2014,10,October,2014 +Government,Germany,VTT,280,7.00,1960.00,274.40,1685.60,1400.00,285.60,12/1/2014,12,December,2014 +Channel Partners,France,Amarilla,1393,12.00,16716.00,2340.24,14375.76,4179.00,10196.76,10/1/2014,10,October,2014 +Channel Partners,United States of America,Amarilla,2015,12.00,24180.00,3385.20,20794.80,6045.00,14749.80,12/1/2013,12,December,2013 +Small Business,Mexico,Carretera,801,300.00,240300.00,33642.00,206658.00,200250.00,6408.00,7/1/2014,7,July,2014 +Enterprise,France,Carretera,1023,125.00,127875.00,17902.50,109972.50,122760.00,-12787.50,9/1/2013,9,September,2013 +Small Business,Canada,Carretera,1496,300.00,448800.00,62832.00,385968.00,374000.00,11968.00,10/1/2014,10,October,2014 +Small Business,United States of America,Carretera,1010,300.00,303000.00,42420.00,260580.00,252500.00,8080.00,10/1/2014,10,October,2014 +Midmarket,Germany,Carretera,1513,15.00,22695.00,3177.30,19517.70,15130.00,4387.70,11/1/2014,11,November,2014 +Midmarket,Canada,Carretera,2300,15.00,34500.00,4830.00,29670.00,23000.00,6670.00,12/1/2014,12,December,2014 +Enterprise,Mexico,Carretera,2821,125.00,352625.00,49367.50,303257.50,338520.00,-35262.50,12/1/2013,12,December,2013 
+Government,Canada,Montana,2227.5,350.00,779625.00,109147.50,670477.50,579150.00,91327.50,1/1/2014,1,January,2014 +Government,Germany,Montana,1199,350.00,419650.00,58751.00,360899.00,311740.00,49159.00,4/1/2014,4,April,2014 +Government,Canada,Montana,200,350.00,70000.00,9800.00,60200.00,52000.00,8200.00,5/1/2014,5,May,2014 +Government,Canada,Montana,388,7.00,2716.00,380.24,2335.76,1940.00,395.76,9/1/2014,9,September,2014 +Government,Mexico,Montana,1727,7.00,12089.00,1692.46,10396.54,8635.00,1761.54,10/1/2013,10,October,2013 +Midmarket,Canada,Montana,2300,15.00,34500.00,4830.00,29670.00,23000.00,6670.00,12/1/2014,12,December,2014 +Government,Mexico,Paseo,260,20.00,5200.00,728.00,4472.00,2600.00,1872.00,2/1/2014,2,February,2014 +Midmarket,Canada,Paseo,2470,15.00,37050.00,5187.00,31863.00,24700.00,7163.00,9/1/2013,9,September,2013 +Midmarket,Canada,Paseo,1743,15.00,26145.00,3660.30,22484.70,17430.00,5054.70,10/1/2013,10,October,2013 +Channel Partners,United States of America,Paseo,2914,12.00,34968.00,4895.52,30072.48,8742.00,21330.48,10/1/2014,10,October,2014 +Government,France,Paseo,1731,7.00,12117.00,1696.38,10420.62,8655.00,1765.62,10/1/2014,10,October,2014 +Government,Canada,Paseo,700,350.00,245000.00,34300.00,210700.00,182000.00,28700.00,11/1/2014,11,November,2014 +Channel Partners,Canada,Paseo,2222,12.00,26664.00,3732.96,22931.04,6666.00,16265.04,11/1/2013,11,November,2013 +Government,United States of America,Paseo,1177,350.00,411950.00,57673.00,354277.00,306020.00,48257.00,11/1/2014,11,November,2014 +Government,France,Paseo,1922,350.00,672700.00,94178.00,578522.00,499720.00,78802.00,11/1/2013,11,November,2013 +Enterprise,Mexico,Velo,1575,125.00,196875.00,27562.50,169312.50,189000.00,-19687.50,2/1/2014,2,February,2014 +Government,United States of America,Velo,606,20.00,12120.00,1696.80,10423.20,6060.00,4363.20,4/1/2014,4,April,2014 +Small Business,United States of America,Velo,2460,300.00,738000.00,103320.00,634680.00,615000.00,19680.00,7/1/2014,7,July,2014 +Small Business,Canada,Velo,269,300.00,80700.00,11298.00,69402.00,67250.00,2152.00,10/1/2013,10,October,2013 +Small Business,Germany,Velo,2536,300.00,760800.00,106512.00,654288.00,634000.00,20288.00,11/1/2013,11,November,2013 +Government,Mexico,VTT,2903,7.00,20321.00,2844.94,17476.06,14515.00,2961.06,3/1/2014,3,March,2014 +Small Business,United States of America,VTT,2541,300.00,762300.00,106722.00,655578.00,635250.00,20328.00,8/1/2014,8,August,2014 +Small Business,Canada,VTT,269,300.00,80700.00,11298.00,69402.00,67250.00,2152.00,10/1/2013,10,October,2013 +Small Business,Canada,VTT,1496,300.00,448800.00,62832.00,385968.00,374000.00,11968.00,10/1/2014,10,October,2014 +Small Business,United States of America,VTT,1010,300.00,303000.00,42420.00,260580.00,252500.00,8080.00,10/1/2014,10,October,2014 +Government,France,VTT,1281,350.00,448350.00,62769.00,385581.00,333060.00,52521.00,12/1/2013,12,December,2013 +Small Business,Canada,Amarilla,888,300.00,266400.00,37296.00,229104.00,222000.00,7104.00,3/1/2014,3,March,2014 +Enterprise,United States of America,Amarilla,2844,125.00,355500.00,49770.00,305730.00,341280.00,-35550.00,5/1/2014,5,May,2014 +Channel Partners,France,Amarilla,2475,12.00,29700.00,4158.00,25542.00,7425.00,18117.00,8/1/2014,8,August,2014 +Midmarket,Canada,Amarilla,1743,15.00,26145.00,3660.30,22484.70,17430.00,5054.70,10/1/2013,10,October,2013 +Channel Partners,United States of America,Amarilla,2914,12.00,34968.00,4895.52,30072.48,8742.00,21330.48,10/1/2014,10,October,2014 
+Government,France,Amarilla,1731,7.00,12117.00,1696.38,10420.62,8655.00,1765.62,10/1/2014,10,October,2014 +Government,Mexico,Amarilla,1727,7.00,12089.00,1692.46,10396.54,8635.00,1761.54,10/1/2013,10,October,2013 +Midmarket,Mexico,Amarilla,1870,15.00,28050.00,3927.00,24123.00,18700.00,5423.00,11/1/2013,11,November,2013 +Enterprise,France,Carretera,1174,125.00,146750.00,22012.50,124737.50,140880.00,-16142.50,8/1/2014,8,August,2014 +Enterprise,Germany,Carretera,2767,125.00,345875.00,51881.25,293993.75,332040.00,-38046.25,8/1/2014,8,August,2014 +Enterprise,Germany,Carretera,1085,125.00,135625.00,20343.75,115281.25,130200.00,-14918.75,10/1/2014,10,October,2014 +Small Business,Mexico,Montana,546,300.00,163800.00,24570.00,139230.00,136500.00,2730.00,10/1/2014,10,October,2014 +Government,Germany,Paseo,1158,20.00,23160.00,3474.00,19686.00,11580.00,8106.00,3/1/2014,3,March,2014 +Midmarket,Canada,Paseo,1614,15.00,24210.00,3631.50,20578.50,16140.00,4438.50,4/1/2014,4,April,2014 +Government,Mexico,Paseo,2535,7.00,17745.00,2661.75,15083.25,12675.00,2408.25,4/1/2014,4,April,2014 +Government,Mexico,Paseo,2851,350.00,997850.00,149677.50,848172.50,741260.00,106912.50,5/1/2014,5,May,2014 +Midmarket,Canada,Paseo,2559,15.00,38385.00,5757.75,32627.25,25590.00,7037.25,8/1/2014,8,August,2014 +Government,United States of America,Paseo,267,20.00,5340.00,801.00,4539.00,2670.00,1869.00,10/1/2013,10,October,2013 +Enterprise,Germany,Paseo,1085,125.00,135625.00,20343.75,115281.25,130200.00,-14918.75,10/1/2014,10,October,2014 +Midmarket,Germany,Paseo,1175,15.00,17625.00,2643.75,14981.25,11750.00,3231.25,10/1/2014,10,October,2014 +Government,United States of America,Paseo,2007,350.00,702450.00,105367.50,597082.50,521820.00,75262.50,11/1/2013,11,November,2013 +Government,Mexico,Paseo,2151,350.00,752850.00,112927.50,639922.50,559260.00,80662.50,11/1/2013,11,November,2013 +Channel Partners,United States of America,Paseo,914,12.00,10968.00,1645.20,9322.80,2742.00,6580.80,12/1/2014,12,December,2014 +Government,France,Paseo,293,20.00,5860.00,879.00,4981.00,2930.00,2051.00,12/1/2014,12,December,2014 +Channel Partners,Mexico,Velo,500,12.00,6000.00,900.00,5100.00,1500.00,3600.00,3/1/2014,3,March,2014 +Midmarket,France,Velo,2826,15.00,42390.00,6358.50,36031.50,28260.00,7771.50,5/1/2014,5,May,2014 +Enterprise,France,Velo,663,125.00,82875.00,12431.25,70443.75,79560.00,-9116.25,9/1/2014,9,September,2014 +Small Business,United States of America,Velo,2574,300.00,772200.00,115830.00,656370.00,643500.00,12870.00,11/1/2013,11,November,2013 +Enterprise,United States of America,Velo,2438,125.00,304750.00,45712.50,259037.50,292560.00,-33522.50,12/1/2013,12,December,2013 +Channel Partners,United States of America,Velo,914,12.00,10968.00,1645.20,9322.80,2742.00,6580.80,12/1/2014,12,December,2014 +Government,Canada,VTT,865.5,20.00,17310.00,2596.50,14713.50,8655.00,6058.50,7/1/2014,7,July,2014 +Midmarket,Germany,VTT,492,15.00,7380.00,1107.00,6273.00,4920.00,1353.00,7/1/2014,7,July,2014 +Government,United States of America,VTT,267,20.00,5340.00,801.00,4539.00,2670.00,1869.00,10/1/2013,10,October,2013 +Midmarket,Germany,VTT,1175,15.00,17625.00,2643.75,14981.25,11750.00,3231.25,10/1/2014,10,October,2014 +Enterprise,Canada,VTT,2954,125.00,369250.00,55387.50,313862.50,354480.00,-40617.50,11/1/2013,11,November,2013 +Enterprise,Germany,VTT,552,125.00,69000.00,10350.00,58650.00,66240.00,-7590.00,11/1/2014,11,November,2014 +Government,France,VTT,293,20.00,5860.00,879.00,4981.00,2930.00,2051.00,12/1/2014,12,December,2014 +Small 
Business,France,Amarilla,2475,300.00,742500.00,111375.00,631125.00,618750.00,12375.00,3/1/2014,3,March,2014 +Small Business,Mexico,Amarilla,546,300.00,163800.00,24570.00,139230.00,136500.00,2730.00,10/1/2014,10,October,2014 +Government,Mexico,Montana,1368,7.00,9576.00,1436.40,8139.60,6840.00,1299.60,2/1/2014,2,February,2014 +Government,Canada,Paseo,723,7.00,5061.00,759.15,4301.85,3615.00,686.85,4/1/2014,4,April,2014 +Channel Partners,United States of America,VTT,1806,12.00,21672.00,3250.80,18421.20,5418.00,13003.20,5/1/2014,5,May,2014 \ No newline at end of file diff --git a/python/samples/concepts/resources/agent_assistant_file_search/travelinfo.txt b/python/samples/concepts/resources/agent_assistant_file_search/travelinfo.txt new file mode 100644 index 000000000000..1516339b1a74 --- /dev/null +++ b/python/samples/concepts/resources/agent_assistant_file_search/travelinfo.txt @@ -0,0 +1,217 @@ +Invoice Booking Reference LMNOPQ Trip ID - 11110011111 +Passenger Name(s) +MARKS/SAM ALBERT Agent W2 + + +MICROSOFT CORPORATION 14820 NE 36TH STREET REDMOND WA US 98052 + +American Express Global Business Travel Microsoft Travel +14711 NE 29th Place, Suite 215 +Bellevue, WA 98007 +Phone: +1 (669) 210-8041 + + + + +BILLING CODE : 1010-10010110 +Invoice Information + + + + + + +Invoice Details +Ticket Number + + + + + + + +0277993883295 + + + + + + +Charges +Ticket Base Fare + + + + + + + +306.29 + +Airline Name + +ALASKA AIRLINES + +Ticket Tax Fare 62.01 + +Passenger Name Flight Details + +MARKS/SAM ALBERT +11 Sep 2023 ALASKA AIRLINES +0572 H Class +SEATTLE-TACOMA,WA/RALEIGH DURHAM,NC +13 Sep 2023 ALASKA AIRLINES +0491 M Class +RALEIGH DURHAM,NC/SEATTLE- TACOMA,WA + +Total (USD) Ticket Amount + +368.30 + +Credit Card Information +Charged to Card + + + +AX XXXXXXXXXXX4321 + + + +368.30 + + + + +Payment Details + + + +Charged by Airline +Total Invoice Charge + + + +USD + + + +368.30 +368.30 + +Monday 11 September 2023 + +10:05 AM + +Seattle (SEA) to Durham (RDU) +Airline Booking Ref: ABCXYZ + +Carrier: ALASKA AIRLINES + +Flight: AS 572 + +Status: Confirmed + +Operated By: ALASKA AIRLINES +Origin: Seattle, WA, Seattle-Tacoma International Apt (SEA) + +Departing: Monday 11 September 2023 at 10:05 AM Destination: Durham, Raleigh, Raleigh (RDU) Arriving: Monday 11 September 2023 at 06:15 PM +Additional Information + +Departure Terminal: Not Applicable + +Arrival Terminal: TERMINAL 2 + + +Class: ECONOMY +Aircraft Type: Boeing 737-900 +Meal Service: Not Applicable +Frequent Flyer Number: Not Applicable +Number of Stops: 0 +Greenhouse Gas Emissions: 560 kg CO2e / person + + +Distance: 2354 Miles Estimated Time: 05 hours 10 minutes +Seat: 24A + + +THE WESTIN RALEIGH DURHAM AP +Address: 3931 Macaw Street, Raleigh, NC, 27617, US +Phone: (1) 919-224-1400 Fax: (1) 919-224-1401 +Check In Date: Monday 11 September 2023 Check Out Date: Wednesday 13 September 2023 Number Of Nights: 2 +Rate: USD 280.00 per night may be subject to local taxes and service charges +Guaranteed to: AX XXXXXXXXXXX4321 + +Reference Number: 987654 +Additional Information +Membership ID: 123456789 +CANCEL PERMITTED UP TO 1 DAYS BEFORE CHECKIN + +Status: Confirmed + + +Corporate Id: Not Applicable + +Number Of Rooms: 1 + +Wednesday 13 September 2023 + +07:15 PM + +Durham (RDU) to Seattle (SEA) +Airline Booking Ref: ABCXYZ + +Carrier: ALASKA AIRLINES + +Flight: AS 491 + +Status: Confirmed + +Operated By: ALASKA AIRLINES +Origin: Durham, Raleigh, Raleigh (RDU) +Departing: Wednesday 13 September 2023 at 07:15 PM + + + +Departure Terminal: TERMINAL 
2 + +Destination: Seattle, WA, Seattle-Tacoma International Apt (SEA) +Arriving: Wednesday 13 September 2023 at 09:59 PM Arrival Terminal: Not Applicable +Additional Information + + +Class: ECONOMY +Aircraft Type: Boeing 737-900 +Meal Service: Not Applicable +Frequent Flyer Number: Not Applicable +Number of Stops: 0 +Greenhouse Gas Emissions: 560 kg CO2e / person + + +Distance: 2354 Miles Estimated Time: 05 hours 44 minutes +Seat: 16A + + + +Greenhouse Gas Emissions +Total Greenhouse Gas Emissions for this trip is: 1120 kg CO2e / person +Air Fare Information + +Routing : ONLINE RESERVATION +Total Fare : USD 368.30 +Additional Messages +FOR 24X7 Travel Reservations Please Call 1-669-210-8041 Unable To Use Requested As Frequent Flyer Program Invalid Use Of Frequent Flyer Number 0123XYZ Please Contact Corresponding Frequent Travel Program Support Desk For Assistance +Trip Name-Trip From Seattle To Raleigh/Durham +This Ticket Is Nonrefundable. Changes Or Cancellations Must Be Made Prior To Scheduled Flight Departure +All Changes Must Be Made On Same Carrier And Will Be Subject To Service Fee And Difference In Airfare +******************************************************* +Please Be Advised That Certain Mandatory Hotel-Imposed Charges Including But Not Limited To Daily Resort Or Facility Fees May Be Applicable To Your Stay And Payable To The Hotel Operator At Check-Out From The Property. You May Wish To Inquire With The Hotel Before Your Trip Regarding The Existence And Amount Of Such Charges. +******************************************************* +Hotel Cancel Policies Vary Depending On The Property And Date. If You Have Questions Regarding Cancellation Fees Please Call The Travel Office. +Important Information +COVID-19 Updates: Click here to access Travel Vitals https://travelvitals.amexgbt.com for the latest information and advisories compiled by American Express Global Business Travel. + +Carbon Emissions: The total emissions value for this itinerary includes air travel only. Emissions for each individual flight are displayed in the flight details section. For more information on carbon emissions please refer to https://www.amexglobalbusinesstravel.com/sustainable-products-and-platforms. + +For important information regarding your booking in relation to the conditions applying to your booking, managing your booking and travel advisory, please refer to www.amexglobalbusinesstravel.com/booking-info. + +GBT Travel Services UK Limited (GBT UK) and its authorized sublicensees (including Ovation Travel Group and Egencia) use certain trademarks and service marks of American Express Company or its subsidiaries (American Express) in the American Express Global Business Travel and American Express Meetings & Events brands and in connection with its business for permitted uses only under a limited license from American Express (Licensed Marks). The Licensed Marks are trademarks or service marks of, and the property of, American Express. GBT UK is a subsidiary of Global Business Travel Group, Inc. (NYSE: GBTG). American Express holds a minority interest in GBTG, which operates as a separate company from American Express. 
\ No newline at end of file
diff --git a/python/samples/concepts/agents/README.md b/python/samples/getting_started_with_agents/README.md
similarity index 51%
rename from python/samples/concepts/agents/README.md
rename to python/samples/getting_started_with_agents/README.md
index b723dd4f8642..143f77528161 100644
--- a/python/samples/concepts/agents/README.md
+++ b/python/samples/getting_started_with_agents/README.md
@@ -3,26 +3,28 @@
 This project contains a step by step guide to get started with _Semantic Kernel Agents_ in Python.
 
 #### PyPI:
-
 - For the use of agents, the minimum allowed Semantic Kernel pypi version is 1.3.0.
 
 #### Source
 
-- [Semantic Kernel Agent Framework](../../../semantic_kernel/agents/)
+- [Semantic Kernel Agent Framework](../../semantic_kernel/agents/)
 
 ## Examples
 
 The getting started with agents examples include:
 
-| Example | Description |
-| ------------------------------------------- | --------------------------------------- |
-| [step1_agent](../agents/step1_agent.py) | How to create and use an agent. |
-| [step2_plugins](../agents/step2_plugins.py) | How to associate plugins with an agent. |
+Example|Description
+---|---
+[step1_agent](../getting_started_with_agents/step1_agent.py)|How to create and use an agent.
+[step2_plugins](../getting_started_with_agents/step2_plugins.py)|How to associate plugins with an agent.
+[step8_openai_assistant_agent](../getting_started_with_agents/step8_openai_assistant_agent.py)|How to create and use an OpenAI Assistant agent.
+
+*Note: As we strive for parity with .NET, more getting_started_with_agents samples will be added. The current steps and names may be revised to further align with our .NET counterpart.*
 
 ## Configuring the Kernel
 
 Similar to the Semantic Kernel Python concept samples, it is necessary to configure the secrets
-and keys used by the kernel. See the follow "Configuring the Kernel" [guide](../README.md#configuring-the-kernel) for
+and keys used by the kernel. See the following "Configuring the Kernel" [guide](../concepts/README.md#configuring-the-kernel) for
 more information.
 
 ## Running Concept Samples
diff --git a/python/samples/concepts/agents/step1_agent.py b/python/samples/getting_started_with_agents/step1_agent.py
similarity index 100%
rename from python/samples/concepts/agents/step1_agent.py
rename to python/samples/getting_started_with_agents/step1_agent.py
diff --git a/python/samples/concepts/agents/step2_plugins.py b/python/samples/getting_started_with_agents/step2_plugins.py
similarity index 100%
rename from python/samples/concepts/agents/step2_plugins.py
rename to python/samples/getting_started_with_agents/step2_plugins.py
diff --git a/python/samples/getting_started_with_agents/step8_openai_assistant_agent.py b/python/samples/getting_started_with_agents/step8_openai_assistant_agent.py
new file mode 100644
index 000000000000..9f8ca89099bf
--- /dev/null
+++ b/python/samples/getting_started_with_agents/step8_openai_assistant_agent.py
@@ -0,0 +1,103 @@
+# Copyright (c) Microsoft. All rights reserved.
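+
+# Prerequisites for this sample: an OpenAI or Azure OpenAI resource with an
+# Assistants-capable model, and credentials configured via environment variables
+# or a .env file (see the "Configuring the Kernel" guide referenced in the
+# samples README).
+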
+import asyncio +from typing import Annotated + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.functions.kernel_function_decorator import kernel_function +from semantic_kernel.kernel import Kernel + +##################################################################### +# The following sample demonstrates how to create an OpenAI # +# assistant using either Azure OpenAI or OpenAI. OpenAI Assistants # +# allow for function calling, the use of file search and a # +# code interpreter. Assistant Threads are used to manage the # +# conversation state, similar to a Semantic Kernel Chat History. # +##################################################################### + +HOST_NAME = "Host" +HOST_INSTRUCTIONS = "Answer questions about the menu." + +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = True + + +# Define a sample plugin for the sample +class MenuPlugin: + """A sample Menu Plugin used for the concept sample.""" + + @kernel_function(description="Provides a list of specials from the menu.") + def get_specials(self) -> Annotated[str, "Returns the specials from the menu."]: + return """ + Special Soup: Clam Chowder + Special Salad: Cobb Salad + Special Drink: Chai Tea + """ + + @kernel_function(description="Provides the price of the requested menu item.") + def get_item_price( + self, menu_item: Annotated[str, "The name of the menu item."] + ) -> Annotated[str, "Returns the price of the menu item."]: + return "$9.99" + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in agent.invoke(thread_id=thread_id): + if content.role != AuthorRole.TOOL: + print(f"# {content.role}: {content.content}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + service_id = "agent" + + # Add the sample plugin to the kernel + kernel.add_plugin(plugin=MenuPlugin(), plugin_name="menu") + + # Create the agent + if use_azure_openai: + agent = AzureAssistantAgent( + kernel=kernel, service_id=service_id, name=HOST_NAME, instructions=HOST_INSTRUCTIONS + ) + else: + agent = OpenAIAssistantAgent( + kernel=kernel, service_id=service_id, name=HOST_NAME, instructions=HOST_INSTRUCTIONS + ) + + # Next create the assistant + await agent.create_assistant() + + # Note: the agent creation can be done in one step if desired + # if use_azure_openai: + # agent = await AzureAssistantAgent.create( + # kernel=kernel, service_id=service_id, name=HOST_NAME, instructions=HOST_INSTRUCTIONS + # ) + # else: + # agent = await OpenAIAssistantAgent.create( + # kernel=kernel, service_id=service_id, name=HOST_NAME, instructions=HOST_INSTRUCTIONS + # ) + + thread_id = await agent.create_thread() + + try: + await invoke_agent(agent, thread_id=thread_id, input="Hello") + await invoke_agent(agent, thread_id=thread_id, input="What is the special soup?") + await invoke_agent(agent, thread_id=thread_id, input="What is the special drink?") + await invoke_agent(agent, 
thread_id=thread_id, input="Thank you") + finally: + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/semantic_kernel/agents/agent.py b/python/semantic_kernel/agents/agent.py index 73ffcba0240e..bff6fbe21a19 100644 --- a/python/semantic_kernel/agents/agent.py +++ b/python/semantic_kernel/agents/agent.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. import uuid -from abc import ABC from typing import ClassVar from pydantic import Field @@ -10,10 +9,11 @@ from semantic_kernel.kernel import Kernel from semantic_kernel.kernel_pydantic import KernelBaseModel from semantic_kernel.utils.experimental_decorator import experimental_class +from semantic_kernel.utils.naming import generate_random_ascii_name @experimental_class -class Agent(ABC, KernelBaseModel): +class Agent(KernelBaseModel): """Base abstraction for all Semantic Kernel agents. An agent instance may participate in one or more conversations. @@ -31,7 +31,7 @@ class Agent(ABC, KernelBaseModel): id: str = Field(default_factory=lambda: str(uuid.uuid4())) description: str | None = None - name: str | None = None + name: str = Field(default_factory=lambda: f"agent_{generate_random_ascii_name()}") instructions: str | None = None kernel: Kernel = Field(default_factory=Kernel) channel_type: ClassVar[type[AgentChannel] | None] = None diff --git a/python/semantic_kernel/agents/chat_completion_agent.py b/python/semantic_kernel/agents/chat_completion_agent.py index 44cf48f94722..07e736c52224 100644 --- a/python/semantic_kernel/agents/chat_completion_agent.py +++ b/python/semantic_kernel/agents/chat_completion_agent.py @@ -64,11 +64,12 @@ def __init__( args: dict[str, Any] = { "service_id": service_id, - "name": name, "description": description, "instructions": instructions, "execution_settings": execution_settings, } + if name is not None: + args["name"] = name if id is not None: args["id"] = id if kernel is not None: diff --git a/python/semantic_kernel/agents/open_ai/__init__.py b/python/semantic_kernel/agents/open_ai/__init__.py new file mode 100644 index 000000000000..e9cbddaf2d6a --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft. All rights reserved. + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent + +__all__ = ["AzureAssistantAgent", "OpenAIAssistantAgent"] diff --git a/python/semantic_kernel/agents/open_ai/azure_assistant_agent.py b/python/semantic_kernel/agents/open_ai/azure_assistant_agent.py new file mode 100644 index 000000000000..28e085b9cc78 --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/azure_assistant_agent.py @@ -0,0 +1,356 @@ +# Copyright (c) Microsoft. All rights reserved. 
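+
+# This module defines AzureAssistantAgent, the Azure OpenAI-hosted counterpart of
+# OpenAIAssistantAgent. Credentials and the deployment name may be passed explicitly
+# or resolved from the environment / a .env file via AzureOpenAISettings.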
+ +import logging +from collections.abc import AsyncIterable, Awaitable, Callable +from copy import copy +from typing import TYPE_CHECKING, Any + +from openai import AsyncAzureOpenAI +from pydantic import ValidationError + +from semantic_kernel.agents.open_ai.open_ai_assistant_base import OpenAIAssistantBase +from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings +from semantic_kernel.const import DEFAULT_SERVICE_NAME +from semantic_kernel.exceptions.agent_exceptions import AgentInitializationError +from semantic_kernel.kernel_pydantic import HttpsUrl +from semantic_kernel.utils.experimental_decorator import experimental_class +from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent + +if TYPE_CHECKING: + from semantic_kernel.kernel import Kernel + + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +class AzureAssistantAgent(OpenAIAssistantBase): + """Azure OpenAI Assistant Agent class. + + Provides the ability to interact with Azure OpenAI Assistants. + """ + + # region Agent Initialization + + def __init__( + self, + kernel: "Kernel | None" = None, + service_id: str | None = None, + deployment_name: str | None = None, + api_key: str | None = None, + endpoint: str | None = None, + api_version: str | None = None, + ad_token: str | None = None, + ad_token_provider: Callable[[], str | Awaitable[str]] | None = None, + client: AsyncAzureOpenAI | None = None, + default_headers: dict[str, str] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + description: str | None = None, + id: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + enable_file_search: bool | None = None, + enable_json_response: bool | None = None, + file_ids: list[str] | None = [], + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + **kwargs: Any, + ) -> None: + """Initialize an Azure OpenAI Assistant Agent. + + Args: + kernel: The Kernel instance. (optional) + service_id: The service ID. (optional) + deployment_name: The deployment name. (optional) + api_key: The Azure OpenAI API key. (optional) + endpoint: The Azure OpenAI endpoint. (optional) + api_version: The Azure OpenAI API version. (optional) + ad_token: The Azure AD token. (optional) + ad_token_provider: The Azure AD token provider. (optional) + client: The Azure OpenAI client. (optional) + default_headers: The default headers. (optional) + env_file_path: The environment file path. (optional) + env_file_encoding: The environment file encoding. (optional) + description: The description. (optional) + id: The Agent ID. (optional) + instructions: The Agent instructions. (optional) + name: The Agent name. (optional) + enable_code_interpreter: Enable the code interpreter. (optional) + enable_file_search: Enable the file search. (optional) + enable_json_response: Enable the JSON response. (optional) + file_ids: The file IDs. (optional) + temperature: The temperature. (optional) + top_p: The top p. (optional) + vector_store_id: The vector store ID. (optional) + metadata: The metadata. (optional) + max_completion_tokens: The maximum completion tokens. 
(optional)
+            max_prompt_tokens: The maximum prompt tokens. (optional)
+            parallel_tool_calls_enabled: Enable parallel tool calls. (optional)
+            truncation_message_count: The truncation message count. (optional)
+            **kwargs: Additional keyword arguments.
+
+        Raises:
+            AgentInitializationError: If the api_key is not provided in the configuration.
+        """
+        try:
+            azure_openai_settings = AzureOpenAISettings.create(
+                api_key=api_key,
+                endpoint=endpoint,
+                chat_deployment_name=deployment_name,
+                api_version=api_version,
+                env_file_path=env_file_path,
+                env_file_encoding=env_file_encoding,
+            )
+        except ValidationError as ex:
+            raise AgentInitializationError("Failed to create Azure OpenAI settings.", ex) from ex
+
+        if not azure_openai_settings.chat_deployment_name:
+            raise AgentInitializationError("The Azure OpenAI chat_deployment_name is required.")
+
+        if not client and not azure_openai_settings.api_key and not ad_token and not ad_token_provider:
+            raise AgentInitializationError("Please provide either an api_key, ad_token, ad_token_provider, or client.")
+        # Honor a caller-supplied client; otherwise build one from the resolved settings.
+        if not client:
+            client = self._create_client(
+                api_key=azure_openai_settings.api_key.get_secret_value() if azure_openai_settings.api_key else None,
+                endpoint=azure_openai_settings.endpoint,
+                api_version=azure_openai_settings.api_version,
+                ad_token=ad_token,
+                ad_token_provider=ad_token_provider,
+                default_headers=default_headers,
+            )
+        service_id = service_id if service_id else DEFAULT_SERVICE_NAME
+
+        args: dict[str, Any] = {
+            "ai_model_id": azure_openai_settings.chat_deployment_name,
+            "service_id": service_id,
+            "client": client,
+            "description": description,
+            "instructions": instructions,
+            "enable_code_interpreter": enable_code_interpreter,
+            "enable_file_search": enable_file_search,
+            "enable_json_response": enable_json_response,
+            "file_ids": file_ids,
+            "temperature": temperature,
+            "top_p": top_p,
+            "vector_store_id": vector_store_id,
+            "metadata": metadata,
+            "max_completion_tokens": max_completion_tokens,
+            "max_prompt_tokens": max_prompt_tokens,
+            "parallel_tool_calls_enabled": parallel_tool_calls_enabled,
+            "truncation_message_count": truncation_message_count,
+        }
+
+        if name is not None:
+            args["name"] = name
+        if id is not None:
+            args["id"] = id
+        if kernel is not None:
+            args["kernel"] = kernel
+        if kwargs:
+            args.update(kwargs)
+        super().__init__(**args)
+
+    @classmethod
+    async def create(
+        cls,
+        *,
+        kernel: "Kernel | None" = None,
+        service_id: str | None = None,
+        deployment_name: str | None = None,
+        api_key: str | None = None,
+        endpoint: str | None = None,
+        api_version: str | None = None,
+        ad_token: str | None = None,
+        ad_token_provider: Callable[[], str | Awaitable[str]] | None = None,
+        client: AsyncAzureOpenAI | None = None,
+        default_headers: dict[str, str] | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+        description: str | None = None,
+        id: str | None = None,
+        instructions: str | None = None,
+        name: str | None = None,
+        enable_code_interpreter: bool | None = None,
+        enable_file_search: bool | None = None,
+        enable_json_response: bool | None = None,
+        file_ids: list[str] | None = [],
+        temperature: float | None = None,
+        top_p: float | None = None,
+        vector_store_id: str | None = None,
+        metadata: dict[str, Any] | None = {},
+        max_completion_tokens: int | None = None,
+        max_prompt_tokens: int | None = None,
+        parallel_tool_calls_enabled: bool | None = True,
+        truncation_message_count: int | None = None,
+        **kwargs: Any,
+    ) -> "AzureAssistantAgent":
+        """Asynchronous class method used to create the OpenAI
Assistant Agent.
+
+        Args:
+            kernel: The Kernel instance. (optional)
+            service_id: The service ID. (optional)
+            deployment_name: The deployment name. (optional)
+            api_key: The Azure OpenAI API key. (optional)
+            endpoint: The Azure OpenAI endpoint. (optional)
+            api_version: The Azure OpenAI API version. (optional)
+            ad_token: The Azure AD token. (optional)
+            ad_token_provider: The Azure AD token provider. (optional)
+            client: The Azure OpenAI client. (optional)
+            default_headers: The default headers. (optional)
+            env_file_path: The environment file path. (optional)
+            env_file_encoding: The environment file encoding. (optional)
+            description: The description. (optional)
+            id: The Agent ID. (optional)
+            instructions: The Agent instructions. (optional)
+            name: The Agent name. (optional)
+            enable_code_interpreter: Enable the code interpreter. (optional)
+            enable_file_search: Enable the file search. (optional)
+            enable_json_response: Enable the JSON response. (optional)
+            file_ids: The file IDs. (optional)
+            temperature: The temperature. (optional)
+            top_p: The top p. (optional)
+            vector_store_id: The vector store ID. (optional)
+            metadata: The metadata. (optional)
+            max_completion_tokens: The maximum completion tokens. (optional)
+            max_prompt_tokens: The maximum prompt tokens. (optional)
+            parallel_tool_calls_enabled: Enable parallel tool calls. (optional)
+            truncation_message_count: The truncation message count. (optional)
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            An instance of the AzureAssistantAgent.
+        """
+        agent = cls(
+            kernel=kernel,
+            service_id=service_id,
+            deployment_name=deployment_name,
+            api_key=api_key,
+            endpoint=endpoint,
+            api_version=api_version,
+            ad_token=ad_token,
+            ad_token_provider=ad_token_provider,
+            client=client,
+            default_headers=default_headers,
+            env_file_path=env_file_path,
+            env_file_encoding=env_file_encoding,
+            description=description,
+            id=id,
+            instructions=instructions,
+            name=name,
+            enable_code_interpreter=enable_code_interpreter,
+            enable_file_search=enable_file_search,
+            enable_json_response=enable_json_response,
+            file_ids=file_ids,
+            temperature=temperature,
+            top_p=top_p,
+            vector_store_id=vector_store_id,
+            metadata=metadata,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            parallel_tool_calls_enabled=parallel_tool_calls_enabled,
+            truncation_message_count=truncation_message_count,
+            **kwargs,
+        )
+        agent.assistant = await agent.create_assistant()
+        return agent
+
+    @staticmethod
+    def _create_client(
+        api_key: str | None = None,
+        endpoint: HttpsUrl | None = None,
+        api_version: str | None = None,
+        ad_token: str | None = None,
+        ad_token_provider: Callable[[], str | Awaitable[str]] | None = None,
+        default_headers: dict[str, str] | None = None,
+    ) -> AsyncAzureOpenAI:
+        """Create the Azure OpenAI client from configuration.
+
+        Args:
+            api_key: The Azure OpenAI API key.
+            endpoint: The Azure OpenAI endpoint.
+            api_version: The Azure OpenAI API version.
+            ad_token: The Azure AD token.
+            ad_token_provider: The Azure AD token provider.
+            default_headers: The default headers.
+
+        Returns:
+            An AsyncAzureOpenAI client instance.
+        """
+        merged_headers = dict(copy(default_headers)) if default_headers else {}
+        if APP_INFO:
+            merged_headers.update(APP_INFO)
+            merged_headers = prepend_semantic_kernel_to_user_agent(merged_headers)
+
+        if not api_key and not ad_token and not ad_token_provider:
+            raise AgentInitializationError(
+                "Please provide either an AzureOpenAI api_key, an ad_token, an ad_token_provider, or a client."
+            )
+        if not endpoint:
+            raise AgentInitializationError("Please provide an AzureOpenAI endpoint.")
+        return AsyncAzureOpenAI(
+            azure_endpoint=str(endpoint),
+            api_version=api_version,
+            api_key=api_key,
+            azure_ad_token=ad_token,
+            azure_ad_token_provider=ad_token_provider,
+            default_headers=merged_headers,
+        )
+
+    async def list_definitions(self) -> AsyncIterable[dict[str, Any]]:
+        """List the assistant definitions.
+
+        Yields:
+            An AsyncIterable of dictionaries representing the OpenAIAssistantDefinition.
+        """
+        assistants = await self.client.beta.assistants.list(order="desc")
+        for assistant in assistants.data:
+            yield self._create_open_ai_assistant_definition(assistant)
+
+    async def retrieve(
+        self,
+        id: str,
+        api_key: str | None = None,
+        endpoint: HttpsUrl | None = None,
+        api_version: str | None = None,
+        ad_token: str | None = None,
+        ad_token_provider: Callable[[], str | Awaitable[str]] | None = None,
+        client: AsyncAzureOpenAI | None = None,
+        kernel: "Kernel | None" = None,
+        default_headers: dict[str, str] | None = None,
+    ) -> "AzureAssistantAgent":
+        """Retrieve an assistant by ID.
+
+        Args:
+            id: The assistant ID.
+            api_key: The Azure OpenAI API key. (optional)
+            endpoint: The Azure OpenAI endpoint. (optional)
+            api_version: The Azure OpenAI API version. (optional)
+            ad_token: The Azure AD token. (optional)
+            ad_token_provider: The Azure AD token provider. (optional)
+            client: The Azure OpenAI client. (optional)
+            kernel: The Kernel instance. (optional)
+            default_headers: The default headers. (optional)
+
+        Returns:
+            An AzureAssistantAgent instance.
+        """
+        # Reuse a caller-supplied client when given; otherwise build one from the credentials.
+        if not client:
+            client = self._create_client(
+                api_key=api_key,
+                endpoint=endpoint,
+                api_version=api_version,
+                ad_token=ad_token,
+                ad_token_provider=ad_token_provider,
+                default_headers=default_headers,
+            )
+        assistant = await client.beta.assistants.retrieve(id)
+        assistant_definition = self._create_open_ai_assistant_definition(assistant)
+        return AzureAssistantAgent(kernel=kernel, **assistant_definition)
+
+    # endregion
diff --git a/python/semantic_kernel/agents/open_ai/open_ai_assistant_agent.py b/python/semantic_kernel/agents/open_ai/open_ai_assistant_agent.py
new file mode 100644
index 000000000000..d588ab9b0801
--- /dev/null
+++ b/python/semantic_kernel/agents/open_ai/open_ai_assistant_agent.py
@@ -0,0 +1,310 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import logging
+from collections.abc import AsyncIterable
+from copy import copy
+from typing import TYPE_CHECKING, Any
+
+from openai import AsyncOpenAI
+from pydantic import ValidationError
+
+from semantic_kernel.agents.open_ai.open_ai_assistant_base import OpenAIAssistantBase
+from semantic_kernel.connectors.ai.open_ai.settings.open_ai_settings import OpenAISettings
+from semantic_kernel.const import DEFAULT_SERVICE_NAME
+from semantic_kernel.exceptions.agent_exceptions import AgentInitializationError
+from semantic_kernel.utils.experimental_decorator import experimental_class
+from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent
+
+if TYPE_CHECKING:
+    from semantic_kernel.kernel import Kernel
+
+
+logger: logging.Logger = logging.getLogger(__name__)
+
+
+@experimental_class
+class OpenAIAssistantAgent(OpenAIAssistantBase):
+    """OpenAI Assistant Agent class.
+
+    Provides the ability to interact with OpenAI Assistants.
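+
+    A minimal usage sketch, mirroring the getting-started sample (the model id,
+    instructions, and prompt here are illustrative, not defaults):
+
+        agent = await OpenAIAssistantAgent.create(
+            service_id="agent", ai_model_id="gpt-4o", name="Host", instructions="Answer questions."
+        )
+        thread_id = await agent.create_thread()
+        await agent.add_chat_message(
+            thread_id=thread_id,
+            message=ChatMessageContent(role=AuthorRole.USER, content="Hello"),
+        )
+        async for content in agent.invoke(thread_id=thread_id):
+            print(content.content)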
+ """ + + # region Agent Initialization + + def __init__( + self, + *, + kernel: "Kernel | None" = None, + service_id: str | None = None, + ai_model_id: str | None = None, + api_key: str | None = None, + org_id: str | None = None, + client: AsyncOpenAI | None = None, + default_headers: dict[str, str] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + description: str | None = None, + id: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + enable_file_search: bool | None = None, + enable_json_response: bool | None = None, + file_ids: list[str] | None = [], + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + **kwargs: Any, + ) -> None: + """Initialize an OpenAIAssistant service. + + Args: + kernel: The Kernel instance. (optional) + service_id: The service ID. (optional) If not provided the default service name (default) is used. + ai_model_id: The AI model ID. (optional) + api_key: The OpenAI API key. (optional) + org_id: The OpenAI organization ID. (optional) + client: The OpenAI client. (optional) + default_headers: The default headers. (optional) + env_file_path: The environment file path. (optional) + env_file_encoding: The environment file encoding. (optional) + description: The assistant description. (optional) + id: The assistant ID. (optional) + instructions: The assistant instructions. (optional) + name: The assistant name. (optional) + enable_code_interpreter: Enable code interpreter. (optional) + enable_file_search: Enable file search. (optional) + enable_json_response: Enable JSON response. (optional) + file_ids: The file IDs. (optional) + temperature: The temperature. (optional) + top_p: The top p. (optional) + vector_store_id: The vector store ID. (optional) + metadata: The assistant metadata. (optional) + max_completion_tokens: The max completion tokens. (optional) + max_prompt_tokens: The max prompt tokens. (optional) + parallel_tool_calls_enabled: Enable parallel tool calls. (optional) + truncation_message_count: The truncation message count. (optional) + kwargs: Additional keyword arguments. + + Raises: + AgentInitializationError: If the api_key is not provided in the configuration. 
+ """ + try: + openai_settings = OpenAISettings.create( + api_key=api_key, + org_id=org_id, + chat_model_id=ai_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as ex: + raise AgentInitializationError("Failed to create OpenAI settings.", ex) from ex + + if not client and not openai_settings.api_key: + raise AgentInitializationError("The OpenAI API key is required, if a client is not provided.") + if not openai_settings.chat_model_id: + raise AgentInitializationError("The OpenAI model ID is required.") + + if not client: + client = self._create_client( + api_key=openai_settings.api_key.get_secret_value() if openai_settings.api_key else None, + org_id=openai_settings.org_id, + default_headers=default_headers, + ) + + service_id = service_id if service_id else DEFAULT_SERVICE_NAME + + args: dict[str, Any] = { + "ai_model_id": openai_settings.chat_model_id, + "service_id": service_id, + "client": client, + "description": description, + "instructions": instructions, + "enable_code_interpreter": enable_code_interpreter, + "enable_file_search": enable_file_search, + "enable_json_response": enable_json_response, + "file_ids": file_ids, + "temperature": temperature, + "top_p": top_p, + "vector_store_id": vector_store_id, + "metadata": metadata, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "parallel_tool_calls_enabled": parallel_tool_calls_enabled, + "truncation_message_count": truncation_message_count, + } + + if name is not None: + args["name"] = name + if id is not None: + args["id"] = id + if kernel is not None: + args["kernel"] = kernel + if kwargs: + args.update(kwargs) + super().__init__(**args) + + @classmethod + async def create( + cls, + *, + kernel: "Kernel | None" = None, + service_id: str | None = None, + ai_model_id: str | None = None, + api_key: str | None = None, + org_id: str | None = None, + client: AsyncOpenAI | None = None, + default_headers: dict[str, str] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + description: str | None = None, + id: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + enable_file_search: bool | None = None, + enable_json_response: bool | None = None, + file_ids: list[str] | None = [], + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + ) -> "OpenAIAssistantAgent": + """Asynchronous class method used to create the OpenAI Assistant Agent. + + Args: + kernel: The Kernel instance. (optional) + service_id: The service ID. (optional) If not provided the default service name (default) is used. + ai_model_id: The AI model ID. (optional) + api_key: The OpenAI API key. (optional) + org_id: The OpenAI organization ID. (optional) + client: The OpenAI client. (optional) + default_headers: The default headers. (optional) + env_file_path: The environment file path. (optional) + env_file_encoding: The environment file encoding. (optional) + description: The assistant description. (optional) + id: The assistant ID. (optional) + instructions: The assistant instructions. (optional) + name: The assistant name. (optional) + enable_code_interpreter: Enable code interpreter. 
(optional)
+            enable_file_search: Enable file search. (optional)
+            enable_json_response: Enable JSON response. (optional)
+            file_ids: The file IDs. (optional)
+            temperature: The temperature. (optional)
+            top_p: The top p. (optional)
+            vector_store_id: The vector store ID. (optional)
+            metadata: The assistant metadata. (optional)
+            max_completion_tokens: The max completion tokens. (optional)
+            max_prompt_tokens: The max prompt tokens. (optional)
+            parallel_tool_calls_enabled: Enable parallel tool calls. (optional)
+            truncation_message_count: The truncation message count. (optional)
+
+        Returns:
+            An OpenAIAssistantAgent instance.
+        """
+        agent = cls(
+            kernel=kernel,
+            service_id=service_id,
+            ai_model_id=ai_model_id,
+            api_key=api_key,
+            org_id=org_id,
+            client=client,
+            default_headers=default_headers,
+            env_file_path=env_file_path,
+            env_file_encoding=env_file_encoding,
+            description=description,
+            id=id,
+            instructions=instructions,
+            name=name,
+            enable_code_interpreter=enable_code_interpreter,
+            enable_file_search=enable_file_search,
+            enable_json_response=enable_json_response,
+            file_ids=file_ids,
+            temperature=temperature,
+            top_p=top_p,
+            vector_store_id=vector_store_id,
+            metadata=metadata,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            parallel_tool_calls_enabled=parallel_tool_calls_enabled,
+            truncation_message_count=truncation_message_count,
+        )
+        agent.assistant = await agent.create_assistant()
+        return agent
+
+    @staticmethod
+    def _create_client(
+        api_key: str | None = None, org_id: str | None = None, default_headers: dict[str, str] | None = None
+    ) -> AsyncOpenAI:
+        """An internal method to create the OpenAI client from the provided arguments.
+
+        Args:
+            api_key: The OpenAI API key.
+            org_id: The OpenAI organization ID. (optional)
+            default_headers: The default headers. (optional)
+
+        Returns:
+            An OpenAI client instance.
+        """
+        merged_headers = dict(copy(default_headers)) if default_headers else {}
+        if APP_INFO:
+            merged_headers.update(APP_INFO)
+            merged_headers = prepend_semantic_kernel_to_user_agent(merged_headers)
+
+        if not api_key:
+            raise AgentInitializationError("Please provide an OpenAI api_key.")
+
+        return AsyncOpenAI(
+            api_key=api_key,
+            organization=org_id,
+            default_headers=merged_headers,
+        )
+
+    async def list_definitions(self) -> AsyncIterable[dict[str, Any]]:
+        """List the assistant definitions.
+
+        Yields:
+            An AsyncIterable of dictionaries representing the OpenAIAssistantDefinition.
+        """
+        assistants = await self.client.beta.assistants.list(order="desc")
+        for assistant in assistants.data:
+            yield self._create_open_ai_assistant_definition(assistant)
+
+    async def retrieve(
+        self,
+        id: str,
+        api_key: str,
+        kernel: "Kernel | None" = None,
+        org_id: str | None = None,
+        default_headers: dict[str, str] | None = None,
+    ) -> "OpenAIAssistantAgent":
+        """Retrieve an assistant by ID.
+
+        Args:
+            id: The assistant ID.
+            api_key: The OpenAI API key.
+            kernel: The Kernel instance. (optional)
+            org_id: The OpenAI organization ID. (optional)
+            default_headers: The default headers. (optional)
+
+        Returns:
+            An OpenAIAssistantAgent instance.
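+
+        Note:
+            A new client is constructed from the supplied credentials for the lookup;
+            the agent's existing client is not reused.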
+ """ + client = self._create_client(api_key=api_key, org_id=org_id, default_headers=default_headers) + assistant = await client.beta.assistants.retrieve(id) + assistant_definition = self._create_open_ai_assistant_definition(assistant) + return OpenAIAssistantAgent(kernel=kernel, **assistant_definition) + + # endregion diff --git a/python/semantic_kernel/agents/open_ai/open_ai_assistant_base.py b/python/semantic_kernel/agents/open_ai/open_ai_assistant_base.py new file mode 100644 index 000000000000..d34ee08ea32b --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/open_ai_assistant_base.py @@ -0,0 +1,1021 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import json +import logging +from collections.abc import AsyncIterable +from typing import TYPE_CHECKING, Any, ClassVar, Literal + +from openai import AsyncOpenAI +from openai.resources.beta.assistants import Assistant +from openai.resources.beta.threads.messages import Message +from openai.resources.beta.threads.runs.runs import Run +from openai.types.beta import AssistantResponseFormat +from openai.types.beta.assistant_tool import CodeInterpreterTool, FileSearchTool +from openai.types.beta.threads.image_file_content_block import ImageFileContentBlock +from openai.types.beta.threads.runs import RunStep +from openai.types.beta.threads.text_content_block import TextContentBlock +from pydantic import Field + +from semantic_kernel.agents.agent import Agent +from semantic_kernel.agents.open_ai.run_polling_options import RunPollingOptions +from semantic_kernel.connectors.ai.function_calling_utils import kernel_function_metadata_to_function_call_format +from semantic_kernel.contents.annotation_content import AnnotationContent +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.file_reference_content import FileReferenceContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import ( + AgentExecutionError, + AgentFileNotFoundException, + AgentInitializationError, + AgentInvokeError, +) +from semantic_kernel.utils.experimental_decorator import experimental_class + +if TYPE_CHECKING: + from openai.types.beta.threads.annotation import Annotation + from openai.types.beta.threads.runs.tool_call import ToolCall + from openai.types.file_object import FileObject + + from semantic_kernel.kernel import Kernel + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +class OpenAIAssistantBase(Agent): + """OpenAI Assistant Base class. + + Manages the interaction with OpenAI Assistants. 
+ """ + + _options_metadata_key: str = "__run_options" + + ai_model_id: str + client: AsyncOpenAI + assistant: Assistant | None = None + polling_options: RunPollingOptions = Field(default_factory=RunPollingOptions) + enable_code_interpreter: bool | None = Field(False) + enable_file_search: bool | None = Field(False) + enable_json_response: bool | None = Field(False) + file_ids: list[str] | None = Field(default_factory=list, max_length=20) + temperature: float | None = Field(None) + top_p: float | None = Field(None) + vector_store_id: str | None = None + metadata: dict[str, Any] | None = Field(default_factory=dict, max_length=16) + max_completion_tokens: int | None = Field(None) + max_prompt_tokens: int | None = Field(None) + parallel_tool_calls_enabled: bool | None = Field(True) + truncation_message_count: int | None = Field(None) + + allowed_message_roles: ClassVar[list[str]] = [AuthorRole.USER, AuthorRole.ASSISTANT] + polling_status: ClassVar[list[str]] = ["queued", "in_progress", "cancelling"] + error_message_states: ClassVar[list[str]] = ["failed", "canceled", "expired"] + + _is_deleted: bool = False + + # region Assistant Initialization + + def __init__( + self, + ai_model_id: str, + client: AsyncOpenAI, + service_id: str, + *, + kernel: "Kernel | None" = None, + id: str | None = None, + name: str | None = None, + description: str | None = None, + instructions: str | None = None, + enable_code_interpreter: bool | None = None, + enable_file_search: bool | None = None, + enable_json_response: bool | None = None, + file_ids: list[str] | None = [], + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + **kwargs: Any, + ) -> None: + """Initialize an OpenAIAssistant Base. + + Args: + ai_model_id (str): The AI model id. Defaults to None. + client (AsyncOpenAI): The client, either AsyncOpenAI or AsyncAzureOpenAI. + service_id (str): The service id. + kernel (Kernel): The kernel. (optional) + id (str): The id. Defaults to None. (optional) + name (str): The name. Defaults to None. (optional) + description (str): The description. Defaults to None. (optional) + default_headers (dict[str, str]): The default headers. Defaults to None. (optional) + instructions (str): The instructions. Defaults to None. (optional) + enable_code_interpreter (bool): Enable code interpreter. Defaults to False. (optional) + enable_file_search (bool): Enable file search. Defaults to False. (optional) + enable_json_response (bool): Enable JSON response. Defaults to False. (optional) + file_ids (list[str]): The file ids. Defaults to []. (optional) + temperature (float): The temperature. Defaults to None. (optional) + top_p (float): The top p. Defaults to None. (optional) + vector_store_id (str): The vector store id. Defaults to None. (optional) + metadata (dict[str, Any]): The metadata. Defaults to {}. (optional) + max_completion_tokens (int): The max completion tokens. Defaults to None. (optional) + max_prompt_tokens (int): The max prompt tokens. Defaults to None. (optional) + parallel_tool_calls_enabled (bool): Enable parallel tool calls. Defaults to True. (optional) + truncation_message_count (int): The truncation message count. Defaults to None. (optional) + kwargs (Any): The keyword arguments. 
+ """ + args: dict[str, Any] = {} + + args = { + "ai_model_id": ai_model_id, + "client": client, + "service_id": service_id, + "instructions": instructions, + "description": description, + "enable_code_interpreter": enable_code_interpreter, + "enable_file_search": enable_file_search, + "enable_json_response": enable_json_response, + "file_ids": file_ids, + "temperature": temperature, + "top_p": top_p, + "vector_store_id": vector_store_id, + "metadata": metadata, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "parallel_tool_calls_enabled": parallel_tool_calls_enabled, + "truncation_message_count": truncation_message_count, + } + + if name is not None: + args["name"] = name + if id is not None: + args["id"] = id + if kernel is not None: + args["kernel"] = kernel + if kwargs: + args.update(kwargs) + + super().__init__(**args) + + async def create_assistant( + self, + ai_model_id: str | None = None, + description: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + enable_file_search: bool | None = None, + file_ids: list[str] | None = None, + vector_store_id: str | None = None, + metadata: dict[str, str] | None = {}, + **kwargs: Any, + ) -> "Assistant": + """Create the assistant. + + Args: + ai_model_id (str): The AI model id. Defaults to None. (optional) + description (str): The description. Defaults to None. (optional) + instructions (str): The instructions. Defaults to None. (optional) + name (str): The name. Defaults to None. (optional) + enable_code_interpreter (bool): Enable code interpreter. Defaults to None. (optional) + enable_file_search (bool): Enable file search. Defaults to None. (optional) + file_ids (list[str]): The file ids. Defaults to None. (optional) + vector_store_id (str): The vector store id. Defaults to None. (optional) + metadata (dict[str, str]): The metadata. Defaults to {}. (optional) + kwargs (Any): Extra keyword arguments. 
+
+        Returns:
+            Assistant: The assistant.
+        """
+        create_assistant_kwargs: dict[str, Any] = {}
+
+        if ai_model_id is not None:
+            create_assistant_kwargs["model"] = ai_model_id
+        elif self.ai_model_id:
+            create_assistant_kwargs["model"] = self.ai_model_id
+
+        if description is not None:
+            create_assistant_kwargs["description"] = description
+        elif self.description:
+            create_assistant_kwargs["description"] = self.description
+
+        if instructions is not None:
+            create_assistant_kwargs["instructions"] = instructions
+        elif self.instructions:
+            create_assistant_kwargs["instructions"] = self.instructions
+
+        if name is not None:
+            create_assistant_kwargs["name"] = name
+        elif self.name:
+            create_assistant_kwargs["name"] = self.name
+
+        tools = []
+        if enable_code_interpreter is not None:
+            if enable_code_interpreter:
+                tools.append({"type": "code_interpreter"})
+        elif self.enable_code_interpreter:
+            tools.append({"type": "code_interpreter"})
+
+        if enable_file_search is not None:
+            if enable_file_search:
+                tools.append({"type": "file_search"})
+        elif self.enable_file_search:
+            tools.append({"type": "file_search"})
+
+        if tools:
+            create_assistant_kwargs["tools"] = tools
+
+        tool_resources = {}
+        if file_ids is not None:
+            tool_resources["code_interpreter"] = {"file_ids": file_ids}
+        elif self.file_ids:
+            tool_resources["code_interpreter"] = {"file_ids": self.file_ids}
+
+        if vector_store_id is not None:
+            tool_resources["file_search"] = {"vector_store_ids": [vector_store_id]}
+        elif self.vector_store_id:
+            tool_resources["file_search"] = {"vector_store_ids": [self.vector_store_id]}
+
+        if tool_resources:
+            create_assistant_kwargs["tool_resources"] = tool_resources
+
+        if metadata:
+            create_assistant_kwargs["metadata"] = metadata
+        elif self.metadata:
+            create_assistant_kwargs["metadata"] = self.metadata
+
+        if kwargs:
+            create_assistant_kwargs.update(kwargs)
+
+        execution_settings = {}
+        if self.max_completion_tokens:
+            execution_settings["max_completion_tokens"] = self.max_completion_tokens
+
+        if self.max_prompt_tokens:
+            execution_settings["max_prompt_tokens"] = self.max_prompt_tokens
+
+        if self.parallel_tool_calls_enabled:
+            execution_settings["parallel_tool_calls_enabled"] = self.parallel_tool_calls_enabled
+
+        if self.truncation_message_count:
+            execution_settings["truncation_message_count"] = self.truncation_message_count
+
+        if execution_settings:
+            if "metadata" not in create_assistant_kwargs:
+                create_assistant_kwargs["metadata"] = {}
+            create_assistant_kwargs["metadata"][self._options_metadata_key] = json.dumps(execution_settings)
+
+        self.assistant = await self.client.beta.assistants.create(
+            **create_assistant_kwargs,
+        )
+
+        if self._is_deleted:
+            self._is_deleted = False
+
+        return self.assistant
+
+    def _create_open_ai_assistant_definition(self, assistant: "Assistant") -> dict[str, Any]:
+        """Create an OpenAI Assistant Definition from the provided assistant.
+
+        Args:
+            assistant: The assistant.
+
+        Returns:
+            An OpenAI Assistant Definition.
+ """ + execution_settings = {} + if isinstance(assistant.metadata, dict) and self._options_metadata_key in assistant.metadata: + settings_data = assistant.metadata[self._options_metadata_key] + if isinstance(settings_data, str): + settings_data = json.loads(settings_data) + assistant.metadata[self._options_metadata_key] = settings_data + execution_settings = {key: value for key, value in settings_data.items()} + + file_ids: list[str] = [] + vector_store_id = None + + tool_resources = getattr(assistant, "tool_resources", None) + if tool_resources: + if hasattr(tool_resources, "code_interpreter") and tool_resources.code_interpreter: + file_ids = getattr(tool_resources.code_interpreter, "file_ids", []) + + if hasattr(tool_resources, "file_search") and tool_resources.file_search: + vector_store_ids = getattr(tool_resources.file_search, "vector_store_ids", []) + if vector_store_ids: + vector_store_id = vector_store_ids[0] + + enable_json_response = ( + hasattr(assistant, "response_format") + and isinstance(assistant.response_format, AssistantResponseFormat) + and assistant.response_format.type == "json_object" + ) + + enable_code_interpreter = any(isinstance(tool, CodeInterpreterTool) for tool in assistant.tools) + enable_file_search = any(isinstance(tool, FileSearchTool) for tool in assistant.tools) + + return { + "ai_model_id": assistant.model, + "description": assistant.description, + "id": assistant.id, + "instructions": assistant.instructions, + "name": assistant.name, + "enable_code_interpreter": enable_code_interpreter, + "enable_file_search": enable_file_search, + "enable_json_response": enable_json_response, + "file_ids": file_ids, + "temperature": assistant.temperature, + "top_p": assistant.top_p, + "vector_store_id": vector_store_id if vector_store_id else None, + "metadata": assistant.metadata, + **execution_settings, + } + + # endregion + + # region Agent Properties + + @property + def tools(self) -> list[dict[str, str]]: + """The tools. + + Returns: + list[dict[str, str]]: The tools. + """ + if self.assistant is None: + raise AgentInitializationError("The assistant has not been created.") + return self._get_tools() + + # endregion + + # region Agent Methods + + async def create_thread( + self, + *, + code_interpreter_file_ids: list[str] | None = [], + messages: list[ChatMessageContent] | None = [], + vector_store_id: str | None = None, + metadata: dict[str, str] = {}, + ) -> str: + """Create a thread. + + Args: + code_interpreter_file_ids (list[str]): The code interpreter file ids. Defaults to []. (optional) + messages (list[ChatMessageContent]): The chat messages. Defaults to []. (optional) + vector_store_id (str): The vector store id. Defaults to None. (optional) + metadata (dict[str, str]): The metadata. Defaults to {}. (optional) + + Returns: + str: The thread id. + """ + create_thread_kwargs: dict[str, Any] = {} + + tool_resources = {} + + if code_interpreter_file_ids: + tool_resources["code_interpreter"] = {"file_ids": code_interpreter_file_ids} + + if vector_store_id: + tool_resources["file_search"] = {"vector_store_ids": [vector_store_id]} + + if tool_resources: + create_thread_kwargs["tool_resources"] = tool_resources + + if messages: + messages_to_add = [] + for message in messages: + if message.role.value not in self.allowed_message_roles: + raise AgentExecutionError( + f"Invalid message role `{message.role.value}`. Allowed roles are {self.allowed_message_roles}." 
+                    )
+                message_contents = self._get_message_contents(message=message)
+                for content in message_contents:
+                    messages_to_add.append({"role": message.role.value, "content": content})
+            create_thread_kwargs["messages"] = messages_to_add
+
+        if metadata:
+            create_thread_kwargs["metadata"] = metadata
+
+        thread = await self.client.beta.threads.create(**create_thread_kwargs)
+        return thread.id
+
+    async def delete_thread(self, thread_id: str) -> None:
+        """Delete a thread.
+
+        Args:
+            thread_id (str): The thread id.
+        """
+        await self.client.beta.threads.delete(thread_id)
+
+    async def delete(self) -> bool:
+        """Delete the assistant.
+
+        Returns:
+            bool: True if the assistant is deleted.
+        """
+        if not self._is_deleted and self.assistant:
+            await self.client.beta.assistants.delete(self.assistant.id)
+            self._is_deleted = True
+        return self._is_deleted
+
+    async def add_file(self, file_path: str, purpose: Literal["assistants", "vision"]) -> str:
+        """Add a file.
+
+        Args:
+            file_path (str): The file path.
+            purpose (str): The purpose. Can be "assistants" or "vision".
+
+        Returns:
+            str: The file id.
+
+        Raises:
+            AgentFileNotFoundException: If the file is not found.
+        """
+        try:
+            with open(file_path, "rb") as file:
+                file_object: "FileObject" = await self.client.files.create(file=file, purpose=purpose)
+            return file_object.id
+        except FileNotFoundError as ex:
+            raise AgentFileNotFoundException(f"File not found: {file_path}") from ex
+
+    async def add_chat_message(self, thread_id: str, message: ChatMessageContent) -> "Message":
+        """Add a chat message.
+
+        Args:
+            thread_id (str): The thread id.
+            message (ChatMessageContent): The chat message.
+
+        Returns:
+            Message: The message.
+        """
+        if message.role.value not in self.allowed_message_roles:
+            raise AgentExecutionError(
+                f"Invalid message role `{message.role.value}`. Allowed roles are {self.allowed_message_roles}."
+            )
+
+        metadata: Any = None
+        if message.metadata:
+            metadata = message.metadata
+
+        message_contents: list[dict[str, Any]] = self._get_message_contents(message=message)
+
+        return await self.client.beta.threads.messages.create(
+            thread_id=thread_id,
+            role=message.role.value,  # type: ignore
+            content=message_contents,  # type: ignore
+            metadata=metadata,
+        )
+
+    async def get_thread_messages(self, thread_id: str) -> AsyncIterable[ChatMessageContent]:
+        """Get the messages for the specified thread.
+
+        Args:
+            thread_id (str): The thread id.
+
+        Yields:
+            ChatMessageContent: The chat message.
+ """ + agent_names: dict[str, Any] = {} + + thread_messages = await self.client.beta.threads.messages.list(thread_id=thread_id, limit=100, order="desc") + for message in thread_messages.data: + assistant_name = None + if message.assistant_id and message.assistant_id not in agent_names: + agent = await self.client.beta.assistants.retrieve(message.assistant_id) + if agent.name: + agent_names[message.assistant_id] = agent.name + assistant_name = agent_names.get(message.assistant_id) if message.assistant_id else message.assistant_id + assistant_name = assistant_name or message.assistant_id + + content: ChatMessageContent = self._generate_message_content(str(assistant_name), message) + + if len(content.items) > 0: + yield content + + # region Agent Invoke Methods + + async def invoke( + self, + thread_id: str, + *, + ai_model_id: str | None = None, + enable_code_interpreter: bool | None = False, + enable_file_search: bool | None = False, + enable_json_response: bool | None = None, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + temperature: float | None = None, + top_p: float | None = None, + metadata: dict[str, str] | None = {}, + **kwargs: Any, + ) -> AsyncIterable[ChatMessageContent]: + """Invoke the chat assistant. + + The supplied arguments will take precedence over the specified assistant level attributes. + + Args: + thread_id (str): The thread id. + ai_model_id (str): The AI model id. Defaults to None. (optional) + enable_code_interpreter (bool): Enable code interpreter. Defaults to False. (optional) + enable_file_search (bool): Enable file search. Defaults to False. (optional) + enable_json_response (bool): Enable JSON response. Defaults to False. (optional) + max_completion_tokens (int): The max completion tokens. Defaults to None. (optional) + max_prompt_tokens (int): The max prompt tokens. Defaults to None. (optional) + parallel_tool_calls_enabled (bool): Enable parallel tool calls. Defaults to True. (optional) + truncation_message_count (int): The truncation message count. Defaults to None. (optional) + temperature (float): The temperature. Defaults to None. (optional) + top_p (float): The top p. Defaults to None. (optional) + metadata (dict[str, str]): The metadata. Defaults to {}. (optional) + kwargs (Any): Extra keyword arguments. + + Yields: + ChatMessageContent: The chat message content. 
+ """ + async for is_visible, content in self._invoke_internal( + thread_id=thread_id, + ai_model_id=ai_model_id, + enable_code_interpreter=enable_code_interpreter, + enable_file_search=enable_file_search, + enable_json_response=enable_json_response, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + parallel_tool_calls_enabled=parallel_tool_calls_enabled, + truncation_message_count=truncation_message_count, + temperature=temperature, + top_p=top_p, + metadata=metadata, + kwargs=kwargs, + ): + if is_visible: + yield content + + async def _invoke_internal( + self, + thread_id: str, + *, + ai_model_id: str | None = None, + enable_code_interpreter: bool | None = False, + enable_file_search: bool | None = False, + enable_json_response: bool | None = None, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + temperature: float | None = None, + top_p: float | None = None, + metadata: dict[str, str] | None = {}, + kwargs: Any, + ) -> AsyncIterable[tuple[bool, ChatMessageContent]]: + """Internal invoke method. + + The supplied arguments will take precedence over the specified assistant level attributes. + + Args: + thread_id (str): The thread id. + ai_model_id (str): The AI model id. Defaults to None. (optional) + enable_code_interpreter (bool): Enable code interpreter. Defaults to False. (optional) + enable_file_search (bool): Enable file search. Defaults to False. (optional) + enable_json_response (bool): Enable JSON response. Defaults to False. (optional) + max_completion_tokens (int): The max completion tokens. Defaults to None. (optional) + max_prompt_tokens (int): The max prompt tokens. Defaults to None. (optional) + parallel_tool_calls_enabled (bool): Enable parallel tool calls. Defaults to True. (optional) + truncation_message_count (int): The truncation message count. Defaults to None. (optional) + temperature (float): The temperature. Defaults to None. (optional) + top_p (float): The top p. Defaults to None. (optional) + metadata (dict[str, str]): The metadata. Defaults to {}. (optional) + kwargs (Any): Extra keyword arguments. + + Yields: + tuple[bool, ChatMessageContent]: A tuple of visibility and chat message content. 
+ """ + if not self.assistant: + raise AgentInitializationError("The assistant has not been created.") + + if self._is_deleted: + raise AgentInitializationError("The assistant has been deleted.") + + self._check_if_deleted() + tools = self._get_tools() + + run_options = self._generate_options( + ai_model_id=ai_model_id, + enable_code_interpreter=enable_code_interpreter, + enable_file_search=enable_file_search, + enable_json_response=enable_json_response, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + parallel_tool_calls_enabled=parallel_tool_calls_enabled, + truncation_message_count=truncation_message_count, + temperature=temperature, + top_p=top_p, + metadata=metadata, + kwargs=kwargs, + ) + + # Filter out None values to avoid passing them as kwargs + run_options = {k: v for k, v in run_options.items() if v is not None} + + run = await self.client.beta.threads.runs.create( + assistant_id=self.assistant.id, + thread_id=thread_id, + instructions=self.assistant.instructions, + tools=tools, # type: ignore + **run_options, + ) + + processed_step_ids = set() + function_steps: dict[str, FunctionCallContent] = {} + + while run.status != "completed": + run = await self._poll_run_status(run=run, thread_id=thread_id) + + if run.status in self.error_message_states: + raise AgentInvokeError( + f"Run failed with status: `{run.status}` for agent `{self.name}` and thread `{thread_id}`" + ) + + # Check if function calling required + if run.status == "requires_action": + fccs = self._get_function_call_contents(run, function_steps) + if fccs: + yield False, self._generate_function_call_content(agent_name=self.name, fccs=fccs) + + chat_history = ChatHistory() + _ = await self._invoke_function_calls(fccs=fccs, chat_history=chat_history) + + tool_outputs = self._format_tool_outputs(chat_history) + await self.client.beta.threads.runs.submit_tool_outputs( + run_id=run.id, + thread_id=thread_id, + tool_outputs=tool_outputs, # type: ignore + ) + + steps_response = await self.client.beta.threads.runs.steps.list(run_id=run.id, thread_id=thread_id) + steps: list[RunStep] = steps_response.data + completed_steps_to_process: list[RunStep] = sorted( + [s for s in steps if s.completed_at is not None and s.id not in processed_step_ids], + key=lambda s: s.created_at, + ) + + message_count = 0 + for completed_step in completed_steps_to_process: + if completed_step.type == "tool_calls": + assert hasattr(completed_step.step_details, "tool_calls") # nosec + for tool_call in completed_step.step_details.tool_calls: + is_visible = False + content: ChatMessageContent | None = None + if tool_call.type == "code_interpreter": + content = self._generate_code_interpreter_content( + self.name, + tool_call.code_interpreter.input, # type: ignore + ) + is_visible = True + elif tool_call.type == "function": + function_step = function_steps.get(tool_call.id) + assert function_step is not None # nosec + content = self._generate_function_result_content( + agent_name=self.name, function_step=function_step, tool_call=tool_call + ) + + if content: + message_count += 1 + yield is_visible, content + elif completed_step.type == "message_creation": + message = await self._retrieve_message( + thread_id=thread_id, + message_id=completed_step.step_details.message_creation.message_id, # type: ignore + ) + if message: + content = self._generate_message_content(self.name, message) + if len(content.items) > 0: + message_count += 1 + yield True, content + processed_step_ids.add(completed_step.id) + + # endregion + + # 
region Content Generation Methods + + def _generate_function_call_content(self, agent_name: str, fccs: list[FunctionCallContent]) -> ChatMessageContent: + """Generate function call content.""" + function_call_content: ChatMessageContent = ChatMessageContent(role=AuthorRole.TOOL, name=agent_name) # type: ignore + + function_call_content.items.extend(fccs) + + return function_call_content + + def _generate_function_result_content( + self, agent_name: str, function_step: FunctionCallContent, tool_call: "ToolCall" + ) -> ChatMessageContent: + """Generate function result content.""" + function_call_content: ChatMessageContent = ChatMessageContent(role=AuthorRole.TOOL, name=agent_name) # type: ignore + function_call_content.items.append( + FunctionResultContent( + function_name=function_step.function_name, + plugin_name=function_step.plugin_name, + id=function_step.id, + result=tool_call.function.output, # type: ignore + ) + ) + return function_call_content + + def _generate_code_interpreter_content(self, agent_name: str, code: str) -> ChatMessageContent: + """Generate code interpreter content.""" + return ChatMessageContent( + role=AuthorRole.ASSISTANT, + content=code, + name=agent_name, + ) + + def _generate_annotation_content(self, annotation: "Annotation") -> AnnotationContent: + """Generate annotation content.""" + file_id = None + if hasattr(annotation, "file_path"): + file_id = annotation.file_path.file_id + elif hasattr(annotation, "file_citation"): + file_id = annotation.file_citation.file_id + + return AnnotationContent( + file_id=file_id, + quote=annotation.text, + start_index=annotation.start_index, + end_index=annotation.end_index, + ) + + # endregion + + # region Agent Helper Methods + + def _generate_options( + self, + *, + ai_model_id: str | None = None, + enable_code_interpreter: bool | None = False, + enable_file_search: bool | None = False, + enable_json_response: bool | None = False, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + temperature: float | None = None, + top_p: float | None = None, + metadata: dict[str, str] | None = {}, + kwargs: Any = {}, + ) -> dict[str, Any]: + """Generate options for the assistant invocation.""" + merged_options = self._merge_options( + ai_model_id=ai_model_id, + enable_code_interpreter=enable_code_interpreter, + enable_file_search=enable_file_search, + enable_json_response=enable_json_response, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + parallel_tool_calls_enabled=parallel_tool_calls_enabled, + truncation_message_count=truncation_message_count, + temperature=temperature, + top_p=top_p, + metadata=metadata, + **kwargs, + ) + + truncation_message_count = merged_options.get("truncation_message_count") + + return { + "max_completion_tokens": merged_options.get("max_completion_tokens"), + "max_prompt_tokens": merged_options.get("max_prompt_tokens"), + "model": merged_options.get("ai_model_id"), + "top_p": merged_options.get("top_p"), + # TODO(evmattso): Support `parallel_tool_calls` when it is ready + "response_format": "json" if merged_options.get("enable_json_response") else None, + "temperature": merged_options.get("temperature"), + "truncation_strategy": truncation_message_count if truncation_message_count else None, + "metadata": merged_options.get("metadata", None), + } + + def _merge_options( + self, + ai_model_id: str | None = None, + enable_code_interpreter: 
bool | None = False,
+        enable_file_search: bool | None = False,
+        enable_json_response: bool | None = False,
+        max_completion_tokens: int | None = None,
+        max_prompt_tokens: int | None = None,
+        parallel_tool_calls_enabled: bool | None = True,
+        truncation_message_count: int | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        metadata: dict[str, str] | None = {},
+        **kwargs: Any,
+    ) -> dict[str, Any]:
+        """Merge the run-time options with the agent-level attribute options."""
+        merged_options = {
+            "ai_model_id": ai_model_id if ai_model_id is not None else self.ai_model_id,
+            "enable_code_interpreter": enable_code_interpreter
+            if enable_code_interpreter is not None
+            else self.enable_code_interpreter,
+            "enable_file_search": enable_file_search if enable_file_search is not None else self.enable_file_search,
+            "enable_json_response": enable_json_response
+            if enable_json_response is not None
+            else self.enable_json_response,
+            "max_completion_tokens": max_completion_tokens
+            if max_completion_tokens is not None
+            else self.max_completion_tokens,
+            "max_prompt_tokens": max_prompt_tokens if max_prompt_tokens is not None else self.max_prompt_tokens,
+            "parallel_tool_calls_enabled": parallel_tool_calls_enabled
+            if parallel_tool_calls_enabled is not None
+            else self.parallel_tool_calls_enabled,
+            "truncation_message_count": truncation_message_count
+            if truncation_message_count is not None
+            else self.truncation_message_count,
+            "temperature": temperature if temperature is not None else self.temperature,
+            "top_p": top_p if top_p is not None else self.top_p,
+            "metadata": metadata if metadata is not None else self.metadata,
+        }
+
+        # Update merged_options with any additional kwargs
+        merged_options.update(kwargs)
+        return merged_options
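
The precedence rule above is simple: any run-time argument that is not None wins over the agent-level attribute. A toy illustration of that rule (not part of the patch; the option names here are invented examples):

```python
# Toy illustration of the _merge_options precedence rule:
# run-time values that are not None override agent-level defaults.
agent_defaults = {"temperature": 0.2, "top_p": None, "max_prompt_tokens": 500}
run_time_args = {"temperature": 0.9, "top_p": None, "max_prompt_tokens": None}

merged = {
    key: run_time_args[key] if run_time_args[key] is not None else agent_defaults[key]
    for key in agent_defaults
}
assert merged == {"temperature": 0.9, "top_p": None, "max_prompt_tokens": 500}
```

+
+    async def _poll_run_status(self, run: Run, thread_id: str) -> Run:
+        """Poll the run status."""
+        logger.info(f"Polling run status: {run.id}, threadId: {thread_id}")
+
+        count = 0
+
+        while True:
+            # Reduce polling frequency after a couple attempts
+            await asyncio.sleep(self.polling_options.get_polling_interval(count).total_seconds())
+            count += 1
+
+            try:
+                run = await self.client.beta.threads.runs.retrieve(run.id, thread_id=thread_id)
+            except Exception as e:
+                logger.warning(f"Failed to retrieve run for run id: `{run.id}` and thread id: `{thread_id}`: {e}")
+                # Retry anyway
+
+            if run.status not in self.polling_status:
+                break
+
+        logger.info(f"Polled run status: {run.status}, {run.id}, threadId: {thread_id}")
+        return run
+
+    async def _retrieve_message(self, thread_id: str, message_id: str) -> Message | None:
+        """Retrieve a message from a thread."""
+        message: Message | None = None
+        count = 0
+        max_retries = 3
+
+        while count < max_retries:
+            try:
+                message = await self.client.beta.threads.messages.retrieve(message_id, thread_id=thread_id)
+                break
+            except Exception as ex:
+                logger.error(f"Failed to retrieve message {message_id} from thread {thread_id}: {ex}")
+                count += 1
+                if count >= max_retries:
+                    logger.error(
+                        f"Max retries reached. Unable to retrieve message {message_id} from thread {thread_id}."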
+ ) + break + backoff_time: float = self.polling_options.message_synchronization_delay.total_seconds() * (2**count) + await asyncio.sleep(backoff_time) + + return message + + def _get_message_contents(self, message: ChatMessageContent) -> list[dict[str, Any]]: + """Get the message contents.""" + contents: list[dict[str, Any]] = [] + for content in message.items: + if isinstance(content, TextContent): + contents.append({"type": "text", "text": content.text}) + elif isinstance(content, ImageContent) and content.uri: + contents.append(content.to_dict()) + return contents + + def _generate_message_content(self, assistant_name: str, message: Message) -> ChatMessageContent: + """Generate message content.""" + role = AuthorRole(message.role) + + content: ChatMessageContent = ChatMessageContent(role=role, name=assistant_name) # type: ignore + + for item_content in message.content: + if item_content.type == "text": + assert isinstance(item_content, TextContentBlock) # nosec + content.items.append( + TextContent( + text=item_content.text.value, + ) + ) + for annotation in item_content.text.annotations: + content.items.append(self._generate_annotation_content(annotation)) + elif item_content.type == "image_file": + assert isinstance(item_content, ImageFileContentBlock) # nosec + content.items.append( + FileReferenceContent( + file_id=item_content.image_file.file_id, + ) + ) + return content + + def _check_if_deleted(self) -> None: + """Check if the assistant has been deleted.""" + if self._is_deleted: + raise AgentInitializationError("The assistant has been deleted.") + + def _get_tools(self) -> list[dict[str, str]]: + """Get the list of tools for the assistant. + + Returns: + list[dict[str, str]]: The list of tools. + """ + tools = [] + if self.assistant is None: + raise AgentInitializationError("The assistant has not been created.") + + for tool in self.assistant.tools: + if isinstance(tool, CodeInterpreterTool): + tools.append({"type": "code_interpreter"}) + elif isinstance(tool, FileSearchTool): + tools.append({"type": "file_search"}) + + funcs = self.kernel.get_full_list_of_function_metadata() + tools.extend([kernel_function_metadata_to_function_call_format(f) for f in funcs]) + + return tools + + def _get_function_call_contents( + self, run: Run, function_steps: dict[str, FunctionCallContent] + ) -> list[FunctionCallContent]: + """Extract function call contents from the run. + + Args: + run (Run): The run. + function_steps (dict[str, FunctionCallContent]): The function steps + + Returns: + list[FunctionCallContent]: The list of function call contents. + """ + function_call_contents: list[FunctionCallContent] = [] + required_action = getattr(run, "required_action", None) + if not required_action or not getattr(required_action, "submit_tool_outputs", False): + return function_call_contents + for tool in required_action.submit_tool_outputs.tool_calls: + fcc = FunctionCallContent( + id=tool.id, + index=getattr(tool, "index", None), + name=tool.function.name, + arguments=tool.function.arguments, + ) + function_call_contents.append(fcc) + function_steps[tool.id] = fcc + return function_call_contents + + async def _invoke_function_calls(self, fccs: list[FunctionCallContent], chat_history: ChatHistory) -> list[Any]: + """Invoke function calls and store results in chat history. + + Args: + fccs (List[FunctionCallContent]): The function call contents. + chat_history (ChatHistory): The chat history. + + Returns: + List[Any]: The results. 
+ """ + tasks = [ + self.kernel.invoke_function_call(function_call=function_call, chat_history=chat_history) + for function_call in fccs + ] + return await asyncio.gather(*tasks) + + def _format_tool_outputs(self, chat_history: ChatHistory) -> list[dict[str, str]]: + """Format tool outputs from chat history for submission. + + Args: + chat_history (ChatHistory): The chat history. + + Returns: + list[dict[str, str]]: The formatted tool outputs + """ + tool_outputs = [] + for tool_call in chat_history.messages[0].items: + if isinstance(tool_call, FunctionResultContent): + tool_outputs.append( + { + "tool_call_id": tool_call.id, + "output": tool_call.result, + } + ) + return tool_outputs + + # endregion diff --git a/python/semantic_kernel/agents/open_ai/run_polling_options.py b/python/semantic_kernel/agents/open_ai/run_polling_options.py new file mode 100644 index 000000000000..9683f4cbea6b --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/run_polling_options.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft. All rights reserved. + +from datetime import timedelta + +from pydantic import Field + +from semantic_kernel.kernel_pydantic import KernelBaseModel +from semantic_kernel.utils.experimental_decorator import experimental_class + + +@experimental_class +class RunPollingOptions(KernelBaseModel): + """Configuration and defaults associated with polling behavior for Assistant API requests.""" + + default_polling_interval: timedelta = Field(default=timedelta(milliseconds=250)) + default_polling_backoff: timedelta = Field(default=timedelta(seconds=1)) + default_polling_backoff_threshold: int = Field(default=2) + default_message_synchronization_delay: timedelta = Field(default=timedelta(milliseconds=250)) + run_polling_interval: timedelta = Field(default=timedelta(milliseconds=250)) + run_polling_backoff: timedelta = Field(default=timedelta(seconds=1)) + run_polling_backoff_threshold: int = Field(default=2) + message_synchronization_delay: timedelta = Field(default=timedelta(milliseconds=250)) + + def get_polling_interval(self, iteration_count: int) -> timedelta: + """Get the polling interval for the given iteration count.""" + return ( + self.run_polling_backoff + if iteration_count > self.run_polling_backoff_threshold + else self.run_polling_interval + ) diff --git a/python/semantic_kernel/contents/__init__.py b/python/semantic_kernel/contents/__init__.py index 21d717945299..7563bdbcfe93 100644 --- a/python/semantic_kernel/contents/__init__.py +++ b/python/semantic_kernel/contents/__init__.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. +from semantic_kernel.contents.annotation_content import AnnotationContent from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.function_call_content import FunctionCallContent @@ -12,6 +13,7 @@ from semantic_kernel.contents.utils.finish_reason import FinishReason __all__ = [ + "AnnotationContent", "AuthorRole", "ChatHistory", "ChatMessageContent", diff --git a/python/semantic_kernel/contents/annotation_content.py b/python/semantic_kernel/contents/annotation_content.py new file mode 100644 index 000000000000..dfbabe28dfd9 --- /dev/null +++ b/python/semantic_kernel/contents/annotation_content.py @@ -0,0 +1,61 @@ +# Copyright (c) Microsoft. All rights reserved. 
+
+import logging
+from typing import Any, ClassVar, Literal, TypeVar
+from xml.etree.ElementTree import Element  # nosec
+
+from pydantic import Field
+
+from semantic_kernel.contents.const import ANNOTATION_CONTENT_TAG, ContentTypes
+from semantic_kernel.contents.kernel_content import KernelContent
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+logger = logging.getLogger(__name__)
+
+_T = TypeVar("_T", bound="AnnotationContent")
+
+
+@experimental_class
+class AnnotationContent(KernelContent):
+    """Annotation content."""
+
+    content_type: Literal[ContentTypes.ANNOTATION_CONTENT] = Field(ANNOTATION_CONTENT_TAG, init=False)  # type: ignore
+    tag: ClassVar[str] = ANNOTATION_CONTENT_TAG
+    file_id: str | None = None
+    quote: str | None = None
+    start_index: int | None = None
+    end_index: int | None = None
+
+    def __str__(self) -> str:
+        """Return the string representation of the annotation content."""
+        return f"AnnotationContent(file_id={self.file_id}, quote={self.quote}, start_index={self.start_index}, end_index={self.end_index})"  # noqa: E501
+
+    def to_element(self) -> Element:
+        """Convert the annotation content to an Element."""
+        element = Element(self.tag)
+        if self.file_id:
+            element.set("file_id", self.file_id)
+        if self.quote:
+            element.set("quote", self.quote)
+        if self.start_index is not None:
+            element.set("start_index", str(self.start_index))
+        if self.end_index is not None:
+            element.set("end_index", str(self.end_index))
+        return element
+
+    @classmethod
+    def from_element(cls: type[_T], element: Element) -> _T:
+        """Create an instance from an Element."""
+        return cls(
+            file_id=element.get("file_id"),
+            quote=element.get("quote"),
+            start_index=int(element.get("start_index")) if element.get("start_index") else None,  # type: ignore
+            end_index=int(element.get("end_index")) if element.get("end_index") else None,  # type: ignore
+        )
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert the instance to a dictionary."""
+        return {
+            "file_id": self.file_id,
+            "quote": self.quote,
+            "start_index": self.start_index,
+            "end_index": self.end_index,
+        }
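
The new content type round-trips through the same XML serialization scheme used by the other `KernelContent` types; a small illustrative check (not part of the patch):

```python
# Round-trip sketch: AnnotationContent serializes to an XML element and back
# without losing fields. AnnotationContent is re-exported from
# semantic_kernel.contents by this change.
from semantic_kernel.contents import AnnotationContent

annotation = AnnotationContent(file_id="file-123", quote="cited text", start_index=0, end_index=10)
element = annotation.to_element()
restored = AnnotationContent.from_element(element)
assert restored.file_id == "file-123" and restored.end_index == 10
```

diff --git a/python/semantic_kernel/contents/chat_message_content.py b/python/semantic_kernel/contents/chat_message_content.py
index 930e97202c98..25036b739b9b 100644
--- a/python/semantic_kernel/contents/chat_message_content.py
+++ b/python/semantic_kernel/contents/chat_message_content.py
@@ -9,15 +9,19 @@
 from defusedxml import ElementTree
 from pydantic import Field
 
+from semantic_kernel.contents.annotation_content import AnnotationContent
 from semantic_kernel.contents.const import (
+    ANNOTATION_CONTENT_TAG,
     CHAT_MESSAGE_CONTENT_TAG,
     DISCRIMINATOR_FIELD,
+    FILE_REFERENCE_CONTENT_TAG,
     FUNCTION_CALL_CONTENT_TAG,
     FUNCTION_RESULT_CONTENT_TAG,
     IMAGE_CONTENT_TAG,
     TEXT_CONTENT_TAG,
     ContentTypes,
 )
+from semantic_kernel.contents.file_reference_content import FileReferenceContent
 from semantic_kernel.contents.function_call_content import FunctionCallContent
 from semantic_kernel.contents.function_result_content import FunctionResultContent
 from semantic_kernel.contents.image_content import ImageContent
@@ -29,18 +33,22 @@
 from semantic_kernel.exceptions.content_exceptions import ContentInitializationError
 
 TAG_CONTENT_MAP = {
+    ANNOTATION_CONTENT_TAG: AnnotationContent,
     TEXT_CONTENT_TAG: TextContent,
+    FILE_REFERENCE_CONTENT_TAG: FileReferenceContent,
     FUNCTION_CALL_CONTENT_TAG: FunctionCallContent,
     FUNCTION_RESULT_CONTENT_TAG: FunctionResultContent,
     IMAGE_CONTENT_TAG: ImageContent,
 }
 
 ITEM_TYPES = Union[
+    AnnotationContent,
     ImageContent,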
    TextContent,
    StreamingTextContent,
    FunctionResultContent,
    FunctionCallContent,
+    FileReferenceContent,
 ]
 
 logger = logging.getLogger(__name__)
diff --git a/python/semantic_kernel/contents/const.py b/python/semantic_kernel/contents/const.py
index 07153e4c0541..8f48d4eadb0c 100644
--- a/python/semantic_kernel/contents/const.py
+++ b/python/semantic_kernel/contents/const.py
@@ -6,16 +6,20 @@
 CHAT_HISTORY_TAG: Final[str] = "chat_history"
 TEXT_CONTENT_TAG: Final[str] = "text"
 IMAGE_CONTENT_TAG: Final[str] = "image"
+ANNOTATION_CONTENT_TAG: Final[str] = "annotation"
 BINARY_CONTENT_TAG: Final[str] = "binary"
+FILE_REFERENCE_CONTENT_TAG: Final[str] = "file_reference"
 FUNCTION_CALL_CONTENT_TAG: Final[str] = "function_call"
 FUNCTION_RESULT_CONTENT_TAG: Final[str] = "function_result"
 DISCRIMINATOR_FIELD: Final[str] = "content_type"
 
 
 class ContentTypes(str, Enum):
+    ANNOTATION_CONTENT = ANNOTATION_CONTENT_TAG
     BINARY_CONTENT = BINARY_CONTENT_TAG
     CHAT_MESSAGE_CONTENT = CHAT_MESSAGE_CONTENT_TAG
     IMAGE_CONTENT = IMAGE_CONTENT_TAG
+    FILE_REFERENCE_CONTENT = FILE_REFERENCE_CONTENT_TAG
     FUNCTION_CALL_CONTENT = FUNCTION_CALL_CONTENT_TAG
     FUNCTION_RESULT_CONTENT = FUNCTION_RESULT_CONTENT_TAG
     TEXT_CONTENT = TEXT_CONTENT_TAG
diff --git a/python/semantic_kernel/contents/file_reference_content.py b/python/semantic_kernel/contents/file_reference_content.py
new file mode 100644
index 000000000000..9cb2acd680f7
--- /dev/null
+++ b/python/semantic_kernel/contents/file_reference_content.py
@@ -0,0 +1,46 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import logging
+from typing import Any, ClassVar, Literal, TypeVar
+from xml.etree.ElementTree import Element  # nosec
+
+from pydantic import Field
+
+from semantic_kernel.contents.const import FILE_REFERENCE_CONTENT_TAG, ContentTypes
+from semantic_kernel.contents.kernel_content import KernelContent
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+logger = logging.getLogger(__name__)
+
+_T = TypeVar("_T", bound="FileReferenceContent")
+
+
+@experimental_class
+class FileReferenceContent(KernelContent):
+    """File reference content."""
+
+    content_type: Literal[ContentTypes.FILE_REFERENCE_CONTENT] = Field(FILE_REFERENCE_CONTENT_TAG, init=False)  # type: ignore
+    tag: ClassVar[str] = FILE_REFERENCE_CONTENT_TAG
+    file_id: str | None = None
+
+    def __str__(self) -> str:
+        """Return the string representation of the file reference content."""
+        return f"FileReferenceContent(file_id={self.file_id})"
+
+    def to_element(self) -> Element:
+        """Convert the file reference content to an Element."""
+        element = Element(self.tag)
+        if self.file_id:
+            element.set("file_id", self.file_id)
+        return element
+
+    @classmethod
+    def from_element(cls: type[_T], element: Element) -> _T:
+        """Create an instance from an Element."""
+        return cls(
+            file_id=element.get("file_id"),
+        )
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert the instance to a dictionary."""
+        return {
+            "file_id": self.file_id,
+        }
diff --git a/python/semantic_kernel/exceptions/__init__.py b/python/semantic_kernel/exceptions/__init__.py
index 3c3a43e419d0..3a5f22b87bf2 100644
--- a/python/semantic_kernel/exceptions/__init__.py
+++ b/python/semantic_kernel/exceptions/__init__.py
@@ -1,5 +1,6 @@
 # Copyright (c) Microsoft. All rights reserved.
+from semantic_kernel.exceptions.agent_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.content_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.function_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.kernel_exceptions import *  # noqa: F403
diff --git a/python/semantic_kernel/exceptions/agent_exceptions.py b/python/semantic_kernel/exceptions/agent_exceptions.py
new file mode 100644
index 000000000000..a24d0700be0e
--- /dev/null
+++ b/python/semantic_kernel/exceptions/agent_exceptions.py
@@ -0,0 +1,24 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from semantic_kernel.exceptions.kernel_exceptions import KernelException
+
+
+class AgentException(KernelException):
+    """Base exception for all agent exceptions."""
+
+
+class AgentFileNotFoundException(AgentException):
+    """The agent file was not found."""
+
+
+class AgentInitializationError(AgentException):
+    """An error occurred while initializing the agent."""
+
+
+class AgentExecutionError(AgentException):
+    """An error occurred while executing the agent."""
+
+
+class AgentInvokeError(AgentException):
+    """An error occurred while invoking the agent."""
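
Since everything derives from `AgentException` (itself a `KernelException`), callers can catch a specific failure or fall back to the common base; a brief illustrative snippet (not part of the patch):

```python
# Catch a specific agent failure, or the shared AgentException base.
from semantic_kernel.exceptions.agent_exceptions import AgentException, AgentInvokeError

try:
    raise AgentInvokeError("Run failed with status: `expired`")
except AgentException as ex:
    print(f"Agent operation failed: {ex}")
```

diff --git a/python/tests/samples/test_concepts.py b/python/tests/samples/test_concepts.py
index 64f6f2c6820c..79018034ee93 100644
--- a/python/tests/samples/test_concepts.py
+++ b/python/tests/samples/test_concepts.py
@@ -5,8 +5,6 @@
 import pytest
 from pytest import mark, param
 
-from samples.concepts.agents.step1_agent import main as step1_agent
-from samples.concepts.agents.step2_plugins import main as step2_plugins
 from samples.concepts.auto_function_calling.azure_python_code_interpreter_function_calling import (
     main as azure_python_code_interpreter_function_calling,
 )
@@ -54,6 +52,8 @@
 from samples.concepts.rag.rag_with_text_memory_plugin import main as rag_with_text_memory_plugin
 from samples.concepts.search.bing_search_plugin import main as bing_search_plugin
 from samples.concepts.service_selector.custom_service_selector import main as custom_service_selector
+from samples.getting_started_with_agents.step1_agent import main as step1_agent
+from samples.getting_started_with_agents.step2_plugins import main as step2_plugins
 from tests.samples.samples_utils import retry
 
 concepts = [
diff --git a/python/tests/unit/agents/test_azure_assistant_agent.py b/python/tests/unit/agents/test_azure_assistant_agent.py
new file mode 100644
index 000000000000..3da6e3ea963a
--- /dev/null
+++ b/python/tests/unit/agents/test_azure_assistant_agent.py
@@ -0,0 +1,291 @@
+# Copyright (c) Microsoft. All rights reserved.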
+ +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from openai import AsyncAzureOpenAI +from openai.resources.beta.assistants import Assistant +from openai.types.beta.assistant import ToolResources, ToolResourcesCodeInterpreter, ToolResourcesFileSearch +from pydantic import ValidationError + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.exceptions.agent_exceptions import AgentInitializationError +from semantic_kernel.kernel import Kernel + + +@pytest.fixture +def azure_openai_assistant_agent(kernel: Kernel, azure_openai_unit_test_env): + return AzureAssistantAgent( + kernel=kernel, + service_id="test_service", + name="test_name", + instructions="test_instructions", + api_key="test_api_key", + endpoint="https://test.endpoint", + ai_model_id="test_model", + api_version="2024-05-01-preview", + default_headers={"User-Agent": "test-agent"}, + ) + + +@pytest.fixture +def mock_assistant(): + return Assistant( + created_at=123456789, + object="assistant", + metadata={ + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + model="test_model", + description="test_description", + id="test_id", + instructions="test_instructions", + name="test_name", + tools=[{"type": "code_interpreter"}, {"type": "file_search"}], + temperature=0.7, + top_p=0.9, + response_format={"type": "json_object"}, + tool_resources=ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ), + ) + + +def test_initialization(azure_openai_assistant_agent: AzureAssistantAgent, azure_openai_unit_test_env): + agent = azure_openai_assistant_agent + assert agent is not None + + +def test_create_client(azure_openai_assistant_agent, azure_openai_unit_test_env): + assert isinstance(azure_openai_assistant_agent.client, AsyncAzureOpenAI) + + +def test_create_client_from_configuration(azure_openai_assistant_agent, azure_openai_unit_test_env): + assert isinstance(azure_openai_assistant_agent.client, AsyncAzureOpenAI) + assert azure_openai_assistant_agent.client.api_key == "test_api_key" + + +def test_create_client_from_configuration_missing_api_key(): + with pytest.raises( + AgentInitializationError, + match="Please provide either AzureOpenAI api_key, an ad_token or an ad_token_provider or a client.", + ): + AzureAssistantAgent._create_client(None) + + +def test_create_client_from_configuration_missing_endpoint(): + with pytest.raises( + AgentInitializationError, + match="Please provide an AzureOpenAI endpoint.", + ): + AzureAssistantAgent._create_client(api_key="test") + + +@pytest.mark.asyncio +async def test_create_agent(kernel: Kernel, azure_openai_unit_test_env): + with patch.object(AzureAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant: + mock_create_assistant.return_value = MagicMock(spec=Assistant) + agent = await AzureAssistantAgent.create( + kernel=kernel, service_id="test_service", name="test_name", api_key="test_api_key", api_version="2024-05-01" + ) + assert agent.assistant is not None + mock_create_assistant.assert_called_once() + await agent.client.close() + + +@pytest.mark.asyncio +async def test_list_definitions(kernel: Kernel, mock_assistant, azure_openai_unit_test_env): + agent = AzureAssistantAgent( + kernel=kernel, service_id="test_service", name="test_name", 
instructions="test_instructions", id="test_id" + ) + + with patch.object( + AzureAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncAzureOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants = MagicMock() + mock_client_instance.beta.assistants.list = AsyncMock(return_value=MagicMock(data=[mock_assistant])) + + agent.client = mock_client_instance + + definitions = [] + async for definition in agent.list_definitions(): + definitions.append(definition) + + mock_client_instance.beta.assistants.list.assert_called() + + assert len(definitions) == 1 + assert definitions[0] == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + + +@pytest.mark.asyncio +async def test_retrieve_agent(kernel, azure_openai_unit_test_env): + with patch.object( + AzureAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncAzureOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants = MagicMock() + + mock_assistant = MagicMock() + mock_assistant.metadata = { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + } + mock_assistant.model = "test_model" + mock_assistant.description = "test_description" + mock_assistant.id = "test_id" + mock_assistant.instructions = "test_instructions" + mock_assistant.name = "test_name" + mock_assistant.tools = ["code_interpreter", "file_search"] + mock_assistant.temperature = 0.7 + mock_assistant.top_p = 0.9 + mock_assistant.response_format = {"type": "json_object"} + mock_assistant.tool_resources = { + "code_interpreter": {"file_ids": ["file1", "file2"]}, + "file_search": {"vector_store_ids": ["vector_store1"]}, + } + + mock_client_instance.beta.assistants.retrieve = AsyncMock(return_value=mock_assistant) + + agent = AzureAssistantAgent( + kernel=kernel, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + agent._create_open_ai_assistant_definition = MagicMock( + return_value={ + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + ) + + retrieved_agent = await 
agent.retrieve("test_id", "test_api_key", kernel) + assert retrieved_agent.model_dump( + include={ + "ai_model_id", + "description", + "id", + "instructions", + "name", + "enable_code_interpreter", + "enable_file_search", + "enable_json_response", + "file_ids", + "temperature", + "top_p", + "vector_store_id", + "metadata", + "max_completion_tokens", + "max_prompt_tokens", + "parallel_tool_calls_enabled", + "truncation_message_count", + } + ) == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + mock_client_instance.beta.assistants.retrieve.assert_called_once_with("test_id") + agent._create_open_ai_assistant_definition.assert_called_once_with(mock_assistant) + + +def test_open_ai_settings_create_throws(azure_openai_unit_test_env): + with patch( + "semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings.AzureOpenAISettings.create" + ) as mock_create: + mock_create.side_effect = ValidationError.from_exception_data("test", line_errors=[], input_type="python") + + with pytest.raises(AgentInitializationError, match="Failed to create Azure OpenAI settings."): + AzureAssistantAgent(service_id="test", api_key="test_api_key", deployment_name="test_deployment_name") + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"]], indirect=True) +def test_azure_openai_agent_create_missing_deployment_name(azure_openai_unit_test_env): + with pytest.raises(AgentInitializationError, match="The Azure OpenAI chat_deployment_name is required."): + AzureAssistantAgent( + service_id="test_service", api_key="test_key", endpoint="https://example.com", env_file_path="test.env" + ) + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_KEY"]], indirect=True) +def test_azure_openai_agent_create_missing_api_key(azure_openai_unit_test_env): + with pytest.raises(AgentInitializationError, match="Please provide either api_key, ad_token or ad_token_provider."): + AzureAssistantAgent(service_id="test_service", endpoint="https://example.com", env_file_path="test.env") diff --git a/python/tests/unit/agents/test_open_ai_assistant_agent.py b/python/tests/unit/agents/test_open_ai_assistant_agent.py new file mode 100644 index 000000000000..0005f9ec76ab --- /dev/null +++ b/python/tests/unit/agents/test_open_ai_assistant_agent.py @@ -0,0 +1,428 @@ +# Copyright (c) Microsoft. All rights reserved. 
+
+import json
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+from openai import AsyncOpenAI
+from openai.resources.beta.assistants import Assistant
+from openai.types.beta.assistant import (
+    ToolResources,
+    ToolResourcesCodeInterpreter,
+    ToolResourcesFileSearch,
+)
+from pydantic import ValidationError
+
+from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent
+from semantic_kernel.exceptions.agent_exceptions import AgentInitializationError
+from semantic_kernel.kernel import Kernel
+
+
+@pytest.fixture
+def openai_assistant_agent(kernel: Kernel, openai_unit_test_env):
+    return OpenAIAssistantAgent(
+        kernel=kernel,
+        service_id="test_service",
+        name="test_name",
+        instructions="test_instructions",
+        api_key="test_api_key",
+        kwargs={"temperature": 0.1},
+        max_completion_tokens=100,
+        max_prompt_tokens=100,
+        parallel_tool_calls_enabled=True,
+        truncation_message_count=2,
+    )
+
+
+@pytest.fixture
+def mock_assistant():
+    return Assistant(
+        created_at=123456789,
+        object="assistant",
+        metadata={
+            "__run_options": {
+                "max_completion_tokens": 100,
+                "max_prompt_tokens": 50,
+                "parallel_tool_calls_enabled": True,
+                "truncation_message_count": 10,
+            }
+        },
+        model="test_model",
+        description="test_description",
+        id="test_id",
+        instructions="test_instructions",
+        name="test_name",
+        tools=[{"type": "code_interpreter"}, {"type": "file_search"}],
+        temperature=0.7,
+        top_p=0.9,
+        response_format={"type": "json_object"},
+        tool_resources=ToolResources(
+            code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]),
+            file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]),
+        ),
+    )
+
+
+@pytest.fixture
+def mock_assistant_json():
+    return Assistant(
+        created_at=123456789,
+        object="assistant",
+        metadata={
+            "__run_options": json.dumps(
+                {
+                    "max_completion_tokens": 100,
+                    "max_prompt_tokens": 50,
+                    "parallel_tool_calls_enabled": True,
+                    "truncation_message_count": 10,
+                }
+            )
+        },
+        model="test_model",
+        description="test_description",
+        id="test_id",
+        instructions="test_instructions",
+        name="test_name",
+        tools=[{"type": "code_interpreter"}, {"type": "file_search"}],
+        temperature=0.7,
+        top_p=0.9,
+        response_format={"type": "json_object"},
+        tool_resources=ToolResources(
+            code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]),
+            file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]),
+        ),
+    )
+
+
+def test_initialization(openai_assistant_agent: OpenAIAssistantAgent, openai_unit_test_env):
+    agent = openai_assistant_agent
+    assert agent is not None
+    assert agent.kernel is not None
+
+
+def test_create_client(openai_unit_test_env):
+    client = OpenAIAssistantAgent._create_client(api_key="test_api_key", default_headers={"User-Agent": "test-agent"})
+    assert isinstance(client, AsyncOpenAI)
+    assert client.api_key == "test_api_key"
+
+
+def test_create_client_from_configuration_missing_api_key():
+    with pytest.raises(
+        AgentInitializationError,
+        match="Please provide an OpenAI api_key",
+    ):
+        OpenAIAssistantAgent._create_client(None)
+
+
+@pytest.mark.asyncio
+async def test_create_agent(kernel: Kernel, openai_unit_test_env):
+    with patch.object(OpenAIAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant:
+        mock_create_assistant.return_value = MagicMock(spec=Assistant)
+        agent = await OpenAIAssistantAgent.create(
+            kernel=kernel,
+            ai_model_id="test_model_id",
+            service_id="test_service",
+            name="test_name",
+            api_key="test_api_key",
+ ) + assert agent.assistant is not None + mock_create_assistant.assert_called_once() + + +@pytest.mark.asyncio +async def test_create_agent_second_way(kernel: Kernel, mock_assistant, openai_unit_test_env): + agent = OpenAIAssistantAgent( + kernel=kernel, + ai_model_id="test_model_id", + service_id="test_service", + name="test_name", + api_key="test_api_key", + max_completion_tokens=100, + max_prompt_tokens=100, + parallel_tool_calls_enabled=True, + truncation_message_count=2, + ) + + with patch.object( + OpenAIAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + agent.client = mock_client_instance + + assistant = await agent.create_assistant() + + mock_client_instance.beta.assistants.create.assert_called_once() + + assert assistant == mock_assistant + + assert json.loads( + mock_client_instance.beta.assistants.create.call_args[1]["metadata"][agent._options_metadata_key] + ) == { + "max_completion_tokens": 100, + "max_prompt_tokens": 100, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 2, + } + + +@pytest.mark.asyncio +async def test_list_definitions(kernel: Kernel, mock_assistant, openai_unit_test_env): + agent = OpenAIAssistantAgent( + kernel=kernel, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + + with patch.object( + OpenAIAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants = MagicMock() + mock_client_instance.beta.assistants.list = AsyncMock(return_value=MagicMock(data=[mock_assistant])) + + agent.client = mock_client_instance + + definitions = [] + async for definition in agent.list_definitions(): + definitions.append(definition) + + mock_client_instance.beta.assistants.list.assert_called() + + assert len(definitions) == 1 + assert definitions[0] == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + + +@pytest.mark.asyncio +async def test_retrieve_agent(kernel, openai_unit_test_env): + with patch.object( + OpenAIAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants = MagicMock() + + mock_assistant = MagicMock() + mock_assistant.metadata = { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + } + mock_assistant.model = "test_model" + mock_assistant.description = "test_description" + 
mock_assistant.id = "test_id" + mock_assistant.instructions = "test_instructions" + mock_assistant.name = "test_name" + mock_assistant.tools = ["code_interpreter", "file_search"] + mock_assistant.temperature = 0.7 + mock_assistant.top_p = 0.9 + mock_assistant.response_format = {"type": "json_object"} + mock_assistant.tool_resources = { + "code_interpreter": {"file_ids": ["file1", "file2"]}, + "file_search": {"vector_store_ids": ["vector_store1"]}, + } + + mock_client_instance.beta.assistants.retrieve = AsyncMock(return_value=mock_assistant) + + agent = OpenAIAssistantAgent( + kernel=kernel, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + agent._create_open_ai_assistant_definition = MagicMock( + return_value={ + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + ) + + retrieved_agent = await agent.retrieve("test_id", "test_api_key", kernel) + assert retrieved_agent.model_dump( + include={ + "ai_model_id", + "description", + "id", + "instructions", + "name", + "enable_code_interpreter", + "enable_file_search", + "enable_json_response", + "file_ids", + "temperature", + "top_p", + "vector_store_id", + "metadata", + "max_completion_tokens", + "max_prompt_tokens", + "parallel_tool_calls_enabled", + "truncation_message_count", + } + ) == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + mock_client_instance.beta.assistants.retrieve.assert_called_once_with("test_id") + agent._create_open_ai_assistant_definition.assert_called_once_with(mock_assistant) + + +def test_open_ai_settings_create_throws(openai_unit_test_env): + with patch("semantic_kernel.connectors.ai.open_ai.settings.open_ai_settings.OpenAISettings.create") as mock_create: + mock_create.side_effect = ValidationError.from_exception_data("test", line_errors=[], input_type="python") + + with pytest.raises(AgentInitializationError, match="Failed to create OpenAI settings."): + OpenAIAssistantAgent( + service_id="test", api_key="test_api_key", org_id="test_org_id", ai_model_id="test_model_id" + ) + + +@pytest.mark.parametrize("exclude_list", [["OPENAI_CHAT_MODEL_ID"]], indirect=True) +def test_azure_openai_agent_create_missing_chat_model_id_throws(openai_unit_test_env): + with pytest.raises(AgentInitializationError, match="The OpenAI model ID is required."): + 
OpenAIAssistantAgent(service_id="test_service", env_file_path="test.env") + + +@pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) +def test_azure_openai_agent_create_missing_api_key_throws(openai_unit_test_env): + with pytest.raises(AgentInitializationError, match="The OpenAI API key is required, if a client is not provided."): + OpenAIAssistantAgent(service_id="test_service", env_file_path="test.env") + + +def test_create_open_ai_assistant_definition(mock_assistant, openai_unit_test_env): + agent = OpenAIAssistantAgent( + kernel=None, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + + definition = agent._create_open_ai_assistant_definition(mock_assistant) + + assert definition == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + + +def test_create_open_ai_assistant_definition_with_json_metadata(mock_assistant_json, openai_unit_test_env): + agent = OpenAIAssistantAgent( + kernel=None, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + + definition = agent._create_open_ai_assistant_definition(mock_assistant_json) + + assert definition == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } diff --git a/python/tests/unit/agents/test_open_ai_assistant_base.py b/python/tests/unit/agents/test_open_ai_assistant_base.py new file mode 100644 index 000000000000..1f0c14750459 --- /dev/null +++ b/python/tests/unit/agents/test_open_ai_assistant_base.py @@ -0,0 +1,972 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from datetime import datetime, timedelta, timezone +from typing import Any +from unittest.mock import AsyncMock, MagicMock, mock_open, patch + +import pytest +from openai import AsyncAzureOpenAI, AsyncOpenAI +from openai.resources.beta.threads.runs.runs import Run +from openai.types.beta.assistant import Assistant, ToolResources, ToolResourcesCodeInterpreter, ToolResourcesFileSearch +from openai.types.beta.assistant_response_format import AssistantResponseFormat +from openai.types.beta.assistant_tool import CodeInterpreterTool, FileSearchTool +from openai.types.beta.threads.annotation import FileCitationAnnotation, FilePathAnnotation +from openai.types.beta.threads.file_citation_annotation import FileCitation +from openai.types.beta.threads.file_path_annotation import FilePath +from openai.types.beta.threads.image_file import ImageFile +from openai.types.beta.threads.image_file_content_block import ImageFileContentBlock +from openai.types.beta.threads.required_action_function_tool_call import Function +from openai.types.beta.threads.required_action_function_tool_call import Function as RequiredActionFunction +from openai.types.beta.threads.run import ( + RequiredAction, + RequiredActionFunctionToolCall, + RequiredActionSubmitToolOutputs, +) +from openai.types.beta.threads.runs import RunStep +from openai.types.beta.threads.runs.code_interpreter_tool_call import ( + CodeInterpreter, + CodeInterpreterToolCall, +) +from openai.types.beta.threads.runs.function_tool_call import Function as RunsFunction +from openai.types.beta.threads.runs.function_tool_call import FunctionToolCall +from openai.types.beta.threads.runs.message_creation_step_details import MessageCreation, MessageCreationStepDetails +from openai.types.beta.threads.runs.tool_calls_step_details import ToolCallsStepDetails +from openai.types.beta.threads.text import Text +from openai.types.beta.threads.text_content_block import TextContentBlock + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.contents.annotation_content import AnnotationContent +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.file_reference_content import FileReferenceContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import ( + AgentExecutionError, + AgentFileNotFoundException, + AgentInitializationError, + AgentInvokeError, +) +from semantic_kernel.functions.kernel_function_decorator import kernel_function +from semantic_kernel.functions.kernel_function_from_method import KernelFunctionFromMethod +from semantic_kernel.kernel import Kernel + +# region Test Fixtures + + +@pytest.fixture +def azure_openai_assistant_agent(kernel: Kernel, azure_openai_unit_test_env): + return AzureAssistantAgent( + kernel=kernel, + service_id="test_service", + name="test_name", + instructions="test_instructions", + api_key="test", + metadata={"key": "value"}, + api_version="2024-05-01", + description="test_description", + ai_model_id="test_model", + enable_code_interpreter=True, + enable_file_search=True, + vector_store_id="vector_store1", + 
file_ids=["file1", "file2"], + temperature=0.7, + top_p=0.9, + enable_json_response=True, + ) + + +@pytest.fixture +def mock_assistant(): + return Assistant( + created_at=123456789, + object="assistant", + metadata={ + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + model="test_model", + description="test_description", + id="test_id", + instructions="test_instructions", + name="test_name", + tools=[{"type": "code_interpreter"}, {"type": "file_search"}], + temperature=0.7, + top_p=0.9, + response_format={"type": "json_object"}, + tool_resources=ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ), + ) + + +@pytest.fixture +def mock_thread(): + class MockThread: + id = "test_thread_id" + + return MockThread() + + +@pytest.fixture +def mock_chat_message_content(): + return ChatMessageContent(role=AuthorRole.USER, content="test message", metadata={"key": "value"}) + + +@pytest.fixture +def mock_message(): + class MockMessage: + id = "test_message_id" + + return MockMessage() + + +@pytest.fixture +def mock_thread_messages(): + class MockMessage: + def __init__(self, role, content, assistant_id=None): + self.role = role + self.content = content + self.assistant_id = assistant_id + + return [ + MockMessage( + role="user", + content=[ + TextContentBlock( + type="text", + text=Text( + value="Hello", + annotations=[ + FilePathAnnotation( + type="file_path", + file_path=FilePath(file_id="test_file_id"), + end_index=5, + start_index=0, + text="Hello", + ), + FileCitationAnnotation( + type="file_citation", + file_citation=FileCitation(file_id="test_file_id", quote="test quote"), + text="Hello", + start_index=0, + end_index=5, + ), + ], + ), + ) + ], + ), + MockMessage( + role="assistant", + content=[ + ImageFileContentBlock(type="image_file", image_file=ImageFile(file_id="test_file_id", detail="auto")) + ], + assistant_id="assistant_1", + ), + ] + + +@pytest.fixture +def mock_run_failed(): + return Run( + id="run_id", + status="failed", + assistant_id="assistant_id", + created_at=123456789, + instructions="instructions", + model="model", + object="thread.run", + thread_id="thread_id", + tools=[], + ) + + +@pytest.fixture +def mock_run_required_action(): + return Run( + id="run_id", + status="requires_action", + assistant_id="assistant_id", + created_at=123456789, + instructions="instructions", + model="model", + object="thread.run", + thread_id="thread_id", + tools=[], + required_action=RequiredAction( + type="submit_tool_outputs", + submit_tool_outputs=RequiredActionSubmitToolOutputs( + tool_calls=[ + RequiredActionFunctionToolCall( + id="tool_call_id", + type="function", + function=RequiredActionFunction(arguments="{}", name="function_name"), + ) + ] + ), + ), + ) + + +@pytest.fixture +def mock_run_completed(): + return Run( + id="run_id", + status="completed", + assistant_id="assistant_id", + created_at=123456789, + instructions="instructions", + model="model", + object="thread.run", + thread_id="thread_id", + tools=[], + required_action=RequiredAction( + type="submit_tool_outputs", + submit_tool_outputs=RequiredActionSubmitToolOutputs( + tool_calls=[ + RequiredActionFunctionToolCall( + id="tool_call_id", type="function", function=Function(arguments="{}", name="function_name") + ) + ] + ), + ), + ) + + +@pytest.fixture +def mock_function_call_content(): + return 
FunctionCallContent(id="function_call_id", name="function_name", arguments={})
+
+
+@pytest.fixture
+def mock_run_in_progress():
+    class MockRun:
+        def __init__(self):
+            self.id = "run_id"
+            self.status = "requires_action"
+            self.assistant_id = "assistant_id"
+            self.created_at = int(datetime.now(timezone.utc).timestamp())
+            self.instructions = "instructions"
+            self.model = "model"
+            self.object = "run"
+            self.thread_id = "thread_id"
+            self.tools = []
+            self.poll_count = 0
+
+        def update_status(self):
+            self.poll_count += 1
+            if self.poll_count > 2:
+                self.status = "completed"
+
+    return MockRun()
+
+
+@pytest.fixture
+def mock_run_step_tool_call():
+    class MockToolCall:
+        def __init__(self):
+            self.type = "code_interpreter"
+            self.code_interpreter = MagicMock(input="print('Hello, world!')")
+
+    return RunStep(
+        id="step_id_1",
+        type="tool_calls",
+        completed_at=int(datetime.now(timezone.utc).timestamp()),
+        created_at=int((datetime.now(timezone.utc) - timedelta(minutes=1)).timestamp()),
+        step_details=ToolCallsStepDetails(
+            tool_calls=[
+                CodeInterpreterToolCall(
+                    type="code_interpreter", id="test", code_interpreter=CodeInterpreter(input="test code", outputs=[])
+                ),
+                FunctionToolCall(
+                    type="function",
+                    id="test",
+                    function=RunsFunction(arguments="{}", name="function_name", output="test output"),
+                ),
+            ],
+            type="tool_calls",
+        ),
+        assistant_id="assistant_id",
+        object="thread.run.step",
+        run_id="run_id",
+        status="completed",
+        thread_id="thread_id",
+    )
+
+
+@pytest.fixture
+def mock_run_step_message_creation():
+    class MockMessageCreation:
+        def __init__(self):
+            self.message_id = "message_id"
+
+    class MockStepDetails:
+        def __init__(self):
+            self.message_creation = MockMessageCreation()
+
+    return RunStep(
+        id="step_id_2",
+        type="message_creation",
+        completed_at=int(datetime.now(timezone.utc).timestamp()),
+        created_at=int((datetime.now(timezone.utc) - timedelta(minutes=2)).timestamp()),
+        step_details=MessageCreationStepDetails(
+            type="message_creation", message_creation=MessageCreation(message_id="test")
+        ),
+        assistant_id="assistant_id",
+        object="thread.run.step",
+        run_id="run_id",
+        status="completed",
+        thread_id="thread_id",
+    )
+
+
+# endregion
+
+# region Tests
+
+
+@pytest.mark.asyncio
+async def test_create_assistant(
+    azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env
+):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.assistants = MagicMock()
+        mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant)
+
+        assistant = await azure_openai_assistant_agent.create_assistant(
+            ai_model_id="test_model",
+            description="test_description",
+            instructions="test_instructions",
+            name="test_name",
+            enable_code_interpreter=True,
+            enable_file_search=True,
+            vector_store_id="vector_store1",
+            file_ids=["file1", "file2"],
+            metadata={"key": "value"},
+        )
+
+        assert assistant.model == "test_model"
+        assert assistant.description == "test_description"
+        assert assistant.id == "test_id"
+        assert assistant.instructions == "test_instructions"
+        assert assistant.name == "test_name"
+        assert assistant.tools == [CodeInterpreterTool(type="code_interpreter"), FileSearchTool(type="file_search")]
+        assert assistant.temperature == 0.7
+        assert assistant.top_p == 0.9
+        assert assistant.response_format == AssistantResponseFormat(type="json_object")
+        assert assistant.tool_resources == ToolResources(
+            code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]),
+            file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_assistant_with_model_attributes(
+    azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env
+):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.assistants = MagicMock()
+        mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant)
+
+        assistant = await azure_openai_assistant_agent.create_assistant(
+            ai_model_id="test_model",
+            description="test_description",
+            instructions="test_instructions",
+            name="test_name",
+            enable_code_interpreter=True,
+            enable_file_search=True,
+            vector_store_id="vector_store1",
+            file_ids=["file1", "file2"],
+            metadata={"key": "value"},
+            kwargs={"temperature": 0.1},
+        )
+
+        assert assistant.model == "test_model"
+        assert assistant.description == "test_description"
+        assert assistant.id == "test_id"
+        assert assistant.instructions == "test_instructions"
+        assert assistant.name == "test_name"
+        assert assistant.tools == [CodeInterpreterTool(type="code_interpreter"), FileSearchTool(type="file_search")]
+        assert assistant.temperature == 0.7
+        assert assistant.top_p == 0.9
+        assert assistant.response_format == AssistantResponseFormat(type="json_object")
+        assert assistant.tool_resources == ToolResources(
+            code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]),
+            file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_assistant_delete_and_recreate(
+    azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env
+):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.assistants = MagicMock()
+        mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant)
+        mock_client.beta.assistants.delete = AsyncMock()
+
+        assistant = await azure_openai_assistant_agent.create_assistant()
+
+        assert assistant is not None
+
+        await azure_openai_assistant_agent.delete()
+
+        assert azure_openai_assistant_agent._is_deleted
+
+        assistant = await azure_openai_assistant_agent.create_assistant()
+
+        assert azure_openai_assistant_agent._is_deleted is False
+
+
+@pytest.mark.asyncio
+async def test_get_assistant_metadata(
+    azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env
+):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.assistants = MagicMock()
+        mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant)
+
+        assistant = await azure_openai_assistant_agent.create_assistant()
+
+        assert assistant.metadata is not None
+
+
+@pytest.mark.asyncio
+async def test_get_agent_tools(azure_openai_assistant_agent, mock_assistant, openai_unit_test_env):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.assistants = MagicMock()
+        mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant)
+
+        func = KernelFunctionFromMethod(method=kernel_function(lambda x: x**2, name="square"), plugin_name="math")
+        azure_openai_assistant_agent.kernel.add_function(plugin_name="test",
function=func) + + assistant = await azure_openai_assistant_agent.create_assistant() + + assert assistant.tools is not None + assert len(assistant.tools) == 2 + tools = azure_openai_assistant_agent.tools + assert len(tools) == 3 + assert tools[0] == {"type": "code_interpreter"} + assert tools[1] == {"type": "file_search"} + assert tools[2]["type"].startswith("function") + + +@pytest.mark.asyncio +async def test_get_assistant_tools_throws_when_no_assistant( + azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env +): + with pytest.raises(AgentInitializationError, match="The assistant has not been created."): + _ = azure_openai_assistant_agent.tools + + +@pytest.mark.asyncio +async def test_create_thread(azure_openai_assistant_agent, mock_thread, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.create = AsyncMock(return_value=mock_thread) + + thread_id = await azure_openai_assistant_agent.create_thread( + code_interpreter_file_ids=["file1", "file2"], + vector_store_id="vector_store1", + messages=[ + ChatMessageContent(role=AuthorRole.USER, content="test message"), + ], + metadata={"key": "value"}, + ) + + assert thread_id == "test_thread_id" + mock_client.beta.threads.create.assert_called_once() + _, called_kwargs = mock_client.beta.threads.create.call_args + assert "tool_resources" in called_kwargs + assert called_kwargs["tool_resources"] == { + "code_interpreter": {"file_ids": ["file1", "file2"]}, + "file_search": {"vector_store_ids": ["vector_store1"]}, + } + assert "messages" in called_kwargs + assert called_kwargs["messages"] == [{"role": "user", "content": {"type": "text", "text": "test message"}}] + assert "metadata" in called_kwargs + assert called_kwargs["metadata"] == {"key": "value"} + + +@pytest.mark.asyncio +async def test_create_thread_throws_with_invalid_role(azure_openai_assistant_agent, mock_thread, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.create = AsyncMock(return_value=mock_thread) + + with pytest.raises( + AgentExecutionError, + match="Invalid message role `tool`", + ): + _ = await azure_openai_assistant_agent.create_thread( + messages=[ChatMessageContent(role=AuthorRole.TOOL, content="test message")] + ) + + +@pytest.mark.asyncio +async def test_delete_thread(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.delete = AsyncMock() + + await azure_openai_assistant_agent.delete_thread("test_thread_id") + + mock_client.beta.threads.delete.assert_called_once_with("test_thread_id") + + +@pytest.mark.asyncio +async def test_delete(azure_openai_assistant_agent, mock_assistant, openai_unit_test_env): + azure_openai_assistant_agent.assistant = mock_assistant + + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.delete = AsyncMock() + + azure_openai_assistant_agent._is_deleted = False + result = await azure_openai_assistant_agent.delete() + + 
assert result == azure_openai_assistant_agent._is_deleted + mock_client.beta.assistants.delete.assert_called_once_with(mock_assistant.id) + + +@pytest.mark.asyncio +async def test_add_file(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.files = MagicMock() + mock_client.files.create = AsyncMock(return_value=MagicMock(id="test_file_id")) + + mock_open_file = mock_open(read_data="file_content") + with patch("builtins.open", mock_open_file): + file_id = await azure_openai_assistant_agent.add_file("test_file_path", "assistants") + + assert file_id == "test_file_id" + mock_open_file.assert_called_once_with("test_file_path", "rb") + mock_client.files.create.assert_called_once() + + +@pytest.mark.asyncio +async def test_add_file_not_found(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.files = MagicMock() + + with patch("builtins.open", mock_open(read_data="file_content")) as mock_open_file: + mock_open_file.side_effect = FileNotFoundError + + with pytest.raises(AgentFileNotFoundException, match="File not found: test_file_path"): + await azure_openai_assistant_agent.add_file("test_file_path", "assistants") + + +@pytest.mark.asyncio +async def test_add_chat_message( + azure_openai_assistant_agent, mock_chat_message_content, mock_message, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.messages = MagicMock() + mock_client.beta.threads.messages.create = AsyncMock(return_value=mock_message) + + result = await azure_openai_assistant_agent.add_chat_message("test_thread_id", mock_chat_message_content) + + assert result.id == "test_message_id" + mock_client.beta.threads.messages.create.assert_called_once_with( + thread_id="test_thread_id", + role="user", + content=[{"type": "text", "text": "test message"}], + metadata={"key": "value"}, + ) + + +@pytest.mark.asyncio +async def test_add_chat_message_invalid_role( + azure_openai_assistant_agent, mock_chat_message_content, openai_unit_test_env +): + mock_chat_message_content.role = AuthorRole.TOOL + + with pytest.raises(AgentExecutionError, match="Invalid message role `tool`"): + await azure_openai_assistant_agent.add_chat_message("test_thread_id", mock_chat_message_content) + + +@pytest.mark.asyncio +async def test_get_thread_messages( + azure_openai_assistant_agent, mock_thread_messages, mock_assistant, openai_unit_test_env +): + async def mock_list_messages(*args, **kwargs) -> Any: + return MagicMock(data=mock_thread_messages) + + async def mock_retrieve_assistant(*args, **kwargs) -> Any: + return mock_assistant + + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.messages = MagicMock() + mock_client.beta.threads.messages.list = AsyncMock(side_effect=mock_list_messages) + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.retrieve = AsyncMock(side_effect=mock_retrieve_assistant) + + messages = [message async for message in azure_openai_assistant_agent.get_thread_messages("test_thread_id")] + + assert len(messages) == 2 + assert 
len(messages[0].items) == 3 + assert isinstance(messages[0].items[0], TextContent) + assert isinstance(messages[0].items[1], AnnotationContent) + assert isinstance(messages[0].items[2], AnnotationContent) + assert messages[0].items[0].text == "Hello" + + assert len(messages[1].items) == 1 + assert isinstance(messages[1].items[0], FileReferenceContent) + assert str(messages[1].items[0].file_id) == "test_file_id" + + +@pytest.mark.asyncio +async def test_invoke( + azure_openai_assistant_agent, + mock_assistant, + mock_run_in_progress, + mock_chat_message_content, + mock_run_step_tool_call, + mock_run_step_message_creation, + mock_message, + mock_function_call_content, + openai_unit_test_env, +): + async def mock_poll_run_status(run, thread_id): + run.update_status() + return run + + def mock_get_function_call_contents(run, function_steps): + function_steps["test"] = mock_function_call_content + return [mock_function_call_content] + + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + mock_client.beta.threads.runs = MagicMock() + mock_client.beta.threads.runs.create = AsyncMock(return_value=mock_run_in_progress) + mock_client.beta.threads.runs.submit_tool_outputs = AsyncMock() + mock_client.beta.threads.runs.steps = MagicMock() + mock_client.beta.threads.runs.steps.list = AsyncMock( + return_value=MagicMock(data=[mock_run_step_tool_call, mock_run_step_message_creation]) + ) + + azure_openai_assistant_agent.assistant = await azure_openai_assistant_agent.create_assistant() + azure_openai_assistant_agent._get_tools = MagicMock(return_value=["tool"]) + azure_openai_assistant_agent._poll_run_status = AsyncMock(side_effect=mock_poll_run_status) + azure_openai_assistant_agent._invoke_function_calls = AsyncMock() + azure_openai_assistant_agent._format_tool_outputs = MagicMock( + return_value=[{"tool_call_id": "id", "output": "output"}] + ) + azure_openai_assistant_agent._generate_function_call_content = MagicMock(return_value=mock_chat_message_content) + azure_openai_assistant_agent._generate_message_content = MagicMock(return_value=mock_chat_message_content) + azure_openai_assistant_agent._retrieve_message = AsyncMock(return_value=mock_message) + azure_openai_assistant_agent._get_function_call_contents = MagicMock( + side_effect=mock_get_function_call_contents + ) + + messages = [message async for message in azure_openai_assistant_agent.invoke("thread_id")] + + assert len(messages) == 2 + assert messages[0].content == "test message" + assert messages[1].content == "test code" + + +@pytest.mark.asyncio +async def test_invoke_assistant_not_initialized_throws(azure_openai_assistant_agent, openai_unit_test_env): + with pytest.raises(AgentInitializationError, match="The assistant has not been created."): + _ = [message async for message in azure_openai_assistant_agent.invoke("thread_id")] + + +@pytest.mark.asyncio +async def test_invoke_agent_deleted_throws(azure_openai_assistant_agent, mock_assistant, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + azure_openai_assistant_agent.assistant = await 
azure_openai_assistant_agent.create_assistant() + azure_openai_assistant_agent._is_deleted = True + + with pytest.raises(AgentInitializationError, match="The assistant has been deleted."): + _ = [message async for message in azure_openai_assistant_agent.invoke("thread_id")] + + +@pytest.mark.asyncio +async def test_invoke_raises_error( + azure_openai_assistant_agent, + mock_assistant, + mock_run_in_progress, + mock_run_step_tool_call, + mock_run_step_message_creation, + openai_unit_test_env, +): + async def mock_poll_run_status(run, thread_id): + run.status = "failed" + return run + + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + mock_client.beta.threads.runs = MagicMock() + mock_client.beta.threads.runs.create = AsyncMock(return_value=mock_run_in_progress) + mock_client.beta.threads.runs.submit_tool_outputs = AsyncMock() + mock_client.beta.threads.runs.steps = MagicMock() + mock_client.beta.threads.runs.steps.list = AsyncMock( + return_value=MagicMock(data=[mock_run_step_tool_call, mock_run_step_message_creation]) + ) + + azure_openai_assistant_agent.assistant = await azure_openai_assistant_agent.create_assistant() + azure_openai_assistant_agent._get_tools = MagicMock(return_value=["tool"]) + azure_openai_assistant_agent._poll_run_status = AsyncMock(side_effect=mock_poll_run_status) + + with pytest.raises( + AgentInvokeError, match="Run failed with status: `failed` for agent `test_name` and thread `thread_id`" + ): + _ = [message async for message in azure_openai_assistant_agent.invoke("thread_id")] + + +def test_format_tool_outputs(azure_openai_assistant_agent, openai_unit_test_env): + chat_history = ChatHistory() + fcc = FunctionCallContent( + id="test", name="test-function", arguments='{"input": "world"}', metadata={"test": "test"} + ) + frc = FunctionResultContent.from_function_call_content_and_result(fcc, 123, {"test2": "test2"}) + chat_history.add_message(message=frc.to_chat_message_content()) + + tool_outputs = azure_openai_assistant_agent._format_tool_outputs(chat_history) + assert tool_outputs[0] == {"tool_call_id": "test", "output": 123} + + +@pytest.mark.asyncio +async def test_invoke_function_calls(azure_openai_assistant_agent, openai_unit_test_env): + chat_history = ChatHistory() + fcc = FunctionCallContent( + id="test", name="test-function", arguments='{"input": "world"}', metadata={"test": "test"} + ) + + with patch( + "semantic_kernel.kernel.Kernel.invoke_function_call", new_callable=AsyncMock + ) as mock_invoke_function_call: + mock_invoke_function_call.return_value = "mocked_result" + results = await azure_openai_assistant_agent._invoke_function_calls([fcc], chat_history) + assert results == ["mocked_result"] + mock_invoke_function_call.assert_called_once_with(function_call=fcc, chat_history=chat_history) + + +def test_get_function_call_contents(azure_openai_assistant_agent, mock_run_required_action, openai_unit_test_env): + result = azure_openai_assistant_agent._get_function_call_contents(run=mock_run_required_action, function_steps={}) + assert result is not None + + +def test_get_function_call_contents_no_action_required( + azure_openai_assistant_agent, mock_run_required_action, openai_unit_test_env +): + mock_run_required_action.required_action = None + result = 
azure_openai_assistant_agent._get_function_call_contents(run=mock_run_required_action, function_steps={}) + assert result == [] + + +@pytest.mark.asyncio +async def test_get_tools(azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + azure_openai_assistant_agent.assistant = await azure_openai_assistant_agent.create_assistant() + tools = azure_openai_assistant_agent._get_tools() + assert tools is not None + + +@pytest.mark.asyncio +async def test_get_tools_no_assistant_returns_empty_list( + azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env +): + with pytest.raises(AgentInitializationError, match="The assistant has not been created."): + _ = azure_openai_assistant_agent._get_tools() + + +def test_generate_message_content(azure_openai_assistant_agent, mock_thread_messages, openai_unit_test_env): + for message in mock_thread_messages: + result = azure_openai_assistant_agent._generate_message_content(assistant_name="test", message=message) + assert result is not None + + +def test_check_if_deleted_throws(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + azure_openai_assistant_agent._is_deleted = True + with pytest.raises(AgentInitializationError, match="The assistant has been deleted."): + azure_openai_assistant_agent._check_if_deleted() + + +def test_get_message_contents(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + message = ChatMessageContent(role=AuthorRole.USER, content="test message") + message.items = [ + ImageContent(role=AuthorRole.ASSISTANT, content="test message", uri="http://image.url"), + TextContent(role=AuthorRole.ASSISTANT, text="test message"), + ] + + result = azure_openai_assistant_agent._get_message_contents(message) + assert result is not None + + +@pytest.mark.asyncio +async def test_retrieve_message(azure_openai_assistant_agent, mock_thread_messages, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + + mock_client.beta.threads.messages.retrieve = AsyncMock(side_effect=mock_thread_messages) + + message = await azure_openai_assistant_agent._retrieve_message( + thread_id="test_thread_id", message_id="test_message_id" + ) + assert message is not None + + +@pytest.mark.asyncio +async def test_retrieve_message_fails_polls_again( + azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env +): + with ( + patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client, + patch("semantic_kernel.agents.open_ai.open_ai_assistant_agent.logger", autospec=True), + ): + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + + mock_client.beta.threads.messages.retrieve = AsyncMock(side_effect=Exception("Unable to retrieve message")) + + message = await azure_openai_assistant_agent._retrieve_message( + thread_id="test_thread_id", message_id="test_message_id" + ) + assert message is None + + +@pytest.mark.asyncio +async def test_poll_run_status( + azure_openai_assistant_agent, mock_run_required_action, mock_run_completed, openai_unit_test_env +): + with 
patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + + mock_client.beta.threads.runs.retrieve = AsyncMock(return_value=mock_run_completed) + + run = await azure_openai_assistant_agent._poll_run_status( + run=mock_run_required_action, thread_id="test_thread_id" + ) + assert run.status == "completed" + + +@pytest.mark.asyncio +async def test_poll_run_status_exception_polls_again( + azure_openai_assistant_agent, mock_run_required_action, mock_run_completed, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + + mock_client.beta.threads.runs.retrieve = AsyncMock( + side_effect=[Exception("Failed to retrieve message"), mock_run_completed] + ) + + run = await azure_openai_assistant_agent._poll_run_status( + run=mock_run_required_action, thread_id="test_thread_id" + ) + assert run.status == "requires_action" + + +def test_generate_function_result_content( + azure_openai_assistant_agent, mock_function_call_content, openai_unit_test_env +): + mock_tool_call = RequiredActionFunctionToolCall( + id="tool_call_id", type="function", function=Function(arguments="{}", name="function_name", output="result") + ) + + message = azure_openai_assistant_agent._generate_function_result_content( + agent_name="test", function_step=mock_function_call_content, tool_call=mock_tool_call + ) + assert message is not None + assert isinstance(message.items[0], FunctionResultContent) + + +def test_generate_function_call_content(azure_openai_assistant_agent, mock_function_call_content, openai_unit_test_env): + message = azure_openai_assistant_agent._generate_function_call_content( + agent_name="test", fccs=[mock_function_call_content] + ) + assert message is not None + assert isinstance(message, ChatMessageContent) + assert isinstance(message.items[0], FunctionCallContent) + + +def test_merge_options(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + merged_options = azure_openai_assistant_agent._merge_options( + ai_model_id="model-id", + enable_json_response=True, + enable_code_interpreter=True, + enable_file_search=True, + max_completion_tokens=150, + parallel_tool_calls_enabled=True, + ) + + expected_options = { + "ai_model_id": "model-id", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "max_completion_tokens": 150, + "max_prompt_tokens": None, + "parallel_tool_calls_enabled": True, + "truncation_message_count": None, + "temperature": 0.7, + "top_p": 0.9, + "metadata": {}, + } + + assert merged_options == expected_options, f"Expected {expected_options}, but got {merged_options}" + + +def test_generate_options(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + options = azure_openai_assistant_agent._generate_options( + ai_model_id="model-id", max_completion_tokens=150, metadata={"key1": "value1"} + ) + + expected_options = { + "max_completion_tokens": 150, + "max_prompt_tokens": None, + "model": "model-id", + "top_p": 0.9, + "response_format": None, + "temperature": 0.7, + "truncation_strategy": None, + "metadata": {"key1": "value1"}, + } + + assert options == expected_options, f"Expected {expected_options}, but got {options}" + + +# endregion diff --git a/python/tests/unit/agents/test_run_polling_options.py 
b/python/tests/unit/agents/test_run_polling_options.py new file mode 100644 index 000000000000..bcec2ed9932e --- /dev/null +++ b/python/tests/unit/agents/test_run_polling_options.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft. All rights reserved. + +from datetime import timedelta + +from semantic_kernel.agents.open_ai.run_polling_options import RunPollingOptions + + +def test_get_polling_interval_below_threshold(): + options = RunPollingOptions() + iteration_count = 1 + expected_interval = timedelta(milliseconds=250) + assert options.get_polling_interval(iteration_count) == expected_interval + + +def test_get_polling_interval_at_threshold(): + options = RunPollingOptions() + iteration_count = 2 + expected_interval = timedelta(milliseconds=250) + assert options.get_polling_interval(iteration_count) == expected_interval + + +def test_get_polling_interval_above_threshold(): + options = RunPollingOptions() + iteration_count = 3 + expected_interval = timedelta(seconds=1) + assert options.get_polling_interval(iteration_count) == expected_interval + + +def test_get_polling_interval_custom_threshold(): + options = RunPollingOptions(run_polling_backoff_threshold=5) + iteration_count = 4 + expected_interval = timedelta(milliseconds=250) + assert options.get_polling_interval(iteration_count) == expected_interval + + iteration_count = 6 + expected_interval = timedelta(seconds=1) + assert options.get_polling_interval(iteration_count) == expected_interval + + +def test_get_polling_interval_custom_intervals(): + options = RunPollingOptions( + run_polling_interval=timedelta(milliseconds=500), run_polling_backoff=timedelta(seconds=2) + ) + iteration_count = 1 + expected_interval = timedelta(milliseconds=500) + assert options.get_polling_interval(iteration_count) == expected_interval + + iteration_count = 3 + expected_interval = timedelta(seconds=2) + assert options.get_polling_interval(iteration_count) == expected_interval diff --git a/python/tests/unit/contents/test_annotation_content.py b/python/tests/unit/contents/test_annotation_content.py new file mode 100644 index 000000000000..5bf07ef10667 --- /dev/null +++ b/python/tests/unit/contents/test_annotation_content.py @@ -0,0 +1,124 @@ +# Copyright (c) Microsoft. All rights reserved. + +from xml.etree.ElementTree import Element + +import pytest + +from semantic_kernel.contents.annotation_content import AnnotationContent + +test_cases = [ + pytest.param(AnnotationContent(file_id="12345"), id="file_id"), + pytest.param(AnnotationContent(quote="This is a quote."), id="quote"), + pytest.param(AnnotationContent(start_index=5, end_index=20), id="indices"), + pytest.param( + AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20), id="all_fields" + ), +] + + +def test_create_empty(): + annotation = AnnotationContent() + assert annotation.file_id is None + assert annotation.quote is None + assert annotation.start_index is None + assert annotation.end_index is None + + +def test_create_file_id(): + annotation = AnnotationContent(file_id="12345") + assert annotation.file_id == "12345" + + +def test_create_quote(): + annotation = AnnotationContent(quote="This is a quote.") + assert annotation.quote == "This is a quote." 
+ + +def test_create_indices(): + annotation = AnnotationContent(start_index=5, end_index=20) + assert annotation.start_index == 5 + assert annotation.end_index == 20 + + +def test_create_all_fields(): + annotation = AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20) + assert annotation.file_id == "12345" + assert annotation.quote == "This is a quote." + assert annotation.start_index == 5 + assert annotation.end_index == 20 + + +def test_update_file_id(): + annotation = AnnotationContent() + annotation.file_id = "12345" + assert annotation.file_id == "12345" + + +def test_update_quote(): + annotation = AnnotationContent() + annotation.quote = "This is a quote." + assert annotation.quote == "This is a quote." + + +def test_update_indices(): + annotation = AnnotationContent() + annotation.start_index = 5 + annotation.end_index = 20 + assert annotation.start_index == 5 + assert annotation.end_index == 20 + + +def test_to_str(): + annotation = AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20) + assert str(annotation) == "AnnotationContent(file_id=12345, quote=This is a quote., start_index=5, end_index=20)" + + +def test_to_element(): + annotation = AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20) + element = annotation.to_element() + assert element.tag == "annotation" + assert element.get("file_id") == "12345" + assert element.get("quote") == "This is a quote." + assert element.get("start_index") == "5" + assert element.get("end_index") == "20" + + +def test_from_element(): + element = Element("AnnotationContent") + element.set("file_id", "12345") + element.set("quote", "This is a quote.") + element.set("start_index", "5") + element.set("end_index", "20") + annotation = AnnotationContent.from_element(element) + assert annotation.file_id == "12345" + assert annotation.quote == "This is a quote." + assert annotation.start_index == 5 + assert annotation.end_index == 20 + + +def test_to_dict(): + annotation = AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20) + assert annotation.to_dict() == { + "file_id": "12345", + "quote": "This is a quote.", + "start_index": 5, + "end_index": 20, + } + + +@pytest.mark.parametrize("annotation", test_cases) +def test_element_roundtrip(annotation): + element = annotation.to_element() + new_annotation = AnnotationContent.from_element(element) + assert new_annotation == annotation + + +@pytest.mark.parametrize("annotation", test_cases) +def test_to_dict_call(annotation): + expected_dict = { + "file_id": annotation.file_id, + "quote": annotation.quote, + "start_index": annotation.start_index, + "end_index": annotation.end_index, + } + assert annotation.to_dict() == expected_dict diff --git a/python/tests/unit/contents/test_file_reference_content.py b/python/tests/unit/contents/test_file_reference_content.py new file mode 100644 index 000000000000..6f1a0cb18ad2 --- /dev/null +++ b/python/tests/unit/contents/test_file_reference_content.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from xml.etree.ElementTree import Element + +import pytest + +from semantic_kernel.contents.file_reference_content import FileReferenceContent + + +def test_create_empty(): + file_reference = FileReferenceContent() + assert file_reference.file_id is None + + +def test_create_file_id(): + file_reference = FileReferenceContent(file_id="12345") + assert file_reference.file_id == "12345" + + +def test_update_file_id(): + file_reference = FileReferenceContent() + file_reference.file_id = "12345" + assert file_reference.file_id == "12345" + + +def test_to_str(): + file_reference = FileReferenceContent(file_id="12345") + assert str(file_reference) == "FileReferenceContent(file_id=12345)" + + +def test_to_element(): + file_reference = FileReferenceContent(file_id="12345") + element = file_reference.to_element() + assert element.tag == "file_reference" + assert element.get("file_id") == "12345" + + +def test_from_element(): + element = Element("FileReferenceContent") + element.set("file_id", "12345") + file_reference = FileReferenceContent.from_element(element) + assert file_reference.file_id == "12345" + + +def test_to_dict_simple(): + file_reference = FileReferenceContent(file_id="12345") + assert file_reference.to_dict() == { + "file_id": "12345", + } + + +@pytest.mark.parametrize( + "file_reference", + [ + pytest.param(FileReferenceContent(file_id="12345"), id="file_id"), + pytest.param(FileReferenceContent(), id="empty"), + ], +) +def test_element_roundtrip(file_reference): + element = file_reference.to_element() + new_file_reference = FileReferenceContent.from_element(element) + assert new_file_reference == file_reference + + +@pytest.mark.parametrize( + "file_reference", + [ + pytest.param(FileReferenceContent(file_id="12345"), id="file_id"), + pytest.param(FileReferenceContent(), id="empty"), + ], +) +def test_to_dict(file_reference): + expected_dict = { + "file_id": file_reference.file_id, + } + assert file_reference.to_dict() == expected_dict From a53b96a1ae066160872b3cad65387442995a788d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Aug 2024 13:51:18 +0000 Subject: [PATCH 5/5] Python: Bump pydantic from 2.8.0 to 2.8.2 in /python (#7394) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [pydantic](https://github.com/pydantic/pydantic) from 2.8.0 to 2.8.2.
Release notes

Sourced from pydantic's releases.

v2.8.2 (2024-07-03)

What's Changed

Fixes

  • Fix issue with assertion caused by pluggable schema validator by @dmontagu in #9838

Full Changelog: https://github.com/pydantic/pydantic/compare/v2.8.1...v2.8.2

v2.8.1 (2024-07-03)

What's Changed: Packaging, Fixes, New Contributors

Full Changelog: https://github.com/pydantic/pydantic/compare/v2.8.0...v2.8.1

Commits
  • 4978ee2 update history
  • 0345929 v bump
  • d390a04 Fix issue with assertion caused by pluggable schema validator (#9838)
  • 040865f update history
  • 5a33e3b bump version
  • 2f9abb2 Bump pydantic-core to v2.20.1, pydantic-extra-types to v2.9.0 (#9832)
  • ce9c5f7 Remove spooky meetings file (#9824)
  • 6bdd6d1 Pedantic typo correction within explanation of Pydantic's root in 'pedantic' ...
  • 701ccde Fix list constraint json schema application (#9818)
  • 2a066a2 Bump ruff to v0.5.0 and pyright to v1.1.369 (#9801)
  • Additional commits viewable in the compare view: https://github.com/pydantic/pydantic/compare/v2.8.0...v2.8.2
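As a quick local sanity check after the bump, a minimal sketch (it assumes the environment has been reinstalled from the updated `poetry.lock`; the expected version strings are taken from this PR's pins):

```python
# Verify that the upgraded packages resolved to the pinned versions.
import pydantic  # pinned to 2.8.2 in this PR's poetry.lock
import pydantic_core  # pinned to 2.20.1 in this PR's poetry.lock

print("pydantic:", pydantic.VERSION)  # expected: 2.8.2
print("pydantic-core:", pydantic_core.__version__)  # expected: 2.20.1
```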

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 python/poetry.lock | 187 +++++++++++++++++++++++----------------------
 1 file changed, 94 insertions(+), 93 deletions(-)

diff --git a/python/poetry.lock b/python/poetry.lock
index 79d25adaa28a..f28c05319510 100644
--- a/python/poetry.lock
+++ b/python/poetry.lock
@@ -4719,18 +4719,18 @@ files = [

 [[package]]
 name = "pydantic"
-version = "2.8.0"
+version = "2.8.2"
 description = "Data validation using Python type hints"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pydantic-2.8.0-py3-none-any.whl", hash = "sha256:ead4f3a1e92386a734ca1411cb25d94147cf8778ed5be6b56749047676d6364e"},
-    {file = "pydantic-2.8.0.tar.gz", hash = "sha256:d970ffb9d030b710795878940bd0489842c638e7252fc4a19c3ae2f7da4d6141"},
+    {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"},
+    {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"},
 ]

 [package.dependencies]
 annotated-types = ">=0.4.0"
-pydantic-core = "2.20.0"
+pydantic-core = "2.20.1"
 typing-extensions = {version = ">=4.6.1", markers = "python_version < \"3.13\""}

 [package.extras]
@@ -4738,99 +4738,100 @@ email = ["email-validator (>=2.0.0)"]

 [[package]]
 name = "pydantic-core"
-version = "2.20.0"
+version = "2.20.1"
 description = "Core functionality for Pydantic validation and serialization"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pydantic_core-2.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e9dcd7fb34f7bfb239b5fa420033642fff0ad676b765559c3737b91f664d4fa9"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:649a764d9b0da29816889424697b2a3746963ad36d3e0968784ceed6e40c6355"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7701df088d0b05f3460f7ba15aec81ac8b0fb5690367dfd072a6c38cf5b7fdb5"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab760f17c3e792225cdaef31ca23c0aea45c14ce80d8eff62503f86a5ab76bff"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1ad5b4d73cde784cf64580166568074f5ccd2548d765e690546cff3d80937d"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b81ec2efc04fc1dbf400647d4357d64fb25543bae38d2d19787d69360aad21c9"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4a9732a5cad764ba37f3aa873dccb41b584f69c347a57323eda0930deec8e10"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6dc85b9e10cc21d9c1055f15684f76fa4facadddcb6cd63abab702eb93c98943"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:21d9f7e24f63fdc7118e6cc49defaab8c1d27570782f7e5256169d77498cf7c7"},
-    {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8b315685832ab9287e6124b5d74fc12dda31e6421d7f6b08525791452844bc2d"},
-    {file = "pydantic_core-2.20.0-cp310-none-win32.whl", hash = "sha256:c3dc8ec8b87c7ad534c75b8855168a08a7036fdb9deeeed5705ba9410721c84d"},
-    {file = "pydantic_core-2.20.0-cp310-none-win_amd64.whl", hash = "sha256:85770b4b37bb36ef93a6122601795231225641003e0318d23c6233c59b424279"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:58e251bb5a5998f7226dc90b0b753eeffa720bd66664eba51927c2a7a2d5f32c"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:78d584caac52c24240ef9ecd75de64c760bbd0e20dbf6973631815e3ef16ef8b"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5084ec9721f82bef5ff7c4d1ee65e1626783abb585f8c0993833490b63fe1792"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d0f52684868db7c218437d260e14d37948b094493f2646f22d3dda7229bbe3f"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1def125d59a87fe451212a72ab9ed34c118ff771e5473fef4f2f95d8ede26d75"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b34480fd6778ab356abf1e9086a4ced95002a1e195e8d2fd182b0def9d944d11"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d42669d319db366cb567c3b444f43caa7ffb779bf9530692c6f244fc635a41eb"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53b06aea7a48919a254b32107647be9128c066aaa6ee6d5d08222325f25ef175"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1f038156b696a1c39d763b2080aeefa87ddb4162c10aa9fabfefffc3dd8180fa"},
-    {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3f0f3a4a23717280a5ee3ac4fb1f81d6fde604c9ec5100f7f6f987716bb8c137"},
-    {file = "pydantic_core-2.20.0-cp311-none-win32.whl", hash = "sha256:316fe7c3fec017affd916a0c83d6f1ec697cbbbdf1124769fa73328e7907cc2e"},
-    {file = "pydantic_core-2.20.0-cp311-none-win_amd64.whl", hash = "sha256:2d06a7fa437f93782e3f32d739c3ec189f82fca74336c08255f9e20cea1ed378"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d6f8c49657f3eb7720ed4c9b26624063da14937fc94d1812f1e04a2204db3e17"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad1bd2f377f56fec11d5cfd0977c30061cd19f4fa199bf138b200ec0d5e27eeb"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed741183719a5271f97d93bbcc45ed64619fa38068aaa6e90027d1d17e30dc8d"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d82e5ed3a05f2dcb89c6ead2fd0dbff7ac09bc02c1b4028ece2d3a3854d049ce"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2ba34a099576234671f2e4274e5bc6813b22e28778c216d680eabd0db3f7dad"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:879ae6bb08a063b3e1b7ac8c860096d8fd6b48dd9b2690b7f2738b8c835e744b"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0eefc7633a04c0694340aad91fbfd1986fe1a1e0c63a22793ba40a18fcbdc8"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73deadd6fd8a23e2f40b412b3ac617a112143c8989a4fe265050fd91ba5c0608"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:35681445dc85446fb105943d81ae7569aa7e89de80d1ca4ac3229e05c311bdb1"},
-    {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0f6dd3612a3b9f91f2e63924ea18a4476656c6d01843ca20a4c09e00422195af"},
-    {file = "pydantic_core-2.20.0-cp312-none-win32.whl", hash = "sha256:7e37b6bb6e90c2b8412b06373c6978d9d81e7199a40e24a6ef480e8acdeaf918"},
-    {file = "pydantic_core-2.20.0-cp312-none-win_amd64.whl", hash = "sha256:7d4df13d1c55e84351fab51383520b84f490740a9f1fec905362aa64590b7a5d"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:d43e7ab3b65e4dc35a7612cfff7b0fd62dce5bc11a7cd198310b57f39847fd6c"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6a24d7b5893392f2b8e3b7a0031ae3b14c6c1942a4615f0d8794fdeeefb08b"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2f13c3e955a087c3ec86f97661d9f72a76e221281b2262956af381224cfc243"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72432fd6e868c8d0a6849869e004b8bcae233a3c56383954c228316694920b38"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d70a8ff2d4953afb4cbe6211f17268ad29c0b47e73d3372f40e7775904bc28fc"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e49524917b8d3c2f42cd0d2df61178e08e50f5f029f9af1f402b3ee64574392"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4f0f71653b1c1bad0350bc0b4cc057ab87b438ff18fa6392533811ebd01439c"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:16197e6f4fdecb9892ed2436e507e44f0a1aa2cff3b9306d1c879ea2f9200997"},
-    {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:763602504bf640b3ded3bba3f8ed8a1cc2fc6a87b8d55c1c5689f428c49c947e"},
-    {file = "pydantic_core-2.20.0-cp313-none-win32.whl", hash = "sha256:a3f243f318bd9523277fa123b3163f4c005a3e8619d4b867064de02f287a564d"},
-    {file = "pydantic_core-2.20.0-cp313-none-win_amd64.whl", hash = "sha256:03aceaf6a5adaad3bec2233edc5a7905026553916615888e53154807e404545c"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d6f2d8b8da1f03f577243b07bbdd3412eee3d37d1f2fd71d1513cbc76a8c1239"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a272785a226869416c6b3c1b7e450506152d3844207331f02f27173562c917e0"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efbb412d55a4ffe73963fed95c09ccb83647ec63b711c4b3752be10a56f0090b"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e4f46189d8740561b43655263a41aac75ff0388febcb2c9ec4f1b60a0ec12f3"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3df115f4a3c8c5e4d5acf067d399c6466d7e604fc9ee9acbe6f0c88a0c3cf"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a340d2bdebe819d08f605e9705ed551c3feb97e4fd71822d7147c1e4bdbb9508"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:616b9c2f882393d422ba11b40e72382fe975e806ad693095e9a3b67c59ea6150"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25c46bb2ff6084859bbcfdf4f1a63004b98e88b6d04053e8bf324e115398e9e7"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23425eccef8f2c342f78d3a238c824623836c6c874d93c726673dbf7e56c78c0"},
-    {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:52527e8f223ba29608d999d65b204676398009725007c9336651c2ec2d93cffc"},
-    {file = "pydantic_core-2.20.0-cp38-none-win32.whl", hash = "sha256:1c3c5b7f70dd19a6845292b0775295ea81c61540f68671ae06bfe4421b3222c2"},
-    {file = "pydantic_core-2.20.0-cp38-none-win_amd64.whl", hash = "sha256:8093473d7b9e908af1cef30025609afc8f5fd2a16ff07f97440fd911421e4432"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ee7785938e407418795e4399b2bf5b5f3cf6cf728077a7f26973220d58d885cf"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e75794883d635071cf6b4ed2a5d7a1e50672ab7a051454c76446ef1ebcdcc91"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:344e352c96e53b4f56b53d24728217c69399b8129c16789f70236083c6ceb2ac"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:978d4123ad1e605daf1ba5e01d4f235bcf7b6e340ef07e7122e8e9cfe3eb61ab"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c05eaf6c863781eb834ab41f5963604ab92855822a2062897958089d1335dad"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc7e43b4a528ffca8c9151b6a2ca34482c2fdc05e6aa24a84b7f475c896fc51d"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658287a29351166510ebbe0a75c373600cc4367a3d9337b964dada8d38bcc0f4"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1dacf660d6de692fe351e8c806e7efccf09ee5184865893afbe8e59be4920b4a"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e147fc6e27b9a487320d78515c5f29798b539179f7777018cedf51b7749e4f4"},
-    {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c867230d715a3dd1d962c8d9bef0d3168994ed663e21bf748b6e3a529a129aab"},
-    {file = "pydantic_core-2.20.0-cp39-none-win32.whl", hash = "sha256:22b813baf0dbf612752d8143a2dbf8e33ccb850656b7850e009bad2e101fc377"},
-    {file = "pydantic_core-2.20.0-cp39-none-win_amd64.whl", hash = "sha256:3a7235b46c1bbe201f09b6f0f5e6c36b16bad3d0532a10493742f91fbdc8035f"},
-    {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cafde15a6f7feaec2f570646e2ffc5b73412295d29134a29067e70740ec6ee20"},
-    {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2aec8eeea0b08fd6bc2213d8e86811a07491849fd3d79955b62d83e32fa2ad5f"},
-    {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840200827984f1c4e114008abc2f5ede362d6e11ed0b5931681884dd41852ff1"},
-    {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ea1d8b7df522e5ced34993c423c3bf3735c53df8b2a15688a2f03a7d678800"},
-    {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5b8376a867047bf08910573deb95d3c8dfb976eb014ee24f3b5a61ccc5bee1b"},
-    {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d08264b4460326cefacc179fc1411304d5af388a79910832835e6f641512358b"},
-    {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7a3639011c2e8a9628466f616ed7fb413f30032b891898e10895a0a8b5857d6c"},
-    {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:05e83ce2f7eba29e627dd8066aa6c4c0269b2d4f889c0eba157233a353053cea"},
-    {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:603a843fea76a595c8f661cd4da4d2281dff1e38c4a836a928eac1a2f8fe88e4"},
-    {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac76f30d5d3454f4c28826d891fe74d25121a346c69523c9810ebba43f3b1cec"},
-    {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e3b1d4b1b3f6082849f9b28427ef147a5b46a6132a3dbaf9ca1baa40c88609"},
-    {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2761f71faed820e25ec62eacba670d1b5c2709bb131a19fcdbfbb09884593e5a"},
-    {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0586cddbf4380e24569b8a05f234e7305717cc8323f50114dfb2051fcbce2a3"},
-    {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b8c46a8cf53e849eea7090f331ae2202cd0f1ceb090b00f5902c423bd1e11805"},
-    {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b4a085bd04af7245e140d1b95619fe8abb445a3d7fdf219b3f80c940853268ef"},
-    {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:116b326ac82c8b315e7348390f6d30bcfe6e688a7d3f1de50ff7bcc2042a23c2"},
-    {file = "pydantic_core-2.20.0.tar.gz", hash = "sha256:366be8e64e0cb63d87cf79b4e1765c0703dd6313c729b22e7b9e378db6b96877"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"},
+    {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"},
+    {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"},
+    {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"},
+    {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"},
+    {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"},
+    {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"},
+    {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"},
+    {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"},
+    {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"},
+    {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"},
+    {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"},
+    {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"},
+    {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"},
+    {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"},
+    {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"},
+    {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"},
+    {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"},
+    {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"},
+    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"},
+    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"},
+    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"},
+    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"},
+    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"},
+    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"},
+    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"},
+    {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"},
+    {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"},
+    {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"},
+    {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"},
+    {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"},
+    {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"},
+    {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"},
+    {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"},
+    {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"},
+    {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"},
 ]

 [package.dependencies]