From ec02d89ed7419a229d6ee18ac3896b6311db165e Mon Sep 17 00:00:00 2001
From: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
Date: Thu, 22 Feb 2024 08:44:02 -0500
Subject: [PATCH] Python: implement Chat history, and refactor prompt config/prompt config template (#5023)

### Motivation and Context

This PR addresses a few of our remaining urgent work items: #4856, #4630.

### Description

In this PR:

- The prompt template/prompt template config is refactored to be similar to the dotnet versions.
- The KernelPromptTemplate is introduced as the default prompt template (it will need to be made more dynamic in the future when we have other prompt templates, like handlebars or jinja2).
- The methods related to `create_semantic_function` were removed and in their place a method `create_function_from_prompt` was added.
- ChatHistory was introduced to take over the role of ChatMessage. ChatHistory is passed into the complete chat/text methods.
- As of these latest changes, all unit and integration tests are passing.
- New kernel examples were added to exercise this new code.

### Contribution Checklist

- [X] The code builds clean without any errors or warnings
- [X] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations
- [X] All unit tests pass, and I have added new tests where possible
- [ ] I didn't break anyone :smile:

---------

Co-authored-by: Eduard van Valkenburg
---
 python/README.md | 18 +-
 python/notebooks/00-getting-started.ipynb | 75 +-
 .../01-basic-loading-the-kernel.ipynb | 81 +-
 .../02-running-prompts-from-file.ipynb | 87 +-
 ....ipynb => 03-prompt-function-inline.ipynb} | 154 +-
 ...t.ipynb => 04-kernel-arguments-chat.ipynb} | 130 +-
 python/notebooks/05-using-the-planner.ipynb | 119 +-
 .../notebooks/06-memory-and-embeddings.ipynb | 1266 ++++++++++-------
 .../notebooks/08-native-function-inline.ipynb | 413 ++++--
 .../notebooks/09-groundedness-checking.ipynb | 77 +-
 .../10-multiple-results-per-prompt.ipynb | 815 +++++------
 .../notebooks/11-streaming-completions.ipynb | 669 ++++-----
 .../kernel-syntax-examples/action_planner.py | 4 +-
 .../azure_chat_gpt_api.py | 47 +-
 .../azure_chat_gpt_with_data_api.py | 37 +-
 ...chat_gpt_with_data_api_function_calling.py | 46 +-
 ...re_chat_gpt_with_data_api_vector_search.py | 48 +-
 .../bing_plugin_examples.py | 116 ++
 .../bing_search_plugin.py | 4 +-
 python/samples/kernel-syntax-examples/chat.py | 67 +-
 .../kernel-syntax-examples/chat_gpt_api.py | 64 +-
 .../chat_gpt_api_function_calling.py | 66 +-
 .../configuring_prompts.py | 73 +
 .../google_palm_chat_with_memory.py | 76 +-
 .../google_palm_chat_with_plugin.py | 46 +-
 .../google_palm_text_completion.py | 2 +-
 .../google_search_plugin.py | 4 +-
 .../kernel-syntax-examples/grounded.py | 18 +-
 .../samples/kernel-syntax-examples/memory.py | 23 +-
 .../open_ai_chat_with_memory.py | 155 ++
 .../openai_function_calling.py | 84 +-
 .../openai_logit_bias.py | 118 +-
 .../plugins_from_dir.py | 61 +-
 .../self-critique_rag.py | 18 +-
 .../sequential_planner.py | 9 +-
 .../kernel-syntax-examples/setup_logging.py | 13 +-
 .../template_language.py | 55 +
 python/semantic_kernel/__init__.py | 6 +-
 .../ai/chat_completion_client_base.py | 14 +-
 .../services/gp_chat_completion.py | 27 +-
 .../services/hf_text_completion.py | 1 +
 .../ollama/services/ollama_chat_completion.py | 31 +-
.../contents/open_ai_chat_message_content.py | 5 + .../services/open_ai_chat_completion_base.py | 19 +- .../connectors/ai/open_ai/utils.py | 81 +- .../ai/prompt_execution_settings.py | 2 +- .../connectors/memory/redis/README.md | 10 +- .../contents/kernel_content.py | 2 +- .../conversation_summary_plugin.py | 23 +- .../functions/function_result.py | 4 + .../functions/kernel_function.py | 242 +++- .../functions/kernel_function_decorator.py | 1 + .../functions/kernel_function_metadata.py | 14 +- .../functions/kernel_parameter_metadata.py | 2 +- .../functions/kernel_plugin.py | 33 +- .../functions/kernel_plugin_collection.py | 26 +- .../functions/prompt_rendering_result.py | 23 + python/semantic_kernel/kernel.py | 164 ++- .../models/ai/chat_completion/chat_history.py | 235 +++ .../planners/action_planner/action_planner.py | 17 +- .../semantic_kernel/planners/basic_planner.py | 33 +- python/semantic_kernel/planners/plan.py | 16 +- .../Plugins/SequentialPlanning/config.json | 4 +- .../sequential_planner/sequential_planner.py | 44 +- .../Plugins/StepwiseStep/config.json | 17 +- .../stepwise_planner/stepwise_planner.py | 23 +- .../prompt_template/chat_prompt_template.py | 74 +- .../prompt_template/input_variable.py | 15 + .../prompt_template/kernel_prompt_template.py | 163 +++ .../prompt_template/prompt_template_base.py | 7 +- .../prompt_template/prompt_template_config.py | 151 +- python/semantic_kernel/services/__init__.py | 1 + .../services/ai_service_selector.py | 4 +- .../template_engine/blocks/code_block.py | 4 +- .../template_engine/blocks/var_block.py | 7 +- .../protocols/prompt_templating_engine.py | 14 +- python/semantic_kernel/utils/chat.py | 28 + python/semantic_kernel/utils/settings.py | 2 +- python/tests/conftest.py | 2 +- .../tests/integration/completions/conftest.py | 26 +- .../test_azure_oai_chat_service.py | 43 +- .../test_azure_oai_chat_service_extensions.py | 65 +- .../test_azure_oai_text_service.py | 44 +- .../test_conversation_summary_plugin.py | 59 +- .../completions/test_gp_chat_service.py | 47 +- .../completions/test_gp_text_service.py | 80 +- .../completions/test_oai_chat_service.py | 69 +- .../completions/test_oai_text_service.py | 70 +- .../test_azure_oai_embedding_service.py | 35 +- .../embeddings/test_gp_embedding_service.py | 4 +- .../embeddings/test_hf_embedding_service.py | 11 +- .../embeddings/test_oai_embedding_service.py | 21 +- .../integration/fakes/writer_plugin_fake.py | 23 +- .../test_sequential_plan_parser.py | 6 +- .../test_sequential_planner.py | 33 +- .../stepwise_planner/test_stepwise_planner.py | 31 +- .../prompt_template_e2e_tests.py | 2 +- .../test_hf_local_text_completions.py | 40 +- .../services/test_palm_chat_completion.py | 11 +- .../services/test_ollama_chat_completion.py | 17 +- .../services/test_ollama_test_completion.py | 2 +- .../services/test_azure_chat_completion.py | 46 +- .../unit/core_plugins/test_text_plugin.py | 4 +- .../test_kernel_function_decorators.py | 2 - .../test_kernel_function_metadata.py | 20 +- .../unit/functions/test_kernel_plugins.py | 80 +- .../unit/functions/test_prompt_templates.py | 257 ++-- .../unit/kernel/test_kernel_invoke_filters.py | 2 +- .../kernel/test_kernel_service_management.py | 2 + .../kernel_extensions/test_import_plugins.py | 29 +- .../ai/chat_completion/test_chat_history.py | 173 +++ .../ai/chat_completion/test_chat_message.py | 30 +- .../action_planner/test_action_planner.py | 16 +- .../test_sequential_planner.py | 20 +- .../test_sequential_planner_extensions.py | 10 +- 
.../test_sequential_planner_parser.py | 6 +- .../tests/unit/planners/test_plan_creation.py | 20 +- .../template_engine/blocks/test_code_block.py | 16 +- python/tests/unit/test_serialization.py | 8 +- .../unit/text/test_function_extension.py | 32 +- 120 files changed, 5242 insertions(+), 3019 deletions(-) rename python/notebooks/{03-semantic-function-inline.ipynb => 03-prompt-function-inline.ipynb} (62%) rename python/notebooks/{04-context-variables-chat.ipynb => 04-kernel-arguments-chat.ipynb} (54%) create mode 100644 python/samples/kernel-syntax-examples/bing_plugin_examples.py create mode 100644 python/samples/kernel-syntax-examples/configuring_prompts.py create mode 100644 python/samples/kernel-syntax-examples/open_ai_chat_with_memory.py create mode 100644 python/samples/kernel-syntax-examples/template_language.py create mode 100644 python/semantic_kernel/functions/prompt_rendering_result.py create mode 100644 python/semantic_kernel/models/ai/chat_completion/chat_history.py create mode 100644 python/semantic_kernel/prompt_template/input_variable.py create mode 100644 python/semantic_kernel/prompt_template/kernel_prompt_template.py create mode 100644 python/semantic_kernel/services/__init__.py create mode 100644 python/semantic_kernel/utils/chat.py create mode 100644 python/tests/unit/models/ai/chat_completion/test_chat_history.py diff --git a/python/README.md b/python/README.md index f2f5f11ad357..175a7b986cda 100644 --- a/python/README.md +++ b/python/README.md @@ -33,11 +33,25 @@ kernel = sk.Kernel() # Prepare OpenAI service using credentials stored in the `.env` file api_key, org_id = sk.openai_settings_from_dot_env() -kernel.add_chat_service("chat-gpt", OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id)) +kernel.add_service( + OpenAIChatCompletion( + service_id="chat-gpt", + ai_model_id="gpt-3.5-turbo", + api_key=api_key, + org_id=org_id + ) +) # Alternative using Azure: # deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env() -# kernel.add_chat_service("dv", AzureChatCompletion(deployment, endpoint, api_key)) +# kernel.add_service( +# AzureChatCompletion( +# service_id="dv", +# deployment_name=deployment, +# base_url=endpoint, +# api_key=api_key +# ) +# ) # Wrap your prompt in a function prompt = kernel.create_semantic_function(""" diff --git a/python/notebooks/00-getting-started.ipynb b/python/notebooks/00-getting-started.ipynb index fae229aff099..9932aa6e7f05 100644 --- a/python/notebooks/00-getting-started.ipynb +++ b/python/notebooks/00-getting-started.ipynb @@ -31,20 +31,10 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Option 1: using OpenAI\n", - "\n", - "**Step 2**: Add your [OpenAI Key](https://openai.com/product/) key to a `.env` file in the same folder (org Id only if you have multiple orgs):\n", - "\n", - "```\n", - "OPENAI_API_KEY=\"sk-...\"\n", - "OPENAI_ORG_ID=\"\"\n", - "```\n", - "\n", - "Use \"keyword arguments\" to instantiate an OpenAI Chat Completion service and add it to the kernel:" + "### Configure the service you'd like to use via the `Service` Enum." 
] }, { @@ -53,14 +43,10 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "from services import Service\n", "\n", - "api_key, org_id = sk.openai_settings_from_dot_env()\n", - "\n", - "kernel.add_chat_service(\n", - " \"chat-gpt\",\n", - " OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", - ")" + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" ] }, { @@ -68,6 +54,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "## Option 1: using OpenAI\n", + "\n", + "**Step 2**: Add your [OpenAI Key](https://openai.com/product/) key to a `.env` file in the same folder (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "```\n", + "\n", + "Use \"keyword arguments\" to instantiate an OpenAI Chat Completion service and add it to the kernel:\n", + "\n", "## Option 2: using Azure OpenAI\n", "\n", "**Step 2**: Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to a `.env` file in the same folder:\n", @@ -87,14 +84,23 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - "deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " api_key, org_id = sk.openai_settings_from_dot_env()\n", + " service_id = \"chat-gpt\"\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", - "kernel.add_chat_service(\n", - " \"chat_completion\",\n", - " AzureChatCompletion(deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", - ")" + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"chat_completion\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", + " )" ] }, { @@ -113,10 +119,27 @@ "metadata": {}, "outputs": [], "source": [ - "plugin = kernel.import_semantic_plugin_from_directory(\"../../samples/plugins\", \"FunPlugin\")\n", + "try:\n", + " plugin = kernel.import_plugin_from_prompt_directory(service_id, \"../../samples/plugins\", \"FunPlugin\")\n", + "except ValueError as e:\n", + " # Don't fail if we try to add the plug in again\n", + " # This is just for the sake of the example\n", + " # Once the plugin has been added to the kernel it will fail\n", + " # to add it again, if this cell is run multiple times without\n", + " # restarting the kernel\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "joke_function = plugin[\"Joke\"]\n", "\n", - "print(await joke_function(\"time travel to dinosaur age\"))" + "joke = await kernel.invoke(joke_function, sk.KernelArguments(input=\"time travel to dinosaur age\", style=\"super silly\"))\n", + "print(joke)" ] } ], diff --git a/python/notebooks/01-basic-loading-the-kernel.ipynb 
b/python/notebooks/01-basic-loading-the-kernel.ipynb index 01ed9cfff3f6..aabef3153731 100644 --- a/python/notebooks/01-basic-loading-the-kernel.ipynb +++ b/python/notebooks/01-basic-loading-the-kernel.ipynb @@ -34,29 +34,7 @@ "metadata": {}, "outputs": [], "source": [ - "import semantic_kernel as sk\n", - "from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureChatCompletion,\n", - " OpenAIChatCompletion,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can instantiate the kernel in a few ways, depending on your use case." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# Simple instance\n", - "kernel_1 = sk.Kernel()" + "import semantic_kernel as sk" ] }, { @@ -66,7 +44,7 @@ "source": [ "When using the kernel for AI requests, the kernel needs some settings like URL and credentials to the AI models.\n", "\n", - "The SDK currently supports OpenAI and Azure OpenAI, other services will be added over time.\n", + "The SDK currently supports OpenAI and Azure OpenAI, among other connectors.\n", "\n", "If you need an Azure OpenAI key, go [here](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/quickstart?pivots=rest-api)." ] @@ -77,39 +55,10 @@ "metadata": {}, "outputs": [], "source": [ - "kernel = sk.Kernel()\n", + "from services import Service\n", "\n", - "kernel.add_chat_service( # We are adding a text service\n", - " \"Azure_curie\", # The alias we can use in prompt templates' config.json\n", - " AzureChatCompletion(\n", - " deployment_name=\"my-finetuned-Curie\", # Azure OpenAI *Deployment name*\n", - " endpoint=\"https://contoso.openai.azure.com/\", # Azure OpenAI *Endpoint*\n", - " api_key=\"...your Azure OpenAI Key...\", # Azure OpenAI *Key*\n", - " ),\n", - ")\n", - "\n", - "kernel.add_chat_service( # We are adding a text service\n", - " \"OpenAI_chat_gpt\", # The alias we can use in prompt templates' config.json\n", - " OpenAIChatCompletion(\n", - " ai_model_id=\"gpt-3.5-turbo\", # OpenAI Model Name\n", - " api_key=\"...your OpenAI API Key...\", # OpenAI API key\n", - " org_id=\"...your OpenAI Org ID...\", # *optional* OpenAI Organization ID\n", - " ),\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When working with multiple services and multiple models, the **first service** defined\n", - "is also the \"**default**\" used in these scenarios:\n", - "\n", - "* a prompt configuration doesn't specify which AI service to use\n", - "* a prompt configuration requires a service unknown to the kernel\n", - "\n", - "The default can be set and changed programmatically:" + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" ] }, { @@ -118,7 +67,25 @@ "metadata": {}, "outputs": [], "source": [ - "kernel.set_default_text_completion_service(\"Azure_curie\")" + "kernel = sk.Kernel()\n", + "\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", + " api_key, org_id = sk.openai_settings_from_dot_env()\n", + " service_id = \"oai_chat_gpt\"\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import 
AzureChatCompletion\n", + "\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_chat_completion\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", + " )" ] }, { diff --git a/python/notebooks/02-running-prompts-from-file.ipynb b/python/notebooks/02-running-prompts-from-file.ipynb index 24ca926125c4..27f17243f882 100644 --- a/python/notebooks/02-running-prompts-from-file.ipynb +++ b/python/notebooks/02-running-prompts-from-file.ipynb @@ -6,10 +6,10 @@ "id": "692e361b", "metadata": {}, "source": [ - "# How to run a semantic plugins from file\n", - "Now that you're familiar with Kernel basics, let's see how the kernel allows you to run Semantic Plugins and Semantic Functions stored on disk. \n", + "# How to run a prompt plugins from file\n", + "Now that you're familiar with Kernel basics, let's see how the kernel allows you to run Prompt Plugins and Prompt Functions stored on disk. \n", "\n", - "A Semantic Plugin is a collection of Semantic Functions, where each function is defined with natural language that can be provided with a text file. \n", + "A Prompt Plugin is a collection of Semantic Functions, where each function is defined with natural language that can be provided with a text file. \n", "\n", "Refer to our [glossary](https://github.com/microsoft/semantic-kernel/blob/main/docs/GLOSSARY.md) for an in-depth guide to the terms.\n", "\n", @@ -60,14 +60,30 @@ "```\n", "{\n", " \"schema\": 1,\n", - " \"type\": \"completion\",\n", " \"description\": \"Generate a funny joke\",\n", - " \"completion\": {\n", - " \"max_tokens\": 500,\n", - " \"temperature\": 0.5,\n", - " \"top_p\": 0.5\n", - " }\n", + " \"execution_settings\": {\n", + " \"default\": {\n", + " \"max_tokens\": 1000,\n", + " \"temperature\": 0.9,\n", + " \"top_p\": 0.0,\n", + " \"presence_penalty\": 0.0,\n", + " \"frequency_penalty\": 0.0\n", + " }\n", + " },\n", + " \"input_variables\": [\n", + " {\n", + " \"name\": \"input\",\n", + " \"description\": \"Joke subject\",\n", + " \"default\": \"\"\n", + " },\n", + " {\n", + " \"name\": \"style\",\n", + " \"description\": \"Give a hint about the desired joke style\",\n", + " \"default\": \"\"\n", + " }\n", + " ]\n", "}\n", + "\n", "```" ] }, @@ -77,7 +93,7 @@ "id": "384ff07f", "metadata": {}, "source": [ - "Given a semantic function defined by these files, this is how to load and use a file based semantic function.\n", + "Given a prompt function defined by these files, this is how to load and use a file based prompt function.\n", "\n", "Load and configure the kernel, as usual, loading also the AI service settings defined in the [Setup notebook](00-getting-started.ipynb):" ] @@ -92,6 +108,19 @@ "!python -m pip install semantic-kernel==0.5.1.dev0" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "fdb865a7", + "metadata": {}, + "outputs": [], + "source": [ + "from services import Service\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" + ] + }, { "cell_type": "code", "execution_count": null, @@ -100,26 +129,26 @@ "outputs": [], "source": [ "import semantic_kernel as sk\n", - "from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureChatCompletion,\n", - " OpenAIChatCompletion,\n", - ")\n", "\n", "kernel = sk.Kernel()\n", "\n", - "useAzureOpenAI = False\n", + "service_id = None\n", + "if selectedService 
== Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - "# Configure AI service used by the kernel\n", - "if useAzureOpenAI:\n", - " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", - " azure_chat_service = AzureChatCompletion(\n", - " deployment_name=\"turbo\", endpoint=endpoint, api_key=api_key\n", - " ) # set the deployment name to the value of your chat model\n", - " kernel.add_chat_service(\"chat_completion\", azure_chat_service)\n", - "else:\n", " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_chat_service = OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id)\n", - " kernel.add_chat_service(\"chat-gpt\", oai_chat_service)" + " service_id = \"oai_chat_gpt\"\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_chat_completion\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", + " )" ] }, { @@ -141,7 +170,7 @@ "# note: using plugins from the samples folder\n", "plugins_directory = \"../../samples/plugins\"\n", "\n", - "funFunctions = kernel.import_semantic_plugin_from_directory(plugins_directory, \"FunPlugin\")\n", + "funFunctions = kernel.import_plugin_from_prompt_directory(service_id, plugins_directory, \"FunPlugin\")\n", "\n", "jokeFunction = funFunctions[\"Joke\"]" ] @@ -162,7 +191,7 @@ "metadata": {}, "outputs": [], "source": [ - "result = await jokeFunction.invoke(\"travel to dinosaur age\")\n", + "result = await kernel.invoke(jokeFunction, sk.KernelArguments(input=\"travel to dinosaur age\", style=\"silly\"))\n", "print(result)" ] }, @@ -172,7 +201,7 @@ "id": "2281a1fc", "metadata": {}, "source": [ - "Great, now that you know how to load a plugin from disk, let's show how you can [create and run a semantic function inline.](./03-semantic-function-inline.ipynb)" + "Great, now that you know how to load a plugin from disk, let's show how you can [create and run a prompt function inline.](./03-prompt-function-inline.ipynb)" ] } ], @@ -192,7 +221,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/python/notebooks/03-semantic-function-inline.ipynb b/python/notebooks/03-prompt-function-inline.ipynb similarity index 62% rename from python/notebooks/03-semantic-function-inline.ipynb rename to python/notebooks/03-prompt-function-inline.ipynb index dbaf6c0ebac8..b3dbc9663948 100644 --- a/python/notebooks/03-semantic-function-inline.ipynb +++ b/python/notebooks/03-prompt-function-inline.ipynb @@ -6,16 +6,9 @@ "id": "3c93ac5b", "metadata": {}, "source": [ - "# Running Semantic Functions Inline" + "# Running Prompt Functions Inline" ] }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "ebcabb91", - "metadata": {}, - "source": [] - }, { "attachments": {}, "cell_type": "markdown", @@ -58,6 +51,19 @@ "!python -m pip install semantic-kernel==0.5.1.dev0" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "68b770df", + "metadata": {}, + "outputs": [], + "source": [ + "from services import 
Service\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" + ] + }, { "cell_type": "code", "execution_count": null, @@ -66,26 +72,30 @@ "outputs": [], "source": [ "import semantic_kernel as sk\n", - "from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureTextCompletion,\n", - " OpenAITextCompletion,\n", - ")\n", + "import semantic_kernel.connectors.ai.open_ai as sk_oai\n", + "from semantic_kernel.prompt_template.input_variable import InputVariable\n", "\n", "kernel = sk.Kernel()\n", "\n", - "useAzureOpenAI = False\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAITextCompletion\n", "\n", - "# Configure AI service used by the kernel\n", - "if useAzureOpenAI:\n", - " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", - " azure_text_service = AzureTextCompletion(\n", - " deployment_name=\"text\", endpoint=endpoint, api_key=api_key\n", - " ) # set the deployment name to the value of your text model\n", - " kernel.add_text_completion_service(\"dv\", azure_text_service)\n", - "else:\n", " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_text_service = OpenAITextCompletion(ai_model_id=\"gpt-3.5-turbo-instruct\", api_key=api_key, org_id=org_id)\n", - " kernel.add_text_completion_service(\"dv\", oai_text_service)" + " service_id = \"oai_text_completion\"\n", + " kernel.add_service(\n", + " OpenAITextCompletion(\n", + " service_id=service_id, ai_model_id=\"gpt-3.5-turbo-instruct\", api_key=api_key, org_id=org_id\n", + " ),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureTextCompletion\n", + "\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_text_completion\"\n", + " kernel.add_service(\n", + " AzureTextCompletion(service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", + " )" ] }, { @@ -110,7 +120,32 @@ "Summarize the content above.\n", "\"\"\"\n", "\n", - "summarize = kernel.create_semantic_function(prompt_template=prompt, max_tokens=2000, temperature=0.2, top_p=0.5)" + "if selectedService == Service.OpenAI:\n", + " execution_settings = sk_oai.OpenAITextPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=\"gpt-3.5-turbo-instruct\",\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " execution_settings = sk_oai.OpenAITextPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=deployment,\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "\n", + "prompt_template_config = sk.PromptTemplateConfig(\n", + " template=prompt,\n", + " name=\"summarize\",\n", + " template_format=\"semantic-kernel\",\n", + " input_variables=[\n", + " InputVariable(name=\"input\", description=\"The user input\", is_required=True),\n", + " ],\n", + " execution_settings=execution_settings,\n", + ")\n", + "\n", + "summarize = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config)" ] }, { @@ -158,7 +193,7 @@ "metadata": {}, "outputs": [], "source": [ - "summary = await summarize(input_text)\n", + "summary = await kernel.invoke(summarize, sk.KernelArguments(input=input_text))\n", "\n", "print(summary)" ] @@ -178,7 +213,7 @@ "id": "29b59b28", "metadata": {}, "source": [ - "You 
can also use chat completion models (like `gpt-35-turbo` and `gpt4`) for creating plugins. Normally you would have to tweak the API to accommodate for a system and user role, but SK abstracts that away for you by using `kernel.add_chat_service` and `AzureChatCompletion` or `OpenAIChatCompletion`" + "You can also use chat completion models (like `gpt-35-turbo` and `gpt4`) for creating plugins. Normally you would have to tweak the API to accommodate for a system and user role, but SK abstracts that away for you by using `kernel.add_service` and `AzureChatCompletion` or `OpenAIChatCompletion`" ] }, { @@ -199,27 +234,25 @@ "metadata": {}, "outputs": [], "source": [ - "import semantic_kernel as sk\n", - "from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureChatCompletion,\n", - " OpenAIChatCompletion,\n", - ")\n", - "\n", "kernel = sk.Kernel()\n", "\n", - "useAzureOpenAI = False\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - "# Configure AI service used by the kernel\n", - "if useAzureOpenAI:\n", - " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", - " azure_chat_service = AzureChatCompletion(\n", - " deployment_name=\"turbo\", endpoint=endpoint, api_key=api_key\n", - " ) # set the deployment name to the value of your chat model\n", - " kernel.add_chat_service(\"chat_completion\", azure_chat_service)\n", - "else:\n", " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_chat_service = OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id)\n", - " kernel.add_chat_service(\"chat-gpt\", oai_chat_service)" + " service_id = \"oai_chat_gpt\"\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_chat_completion\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", + " )" ] }, { @@ -229,7 +262,7 @@ "metadata": {}, "outputs": [], "source": [ - "sk_prompt = \"\"\"\n", + "prompt = \"\"\"\n", "{{$input}}\n", "\n", "Give me the TLDR in 5 words or less.\n", @@ -246,11 +279,36 @@ " does not conflict with the First or Second Law.\n", "\"\"\"\n", "\n", - "tldr_function = kernel.create_semantic_function(prompt_template=sk_prompt, max_tokens=200, temperature=0, top_p=0.5)\n", + "if selectedService == Service.OpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=deployment,\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "\n", + "prompt_template_config = sk.PromptTemplateConfig(\n", + " template=prompt,\n", + " name=\"tldr\",\n", + " template_format=\"semantic-kernel\",\n", + " input_variables=[\n", + " InputVariable(name=\"input\", description=\"The user input\", is_required=True),\n", + " ],\n", + " 
execution_settings=execution_settings,\n", + ")\n", + "\n", + "tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config)\n", "\n", - "summary = await tldr_function(text)\n", + "summary = await kernel.invoke(tldr_function, sk.KernelArguments(input=text))\n", "\n", - "print(f\"Output: {summary}\") # Output: Robots must not harm humans." + "print(f\"Output: {summary}\")" ] } ], @@ -270,7 +328,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/python/notebooks/04-context-variables-chat.ipynb b/python/notebooks/04-kernel-arguments-chat.ipynb similarity index 54% rename from python/notebooks/04-context-variables-chat.ipynb rename to python/notebooks/04-kernel-arguments-chat.ipynb index 1bd69448b77a..f10edae03847 100644 --- a/python/notebooks/04-context-variables-chat.ipynb +++ b/python/notebooks/04-kernel-arguments-chat.ipynb @@ -6,17 +6,17 @@ "id": "fde98ddf", "metadata": {}, "source": [ - "# Creating a basic chat experience with context variables\n", + "# Creating a basic chat experience with kernel arguments\n", "\n", - "In this example, we show how you can build a simple chat bot by sending and updating context with your requests. \n", + "In this example, we show how you can build a simple chat bot by sending and updating the kernel arguments with your requests. \n", "\n", - "We introduce the Context Variables object which in this demo functions similarly as a key-value store that you can use when running the kernel.\n", + "We introduce the Kernel Arguments object which in this demo functions similarly as a key-value store that you can use when running the kernel.\n", "\n", - "The context is local (i.e. in your computer's RAM) and not persisted anywhere beyond the life of this Jupyter session.\n", + "The chat history is local (i.e. in your computer's RAM) and not persisted anywhere beyond the life of this Jupyter session.\n", "\n", - "In future examples, we will show how to persist the context on disk so that you can bring it into your applications. \n", + "In future examples, we will show how to persist the chat history on disk so that you can bring it into your applications. \n", "\n", - "In this chat scenario, as the user talks back and forth with the bot, the context gets populated with the history of the conversation. During each new run of the kernel, the context can provide the AI with its variables' content. " + "In this chat scenario, as the user talks back and forth with the bot, the chat context gets populated with the history of the conversation. During each new run of the kernel, the kernel arguments and chat history can provide the AI with its variables' content. 
" ] }, { @@ -29,6 +29,19 @@ "!python -m pip install semantic-kernel==0.5.1.dev0" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "0a235b31", + "metadata": {}, + "outputs": [], + "source": [ + "from services import Service\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" + ] + }, { "cell_type": "code", "execution_count": null, @@ -37,27 +50,29 @@ "outputs": [], "source": [ "import semantic_kernel as sk\n", - "from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureChatCompletion,\n", - " OpenAIChatCompletion,\n", - ")\n", + "import semantic_kernel.connectors.ai.open_ai as sk_oai\n", + "from semantic_kernel.prompt_template.input_variable import InputVariable\n", + "from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory\n", + "from semantic_kernel.functions.kernel_arguments import KernelArguments\n", "\n", "kernel = sk.Kernel()\n", "\n", - "useAzureOpenAI = False\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - "# Configure AI service used by the kernel\n", - "if useAzureOpenAI:\n", - " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", - " kernel.add_chat_service(\n", - " \"chat_completion\",\n", - " AzureChatCompletion(deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", - " )\n", - "else:\n", " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " kernel.add_chat_service(\n", - " \"gpt-3.5\",\n", - " OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id),\n", + " service_id = \"oai_chat_gpt\"\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_chat_completion\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", " )" ] }, @@ -77,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "sk_prompt = \"\"\"\n", + "prompt = \"\"\"\n", "ChatBot can have a conversation with you about any topic.\n", "It can give explicit instructions or say 'I don't know' if it does not have an answer.\n", "\n", @@ -102,13 +117,44 @@ "metadata": {}, "outputs": [], "source": [ - "chat_function = kernel.create_semantic_function(\n", - " prompt_template=sk_prompt,\n", - " function_name=\"ChatBot\",\n", - " max_tokens=2000,\n", - " temperature=0.7,\n", - " top_p=0.5,\n", - ")" + "if selectedService == Service.OpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=deployment,\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "\n", + "prompt_template_config = sk.PromptTemplateConfig(\n", + " template=prompt,\n", + " name=\"chat\",\n", + " template_format=\"semantic-kernel\",\n", + " 
input_variables=[\n", + " InputVariable(name=\"input\", description=\"The user input\", is_required=True),\n", + " InputVariable(name=\"history\", description=\"The conversation history\", is_required=True),\n", + " ],\n", + " execution_settings=execution_settings,\n", + ")\n", + "\n", + "chat_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6a0f7c01", + "metadata": {}, + "outputs": [], + "source": [ + "chat_history = ChatHistory()\n", + "chat_history.add_system_message(\"You are a helpful chatbot who is good about giving book recommendations.\")" ] }, { @@ -117,7 +163,7 @@ "id": "6e8a676f", "metadata": {}, "source": [ - "Initialize your context" + "Initialize the Kernel Arguments" ] }, { @@ -127,8 +173,7 @@ "metadata": {}, "outputs": [], "source": [ - "context = kernel.create_new_context()\n", - "context[\"history\"] = \"\"" + "arguments = KernelArguments(user_input=\"Hi, I'm looking for book suggestions\", history=chat_history)" ] }, { @@ -147,9 +192,8 @@ "metadata": {}, "outputs": [], "source": [ - "context[\"user_input\"] = \"Hi, I'm looking for book suggestions\"\n", - "bot_answer = await chat_function.invoke(context=context)\n", - "print(bot_answer)" + "response = await kernel.invoke(chat_function, arguments)\n", + "print(response)" ] }, { @@ -168,8 +212,7 @@ "metadata": {}, "outputs": [], "source": [ - "context[\"history\"] += f\"\\nUser: {context['user_input']}\\nChatBot: {bot_answer}\\n\"\n", - "print(context[\"history\"])" + "chat_history.add_assistant_message(str(response))" ] }, { @@ -191,16 +234,15 @@ "async def chat(input_text: str) -> None:\n", " # Save new message in the context variables\n", " print(f\"User: {input_text}\")\n", - " context[\"user_input\"] = input_text\n", + " chat_history.add_user_message(input_text)\n", "\n", " # Process the user message and get an answer\n", - " answer = await chat_function.invoke(context=context)\n", + " answer = await kernel.invoke(chat_function, KernelArguments(user_input=input_text, history=chat_history))\n", "\n", " # Show the response\n", " print(f\"ChatBot: {answer}\")\n", "\n", - " # Append the new interaction to the chat history\n", - " context[\"history\"] += f\"\\nUser: {input_text}\\nChatBot: {answer}\\n\"" + " chat_history.add_assistant_message(str(answer))" ] }, { @@ -259,7 +301,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(context[\"history\"])" + "print(chat_history)" ] } ], @@ -279,7 +321,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/python/notebooks/05-using-the-planner.ipynb b/python/notebooks/05-using-the-planner.ipynb index ab705b4014d2..15fcbf9bf5d9 100644 --- a/python/notebooks/05-using-the-planner.ipynb +++ b/python/notebooks/05-using-the-planner.ipynb @@ -26,6 +26,31 @@ "!python -m pip install semantic-kernel==0.5.1.dev0" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d548e40", + "metadata": {}, + "outputs": [], + "source": [ + "from services import Service\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3852961c", + "metadata": {}, + "outputs": [], + "source": [ + "from semantic_kernel.prompt_template.input_variable import InputVariable\n", + "from 
semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory\n", + "from semantic_kernel.functions.kernel_arguments import KernelArguments" + ] + }, { "cell_type": "code", "execution_count": null, @@ -34,27 +59,26 @@ "outputs": [], "source": [ "import semantic_kernel as sk\n", - "from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureChatCompletion,\n", - " OpenAIChatCompletion,\n", - ")\n", + "import semantic_kernel.connectors.ai.open_ai as sk_oai\n", "\n", "kernel = sk.Kernel()\n", "\n", - "useAzureOpenAI = True\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - "# Configure AI backend used by the kernel\n", - "if useAzureOpenAI:\n", - " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", - " kernel.add_chat_service(\n", - " \"chat_completion\",\n", - " AzureChatCompletion(deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", - " )\n", - "else:\n", " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " kernel.add_chat_service(\n", - " \"gpt-4\",\n", - " OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", + " service_id = \"oai_chat_gpt\"\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_chat_completion\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", " )" ] }, @@ -100,8 +124,8 @@ "from semantic_kernel.core_plugins.text_plugin import TextPlugin\n", "\n", "plugins_directory = \"../../samples/plugins/\"\n", - "summarize_plugin = kernel.import_semantic_plugin_from_directory(plugins_directory, \"SummarizePlugin\")\n", - "writer_plugin = kernel.import_semantic_plugin_from_directory(plugins_directory, \"WriterPlugin\")\n", + "summarize_plugin = kernel.import_plugin_from_prompt_directory(service_id, plugins_directory, \"SummarizePlugin\")\n", + "writer_plugin = kernel.import_plugin_from_prompt_directory(service_id, plugins_directory, \"WriterPlugin\")\n", "text_plugin = kernel.import_plugin(TextPlugin(), \"TextPlugin\")" ] }, @@ -136,9 +160,9 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel.planning.basic_planner import BasicPlanner\n", + "from semantic_kernel.planners.basic_planner import BasicPlanner\n", "\n", - "planner = BasicPlanner()" + "planner = BasicPlanner(service_id)" ] }, { @@ -186,18 +210,31 @@ "metadata": {}, "outputs": [], "source": [ - "sk_prompt = \"\"\"\n", + "prompt = \"\"\"\n", "{{$input}}\n", "\n", "Rewrite the above in the style of Shakespeare.\n", "\"\"\"\n", - "shakespeareFunction = kernel.create_semantic_function(\n", - " prompt_template=sk_prompt,\n", - " function_name=\"shakespeare\",\n", - " plugin_name=\"ShakespearePlugin\",\n", + "\n", + "exec_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", " max_tokens=2000,\n", " temperature=0.8,\n", - ")" + ")\n", + "\n", + "prompt_template_config = sk.PromptTemplateConfig(\n", + " template=prompt,\n", + " function_name=\"shakespeare\",\n", + " plugin_name=\"ShakespearePlugin\",\n", + " 
name=\"planner\",\n", + " template_format=\"semantic-kernel\",\n", + " input_variables=[\n", + " InputVariable(name=\"input\", description=\"The user input\", is_required=True),\n", + " ],\n", + " execution_settings=exec_settings,\n", + ")\n", + "\n", + "shakespeare_plugin = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config)" ] }, { @@ -220,7 +257,33 @@ "She likes Shakespeare so write using his style. She speaks French so write it in French.\n", "Convert the text to uppercase.\"\"\"\n", "\n", - "new_plan = await planner.create_plan(ask, kernel)" + "# TODO: we cannot add an updated ask to a current plan because the underlying plugins already exist\n", + "kernel = sk.Kernel()\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", + " api_key, org_id = sk.openai_settings_from_dot_env()\n", + " service_id = \"oai_chat_gpt\"\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\", api_key=api_key, org_id=org_id),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_chat_completion\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key),\n", + " )\n", + "\n", + "plugins_directory = \"../../samples/plugins/\"\n", + "summarize_plugin = kernel.import_plugin_from_prompt_directory(service_id, plugins_directory, \"SummarizePlugin\")\n", + "writer_plugin = kernel.import_plugin_from_prompt_directory(service_id, plugins_directory, \"WriterPlugin\")\n", + "text_plugin = kernel.import_plugin(TextPlugin(), \"TextPlugin\")\n", + "\n", + "planner = BasicPlanner(service_id)\n", + "new_plan = await planner.create_plan(ask, kernel, prompt)" ] }, { @@ -667,4 +730,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/python/notebooks/06-memory-and-embeddings.ipynb b/python/notebooks/06-memory-and-embeddings.ipynb index 81e3cc0124e0..2b378dcc39f0 100644 --- a/python/notebooks/06-memory-and-embeddings.ipynb +++ b/python/notebooks/06-memory-and-embeddings.ipynb @@ -1,545 +1,731 @@ { - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "68e1c158", - "metadata": {}, - "source": [ - "# Building Semantic Memory with Embeddings\n", - "\n", - "So far, we've mostly been treating the kernel as a stateless orchestration engine.\n", - "We send text into a model API and receive text out.\n", - "\n", - "In a [previous notebook](04-context-variables-chat.ipynb), we used `context variables` to pass in additional\n", - "text into prompts to enrich them with more context. This allowed us to create a basic chat experience.\n", - "\n", - "However, if you solely relied on context variables, you would quickly realize that eventually your prompt\n", - "would grow so large that you would run into the model's token limit. 
What we need is a way to persist state\n", - "and build both short-term and long-term memory to empower even more intelligent applications.\n", - "\n", - "To do this, we dive into the key concept of `Semantic Memory` in the Semantic Kernel.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a77bdf89", - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install semantic-kernel==0.5.1.dev0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "508ad44f", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Tuple\n", - "\n", - "import semantic_kernel as sk\n", - "from semantic_kernel.connectors.ai.open_ai import (\n", - " OpenAIChatCompletion,\n", - " OpenAITextEmbedding,\n", - " AzureChatCompletion,\n", - " AzureTextEmbedding,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "d8ddffc1", - "metadata": {}, - "source": [ - "In order to use memory, we need to instantiate the Kernel with a Memory Storage\n", - "and an Embedding service. In this example, we make use of the `VolatileMemoryStore` which can be thought of as a temporary in-memory storage. This memory is not written to disk and is only available during the app session.\n", - "\n", - "When developing your app you will have the option to plug in persistent storage like Azure AI Search, Azure Cosmos Db, PostgreSQL, SQLite, etc. Semantic Memory allows also to index external data sources, without duplicating all the information as you will see further down in this notebook.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f8dcbc6", - "metadata": {}, - "outputs": [], - "source": [ - "kernel = sk.Kernel()\n", - "\n", - "useAzureOpenAI = False\n", - "\n", - "# Configure AI service used by the kernel\n", - "if useAzureOpenAI:\n", - " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", - " # next line assumes chat deployment name is \"turbo\", adjust the deployment name to the value of your chat model if needed\n", - " azure_chat_service = AzureChatCompletion(deployment_name=\"turbo\", endpoint=endpoint, api_key=api_key)\n", - " # next line assumes embeddings deployment name is \"text-embedding\", adjust the deployment name to the value of your chat model if needed\n", - " azure_text_embedding = AzureTextEmbedding(deployment_name=\"text-embedding\", endpoint=endpoint, api_key=api_key)\n", - " kernel.add_chat_service(\"chat_completion\", azure_chat_service)\n", - " kernel.add_text_embedding_generation_service(\"ada\", azure_text_embedding)\n", - "else:\n", - " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_chat_service = OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id)\n", - " oai_text_embedding = OpenAITextEmbedding(ai_model_id=\"text-embedding-ada-002\", api_key=api_key, org_id=org_id)\n", - " kernel.add_chat_service(\"chat-gpt\", oai_chat_service)\n", - " kernel.add_text_embedding_generation_service(\"ada\", oai_text_embedding)\n", - "\n", - "kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore())\n", - "kernel.import_plugin(sk.core_plugins.TextMemoryPlugin(), \"text_memory\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "e7fefb6a", - "metadata": {}, - "source": [ - "At its core, Semantic Memory is a set of data structures that allow you to store the meaning of text that come from different data sources, and optionally to store the source text too. 
These texts can be from the web, e-mail providers, chats, a database, or from your local directory, and are hooked up to the Semantic Kernel through data source connectors.\n", - "\n", - "The texts are embedded or compressed into a vector of floats representing mathematically the texts' contents and meaning. You can read more about embeddings [here](https://aka.ms/sk/embeddings).\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "2a7e7ca4", - "metadata": {}, - "source": [ - "### Manually adding memories\n", - "\n", - "Let's create some initial memories \"About Me\". We can add memories to our `VolatileMemoryStore` by using `SaveInformationAsync`\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d096504c", - "metadata": {}, - "outputs": [], - "source": [ - "async def populate_memory(kernel: sk.Kernel) -> None:\n", - " # Add some documents to the semantic memory\n", - " await kernel.memory.save_information(collection=\"aboutMe\", id=\"info1\", text=\"My name is Andrea\")\n", - " await kernel.memory.save_information(collection=\"aboutMe\", id=\"info2\", text=\"I currently work as a tour guide\")\n", - " await kernel.memory.save_information(\n", - " collection=\"aboutMe\", id=\"info3\", text=\"I've been living in Seattle since 2005\"\n", - " )\n", - " await kernel.memory.save_information(\n", - " collection=\"aboutMe\",\n", - " id=\"info4\",\n", - " text=\"I visited France and Italy five times since 2015\",\n", - " )\n", - " await kernel.memory.save_information(collection=\"aboutMe\", id=\"info5\", text=\"My family is from New York\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "await populate_memory(kernel)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "2calf857", - "metadata": {}, - "source": [ - "Let's try searching the memory:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "628c843e", - "metadata": {}, - "outputs": [], - "source": [ - "async def search_memory_examples(kernel: sk.Kernel) -> None:\n", - " questions = [\n", - " \"what's my name\",\n", - " \"where do I live?\",\n", - " \"where's my family from?\",\n", - " \"where have I traveled?\",\n", - " \"what do I do for work\",\n", - " ]\n", - "\n", - " for question in questions:\n", - " print(f\"Question: {question}\")\n", - " result = await kernel.memory.search(\"aboutMe\", question)\n", - " print(f\"Answer: {result[0].text}\\n\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "await search_memory_examples(kernel)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "e70c2b22", - "metadata": {}, - "source": [ - "Let's now revisit the our chat sample from the [previous notebook](04-context-variables-chat.ipynb).\n", - "If you remember, we used context variables to fill the prompt with a `history` that continuously got populated as we chatted with the bot. 
Let's add also memory to it!\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "1ed54a32", - "metadata": {}, - "source": [ - "This is done by using the `TextMemoryPlugin` which exposes the `recall` native function.\n", - "\n", - "`recall` takes an input ask and performs a similarity search on the contents that have\n", - "been embedded in the Memory Store and returns the most relevant memory.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fb8549b2", - "metadata": {}, - "outputs": [], - "source": [ - "async def setup_chat_with_memory(\n", - " kernel: sk.Kernel,\n", - ") -> Tuple[sk.KernelFunction, sk.KernelContext]:\n", - " sk_prompt = \"\"\"\n", - " ChatBot can have a conversation with you about any topic.\n", - " It can give explicit instructions or say 'I don't know' if\n", - " it does not have an answer.\n", - "\n", - " Information about me, from previous conversations:\n", - " - {{$fact1}} {{recall $fact1}}\n", - " - {{$fact2}} {{recall $fact2}}\n", - " - {{$fact3}} {{recall $fact3}}\n", - " - {{$fact4}} {{recall $fact4}}\n", - " - {{$fact5}} {{recall $fact5}}\n", - "\n", - " Chat:\n", - " {{$chat_history}}\n", - " User: {{$user_input}}\n", - " ChatBot: \"\"\".strip()\n", - "\n", - " chat_func = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0.8)\n", - "\n", - " context = kernel.create_new_context()\n", - " context[\"fact1\"] = \"what is my name?\"\n", - " context[\"fact2\"] = \"where do I live?\"\n", - " context[\"fact3\"] = \"where's my family from?\"\n", - " context[\"fact4\"] = \"where have I traveled?\"\n", - " context[\"fact5\"] = \"what do I do for work?\"\n", - "\n", - " context[sk.core_plugins.TextMemoryPlugin.COLLECTION_PARAM] = \"aboutMe\"\n", - " context[sk.core_plugins.TextMemoryPlugin.RELEVANCE_PARAM] = \"0.8\"\n", - "\n", - " context[\"chat_history\"] = \"\"\n", - "\n", - " return chat_func, context" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "1ac62457", - "metadata": {}, - "source": [ - "The `RelevanceParam` is used in memory search and is a measure of the relevance score from 0.0 to 1.0, where 1.0 means a perfect match. We encourage users to experiment with different values.\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "645b55a1", - "metadata": {}, - "source": [ - "Now that we've included our memories, let's chat!\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "75267a2f", - "metadata": {}, - "outputs": [], - "source": [ - "async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunction, context: sk.KernelContext) -> bool:\n", - " try:\n", - " user_input = input(\"User:> \")\n", - " context[\"user_input\"] = user_input\n", - " print(f\"User:> {user_input}\")\n", - " except KeyboardInterrupt:\n", - " print(\"\\n\\nExiting chat...\")\n", - " return False\n", - " except EOFError:\n", - " print(\"\\n\\nExiting chat...\")\n", - " return False\n", - "\n", - " if user_input == \"exit\":\n", - " print(\"\\n\\nExiting chat...\")\n", - " return False\n", - "\n", - " answer = await kernel.run(chat_func, input_vars=context.variables)\n", - " context[\"chat_history\"] += f\"\\nUser:> {user_input}\\nChatBot:> {answer}\\n\"\n", - "\n", - " print(f\"ChatBot:> {answer}\")\n", - " return True" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e3875a34", - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Populating memory...\")\n", - "await populate_memory(kernel)\n", - "\n", - "print(\"Asking questions... 
(manually)\")\n", - "await search_memory_examples(kernel)\n", - "\n", - "print(\"Setting up a chat (with memory!)\")\n", - "chat_func, context = await setup_chat_with_memory(kernel)\n", - "\n", - "print(\"Begin chatting (type 'exit' to exit):\\n\")\n", - "chatting = True\n", - "while chatting:\n", - " chatting = await chat(kernel, chat_func, context)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "0a51542b", - "metadata": {}, - "source": [ - "### Adding documents to your memory\n", - "\n", - "Many times in your applications you'll want to bring in external documents into your memory. Let's see how we can do this using our VolatileMemoryStore.\n", - "\n", - "Let's first get some data using some of the links in the Semantic Kernel repo.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c3d5a1b9", - "metadata": {}, - "outputs": [], - "source": [ - "github_files = {}\n", - "github_files[\n", - " \"https://github.com/microsoft/semantic-kernel/blob/main/README.md\"\n", - "] = \"README: Installation, getting started, and how to contribute\"\n", - "github_files[\n", - " \"https://github.com/microsoft/semantic-kernel/blob/main/dotnet/notebooks/02-running-prompts-from-file.ipynb\"\n", - "] = \"Jupyter notebook describing how to pass prompts from a file to a semantic plugin or function\"\n", - "github_files[\n", - " \"https://github.com/microsoft/semantic-kernel/blob/main/dotnet/notebooks/00-getting-started.ipynb\"\n", - "] = \"Jupyter notebook describing how to get started with the Semantic Kernel\"\n", - "github_files[\n", - " \"https://github.com/microsoft/semantic-kernel/tree/main/samples/plugins/ChatPlugin/ChatGPT\"\n", - "] = \"Sample demonstrating how to create a chat plugin interfacing with ChatGPT\"\n", - "github_files[\n", - " \"https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/SemanticKernel/Memory/Volatile/VolatileMemoryStore.cs\"\n", - "] = \"C# class that defines a volatile embedding store\"" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "75f3ea5e", - "metadata": {}, - "source": [ - "Now let's add these files to our VolatileMemoryStore using `SaveReferenceAsync`. 
We'll separate these memories from the chat memories by putting them in a different collection.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "170e7142", - "metadata": {}, - "outputs": [], - "source": [ - "memory_collection_name = \"SKGitHub\"\n", - "print(\"Adding some GitHub file URLs and their descriptions to a volatile Semantic Memory.\")\n", - "i = 0\n", - "for entry, value in github_files.items():\n", - " await kernel.memory.save_reference(\n", - " collection=memory_collection_name,\n", - " description=value,\n", - " text=value,\n", - " external_id=entry,\n", - " external_source_name=\"GitHub\",\n", - " )\n", - " i += 1\n", - " print(\" URL {} saved\".format(i))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "143911c3", - "metadata": {}, - "outputs": [], - "source": [ - "ask = \"I love Jupyter notebooks, how should I get started?\"\n", - "print(\"===========================\\n\" + \"Query: \" + ask + \"\\n\")\n", - "\n", - "memories = await kernel.memory.search(memory_collection_name, ask, limit=5, min_relevance_score=0.77)\n", - "\n", - "i = 0\n", - "for memory in memories:\n", - " i += 1\n", - " print(f\"Result {i}:\")\n", - " print(\" URL: : \" + memory.id)\n", - " print(\" Title : \" + memory.description)\n", - " print(\" Relevance: \" + str(memory.relevance))\n", - " print()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "59294dac", - "metadata": {}, - "source": [ - "Now you might be wondering what happens if you have so much data that it doesn't fit into your RAM? That's where you want to make use of an external Vector Database made specifically for storing and retrieving embeddings. Fortunately, semantic kernel makes this easy thanks to an extensive list of available connectors. In the following section, we will connect to an existing Azure AI Search service that we will use as an external Vector Database to store and retrieve embeddings.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from semantic_kernel.connectors.memory.azure_cognitive_search import (\n", - " AzureCognitiveSearchMemoryStore,\n", - ")\n", - "\n", - "azure_ai_search_api_key, azure_ai_search_url = sk.azure_aisearch_settings_from_dot_env()\n", - "\n", - "# text-embedding-ada-002 uses a 1536-dimensional embedding vector\n", - "kernel.register_memory_store(\n", - " memory_store=AzureCognitiveSearchMemoryStore(\n", - " vector_size=1536,\n", - " search_endpoint=azure_ai_search_url,\n", - " admin_key=azure_ai_search_api_key,\n", - " )\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The implementation of Semantic Kernel allows to easily swap memory store for another. 
Here, we will re-use the functions we initially created for `VolatileMemoryStore` with our new external Vector Store leveraging Azure AI Search\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "await populate_memory(kernel)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can see that our function created an \"About Me\" index and that our five pieces of information have been indexed (note that it can take a few minutes for the UI to reflect the document count and storage size).\n" - ] - }, - { - "attachments": { - "image.png": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAhoAAAE6CAYAAABQ/fuNAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFiUAABYlAUlSJPAAACvLSURBVHhe7d0JfBXVwf7xB7HGqgF5BRVIVYJUAraCKCDFWMFYNFEwgAraPyBWxAW0IrSyVCVYWVrBhUVBaAu4sNQlvFDCItFqqEboWyG4JAGNgiQKJFTBQu9/ztxzk5s9wRwSwu/7+QyZObPcuffOvfPMOWcuDQIeAQAAOHCC/QsAAFDjCBoAAMAZggYAAHCmloNGpubc0FbtYhI152NbBAAA6o0SQYMTPwAAqDk0nQAAAGcIGvXA9u3b/QEAgLqGoFEPrFu7VuvXrbNTAADUHQSNemD9+nVa/8Z6OwUAQN1R5aCRMqqok+jBrGQlDY1XlwtNWVt1iR+spBU5dsky5KVp/qibFds5uHy7Tt01cNR8bdxj55fnYKaSJw1WQmi9CzsrYehEJWcdtAsY+Uq+v7M/v++sTFsWJn2yEsy6N0xXxmFbFhLar04l9ivPzi/moLJem6gh13dXR7OsGTqb5Zcqyy5RG/Ly8rR//37l79vnjwMAUJdUu0bjwEezNfD6UXo9P0pX9+6n2AsbqSArTYtHxWvk8ny7VJht3vK/GKypKzarIKKDYhP7qW+XKOWsn6whg6ZrU8mTf8i2RRpyVbxGL0xTbrOuSvDWS+jSSJ+/vUijr4/X+DWhx2qkhAmjFdtQynhmipYVCy+ZmvP4fC8ItNOw39+nGG+ZkIL1E5Xwc7NfGVKbK9TX235s9HfavMLbr1/crPnF7rrJ94JWTyWMWaSNu85UF/Mc4rsqOqLAWz7Ne5Tas2bNGsVecYUuvzxW69attaUAANQR5v86KfJJYHafCwIxbW8IzP7IFlmrHzDl3tD+Z4EHk3NtaVDu4juC83o+Hthky3yH3ghM7G7W+1lgxMuf2ULrwJbA87f9LLheyccLW6/kYx341xOBxPbevO6PBjYcsoWez/4yyN/W5Y+8YUsCgfyXg/uVOGOLLbFyFgYGm220vyEw/V8HbGFQbvIDgcvNPg1aGCjc4/ceD8SZsltmBTLDHtPI/fs7gUw77tprr74a+NXtQ4sNV13VM/Deu+8GNm7cGIjzxkvON+sAAFBbql2jET34aU2Jb2qngpoOGK4hLb2RzzdqY1g7Qt4L87U4T4rsP0kz+kfZUiuinYZMu8+viSgptF700NKPFXHhfRqZ2Mhb6HWl/t0WeqJuTdK4LmbdGcHfAClI0cQnNkht71TS3e2CC1mp82Zo42Ep9ncLNPLCCFsa1DT+Pg3p5I1sTFHqrmCZcnNlGoYifxyj6BL727RbV0Xbcdeuu/56XRufoB07dqh3nxs0bNidevz3j6tDx466+OKL9Xtv3JRdf31vffbZZ4pPSPDXqUl79uzRvffcrb1799oS6YXFi/Xiiy/aqbKXAQAcn6oZNBqpa+cOdjxcB8X8xPzNUW5YV4209DTv3yj1631FsKCkJh3VsY0dL5SvDW+b9drput5lPZbU8ScdvX/ztWlLeKNFlAaONc0jWzVj0nwlz5mu5D2lm0ykzdqYmi81TFBfE1hKiVLHDiYUpWnz5mCJvMfr4m2j4OUpGv/aVh0sr7nnKOjTp4+emTlTixb+RXlffaVOl1yihg0b6sQTT/THTdmLL76gp55+Wr1797Fr1awGJ5xgasLslFViusxlAADHnWoGjSg1a25HS4jwT+b5yv/Gn/Rk6nP/px1aK/rHfkEV5Spvp/nrBYbrbafLEkOXCRv8JUtpY2svNk7W6HmZihmepJFt7bxCucr93PtzOFkjbWfWksPAeSU6tra8RRN/F+c9+0wtG5Oojp2v0pBJi7S5lvpenn9+G82dO09PP/Wktm3LsKXSli1b9IwXMJ59bq6/jAtNmjTRk08+5f8NGTBwoG4eMMBOlb0MAOD4VO2mk+o7SRGn2NFqiQp2uqxgiG3T2C5bJOqcloq04zq5eLNIMU3aKa6MbYYPHcJCVVT/p7Q6fYVmjEhQTMMcbVw4UQMv764hC7faJY6yBg307bff6sc/vsBvSjFDTEyM9u8v8GY1sAsBAFDL/J4ahSrrDFq6PCTUWfTBlbbAbKufKbs2MD3DFpWyJTD9upLbzQ0sGmTLqtvL8uslgWGmk+egRwPjzPNoX9b+vhGYeJmZNzaw2pZU34HAZxseD3ZKbfuzwMQ0W3wUrVu3NvDb34wJpKWlBX5xdZw/mA6hY0aPDqxfv94uBQBA7XJYo9FaXbqYvg6ZSkkOdXYoIf11pZT6z9uaqt1Frb2/W7VhfQW/zVFKjhbfP06ph9tp2G/Ga+K4IYo+vFUzHppd4ncuOirGdPY8vFqpR/wbVxGKih2jpDvMfuZp0/tH/wbX1A2p/u9mPPHHP2jOnGf9wYx//fVXevPNVLsUAAC1y2nTSYfe/RTj/c1aMFZJa0p0aMhL1uj75iurjLtOOtw40O98ufmJEZr6dumOEOYHw8aPMr+PUSTr2RFK2mjuVJkQ7JfRaYxGD2gqfTBdDz4bHgQaqW//W7w4k69lE+7V4mI//hWU9/Zs3fl4ip3yplOTlfp56eXyC4JlUWc18/8eLV5AVGrqBv3onHP0578s1LnnnecPf/rzX/yyN9avpyMmAKBOaGCqNey4x/w38fGasa2dRr62XMPC+hOaXwYduaJ0eUhwvpTwxDZN6WULPRnP3qyBT2yWOSVHRnfVFeaOjl2btGFjphr3vk+xW6drcRmPV7BqlOLvT5aJGRFNO6hL7Plqplxlvr1Jm3flSxfep+QldwZvLd02W337TVdGk1s0+43xRbfM7knWyPhRSsn3tv/X4tvPmJWovk8G+1eE9iviYI42bdykrDxvb
xOf0tZJcf78LG/ZBG/ZyLM7qGM3ux+padpslvP2Y9mLd5a4s8Wtjz/+WNnZWbr66l/YkuJWr/6b3xk0Ovpo3XgLAEDZnHcGjbnjRaXMG6OEi5rqu6w0JS9fqg27mum6x1bode9EXl5dQGSvaUpZMU0Du7VW5Debleqtt2x5mnIaRithxFNKXmhDxuGtmjPe/Lx4U/X93cjiv8vRJEFJ91/hL1OyCSVm+HKl2v3SjuB+LVu1VflNOmrghBeV+mgwZBjNrhzo70fEv8P2w1uu76gFSj3KIcNo06ZNuSHDMPMIGQCAuqBEjQYAAEDNOQq3twIAgOMVQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAONMg4LHjtS5jxYe6Ry21Nv40W4JjzfIFW9T3Qzvhubtfez3dyU54NqXu0LrIM3R3x9N0si1DJQ58pNXL3tKu/9hpT8OW3ZUY92P90E4DQF1FjQZqVOLgdspKPFU9TmqgVqWOrkM6+YwfKHvtp2o3ZbuW59aZjFu3FXylfRExumbwbfp/ZoiP0Un7vtJ+OxsA6jJqNFCDAtq08hP1TT9BScOidfKKrVr3k+I1GkEBZb+Vpb4p/9UtvzpfD0Q1sOXw5b6jJSsy9K2drJLTvCDS7zI1s5MAUFcQNFBztmxXu6UBJf26lRIjg80oZQeNoL3p2eqW3EBPjz9PPahbK86EjQ3SzysLD1VdDgBqCV/vvmzNGzBA8z6xkyWlPKROD621E8Wte6hjufMqk/3cAA18LttO1bDDB3XwsB2vzMGDduT72KdJr36ja/ud54cMI3Fw+SHDOL3TeXr6gm807vV9tsQd81p3uth7rwbM9d7t78+8787euzouN22J/rw6w04BQMXqddAInlwe0jo77UKPxzYp/bGedqruOJg6UQPvW6qcysLGp0s18uaJSv3GTh+pLXs075TTdXf76jSDNFCPuNPV5P09VXqPCsNC4VBBOCxmrebOku562XuvXrhdrWzpMcPUWix9R7kl/9rZ1ZehlQue15+LDUv09pFvsGxmPxes1FY7CeD4VI+Dhjm5tFKvXiu1NsUWHUcirkzSgvg0jawobJiQMTJNCc8nKfYUW3aENmUd1NmtT6v+SfyM09Tt1IN6J8dOV+KC4cuV/r4XGMwwuZVmjq9qDUUrtTrfjsLTXJeEOpf6HUyj9PmK57UkbbedDwA14ygHjX2atzBb95Qz/CHjsLIzvixzXnDYqU12S5VKWatVvXpq0pXXaNX60k0b4VfHA5/LsqUhazW28Kq54hqR8Cp0f5sPrQ02p5R1xf3JXA0MbbeMKvxiV+yh5hh/nbDtmGacKtbSRPaaVn7YKAwZ0xTXxJZ9D9lfHdLFLRrZqepopJgzD2nnl3ayOuJ6qteHWYWvY/mv3yit0ko96JWb9yr4Ps3VvAFm2dBrGf6eF3/fit7Pks0lWXYbpdc55jS7TP3jY6Rt6UU1EH6NRKjGo4KaibKW27ZSf/Y7tO7Ue155UYAJr01xUIsCoM6ptzUa69avVK8rewZPRqu8k0r4ScA7Wfdb11NL7ZXxJM3VzMLffjD9NUZJk+1V8/s9tXbMSjuvClaN0torg+suHa6wK27vRHbjWvU01fdmuxOlubO2+XN8JfZpqkYFT2rn365JZjvPmxOnt2/Pe89r8mPqEVyrUmWGjRoOGbUl+7m5fpj0X4sKXr/F709TL12jqV754l/ZOpdV3us50SxrXssS7/nL3nt+YzCAmFDyoKYFy8PX93w4a67dhvd4vbbZ9+gY1qy1Wp62Uzv8w9ILBCty1DLe1nh0ld4rs7mmnOXaXuPfhvtDW3PSv+uZ3rK79bY3T13tsn4tCk0rQH13lINGYw29tZWeLmd4IKahWsWcVea84NBcHe2WKuRdxc5ddY16xpmJnurpnQTWri+87g2erG8raqtv9avHdNcFdiLlOc3U3brdX9foqUmTr7HjVeCd2CfZdVt5QecCe8UdPCnerqGh6ns/QLS1E6X3qceV1+jD7GBNi79/2V5Yes7bt1ZF26+q8LCRleUmZLQ640S9/0W+naqOfGXsPlHNz7KTlfhwVmJh7cJYPWb7x1T8+pUp/L0o+Z6f7x0zF2Qr2wunrc7ztpjtjdtZ4S4Y/ljhNszjlbfcseNMnW4rpXLTPlBuiwvVLXQrS9vz1Gz/3lJBo6rL+bal6xPF6JLQYe8Hm/3aS60GUK/VyxqN7PVr9WHoStfT47a7pXVrw04CbRVddGFaWqtW1e9rUJbzi2/nglbRdqxsq8YUVdF3MrUohSeuVhp6WyvNnCVNPcKOp6GwMXiQm5qMjtER2pW5v/on2q/26+1/R+iyKDtdicI+Gl74+3DWc8WakMp//argw2fUL7TuxYma+eE2ZZmV4x7T0h5rg/MqumPFe29DWfXYtVt78yPV+Aw7+cU7tonDDKY2o5xQUNXljP3hTSfJ+mR/gfZ9ZecBqJfqYdDI1rp12/wmjMKTzo3P6EPvRDK3sFOoPYkUylJW2M9mlzxBZW+v8umqQiWvsLOzw5pOvPDj3xVhq+j9ofAOCXPFnq1evbL14BHeSmuYsJH6hqPmkvZNNPSbvXpmS3V+liWgdSl7tefiJlVuCirkBYCpvVZqbmGfiYpevyrwXpti63pDYc3Ur17wp/3A8T1e/zrPr3GIUhtbO/HDtglFnUX9oX9RzUWYqi7na3FZiWVv0zWhGg4A9VL9Cxq2GjzUVh8alg5vazuFtlKPHt7480VXp36zhh0PdjAMDyXm7pXwQHBk/GaU8L4ifvOOHff3Kbw/R3HZzz2kma1u16THbi/d36S6Gtq/Na6xxvY+Rf+7dLuWF9iiSuxN3657PjxFSdc1tiXV0+OxaWo16yHv9aj49auU349nlMZWcndSRc0oxzzTeTNtv86/IvjDX81aR3llGyrtrFnV5XymWeWLd7Ty+3+cABxD6l3QMJ1AL+jRs9SVbPiJ3lyhTm1VVFU+VrcX9dEwfTJevlvZhdXwa9WzOn00ymM6JZrbMW+02x0v3V7YR6P0Pvn7ZU58ppPjrFa2ycT0FzHbcPvbIEes/bla1PmQRs3I1LwvK/oBD/MT5Jnq8dohDR167vf4VdCe3mvoBQzv9cgu7/WrkpLvuTfYZpJid7KMkabWxu9wmDtCzC9/lvxrZx+Z4N0ghU0eH5yua8JrIsxjdD1Nn6wIW6asH+mqaDlvXvsW4XedxOia+BjtTwtb9nv9HgiAY0EDfoIcNcsLEe/u0O3J30htTtfY2P9Rt6iTdbIJEwcO
aFPm15q3cq/+97+naNqQc5XYjP/npEzmltE3Duvy/t11ti0q05dvacmbDfkJcgB1FkEDbhzar+V/y9OiLd/o7T0B7TJlJzRQjxan6NpuTflv4iu1W5uSV+tfed/Z6XKccJL+56KrlXCRuX0UAOqeOhU0AABA/VIP7zoBAAB1BUEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4EyN/zJoenq6HQMAAMc7foIcAAA4Q9MJAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcKbGfxn0zTdTteSll1RQkG9LAKBuiYxspP433aTLL4+1JQBcqfGgMeKeu3X/rx9Qq+hoWwIAdUt2Vpae+OMf9OTTz9gSAK7UeNOJqckgZACoy8x3FLWuwNFBHw0AAOAMQQMAADhD0AAAAM7UmaBx4MvduuePW3Vx0jZv8P7+MVvPfHjQzgUAAMeiOhE09qZnq9sL3ypx0AV6f1xbb2in94c21oE123VPutuwkfrkLzXkybf98R1LxhSO16y39cSgMXrpUzsJAMBxovaDxn+/UlKKlHTnuepxRkNb6Gn8P3pg+Nk6PSVHiw/YsiO2Qy/9xgsUg8KG3yz1SqXYEX/R/BHdgosVE1zniTQ7CQAAqq32g8a/8rWp/Rm69mQ7He6Exrqn83+14r3vbMH30UK9Jnqh4k92eLyfzrVzAACAG7UeNDJyvlNMi0Z2qrSzG5+oXftqImiUzTSdjF5i6jbCmaaOcVq1U/q/WUW1H8HyUK1IWFPIp0s1etAMvWSaXrx55daCfGGWK16jElo31V8gqLwmnFC539xTch88ReVh2/cE11taVKvjzwur5Qlb1vCXD23HSVMSAOB4UetBIybqJL2/Y5+dKi3760Ne2DjJTh0t3XT/n5LUq7n00+Gh2g9zYp4lmWlTIzLxUqWPDw8I7yldd/nz7u9qi4r5Qqteke62NSpDWryqh81J/JxL1am5t25hONmhtHSpV5+ymnM86bOU3jm4jYcTpFUzi0JQuoYH983su17VM+EBKv1d6a6ieQ8PmllsunDZtBl6OP1SPRzaT80qI4gBAFA1td908pPG6vZRnpYX2Olw//1az6SfqL6X1ETQ8E704+1V+pFcqae9olXqrcRQiPADwhfaWVijcIkS+lfUGNNCve4qaq6J7dNbzbyTf6pX0rVTC+38zJ7MP33XCwyXqus5wclSOg0vDDLndrlUzXZ+boOGF44K+5oEt1lMp966yd+mnVdiOtfbjgk5L73ynn7aJ2w/O19i5wEAUH21HzROOFU9ov6joc9ka3HOIVtob3edtlsH4loosaz+G9VWoo9GmR1AK7HT1ASEwoppWvlCX3xh51XXOS3U3I6awGBqHExg2LHxXS8EXFp4oq9Q2DaM8CaPh5OPdMdsc1Hoec56z8toX9gwAwBA9dRy0Diod5Z/qmdOOUsZQxtp55rs4G9oeEP80m/VbVAbPd0pwi5bB3QKNU0UDWU3k1TBp19oZ/OWwUBxTj8ltHhXaZ8Gm006dalSzCjGhIyHd/Yu3K+HE0rUaFRZiUBmBjrOAgCOUC0GjWDISFJTLUpsorObnaEHBrcJ/oaGN6y9+1wNPCvsdtfa1vVS/TR91ve43dX00Qg11+zQSzNfLVZzEdu5hdJfeUXpLUJNGtWzY+cXauYFFzvlBZYjqdEwzSjh/T6KK+o4G3brbxmdWQEACKmloFE8ZJxuS+uWc3VTn0vC7jrppvsn9tbO8GaFEndrVKyFejV/1647TqtaDNeU8D4dXpBpnv6emnc+giYdT+yI4WqePM5uf6a+aHFkNRrn9p8c7Kgaeo7ewG+JAACOVIOAx47XCHNiMtXt5TsWQkZtMLfOvqtOfxqpWFsCwJ3Kv6sA1ISjXqOx960cQkYZdix5VTsT+hAyAAD1ylEPGqd3b60VhIwifh+HX/q/XXF3hbfHAgBw7Knlu05g7jiZwp0dAIB6iqABAACcqfGgERnZSNlZWXYKAOoe8x1lvqsAuFfjd528+Waqlrz0kgoK8m0JANQtJmT0v+kmXX453a8B12o8aAAAAITQRwMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAA4KjKUcq0cZqzPt9O1291IGhkas4NbdUuJl5T022RlTUr0SsfpRQ7DdSe0HFaNHS58maNnpemvMN2kWNenlIm3KzYCRV/4g5mJStpaLy6XGhfi87xGv1anp3rzuZ5g5UweLay7DTqt4KN8zX65u7qaD9vHS+fqFR/TtWO07qg/OeQo42vLtWyjVv9qfquDtVoZGr+hMnaXG++tFEvXTVeqevf0rI503TnVd7J74nBir15tjLqxXG7T1n/2qy8b+xkGQpWjdN114/Ssq9jdNvYaZoyeZrGDYmRcvfZJdzJzUhTlvuHQV3w+SKNGDpZaaf205Qlbyl1yRyNuDLSzqz8OK0TKnwOXTXuzW1a/ZuudrqeC9S6TwKz+1wQiLnjnsCIyy4IJM7YYssDgcyZNwRi2j4QWO1P7Qts+OOgQPyl3rJtveHSGwIT1+3z5wQ+mhVI9MpGzFgYGHb1T735Pw3EjV8dyM0ITV8QiLtjYWDroeDigUO5gdVJNwQ6t/e20/7SQOL41wOfheYBZbLH6QPBozEkf+UDgcvNsbfMHovm2Jo6KBB3cfA47XztHYHZaXae8fU7gdnDrw0ee+Y4nfqOV1h628WO/Ro8vlc/4JX1eTTwfGj+xaHPkd0Hb1uh4cGVwXUKHXojMM77jMYMXxjILe/zUtHzt8+jaLvFn3fwOd8TmP6XO4Lrt+8ZGPaX4PeBv99h+5Y48xO/HPWU97mKaXttYHqGnS5UznFa4edudeBBc8w88GhgWE9vfp9ZgcyKziee/HWPBhL9ed5n7YEnAuOKfT4PBLbOtceomT98ViDtazsrXLnPwbD75B/HwfHw52SGcSlmuSo+Vh1Xd2o0Iq9X0tg45cwapxnbbFkxuco/NU5JL/5DG99eoGEtt2rx2Bm2Gioo5c0cDZy1XLNvba2cJfcq9p40xf5hrZaNvUL7Uidq9qvB9rDUSX00clVLjVz6T216baSarRmlO5/c7M8DqiOy1626rqV37KVv9Kf9Y2tBvuKeWKHU9cs1MnqLZgwdocWfezMPb9WcOwZrxr+ideefvPkrnlRcowJ/vaqoseN729+0OTpJK16bo4HneJ+jSbO0Wa01+LkFGtLGm29rbcZfGVy80PoULdsTpSFDb1HThrashAqff5WkKHXXQM1esVzjYvO97c3QMu8lih37lsZd5c1uM0TzvX2bM6h1cHHUTz/7hfo2zdSce4ZpTmqODtpilXOcVuW4y9i4TwkvbtP
Wv96p6IrOJ6Ym4t5Fyu0yXsvWr9WMTpnaEHZOylk4TH2fyPUea402vTlHsTuna8iEpSr1SS73OZQUq/He8zDPJfVvSUo4xXyvTNNo73gv77GONXWqM2hk/CSNvtL7Mv7t9DKqolsr4Y5b1CG6kSKbdNV1V7eT9uwr9ubFDR6j2OjWir3xasV40zF979PAC5sqZsB1MhVUOV/mev+mKOXlPMUM9Oa1jVBE9C3q203KejvNbAKopkg1MrWh/oEYPLaiBk/Qg7Gt1fTsdho4dri6HE5Tyvo86e/L9PwHURo47SkN6eTNj75CD94RZ1asku97fBf2bWjTT3cOaOc//kDzOfo8Rzne5y2iaTM1MgEi4kxv35sqMiK4eEhWlmlPjlFMh+B0aZU8/yqJ05D7r1C0Wff6WG/a2zfvZBHRpKmamf1p2FjNvH1r6n0Zox6LjNPEvy7QyNaZmjHsKnX9xTDN/yD4bV/6OK3acRfV+1YlNLUTFZxPslYs08bDV2jkY7coxtt+zIDh6muCjS9TK171zhW9hnuPFeXtS9fgZ2j9ZpU6g1TwHIqLUKQ5pr1h64LpSo6I0/gJCd43S/mPdaypY3edNFLfSUmK/Xi2xj27tUQCPKis1ybrzpuv0tVXdlbCk6U70USUuMpq3cpe9TT0vnCDY9LHmdrqfalmPBlf2Klv5Aqv/FBwNlA9Ocr90vsSO7tZ4bEVE34mPvtMeXOUX7BPWds2eVc9MepwSXBWddXY8d3w5KLlfZ/r8yr0sIxu2dL7N0uZHwenS6nk+VeN91yKPc9Mfb7djuL44p1Yh83xruT/Os27yk/T1JuHlV0zVsXjLvLUUP8Io/zzSeYOb7xNjNoVLh6hkwuPSe/4/8D7s+Lews+Xv+7hA8HZJVX1OXgK1ozTuBe+U9zYSUpoYkqq+Vh1WB0LGp4m/TT1d1co45lxWpxpyzwFy0cpYcxGRd01R4tX/EPJI7xkdyTatPayrNRh1BptzdhWNPz1zuB8oBoKlizW4j2tFR/nfcl5x1Y77wspIyPsimPXbpl6hmbeVVj0OdHeWIbCZxdzuCha5+Tm2LFqquD4No/+vXTqqi7eiX/Z3JTS1cRGJc8/5KB3UggKhjSgIhFtEzTx+dGKPZymZSvCTgohVTzuwlV0Pok6y/sEeeEls/A4LfACix31PkWt23qhpf+c4p+vjGmqqG6y0uewJ0UTH1kqxU9SUnwjW1j+Yx1r6l7Q8ET2n6qJsVu9N6QoZebmBWNgZERj7yDaoGXriuZVTxfFeW/k5gXjNCc1U3m78pSxaqLmr7ezgYoc3B08ZlKTNX9ComInbFDMiKkaeZGZGae4G5sqZ8GjmuofW8H+Dxub9lPfXt6Xh99mm+OtN07JH+Qp74MUJT1rbtFrrWiTDlKe14yNecpKnVzY36L6vs/x3ViR3sdLGWlK3ZVfokbRc/YtGu19Ied5V1jxN3vbfC1Zyd6weNYojZ5nvjwref7ek2ztnRBSFkzXxl2ZSp02S8uq8TQjGnnb+HiT0rLywsIK6qU1kzVw0nylpHvHUdZmLZ62WKlqqo4XmQ9KyeO0kuOuDBWdT2K6dPUeKUVPPpKiLP84fVTzC2shWivu6nZeUJmi8a9t9T9fWenzNWNJGeGhwucQLl8pkx5R8jdXaMjtHXXQ26bZbsHBajxWHVcng4bfhDJ2vLoUVld531H9R2rghTmaM7i74h7w3q4OR1ij4W07btICPfiTXD1/V7xir+yuwU/lKCLKzgYqsmaif8z0vcv78vkoWiPmvaVlw4uOxdixL2pK4kl6/X5zbCVqxq6fa8qiJMWZaljTZrtomvqeslqj+3dX7KDJymwYvOKKuz9JCS0z/eN7wPMR6jugNo7vprru5n6K2rVId17ZWUlrbHGYmOHLtXraLWpd8LqmjvEChjfMeDlHzdoEn0eFz79hnEY8mqCorNkactUAzT+pnwYWtn1XLvbG29TllA3eFV93DfSDDeqtZmcqYuM8jb7VO47ib9aMtyPVd9qLGtfFzCx9nFZ43JWhwvNJl/GaM/YKv8kiwRynDftqYFs7zxN9x7OaMSBSGx7xLjTMd8F9f9N3Z5VRc1LhcwizcYYmrsiTvtmgqTd43wveNs0w0bs4qPJj1XENzK0ndhwAAIQ7vEHjLx+m1b3maOMEL4Cg2upojQYAALXg4/kaPWGRNmflBZs8JkzRsj2tNbA3IeNIUaMBAEDIrhSNv+8RJf8zz++nFBndVf3un6YHryq8NxbVRNAAAADO0HQCAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcqfH/vXV/Qb4dAwAAxzv+m3gAAOAMTScAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAmTodNMau3q2+Cz+zUwAA4Fhz3NZoLNq8T11nZmnvgcO2BKjYzJnPqF1M28JhyOBBKigosHMB1LSdO3fqml6/0OjRD9qSoMo+i+WtV9L76enqdllXf3slhT9GeZ/1VStXFnuM8Mc1y5v1wvezrMc5HtB0AlTDPffeq60Z2/xh/oI/KTIy0s4BUJPMSfm2IYPV9bLLbElx5X0WK1svxISBp556Uh0vvtiWFDHb2L59e7U+6yZYPPTb3+i666/XlClT/bJTTj1VCxcu8rexdt16rV+3zg83x5s6ETRME0mD327xhx89/pG27/mPnRNkmk9C801NRLjweeHrbsj+t+LmbS+ssTB/zbQpN49360s52vjZt2ryyDZ/m6a8/ROfaPCSz/1tNX44o3DZ8OkQM27KSj4u6i/zxRPdKtpOAXDprrvu1spVf1OzZs1sSZGKPosVrRfOhAETIE71wkA4Uythtj9+/ARbUjUTJz6qSzt39h+/LKeddpo/7N6925YcP2o9aJgAkO8Ne37XVoHft1fnqB/qgRW77Fxp+ZZ8JV7YyJ/3xh3naczKLwtP+CYEfJ7/n8J1H+91lm5+4bNKm0MmXX2mFt4UpS4/+qG/7i0dGvvlOfv+o5aNf+Bv655uZ+jnz273y0PT5rHNtk2ouOuVnXpt0DnVelwc+3796/v9KlBTPWq+kADUDlefxX9u3qx///vfGnHvPYVNHqaJpCKh5pPyQobx8Ucf+TUcP+ve3ZYcP2o9aJx+ckM9dX1z/69hQkW4xPaNCoPAFa1O9cPB6o//7Z/sX/FCyORrzipcN77taYqMOEH/3HnAn66uKC9k/OrSJv741W1OVbszI4pNFxz8r/Z++1899+4etW0W4e+P8bNzT/H/mnmov8wVUKgq1VSPmmpS+mgAR5/Lz2JWdpY2vf++7r13hL990/Qxb97ccsNM8uuv61//93+6//5f25Ii33iB5dZbb/HDivl7XcJ1x2Vza51oOjFNF6HmD9OkUZG23sk/xISKc08/yU4FQ0ujiIbK2XfIlrhjalpC+9xqykfK2H1QO/Z+Z+eivvvlL/+ff3VirlIA1B4Xn8Xul1
+uizt18sfb/PjHfpOHqekoS8J115UbdsL7aJhh3fp1x2WH0FoPGiZkPPX2V4XNH6ZJoyLbvBO6qWkwTA1D+Mndb4Y5eFhRjU+0Je48dGUzf39Dw76HYwprOFD/7d+/X7u//NJOAagtNf1ZPJJ+WKbJpNmZZ/rNLRXVrPS4soff/+N4U+tBY6sXHFo2+kFh88fyD/L9vyFrPtlf2CfDhJJ/5HzrN1Wc1+QHfvNFqN+EsWLbfj98XNT8ZL+m44v8Q4XNKFNTv9I/PvvWH/++TDPKn9L3FuscivrNVJv+Ydo0OyX99a/L/ascc7UD4Og50s+iudujf7++lfbnuKhDB78pJNQv4+9vvaVdu3b55RUxzTkmbJhOoeUxNRrnnXeenTp+1HrQeDD2DL9DZ6gZoqTOP/qhHybMvLte+UILb2rphwxj2a0/8kOKuXPEzDc1I6tuO9cPLWaZPu0b+R06zTzT4dRsK8T05zBCd51Uh6m5MH1DQts2Az8sVv+tWZNS2Dns3X/8Q08+9TS3twK1wOVnsXnz5po06TE9+ugj/vZnzJiu5+cv8MsrY+5Uyd292//9DFPTEt5HwwxGRR1G66sGAY8dBwAAqFF1ojMoAAConwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcKbG/6+Tr/N22zEAAHC84z9VAwAAztB0AgAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwRPr/EuE4d8kNtv0AAAAASUVORK5CYII=" - } - }, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![image.png](attachment:image.png)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And we can see that embeddings have been conveniently created to allow for semantic search.\n" - ] - }, - { - "attachments": { - "image.png": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAqwAAAJ1CAYAAAAc86LXAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFiUAABYlAUlSJPAAAJaXSURBVHhe7d0LfFT1nTf+T+y/25bHkgckqzJsSQiSgLC1w0PkqcAGFEeFhxAuLqHVRXuJJip33NB2XauSVe7iBLO1SlGByjUUlBGVlEsfjGW0G0oSTEjaZRAahA3t0nb3qfP//c75nZlzzlwygVzOJJ/363V0fufMucwl4ZPv73fOSQkKICIiIiJyqGvU/4mIiIiIHImBlYiIiIgcjUMCutR/48zRCvx03y/wmwv/Jdp/BXy+FwaOn4kH7/4qUj+nP4uciJ8dERFRZ2Fg7Sp/aca+dStwzDUb3536VaR9Xs2XQajyJazZ/3n8/T9+GyO/rGaTc/CzIyIi6lQMrF2k5ieP42cDl2JJbqqaY9O4Dd97GfjuUzMwUM0iZ+BnR0RE1Lk4hrUrXHgTO343EYXmwHPsJTzz5hnVEDJm4Lt/+wF+WnlZzSBH4GdHRETU6RhYu8Dlj34FjPw6LPW5/9kfA/v9D9XQDRwzCi2/PqFa5AT87IiIiDrfFQbWRvy44GsY6bZO39uvFie7+pcw270U76lme2v+9D/Qq1cv1VKaP8a//eY/VEO5sT/6Xf5P/LdqOt17S83fhwL8uF7O1b8r3eW70V0/OyIiIie7igprNore+BDH/Ma0AnhcBJWl76rlFEuv/9EL/51Ikrl8GZe/8HmEzulxMBlWF2NF+Pvwxu0yq3Y73fGzIyIicrp2HBJwO54RofWufS+pypr0Lr5nqsBGhlnrcq0KJ6ubBS+Zso6s0BnVOvl88fhHS63r7Le1TcxVv9k/MraqtrNfVlLVcuPY5Lbu9aIOb2GxaZ3o27kyacNvxuWaX6pWbJerPsB/D75ZtZzvrvEipBoGfxvfmijf52koqwP2yT9mTJ+rtRprrmZbP2P9vbZ+T6zvv3mZ2I78/MzfM61ablquZl+p7vrZEREROZq8SkDbnQq+NGtW8KWPVdPk3ZJbggX/eko8ks8xHuvCy6R3gku/dktw6duq+fGPgi/Jx+L/BbN+JNY2mPelr+MueUdbEny7JOiOaJcE39VbwVP/Oiu8TB2Pvj+1ndB+Io+lIOZ22sN/Bt8qXRJ8Nfwig8H/+s/gf/yneixdOhJctXhN8P+a5zmZ9t5H+06Y33ed9n6aP2O5ru2zsLzfYrn1szH2Y9+2fV3ZNh2TZT9Xqht+dkRERA7X7iddZWRk6w/2/whlKMYz38nQ28KEB4uB997Vq2z738W+u1bgmYnaIlWRU4/jykbRg6qSN/F23BXRbkSjqsa+tB7hZcjAhAnZaGwyqnNivae+LeZKt+P2u2BaZpWRLp7VKLar2levF+4qzkPzC9/Ha//Wos/6fC+kGkMjL/wS5U+9if4Pz8Vo23BJx5q4TBsG8O69spJpVMSjUZ9L6L0XJn4HRWL+e6F1TJ+pJLYd/p6IzypLPVbfsW+Hvje345ln71aPxUf2o5fEd0x8rwarGfL7UXfqKj/HbvjZEREROVy7B9bGxlo94EkZGeFQIg0WbRUYZDjMyhikz+8wtSjTApQ+zVhfi7rGU2qZVShoRyMC07YJ72KG3I5luMJV+PLXMf9finDjkeWYt+RJPLd+PV4Q03M/XIAlL3yEoQufxt9b3rwkIP7o2KTGr8rgGvtEK/E9MEKkRrQzanEq5htrPslPH2IQYv+O2e1bFPr8R7oXYZ/Ylv4HzVXojp8dERGRg7VvYK1/CS/tuxu3GxUve1WyXrSzBoUCRqzw2H7uxvLQSWFqWmaq3LVBxnc2a+trwbW9Tiz7fH/c/vDTWFO6GIUzZ+DvxVS4aBWe+6dvY9z16jnJSAbXZ+/GvgOx3id7aBTtxmwMihryZFidhlMPGp/hDhQZFVbJ9h2zV8mzHt4R/uy1aXO44no1uutnR0RE5EDtF1jlyS33epHx7DJMkG2t+9WL75lOkHnvZS8w4XYtsGaMvx1Z+xaFq3Bi/R/Lx1oVNtw93PijpdaKWsJkN/9bWNzOVy1o/+EBwud6IbVfGtLEFOpaTioiVC61Vp7fO/CWemQnP5dalP3A9Hyta/92TIgaJE/hVJ0pzNa/i3eN74P6jr0UquTK4Qa16rH+HcP6pXGGJ7SDpP/siIiInO8qAqu1u33kvafwbf+H4bGGcjyhfwUy1k8LPeeljB3YZIxplVW4N4rRKM8e19YXwVJbdDu+/TBC2/4evm2tqLXBhGU7UNRo7hKON7bSRBzbt2XYFevIM9Ibf1QQ3sbjwPLNpvGXJGTgW+NP6UMm1KRd4kqrZotlD95tuUrAhGUfYnmGN/z8lwdhW8z3VI5LzQh/135wChmh74NYZv4Oud/F7aYxrHql17SunNr5DxgiIiLqeCnyzCv1mCjpyT8uvodl4T+MiIiIKOm1+0lXRF2m/iV8bz1w+3iGVSIiou6EFVZKYvKmAfLM/7C7njUPSyEiIqLugIGViIiIiByNQwKIiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRriKwNqA8PxvDhkabpqH8Y/U06nHO/+JFPDA2G9PXN6g5yr5FUb4ralq0Xz3JKua24mh1nd/uR/miWRg3Uu17eA7GzVqDj9TimELHvwgRR1u7BtPVa5m+/oSauR9L1DzLJPY3+VtPYdPxS+p5REREFA8rrNQ+/vJn1Ox7EUvyv4px31qD98+r+VfiSraV4Dq/3/d93HnPo1i79yOc/3Nv9HMNQL9el3D+V6fRrJ7TZhdFMP3Oi6gRD/tNWocNDw/T55t8od8ADBD7GtDvC+JYL+HUL17H0zPvwQ/eYWglIiJqzVUEVvEP/Dn5/2GYu7sWJ2rM0w4U3qQ9iXqI/Y9/FdPnr8Ge2j8Dn1Mzbc6fVylShDrr90VMKybqy4REtmWX0Dofv4g5i7bh9F/6YeI/7cCHv6rCwXfewcEqsf9jz2Ccelqb/OUEyr/7KPbIlzZ8HsqfnYgv60ssJn7vHbwt9vX2oV/hxKEVmNxPzj2P7a/9TPyXiIiI4rmKwPpfuHRRPSTq1Q+3TJqHtXt3YG6MP1Yu/T7BamIC24rQ6jqXsP35Naj5CzD04X/F2oJh+II52Pb6Ar6gHrbFwWe+i7XHxYN+k7H2Xx/C0EQCtnjuAzMz9cctvxdHRkRERPFceWD9fYv6h3YQBrGa2uNN/OFhbFrxECYOih37Lv2n/o0ZmjlI+38siWzLrtV1fv829r8jH/wdZj8Q2WV/JX6/bxG+v/k88LlhmPujFZjYRy1IwOnfqvG1A/ohTX9EREREMVx5YD17Xo3524O56mSSr42dhAee2YNTf9YWEFk0nz2t/b/m+Umhk4/GzVqEV37RCZ3i73+Eg/L/2V/DgBMv4qFJOZYToPb8VntWwv5cK4cX7MF59MPkFR
tQmK0WtEaOtd38KJ7aKx7LoFs8I+oQAiIiIgq7upOuPqeftDLA1U/rTv3z+Qa8/9oiTP7mi1rXK5GdcfJRP5nS/iJPdtqD5d+aiiX7OrZj/NQpdeb+udew+FtrcPDsX+nfW3UC1JL8R7E94SEuNXhxoT68YNB3/xXP3dVbzY9tz3zjCgFfxfQf7gdGPoC1b+5IPOgSERH1YFceWG96CNuP6yetvP3OYXx4/FfYtGg0tHNJjq/Bc7KrlMhk4opafHhIP/lInuj04TvrMFsbHXAee364Vq+AdhTjD6iL4nt51wqx/8P699Y4Aeryfjy37uf6c1o1QIRP7ZuOUztexv4Egm7oKgE36OH2/LFXMPeeO/DQa8YlsIiIiCiWq6uwmn3uC7jlWxvwg0l68/2PPtQfEMXwBddEfH/NPAyVjYsfoqZTrt37d5j7xGT0M06O6jcZD83Wx7T+/sManNIetebLmPjsM5gtM+v5PZj73dZ7FEJXCThQhRPHD+OVb4l9/uU0Dj6zGGt/pZ5EREREUbVfYFUyM9vnhBbqIW7KhDpfvkMN+opLPfoyetsGjQ7KiH8SWFSf+zt8/0cibMvge3wNCh/fj9/rS1r3uX64ddHDmKw1GnDwF4nfFIGIiKgnaufAegkfnlBdnFdyjSDqeX71a4Q6xRO5JNSVusWNW7QHp3DKVsmtOSkv+S/c2MYz9rMfwoYVk7VhMOf3Poo5oTtcJeAvfwbPTSQiIkrMFQfWU7tf1C/SbvjLeby//jGs1S4d1A+zPeELwRMBR7Fp/c9x2vSV+fOpPfjB0hf1bvhbp2NSooXOj1/BbHlb1Tu+n9D4UY1rOqaPlw9OYO3SNahRx/HnU69j7Ra9wjnxjjvbfMb+l+/6J/xgmj6eteb57yZ28pj8WVn9srq9a28My+6MGjMREVHyuvIKa+Bt7TaYw3LG4M47xuDWr47BA88f1e7aM6jgGcy9otsGUff1e3z0fCHuvOWrGHfHHbhz7FfxtUmLsF2m1X6j8f1//AYG6E9s1al3foaPLosHgW3Y/74+r3W9MX3+47ill3h4/EVMH5mDceNzxDE8hYMi9Mpbqj49rfWz/SP1xsQf/ivmDpePz2PPojkor9UWWOx/Rrxm+brFz8rXhouflR/r1dhBBcuxRAvSREREFMsVB9a0m8fh1pv64Qu/P4/TgfMijvRGv69OxuIfH8aef/o7XluSbAbh1km3oN+X/4zzgdM4ff7P+EK/TNz6zRXY884GzG7D5Z0G3fF/9ODpmoGJt+rzEnLTA9jk24DF8ji+cAnnz14Sx3ALJi/agB0xbqmakM8NQ+G/rtOvNvCXE1j7nUURld8/nxevWb5u8bPyZ3k5OP6sEBERJSwlKKjHRERERESO0+5XCSAiIiIiak8MrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GhXH1jPH0V50SSMG5mNYUPFNHIM5m49rxZ2lv1YIve9aL9qdyUnHUtHaEB5vvqsQ9M0lH8MnFo/DcPyX8Qp9cx4tOfGeY9aW95l9i2yvfZF4hMP0447znJNxDayMX19g1qo2J/TFe9FJx1DW743jhJ6fxbhNctr0H9GluzTGklK/R4LTVG+xw7kzO9S+Hfm9PWvau+rk74bSfvzd4X2L4ry+1bTHX5u2+DjFzFd+9mW/37rP+/Ga3fsv7/Bq/H/KoNPjckKDnXnB7+/4WfBn1X8LPh62cLg4pfq1RM6y9vBxdniOBa+rdpdyUnH0hHqgy9OzQoufks1r1BDWX7c96i15V3h7YXic81eKD5hk7fE9129F7GWDxXfB8v7JedNXR9sUM3gyfXBaabnaK/dvFx4u8za7mhOOAZnkz/n+cEXT6qmRfv8jHQZ7ftoe21vrY/xWruW/JmbVtbZ/960jdN+lyXDe9ahtN/Jtt/Tkva9jzK/DZLnvY3/O8qJ//5KV1dhPVWDD88D4/5xA576h8mYPGUyZj+8As99K1M9gah7kH9xzm2Yhz01KzBRzdPcJb7vd4n/71uEuXsnY22U5SdWT8ae8jgVjJsewkOTxN/3jfpf/Q0NJzD0zokYpLV0Ex9+yNLuaE44Bkf7uAEN4t0YdJNqdycN9ajJvhMTza/trodQ2B1fayfQfpYy+ZPjGHd5IH4jY7+tknrqnbdRM8lj/f3dbZ1CQ+0wZCZZVLu6wPrlL6O3+N/BN15GzZ/1WRF+uwc/mDUGX5Ol55FjMPuZn+P32oJL+Oi1RZg99qt6l9PwHEwPLVMl6aHT8PSPn8J0OdxAlafP/+JF8Y97TmidhyzDD36H91fPwa3D9WUP/OsJNT+Ki/pQBuO5k4teR81fxPy//BxPjxXzJq3BR/ozcfCHY8RzCrH9Yvi41v5iP56eqR/H1zyFKH//knp2pNN7n8IDpmOeXPQi3hfb0qmut/lrsKnwDm3bsns95vEJsd6bLie7SC1dS7bhA/GO09L9HNn9KLtxQss7vfuqAfvfPoHJhbED2/539mDoYw9H/2Unf0HWvo398nNNQGbmMNS8vb9Lu+gSOQbLZ2L+zOINJVDfkXK1rtYFZfneyJ8H8TOwz+iukpP9+2DurjaeG6vLWm1PdXmF1gl9FqobcL3aX+g4bN9d8zryeKesQY34R2+uXCZfn+U1RNJ/Zo1tWY+1a7/bUWQOxtBWvq8xX496H/aHlqv3Tc43nm/7PWDdlrlLNt53Qf985u4Fap6fJOab9mN5D22fY7z313yMYgodh22+9fjbeozq+2YOS+btG++dsQ+t29b6fdHer9AxqP2vV9vQ5tu/u219z8w/K+b1JeP4Y/08xf+uO8NETJwE7HnHfGTqd/wdxm9w63vQ2pCtJftivLdRnmv5/qj33vL70Mb4vC3vayLbiLVf7Tv1qPjtdQJrp4j52mcf5XtpEfv96NTPW1Var9iJV78bnHhzVnDozbcHC57+WbDhT2qBdOHt4GI5ZGDGvwR/3tAcPLHpkeDY7KzgnFf/XSysD7688HvB139ZH2z+5NfB1x+5zdJ1qpWkZdf6jNXBE2qbl/Z/T1t/7IOrgz8T6514a3XwuTdk+V11w9/8t8GCJ98Onqh+O/jUDNk9+w/B1z/R17X4f78OviiXj3kk+Hp1c7Dh5/8SnCZew9gnK7XFxn4K32gJdddqjwX9uEYFc+55JPjiz38dPPHz9cHH5Gv83wuDb1+Sz7AOCbj01kJtW5OWvhz8eXV98MNN3wtOku/XDKN7VT0/+7bg4j3N2pzWji/ae9N54nQlWLq69eeFu0es6
1m6HOxdNOo9tyw3dU83vPV2J3dNx+v+lVrrArYtt72eaF1U+vCCru1eincM2jJTl1FD2Xr9+LXXYn6vbN8DbbntvbK8H+rnwfY9Cu9LXx5eXy2P2ZVn/HyZllveb7W++fOI2Kdg/4zs3YeW16Cvb/mum7Yf8d03L+v073YM2uu1vy+6Vl+PWM/4vLVlcjvG8ojhBuK9WmjbVmjbrX0X9O+h5fsZ5XMwL485rEU7buvn+6L8/LT5cb7PbT5Gfbnld0HE98r+fpmWC5b329i/aX9yncW2/Vneg7jvmb4983LrZ6Jen+mYtN8FxnL78Yrvs/nYHcP+vlva+nsQ/h0j26bvQJTPTPuuCFHf27b+PrQxfobC201gG63t1/6a1HLL7yzbdyzq+9HJn/dVn3Q19Jvl+NnudSgcJ/6yeG2R+AtlDjbV6stObV2PPedvweI1j2PcoH4YWvAEHroVeL9ij0j0mXhgxdOYPTIT/W4YhtmPfRNDxTpGt6iuN2Y/Og9DvyAfn8fPXtuG865vYPm/zsNksd7Qu+Zh8UxTTfumh/H0P03E0OETMffevxMzjuIjo0xqdmAT1h7vjek/XIfZw/th0LjHxfN74/wb+7W/Dr58xxL8YFo/HHz+h/jB6jWoufUH+P5MWUsOu3ORfM3DMHTcQ3j6MbGvi3tw8H21MKQBm368B+cHiec88wDGDc/ELQVP48X5twDHt2H7MfU06daHsWRSP/1xK8enM783nW/P/PBfW9a/zpWP9+Pt2sl46GHj88lEYeFk21+1uojq5E0PYfljw1RDqa0X76Zu0F3WruqkVLsGk4337x0PTtiGEkxcUSvm7cWdb9v+Wu9EMY9B/IX+ohz+sCJ8xIMefkg7fv2zXG7qPtY/d0u1NnseHpLDKGIahrnPGdVsfX00NOjr7/Nhj2V9sfw58XOgWtGJ7e02vb93PYy52dYuQUv1XH13za8v2jqJiazOD3r4YUze6wv/LDvxuy2HstTUYs+db2vf03BFJYHXIz6f5ernftAdd4rPRrz/heq9vGki7sw+IT9ORXx+K0zbks83vR9xvwut2bceaxE+Fin6sJYGlJfvweTVpu+I+B1UKL5jCX2fr+IYE/rd1yrT+yuJbTxn+r078c5hqGlI6B0L/XyZ3zPt87VV3M3v1cRC8fNn+cxO4ZTxXPF9Nh2Zc9iGBVg+h4jfMRPx0GPA2+/IVxj7uxJN+/w+FCatw3bbv6XxtpHY9zZBcd8PqfM+73a5rNUXBomAWPYOju79Acb95Sie/pfXRbwUH+1J2SX/EZbfYYSbMXhahrr/J9f6M07tfhZLvjUJd94xBl/TutjsBiDtRvUQH6Lml+J/t4zGrZ/T50TIzAz9Mvryl7+sHkU69bE8rkvYXmQcVzYe2nwJ+Muf9CeIMDhx/sO49fwebD8gPuhF3xBHYjYAA0wzjH39WXtdZqdwWr6ooTdDRNSQAS658mk0N+ttTb+/hoqrCRyfZH5vOt/k1TLMqGlnlH8E5Dg4o8vUmObvifKLXLTF9z4zI/wLMoI2DhRqW10R3gYh0/KPrJ343onDt/6xZRZlvJD4BbBHe//WiX/sH43RFSO+ezvFc1YPwtopXdW1FuUYtDGOg8USuxifpdbFbP4H7cqdahTfHtPP+ZXRP6+Yor6+1j7jWORnb/sDT+uOU7/ku/y7Hd+gh3do39HM5yep72grr8fupkz5zsUf62vuuoz678CVSfy7Ems8X0d/nxP43XeFzMNMJj8fZ2icTfT3rLXffyYivG3ffSfell3NYt+xu5i7mgxdw1QBZT/27x0mcoj+OWjvgbmgoN5DPfTH+q5E04HfH7kN9TBS++437vvRyZ93uwRWwxcGfQOTbxUPflkj4qV4f4bIvxRHY/Huwzh4wDT96AGk7ViEyY9vx6WvP4MXX9uFg1tbq5IMwgD5hJpfh8aWXqlBN8nj6ofp62zHdeCfME57xiXsX70e7w/6O4wbpFdJjbG1uku4ZBqyejpwWvy3N3r30tth0Y9Zf37sL33rx5cE5A9HKJSZpohwGz0IyBMVLFTF54T2w9HZ/7DrVYpo1WHDxDvi/PWq/YVqO4klZCKekydlzY8TSLVqQIxA0FnMxxDzF1+cUBc14F4h+x892h9HbaH/Qo8pzi/2tocL+Y+9rPDafg5qdoSrH1363U6EPuZP/1wTeD1tIcNq+eDw74ndrf07kLhBGeI3TUKVzliBrHO+z63+7msjGVZfzNwb+mz2tKFiG/s9a8MJOjLEaPteB4g/bJwaWrVqvuwZsP1+1t6DSetC719o0npc2hDeO/L7E/d3XvvuN/77IXTi5311gfWdpzC96Fm8snsP9ohp0+o5eE4cbL+8vxMxVbzQ8eIL8bmjeOVfXtOuJiBPivpw55N4/aMvoPl8QM4QKfev0PvyaWwrf62Vf3QyMSlPbPXUi1hSuAZ7jjWgZt8aLN+a0DfH6ta/w/R+57F99Rq8HWgRM1pw+sB6rD3QDNnD/vt9P8RTO/4Lkx9djuWPTha/VH+I7+81n1R1Gpue/j72HD+vHcMPNog4Omg2JkekyfAxf/97r+Dg8QZ8tPspzF0tnn/rdEyK9Qu+leNLCrLrD2uwOOr17qy0E3yeXx8ObFqXs3osnFq/KPyPuFax6Xx6t+ejkcMfxD+42g+oCB1rM8VfoVGWD5t/ytRlGIVcd9IezFWD4vcvsoVX+QtVrN2ZZ6THPQatWzd8vNKp9S9qz9eC+/OLTaGrAeVL1gC2Kw5cKb3LeA1eDP1S1Lvo4juBteXmY12sdRXH7IaL8vrk5yivAjGxta67CPKPHWDtkijDZgQnfLcjGN/pEFmBEq9EC+vxX09b2St62pna6vFV0052tP4O2i++p5HHrf4gNf/RKH4HlYv3oGO/z6q7Ps7vPv07YRqKIpYvjlsxtVfX9CEcCYvynuk/L7H+4LaxfHdkuFMPncj4OZ+/x3pFFPkexOn1ivVdiabdvj9715u2sR9LxDHHOwm4Xb+38d6PTv68ry6wpg1A79M/w4tLxUE/vghPb25G5jdXYNMPJ0LrJBfJe8OP52HY2U2YO3MMxo0vwFM//wIGDBQvbeZczB4OHHxmGsbNWoNLX/e0+pf1gG+WY/s/TUa/E69gyTcnYfo/7UfzF1LV0jb48kQ89foKTP9yJZaL7YwbPwmFrzWgnysNuLgfTz2zB+fHL8EP7uqNL981D3NvvYT9z3wPe0Jn9g/D9Hv7YfuDYzB9/os4ccM3sPYn8yzd/gZ5zHuenYHeB9fgoZmTMPtJH74wbQX2lNuHGZjEO76kIbuS9a5EoxshVpeB7Hbc85gIdcbzlkDrrjEMyhD/QKouB9n9iNVXWM25KhPxnPgrUgulxnHKqXxwKPjIMZ/GmL/Q8vni2BOoPk1coQ8NkGMFMzNN70VoG9Yxrh0t/jHIz3Yv5jaIAK+WT35bzhW0Lm45fMBYdxLevnOvafzVVZJ/zWsVaWP7iwE5hk4tjm4Y5mb61PNld5Y4vmjDWEIiX9/VfAby+x3xvVFh2BnfbZvMwWiwdfljda1+
+TYh3utpq9Afgmo7ixsGtanCKsdPwn5Wdoj8mbX+DprbEA7HZhG/g6aoL3QHf5+191L+sWrs1/a7T3sN5u97xHI78d0ttP58NGRan9/W92zy23diT9yfFxPLd0d/r4zvjfPo4VP+fjCGA+jEe7B7nu1nIPxexfyuCBHvbXt9fyaJvxKXGNt4FA2PtfK+tuv3Ns770cmfd4o880o9pgTISzhMfh6Yu9sB/7AQkfZXvtatHPUfVXk5lvXI5M8rJQmjS7/d/sijpKZljoaHw13wPVi7jmElIupceveY/SYHRElJG3pir/gRkcTASkRJxHoBa6N7jNUoSkqyd8D8fZbj3dkbQBQVhwQQERERkaOxwkpEREREjsbASkRERESOxsBKRERERI7GwEpEREREjsbASkRERESOxsBKRERERI7GwEpEREREjsbASkRERESOdkU3Dkgp+bV6RE4TLL1ZPSIiIiLqHlhhJSIiIiJH461ZiYiIiMjRWGElIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdLCQrqccKq/+3fcPDnB3D58mU1h4iIiIioY1xRYPWuW4vpM+7FDTfeqOYQEREREXWMKxoSICurDKtERERE1Bk4hpWIiIiIHI2BlYiIiIgcrVMD65/O/Q6PrDoB99O1YhL/X9UIb92f1VIiIiIiokidFlj/41gjvr75j5j2D1nwfz9bTMPg/1Yq/vROEx45xtBKRERERNF1TmD97FM8vR94+qGBmHDd59RMIbUvFj58A/7n/tPY9Cc1Lxn9dhuW/MNaHFRNIiIiImo/nRNYqy/hw5uvwz1fVG2za1LxSM5n2PvL/1IzroIWHO/DA8b0/C/UAiIiIiJKVp0SWGtO/xeG9u+tWpFuSP3/cLblKgOrDKs/+AAjn3oVr/xEnx648Qx+oxYTERERUXLqlMA6dMBfwf+bFtWK1Hjh/4nQ+leqdRVuHIXRX1GPhXEzZ2Cgegz8AqtD1dfH8dPfqtnCwedNVdl/3BYKuXL+kq3b9PVC883buQ+rj2ozdUfXhrfD6i4RERFRu+icIQEjUvH1k+ex4/eqbfbZBXiP/X+Y/r+uMrB+ZRRGogL/HDUo/gY//cf1wMOq+vrUKBz7gTHm9Bc4hodVVfZp3CW24d0arss27wlgpFz2LzL8yrBq3k4e+qvnAb/EK1WjQvPTjlVYQjERERERXZnOCazX/A9MGPDf+Ja3EZtO/z81U13masXv8KeJ/TEt2vjWNhmIv/+XV/EA1kdWOI/uwj7kYdpo1Zbh9sYz+EQLlF/H/Me+rs2W2xg9MhxBpbTJUzFOPcbRD/BvIx/G/NB2ZuDvjcf4X3jA2I7a/pkzepOIiIiIrlwnBNY/4//u+C28va5Hzbd645N3GvVrsIpp0rY/4uv/cBNeGPkF9dyrN+4xvfqpBVdT9z4+qcA/G931//B97PskHCh/s/XxUFf+P++JnTJ/8+9nkHajS7XiGYgbrbmXiIiIiK5QBwdWPaw+jX54fVof3JB2HRbOuUm/BquY3i0eiNnXmy5z1Y7GPSa79z/AUaNbfqTR7R+eZKVUhtV//iQvNO+fJ8dPms2fBNQjIiIiIuoMHRhYrWH1f6q5HeboWusJUL/9AMc+UY9Hj8LfHltvXa785hNz1fQ3OHosdoV14K2jkGbezm+34adRtklERERE7aeDAmsnh1Vp9FT032V0+YtJu8TVs/h77aoBX8f8p/LwyXrTcjVcYNxjD+PGPd9X88twpn+cCutXZuA583bEPkxnXRERERFRB0gJCupxwpY/W4rFj5eoVqT/ONyAb/yub+eFVSIiIiLqtjqkwvo/x2RiL8MqEREREbWDTrhKABERERHRlWNgJSIiIiJHY2AlIiIiIke7osDaq1cvnP3EuGYUEREREVHHuaKrBFT/27/h4M8P4PLly2oOEREREVHHuKLASkRERETUWTiGlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiBwtCQLru/ieuwA/rldNIiIiIupRrjCwNuLHBV/DSLdpKnhJzO0E+5d23r6IiIiIqMtdRYU1G0VvfIhjfn1anuHF937EGElERERE7avdhgRMGH836hpPqRbQ+KOCcPV16btqrlD/EmaHKrNL8Z4xz1I1lRXcyGEA2jYffwuo82KGWP97++VcOWTA2B6HDhARERF1N+0UWEXAfPkt3DX+dr25fylmvHc7thnVVyzCbK36KsLlve/i9lBldhkm6GskJOM7m3Hs2buBrGJt289MlMF2EfCssb3N+NZg9WQiIiIi6hauIrDWouzecKUUT8kAKeer8Prgt5GhPc9cfR2EQVm1ONVuIwcykCF20tjEoQhERERE3VW7jGFdfpcIry+buv2FfY8bYVZMshu/sVFE2Qx8a/MKQC3Tu/SvzoRlO3D7e9O07elVXCIiIiLqTtplSMCEZStw176XTONHrSdkadNmo+J6O57R5sng2h5jTmUIltvTg2t7hGAiIiIico52GsMqQuizGSj7gTxxKgMTJkA9jsc0PGBwBjLq3sV7Krw2/mgpyur0x4nj8AAiIiKi7qidAqsw8Tsoghczlr6rnRwlL3Mlz+Q3hgVEntE/De9O2KHGvd6Obz8sQq4aE/s9fBtFWXJ+FMZ+tG1arwe7GCuw6TvGyFkiIiIi6g5SgoJ6TERERETkOO1XYSUiIiIi6gAMrERERETkaAysRERERORoDKxERERE5GgMrERERET
kaAysRERERORoDKxERERE5GhXdB3WM2fOqEdERERERB3rigNr//43oqWlRc3pWrXvH8ZWZOIHt96o5nS8Q4cOY+zYMarVMf5w6BU04FaknjyFL31rMq5X83HuMH71/MdIfewBpMuZJ/bgl68fxefvmouvjk3Tn6M5gdrvvY8vGs9T5HZrT96E7G+NwbXaczbhDxiNv3lG7qMZTT9ei/Nps/G/pgxTa7Tdr7bOw8uHx+LBtdPxVTUvMedw4IVSfDpuDWb8rZpFREREPdoVB9bU1N7a45SUFO3/nensLyvxxInLqiW4bkb57QNVo3Ps3fsmJk26R7U6yi/x4f3r0DLhUeTO+V9qnu70hn9A/XuqMXQWbrxxCz7961J8/Z7+wLFXUbn2HbXQMAw3LnscWQPk4zOoKy3BJzVq/ty/xadrz+MrG++DttiyXPdXs9S2E/R/X5yKf3nrHvxjxXfxv9W8xJzGru8/gnOTd6FwtJpFREREPdoVB9brruurWj3TG29sxb33zlQtaj+NeO2he3Hm7z/AkvFqFhEREfVoVxxY09L6dUl11Sl+8pON+Id/uF+1qP3U40cFE/HvcxrxQ4+aRURERD3aFQfW66//6x4dWH/0o5fwne98W7Wo/ZyEN+9/4weHVPPBn+DCismqQURERD3RFQfWG264vkcH1vXrX8TDDz+kWkRERETUUa44sMqrBPRkL7zgxSOPFKsWEREREXUU3jiAiIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgTXZ7XwUKSnXiOlR7FCziIiIiLqTJA+sZ1G2wovBJa+i7BM1q0fZgUemeVG84zMEg+swTc0lIiIi6k6SOrA27N2PVf0GYIpq9zjHG1GDYkzIV20iIiKibih5A+snBzH/MLDAM0jNICIiIqLuKEkD61mUvV4NjJmIop59/wIiIiKibi85A+ux97EKI7B60g1qRg/1cRPeG5+OoapJRERE1B0lYWCtxoJtl7DgG+OQqeb0OMdX4nZ5ZYD94xF8byEDKxEREXVrKUFBPU7YmTNn0L9/F/XFH6vA4G2nVcMmaxzq54xQjY71wgtePPJIsWp1EXlJq3XpOMHQSkRERN1Y8lVYR+ahvrTYNI3DFPTGgsfE404Kq45xUzomHGhCjWoSERERdUfJe5UAIiIiIuoRukFgHYFVpffxagFERERE3RQrrMlseAaGogaNx1WbiIiIqBtiYE1q0/DCjqFYNOIapKQ8ih1qLhEREVF3wsCa7PLXIRj8TEzrRHwlIiIi6n4YWImIiIjI0RhYk9Dx479Wj4iIiIi6PwZWIiIiInI0BlYiIiIicjQGViIiIiJyNAbWHmkvfH99PbZ7TwK1ZdguHvveVIsMav5r2lSSBLd/PYmj06K8ju7gzRLxGczA0Vqg2TsjST4PIiKi9pOcgfWTg5hS4sVg87ShWi2kqyYD0rh/x7DfncM35bQBODatDM1qMXU3RzC/dDZ6lW2FyMRERESOk7wV1utGwFdajHpjmjNCLaDW3YTUMcC1GUOA7IG4FmOROkgtkpXKlz7G4IOlGKrm4J4HMBjvoYFppmsM+ht8SX5m2UBaxk3AmL9BP7XoqvnXoFfpB0h3u9QMIiIi5+GQgB5pCEbvOAfPPfLxJHh+tw2jRRjSvPkK6gc/EG5rhiDzdiDw7km9qXVRG8MF1NACE73bOtZwAr3rPrTcUrkNd+vXLDGeo3eFh+nDGaKvL5mXt7HrvJXXFXffliEUYlqyVy0QtGXyWEzr24477nuWXYTpv1N/QNxTim/uKEKatsDk+ErcnnINbl/elld8BPOPuuAvmQePmkNEROREDKxkUVP5MtJyJ4lH1nAFWdnTiPlzZAVWDRcQ0/TiIWqZIEKf78mbMFIt8zzxsWU4QbP3FWCZse4hDMaTOGgPvHOux4mBh7TnjJxzCPUvG+FPHtOD+MMT+jJtsoW35jmvIFU7NrHtMS/jRETojKWV1xV332LZuCdx7QZjXfEebnjQFnhfxrG/No5NLD/8JPzGeNtW3rOOcxtWF82E5W8TIiIiB0rewPppNTyhMawV8KnZdDVOoqVeHx5Qs0SFMxG8MHigraJ3KFxttdEC74bwcIK04gdEOAsPJ0grLjVVb2Xldiz++JuPVVuZ83IoLA7NFfuv/40W3mTYbR7zBMZZgqRV2gajWhxj23HFfl3x9m0sc2sVa2kSPOJ9++O771hCZ/jYJuErc4A/NOr7au09S8jwhXg3+BneXRwayEFERNRtJGdgvXEcdpvGr/rGXEIxQ2s7kWMl9+K3InANE+GsufFjfGmgUV2VRBg7+ATw5Ngo3dcy8OoV0nD39oPWSqGt69z35CG1IEyv8CqmLvDzvxHPjQjP7SXe60pg360e14P4SijQiiD+nFHBTeA9IyIi6uG6xZCATPdADFOP6Wp8jJbD4n+1v8Ef9BlaUJMnZ8ngqp2kJWljKk3d17ZwlxbqGjcmo7Kod53D1K3ueWKstk4i+g1M/LlXJM7ranXfqgpskO9XW8R+z4iIiKgbBNazKHu9GieyBvHEkaumXz0gTFZa5f9PouFdmK4kEKadtR6id8M3z7GdNGQTCr61ZTgYpcIaS9rtE/ClDQ9e3bVWjROrzCdFRWF9XfH3rS0zj0kV75tfvK60b0c5OSpCYu9Zq67opKsE7XwUKWLbKUU71AwiIqLOlZSBtWHvq6ZrsG7HvqHTeVmrdvMxWnAHXGPUSUJPPCjC1NjwlQNsZ9K/pp2oZB5/uU1VJ03PCZ1ANAnuJ2Q4U/PH/TtcbaiwahXQg0/gD+bu8/Y6OamV1xV33xHL9PG/+lUYWhf/PetITVhbNhu9SmfD7Q8ALTvhjnY91vzxKJb/r228ulBNRER0hVKCgnqcsDNnzqB//xtVq2d64QUvHnlE+2e80x0//msMH36zarUveTkpeYa+9Qx56tlqsHLCzViUvQ3BsmlqHhERUefpFmNYqf0MfVCeeLTUdO3TvfC10n1O3VfN8juQksKwSkREXYuBlaxk9/aGm1A/Lty9DfNZ+9SjDF38DoLBzxhWiYioSzGwUiR5KSnTGeuJjsUkIiIi6ggMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKw90l74/vp6bPee1G6Pul08vuLbnWrrm28r2o7bjnAeqx/djM8/K/dWg8embcadr5/XF3Up/bgeS/wus+2q9vWd+Py0/dgpHu98Vrw/j1ZZ71RFRESU5JI6sPo2GLdnFdOGajWXrs5JHJ1mukWomrQAmrRiBUo5fydWN8rHegD+vHnqlOCn79d6bNHmERER9VxJGljPomyFF8UYh/rSYn2aM0Ito9bdhNQxwLUZQ4DsgbgWY5E6SC3CEIzeIa+/egiDxXO+9MQh7Vqsid+qNd62r1Y/pA8Axg9IE4/TkOESu/hKP31RAmp/21o1th/e2FGA/zamdTnIVkucLPsr1wKuVAwVj4cO+CIw4MtJcdxERESJSs7Aeux9rOonwipD6hXSQ6l+Q4BJ8PxuG0a3JeG8WRKuvo
57En9Us3WJbHsHHkm5BilFO1Q7cfmPF+Dtb8iQ2g/z1xXg+bH6fJ0aMhBRIdWDbqRmNAbUw7hkxXM/Vmtd73IYQo3aj1GdNZirtHoXvebQfjXPNLVn9XbsxFC4zv5GPv77cRldiYiIuo+kDKy+6tMYlnYRC4zhAGJacEwtpI4lx6XO+RiDD6o7YR18Al9Si7pa7esfAguMCulEPIcGPBYxxtU8DEC6FukZ6mFc57HkdDb+e34/HNj+ERrvLcAbt/4Jb/0ivP31q2uRsVLf/xu3nse92lhbQQbKUOVWHJfri3huQXJUb4mIiJwgCQPrWTSIjHDi8EV4jOEAMwZg97ZXUfaJegp1mJqXnwSeWNa2imyEaXihA+5Pn/2NiZgfCp/9cPfoL+LA6WatJbvKtceNp/BWwBo0w0TIDFVB7dVTETLvVZVLVyaKLJVd3cPz80P7z/96P+D90+Eqq1L7+iG8NXqs6ThbE2VsrZx4YhUREfUgSXvS1ZQZefCoxxh5KxZcdwn1Z1SbeqbGKtxpCnUjtv9JLTA53YLs+bcg++gp1Db+HrVq7KeItHg+VAUV0/xrsWShPbS2wYBUjFcPQ8TxPXbUhee1IQ2Jsh2XMSXJ+FoiIqL2kISB9QZkin/v68+eVW0iqQaPLWwApoe736unf1EtUycmCTt/AYwfOxTjBwTw1mltVnRjB+Bh9fCKiGB8IBSGpfNYvSqAuyOGAugnj0X6IjK0cbessBIRESVlhdUzYgBOHH4fPtXWTsL6dAA8I1WbOky/gWPxx3ffgd7Rvhe+iJOuEnHlJ121JnTVAFnNtFdYT9di/WnjbHrgrV+06POj2PnsR1jvcuHuhLvuzUQ4feM8xo8eFAqnO5/dH2MogH5C2PpfhK9kW/u6OM7QvtunwrqjSLzf4j1/xD5GgYiIKAkk55CAkXnwjbmEYuOkq22At9Q0RICugnEd1rGoPwz88cmxluuwphUvw2A8qd0c4LW/fgWpB1+GvMhU1xuKoulfxPrVqgK5sAV3myqsmoAIsCpEZn/dBbx/HgeMS0DZzuS/F7e0ORSG9j1NhtOJ6moGgtj2ve8DB7ab9xEebpD/+EQ8d/qj0LIR26/FG+3c5T9tYrH2/5r6cDAmIiJKFilBQT1O2JkzZ9C//42q1TO98IIXjzyih4DOdvz4rzF8+M2qRZSA4ytx+4jFGLrjM7yQr+YREREliaQ96YqIElGDlROuQQrDKhERJTEGVqJubSgWvvcZgkGGVSIiSl4MrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrD3SXu1OVdrdq2rLsF089r2pFnWxZu8MvPbXJZD3Y6pZcj1em1ambgN79Wpf3wl5Fyp5d9Kdz27ugvvxn8fqR407XYnp2Xa861RjFe6Mtk1jvjbpr7296e/rZjx2SM1oVx34npGJ/j7f+fp51bZTn4P2/tfgMfFZxH4uEVH7S8LAehZlK9QtWS3Tqyj7RD2FrtybJdqtWCMnPUS2BxlEjVu9OoUWYM2BTgW99vxHufb1Q1iCTFTvKMB/y+nxoWpJB8rIwdtyXyszMV7NSiZd8p7FE+0PA3lb3w7/4ydGSFTH0zF/LBAROUcSBtYbULSoGPWlpmnGADG/NzJ79t1i2+AmpI4Brs0YAmQPxLUYi9RBatE9pfjm786J6RAGi+d86YlDql2KzogKaRk3AWP+BvIu/P0GjgUGD0SavuiqZX/lWsCVqr2OoQO+CAz4srpf/3k0nf4ixrv+gCZ1f//aXwTEc8Vz2lHN6T9h/OhBap/tzAimXRDosr+Rr4XJ58XH1d469D27YuK7cvp0h1SrYxuK5+f3w4HtH1r2u/ONBhy49ZYOee+t+iFd/JodP0D+NKYhwyV/nuRPKRFR5+gWQwJ81acxbMyt8Kg2tWYIRu84B8898vEkeH63DaPbkggsVdgZOBoqLZ3E0WlinrkbXz1XDjnQu/uvx7ENwB+fHBvexpK96smCDMw7irSQmla8Dd98bpI+36Rm+R1ISbkDK4+rGYkaOxH/vS5HCz9ayLKFu7tHX4u3fiErWOfx1mkXHh4NHDgtX4le3bJWsaLNi0eGYvUwCqNbXZ92YrUKzlr17tkq1S0u5h9SFT5TRU+vDutTmyrCctv2YQLR5sWkvwcRx2xifV1trQTGec+MqqZ5yINluID52MRker/kMd35epVaLl6r9prt68dzrfiu/AHrY77X1n2HPhPtWKO8t8ZrUc2Yxn4Nz7nOh/crtrf+/S/iuXvD3+O473er75mZ8RrCx5v/eAHe/ob2pyTmr+uYP1CIiGJJ/sD6yUF46wageNINagZ1KDnmdc7HGHxQVl3FtOEm1I8zhgvIIPwy0g4/iYNal/9e+OaI9gY9HGsBVKwzco65ciumKKG0S3x9ALKPnhL/oJ9C44BBporyUIy/FVj/C3M38GmsF/9wj2/1H+3wP/xLAiIAb1fhyBzwRIAow1i9y1tM1dOBJatMAeb9BjTeW4A3bv0TlqxuwcM7bsHDgQDeUuvLICHXe0McY5uMHYCHRSg8YAo1O2Vgv3UAEruL61A8rx2zOB41x0K8rse2X4s31OtKvAqbwHsmBRowYqF8P8S25ZCH92vVcjne8iPUThd/oGj7nYjnIJ5rCmcHtgeQsVLMFwHw3jdSUa2tn3jVNP0b2fp3RbXD7Pu+Bdni+LXgmCEr+uEqftuJoHhvuMqqV1ezMT9DXyoD6Qjz+z2/H9avTvQ9M5Pvv/EaJib4XSAi6lhJH1h9vmqA1dVO0/zue8ATy8IV2XsewOAxL+O3oZO2JsFz8AngyVdw1PsKmkVg1Su57Wfo4ncQDL6DhcPVjKvWjEYRjLRgOiCAsjdakPF1a3dn/r3WQCOD3fjpX0vgH3Mj1MlwBLGOEWTyw0EjIwfPa5UrXfbXXRgfaFF/BAiuTBSpoJfYPhM1FEXTv2gK4jU4YKvYXT1rIE5MAu+ZRhyrCJ3a+5ExCHe7/oRGWZE99KEIuv3wcOg9FUFvgS2QmoLew/fqVfe2kd+VBpTZX5v4Y+ctZJo+T/N7rHela0zV1trf/sE0PKUVYyeKP0zOY/2z+23VVRGU37B9J7Xn/kn1GhhivGchp0JhVa+mEhE5Q3IHVq262ht3uVld7Sznf3PI2p3/12NRf1gtNGQXYdwTH6P+yZsw0inV0wTlf/1arD+dirtFmNHGvBq0f9yN8CWDnTkQXS11BrZWRRTTwgYcUEs6mhaOjSAnq8bmit3VkuNqtSqfel2JdHu3hculfU46Wze1GqvckeQfMbXmqrt0ugUHZBXT+CzFNGL7n9RCOQ5UD4i1v2hB9q0IVVv1saGJkfvF+yKETh8b8Vm1Oq403nsmHNjegPUi1N5t+4ONiKirJXVgldXVE1m3oIgnW3UqS3e+mixV1DdL4Ht3AkaK0HqsHS9L1bGuRbr8h9w0zlVz+vcqZPXD3aNVpUwLdol2m7du57P7rWfCd+YZ/aYgLqvGD3+9nWOefD/V63pjgAhy7R1aYzFXqCUZJNXDd
iPfu9O1WP1b1Ta4TJ+lManx0vJkv9rf1uCtoyKk3puKRvGeyxPL2nQCkza0IHo4rf2tuZoaf9x0NLKarQ1JWZjoOGYios6RvIFVVVcXeEaoGdQZhuY+iD8+udR0opWdGrf67SIMLV6GwTDGs4bJs///+O47Vxxkr/ikq6uU/Y1sPPx+Le584w/t3G0umK5YsHpV51VYtSrbvf2w/o2dWH86PPSgI2hXZugMamxu+KSoKN3l7UK+d9eK8PkH1RbkvgMNeCzuyW+/R+MA8QePCJ44+iEOnP4iMuSFTq6K/geV5SoCEUMjEiNPSJTDDu5t8x8XNVg54Zou+dkkou4vSQPrWZS9zupql5Bn8WsnWhlDAuRknHQlb0jwoGnc6hCM/rYMuGMt111NU0FW3rwg4ioBXaHRqKK2Rp589ScRJs3dqldPHx/7kepC3o/G0W2psIbPSL/3/fAJSsaZ6aGzxrVhBiKEqH1YqmdayPoT0NbLRxln10/7COvxJyxZKB+HT/Kxn7GunRBkrl53GDkGVj/ZSd/3fiwZcEvHjMkU7122fO9C7PvWJ+NsfTnMRHa7Q6tky3GwIlgHVHX/KsmgWT39D+ozFtNq8YeVMV61jfIfVyeqxbj6Q3RDcc/dE8T/30Pjx/ocIqL2khIU1OOEnTlzBv379+yk+MILXjzySLFqda7jx3+N4cNvVi3qTPISUusHdLcTUmTorUXGSvtJTURttPNRpEyrwYrq9jwpkoioG1wlgKjTHNqPe9v1ZCtn2PnsR+17shX1QDvwSMo1DKtE1GEYWIlaY3R9X0UXqxMZNxy493Qmqrv6lqeU5KbhheBn7Xy5OSKiMA4JuEIcEkBERETUOVhhJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYO2R5B2prtfvPlVbhu3ise9NtegqNXtntOOdq07i6LQ4x/ZmCV6bVma7xav+2iLnX62Oe8808oLr8jqW2sRbWxIREZklbWD1bfBicEl4WnBMLaCrVrPEuOVqeDLfWpU6yPjlOMFrWRIREUVIysDasPdVFGMc6kuL9WnGAOzeVgGfWk6tuQmpY4BrM4YA2QNxLcYidZBapHzpiUP45u/OhabpxeK5SWESPPKYdxQhTc1pH62/Z0RERNQxkjKw1jdfwrA0Uxzp3wfD1ENKxBCM3nEOnnvkYxnwtmF0tragFbKLfgaOekv0yuuSvaFqrL173FyltS7Tu/mNZZHDB1SXvjaNRf1hNVvRhhwYy+e8rObqLMuibrcENao7X3uObdiAZX1jCm0ngffs+ErcnnINbl9eo2YQERFRe0jKwOoZMQAnDm9XwwDOouz1apzIGgSPtpQ61iHUv/s38Bx8Al/a8CBODDwEzxNj0VxpCohqvladFY+b54igqBbVLBEhdPDLqnJ7CIPrHzQNN5Bh9kH8IVTdFcvHqEXSmyXwPXkTRmrL9G2bpRVv0+bL44nuZRwb9+8Ypq3/MtIOPwm/EaZFkD34JDD4oL7tkXPEvDFPwPPcJH05ERERdZnkHMM6Mg/1peOAbXL86nbsGzod9XNGqIXUHv745FhLpdFcJU37ttHd/iCGRRsqMOfl8BCCe8aI576M32rr78VvRcgcGQqBQzD62w/ij+++o1c633wF9XgC46IOPxBh9iURMjeU4srvej9WBFJj/Un4igilf2jUw3Lzu+/hj2MmIFNVTYfmijB8+N9xXm8mZvhCvBv8DO8u5n35iYiI2lNyBtZjFSKofoTBj8kxrOMw+PB2DF5xEA1qMV09+xhWvSv8SuhjPzW1v8EfZJXTFITt3fpdJS3jJhFQ30NDrd6uqRTHNWfMVYRjIiIiai9JGFjPouzAaQwbMxFFN8r2CKwSoXXKp9Xw8koBDvQxWg6bT1B6MNylb0ztfoLUFRj0N/iSHO4wTg/SxyyVYCIiIupKyVlhFU40m06XOXYKu9Ebg/urNjlGs/cVNBtd7dl3wDXmZRyLdZ1WGRrNVU453jV00tUQpA4W2zPGysqTp9qxOlvz8pOApap8BUMPeNIVERFRh0jCwHoDihaNw5S6g+HrsG67hAWP3acqrtQe7GNY23Qd1g0PhtbzvTsBnlAFVZ5pr59oFXXb2UUY9wRCVU554pZ28pMy9Dk5hlWtK0+ekid+qWXmqw/4njwUPoZY4dhm6INix7bX/Jq8qoBaTkRERF0nJSioxwk7c+YM+vfv2enwhRe8eOSRYtXqXMeP/xrDh9+sWtQe5GW4ZEA2X29WzjuGl/HNzhgaIO90tS4dJ95byHGzRERENkk7JICo/ZxES716GCKvaAB8aeBNqk1ERERdhYGVSA5VWGYfEqBfD7ZT7/B1YDGGpVyDlJQ7sPK4mkdEREQcEnClOCSAiIiIqHOwwkpEREREjsbASkRERESOxsBKRERERI7GwEpEREREjsbASkRERESOxsBKRERERI6WtJe18m3worhONa4bAd+icchUzc7Ay1rFVutbDLc/oFo52FwyD3mq1bojmF/qRblq5bqX4U1PumrF37Z1ma7Qswmr3aoR2Ip7Nu5EpWpalgn29duyb42x/YxiXJ51m5op2PaL1Hz4i2YiWzWBJqwtW4qSFtW0r6+xvi/2YyciIurWZGBtq0AgIP77WZdN9Xt+EsxcXhmsV+19r6wLZr7yq4jndeS0bt26qPM7Y6quro463xHTsVXBLy1bFdyl2rs2zwp+yfvTYI35OTGnU8E13lnBu/ed0tunfxq8e9ms4Lxjankr267ZtzC8bsR0KDjPvC1t2wuDa06rtty2+TijLW9l37K9Rs7ffEhfJ8akrWt6jvW49eO0vA77+8CJEydOnDj1sCkJhwRUw3v4EqaMD1dUPZ4RGFZ3Cj7Vpq7ShLVHq5DrnhqqPOaNzUduSxV81sJndP5dKGnJQaFR1XTNRGEGUH7yiGhc3bZrfbtQnpqPQqMqqW07AN/xJq1Z2yw20rd/uOrp6o8s9bDVfQe2YsHFqbhcNBPhemxsWX1c6pEu27PcVMm9DbniNVdeDL+oikM7kcWKKhER9WBJOoa1Nwb3Vw+lG/tgMC6h4RPVpi4SQFOLC57hRvg6gvlaV7iYf07NikMLjRmjLF38BY3iwYUzqL3KbWvMgVSQwdEIhtnDc5Db6MU9Pj3AVmzxioCbA4+WLVvZtwi/b0Z04cfSBF9DAIVDEn3+EVQ2upDevAa9SmeraQ0q1FIiIqKeIAkD6wh4si5hla9ateV41oPYrR6TE8gxmTJYeQHPJmzOAOqa9SCYEL8eztwNOfDfLyuZARjDlVvbdqV/qQp1YtoiK7M6PZDuwlqjcCmroubxrjJ0liyDp0FfvwDFWsXUHHCv6nWp19SrdClKYKr02onnFYiAWjpWBdrAGfHaAyi5OAqXSzZp0+aMKhSUbRUhnoiIqGdIygqrZ844TKk7iMElXm3yjRBt9EZm154H1mNUbFGBUE1GVVInwtXGpWgarYer1e4mNF0AstIS6SwXGr3oddQFvwxnMjCeC6Ay1aW65+NvW3atG6HusgifpRfEtozQ
KgLpKhESSzaq466ACI0u5Brd8/LEKBEm9W2rdS2VzKt8Xe554WMbHYA7WpVUHoOvCoXidcy1jBrIwWZTBVcfjmAO8URERN1bkg4JGIFVpcWoV9Oq/hdRf50cFkCdIW+WCl5qCo+/dCE9VT+7PjzeUu9OT79eNePITpMpTYQzU2UzPLa0rdtOhyczcqxo6LjFPnAxEAqccpyoPLtf33Y65haJ0JpahXItjF/d64rgHoVCub6pwGtcSQCWfQjaWFrbc4mIiHqYJA2sJp8cxJTnf4O7vtG5l7WiaPSQWOn3hrre9ZOdjLGgilbNnI1e9m5tLchVoSDUlX8E5X5jvGeC2zaoLv9YY0VllVh2+1vCoTZWVgl8AF+LUUFt475bEbGuKayaL6OlkydhBVByKDy8QQ/X4bG+mp2PIiXlGqQU7VAziIiIuo+kvA5rw95X4Tl8SbUGwFuaB49qdRZehzU2y/VKI645KqiAVhltmTyhKc71RmNvW44vNV3LFC6U3m/uWrdu136N1cj127Jv67ZDjOupyvGrvio1U7C9bi08y5PLLMzHbzu2qNdp3YFHUmbAO345Try3EEPVXCIiou4gaW8c0NUYWMlZarByws1YlL0NwbJpah4REVH3kPxDAoh6uJrldyAlhWGViIi6LwZWoiQ3dPE7CAY/Y1glIqJui4GViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiBzN0Xe6Mm7BOmVGMVaNVDOVrr49K+90FZvlFqbIweaSedb73scV/xaq8bdtv0WqdXnkLVDDtz+NfnvUyNuz6s+z3/ZVMG43q5r29QzGfizLbetqot5+lYiIqGdyaIX1LMpWeDEfAzFFzbE4ViHCam8RUotRLyZv1mkUrziIBrWYupB/jQiULhEUN+GymDZnVKGgbCtq1eL45D3zvagTIVWue/n+fLG9pZjvV4vjbltfFyIIausay7cc0ZYaZFA0ll8uCYfOvFnm+WIS+84VgTb9en25HoZno7KPnG8nlonAmWVsW6xbJ4L1WiNXG8TxF1zIQWGqapul5sNv3j/DKhERUYgjA2vD3v2oH1+M3ZP6qDlmIsweOI1hY24NVVQ9nhEY9ulv4PtEzaAuIkLj0SrkuqeGqpp5Y0XAa6mCzx7eovHvQkmLCHRGRdU1E4UZQPlJGTpb23YATS3mgAlk9TGXQNum9ngVKjOmhgJtxZZdSL9/E1YP19tmtb5dKBeBs9ComGrHHYDveJOaIYlQ66tC4eipCNeLiYiIKBGODKyZk+6LGAIQ1oz6T3vjLvcNql2NBc9X4wQuof6MmkVdRA+NnuFGJNMrj5Vy/jk1K47aZpE8M0aFAqns/te66S+cQW2r274NuSIklmxcgwrZDGzFAn8AhUOupFJ5BOV+oHRseN28WbYhAHZ9+yNbPZRkWK68GE7pFVu8KM8ojjpMgIiIiOJL4pOu9GEDg0sOAjPksACg/uxZtYy6luyen41epXoX/eYMoK7ZXG1shX+NWHc23A058Muu+ZYA6tSieNvWuvU9QIFYt9fGKnhkRdQWEMt9cl19uscX/Zi0iqmputqa7OE5yG3cFR4CoMJyiGiXN+Zgc7xu/padcKvj6lWqQjcRERFpkjSwXsKq57drwwbkGNZVI8+i4Tww+Aaj6kodSZ44ZIS+yOAnq5xL0TRaH4u52t2EpgtAVlqCHeGNXvQ66tLHcxbNRPa5ACpTXRB/jwjxt60dl7GuxyWeaz026zjVYmT5l0YJrbK62sbKrGsmVolgLPenvScibRa6XcjVhiSIgF0hx7fGOfFMrP9m6Lg2we8OiNDN0EpERGRIwsCahsHXAcPGTDcNG9CHCQzur5rUoewnKIXP4nchPVU/sz9c2YwcWxpLdpoMeDnYLIOqPksfJqB1t7eyba2K6UJpnlrXPU+E1hxU+nfFCH5yCIF6aBIxHjVB2Z7l4fdEHD8uBvQgHfgAvhZzZXcpSox2jJPRtIqtekxERERJGVhvgGdob5w4vB9l6iSrhr0fYfd1A+Hp+CttUVzp8GS6REj0hrrH9QCYA4+5e11exkmGN3tgc49CIcxn9purnYls2zpWtuJkFRCqztrIM/ZFwA2PiZXU/kaHA/OV0C5dBTVe1VY9vVyyDKUieGtXKzAF8zC9IltpGsurOb4St6dcg5QJK1GjZhEREfUUzrwO67EKDN52WjXCzNdjtVyH9boR8C0ah0y91Sl4HdbYLNdKlZdrsgczGVjlCVPRlsmTqUzXUrVfzzTutuXYV58IqQbLcut2o10fVguaF6Idk22/Icb1WK3btl871kqOwdWHNRivy77t6OvXYOWEm7HoQDG2B9dhmppLRETUEzgzsCYBBlbqbDuKrsH02uU48d5CDFXziIiIeoIkvkoAUQ+x81GkpDCsEhFRz8XASuR0+esQDH6GIMMqERH1UAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORojg6s8varg0u8WHBMzbDxbfCK5a+i7BM1gxxB3mq0V+lsNa1BhZqfGHmbU2Pd2bjH16Tm61rftlq/bCtq1ZwI8hau8jlbjqgZVvIWrXL5fL+aocTbt7FOeFqMtbY7uRrr27crtWXb0dYnIiLqzhwaWM+ibIUX8zEQU9Qcq2osEEHWlzYCw9QccggRBt1+FzaXbMJlMW3OqEJBvPBoIe+z70Wde5m27uX788X2loYDWmvb1oLoB0h3u9SMaESg9QVQmBHjOWIbBRdyUJiq2oYEXlehR1+mT8sxN7QL+bpmYwHEdtUci1a2nTfLtF3xntSJcGsPw0RERN2ZIwNrw979qB9fjN2T+qg5Vr4NH2HwY8VY5VYzyCFEMDtahVz3VOSpOXlj85HbUgWREVvn34WSFhHqPOl62zVTBEug/KSshLa2bRFEj7rgL5kHj7Y0uootXpRnTEVh1K+WDLNVKBw9FeoIlKt7XbU+L5pGb8Kbnv5qjlkbt+3qjyz1kIiIqKdwZGDNnHQfVo1UjSg8c+5D0Y2qQQ4SQFOLC57hRtwTAXDjTlTK+efUrDhqm0VCyxgVCm6ym7ygUTy4cAa1rW77NqwumolsbVkMsnramIPNs25TM6z0MFuM1RF/CF3d68r2LI+yTUMbt+3/AOWpOfDEKyITERF1MzzpijqA3gXeq9Qr/rqQXdxAXbN1LGpcWtf+bLgbcuC/X1YbA6hTi65823ols9AzLxSILQJbUR4nzOri77vcFx5nah9727p42zaWiUmrALcSzImIiLoZBlZqM/tJQNZwFkDJxqVaF7gcc7na3YSmC0BWmrWTPaZGL3ppXftifVkxPRdAZapLdYNf+bZlt3xJ32jVU0kEwoqdyIoVZjXx920ZZ1pSjCz/0jaE1tZeVzrmFhnbXob0o1cSiImIiJIXAyu1mTWcybGZRrByIT0VyHUvMwVDvcs7/XrVjCM7TfZz52CzqWtfGybQt79oX822m+BrENuRYViFbLffaC/G2o8
+gK/FXCFdihKjrZ381NZ934bcDPWwVW3ddjo8mS5UXow2wJWIiKh7YmCldqTClN8bOou91rcrcsxlYCvukcHQfvUA9ygUogoFoctNHUG5CJaFQ2Q3fYLbjspcodQnv7ySQEaxeLwcc2+ZiTdNy2QVs1SESO2sfy08t3Hf2lhZ87jUeNr6uszvicnOR5GScg1SinaoGURERN1HSlBQjxN25swZ9O/fgWc9HavA4G2nVSNsyoxi7WQseX1Wz+FLaq6hNxY81nknY73wghePPFKsWp3r+PFfY/jwm1XLeeTJUloFU0rNh99+MpQMrPLEomjL5ElHpV6Uq5YMjeZu/NjbluM89cqoRdR9qO1cnIrLUces6tuSXfSJ7dt6zFqVuMQ0vECOyfVVqUaY+bW15XXZ3xPdDjySMgPe8ctx4r2FGKrmEhERdQfODKxJgIGVnKUGKyfcjEXZ2xAsm6bmERERdQ8cEkCU5GqW34GUFIZVIiLqvhhYiZLc0MXvIBj8jGGViIi6LQZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0R9/pyrgFq3FL1rCzKFuxHas+Vc2scaifM0I1OgfvdBWb5Taj9tuUtsp6m9Nc9zK86THuyW+7TWlGcfRbq0a79WuM26NatmF5jgul9y/H3Ij7+RvHYF5uv31q5Lqx35PIW6/qYu2fiIio53FohVUGUi/mYyCmqDlmDXv3Y9/Q6agvLRbTOEypO4gpe8+qpdSlROhz+10ikG3CZTFtzqhCQdlW1KrF8cnw5kWdCKly3cv354vtLcV8v7601ueFL1MtKylGYaMX9/ia9IUhYhsVO5GVkaPainueWs+YlqE0VQTiPioRypDrC4iQqC/3u4GSjWtQoS8NkcdQ0jcHhaqtS8fcItO2PS7rurb3xO8OmN4T27ra+vLYXUhnWCUiItI4MrDKQFo/vhi7J/VRc6wyJ90nlt2gWiPgyQJONDerNnUdERaPViHXPTVUUc0bm4/cliqILNg6/y6UtIgwaFRUXTNRmAGUnzyiNbM9y03V1tuQK5ZVXrRuWAuUyEfhEDUjlsAH8Jn2VXFoJyozpoYqmtmeqSKUVqFShWWNCLULRLt07Cg1I4brXchVD6O9J9q247wnFSetzyciIurpHBlYZSC1DgGg5BBAU4sLnuFGqDyC+bJrXs4/p2bFUdssElzGqFBQk93oBY3iwYUziVVojUCZp4YBxCEDKkKhsAlNFyBCrjG8QK/0ymEJdc1GBVev3MJd3Go3fe1xEXRNr0PKSjPeE8mF9NQY74l4DeWNptBORERE3eCkq2MVKK7rjQWezh3DSvHIwDcbvUq9gEcOCzAHvwTIsaSls+FuyIH/flmhDaBOLQoRzylodKF0bHgMqx5CWw+U8UJhxRZ53Eu1oQd+tytcwZXVX+RjVcwgKcfeynXFcWtVWOO40uHJdKH8aHhYhFYFjhizqrMGaSIiIpKSO7B+chBTtp3GlBn3oajjzwEjRQ914ck6jjSAko1L0TRaH4+52q1XL60Vxjgaveh11AW/HMspT5g6F0BlqgtZarFGG29aJQKn6aQkGWAvxAuUYXr3v7UCKpX7ZqO8jz5GVg49qBNhVR/jKsKoHN8at3J7G1YbY1BLpqJp4+zQ2NtsTzFKsRNu9X4twFSUiteUfr2+PEQL0uYKNREREUnJG1hlWH2+GhgzncMHOlneLCOY6VN4XKns6tbP7F/tVrPUMIGIcBZFdpoMhznYbJzZL2jDBPr2DwdFGVY3yiqkeR8ihJ6sAlrCoVA721+1jeCoUaHQXJmVVdD0vuJ/GcWWKxKEgrb/A5RrQVxtW7uKgWpv0cfXWunja8NVZeuJVW96xNble2KrBNvH0RIREZEuOQOrKayGT76irqd3f1f6vViretJrfbtQnpoDjzmEydApg5/96gHuUdqJTgWhEHgE5f5AeGypKayGg6XOHqK1M+3lZa3EY0uwjREK84aI5zd6w+FWnQCWK9eNuMJAsThOedkp8TjGZbViV0r1y3bBY7vUV9QgbXJ8JW5PuQYpE1aiRs0iIiLqKZx5HdZjFRi87bRqhBnXY/Vt8KI4YlBjbyx4rPOGBvA6rLFZrjlqvhaqQQVPy3VSQ6zXYS30hAOnHIqgnYRlEeN6pXIcrBxaYN6+nKdduirG9U215cZ1WONdP1Ye4y6kh7ZjPeaIY7JsN9rxyjG/S1HSN8Z1ZTU1WDnhZiw6UIztwXWYpuYSERH1BI6+cYCTMbBSZ9tRdA2m1y7HifcWYqiaR0RE1BMk/1UCiLq7nY8iJYVhlYiIei4GViKny1+HYPAzBBlWiYioh2JgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHc/Sdrhr2vgrP4UuhW7KG2G/dmjUO9XNGqEbn4J2uYrPcmjXuLU6jsd7mNNe9DG96rPfk12/RGu0Wp/ZbpNr3HW/b9nWFqLeOVbdRbYmyf+OWsxmRt1i1vie2fRvr6S1dlG0QERH1VA6tsJ5F2Qov5mMgpqg5FiPzUF9arKbpWHD+IKbsPasWUpfyrxHBzCWC4iZcFtPmjCoUlG1FrVocnwyDXtSJMCfXvXx/vtjeUsz3q8VaqJyNyj75yFVzwvR14dH3G9r3liOW5bG3LcmAG17/ckRYlcHTi5K+OShUbYMMpL0qxO4z1Awz+Z405MBvbFfbtxdrw/lVD8fmfTOsEhERhTgysDbs3Y/68cXYPamPmhPPDcjspx5SFxOh8GgVct1TQ1XNvLEiXLZUwWcOZ7H4d6GkRYRBo/LomolCEQDLT+qhs2LLLqTfvwmrh2tNmwCaWlxIv141haw+pvJnK9tOSGArFoiAWzp2lJqhyPkXp2oB11oL1tU2ixfft384/Lr6I0s9JCIiotY5MrBmTrrPOgQgrmr46nrjLvcNqk1dRw+NnuGmbnatq1vMP6dmxaEFu4xRobArq5YFjeLBhTNahTZvln0IgNltyM0IoGTjGlTIphYuAygcolcqW9t260QYr9gJuIsjj0GE3zfjVESzh+cgt9GLe3xNWrtiixflqTnwxHwtREREZJa0J13J8a2DS7xiOojdWbegqOOH1FLCZPf7bPQq1bvoN2cAdc16WEuIf41Yd7bejX6/rNAGUKcWxZM3axMue4ACsW6vjVXwyGqsWy00xN12lb6uNi22dtnLCi3ysco2njYhMtCWLIOnYam27QIURw43aNkJd2jfKnQTERGRJmkDq6zCGuNYfWkfYfCKg2hQy6hjyZOe9GClT0blUCernEvRNFofi7na3YSmC0BWWoJBr9GLXkdd+nhOGerOBVCZ6kqoC107LmNdj0sch+3Y4m77NqyW841JW98IrUcw3xdAaV7kmNaEyJOqSo33ZBlKL4jjMIdSLdCG9+13B0RwZmglIiIyJG1gNct0D8SwTy+iXrWpY2mVTFPACp9p70J6qn4GfLiyGTm2NJbsNNlHnoPNpupjxPjPWE
QoLJdXDjBCpXueCJ05qPTv0oJfm7ftHhU+scr/Acq1IG6EdHk1AdUOndQVW8Uh/coB+nuSjrlFIrSmVqHcEvTDtCEE6jERERF1k8Dq81XjRNYgeFSbuko6PJkuERLDZ8DX+nZFjtfUKo4i7NmvHqCFRPOZ/UdQbhqH2jrrWNmKk1WAUUFt47Yt40xl+DUF9MslxWJb8rJW4nGiZ/Obx8oGPoCvJVbVWR8rW2kab6s5vhK3p1yDlAkrUaNmERER9RTOvA6r/TqrinE9VuP6rCG8DqujWK45Gu1apjKwypOxol7n1Ho91EJPeByq/VqmOtP1UOX4VJ8IqYaI7cfedsS6ca+DKrcjr1hgnARm3W5IaBtyTK+8dqs+W4r3uqJdexYipq6ccDMWHSjG9uA6TFNziYiIegJH3zjAyRhYqbPtKLoG02uX48R7CzFUzSMiIuoJusWQAKJubeejSElhWCUiop6LgZXI6fLXIRj8DEGGVSIi6qEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0Rx9pyvjFqzGLVkjnUXZiu1Y9WlvLHjsPhR14s23evSdriy3MTXdGjUhttuU2m+BGnfbraxru0Wq9Ran9tujxj7uii2zUdDYhlu32pcZ1HOM7dlZtk9EREQxObTCKoOoF/MxEFPUnGga9u7Hqn4D4j6H2llgK+7xBUTY24TLJZvgF4GrZOMaVKjFranYIkJjXxHkxLqXS4pR2OgV22tSS0Xg9AGbtWWR2671eeHLXBZjXRlIvagTIdVYnuVfivl+tRjpmFukb1ebPK7oxy3CZ8GFHBSmqrbBPS+8bskylF4w7duyTC0X6+f20dNw3izzMjHdn49cEZjTr9cWExERUSscGVhlEK0fX4zdk/qoOVF8chDzDwMLPIPUDOoMFYd2ojJjaqgyme2ZikJUoTIUDOMQYbe80YXSsUZV9DYUul2obPgAtaq9umQe8rTHYtvDc0SwC6ApoNqe5aaK6W3IzQAqL6qF/l0oaRFB07Rcbrv85BHVtrneJbZtJwNzFQpHTxXxNp50pPdVD6MJfACf5Visao+L98v0HhIREVF8jgysmZPuizEEwHAWZa9XA2MmduowAGpC0wWgcIgROPWqpuyCr2s2Kp1xnAugMjUHHiOo+dfA7ReBsyWAOjXrqqS6kKUeStlpYkcXzqgwbKWHxlGhcCxVbBGvJaM4gW76I6gUwdszPHoglaEe7qmWbYcdQbkI9+HQTkRERK1JzpOujr2PVRiB1ZNuUDOos8lxmb1Kl2pd9H5ZJTUqnYmQwwpKxfpa938xCk1V1DARhius1VwL2XVvrta6R6GwZacWBnV6tdRKjnGVxz1bBGVbaNSqvznYbBkTa1XrW6yt20uOk411XGo7Maurvl2x1yUiIqKokjCwVmPBtktY8I1xyFRzqHOV+2ajvI8+VlR20deJsGqM12yVCJXujQEUauM55yEvcAZ1cjynbXVtrCvy4Y8WILVxtFUiFJpPmroNqz052rHpofID5Io2+vZHtnqGPuTAGEs6FU0bZ6sxrno4zvKEhyNEI4ckGONQ/X12oVfZ1ojqrT5kwlq5DZPVVfHaQxVqIiIiSkTyBdZjp7Abl7DqeS8Gl8jpYLi9oVo9iTqGGruZUWw5+14OE8hKiz/qU6ONG5Vn55uCoTZMwNqVr59Vn4PNRTNNYVORYXWj7HJfFtl1bzn5aR6ymuMFaX0MrDaUQRtzqgdxPezqVxPQ2lFCqaSNr7UPZdCqq+YxulZadTU1H4W8MgAREVGbJF9gHZmH+tJi0zQOUyAvayUezxmhnkQdJW9IDtDoDZ99r052yrWEMDm2VQa/xVhr7up3jYInNYCSCiMEiucdrUJu5qhQMA2FVdPJVyGmsBoOzNHJ7nt3Qw5WxXqeCpfaOFTXTLwZCrpy0s/yl5eduhwtNAvRKqn2E9KsVHV1dPTtATVYOeEapKTcgZXH1SwiIiLSODOwHqswVU+B3dv0auqCY/pi6kKyimnuetfGocbvSg+Tl5YSYRA74VaVTDkGNhQ+tRApH1ShQFuupi36mf5aIBT/r/QvDS8LhWIjJOuT++JUW9gMj1/Vpo1V8LTh+rHh8av6VADbNWDtY2pttBO64lZXh+KeuyeI/7+Hxo/1OURERKRz9I0DnKxH3ziAOsbOR5EyrQYrqt/BwuFqHhERESXpVQKIupUdeCTlGoZVIiKiGBhYibrcNLwQ/AzBIMMqERFRNAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORojr7TVcPeV+E5fAlTZhRj1Ug1U/rkIKY8X40TqqnJGof6OSNUo+P16Dtd+degl69KNVwobcMtTvVbqC5FSYtqZthucardQtWLctXKdZtu3SpUbJmNAu32rTp5v//V9tudBrbino07UZmaD3/E7Vljb1uSt2B1y3v+R2y3teMWjP1qjejvi3H8UY+biIiIonJohfUsylZ4MR8DMUXNiXDdCPhKi1FvTJ0YVns0Gcp8ARHGNuFyySb4Regq2bgGFWpxayq2iNDXV4Q9se7lkmIUNnrF9prUUhkKvagTQVJbfn++CMdLMd+vL5VhUruHv7aumDw5KPfZ9y22UbETWRk5qm2Iv219+WwsQA4K1RyzWp8Xvky1bsRxCzLEb6yCR70vl0uihHjxnIILYvupqk1EREQJcWRgbdi7H/Xji7F7Uh81h5yi4tBOVGZMDYWxbM9UEfCqUBkKfnGIsFve6ELpWKMyeRsK3S5UNnyAWtn070JJiwh0RtXTNROFGUD5ySNas+5iALl9TCnwehdy1UODDJYlyEfhEDXD0Mq25XpNozfhTU9/rW2X7VluqsbehlyxbqU4Hp0Iu0dliI9XaT6C+b4qFI6eCmtNl4iIiFrjyMCaOek+6xAAcogmNF2ACING4NSrlrKLva7ZVG2M5VwAlak58Bihzr9G635HSwB1olnbLB5njEKevlSvqMru/wtntECbNyRHBGOjKqpXUitNz5eBeIFYVppnHgaga23bMpBecRd94AP4WsSLOr4YvUpn61PZVj2EKxVbxPuUUcxhAERERFcgeU+6+rQanhIvBmtTBXxqNnUOORazV+lSrZvcL6ukoWpjAuSwAhnqxIe2WXavI4Am8+qye10sdzfkwH9/PnJVoIV7ntYdD1943+ZxpLL6C3dx/PG0sbbdFrJr31wplkEcVeI7aAxXWIZS7IR7i1691SvLOdhsH/NKRERECUnOwHrjOOw2jV/1jbmEYobWTlMuAmN5H308p+wmj+iqj6dFBLmNARRqwW4e8gJnRGB0Id1YvdGLXkdd8Mvl8oQprSrrQpZcpoXNXUjXxokWI8u/NFzJlCHyQj5W2U6isoi37URpY3irUOixdf+nmvedjrmjc1T1Vq8EZ3nEa1VLiYiIqG2St8JqkukeiGHqMXWkdKT3Ff/LKDaN59SHCWSlJTAyUxtzKs+eN4U3U2jMTpMJM
[... base64 PNG payload of the notebook's embedded "image.png" attachment omitted (unrecoverable image data); it is referenced by the removed "![image.png](attachment:image.png)" markdown cell that follows ...]
zH5qlA4eiay1RKj+lvoiT+eVo437VW6VOv+97td0Su4UfedmFrfLq3Cy+oqERFR4hweWKux4PlqYMx0rBqpZimZk+4LjWP1pX2EwSsOxhgyQO1ND3XhyTyWU1ZZSzYuRdNofbzmardevcxKS6SzXGj0otdRF/xyrGeRCJznAqhMdVm797XxpjJ8mk+KktIxt8gYK7oM6UfDxybHoJb0LY57klO5bzbK++jjVGX3f50Iq7l9bMky5r4TIaurAVMll4iIiBLh4MAqwqpWPR2H3ZPid/dnugdi2KcXUa/a1LHsJxGFx3a6kJ6qn9kfDob6MIFETjDKTpMJMAebZVDVZ+nDBPr2D1dK1Zn4sOwjmnR4Mo0KaRN8DeL/MgyrkK1dxUBryxOz0pHeVzwto9hyRYKIoJ3wvqPTqqup+Si8gnWJiIh6MocG1nBYTeRyVT5fNU5kDYJHtamrqJDo94bOztdDmm0sqQx+Mjjarx7gHqWd6FSwRT/JKqIiaQqM4WAZi3ldc+VVn2R3v3ZpqhK9Upo3JEcLsMYVCYwTwHKNcNmmfUejjscyTMGsBisnXIOUlDuw8riaRURERBpHXodVjk/1HL6kWmFTZhRrQwMilvM6rI5iuQ5raj78poqpRoW/ymjLRLAzX4fVfK3S6NczNa6XKsevmq6TKsS7zql2jPLM/pjXYbVePzb+vq3HHGK6Hqu2/oVorzesZvkdGLbkPRTv+Awv5KuZRERE5NwbBzgdAyu1u52PImVaDVZUv4OFw9U8IiIi6g6XtSJKdjvwSMo1DKtEREQxMLASdblpeCH4GYJBhlUiIqJoGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEce6cr3wYviutU47oR8C0ah0zVDDuLshXbserT3ljw2H0o6sSbb/FOV7FZbs1qu8Vp66y3Oc213bs//rbtt0g1L7ffutW4rapqGreLVc1ot5S1357VeuvXeMcd5datUW9LS0RERFHJwNpWgUBA/Pezjpt+uTOY+cqvVPtM0Lt8nakdnur3/ETM3xmc/48/CXrPWJd19LRu3bqo8ztjqq6ujjrfEdOxVcEvLVsV3KXauzbPCn7J+9Ngjfk5MadTwTXeWcG7953S26d/Grx72azgvGNqedxt6+uGnmss33wo1LZMtm3ZJ/u6NfsWWrdlWb+V4w4eCs6Lsy9OnDhx4sSJU/zJmUMCRuahfs4I1bgBnqG9gfMX0aDmaD45iPmHgQWeQWoGdb0mrD1ahVz31FDVM29sPnJbquAziqLx+HehpCUHhUZl0jUThRlA+ckjotHatgNoanEh/XptkSarj1E+jeJ6F3LVw2js69ZdDCDXPM+8ftzjJiIioquVpGNYz6Ls9WpgzMROHQZArdFDo2e4qStc62YX88+pWXHUNovkmTEqFEhl97/WBX/hDGpb3fZtyM0IoGTjGlTIZmArFvgDKBxym2xFqD1ehUrTvqya4Guwrps3JAeV/qWY75ctEZ4rxL7V+vGPm4iIiK6W8wOrVkm9hCnjTWNYj72PVRiB1ZNuUDPIWeR40dnoVeoFPJuwOQOoa25SyxLgXyPWnQ13Qw7898sqagDGcOZ4286btQmXPUCBWLfXxip47jePMZXkWFK5rti2CJ6lY21hVu23V+lSlCAfheZ13fNwuaQY8OnLfZnLcHlW9PWjH3eVflzatBhrE6k4ExERkQD8/+3lMs0z1zWgAAAAAElFTkSuQmCC" - } - }, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![image.png](attachment:image.png)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's now try to query from Azure AI Search!\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "await search_memory_examples(kernel)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We have laid the foundation which will allow us to store an arbitrary amount of data in an external Vector Store above and beyond what could fit in memory at the expense of a little more latency.\n" - ] + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "68e1c158", + "metadata": {}, + "source": [ + "# Building Semantic Memory with Embeddings\n", + "\n", + "So far, we've mostly been treating the kernel as a stateless orchestration engine.\n", + "We send text into a model API and receive text out.\n", + "\n", + "In a [previous notebook](04-context-variables-chat.ipynb), we used `context variables` to pass in additional\n", + "text into prompts to enrich them with more context. This allowed us to create a basic chat experience.\n", + "\n", + "However, if you solely relied on context variables, you would quickly realize that eventually your prompt\n", + "would grow so large that you would run into the model's token limit. 
What we need is a way to persist state\n", + "and build both short-term and long-term memory to empower even more intelligent applications.\n", + "\n", + "To do this, we dive into the key concept of `Semantic Memory` in the Semantic Kernel.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a77bdf89", + "metadata": {}, + "outputs": [], + "source": [ + "!python -m pip install semantic-kernel==0.5.1.dev0" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "508ad44f", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"ai_model_type\" shadows an attribute in parent \"OpenAIHandler\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"prompt_tokens\" shadows an attribute in parent \"OpenAIHandler\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"completion_tokens\" shadows an attribute in parent \"OpenAIHandler\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"total_tokens\" shadows an attribute in parent \"OpenAIHandler\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"ai_model_type\" shadows an attribute in parent \"AzureOpenAIConfigBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"ai_model_type\" shadows an attribute in parent \"OpenAIChatCompletionBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"ai_model_type\" shadows an attribute in parent \"OpenAITextCompletionBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"prompt_tokens\" shadows an attribute in parent \"AzureOpenAIConfigBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"prompt_tokens\" shadows an attribute in parent \"OpenAIChatCompletionBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"prompt_tokens\" shadows an attribute in parent \"OpenAITextCompletionBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"completion_tokens\" shadows an attribute in parent \"AzureOpenAIConfigBase\"; \n", + " 
warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"completion_tokens\" shadows an attribute in parent \"OpenAIChatCompletionBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"completion_tokens\" shadows an attribute in parent \"OpenAITextCompletionBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"total_tokens\" shadows an attribute in parent \"AzureOpenAIConfigBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"total_tokens\" shadows an attribute in parent \"OpenAIChatCompletionBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"total_tokens\" shadows an attribute in parent \"OpenAITextCompletionBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"ai_model_type\" shadows an attribute in parent \"OpenAITextEmbeddingBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"prompt_tokens\" shadows an attribute in parent \"OpenAITextEmbeddingBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"completion_tokens\" shadows an attribute in parent \"OpenAITextEmbeddingBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"total_tokens\" shadows an attribute in parent \"OpenAITextEmbeddingBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"ai_model_type\" shadows an attribute in parent \"OpenAIConfigBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"prompt_tokens\" shadows an attribute in parent \"OpenAIConfigBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"completion_tokens\" shadows an attribute in parent \"OpenAIConfigBase\"; \n", + " warnings.warn(\n", + "/home/evmattso/.cache/pypoetry/virtualenvs/semantic-kernel-eoLGgW5m-py3.10/lib/python3.10/site-packages/pydantic/_internal/_fields.py:184: UserWarning: Field name \"total_tokens\" shadows an attribute in parent \"OpenAIConfigBase\"; \n", + " warnings.warn(\n" + ] } - ], - 
"metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" + ], + "source": [ + "from typing import Tuple\n", + "\n", + "import semantic_kernel as sk\n", + "from semantic_kernel.connectors.ai.open_ai import (\n", + " OpenAIChatCompletion,\n", + " OpenAITextEmbedding,\n", + " AzureChatCompletion,\n", + " AzureTextEmbedding,\n", + ")\n", + "from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory\n", + "from semantic_kernel.prompt_template.input_variable import InputVariable\n", + "from semantic_kernel.functions import KernelFunction" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d8ddffc1", + "metadata": {}, + "source": [ + "In order to use memory, we need to instantiate the Kernel with a Memory Storage\n", + "and an Embedding service. In this example, we make use of the `VolatileMemoryStore` which can be thought of as a temporary in-memory storage. This memory is not written to disk and is only available during the app session.\n", + "\n", + "When developing your app you will have the option to plug in persistent storage like Azure AI Search, Azure Cosmos Db, PostgreSQL, SQLite, etc. Semantic Memory allows also to index external data sources, without duplicating all the information as you will see further down in this notebook.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "7aa8b6f2", + "metadata": {}, + "outputs": [], + "source": [ + "from services import Service\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "8f8dcbc6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "KernelPlugin(name='text_memory', description=None, functions={'recall': KernelFunction(plugin_name='text_memory', description='Recall a fact from the long term memory', name='recall', is_prompt=False, stream_function=None, parameters=[KernelParameterMetadata(name='ask', description='The information to retrieve', default_value=None, type_='str', required=True, expose=True), KernelParameterMetadata(name='kernel', description='The kernel', default_value=None, type_='Kernel', required=True, expose=True), KernelParameterMetadata(name='collection', description='The collection to search for information.', default_value='generic', type_='str', required=False, expose=True), KernelParameterMetadata(name='relevance', description='The relevance score, from 0.0 to 1.0; 1.0 means perfect match', default_value=0.75, type_='float', required=False, expose=True), KernelParameterMetadata(name='limit', description='The maximum number of relevant memories to recall.', default_value=1, type_='int', required=False, expose=True)], return_parameter=KernelParameterMetadata(name='return', description='', default_value=None, type_='str', required=True, expose=False), function=, plugins=KernelPluginCollection(plugins={'text_memory': KernelPlugin(name='text_memory', description=None, functions={...})}), prompt_execution_settings={}, prompt_template_config=None, metadata=KernelFunctionMetadata(name='recall', plugin_name='text_memory', description='Recall a fact from the long term 
memory', parameters=[KernelParameterMetadata(name='ask', description='The information to retrieve', default_value=None, type_='str', required=True, expose=True), KernelParameterMetadata(name='kernel', description='The kernel', default_value=None, type_='Kernel', required=True, expose=True), KernelParameterMetadata(name='collection', description='The collection to search for information.', default_value='generic', type_='str', required=False, expose=True), KernelParameterMetadata(name='relevance', description='The relevance score, from 0.0 to 1.0; 1.0 means perfect match', default_value=0.75, type_='float', required=False, expose=True), KernelParameterMetadata(name='limit', description='The maximum number of relevant memories to recall.', default_value=1, type_='int', required=False, expose=True)], is_prompt=False, is_asynchronous=True, return_parameter=KernelParameterMetadata(name='return', description='', default_value=None, type_='str', required=True, expose=False))), 'save': KernelFunction(plugin_name='text_memory', description='Save information to semantic memory', name='save', is_prompt=False, stream_function=None, parameters=[KernelParameterMetadata(name='text', description='The information to save.', default_value=None, type_='str', required=True, expose=True), KernelParameterMetadata(name='kernel', description='The kernel', default_value=None, type_='Kernel', required=True, expose=True), KernelParameterMetadata(name='key', description='The unique key to associate with the information.', default_value=None, type_='str', required=True, expose=True), KernelParameterMetadata(name='collection', description='The collection to save the information.', default_value='generic', type_='str', required=False, expose=True)], return_parameter=KernelParameterMetadata(name='return', description='', default_value=None, type_='', required=True, expose=False), function=, plugins=KernelPluginCollection(plugins={'text_memory': KernelPlugin(name='text_memory', description=None, functions={...})}), prompt_execution_settings={}, prompt_template_config=None, metadata=KernelFunctionMetadata(name='save', plugin_name='text_memory', description='Save information to semantic memory', parameters=[KernelParameterMetadata(name='text', description='The information to save.', default_value=None, type_='str', required=True, expose=True), KernelParameterMetadata(name='kernel', description='The kernel', default_value=None, type_='Kernel', required=True, expose=True), KernelParameterMetadata(name='key', description='The unique key to associate with the information.', default_value=None, type_='str', required=True, expose=True), KernelParameterMetadata(name='collection', description='The collection to save the information.', default_value='generic', type_='str', required=False, expose=True)], is_prompt=False, is_asynchronous=True, return_parameter=KernelParameterMetadata(name='return', description='', default_value=None, type_='', required=True, expose=False)))})" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "kernel = sk.Kernel()\n", + "\n", + "# Configure Azure LLM service\n", + "if selectedService == Service.AzureOpenAI:\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " azure_text_embedding = AzureTextEmbedding(\n", + " service_id=\"azure_text_embed\", deployment_name=\"text-embedding\", endpoint=endpoint, api_key=api_key\n", + " )\n", + " service_id = \"aoai_chat\"\n", + " azure_chat_service = AzureChatCompletion(\n", 
+ " service_id=service_id, deployment_name=\"gpt-35-turbo\", endpoint=endpoint, api_key=api_key\n", + " ) # set the deployment name to the value of your chat model\n", + " kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=azure_text_embedding)\n", + " kernel.add_service(azure_chat_service)\n", + " kernel.add_service(azure_text_embedding)\n", + "\n", + "# Configure OpenAI service\n", + "if selectedService == Service.OpenAI:\n", + " api_key, org_id = sk.openai_settings_from_dot_env()\n", + " oai_text_embedding = OpenAITextEmbedding(\n", + " service_id=\"oai_text_embed\", ai_model_id=\"text-embedding-ada-002\", api_key=api_key, org_id=org_id\n", + " )\n", + " service_id = \"oai_chat\"\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=service_id, ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id\n", + " )\n", + " kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=oai_text_embedding)\n", + " kernel.add_service(oai_chat_service)\n", + " kernel.add_service(oai_text_embedding)\n", + "\n", + "kernel.import_plugin(sk.core_plugins.TextMemoryPlugin(), \"text_memory\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "e7fefb6a", + "metadata": {}, + "source": [ + "At its core, Semantic Memory is a set of data structures that allow you to store the meaning of text that come from different data sources, and optionally to store the source text too. These texts can be from the web, e-mail providers, chats, a database, or from your local directory, and are hooked up to the Semantic Kernel through data source connectors.\n", + "\n", + "The texts are embedded or compressed into a vector of floats representing mathematically the texts' contents and meaning. You can read more about embeddings [here](https://aka.ms/sk/embeddings).\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "2a7e7ca4", + "metadata": {}, + "source": [ + "### Manually adding memories\n", + "\n", + "Let's create some initial memories \"About Me\". 
We can add memories to our `VolatileMemoryStore` by using `SaveInformationAsync`\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "d096504c", + "metadata": {}, + "outputs": [], + "source": [ + "async def populate_memory(kernel: sk.Kernel) -> None:\n", + " # Add some documents to the semantic memory\n", + " await kernel.memory.save_information(collection=\"aboutMe\", id=\"info1\", text=\"My name is Andrea\")\n", + " await kernel.memory.save_information(collection=\"aboutMe\", id=\"info2\", text=\"I currently work as a tour guide\")\n", + " await kernel.memory.save_information(\n", + " collection=\"aboutMe\", id=\"info3\", text=\"I've been living in Seattle since 2005\"\n", + " )\n", + " await kernel.memory.save_information(\n", + " collection=\"aboutMe\",\n", + " id=\"info4\",\n", + " text=\"I visited France and Italy five times since 2015\",\n", + " )\n", + " await kernel.memory.save_information(collection=\"aboutMe\", id=\"info5\", text=\"My family is from New York\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "9d5f294d", + "metadata": {}, + "outputs": [], + "source": [ + "await populate_memory(kernel)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "2calf857", + "metadata": {}, + "source": [ + "Let's try searching the memory:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "628c843e", + "metadata": {}, + "outputs": [], + "source": [ + "async def search_memory_examples(kernel: sk.Kernel) -> None:\n", + " questions = [\n", + " \"what's my name\",\n", + " \"where do I live?\",\n", + " \"where's my family from?\",\n", + " \"where have I traveled?\",\n", + " \"what do I do for work\",\n", + " ]\n", + "\n", + " for question in questions:\n", + " print(f\"Question: {question}\")\n", + " result = await kernel.memory.search(\"aboutMe\", question)\n", + " print(f\"Answer: {result[0].text}\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "7a170e0d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Question: what's my name\n", + "Answer: My name is Andrea\n", + "\n", + "Question: where do I live?\n", + "Answer: I've been living in Seattle since 2005\n", + "\n", + "Question: where's my family from?\n", + "Answer: My family is from New York\n", + "\n", + "Question: where have I traveled?\n", + "Answer: I visited France and Italy five times since 2015\n", + "\n", + "Question: what do I do for work\n", + "Answer: I currently work as a tour guide\n", + "\n" + ] + } + ], + "source": [ + "await search_memory_examples(kernel)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "e70c2b22", + "metadata": {}, + "source": [ + "Let's now revisit the our chat sample from the [previous notebook](04-context-variables-chat.ipynb).\n", + "If you remember, we used kernel arguments to fill the prompt with a `history` that continuously got populated as we chatted with the bot. 
Let's add also memory to it!\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "1ed54a32", + "metadata": {}, + "source": [ + "This is done by using the `TextMemoryPlugin` which exposes the `recall` native function.\n", + "\n", + "`recall` takes an input ask and performs a similarity search on the contents that have\n", + "been embedded in the Memory Store and returns the most relevant memory.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "fb8549b2", + "metadata": {}, + "outputs": [], + "source": [ + "async def setup_chat_with_memory(\n", + " kernel: sk.Kernel,\n", + ") -> Tuple[KernelFunction, sk.KernelArguments]:\n", + " prompt = \"\"\"\n", + " ChatBot can have a conversation with you about any topic.\n", + " It can give explicit instructions or say 'I don't know' if\n", + " it does not have an answer.\n", + "\n", + " Information about me, from previous conversations:\n", + " {{$fact1}} {{recall $fact1}}\n", + " {{$fact2}} {{recall $fact2}}\n", + " {{$fact3}} {{recall $fact3}}\n", + " {{$fact4}} {{recall $fact4}}\n", + " {{$fact5}} {{recall $fact5}}\n", + "\n", + " \"\"\".strip()\n", + "\n", + " req_settings = kernel.get_service(service_id).get_prompt_execution_settings_class()(service_id=service_id)\n", + " req_settings.max_tokens = 2000\n", + " req_settings.temperature = 0.7\n", + " req_settings.top_p = 0.8\n", + "\n", + " prompt_template_config = sk.PromptTemplateConfig(\n", + " template=\"{{$user_input}}\",\n", + " name=\"chat\",\n", + " template_format=\"semantic-kernel\",\n", + " input_variables=[\n", + " InputVariable(name=\"user_input\", description=\"The user input\", is_required=True),\n", + " InputVariable(name=\"chat_history\", description=\"The history of the conversation\", is_required=True),\n", + " ],\n", + " execution_settings=req_settings,\n", + " )\n", + "\n", + " chat_func = kernel.create_function_from_prompt(\n", + " plugin_name=\"chat_memory\", function_name=\"ChatWithMemory\", prompt_template_config=prompt_template_config\n", + " )\n", + "\n", + " chat_history = ChatHistory()\n", + " chat_history.add_system_message(prompt)\n", + "\n", + " arguments = sk.KernelArguments(\n", + " fact1=\"what is my name?\",\n", + " fact2=\"what is my favorite hobby?\",\n", + " fact3=\"where's my family from?\",\n", + " fact4=\"where did I travel last year?\",\n", + " fact5=\"what do I do for work?\",\n", + " collection=\"aboutMe\",\n", + " relevance=0.6,\n", + " chat_history=chat_history,\n", + " )\n", + "\n", + " return chat_func, arguments" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "1ac62457", + "metadata": {}, + "source": [ + "The `RelevanceParam` is used in memory search and is a measure of the relevance score from 0.0 to 1.0, where 1.0 means a perfect match. 
We encourage users to experiment with different values.\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "645b55a1", + "metadata": {}, + "source": [ + "Now that we've included our memories, let's chat!\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "75267a2f", + "metadata": {}, + "outputs": [], + "source": [ + "async def chat(kernel: sk.Kernel, chat_func: KernelFunction, chat_history: ChatHistory) -> bool:\n", + " try:\n", + " user_input = input(\"User:> \")\n", + " print(f\"User:> {user_input}\")\n", + " except KeyboardInterrupt:\n", + " print(\"\\n\\nExiting chat...\")\n", + " return False\n", + " except EOFError:\n", + " print(\"\\n\\nExiting chat...\")\n", + " return False\n", + "\n", + " if user_input == \"exit\":\n", + " print(\"\\n\\nExiting chat...\")\n", + " return False\n", + "\n", + " answer = await kernel.invoke(chat_func, user_input=user_input, chat_history=chat_history)\n", + " chat_history.add_user_message(user_input)\n", + " chat_history.add_system_message(str(answer))\n", + "\n", + " print(f\"ChatBot:> {answer}\")\n", + " return True" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "e3875a34", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Populating memory...\n", + "Asking questions... (manually)\n", + "Question: what's my name\n", + "Answer: My name is Andrea\n", + "\n", + "Question: where do I live?\n", + "Answer: I've been living in Seattle since 2005\n", + "\n", + "Question: where's my family from?\n", + "Answer: My family is from New York\n", + "\n", + "Question: where have I traveled?\n", + "Answer: I visited France and Italy five times since 2015\n", + "\n", + "Question: what do I do for work\n", + "Answer: I currently work as a tour guide\n", + "\n", + "Setting up a chat (with memory!)\n", + "Begin chatting (type 'exit' to exit):\n", + "\n", + "User:> where do I work?\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Something went wrong in function invocation. During function invocation: 'chat_memory.ChatWithMemory'. 
Error description: 'Parameter chat_history is not a valid ChatHistory object.'\n" + ] + }, + { + "ename": "KernelException", + "evalue": "(, \"Error occurred while invoking function: 'chat_memory.ChatWithMemory'\", ValueError('Parameter chat_history is not a valid ChatHistory object.'))", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "File \u001b[0;32m~/workspace/semantic-kernel/python/semantic_kernel/kernel.py:307\u001b[0m, in \u001b[0;36mKernel.invoke\u001b[0;34m(self, functions, arguments, **kwargs)\u001b[0m\n\u001b[1;32m 306\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 307\u001b[0m function_result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m func\u001b[38;5;241m.\u001b[39minvoke(\u001b[38;5;28mself\u001b[39m, arguments)\n\u001b[1;32m 308\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m exc:\n", + "File \u001b[0;32m~/workspace/semantic-kernel/python/semantic_kernel/functions/kernel_function.py:479\u001b[0m, in \u001b[0;36mKernelFunction.invoke\u001b[0;34m(self, kernel, arguments, **kwargs)\u001b[0m\n\u001b[1;32m 478\u001b[0m arguments \u001b[38;5;241m=\u001b[39m KernelArguments(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m--> 479\u001b[0m function_arguments \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgather_function_parameters\u001b[49m\u001b[43m(\u001b[49m\u001b[43mkernel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43marguments\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 480\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mis_prompt \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mCHAT_HISTORY_TAG \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m function_arguments:\n", + "File \u001b[0;32m~/workspace/semantic-kernel/python/semantic_kernel/functions/kernel_function.py:559\u001b[0m, in \u001b[0;36mKernelFunction.gather_function_parameters\u001b[0;34m(self, kernel, arguments)\u001b[0m\n\u001b[1;32m 558\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(chat, ChatHistory):\n\u001b[0;32m--> 559\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mParameter \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mparam\u001b[38;5;241m.\u001b[39mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m is not a valid ChatHistory object.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 560\u001b[0m function_arguments[param\u001b[38;5;241m.\u001b[39mname] \u001b[38;5;241m=\u001b[39m chat\n", + "\u001b[0;31mValueError\u001b[0m: Parameter chat_history is not a valid ChatHistory object.", + "\nThe above exception was the direct cause of the following exception:\n", + "\u001b[0;31mKernelException\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[10], line 13\u001b[0m\n\u001b[1;32m 11\u001b[0m chatting \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m chatting:\n\u001b[0;32m---> 13\u001b[0m chatting \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28;01mawait\u001b[39;00m chat(kernel, chat_func, context)\n", + "Cell \u001b[0;32mIn[9], line 16\u001b[0m, in \u001b[0;36mchat\u001b[0;34m(kernel, chat_func, chat_history)\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124mExiting chat...\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 14\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;01mFalse\u001b[39;00m\n\u001b[0;32m---> 16\u001b[0m answer \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m kernel\u001b[38;5;241m.\u001b[39minvoke(chat_func, user_input\u001b[38;5;241m=\u001b[39muser_input, chat_history\u001b[38;5;241m=\u001b[39mchat_history)\n\u001b[1;32m 17\u001b[0m chat_history\u001b[38;5;241m.\u001b[39madd_user_message(user_input)\n\u001b[1;32m 18\u001b[0m chat_history\u001b[38;5;241m.\u001b[39madd_system_message(\u001b[38;5;28mstr\u001b[39m(answer))\n", + "File \u001b[0;32m~/workspace/semantic-kernel/python/semantic_kernel/kernel.py:320\u001b[0m, in \u001b[0;36mKernel.invoke\u001b[0;34m(self, functions, arguments, **kwargs)\u001b[0m\n\u001b[1;32m 317\u001b[0m results\u001b[38;5;241m.\u001b[39mappend(function_invoked_args\u001b[38;5;241m.\u001b[39mfunction_result)\n\u001b[1;32m 319\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m function_invoked_args\u001b[38;5;241m.\u001b[39mexception:\n\u001b[0;32m--> 320\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m KernelException(\n\u001b[1;32m 321\u001b[0m KernelException\u001b[38;5;241m.\u001b[39mErrorCodes\u001b[38;5;241m.\u001b[39mFunctionInvokeError,\n\u001b[1;32m 322\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mError occurred while invoking function: \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;241m.\u001b[39mplugin_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;241m.\u001b[39mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 323\u001b[0m function_invoked_args\u001b[38;5;241m.\u001b[39mexception,\n\u001b[1;32m 324\u001b[0m ) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mfunction_invoked_args\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mexception\u001b[39;00m\n\u001b[1;32m 325\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m function_invoked_args\u001b[38;5;241m.\u001b[39mis_cancel_requested:\n\u001b[1;32m 326\u001b[0m logger\u001b[38;5;241m.\u001b[39minfo(\n\u001b[1;32m 327\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mExecution was cancelled on function invoked event of pipeline step \u001b[39m\u001b[38;5;130;01m\\\u001b[39;00m\n\u001b[1;32m 328\u001b[0m \u001b[38;5;132;01m{\u001b[39;00mpipeline_step\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;241m.\u001b[39mplugin_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;241m.\u001b[39mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 329\u001b[0m )\n", + "\u001b[0;31mKernelException\u001b[0m: (, \"Error occurred while invoking function: 'chat_memory.ChatWithMemory'\", ValueError('Parameter chat_history is not a valid ChatHistory object.'))" + ] } + ], + "source": [ + "print(\"Populating memory...\")\n", + "await populate_memory(kernel)\n", + "\n", + 
"print(\"Asking questions... (manually)\")\n", + "await search_memory_examples(kernel)\n", + "\n", + "print(\"Setting up a chat (with memory!)\")\n", + "chat_func, context = await setup_chat_with_memory(kernel)\n", + "\n", + "print(\"Begin chatting (type 'exit' to exit):\\n\")\n", + "chatting = True\n", + "while chatting:\n", + " chatting = await chat(kernel, chat_func, context)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "0a51542b", + "metadata": {}, + "source": [ + "### Adding documents to your memory\n", + "\n", + "Many times in your applications you'll want to bring in external documents into your memory. Let's see how we can do this using our VolatileMemoryStore.\n", + "\n", + "Let's first get some data using some of the links in the Semantic Kernel repo.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c3d5a1b9", + "metadata": {}, + "outputs": [], + "source": [ + "github_files = {}\n", + "github_files[\n", + " \"https://github.com/microsoft/semantic-kernel/blob/main/README.md\"\n", + "] = \"README: Installation, getting started, and how to contribute\"\n", + "github_files[\n", + " \"https://github.com/microsoft/semantic-kernel/blob/main/dotnet/notebooks/02-running-prompts-from-file.ipynb\"\n", + "] = \"Jupyter notebook describing how to pass prompts from a file to a semantic plugin or function\"\n", + "github_files[\n", + " \"https://github.com/microsoft/semantic-kernel/blob/main/dotnet/notebooks/00-getting-started.ipynb\"\n", + "] = \"Jupyter notebook describing how to get started with the Semantic Kernel\"\n", + "github_files[\n", + " \"https://github.com/microsoft/semantic-kernel/tree/main/samples/plugins/ChatPlugin/ChatGPT\"\n", + "] = \"Sample demonstrating how to create a chat plugin interfacing with ChatGPT\"\n", + "github_files[\n", + " \"https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/SemanticKernel/Memory/Volatile/VolatileMemoryStore.cs\"\n", + "] = \"C# class that defines a volatile embedding store\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "75f3ea5e", + "metadata": {}, + "source": [ + "Now let's add these files to our VolatileMemoryStore using `SaveReferenceAsync`. 
We'll separate these memories from the chat memories by putting them in a different collection.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "170e7142", + "metadata": {}, + "outputs": [], + "source": [ + "memory_collection_name = \"SKGitHub\"\n", + "print(\"Adding some GitHub file URLs and their descriptions to a volatile Semantic Memory.\")\n", + "i = 0\n", + "for entry, value in github_files.items():\n", + " await kernel.memory.save_reference(\n", + " collection=memory_collection_name,\n", + " description=value,\n", + " text=value,\n", + " external_id=entry,\n", + " external_source_name=\"GitHub\",\n", + " )\n", + " i += 1\n", + " print(\" URL {} saved\".format(i))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "143911c3", + "metadata": {}, + "outputs": [], + "source": [ + "ask = \"I love Jupyter notebooks, how should I get started?\"\n", + "print(\"===========================\\n\" + \"Query: \" + ask + \"\\n\")\n", + "\n", + "memories = await kernel.memory.search(memory_collection_name, ask, limit=5, min_relevance_score=0.77)\n", + "\n", + "i = 0\n", + "for memory in memories:\n", + " i += 1\n", + " print(f\"Result {i}:\")\n", + " print(\" URL: : \" + memory.id)\n", + " print(\" Title : \" + memory.description)\n", + " print(\" Relevance: \" + str(memory.relevance))\n", + " print()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "59294dac", + "metadata": {}, + "source": [ + "Now you might be wondering what happens if you have so much data that it doesn't fit into your RAM? That's where you want to make use of an external Vector Database made specifically for storing and retrieving embeddings. Fortunately, semantic kernel makes this easy thanks to an extensive list of available connectors. In the following section, we will connect to an existing Azure AI Search service that we will use as an external Vector Database to store and retrieve embeddings.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from semantic_kernel.connectors.memory.azure_cognitive_search import (\n", + " AzureCognitiveSearchMemoryStore,\n", + ")\n", + "\n", + "azure_ai_search_api_key, azure_ai_search_url = sk.azure_aisearch_settings_from_dot_env()\n", + "\n", + "# text-embedding-ada-002 uses a 1536-dimensional embedding vector\n", + "kernel.register_memory_store(\n", + " memory_store=AzureCognitiveSearchMemoryStore(\n", + " vector_size=1536,\n", + " search_endpoint=azure_ai_search_url,\n", + " admin_key=azure_ai_search_api_key,\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The implementation of Semantic Kernel allows to easily swap memory store for another. 
Here, we will re-use the functions we initially created for `VolatileMemoryStore` with our new external Vector Store leveraging Azure AI Search\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "await populate_memory(kernel)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can see that our function created an \"About Me\" index and that our five pieces of information have been indexed (note that it can take a few minutes for the UI to reflect the document count and storage size).\n" + ] + }, + { + "attachments": { + "image.png": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAhoAAAE6CAYAAABQ/fuNAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFiUAABYlAUlSJPAAACvLSURBVHhe7d0JfBXVwf7xB7HGqgF5BRVIVYJUAraCKCDFWMFYNFEwgAraPyBWxAW0IrSyVCVYWVrBhUVBaAu4sNQlvFDCItFqqEboWyG4JAGNgiQKJFTBQu9/ztxzk5s9wRwSwu/7+QyZObPcuffOvfPMOWcuDQIeAQAAOHCC/QsAAFDjCBoAAMAZggYAAHCmloNGpubc0FbtYhI152NbBAAA6o0SQYMTPwAAqDk0nQAAAGcIGvXA9u3b/QEAgLqGoFEPrFu7VuvXrbNTAADUHQSNemD9+nVa/8Z6OwUAQN1R5aCRMqqok+jBrGQlDY1XlwtNWVt1iR+spBU5dsky5KVp/qibFds5uHy7Tt01cNR8bdxj55fnYKaSJw1WQmi9CzsrYehEJWcdtAsY+Uq+v7M/v++sTFsWJn2yEsy6N0xXxmFbFhLar04l9ivPzi/moLJem6gh13dXR7OsGTqb5Zcqyy5RG/Ly8rR//37l79vnjwMAUJdUu0bjwEezNfD6UXo9P0pX9+6n2AsbqSArTYtHxWvk8ny7VJht3vK/GKypKzarIKKDYhP7qW+XKOWsn6whg6ZrU8mTf8i2RRpyVbxGL0xTbrOuSvDWS+jSSJ+/vUijr4/X+DWhx2qkhAmjFdtQynhmipYVCy+ZmvP4fC8ItNOw39+nGG+ZkIL1E5Xwc7NfGVKbK9TX235s9HfavMLbr1/crPnF7rrJ94JWTyWMWaSNu85UF/Mc4rsqOqLAWz7Ne5Tas2bNGsVecYUuvzxW69attaUAANQR5v86KfJJYHafCwIxbW8IzP7IFlmrHzDl3tD+Z4EHk3NtaVDu4juC83o+Hthky3yH3ghM7G7W+1lgxMuf2ULrwJbA87f9LLheyccLW6/kYx341xOBxPbevO6PBjYcsoWez/4yyN/W5Y+8YUsCgfyXg/uVOGOLLbFyFgYGm220vyEw/V8HbGFQbvIDgcvNPg1aGCjc4/ceD8SZsltmBTLDHtPI/fs7gUw77tprr74a+NXtQ4sNV13VM/Deu+8GNm7cGIjzxkvON+sAAFBbql2jET34aU2Jb2qngpoOGK4hLb2RzzdqY1g7Qt4L87U4T4rsP0kz+kfZUiuinYZMu8+viSgptF700NKPFXHhfRqZ2Mhb6HWl/t0WeqJuTdK4LmbdGcHfAClI0cQnNkht71TS3e2CC1mp82Zo42Ep9ncLNPLCCFsa1DT+Pg3p5I1sTFHqrmCZcnNlGoYifxyj6BL727RbV0Xbcdeuu/56XRufoB07dqh3nxs0bNidevz3j6tDx466+OKL9Xtv3JRdf31vffbZZ4pPSPDXqUl79uzRvffcrb1799oS6YXFi/Xiiy/aqbKXAQAcn6oZNBqpa+cOdjxcB8X8xPzNUW5YV4209DTv3yj1631FsKCkJh3VsY0dL5SvDW+b9drput5lPZbU8ScdvX/ztWlLeKNFlAaONc0jWzVj0nwlz5mu5D2lm0ykzdqYmi81TFBfE1hKiVLHDiYUpWnz5mCJvMfr4m2j4OUpGv/aVh0sr7nnKOjTp4+emTlTixb+RXlffaVOl1yihg0b6sQTT/THTdmLL76gp55+Wr1797Fr1awGJ5xgasLslFViusxlAADHnWoGjSg1a25HS4jwT+b5yv/Gn/Rk6nP/px1aK/rHfkEV5Spvp/nrBYbrbafLEkOXCRv8JUtpY2svNk7W6HmZihmepJFt7bxCucr93PtzOFkjbWfWksPAeSU6tra8RRN/F+c9+0wtG5Oojp2v0pBJi7S5lvpenn9+G82dO09PP/Wktm3LsKXSli1b9IwXMJ59bq6/jAtNmjTRk08+5f8NGTBwoG4eMMBOlb0MAOD4VO2mk+o7SRGn2NFqiQp2uqxgiG3T2C5bJOqcloq04zq5eLNIMU3aKa6MbYYPHcJCVVT/p7Q6fYVmjEhQTMMcbVw4UQMv764hC7faJY6yBg307bff6sc/vsBvSjFDTEyM9u8v8GY1sAsBAFDL/J4ahSrrDFq6PCTUWfTBlbbAbKufKbs2MD3DFpWyJTD9upLbzQ0sGmTLqtvL8uslgWGmk+egRwPjzPNoX9b+vhGYeJmZNzaw2pZU34HAZxseD3ZKbfuzwMQ0W3wUrVu3NvDb34wJpKWlBX5xdZw/mA6hY0aPDqxfv94uBQBA7XJYo9FaXbqYvg6ZSkkOdXYoIf11pZT6z9uaqt1Frb2/W7VhfQW/zVFKjhbfP06ph9tp2G/Ga+K4IYo+vFUzHppd4ncuOirGdPY8vFqpR/wbVxGKih2jpDvMfuZp0/tH/wbX1A2p/u9mPPHHP2jOnGf9wYx//fVXevPNVLsUAAC1y2nTSYfe/RTj/c1aMFZJa0p0aMhL1uj75iurjLtOOtw40O98ufmJEZr6dumOEOYHw8aPMr+PUSTr2RFK2mjuVJkQ7JfRaYxGD2gqfTBdDz4bHgQaqW//W7w4k69lE+7V4mI//hWU9/Zs3fl4ip3yplOTlfp56eXyC4JlUWc18/8eLV5AVGrqBv3onHP0578s1LnnnecPf/rzX/yyN9avpyMmAKBOaGCqNey4x/w38fGasa2dRr62XMPC+hOaXwYduaJ0eUhwvpTwxDZN6WULPRnP3qyBT2yWOSVHRnfVFeaOjl2btGFjphr3vk+xW6drcRmPV7BqlOLvT5aJGRFNO6hL7Plqplxlvr1Jm3flSxfep+QldwZvLd02W337TVdGk1s0+43xRbfM7knWyPhRSsn3tv/X4tvPmJWovk8G+1eE9iviYI42bdykrDxvb
xOf0tZJcf78LG/ZBG/ZyLM7qGM3ux+padpslvP2Y9mLd5a4s8Wtjz/+WNnZWbr66l/YkuJWr/6b3xk0Ovpo3XgLAEDZnHcGjbnjRaXMG6OEi5rqu6w0JS9fqg27mum6x1bode9EXl5dQGSvaUpZMU0Du7VW5Debleqtt2x5mnIaRithxFNKXmhDxuGtmjPe/Lx4U/X93cjiv8vRJEFJ91/hL1OyCSVm+HKl2v3SjuB+LVu1VflNOmrghBeV+mgwZBjNrhzo70fEv8P2w1uu76gFSj3KIcNo06ZNuSHDMPMIGQCAuqBEjQYAAEDNOQq3twIAgOMVQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAONMg4LHjtS5jxYe6Ry21Nv40W4JjzfIFW9T3Qzvhubtfez3dyU54NqXu0LrIM3R3x9N0si1DJQ58pNXL3tKu/9hpT8OW3ZUY92P90E4DQF1FjQZqVOLgdspKPFU9TmqgVqWOrkM6+YwfKHvtp2o3ZbuW59aZjFu3FXylfRExumbwbfp/ZoiP0Un7vtJ+OxsA6jJqNFCDAtq08hP1TT9BScOidfKKrVr3k+I1GkEBZb+Vpb4p/9UtvzpfD0Q1sOXw5b6jJSsy9K2drJLTvCDS7zI1s5MAUFcQNFBztmxXu6UBJf26lRIjg80oZQeNoL3p2eqW3EBPjz9PPahbK86EjQ3SzysLD1VdDgBqCV/vvmzNGzBA8z6xkyWlPKROD621E8Wte6hjufMqk/3cAA18LttO1bDDB3XwsB2vzMGDduT72KdJr36ja/ud54cMI3Fw+SHDOL3TeXr6gm807vV9tsQd81p3uth7rwbM9d7t78+8787euzouN22J/rw6w04BQMXqddAInlwe0jo77UKPxzYp/bGedqruOJg6UQPvW6qcysLGp0s18uaJSv3GTh+pLXs075TTdXf76jSDNFCPuNPV5P09VXqPCsNC4VBBOCxmrebOku562XuvXrhdrWzpMcPUWix9R7kl/9rZ1ZehlQue15+LDUv09pFvsGxmPxes1FY7CeD4VI+Dhjm5tFKvXiu1NsUWHUcirkzSgvg0jawobJiQMTJNCc8nKfYUW3aENmUd1NmtT6v+SfyM09Tt1IN6J8dOV+KC4cuV/r4XGMwwuZVmjq9qDUUrtTrfjsLTXJeEOpf6HUyj9PmK57UkbbedDwA14ygHjX2atzBb95Qz/CHjsLIzvixzXnDYqU12S5VKWatVvXpq0pXXaNX60k0b4VfHA5/LsqUhazW28Kq54hqR8Cp0f5sPrQ02p5R1xf3JXA0MbbeMKvxiV+yh5hh/nbDtmGacKtbSRPaaVn7YKAwZ0xTXxJZ9D9lfHdLFLRrZqepopJgzD2nnl3ayOuJ6qteHWYWvY/mv3yit0ko96JWb9yr4Ps3VvAFm2dBrGf6eF3/fit7Pks0lWXYbpdc55jS7TP3jY6Rt6UU1EH6NRKjGo4KaibKW27ZSf/Y7tO7Ue155UYAJr01xUIsCoM6ptzUa69avVK8rewZPRqu8k0r4ScA7Wfdb11NL7ZXxJM3VzMLffjD9NUZJk+1V8/s9tXbMSjuvClaN0torg+suHa6wK27vRHbjWvU01fdmuxOlubO2+XN8JfZpqkYFT2rn365JZjvPmxOnt2/Pe89r8mPqEVyrUmWGjRoOGbUl+7m5fpj0X4sKXr/F709TL12jqV754l/ZOpdV3us50SxrXssS7/nL3nt+YzCAmFDyoKYFy8PX93w4a67dhvd4vbbZ9+gY1qy1Wp62Uzv8w9ILBCty1DLe1nh0ld4rs7mmnOXaXuPfhvtDW3PSv+uZ3rK79bY3T13tsn4tCk0rQH13lINGYw29tZWeLmd4IKahWsWcVea84NBcHe2WKuRdxc5ddY16xpmJnurpnQTWri+87g2erG8raqtv9avHdNcFdiLlOc3U3brdX9foqUmTr7HjVeCd2CfZdVt5QecCe8UdPCnerqGh6ns/QLS1E6X3qceV1+jD7GBNi79/2V5Yes7bt1ZF26+q8LCRleUmZLQ640S9/0W+naqOfGXsPlHNz7KTlfhwVmJh7cJYPWb7x1T8+pUp/L0o+Z6f7x0zF2Qr2wunrc7ztpjtjdtZ4S4Y/ljhNszjlbfcseNMnW4rpXLTPlBuiwvVLXQrS9vz1Gz/3lJBo6rL+bal6xPF6JLQYe8Hm/3aS60GUK/VyxqN7PVr9WHoStfT47a7pXVrw04CbRVddGFaWqtW1e9rUJbzi2/nglbRdqxsq8YUVdF3MrUohSeuVhp6WyvNnCVNPcKOp6GwMXiQm5qMjtER2pW5v/on2q/26+1/R+iyKDtdicI+Gl74+3DWc8WakMp//argw2fUL7TuxYma+eE2ZZmV4x7T0h5rg/MqumPFe29DWfXYtVt78yPV+Aw7+cU7tonDDKY2o5xQUNXljP3hTSfJ+mR/gfZ9ZecBqJfqYdDI1rp12/wmjMKTzo3P6EPvRDK3sFOoPYkUylJW2M9mlzxBZW+v8umqQiWvsLOzw5pOvPDj3xVhq+j9ofAOCXPFnq1evbL14BHeSmuYsJH6hqPmkvZNNPSbvXpmS3V+liWgdSl7tefiJlVuCirkBYCpvVZqbmGfiYpevyrwXpti63pDYc3Ur17wp/3A8T1e/zrPr3GIUhtbO/HDtglFnUX9oX9RzUWYqi7na3FZiWVv0zWhGg4A9VL9Cxq2GjzUVh8alg5vazuFtlKPHt7480VXp36zhh0PdjAMDyXm7pXwQHBk/GaU8L4ifvOOHff3Kbw/R3HZzz2kma1u16THbi/d36S6Gtq/Na6xxvY+Rf+7dLuWF9iiSuxN3657PjxFSdc1tiXV0+OxaWo16yHv9aj49auU349nlMZWcndSRc0oxzzTeTNtv86/IvjDX81aR3llGyrtrFnV5XymWeWLd7Ty+3+cABxD6l3QMJ1AL+jRs9SVbPiJ3lyhTm1VVFU+VrcX9dEwfTJevlvZhdXwa9WzOn00ymM6JZrbMW+02x0v3V7YR6P0Pvn7ZU58ppPjrFa2ycT0FzHbcPvbIEes/bla1PmQRs3I1LwvK/oBD/MT5Jnq8dohDR167vf4VdCe3mvoBQzv9cgu7/WrkpLvuTfYZpJid7KMkabWxu9wmDtCzC9/lvxrZx+Z4N0ghU0eH5yua8JrIsxjdD1Nn6wIW6asH+mqaDlvXvsW4XedxOia+BjtTwtb9nv9HgiAY0EDfoIcNcsLEe/u0O3J30htTtfY2P9Rt6iTdbIJEwcO
aFPm15q3cq/+97+naNqQc5XYjP/npEzmltE3Duvy/t11ti0q05dvacmbDfkJcgB1FkEDbhzar+V/y9OiLd/o7T0B7TJlJzRQjxan6NpuTflv4iu1W5uSV+tfed/Z6XKccJL+56KrlXCRuX0UAOqeOhU0AABA/VIP7zoBAAB1BUEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4EyN/zJoenq6HQMAAMc7foIcAAA4Q9MJAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcKbGfxn0zTdTteSll1RQkG9LAKBuiYxspP433aTLL4+1JQBcqfGgMeKeu3X/rx9Qq+hoWwIAdUt2Vpae+OMf9OTTz9gSAK7UeNOJqckgZACoy8x3FLWuwNFBHw0AAOAMQQMAADhD0AAAAM7UmaBx4MvduuePW3Vx0jZv8P7+MVvPfHjQzgUAAMeiOhE09qZnq9sL3ypx0AV6f1xbb2in94c21oE123VPutuwkfrkLzXkybf98R1LxhSO16y39cSgMXrpUzsJAMBxovaDxn+/UlKKlHTnuepxRkNb6Gn8P3pg+Nk6PSVHiw/YsiO2Qy/9xgsUg8KG3yz1SqXYEX/R/BHdgosVE1zniTQ7CQAAqq32g8a/8rWp/Rm69mQ7He6Exrqn83+14r3vbMH30UK9Jnqh4k92eLyfzrVzAACAG7UeNDJyvlNMi0Z2qrSzG5+oXftqImiUzTSdjF5i6jbCmaaOcVq1U/q/WUW1H8HyUK1IWFPIp0s1etAMvWSaXrx55daCfGGWK16jElo31V8gqLwmnFC539xTch88ReVh2/cE11taVKvjzwur5Qlb1vCXD23HSVMSAOB4UetBIybqJL2/Y5+dKi3760Ne2DjJTh0t3XT/n5LUq7n00+Gh2g9zYp4lmWlTIzLxUqWPDw8I7yldd/nz7u9qi4r5Qqteke62NSpDWryqh81J/JxL1am5t25hONmhtHSpV5+ymnM86bOU3jm4jYcTpFUzi0JQuoYH983su17VM+EBKv1d6a6ieQ8PmllsunDZtBl6OP1SPRzaT80qI4gBAFA1td908pPG6vZRnpYX2Olw//1az6SfqL6X1ETQ8E704+1V+pFcqae9olXqrcRQiPADwhfaWVijcIkS+lfUGNNCve4qaq6J7dNbzbyTf6pX0rVTC+38zJ7MP33XCwyXqus5wclSOg0vDDLndrlUzXZ+boOGF44K+5oEt1lMp966yd+mnVdiOtfbjgk5L73ynn7aJ2w/O19i5wEAUH21HzROOFU9ov6joc9ka3HOIVtob3edtlsH4loosaz+G9VWoo9GmR1AK7HT1ASEwoppWvlCX3xh51XXOS3U3I6awGBqHExg2LHxXS8EXFp4oq9Q2DaM8CaPh5OPdMdsc1Hoec56z8toX9gwAwBA9dRy0Diod5Z/qmdOOUsZQxtp55rs4G9oeEP80m/VbVAbPd0pwi5bB3QKNU0UDWU3k1TBp19oZ/OWwUBxTj8ltHhXaZ8Gm006dalSzCjGhIyHd/Yu3K+HE0rUaFRZiUBmBjrOAgCOUC0GjWDISFJTLUpsorObnaEHBrcJ/oaGN6y9+1wNPCvsdtfa1vVS/TR91ve43dX00Qg11+zQSzNfLVZzEdu5hdJfeUXpLUJNGtWzY+cXauYFFzvlBZYjqdEwzSjh/T6KK+o4G3brbxmdWQEACKmloFE8ZJxuS+uWc3VTn0vC7jrppvsn9tbO8GaFEndrVKyFejV/1647TqtaDNeU8D4dXpBpnv6emnc+giYdT+yI4WqePM5uf6a+aHFkNRrn9p8c7Kgaeo7ewG+JAACOVIOAx47XCHNiMtXt5TsWQkZtMLfOvqtOfxqpWFsCwJ3Kv6sA1ISjXqOx960cQkYZdix5VTsT+hAyAAD1ylEPGqd3b60VhIwifh+HX/q/XXF3hbfHAgBw7Knlu05g7jiZwp0dAIB6iqABAACcqfGgERnZSNlZWXYKAOoe8x1lvqsAuFfjd528+Waqlrz0kgoK8m0JANQtJmT0v+kmXX453a8B12o8aAAAAITQRwMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAAAM4QNAAAgDMEDQAA4AxBAwAAOEPQAAAAzhA0AACAMwQNAADgDEEDAAA4Q9AAAADOEDQAAIAzBA0AAOAMQQMAADhD0AAA4KjKUcq0cZqzPt9O1291IGhkas4NbdUuJl5T022RlTUr0SsfpRQ7DdSe0HFaNHS58maNnpemvMN2kWNenlIm3KzYCRV/4g5mJStpaLy6XGhfi87xGv1anp3rzuZ5g5UweLay7DTqt4KN8zX65u7qaD9vHS+fqFR/TtWO07qg/OeQo42vLtWyjVv9qfquDtVoZGr+hMnaXG++tFEvXTVeqevf0rI503TnVd7J74nBir15tjLqxXG7T1n/2qy8b+xkGQpWjdN114/Ssq9jdNvYaZoyeZrGDYmRcvfZJdzJzUhTlvuHQV3w+SKNGDpZaaf205Qlbyl1yRyNuDLSzqz8OK0TKnwOXTXuzW1a/ZuudrqeC9S6TwKz+1wQiLnjnsCIyy4IJM7YYssDgcyZNwRi2j4QWO1P7Qts+OOgQPyl3rJtveHSGwIT1+3z5wQ+mhVI9MpGzFgYGHb1T735Pw3EjV8dyM0ITV8QiLtjYWDroeDigUO5gdVJNwQ6t/e20/7SQOL41wOfheYBZbLH6QPBozEkf+UDgcvNsbfMHovm2Jo6KBB3cfA47XztHYHZaXae8fU7gdnDrw0ee+Y4nfqOV1h628WO/Ro8vlc/4JX1eTTwfGj+xaHPkd0Hb1uh4cGVwXUKHXojMM77jMYMXxjILe/zUtHzt8+jaLvFn3fwOd8TmP6XO4Lrt+8ZGPaX4PeBv99h+5Y48xO/HPWU97mKaXttYHqGnS5UznFa4edudeBBc8w88GhgWE9vfp9ZgcyKziee/HWPBhL9ed5n7YEnAuOKfT4PBLbOtceomT98ViDtazsrXLnPwbD75B/HwfHw52SGcSlmuSo+Vh1Xd2o0Iq9X0tg45cwapxnbbFkxuco/NU5JL/5DG99eoGEtt2rx2Bm2Gioo5c0cDZy1XLNvba2cJfcq9p40xf5hrZaNvUL7Uidq9qvB9rDUSX00clVLjVz6T216baSarRmlO5/c7M8DqiOy1626rqV37KVv9Kf9Y2tBvuKeWKHU9cs1MnqLZgwdocWfezMPb9WcOwZrxr+ideefvPkrnlRcowJ/vaqoseN729+0OTpJK16bo4HneJ+jSbO0Wa01+LkFGtLGm29rbcZfGVy80PoULdsTpSFDb1HThrashAqff5WkKHXXQM1esVzjYvO97c3QMu8lih37lsZd5c1uM0TzvX2bM6h1cHHUTz/7hfo2zdSce4ZpTmqODtpilXOcVuW4y9i4TwkvbtP
Wv96p6IrOJ6Ym4t5Fyu0yXsvWr9WMTpnaEHZOylk4TH2fyPUea402vTlHsTuna8iEpSr1SS73OZQUq/He8zDPJfVvSUo4xXyvTNNo73gv77GONXWqM2hk/CSNvtL7Mv7t9DKqolsr4Y5b1CG6kSKbdNV1V7eT9uwr9ubFDR6j2OjWir3xasV40zF979PAC5sqZsB1MhVUOV/mev+mKOXlPMUM9Oa1jVBE9C3q203KejvNbAKopkg1MrWh/oEYPLaiBk/Qg7Gt1fTsdho4dri6HE5Tyvo86e/L9PwHURo47SkN6eTNj75CD94RZ1asku97fBf2bWjTT3cOaOc//kDzOfo8Rzne5y2iaTM1MgEi4kxv35sqMiK4eEhWlmlPjlFMh+B0aZU8/yqJ05D7r1C0Wff6WG/a2zfvZBHRpKmamf1p2FjNvH1r6n0Zox6LjNPEvy7QyNaZmjHsKnX9xTDN/yD4bV/6OK3acRfV+1YlNLUTFZxPslYs08bDV2jkY7coxtt+zIDh6muCjS9TK171zhW9hnuPFeXtS9fgZ2j9ZpU6g1TwHIqLUKQ5pr1h64LpSo6I0/gJCd43S/mPdaypY3edNFLfSUmK/Xi2xj27tUQCPKis1ybrzpuv0tVXdlbCk6U70USUuMpq3cpe9TT0vnCDY9LHmdrqfalmPBlf2Klv5Aqv/FBwNlA9Ocr90vsSO7tZ4bEVE34mPvtMeXOUX7BPWds2eVc9MepwSXBWddXY8d3w5KLlfZ/r8yr0sIxu2dL7N0uZHwenS6nk+VeN91yKPc9Mfb7djuL44p1Yh83xruT/Os27yk/T1JuHlV0zVsXjLvLUUP8Io/zzSeYOb7xNjNoVLh6hkwuPSe/4/8D7s+Lews+Xv+7hA8HZJVX1OXgK1ozTuBe+U9zYSUpoYkqq+Vh1WB0LGp4m/TT1d1co45lxWpxpyzwFy0cpYcxGRd01R4tX/EPJI7xkdyTatPayrNRh1BptzdhWNPz1zuB8oBoKlizW4j2tFR/nfcl5x1Y77wspIyPsimPXbpl6hmbeVVj0OdHeWIbCZxdzuCha5+Tm2LFqquD4No/+vXTqqi7eiX/Z3JTS1cRGJc8/5KB3UggKhjSgIhFtEzTx+dGKPZymZSvCTgohVTzuwlV0Pok6y/sEeeEls/A4LfACix31PkWt23qhpf+c4p+vjGmqqG6y0uewJ0UTH1kqxU9SUnwjW1j+Yx1r6l7Q8ET2n6qJsVu9N6QoZebmBWNgZERj7yDaoGXriuZVTxfFeW/k5gXjNCc1U3m78pSxaqLmr7ezgYoc3B08ZlKTNX9ComInbFDMiKkaeZGZGae4G5sqZ8GjmuofW8H+Dxub9lPfXt6Xh99mm+OtN07JH+Qp74MUJT1rbtFrrWiTDlKe14yNecpKnVzY36L6vs/x3ViR3sdLGWlK3ZVfokbRc/YtGu19Ied5V1jxN3vbfC1Zyd6weNYojZ5nvjwref7ek2ztnRBSFkzXxl2ZSp02S8uq8TQjGnnb+HiT0rLywsIK6qU1kzVw0nylpHvHUdZmLZ62WKlqqo4XmQ9KyeO0kuOuDBWdT2K6dPUeKUVPPpKiLP84fVTzC2shWivu6nZeUJmi8a9t9T9fWenzNWNJGeGhwucQLl8pkx5R8jdXaMjtHXXQ26bZbsHBajxWHVcng4bfhDJ2vLoUVld531H9R2rghTmaM7i74h7w3q4OR1ij4W07btICPfiTXD1/V7xir+yuwU/lKCLKzgYqsmaif8z0vcv78vkoWiPmvaVlw4uOxdixL2pK4kl6/X5zbCVqxq6fa8qiJMWZaljTZrtomvqeslqj+3dX7KDJymwYvOKKuz9JCS0z/eN7wPMR6jugNo7vprru5n6K2rVId17ZWUlrbHGYmOHLtXraLWpd8LqmjvEChjfMeDlHzdoEn0eFz79hnEY8mqCorNkactUAzT+pnwYWtn1XLvbG29TllA3eFV93DfSDDeqtZmcqYuM8jb7VO47ib9aMtyPVd9qLGtfFzCx9nFZ43JWhwvNJl/GaM/YKv8kiwRynDftqYFs7zxN9x7OaMSBSGx7xLjTMd8F9f9N3Z5VRc1LhcwizcYYmrsiTvtmgqTd43wveNs0w0bs4qPJj1XENzK0ndhwAAIQ7vEHjLx+m1b3maOMEL4Cg2upojQYAALXg4/kaPWGRNmflBZs8JkzRsj2tNbA3IeNIUaMBAEDIrhSNv+8RJf8zz++nFBndVf3un6YHryq8NxbVRNAAAADO0HQCAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcqfH/vXV/Qb4dAwAAxzv+m3gAAOAMTScAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAmTodNMau3q2+Cz+zUwAA4Fhz3NZoLNq8T11nZmnvgcO2BKjYzJnPqF1M28JhyOBBKigosHMB1LSdO3fqml6/0OjRD9qSoMo+i+WtV9L76enqdllXf3slhT9GeZ/1VStXFnuM8Mc1y5v1wvezrMc5HtB0AlTDPffeq60Z2/xh/oI/KTIy0s4BUJPMSfm2IYPV9bLLbElx5X0WK1svxISBp556Uh0vvtiWFDHb2L59e7U+6yZYPPTb3+i666/XlClT/bJTTj1VCxcu8rexdt16rV+3zg83x5s6ETRME0mD327xhx89/pG27/mPnRNkmk9C801NRLjweeHrbsj+t+LmbS+ssTB/zbQpN49360s52vjZt2ryyDZ/m6a8/ROfaPCSz/1tNX44o3DZ8OkQM27KSj4u6i/zxRPdKtpOAXDprrvu1spVf1OzZs1sSZGKPosVrRfOhAETIE71wkA4Uythtj9+/ARbUjUTJz6qSzt39h+/LKeddpo/7N6925YcP2o9aJgAkO8Ne37XVoHft1fnqB/qgRW77Fxp+ZZ8JV7YyJ/3xh3naczKLwtP+CYEfJ7/n8J1H+91lm5+4bNKm0MmXX2mFt4UpS4/+qG/7i0dGvvlOfv+o5aNf+Bv655uZ+jnz273y0PT5rHNtk2ouOuVnXpt0DnVelwc+3796/v9KlBTPWq+kADUDlefxX9u3qx///vfGnHvPYVNHqaJpCKh5pPyQobx8Ucf+TUcP+ve3ZYcP2o9aJx+ckM9dX1z/69hQkW4xPaNCoPAFa1O9cPB6o//7Z/sX/FCyORrzipcN77taYqMOEH/3HnAn66uKC9k/OrSJv741W1OVbszI4pNFxz8r/Z++1899+4etW0W4e+P8bNzT/H/mnmov8wVUKgq1VSPmmpS+mgAR5/Lz2JWdpY2vf++7r13hL990/Qxb97ccsNM8uuv61//93+6//5f25Ii33iB5dZbb/HDivl7XcJ1x2Vza51oOjFNF6HmD9OkUZG23sk/xISKc08/yU4FQ0ujiIbK2XfIlrhjalpC+9xqykfK2H1QO/Z+Z+eivvvlL/+ff3VirlIA1B4Xn8Xul1
+uizt18sfb/PjHfpOHqekoS8J115UbdsL7aJhh3fp1x2WH0FoPGiZkPPX2V4XNH6ZJoyLbvBO6qWkwTA1D+Mndb4Y5eFhRjU+0Je48dGUzf39Dw76HYwprOFD/7d+/X7u//NJOAagtNf1ZPJJ+WKbJpNmZZ/rNLRXVrPS4soff/+N4U+tBY6sXHFo2+kFh88fyD/L9vyFrPtlf2CfDhJJ/5HzrN1Wc1+QHfvNFqN+EsWLbfj98XNT8ZL+m44v8Q4XNKFNTv9I/PvvWH/++TDPKn9L3FuscivrNVJv+Ydo0OyX99a/L/ascc7UD4Og50s+iudujf7++lfbnuKhDB78pJNQv4+9vvaVdu3b55RUxzTkmbJhOoeUxNRrnnXeenTp+1HrQeDD2DL9DZ6gZoqTOP/qhHybMvLte+UILb2rphwxj2a0/8kOKuXPEzDc1I6tuO9cPLWaZPu0b+R06zTzT4dRsK8T05zBCd51Uh6m5MH1DQts2Az8sVv+tWZNS2Dns3X/8Q08+9TS3twK1wOVnsXnz5po06TE9+ugj/vZnzJiu5+cv8MsrY+5Uyd292//9DFPTEt5HwwxGRR1G66sGAY8dBwAAqFF1ojMoAAConwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcKbG/6+Tr/N22zEAAHC84z9VAwAAztB0AgAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwhqABAACcIWgAAABnCBoAAMAZggYAAHCGoAEAAJwhaAAAAGcIGgAAwBmCBgAAcIagAQAAnCFoAAAAZwgaAADAGYIGAABwRPr/EuE4d8kNtv0AAAAASUVORK5CYII=" + } + }, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![image.png](attachment:image.png)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we can see that embeddings have been conveniently created to allow for semantic search.\n" + ] + }, + { + "attachments": { + "image.png": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAqwAAAJ1CAYAAAAc86LXAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFiUAABYlAUlSJPAAAJaXSURBVHhe7d0LfFT1nTf+T+y/25bHkgckqzJsSQiSgLC1w0PkqcAGFEeFhxAuLqHVRXuJJip33NB2XauSVe7iBLO1SlGByjUUlBGVlEsfjGW0G0oSTEjaZRAahA3t0nb3qfP//c75nZlzzlwygVzOJJ/363V0fufMucwl4ZPv73fOSQkKICIiIiJyqGvU/4mIiIiIHImBlYiIiIgcjUMCutR/48zRCvx03y/wmwv/Jdp/BXy+FwaOn4kH7/4qUj+nP4uciJ8dERFRZ2Fg7Sp/aca+dStwzDUb3536VaR9Xs2XQajyJazZ/3n8/T9+GyO/rGaTc/CzIyIi6lQMrF2k5ieP42cDl2JJbqqaY9O4Dd97GfjuUzMwUM0iZ+BnR0RE1Lk4hrUrXHgTO343EYXmwHPsJTzz5hnVEDJm4Lt/+wF+WnlZzSBH4GdHRETU6RhYu8Dlj34FjPw6LPW5/9kfA/v9D9XQDRwzCi2/PqFa5AT87IiIiDrfFQbWRvy44GsY6bZO39uvFie7+pcw270U76lme2v+9D/Qq1cv1VKaP8a//eY/VEO5sT/6Xf5P/LdqOt17S83fhwL8uF7O1b8r3eW70V0/OyIiIie7igprNore+BDH/Ma0AnhcBJWl76rlFEuv/9EL/51Ikrl8GZe/8HmEzulxMBlWF2NF+Pvwxu0yq3Y73fGzIyIicrp2HBJwO54RofWufS+pypr0Lr5nqsBGhlnrcq0KJ6ubBS+Zso6s0BnVOvl88fhHS63r7Le1TcxVv9k/MraqtrNfVlLVcuPY5Lbu9aIOb2GxaZ3o27kyacNvxuWaX6pWbJerPsB/D75ZtZzvrvEipBoGfxvfmijf52koqwP2yT9mTJ+rtRprrmZbP2P9vbZ+T6zvv3mZ2I78/MzfM61ablquZl+p7vrZEREROZq8SkDbnQq+NGtW8KWPVdPk3ZJbggX/eko8ks8xHuvCy6R3gku/dktw6duq+fGPgi/Jx+L/BbN+JNY2mPelr+MueUdbEny7JOiOaJcE39VbwVP/Oiu8TB2Pvj+1ndB+Io+lIOZ22sN/Bt8qXRJ8Nfwig8H/+s/gf/yneixdOhJctXhN8P+a5zmZ9t5H+06Y33ed9n6aP2O5ru2zsLzfYrn1szH2Y9+2fV3ZNh2TZT9Xqht+dkRERA7X7iddZWRk6w/2/whlKMYz38nQ28KEB4uB997Vq2z738W+u1bgmYnaIlWRU4/jykbRg6qSN/F23BXRbkSjqsa+tB7hZcjAhAnZaGwyqnNivae+LeZKt+P2u2BaZpWRLp7VKLar2levF+4qzkPzC9/Ha//Wos/6fC+kGkMjL/wS5U+9if4Pz8Vo23BJx5q4TBsG8O69spJpVMSjUZ9L6L0XJn4HRWL+e6F1TJ+pJLYd/p6IzypLPVbfsW+Hvje345ln71aPxUf2o5fEd0x8rwarGfL7UXfqKj/HbvjZEREROVy7B9bGxlo94EkZGeFQIg0WbRUYZDjMyhikz+8wtSjTApQ+zVhfi7rGU2qZVShoRyMC07YJ72KG3I5luMJV+PLXMf9finDjkeWYt+RJPLd+PV4Q03M/XIAlL3yEoQufxt9b3rwkIP7o2KTGr8rgGvtEK/E9MEKkRrQzanEq5htrPslPH2IQYv+O2e1bFPr8R7oXYZ/Ylv4HzVXojp8dERGRg7VvYK1/CS/tuxu3GxUve1WyXrSzBoUCRqzw2H7uxvLQSWFqWmaq3LVBxnc2a+trwbW9Tiz7fH/c/vDTWFO6GIUzZ+DvxVS4aBWe+6dvY9z16jnJSAbXZ+/GvgOx3id7aBTtxmwMihryZFidhlMPGp/hDhQZFVbJ9h2zV8mzHt4R/uy1aXO44no1uutnR0RE5EDtF1jlyS33epHx7DJMkG2t+9WL75lOkHnvZS8w4XYtsGaMvx1Z+xaFq3Bi/R/Lx1oVNtw93PijpdaKWsJkN/9bWNzOVy1o/+EBwud6IbVfGtLEFOpaTioiVC61Vp7fO/CWemQnP5dalP3A9Hyta/92TIgaJE/hVJ0pzNa/i3eN74P6jr0UquTK4Qa16rH+HcP6pXGGJ7SDpP/siIiInO8qAqu1u33kvafwbf+H4bGGcjyhfwUy1k8LPeeljB3YZIxplVW4N4rRKM8e19YXwVJbdDu+/TBC2/4evm2tqLXBhGU7UNRo7hKON7bSRBzbt2XYFevIM9Ibf1QQ3sbjwPLNpvGXJGTgW+NP6UMm1KRd4kqrZotlD95tuUrAhGUfYnmGN/z8lwdhW8z3VI5LzQh/135wChmh74NYZv4Oud/F7aYxrHql17SunNr5DxgiIiLqeCnyzCv1mCjpyT8uvodl4T+MiIiIKOm1+0lXRF2m/iV8bz1w+3iGVSIiou6EFVZKYvKmAfLM/7C7njUPSyEiIqLugIGViIiIiByNQwKIiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRriKwNqA8PxvDhkabpqH8Y/U06nHO/+JFPDA2G9PXN6g5yr5FUb4ralq0Xz3JKua24mh1nd/uR/miWRg3Uu17eA7GzVqDj9TimELHvwgRR1u7BtPVa5m+/oSauR9L1DzLJPY3+VtPYdPxS+p5REREFA8rrNQ+/vJn1Ox7EUvyv4px31qD98+r+VfiSraV4Dq/3/d93HnPo1i79yOc/3Nv9HMNQL9el3D+V6fRrJ7TZhdFMP3Oi6gRD/tNWocNDw/T55t8od8ADBD7GtDvC+JYL+HUL17H0zPvwQ/eYWglIiJqzVUEVvEP/Dn5/2GYu7sWJ2rM0w4U3qQ9iXqI/Y9/FdPnr8Ge2j8Dn1Mzbc6fVylShDrr90VMKybqy4REtmWX0Dofv4g5i7bh9F/6YeI/7cCHv6rCwXfewcEqsf9jz2Ccelqb/OUEyr/7KPbIlzZ8HsqfnYgv60ssJn7vHbwt9vX2oV/hxKEVmNxPzj2P7a/9TPyXiIiI4rmKwPpfuHRRPSTq1Q+3TJqHtXt3YG6MP1Yu/T7BamIC24rQ6jqXsP35Naj5CzD04X/F2oJh+II52Pb6Ar6gHrbFwWe+i7XHxYN+k7H2Xx/C0EQCtnjuAzMz9cctvxdHRkRERPFceWD9fYv6h3YQBrGa2uNN/OFhbFrxECYOih37Lv2n/o0ZmjlI+38siWzLrtV1fv829r8jH/wdZj8Q2WV/JX6/bxG+v/k88LlhmPujFZjYRy1IwOnfqvG1A/ohTX9EREREMVx5YD17Xo3524O56mSSr42dhAee2YNTf9YWEFk0nz2t/b/m+Umhk4/GzVqEV37RCZ3i73+Eg/L/2V/DgBMv4qFJOZYToPb8VntWwv5cK4cX7MF59MPkFR
tQmK0WtEaOtd38KJ7aKx7LoFs8I+oQAiIiIgq7upOuPqeftDLA1U/rTv3z+Qa8/9oiTP7mi1rXK5GdcfJRP5nS/iJPdtqD5d+aiiX7OrZj/NQpdeb+udew+FtrcPDsX+nfW3UC1JL8R7E94SEuNXhxoT68YNB3/xXP3dVbzY9tz3zjCgFfxfQf7gdGPoC1b+5IPOgSERH1YFceWG96CNuP6yetvP3OYXx4/FfYtGg0tHNJjq/Bc7KrlMhk4opafHhIP/lInuj04TvrMFsbHXAee364Vq+AdhTjD6iL4nt51wqx/8P699Y4Aeryfjy37uf6c1o1QIRP7ZuOUztexv4Egm7oKgE36OH2/LFXMPeeO/DQa8YlsIiIiCiWq6uwmn3uC7jlWxvwg0l68/2PPtQfEMXwBddEfH/NPAyVjYsfoqZTrt37d5j7xGT0M06O6jcZD83Wx7T+/sManNIetebLmPjsM5gtM+v5PZj73dZ7FEJXCThQhRPHD+OVb4l9/uU0Dj6zGGt/pZ5EREREUbVfYFUyM9vnhBbqIW7KhDpfvkMN+opLPfoyetsGjQ7KiH8SWFSf+zt8/0cibMvge3wNCh/fj9/rS1r3uX64ddHDmKw1GnDwF4nfFIGIiKgnaufAegkfnlBdnFdyjSDqeX71a4Q6xRO5JNSVusWNW7QHp3DKVsmtOSkv+S/c2MYz9rMfwoYVk7VhMOf3Poo5oTtcJeAvfwbPTSQiIkrMFQfWU7tf1C/SbvjLeby//jGs1S4d1A+zPeELwRMBR7Fp/c9x2vSV+fOpPfjB0hf1bvhbp2NSooXOj1/BbHlb1Tu+n9D4UY1rOqaPlw9OYO3SNahRx/HnU69j7Ra9wjnxjjvbfMb+l+/6J/xgmj6eteb57yZ28pj8WVn9srq9a28My+6MGjMREVHyuvIKa+Bt7TaYw3LG4M47xuDWr47BA88f1e7aM6jgGcy9otsGUff1e3z0fCHuvOWrGHfHHbhz7FfxtUmLsF2m1X6j8f1//AYG6E9s1al3foaPLosHgW3Y/74+r3W9MX3+47ill3h4/EVMH5mDceNzxDE8hYMi9Mpbqj49rfWz/SP1xsQf/ivmDpePz2PPojkor9UWWOx/Rrxm+brFz8rXhouflR/r1dhBBcuxRAvSREREFMsVB9a0m8fh1pv64Qu/P4/TgfMijvRGv69OxuIfH8aef/o7XluSbAbh1km3oN+X/4zzgdM4ff7P+EK/TNz6zRXY884GzG7D5Z0G3fF/9ODpmoGJt+rzEnLTA9jk24DF8ji+cAnnz14Sx3ALJi/agB0xbqmakM8NQ+G/rtOvNvCXE1j7nUURld8/nxevWb5u8bPyZ3k5OP6sEBERJSwlKKjHRERERESO0+5XCSAiIiIiak8MrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GhXH1jPH0V50SSMG5mNYUPFNHIM5m49rxZ2lv1YIve9aL9qdyUnHUtHaEB5vvqsQ9M0lH8MnFo/DcPyX8Qp9cx4tOfGeY9aW95l9i2yvfZF4hMP0447znJNxDayMX19g1qo2J/TFe9FJx1DW743jhJ6fxbhNctr0H9GluzTGklK/R4LTVG+xw7kzO9S+Hfm9PWvau+rk74bSfvzd4X2L4ry+1bTHX5u2+DjFzFd+9mW/37rP+/Ga3fsv7/Bq/H/KoNPjckKDnXnB7+/4WfBn1X8LPh62cLg4pfq1RM6y9vBxdniOBa+rdpdyUnH0hHqgy9OzQoufks1r1BDWX7c96i15V3h7YXic81eKD5hk7fE9129F7GWDxXfB8v7JedNXR9sUM3gyfXBaabnaK/dvFx4u8za7mhOOAZnkz/n+cEXT6qmRfv8jHQZ7ftoe21vrY/xWruW/JmbVtbZ/960jdN+lyXDe9ahtN/Jtt/Tkva9jzK/DZLnvY3/O8qJ//5KV1dhPVWDD88D4/5xA576h8mYPGUyZj+8As99K1M9gah7kH9xzm2Yhz01KzBRzdPcJb7vd4n/71uEuXsnY22U5SdWT8ae8jgVjJsewkOTxN/3jfpf/Q0NJzD0zokYpLV0Ex9+yNLuaE44Bkf7uAEN4t0YdJNqdycN9ajJvhMTza/trodQ2B1fayfQfpYy+ZPjGHd5IH4jY7+tknrqnbdRM8lj/f3dbZ1CQ+0wZCZZVLu6wPrlL6O3+N/BN15GzZ/1WRF+uwc/mDUGX5Ol55FjMPuZn+P32oJL+Oi1RZg99qt6l9PwHEwPLVMl6aHT8PSPn8J0OdxAlafP/+JF8Y97TmidhyzDD36H91fPwa3D9WUP/OsJNT+Ki/pQBuO5k4teR81fxPy//BxPjxXzJq3BR/ozcfCHY8RzCrH9Yvi41v5iP56eqR/H1zyFKH//knp2pNN7n8IDpmOeXPQi3hfb0qmut/lrsKnwDm3bsns95vEJsd6bLie7SC1dS7bhA/GO09L9HNn9KLtxQss7vfuqAfvfPoHJhbED2/539mDoYw9H/2Unf0HWvo398nNNQGbmMNS8vb9Lu+gSOQbLZ2L+zOINJVDfkXK1rtYFZfneyJ8H8TOwz+iukpP9+2DurjaeG6vLWm1PdXmF1gl9FqobcL3aX+g4bN9d8zryeKesQY34R2+uXCZfn+U1RNJ/Zo1tWY+1a7/bUWQOxtBWvq8xX496H/aHlqv3Tc43nm/7PWDdlrlLNt53Qf985u4Fap6fJOab9mN5D22fY7z313yMYgodh22+9fjbeozq+2YOS+btG++dsQ+t29b6fdHer9AxqP2vV9vQ5tu/u219z8w/K+b1JeP4Y/08xf+uO8NETJwE7HnHfGTqd/wdxm9w63vQ2pCtJftivLdRnmv5/qj33vL70Mb4vC3vayLbiLVf7Tv1qPjtdQJrp4j52mcf5XtpEfv96NTPW1Var9iJV78bnHhzVnDozbcHC57+WbDhT2qBdOHt4GI5ZGDGvwR/3tAcPLHpkeDY7KzgnFf/XSysD7688HvB139ZH2z+5NfB1x+5zdJ1qpWkZdf6jNXBE2qbl/Z/T1t/7IOrgz8T6514a3XwuTdk+V11w9/8t8GCJ98Onqh+O/jUDNk9+w/B1z/R17X4f78OviiXj3kk+Hp1c7Dh5/8SnCZew9gnK7XFxn4K32gJdddqjwX9uEYFc+55JPjiz38dPPHz9cHH5Gv83wuDb1+Sz7AOCbj01kJtW5OWvhz8eXV98MNN3wtOku/XDKN7VT0/+7bg4j3N2pzWji/ae9N54nQlWLq69eeFu0es6
1m6HOxdNOo9tyw3dU83vPV2J3dNx+v+lVrrArYtt72eaF1U+vCCru1eincM2jJTl1FD2Xr9+LXXYn6vbN8DbbntvbK8H+rnwfY9Cu9LXx5eXy2P2ZVn/HyZllveb7W++fOI2Kdg/4zs3YeW16Cvb/mum7Yf8d03L+v073YM2uu1vy+6Vl+PWM/4vLVlcjvG8ojhBuK9WmjbVmjbrX0X9O+h5fsZ5XMwL485rEU7buvn+6L8/LT5cb7PbT5Gfbnld0HE98r+fpmWC5b329i/aX9yncW2/Vneg7jvmb4983LrZ6Jen+mYtN8FxnL78Yrvs/nYHcP+vlva+nsQ/h0j26bvQJTPTPuuCFHf27b+PrQxfobC201gG63t1/6a1HLL7yzbdyzq+9HJn/dVn3Q19Jvl+NnudSgcJ/6yeG2R+AtlDjbV6stObV2PPedvweI1j2PcoH4YWvAEHroVeL9ij0j0mXhgxdOYPTIT/W4YhtmPfRNDxTpGt6iuN2Y/Og9DvyAfn8fPXtuG865vYPm/zsNksd7Qu+Zh8UxTTfumh/H0P03E0OETMffevxMzjuIjo0xqdmAT1h7vjek/XIfZw/th0LjHxfN74/wb+7W/Dr58xxL8YFo/HHz+h/jB6jWoufUH+P5MWUsOu3ORfM3DMHTcQ3j6MbGvi3tw8H21MKQBm368B+cHiec88wDGDc/ELQVP48X5twDHt2H7MfU06daHsWRSP/1xK8enM783nW/P/PBfW9a/zpWP9+Pt2sl46GHj88lEYeFk21+1uojq5E0PYfljw1RDqa0X76Zu0F3WruqkVLsGk4337x0PTtiGEkxcUSvm7cWdb9v+Wu9EMY9B/IX+ohz+sCJ8xIMefkg7fv2zXG7qPtY/d0u1NnseHpLDKGIahrnPGdVsfX00NOjr7/Nhj2V9sfw58XOgWtGJ7e02vb93PYy52dYuQUv1XH13za8v2jqJiazOD3r4YUze6wv/LDvxuy2HstTUYs+db2vf03BFJYHXIz6f5ernftAdd4rPRrz/heq9vGki7sw+IT9ORXx+K0zbks83vR9xvwut2bceaxE+Fin6sJYGlJfvweTVpu+I+B1UKL5jCX2fr+IYE/rd1yrT+yuJbTxn+r078c5hqGlI6B0L/XyZ3zPt87VV3M3v1cRC8fNn+cxO4ZTxXPF9Nh2Zc9iGBVg+h4jfMRPx0GPA2+/IVxj7uxJN+/w+FCatw3bbv6XxtpHY9zZBcd8PqfM+73a5rNUXBomAWPYOju79Acb95Sie/pfXRbwUH+1J2SX/EZbfYYSbMXhahrr/J9f6M07tfhZLvjUJd94xBl/TutjsBiDtRvUQH6Lml+J/t4zGrZ/T50TIzAz9Mvryl7+sHkU69bE8rkvYXmQcVzYe2nwJ+Muf9CeIMDhx/sO49fwebD8gPuhF3xBHYjYAA0wzjH39WXtdZqdwWr6ooTdDRNSQAS658mk0N+ttTb+/hoqrCRyfZH5vOt/k1TLMqGlnlH8E5Dg4o8vUmObvifKLXLTF9z4zI/wLMoI2DhRqW10R3gYh0/KPrJ343onDt/6xZRZlvJD4BbBHe//WiX/sH43RFSO+ezvFc1YPwtopXdW1FuUYtDGOg8USuxifpdbFbP4H7cqdahTfHtPP+ZXRP6+Yor6+1j7jWORnb/sDT+uOU7/ku/y7Hd+gh3do39HM5yep72grr8fupkz5zsUf62vuuoz678CVSfy7Ems8X0d/nxP43XeFzMNMJj8fZ2icTfT3rLXffyYivG3ffSfell3NYt+xu5i7mgxdw1QBZT/27x0mcoj+OWjvgbmgoN5DPfTH+q5E04HfH7kN9TBS++437vvRyZ93uwRWwxcGfQOTbxUPflkj4qV4f4bIvxRHY/Huwzh4wDT96AGk7ViEyY9vx6WvP4MXX9uFg1tbq5IMwgD5hJpfh8aWXqlBN8nj6ofp62zHdeCfME57xiXsX70e7w/6O4wbpFdJjbG1uku4ZBqyejpwWvy3N3r30tth0Y9Zf37sL33rx5cE5A9HKJSZpohwGz0IyBMVLFTF54T2w9HZ/7DrVYpo1WHDxDvi/PWq/YVqO4klZCKekydlzY8TSLVqQIxA0FnMxxDzF1+cUBc14F4h+x892h9HbaH/Qo8pzi/2tocL+Y+9rPDafg5qdoSrH1363U6EPuZP/1wTeD1tIcNq+eDw74ndrf07kLhBGeI3TUKVzliBrHO+z63+7msjGVZfzNwb+mz2tKFiG/s9a8MJOjLEaPteB4g/bJwaWrVqvuwZsP1+1t6DSetC719o0npc2hDeO/L7E/d3XvvuN/77IXTi5311gfWdpzC96Fm8snsP9ohp0+o5eE4cbL+8vxMxVbzQ8eIL8bmjeOVfXtOuJiBPivpw55N4/aMvoPl8QM4QKfev0PvyaWwrf62Vf3QyMSlPbPXUi1hSuAZ7jjWgZt8aLN+a0DfH6ta/w/R+57F99Rq8HWgRM1pw+sB6rD3QDNnD/vt9P8RTO/4Lkx9djuWPTha/VH+I7+81n1R1Gpue/j72HD+vHcMPNog4Omg2JkekyfAxf/97r+Dg8QZ8tPspzF0tnn/rdEyK9Qu+leNLCrLrD2uwOOr17qy0E3yeXx8ObFqXs3osnFq/KPyPuFax6Xx6t+ejkcMfxD+42g+oCB1rM8VfoVGWD5t/ytRlGIVcd9IezFWD4vcvsoVX+QtVrN2ZZ6THPQatWzd8vNKp9S9qz9eC+/OLTaGrAeVL1gC2Kw5cKb3LeA1eDP1S1Lvo4juBteXmY12sdRXH7IaL8vrk5yivAjGxta67CPKPHWDtkijDZgQnfLcjGN/pEFmBEq9EC+vxX09b2St62pna6vFV0052tP4O2i++p5HHrf4gNf/RKH4HlYv3oGO/z6q7Ps7vPv07YRqKIpYvjlsxtVfX9CEcCYvynuk/L7H+4LaxfHdkuFMPncj4OZ+/x3pFFPkexOn1ivVdiabdvj9715u2sR9LxDHHOwm4Xb+38d6PTv68ry6wpg1A79M/w4tLxUE/vghPb25G5jdXYNMPJ0LrJBfJe8OP52HY2U2YO3MMxo0vwFM//wIGDBQvbeZczB4OHHxmGsbNWoNLX/e0+pf1gG+WY/s/TUa/E69gyTcnYfo/7UfzF1LV0jb48kQ89foKTP9yJZaL7YwbPwmFrzWgnysNuLgfTz2zB+fHL8EP7uqNL981D3NvvYT9z3wPe0Jn9g/D9Hv7YfuDYzB9/os4ccM3sPYn8yzd/gZ5zHuenYHeB9fgoZmTMPtJH74wbQX2lNuHGZjEO76kIbuS9a5EoxshVpeB7Hbc85gIdcbzlkDrrjEMyhD/QKouB9n9iNVXWM25KhPxnPgrUgulxnHKqXxwKPjIMZ/GmL/Q8vni2BOoPk1coQ8NkGMFMzNN70VoG9Yxrh0t/jHIz3Yv5jaIAK+WT35bzhW0Lm45fMBYdxLevnOvafzVVZJ/zWsVaWP7iwE5hk4tjm4Y5mb61PNld5Y4vmjDWEIiX9/VfAby+x3xvVFh2BnfbZvMwWiwdfljda1+
+TYh3utpq9Afgmo7ixsGtanCKsdPwn5Wdoj8mbX+DprbEA7HZhG/g6aoL3QHf5+191L+sWrs1/a7T3sN5u97xHI78d0ttP58NGRan9/W92zy23diT9yfFxPLd0d/r4zvjfPo4VP+fjCGA+jEe7B7nu1nIPxexfyuCBHvbXt9fyaJvxKXGNt4FA2PtfK+tuv3Ns770cmfd4o880o9pgTISzhMfh6Yu9sB/7AQkfZXvtatHPUfVXk5lvXI5M8rJQmjS7/d/sijpKZljoaHw13wPVi7jmElIupceveY/SYHRElJG3pir/gRkcTASkRJxHoBa6N7jNUoSkqyd8D8fZbj3dkbQBQVhwQQERERkaOxwkpEREREjsbASkRERESOxsBKRERERI7GwEpEREREjsbASkRERESOxsBKRERERI7GwEpEREREjsbASkRERESOdkU3Dkgp+bV6RE4TLL1ZPSIiIiLqHlhhJSIiIiJH461ZiYiIiMjRWGElIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdLCQrqccKq/+3fcPDnB3D58mU1h4iIiIioY1xRYPWuW4vpM+7FDTfeqOYQEREREXWMKxoSICurDKtERERE1Bk4hpWIiIiIHI2BlYiIiIgcrVMD65/O/Q6PrDoB99O1YhL/X9UIb92f1VIiIiIiokidFlj/41gjvr75j5j2D1nwfz9bTMPg/1Yq/vROEx45xtBKRERERNF1TmD97FM8vR94+qGBmHDd59RMIbUvFj58A/7n/tPY9Cc1Lxn9dhuW/MNaHFRNIiIiImo/nRNYqy/hw5uvwz1fVG2za1LxSM5n2PvL/1IzroIWHO/DA8b0/C/UAiIiIiJKVp0SWGtO/xeG9u+tWpFuSP3/cLblKgOrDKs/+AAjn3oVr/xEnx648Qx+oxYTERERUXLqlMA6dMBfwf+bFtWK1Hjh/4nQ+leqdRVuHIXRX1GPhXEzZ2Cgegz8AqtD1dfH8dPfqtnCwedNVdl/3BYKuXL+kq3b9PVC883buQ+rj2ozdUfXhrfD6i4RERFRu+icIQEjUvH1k+ex4/eqbfbZBXiP/X+Y/r+uMrB+ZRRGogL/HDUo/gY//cf1wMOq+vrUKBz7gTHm9Bc4hodVVfZp3CW24d0arss27wlgpFz2LzL8yrBq3k4e+qvnAb/EK1WjQvPTjlVYQjERERERXZnOCazX/A9MGPDf+Ja3EZtO/z81U13masXv8KeJ/TEt2vjWNhmIv/+XV/EA1kdWOI/uwj7kYdpo1Zbh9sYz+EQLlF/H/Me+rs2W2xg9MhxBpbTJUzFOPcbRD/BvIx/G/NB2ZuDvjcf4X3jA2I7a/pkzepOIiIiIrlwnBNY/4//u+C28va5Hzbd645N3GvVrsIpp0rY/4uv/cBNeGPkF9dyrN+4xvfqpBVdT9z4+qcA/G931//B97PskHCh/s/XxUFf+P++JnTJ/8+9nkHajS7XiGYgbrbmXiIiIiK5QBwdWPaw+jX54fVof3JB2HRbOuUm/BquY3i0eiNnXmy5z1Y7GPSa79z/AUaNbfqTR7R+eZKVUhtV//iQvNO+fJ8dPms2fBNQjIiIiIuoMHRhYrWH1f6q5HeboWusJUL/9AMc+UY9Hj8LfHltvXa785hNz1fQ3OHosdoV14K2jkGbezm+34adRtklERERE7aeDAmsnh1Vp9FT032V0+YtJu8TVs/h77aoBX8f8p/LwyXrTcjVcYNxjD+PGPd9X88twpn+cCutXZuA583bEPkxnXRERERFRB0gJCupxwpY/W4rFj5eoVqT/ONyAb/yub+eFVSIiIiLqtjqkwvo/x2RiL8MqEREREbWDTrhKABERERHRlWNgJSIiIiJHY2AlIiIiIke7osDaq1cvnP3EuGYUEREREVHHuaKrBFT/27/h4M8P4PLly2oOEREREVHHuKLASkRERETUWTiGlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiBwtCQLru/ieuwA/rldNIiIiIupRrjCwNuLHBV/DSLdpKnhJzO0E+5d23r6IiIiIqMtdRYU1G0VvfIhjfn1anuHF937EGElERERE7avdhgRMGH836hpPqRbQ+KOCcPV16btqrlD/EmaHKrNL8Z4xz1I1lRXcyGEA2jYffwuo82KGWP97++VcOWTA2B6HDhARERF1N+0UWEXAfPkt3DX+dr25fylmvHc7thnVVyzCbK36KsLlve/i9lBldhkm6GskJOM7m3Hs2buBrGJt289MlMF2EfCssb3N+NZg9WQiIiIi6hauIrDWouzecKUUT8kAKeer8Prgt5GhPc9cfR2EQVm1ONVuIwcykCF20tjEoQhERERE3VW7jGFdfpcIry+buv2FfY8bYVZMshu/sVFE2Qx8a/MKQC3Tu/SvzoRlO3D7e9O07elVXCIiIiLqTtplSMCEZStw176XTONHrSdkadNmo+J6O57R5sng2h5jTmUIltvTg2t7hGAiIiIico52GsMqQuizGSj7gTxxKgMTJkA9jsc0PGBwBjLq3sV7Krw2/mgpyur0x4nj8AAiIiKi7qidAqsw8Tsoghczlr6rnRwlL3Mlz+Q3hgVEntE/De9O2KHGvd6Obz8sQq4aE/s9fBtFWXJ+FMZ+tG1arwe7GCuw6TvGyFkiIiIi6g5SgoJ6TERERETkOO1XYSUiIiIi6gAMrERERETkaAysRERERORoDKxERERE5GgMrERERET
kaAysRERERORoDKxERERE5GhXdB3WM2fOqEdERERERB3rigNr//43oqWlRc3pWrXvH8ZWZOIHt96o5nS8Q4cOY+zYMarVMf5w6BU04FaknjyFL31rMq5X83HuMH71/MdIfewBpMuZJ/bgl68fxefvmouvjk3Tn6M5gdrvvY8vGs9T5HZrT96E7G+NwbXaczbhDxiNv3lG7qMZTT9ei/Nps/G/pgxTa7Tdr7bOw8uHx+LBtdPxVTUvMedw4IVSfDpuDWb8rZpFREREPdoVB9bU1N7a45SUFO3/nensLyvxxInLqiW4bkb57QNVo3Ps3fsmJk26R7U6yi/x4f3r0DLhUeTO+V9qnu70hn9A/XuqMXQWbrxxCz7961J8/Z7+wLFXUbn2HbXQMAw3LnscWQPk4zOoKy3BJzVq/ty/xadrz+MrG++DttiyXPdXs9S2E/R/X5yKf3nrHvxjxXfxv9W8xJzGru8/gnOTd6FwtJpFREREPdoVB9brruurWj3TG29sxb33zlQtaj+NeO2he3Hm7z/AkvFqFhEREfVoVxxY09L6dUl11Sl+8pON+Id/uF+1qP3U40cFE/HvcxrxQ4+aRURERD3aFQfW66//6x4dWH/0o5fwne98W7Wo/ZyEN+9/4weHVPPBn+DCismqQURERD3RFQfWG264vkcH1vXrX8TDDz+kWkRERETUUa44sMqrBPRkL7zgxSOPFKsWEREREXUU3jiAiIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgTXZ7XwUKSnXiOlR7FCziIiIiLqTJA+sZ1G2wovBJa+i7BM1q0fZgUemeVG84zMEg+swTc0lIiIi6k6SOrA27N2PVf0GYIpq9zjHG1GDYkzIV20iIiKibih5A+snBzH/MLDAM0jNICIiIqLuKEkD61mUvV4NjJmIop59/wIiIiKibi85A+ux97EKI7B60g1qRg/1cRPeG5+OoapJRERE1B0lYWCtxoJtl7DgG+OQqeb0OMdX4nZ5ZYD94xF8byEDKxEREXVrKUFBPU7YmTNn0L9/F/XFH6vA4G2nVcMmaxzq54xQjY71wgtePPJIsWp1EXlJq3XpOMHQSkRERN1Y8lVYR+ahvrTYNI3DFPTGgsfE404Kq45xUzomHGhCjWoSERERdUfJe5UAIiIiIuoRukFgHYFVpffxagFERERE3RQrrMlseAaGogaNx1WbiIiIqBtiYE1q0/DCjqFYNOIapKQ8ih1qLhEREVF3wsCa7PLXIRj8TEzrRHwlIiIi6n4YWImIiIjI0RhYk9Dx479Wj4iIiIi6PwZWIiIiInI0BlYiIiIicjQGViIiIiJyNAbWHmkvfH99PbZ7TwK1ZdguHvveVIsMav5r2lSSBLd/PYmj06K8ju7gzRLxGczA0Vqg2TsjST4PIiKi9pOcgfWTg5hS4sVg87ShWi2kqyYD0rh/x7DfncM35bQBODatDM1qMXU3RzC/dDZ6lW2FyMRERESOk7wV1utGwFdajHpjmjNCLaDW3YTUMcC1GUOA7IG4FmOROkgtkpXKlz7G4IOlGKrm4J4HMBjvoYFppmsM+ht8SX5m2UBaxk3AmL9BP7XoqvnXoFfpB0h3u9QMIiIi5+GQgB5pCEbvOAfPPfLxJHh+tw2jRRjSvPkK6gc/EG5rhiDzdiDw7km9qXVRG8MF1NACE73bOtZwAr3rPrTcUrkNd+vXLDGeo3eFh+nDGaKvL5mXt7HrvJXXFXffliEUYlqyVy0QtGXyWEzr24477nuWXYTpv1N/QNxTim/uKEKatsDk+ErcnnINbl/elld8BPOPuuAvmQePmkNEROREDKxkUVP5MtJyJ4lH1nAFWdnTiPlzZAVWDRcQ0/TiIWqZIEKf78mbMFIt8zzxsWU4QbP3FWCZse4hDMaTOGgPvHOux4mBh7TnjJxzCPUvG+FPHtOD+MMT+jJtsoW35jmvIFU7NrHtMS/jRETojKWV1xV332LZuCdx7QZjXfEebnjQFnhfxrG/No5NLD/8JPzGeNtW3rOOcxtWF82E5W8TIiIiB0rewPppNTyhMawV8KnZdDVOoqVeHx5Qs0SFMxG8MHigraJ3KFxttdEC74bwcIK04gdEOAsPJ0grLjVVb2Xldiz++JuPVVuZ83IoLA7NFfuv/40W3mTYbR7zBMZZgqRV2gajWhxj23HFfl3x9m0sc2sVa2kSPOJ9++O771hCZ/jYJuErc4A/NOr7au09S8jwhXg3+BneXRwayEFERNRtJGdgvXEcdpvGr/rGXEIxQ2s7kWMl9+K3InANE+GsufFjfGmgUV2VRBg7+ATw5Ngo3dcy8OoV0nD39oPWSqGt69z35CG1IEyv8CqmLvDzvxHPjQjP7SXe60pg360e14P4SijQiiD+nFHBTeA9IyIi6uG6xZCATPdADFOP6Wp8jJbD4n+1v8Ef9BlaUJMnZ8ngqp2kJWljKk3d17ZwlxbqGjcmo7Kod53D1K3ueWKstk4i+g1M/LlXJM7ranXfqgpskO9XW8R+z4iIiKgbBNazKHu9GieyBvHEkaumXz0gTFZa5f9PouFdmK4kEKadtR6id8M3z7GdNGQTCr61ZTgYpcIaS9rtE/ClDQ9e3bVWjROrzCdFRWF9XfH3rS0zj0kV75tfvK60b0c5OSpCYu9Zq67opKsE7XwUKWLbKUU71AwiIqLOlZSBtWHvq6ZrsG7HvqHTeVmrdvMxWnAHXGPUSUJPPCjC1NjwlQNsZ9K/pp2oZB5/uU1VJ03PCZ1ANAnuJ2Q4U/PH/TtcbaiwahXQg0/gD+bu8/Y6OamV1xV33xHL9PG/+lUYWhf/PetITVhbNhu9SmfD7Q8ALTvhjnY91vzxKJb/r228ulBNRER0hVKCgnqcsDNnzqB//xtVq2d64QUvHnlE+2e80x0//msMH36zarUveTkpeYa+9Qx56tlqsHLCzViUvQ3BsmlqHhERUefpFmNYqf0MfVCeeLTUdO3TvfC10n1O3VfN8juQksKwSkREXYuBlaxk9/aGm1A/Lty9DfNZ+9SjDF38DoLBzxhWiYioSzGwUiR5KSnTGeuJjsUkIiIi6ggMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKw90l74/vp6bPee1G6Pul08vuLbnWrrm28r2o7bjnAeqx/djM8/K/dWg8embcadr5/XF3Up/bgeS/wus+2q9vWd+Py0/dgpHu98Vrw/j1ZZ71RFRESU5JI6sPo2GLdnFdOGajWXrs5JHJ1mukWomrQAmrRiBUo5fydWN8rHegD+vHnqlOCn79d6bNHmERER9VxJGljPomyFF8UYh/rSYn2aM0Ito9bdhNQxwLUZQ4DsgbgWY5E6SC3CEIzeIa+/egiDxXO+9MQh7Vqsid+qNd62r1Y/pA8Axg9IE4/TkOESu/hKP31RAmp/21o1th/e2FGA/zamdTnIVkucLPsr1wKuVAwVj4cO+CIw4MtJcdxERESJSs7Aeux9rOonwipD6hXSQ6l+Q4BJ8PxuG0a3JeG8WRKuvo
57En9Us3WJbHsHHkm5BilFO1Q7cfmPF+Dtb8iQ2g/z1xXg+bH6fJ0aMhBRIdWDbqRmNAbUw7hkxXM/Vmtd73IYQo3aj1GdNZirtHoXvebQfjXPNLVn9XbsxFC4zv5GPv77cRldiYiIuo+kDKy+6tMYlnYRC4zhAGJacEwtpI4lx6XO+RiDD6o7YR18Al9Si7pa7esfAguMCulEPIcGPBYxxtU8DEC6FukZ6mFc57HkdDb+e34/HNj+ERrvLcAbt/4Jb/0ivP31q2uRsVLf/xu3nse92lhbQQbKUOVWHJfri3huQXJUb4mIiJwgCQPrWTSIjHDi8EV4jOEAMwZg97ZXUfaJegp1mJqXnwSeWNa2imyEaXihA+5Pn/2NiZgfCp/9cPfoL+LA6WatJbvKtceNp/BWwBo0w0TIDFVB7dVTETLvVZVLVyaKLJVd3cPz80P7z/96P+D90+Eqq1L7+iG8NXqs6ThbE2VsrZx4YhUREfUgSXvS1ZQZefCoxxh5KxZcdwn1Z1SbeqbGKtxpCnUjtv9JLTA53YLs+bcg++gp1Db+HrVq7KeItHg+VAUV0/xrsWShPbS2wYBUjFcPQ8TxPXbUhee1IQ2Jsh2XMSXJ+FoiIqL2kISB9QZkin/v68+eVW0iqQaPLWwApoe736unf1EtUycmCTt/AYwfOxTjBwTw1mltVnRjB+Bh9fCKiGB8IBSGpfNYvSqAuyOGAugnj0X6IjK0cbessBIRESVlhdUzYgBOHH4fPtXWTsL6dAA8I1WbOky/gWPxx3ffgd7Rvhe+iJOuEnHlJ121JnTVAFnNtFdYT9di/WnjbHrgrV+06POj2PnsR1jvcuHuhLvuzUQ4feM8xo8eFAqnO5/dH2MogH5C2PpfhK9kW/u6OM7QvtunwrqjSLzf4j1/xD5GgYiIKAkk55CAkXnwjbmEYuOkq22At9Q0RICugnEd1rGoPwz88cmxluuwphUvw2A8qd0c4LW/fgWpB1+GvMhU1xuKoulfxPrVqgK5sAV3myqsmoAIsCpEZn/dBbx/HgeMS0DZzuS/F7e0ORSG9j1NhtOJ6moGgtj2ve8DB7ab9xEebpD/+EQ8d/qj0LIR26/FG+3c5T9tYrH2/5r6cDAmIiJKFilBQT1O2JkzZ9C//42q1TO98IIXjzyih4DOdvz4rzF8+M2qRZSA4ytx+4jFGLrjM7yQr+YREREliaQ96YqIElGDlROuQQrDKhERJTEGVqJubSgWvvcZgkGGVSIiSl4MrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrD3SXu1OVdrdq2rLsF089r2pFnWxZu8MvPbXJZD3Y6pZcj1em1ambgN79Wpf3wl5Fyp5d9Kdz27ugvvxn8fqR407XYnp2Xa861RjFe6Mtk1jvjbpr7296e/rZjx2SM1oVx34npGJ/j7f+fp51bZTn4P2/tfgMfFZxH4uEVH7S8LAehZlK9QtWS3Tqyj7RD2FrtybJdqtWCMnPUS2BxlEjVu9OoUWYM2BTgW99vxHufb1Q1iCTFTvKMB/y+nxoWpJB8rIwdtyXyszMV7NSiZd8p7FE+0PA3lb3w7/4ydGSFTH0zF/LBAROUcSBtYbULSoGPWlpmnGADG/NzJ79t1i2+AmpI4Brs0YAmQPxLUYi9RBatE9pfjm786J6RAGi+d86YlDql2KzogKaRk3AWP+BvIu/P0GjgUGD0SavuiqZX/lWsCVqr2OoQO+CAz4srpf/3k0nf4ixrv+gCZ1f//aXwTEc8Vz2lHN6T9h/OhBap/tzAimXRDosr+Rr4XJ58XH1d469D27YuK7cvp0h1SrYxuK5+f3w4HtH1r2u/ONBhy49ZYOee+t+iFd/JodP0D+NKYhwyV/nuRPKRFR5+gWQwJ81acxbMyt8Kg2tWYIRu84B8898vEkeH63DaPbkggsVdgZOBoqLZ3E0WlinrkbXz1XDjnQu/uvx7ENwB+fHBvexpK96smCDMw7irSQmla8Dd98bpI+36Rm+R1ISbkDK4+rGYkaOxH/vS5HCz9ayLKFu7tHX4u3fiErWOfx1mkXHh4NHDgtX4le3bJWsaLNi0eGYvUwCqNbXZ92YrUKzlr17tkq1S0u5h9SFT5TRU+vDutTmyrCctv2YQLR5sWkvwcRx2xifV1trQTGec+MqqZ5yINluID52MRker/kMd35epVaLl6r9prt68dzrfiu/AHrY77X1n2HPhPtWKO8t8ZrUc2Yxn4Nz7nOh/crtrf+/S/iuXvD3+O473er75mZ8RrCx5v/eAHe/ob2pyTmr+uYP1CIiGJJ/sD6yUF46wageNINagZ1KDnmdc7HGHxQVl3FtOEm1I8zhgvIIPwy0g4/iYNal/9e+OaI9gY9HGsBVKwzco65ciumKKG0S3x9ALKPnhL/oJ9C44BBporyUIy/FVj/C3M38GmsF/9wj2/1H+3wP/xLAiIAb1fhyBzwRIAow1i9y1tM1dOBJatMAeb9BjTeW4A3bv0TlqxuwcM7bsHDgQDeUuvLICHXe0McY5uMHYCHRSg8YAo1O2Vgv3UAEruL61A8rx2zOB41x0K8rse2X4s31OtKvAqbwHsmBRowYqF8P8S25ZCH92vVcjne8iPUThd/oGj7nYjnIJ5rCmcHtgeQsVLMFwHw3jdSUa2tn3jVNP0b2fp3RbXD7Pu+Bdni+LXgmCEr+uEqftuJoHhvuMqqV1ezMT9DXyoD6Qjz+z2/H9avTvQ9M5Pvv/EaJib4XSAi6lhJH1h9vmqA1dVO0/zue8ATy8IV2XsewOAxL+O3oZO2JsFz8AngyVdw1PsKmkVg1Su57Wfo4ncQDL6DhcPVjKvWjEYRjLRgOiCAsjdakPF1a3dn/r3WQCOD3fjpX0vgH3Mj1MlwBLGOEWTyw0EjIwfPa5UrXfbXXRgfaFF/BAiuTBSpoJfYPhM1FEXTv2gK4jU4YKvYXT1rIE5MAu+ZRhyrCJ3a+5ExCHe7/oRGWZE99KEIuv3wcOg9FUFvgS2QmoLew/fqVfe2kd+VBpTZX5v4Y+ctZJo+T/N7rHela0zV1trf/sE0PKUVYyeKP0zOY/2z+23VVRGU37B9J7Xn/kn1GhhivGchp0JhVa+mEhE5Q3IHVq262ht3uVld7Sznf3PI2p3/12NRf1gtNGQXYdwTH6P+yZsw0inV0wTlf/1arD+dirtFmNHGvBq0f9yN8CWDnTkQXS11BrZWRRTTwgYcUEs6mhaOjSAnq8bmit3VkuNqtSqfel2JdHu3hculfU46Wze1GqvckeQfMbXmqrt0ugUHZBXT+CzFNGL7n9RCOQ5UD4i1v2hB9q0IVVv1saGJkfvF+yKETh8b8Vm1Oq403nsmHNjegPUi1N5t+4ONiKirJXVgldXVE1m3oIgnW3UqS3e+mixV1DdL4Ht3AkaK0HqsHS9L1bGuRbr8h9w0zlVz+vcqZPXD3aNVpUwLdol2m7du57P7rWfCd+YZ/aYgLqvGD3+9nWOefD/V63pjgAhy7R1aYzFXqCUZJNXDd
iPfu9O1WP1b1Ta4TJ+lManx0vJkv9rf1uCtoyKk3puKRvGeyxPL2nQCkza0IHo4rf2tuZoaf9x0NLKarQ1JWZjoOGYios6RvIFVVVcXeEaoGdQZhuY+iD8+udR0opWdGrf67SIMLV6GwTDGs4bJs///+O47Vxxkr/ikq6uU/Y1sPPx+Le584w/t3G0umK5YsHpV51VYtSrbvf2w/o2dWH86PPSgI2hXZugMamxu+KSoKN3l7UK+d9eK8PkH1RbkvgMNeCzuyW+/R+MA8QePCJ44+iEOnP4iMuSFTq6K/geV5SoCEUMjEiNPSJTDDu5t8x8XNVg54Zou+dkkou4vSQPrWZS9zupql5Bn8WsnWhlDAuRknHQlb0jwoGnc6hCM/rYMuGMt111NU0FW3rwg4ioBXaHRqKK2Rp589ScRJs3dqldPHx/7kepC3o/G0W2psIbPSL/3/fAJSsaZ6aGzxrVhBiKEqH1YqmdayPoT0NbLRxln10/7COvxJyxZKB+HT/Kxn7GunRBkrl53GDkGVj/ZSd/3fiwZcEvHjMkU7122fO9C7PvWJ+NsfTnMRHa7Q6tky3GwIlgHVHX/KsmgWT39D+ozFtNq8YeVMV61jfIfVyeqxbj6Q3RDcc/dE8T/30Pjx/ocIqL2khIU1OOEnTlzBv379+yk+MILXjzySLFqda7jx3+N4cNvVi3qTPISUusHdLcTUmTorUXGSvtJTURttPNRpEyrwYrq9jwpkoioG1wlgKjTHNqPe9v1ZCtn2PnsR+17shX1QDvwSMo1DKtE1GEYWIlaY3R9X0UXqxMZNxy493Qmqrv6lqeU5KbhheBn7Xy5OSKiMA4JuEIcEkBERETUOVhhJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYO2R5B2prtfvPlVbhu3ise9NtegqNXtntOOdq07i6LQ4x/ZmCV6bVma7xav+2iLnX62Oe8808oLr8jqW2sRbWxIREZklbWD1bfBicEl4WnBMLaCrVrPEuOVqeDLfWpU6yPjlOMFrWRIREUVIysDasPdVFGMc6kuL9WnGAOzeVgGfWk6tuQmpY4BrM4YA2QNxLcYidZBapHzpiUP45u/OhabpxeK5SWESPPKYdxQhTc1pH62/Z0RERNQxkjKw1jdfwrA0Uxzp3wfD1ENKxBCM3nEOnnvkYxnwtmF0tragFbKLfgaOekv0yuuSvaFqrL173FyltS7Tu/mNZZHDB1SXvjaNRf1hNVvRhhwYy+e8rObqLMuibrcENao7X3uObdiAZX1jCm0ngffs+ErcnnINbl9eo2YQERFRe0jKwOoZMQAnDm9XwwDOouz1apzIGgSPtpQ61iHUv/s38Bx8Al/a8CBODDwEzxNj0VxpCohqvladFY+b54igqBbVLBEhdPDLqnJ7CIPrHzQNN5Bh9kH8IVTdFcvHqEXSmyXwPXkTRmrL9G2bpRVv0+bL44nuZRwb9+8Ypq3/MtIOPwm/EaZFkD34JDD4oL7tkXPEvDFPwPPcJH05ERERdZnkHMM6Mg/1peOAbXL86nbsGzod9XNGqIXUHv745FhLpdFcJU37ttHd/iCGRRsqMOfl8BCCe8aI576M32rr78VvRcgcGQqBQzD62w/ij+++o1c633wF9XgC46IOPxBh9iURMjeU4srvej9WBFJj/Un4igilf2jUw3Lzu+/hj2MmIFNVTYfmijB8+N9xXm8mZvhCvBv8DO8u5n35iYiI2lNyBtZjFSKofoTBj8kxrOMw+PB2DF5xEA1qMV09+xhWvSv8SuhjPzW1v8EfZJXTFITt3fpdJS3jJhFQ30NDrd6uqRTHNWfMVYRjIiIiai9JGFjPouzAaQwbMxFFN8r2CKwSoXXKp9Xw8koBDvQxWg6bT1B6MNylb0ztfoLUFRj0N/iSHO4wTg/SxyyVYCIiIupKyVlhFU40m06XOXYKu9Ebg/urNjlGs/cVNBtd7dl3wDXmZRyLdZ1WGRrNVU453jV00tUQpA4W2zPGysqTp9qxOlvz8pOApap8BUMPeNIVERFRh0jCwHoDihaNw5S6g+HrsG67hAWP3acqrtQe7GNY23Qd1g0PhtbzvTsBnlAFVZ5pr59oFXXb2UUY9wRCVU554pZ28pMy9Dk5hlWtK0+ekid+qWXmqw/4njwUPoZY4dhm6INix7bX/Jq8qoBaTkRERF0nJSioxwk7c+YM+vfv2enwhRe8eOSRYtXqXMeP/xrDh9+sWtQe5GW4ZEA2X29WzjuGl/HNzhgaIO90tS4dJ95byHGzRERENkk7JICo/ZxES716GCKvaAB8aeBNqk1ERERdhYGVSA5VWGYfEqBfD7ZT7/B1YDGGpVyDlJQ7sPK4mkdEREQcEnClOCSAiIiIqHOwwkpEREREjsbASkRERESOxsBKRERERI7GwEpEREREjsbASkRERESOxsBKRERERI6WtJe18m3worhONa4bAd+icchUzc7Ay1rFVutbDLc/oFo52FwyD3mq1bojmF/qRblq5bqX4U1PumrF37Z1ma7Qswmr3aoR2Ip7Nu5EpWpalgn29duyb42x/YxiXJ51m5op2PaL1Hz4i2YiWzWBJqwtW4qSFtW0r6+xvi/2YyciIurWZGBtq0AgIP77WZdN9Xt+EsxcXhmsV+19r6wLZr7yq4jndeS0bt26qPM7Y6quro463xHTsVXBLy1bFdyl2rs2zwp+yfvTYI35OTGnU8E13lnBu/ed0tunfxq8e9ms4Lxjankr267ZtzC8bsR0KDjPvC1t2wuDa06rtty2+TijLW9l37K9Rs7ffEhfJ8akrWt6jvW49eO0vA77+8CJEydOnDj1sCkJhwRUw3v4EqaMD1dUPZ4RGFZ3Cj7Vpq7ShLVHq5DrnhqqPOaNzUduSxV81sJndP5dKGnJQaFR1XTNRGEGUH7yiGhc3bZrfbtQnpqPQqMqqW07AN/xJq1Z2yw20rd/uOrp6o8s9bDVfQe2YsHFqbhcNBPhemxsWX1c6pEu27PcVMm9DbniNVdeDL+oikM7kcWKKhER9WBJOoa1Nwb3Vw+lG/tgMC6h4RPVpi4SQFOLC57hRvg6gvlaV7iYf07NikMLjRmjLF38BY3iwYUzqL3KbWvMgVSQwdEIhtnDc5Db6MU9Pj3AVmzxioCbA4+WLVvZtwi/b0Z04cfSBF9DAIVDEn3+EVQ2upDevAa9SmeraQ0q1FIiIqKeIAkD6wh4si5hla9ateV41oPYrR6TE8gxmTJYeQHPJmzOAOqa9SCYEL8eztwNOfDfLyuZARjDlVvbdqV/qQp1YtoiK7M6PZDuwlqjcCmroubxrjJ0liyDp0FfvwDFWsXUHHCv6nWp19SrdClKYKr02onnFYiAWjpWBdrAGfHaAyi5OAqXSzZp0+aMKhSUbRUhnoiIqGdIygqrZ844TKk7iMElXm3yjRBt9EZm154H1mNUbFGBUE1GVVInwtXGpWgarYer1e4mNF0AstIS6SwXGr3oddQFvwxnMjCeC6Ay1aW65+NvW3atG6HusgifpRfEtozQ
KgLpKhESSzaq466ACI0u5Brd8/LEKBEm9W2rdS2VzKt8Xe554WMbHYA7WpVUHoOvCoXidcy1jBrIwWZTBVcfjmAO8URERN1bkg4JGIFVpcWoV9Oq/hdRf50cFkCdIW+WCl5qCo+/dCE9VT+7PjzeUu9OT79eNePITpMpTYQzU2UzPLa0rdtOhyczcqxo6LjFPnAxEAqccpyoPLtf33Y65haJ0JpahXItjF/d64rgHoVCub6pwGtcSQCWfQjaWFrbc4mIiHqYJA2sJp8cxJTnf4O7vtG5l7WiaPSQWOn3hrre9ZOdjLGgilbNnI1e9m5tLchVoSDUlX8E5X5jvGeC2zaoLv9YY0VllVh2+1vCoTZWVgl8AF+LUUFt475bEbGuKayaL6OlkydhBVByKDy8QQ/X4bG+mp2PIiXlGqQU7VAziIiIuo+kvA5rw95X4Tl8SbUGwFuaB49qdRZehzU2y/VKI645KqiAVhltmTyhKc71RmNvW44vNV3LFC6U3m/uWrdu136N1cj127Jv67ZDjOupyvGrvio1U7C9bi08y5PLLMzHbzu2qNdp3YFHUmbAO345Try3EEPVXCIiou4gaW8c0NUYWMlZarByws1YlL0NwbJpah4REVH3kPxDAoh6uJrldyAlhWGViIi6LwZWoiQ3dPE7CAY/Y1glIqJui4GViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiByNgZWIiIiIHI2BlYiIiIgcjYGViIiIiBzN0Xe6Mm7BOmVGMVaNVDOVrr49K+90FZvlFqbIweaSedb73scV/xaq8bdtv0WqdXnkLVDDtz+NfnvUyNuz6s+z3/ZVMG43q5r29QzGfizLbetqot5+lYiIqGdyaIX1LMpWeDEfAzFFzbE4ViHCam8RUotRLyZv1mkUrziIBrWYupB/jQiULhEUN+GymDZnVKGgbCtq1eL45D3zvagTIVWue/n+fLG9pZjvV4vjbltfFyIIausay7cc0ZYaZFA0ll8uCYfOvFnm+WIS+84VgTb9en25HoZno7KPnG8nlonAmWVsW6xbJ4L1WiNXG8TxF1zIQWGqapul5sNv3j/DKhERUYgjA2vD3v2oH1+M3ZP6qDlmIsweOI1hY24NVVQ9nhEY9ulv4PtEzaAuIkLj0SrkuqeGqpp5Y0XAa6mCzx7eovHvQkmLCHRGRdU1E4UZQPlJGTpb23YATS3mgAlk9TGXQNum9ngVKjOmhgJtxZZdSL9/E1YP19tmtb5dKBeBs9ComGrHHYDveJOaIYlQ66tC4eipCNeLiYiIKBGODKyZk+6LGAIQ1oz6T3vjLvcNql2NBc9X4wQuof6MmkVdRA+NnuFGJNMrj5Vy/jk1K47aZpE8M0aFAqns/te66S+cQW2r274NuSIklmxcgwrZDGzFAn8AhUOupFJ5BOV+oHRseN28WbYhAHZ9+yNbPZRkWK68GE7pFVu8KM8ojjpMgIiIiOJL4pOu9GEDg0sOAjPksACg/uxZtYy6luyen41epXoX/eYMoK7ZXG1shX+NWHc23A058Muu+ZYA6tSieNvWuvU9QIFYt9fGKnhkRdQWEMt9cl19uscX/Zi0iqmputqa7OE5yG3cFR4CoMJyiGiXN+Zgc7xu/padcKvj6lWqQjcRERFpkjSwXsKq57drwwbkGNZVI8+i4Tww+Aaj6kodSZ44ZIS+yOAnq5xL0TRaH4u52t2EpgtAVlqCHeGNXvQ66tLHcxbNRPa5ACpTXRB/jwjxt60dl7GuxyWeaz026zjVYmT5l0YJrbK62sbKrGsmVolgLPenvScibRa6XcjVhiSIgF0hx7fGOfFMrP9m6Lg2we8OiNDN0EpERGRIwsCahsHXAcPGTDcNG9CHCQzur5rUoewnKIXP4nchPVU/sz9c2YwcWxpLdpoMeDnYLIOqPksfJqB1t7eyba2K6UJpnlrXPU+E1hxU+nfFCH5yCIF6aBIxHjVB2Z7l4fdEHD8uBvQgHfgAvhZzZXcpSox2jJPRtIqtekxERERJGVhvgGdob5w4vB9l6iSrhr0fYfd1A+Hp+CttUVzp8GS6REj0hrrH9QCYA4+5e11exkmGN3tgc49CIcxn9purnYls2zpWtuJkFRCqztrIM/ZFwA2PiZXU/kaHA/OV0C5dBTVe1VY9vVyyDKUieGtXKzAF8zC9IltpGsurOb4St6dcg5QJK1GjZhEREfUUzrwO67EKDN52WjXCzNdjtVyH9boR8C0ah0y91Sl4HdbYLNdKlZdrsgczGVjlCVPRlsmTqUzXUrVfzzTutuXYV58IqQbLcut2o10fVguaF6Idk22/Icb1WK3btl871kqOwdWHNRivy77t6OvXYOWEm7HoQDG2B9dhmppLRETUEzgzsCYBBlbqbDuKrsH02uU48d5CDFXziIiIeoIkvkoAUQ+x81GkpDCsEhFRz8XASuR0+esQDH6GIMMqERH1UAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORojg6s8varg0u8WHBMzbDxbfCK5a+i7BM1gxxB3mq0V+lsNa1BhZqfGHmbU2Pd2bjH16Tm61rftlq/bCtq1ZwI8hau8jlbjqgZVvIWrXL5fL+aocTbt7FOeFqMtbY7uRrr27crtWXb0dYnIiLqzhwaWM+ibIUX8zEQU9Qcq2osEEHWlzYCw9QccggRBt1+FzaXbMJlMW3OqEJBvPBoIe+z70Wde5m27uX788X2loYDWmvb1oLoB0h3u9SMaESg9QVQmBHjOWIbBRdyUJiq2oYEXlehR1+mT8sxN7QL+bpmYwHEdtUci1a2nTfLtF3xntSJcGsPw0RERN2ZIwNrw979qB9fjN2T+qg5Vr4NH2HwY8VY5VYzyCFEMDtahVz3VOSpOXlj85HbUgWREVvn34WSFhHqPOl62zVTBEug/KSshLa2bRFEj7rgL5kHj7Y0uootXpRnTEVh1K+WDLNVKBw9FeoIlKt7XbU+L5pGb8Kbnv5qjlkbt+3qjyz1kIiIqKdwZGDNnHQfVo1UjSg8c+5D0Y2qQQ4SQFOLC57hRtwTAXDjTlTK+efUrDhqm0VCyxgVCm6ym7ygUTy4cAa1rW77NqwumolsbVkMsnramIPNs25TM6z0MFuM1RF/CF3d68r2LI+yTUMbt+3/AOWpOfDEKyITERF1MzzpijqA3gXeq9Qr/rqQXdxAXbN1LGpcWtf+bLgbcuC/X1YbA6hTi65823ols9AzLxSILQJbUR4nzOri77vcFx5nah9727p42zaWiUmrALcSzImIiLoZBlZqM/tJQNZwFkDJxqVaF7gcc7na3YSmC0BWmrWTPaZGL3ppXftifVkxPRdAZapLdYNf+bZlt3xJ32jVU0kEwoqdyIoVZjXx920ZZ1pSjCz/0jaE1tZeVzrmFhnbXob0o1cSiImIiJIXAyu1mTWcybGZRrByIT0VyHUvMwVDvcs7/XrVjCM7TfZz52CzqWtfGybQt79oX822m+BrENuRYViFbLffaC/G2o8
+gK/FXCFdihKjrZ381NZ934bcDPWwVW3ddjo8mS5UXow2wJWIiKh7YmCldqTClN8bOou91rcrcsxlYCvukcHQfvUA9ygUogoFoctNHUG5CJaFQ2Q3fYLbjspcodQnv7ySQEaxeLwcc2+ZiTdNy2QVs1SESO2sfy08t3Hf2lhZ87jUeNr6uszvicnOR5GScg1SinaoGURERN1HSlBQjxN25swZ9O/fgWc9HavA4G2nVSNsyoxi7WQseX1Wz+FLaq6hNxY81nknY73wghePPFKsWp3r+PFfY/jwm1XLeeTJUloFU0rNh99+MpQMrPLEomjL5ElHpV6Uq5YMjeZu/NjbluM89cqoRdR9qO1cnIrLUces6tuSXfSJ7dt6zFqVuMQ0vECOyfVVqUaY+bW15XXZ3xPdDjySMgPe8ctx4r2FGKrmEhERdQfODKxJgIGVnKUGKyfcjEXZ2xAsm6bmERERdQ8cEkCU5GqW34GUFIZVIiLqvhhYiZLc0MXvIBj8jGGViIi6LQZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0R9/pyrgFq3FL1rCzKFuxHas+Vc2scaifM0I1OgfvdBWb5Taj9tuUtsp6m9Nc9zK86THuyW+7TWlGcfRbq0a79WuM26NatmF5jgul9y/H3Ij7+RvHYF5uv31q5Lqx35PIW6/qYu2fiIio53FohVUGUi/mYyCmqDlmDXv3Y9/Q6agvLRbTOEypO4gpe8+qpdSlROhz+10ikG3CZTFtzqhCQdlW1KrF8cnw5kWdCKly3cv354vtLcV8v7601ueFL1MtKylGYaMX9/ia9IUhYhsVO5GVkaPainueWs+YlqE0VQTiPioRypDrC4iQqC/3u4GSjWtQoS8NkcdQ0jcHhaqtS8fcItO2PS7rurb3xO8OmN4T27ra+vLYXUhnWCUiItI4MrDKQFo/vhi7J/VRc6wyJ90nlt2gWiPgyQJONDerNnUdERaPViHXPTVUUc0bm4/cliqILNg6/y6UtIgwaFRUXTNRmAGUnzyiNbM9y03V1tuQK5ZVXrRuWAuUyEfhEDUjlsAH8Jn2VXFoJyozpoYqmtmeqSKUVqFShWWNCLULRLt07Cg1I4brXchVD6O9J9q247wnFSetzyciIurpHBlYZSC1DgGg5BBAU4sLnuFGqDyC+bJrXs4/p2bFUdssElzGqFBQk93oBY3iwYUziVVojUCZp4YBxCEDKkKhsAlNFyBCrjG8QK/0ymEJdc1GBVev3MJd3Go3fe1xEXRNr0PKSjPeE8mF9NQY74l4DeWNptBORERE3eCkq2MVKK7rjQWezh3DSvHIwDcbvUq9gEcOCzAHvwTIsaSls+FuyIH/flmhDaBOLQoRzylodKF0bHgMqx5CWw+U8UJhxRZ53Eu1oQd+tytcwZXVX+RjVcwgKcfeynXFcWtVWOO40uHJdKH8aHhYhFYFjhizqrMGaSIiIpKSO7B+chBTtp3GlBn3oajjzwEjRQ914ck6jjSAko1L0TRaH4+52q1XL60Vxjgaveh11AW/HMspT5g6F0BlqgtZarFGG29aJQKn6aQkGWAvxAuUYXr3v7UCKpX7ZqO8jz5GVg49qBNhVR/jKsKoHN8at3J7G1YbY1BLpqJp4+zQ2NtsTzFKsRNu9X4twFSUiteUfr2+PEQL0uYKNREREUnJG1hlWH2+GhgzncMHOlneLCOY6VN4XKns6tbP7F/tVrPUMIGIcBZFdpoMhznYbJzZL2jDBPr2DwdFGVY3yiqkeR8ihJ6sAlrCoVA721+1jeCoUaHQXJmVVdD0vuJ/GcWWKxKEgrb/A5RrQVxtW7uKgWpv0cfXWunja8NVZeuJVW96xNble2KrBNvH0RIREZEuOQOrKayGT76irqd3f1f6vViretJrfbtQnpoDjzmEydApg5/96gHuUdqJTgWhEHgE5f5AeGypKayGg6XOHqK1M+3lZa3EY0uwjREK84aI5zd6w+FWnQCWK9eNuMJAsThOedkp8TjGZbViV0r1y3bBY7vUV9QgbXJ8JW5PuQYpE1aiRs0iIiLqKZx5HdZjFRi87bRqhBnXY/Vt8KI4YlBjbyx4rPOGBvA6rLFZrjlqvhaqQQVPy3VSQ6zXYS30hAOnHIqgnYRlEeN6pXIcrBxaYN6+nKdduirG9U215cZ1WONdP1Ye4y6kh7ZjPeaIY7JsN9rxyjG/S1HSN8Z1ZTU1WDnhZiw6UIztwXWYpuYSERH1BI6+cYCTMbBSZ9tRdA2m1y7HifcWYqiaR0RE1BMk/1UCiLq7nY8iJYVhlYiIei4GViKny1+HYPAzBBlWiYioh2JgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHc/Sdrhr2vgrP4UuhW7KG2G/dmjUO9XNGqEbn4J2uYrPcmjXuLU6jsd7mNNe9DG96rPfk12/RGu0Wp/ZbpNr3HW/b9nWFqLeOVbdRbYmyf+OWsxmRt1i1vie2fRvr6S1dlG0QERH1VA6tsJ5F2Qov5mMgpqg5FiPzUF9arKbpWHD+IKbsPasWUpfyrxHBzCWC4iZcFtPmjCoUlG1FrVocnwyDXtSJMCfXvXx/vtjeUsz3q8VaqJyNyj75yFVzwvR14dH3G9r3liOW5bG3LcmAG17/ckRYlcHTi5K+OShUbYMMpL0qxO4z1Awz+Z405MBvbFfbtxdrw/lVD8fmfTOsEhERhTgysDbs3Y/68cXYPamPmhPPDcjspx5SFxOh8GgVct1TQ1XNvLEiXLZUwWcOZ7H4d6GkRYRBo/LomolCEQDLT+qhs2LLLqTfvwmrh2tNmwCaWlxIv141haw+pvJnK9tOSGArFoiAWzp2lJqhyPkXp2oB11oL1tU2ixfft384/Lr6I0s9JCIiotY5MrBmTrrPOgQgrmr46nrjLvcNqk1dRw+NnuGmbnatq1vMP6dmxaEFu4xRobArq5YFjeLBhTNahTZvln0IgNltyM0IoGTjGlTIphYuAygcolcqW9t260QYr9gJuIsjj0GE3zfjVESzh+cgt9GLe3xNWrtiixflqTnwxHwtREREZJa0J13J8a2DS7xiOojdWbegqOOH1FLCZPf7bPQq1bvoN2cAdc16WEuIf41Yd7bejX6/rNAGUKcWxZM3axMue4ACsW6vjVXwyGqsWy00xN12lb6uNi22dtnLCi3ysco2njYhMtCWLIOnYam27QIURw43aNkJd2jfKnQTERGRJmkDq6zCGuNYfWkfYfCKg2hQy6hjyZOe9GClT0blUCernEvRNFofi7na3YSmC0BWWoJBr9GLXkdd+nhOGerOBVCZ6kqoC107LmNdj0sch+3Y4m77NqyW841JW98IrUcw3xdAaV7kmNaEyJOqSo33ZBlKL4jjMIdSLdCG9+13B0RwZmglIiIyJG1gNct0D8SwTy+iXrWpY2mVTFPACp9p70J6qn4GfLiyGTm2NJbsNNlHnoPNpupjxPjPWE
QoLJdXDjBCpXueCJ05qPTv0oJfm7ftHhU+scr/Acq1IG6EdHk1AdUOndQVW8Uh/coB+nuSjrlFIrSmVqHcEvTDtCEE6jERERF1k8Dq81XjRNYgeFSbuko6PJkuERLDZ8DX+nZFjtfUKo4i7NmvHqCFRPOZ/UdQbhqH2jrrWNmKk1WAUUFt47Yt40xl+DUF9MslxWJb8rJW4nGiZ/Obx8oGPoCvJVbVWR8rW2kab6s5vhK3p1yDlAkrUaNmERER9RTOvA6r/TqrinE9VuP6rCG8DqujWK45Gu1apjKwypOxol7n1Ho91EJPeByq/VqmOtP1UOX4VJ8IqYaI7cfedsS6ca+DKrcjr1hgnARm3W5IaBtyTK+8dqs+W4r3uqJdexYipq6ccDMWHSjG9uA6TFNziYiIegJH3zjAyRhYqbPtKLoG02uX48R7CzFUzSMiIuoJusWQAKJubeejSElhWCUiop6LgZXI6fLXIRj8DEGGVSIi6qEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0Rx9pyvjFqzGLVkjnUXZiu1Y9WlvLHjsPhR14s23evSdriy3MTXdGjUhttuU2m+BGnfbraxru0Wq9Ran9tujxj7uii2zUdDYhlu32pcZ1HOM7dlZtk9EREQxObTCKoOoF/MxEFPUnGga9u7Hqn4D4j6H2llgK+7xBUTY24TLJZvgF4GrZOMaVKjFranYIkJjXxHkxLqXS4pR2OgV22tSS0Xg9AGbtWWR2671eeHLXBZjXRlIvagTIdVYnuVfivl+tRjpmFukb1ebPK7oxy3CZ8GFHBSmqrbBPS+8bskylF4w7duyTC0X6+f20dNw3izzMjHdn49cEZjTr9cWExERUSscGVhlEK0fX4zdk/qoOVF8chDzDwMLPIPUDOoMFYd2ojJjaqgyme2ZikJUoTIUDOMQYbe80YXSsUZV9DYUul2obPgAtaq9umQe8rTHYtvDc0SwC6ApoNqe5aaK6W3IzQAqL6qF/l0oaRFB07Rcbrv85BHVtrneJbZtJwNzFQpHTxXxNp50pPdVD6MJfACf5Visao+L98v0HhIREVF8jgysmZPuizEEwHAWZa9XA2MmduowAGpC0wWgcIgROPWqpuyCr2s2Kp1xnAugMjUHHiOo+dfA7ReBsyWAOjXrqqS6kKUeStlpYkcXzqgwbKWHxlGhcCxVbBGvJaM4gW76I6gUwdszPHoglaEe7qmWbYcdQbkI9+HQTkRERK1JzpOujr2PVRiB1ZNuUDOos8lxmb1Kl2pd9H5ZJTUqnYmQwwpKxfpa938xCk1V1DARhius1VwL2XVvrta6R6GwZacWBnV6tdRKjnGVxz1bBGVbaNSqvznYbBkTa1XrW6yt20uOk411XGo7Maurvl2x1yUiIqKokjCwVmPBtktY8I1xyFRzqHOV+2ajvI8+VlR20deJsGqM12yVCJXujQEUauM55yEvcAZ1cjynbXVtrCvy4Y8WILVxtFUiFJpPmroNqz052rHpofID5Io2+vZHtnqGPuTAGEs6FU0bZ6sxrno4zvKEhyNEI4ckGONQ/X12oVfZ1ojqrT5kwlq5DZPVVfHaQxVqIiIiSkTyBdZjp7Abl7DqeS8Gl8jpYLi9oVo9iTqGGruZUWw5+14OE8hKiz/qU6ONG5Vn55uCoTZMwNqVr59Vn4PNRTNNYVORYXWj7HJfFtl1bzn5aR6ymuMFaX0MrDaUQRtzqgdxPezqVxPQ2lFCqaSNr7UPZdCqq+YxulZadTU1H4W8MgAREVGbJF9gHZmH+tJi0zQOUyAvayUezxmhnkQdJW9IDtDoDZ99r052yrWEMDm2VQa/xVhr7up3jYInNYCSCiMEiucdrUJu5qhQMA2FVdPJVyGmsBoOzNHJ7nt3Qw5WxXqeCpfaOFTXTLwZCrpy0s/yl5eduhwtNAvRKqn2E9KsVHV1dPTtATVYOeEapKTcgZXH1SwiIiLSODOwHqswVU+B3dv0auqCY/pi6kKyimnuetfGocbvSg+Tl5YSYRA74VaVTDkGNhQ+tRApH1ShQFuupi36mf5aIBT/r/QvDS8LhWIjJOuT++JUW9gMj1/Vpo1V8LTh+rHh8av6VADbNWDtY2pttBO64lZXh+KeuyeI/7+Hxo/1OURERKRz9I0DnKxH3ziAOsbOR5EyrQYrqt/BwuFqHhERESXpVQKIupUdeCTlGoZVIiKiGBhYibrcNLwQ/AzBIMMqERFRNAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORojr7TVcPeV+E5fAlTZhRj1Ug1U/rkIKY8X40TqqnJGof6OSNUo+P16Dtd+degl69KNVwobcMtTvVbqC5FSYtqZthucardQtWLctXKdZtu3SpUbJmNAu32rTp5v//V9tudBrbino07UZmaD3/E7Vljb1uSt2B1y3v+R2y3teMWjP1qjejvi3H8UY+biIiIonJohfUsylZ4MR8DMUXNiXDdCPhKi1FvTJ0YVns0Gcp8ARHGNuFyySb4Regq2bgGFWpxayq2iNDXV4Q9se7lkmIUNnrF9prUUhkKvagTQVJbfn++CMdLMd+vL5VhUruHv7aumDw5KPfZ9y22UbETWRk5qm2Iv219+WwsQA4K1RyzWp8Xvky1bsRxCzLEb6yCR70vl0uihHjxnIILYvupqk1EREQJcWRgbdi7H/Xji7F7Uh81h5yi4tBOVGZMDYWxbM9UEfCqUBkKfnGIsFve6ELpWKMyeRsK3S5UNnyAWtn070JJiwh0RtXTNROFGUD5ySNas+5iALl9TCnwehdy1UODDJYlyEfhEDXD0Mq25XpNozfhTU9/rW2X7VluqsbehlyxbqU4Hp0Iu0dliI9XaT6C+b4qFI6eCmtNl4iIiFrjyMCaOek+6xAAcogmNF2ACING4NSrlrKLva7ZVG2M5VwAlak58Bihzr9G635HSwB1olnbLB5njEKevlSvqMru/wtntECbNyRHBGOjKqpXUitNz5eBeIFYVppnHgaga23bMpBecRd94AP4WsSLOr4YvUpn61PZVj2EKxVbxPuUUcxhAERERFcgeU+6+rQanhIvBmtTBXxqNnUOORazV+lSrZvcL6ukoWpjAuSwAhnqxIe2WXavI4Am8+qye10sdzfkwH9/PnJVoIV7ntYdD1943+ZxpLL6C3dx/PG0sbbdFrJr31wplkEcVeI7aAxXWIZS7IR7i1691SvLOdhsH/NKRERECUnOwHrjOOw2jV/1jbmEYobWTlMuAmN5H308p+wmj+iqj6dFBLmNARRqwW4e8gJnRGB0Id1YvdGLXkdd8Mvl8oQprSrrQpZcpoXNXUjXxokWI8u/NFzJlCHyQj5W2U6isoi37URpY3irUOixdf+nmvedjrmjc1T1Vq8EZ3nEa1VLiYiIqG2St8JqkukeiGHqMXWkdKT3Ff/LKDaN59SHCWSlJTAyUxtzKs+eN4U3U2jMTpMJM
AebTWf2a135ffuLthwnWoXcUAX1NqyW1VkRgMv9QMXJKj0MG13y8ioGqi2HEMTfdoJkWN0oq7jLrF378nXFqtRqwwX0kK8PF9CvNKC1bcMGiIiIKLpuEFjPouz1apzIGgSPmkMdR44jlZXK0Nn16mSmXMvYTDm2VYazxVhr7up3jYInNYCSCiOoqRCaOUoPje5R2glcBUZXOo6IMBowjZk1n+gk+D9AuazOXi+Oa5asupomjzhOeVkr8VgLlwlsOy5TWLVfCkt/XVUihJqudmC8LtdMvGk+LjlcIFW/rJVW5VVrADVYOeEapKTcgZXH1SwiIiLSOPM6rMcqMHjbadUIM67Halyf1TBszHTsnnSDanUOXofVuA5rDjbLrn3V0snAKiuJ0a5FaizTW5HXQrVeK9V6vVLrsljXOtXIY5Td/5ZQGGfbltcUZjzHfv1XnXn/rV/jVae/fnlFAkuVVqhZfgeGLXkPxTs+wwv5aiYRERE5+8YBTtajAyt1jJ2PImVaDVZUv4OFw9U8IiIi6h5jWImS2w48knINwyoREVEMDKxEXW4aXgh+hmCQYZWIiCgaBlYiIiIicjQGViIiIiJyNAZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0BlYiIiIicjRH3+nKuAWrcUtWO98GL4rrVCNrHOrnjFCNjsdbsxq3MY1ze9SorLdmRUYxLs+y3c9f3be/MtoywXKbVNNzan2L4fYHtMeG2LdfjX3cxvbj3rrVfmzGMasmUvNtt4XVRd02ERERxeXQCutZlK3wYj4GYoqaY6UvL4YIqaXF+tSJYbVHk8HMFxBhbxMul2yCX4Suko1rUKEWt6ZiiwirfUXYE+teLilGYaNXbK9JLdVDZy+xMU+GmmEhw64IfOKT19cXky3Qynv4h5aJKRwKj2C+D9is5sc8bhFMCy7koDBVtQ3ueabtLkPpBfNxi21vrIJHvSfacuzEAtPr0sTaNhEREcXlyMDasHc/6scXY/ekPmqOzbH3sapf51ZUSVdxSFY+p4Yqk9meqShEFSr9ejsuEXbLG10oHWuEzNtQ6HahsuED1MqmWL7g4lRcLpqJdG25jX+XHnajVF1bdxtWl8xDnmplD89BLgJoshRkZaitQuHoqdH3H5KO9L7qoRQ4gzq4kB6q1tqWaxLdNhEREdk5MrBmTrov6hAAg6/6NIalXcSCEi8Gq2nBMbWQOlATmi4AhUOMwCgrnl6Ui0d1zbZqYjTnAqhMzYHHCHb+NXoXfktABD7BNRNvxgmjFSerkNvnDOaXzkYvNc1PJCgnqGKLeC0ZxQl01R9BpQjenuEqerpGwZNahYKyrXrwlpVU83Ih8W0TERGRXRKedHUWDeeBE4cvwmMMB5gxALu3vYqyT9RTqMPJsZi9SpfCl7kMflklvWgdOxqXHFYgA6fWRV+MwohKZzR6WK4UATfX6Jr35KDctxhrTetW+peGwmyvLUfUXDsRtCuslWK9+puDzXECszZcQdu2DJ+mdZGOuUWb4M+sgjv0ukzjYxPYNhEREcWWtFcJmDIjDx71GCNvxYLrLqH+jGpThyr3zUZ5H32s6JuedNSJsJrbJ5Te4mvZCffGAAq10DkPeRHd6fEVesLd+nBPRWmqCLvn9Ga2Z7kaQ6rGkV7wRg2t2jha5MMfCpB6gM0ybzsK8/b9fXahl1FR1SrNs+GWwxnkMncABSK46tXfxLZNREREsSVhYL0Bmf2A+rNnVZs6jxqbmVGsBVWdXvnMSktgZOb1LuRqZ+ebwps2TMCFLNWMTd93QkMPNOnwZEamYP0s/RxsNp/BH/gAvhY9iOsVVP0qBlo7FEqttDGwxlAGOba2JVxBlcFWVp3Lj4p1r2DbREREZJWUFVbPiAE4cfh9+FRbOwnr0wHwxBn3Su0jb0gO0OgNjx1VYS3XMjZTrzj2KrV21+tjPQMoqTBVJo9WITdzVMTln6KR+64U+wud2R9134o8gcsfMI23NYVV08lXGjl2NlSZVdXZVP3SU/IEsGjHpp98Nsq0HfOwhib4GkSjb39kJ7ztGqyccA1SUu7AyuNqFhEREWmceR3WYxUYvO20aoSZr8dqXKNVNwDeUtMQgU7A67Aa1ySNEgC1wCoridGudWos01vyMlThau0RzJfjQ1UrJOa1Vs37tm434jqrctys+TqpBvv1VDX6tppGhy+LFXGNV9t6rS0Pi9y2oWb5HRi25D0U7/gML+SrmUREROTsGwc4WY8OrNQxdj6KlGk1WFH9DhYOV/OIiIgoeU+6Iuo+duCRlGsYVomIiGJgYCXqctPwQvAzBIMMq0RERNEwsBIRERGRozGwEhEREZGjMbASERERkaMxsBIRERGRozGwEhEREZGjMbASERERkaMxsBIRERGRozk6sMrbrw4u8WLBMTVDcxZlK7zafOv0Kso+UU+hLiVvU9qrdLaa1oTv/Z8QeXtWY93ZuMfXpOaHVWyRyxZjrelOqBbyNqxy/bKtqFWzdLG3rW8zcprvV09Q4u7b2K82RX+OsR/7dnXytq1xtk9ERNRDOTSw6qF0PgZiipoTdgOKFhWjvtQ0zRgg5vdGZs++W6wz+NfA7Xdhc8kmXBbT5owqFEQEx1hkYPOizr1MW/fy/flie0tN4U4PnJV98pGr5kQS26jYiayMHNU2xN923iz9eEOTWJ4LF9Kv15e3um/xunttrILnfmMbyzHXpZYZxHMKLuSgMFW1bWp9XpT0FctVm4iIiHSODKwNe/ejfnwxdk/qo+bE56s+jWFjboVHtamriFB4tAq57qnIU3PyxoqA11IFXyIVQ/8ulLSIwOZJ19uumSjMAMpPHtGaFVt2IV0EwtVx7galhT7ko3CImmFoZdt2tcerUJkxNRQ64+9bvu4ASu+PElJDROD1VaFw9FSoI7AKbMUCEZ5Lx45SM4iIiMjgyMCaOek+rBqpGq355CC8dQNQPOkGNYO6TgBNLS54hhuRTIS0jTtRKeefU7PiqG0WqTZjVCjsyqEFBY3iwYUzWoU2b1a8QCgYoS9vJrLVLENr27Y6gnItPN6m2q3sO/ABfOJ147hpKIStqlyxxYvyjGKsdqsZFnpVGO7i+K+PiIioh0r6k658vmqA1VWHkd3vMrh5AY8cFgDUNUeORY1Jdq+L0OduyIFfds23BFCnFsVTcSiB0JfAtmt9u0S4DFdXW3UuIEJ5FXwoVsMBlqEUO+Heoqq3IkiXN+Zg86xwALaQ1V/kY5VR/SUiIiKL5A6sWnW1N+5ys7ramewnKFlPjAqgZONSNI3Wx3Kudjeh6QKQlZZgGGv0otdRF/wy+BXNRLYMg6kuZKnFMWnjQ1sJfQltW1ZXAygcEiNcxpJq3nc65o7OUdVbvXqa5ZkXqu5ayaECgahVYSIiItIldWCV1dUTWbegiCdbdSr7CUpvhoKaC+mpQK57manrWx8mED55KbbsNFnSzMFmGSb1WXpXft/+rYa5ipNVQMtOuI0g7Qu35YlViW5bq66K8FkYtes+hutdsavA2nABoNxnBPylKDHactiA/wOUayHf
WO4Nt40KLRERUQ+XvIFVVVcXeEaoGdT10uHJdKHS7w1dlkkPgDnwmLvXY112yj0KhahCQSioJV7tjDjL35OjVT1lNVULzwltW80b3cZqp2sUPKlVIoQalWZ18lnmKGS7ZuJN83HJ4QIi1Bd6xGMZnt3zTMvkVCyO04VSebUB8xCC4ytxe8o1SJmwEjVqFhERUU/hzMB6rEJdW/Ugdovm7m36tVbD12M9i7LXWV11omzPcvhFQDQqhtpYUVNVM77bsFoGNtl1r6qNcgysUa0NXd9Vncil7yPRa5bG37aknRgVo7oaf9/pmFtUjCz/UrXtpfBlLjNVntvB8Htwz3jx/wNNDKxERNTjpAQF9ThhZ86cQf/+PTspvvCCF488Uqxanev48V9j+PCbVYt6ih1F12B67XKceG8hhqp5REREPUHSXyWAqNvb+ShSUhhWiYio52JgJXK6/HUIBj9DkGGViIh6KAZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0R9/pqmHvq/AcvoQpM4qxaqSaqfg2eFFcpxpCtOd0JN7pKjZ5G1O337hfag42l8xDnmq17gjml3pRrlq57mi3OG3C2rKlKGmR99xfjrkuNTuwFfdot07Vyfv1m2+9Gn/b1mWa1HzLbWXjva6KLbNR0KgamvCxRS7TRR5fjNdFRETUwzm0wnoWZSu8mI+BmKLmmMkgW4xxqC8t1qcZA7B7WwV8ajl1If8aEepcIsxtwmUxbc6oQkHZVtSqxfHJwOZFnQiSct3L9+eL7S3FfL9arNT6vCjpm4NC1daJdSuq4Llf3+9lTw7KRcDU7/UvJbJtGULV+nIyhdVEXpcMoKF1S8KBM2+Web6YxL5zRaBNv15fboj+uoiIiMiRgbVh737Ujy/G7kl91Byr+uZLGJaWplpC/z4Yph5SVxKh8GgVct1TQ5XHvLEinLVUwRcKjnH4d6GkRQQ2o+rpmonCDKD85BG9LQW2YoEImaVjR6kZhnTMLTJVJd2jRPALoOmcaiey7Ziu8nXZ1B6vQmXGVGsFNebrIiIiIkcG1sxJ98Xt3veMGIATh7djwTHZOouy16txImsQPNpS6joiILa44Blu6mbXuuhNwTGO2maR/jJGhUKh7ILXutIvnFGVTFlF3SnCaHGbu8tb33Y8V/e6rI6gXAumt6m2dOWvi4iIqCdIzpOuRuahvnQcsM2LwSXbsW/odNTPGaEWUteT3e+z0avUK/66kN3nQF1zk1qWAP8ase5suBty4Jfd5y0BaMOVZZUU+VgVMaY1UsUWL8pT81FoGSMqxNq2pgoFYplc3qvUPJzAEP91lfuMdWfjHl/011vr24Vye3W1Da+LiIioJ0rOwHqsQgTVjzD4MTmGdRwGH96OwSsOokEtpo4lTyIygllkOAugZONSNI3Wx2uudjeh6QKQlZZgGGv0otdRF/xyrKccQ3ougMpUF7JkVdMXQGmeaVxpDHr1NAebzWNQpZjblm7DamOMqZw8LvE6zKE1/uuyjlMtRpZ/aZTQKqurARQOMVdXE39dREREPVUSBtazKDtwGsPGTESRdqGCEVglQuuUT6vh1YYIUEezn0QUPtPehfRU/ez78Nnvene6/QSjaLLTZNnRGjS1rvy+/ZHt/wDlWmg0grI8o1+1t4THoepn8gOl91uvTBB326ptoY2BNbT1dd2G3Az10ESrrtqrvgm+LiIiop4sOSuswonmZvVIOHYKu9Ebg/urNnWRdHgyXaj0e0OVST2k5cBj7gKXl5+S4cx+9QAtJFahIBTUTBVJ9zxLSJZVzELt0lHi8Sy9YhkOq1EuCRVv21HoQwqM407wdRn8a1DQaB7zKqn9jbZVUhN4XZrjK3F7yjVImbASNWoWERFRT+HM67DKLv9tp1UjLHyt1WosKDkoQqqhNxY8dp+quHYOXoc1Nj04qmRnu5apxrhearRlsovcdD3UyGuVGuTzdiE9FE6t64VY9hFn23Jsq69KNYSMYmtgFGK/Lvu+rddolbRrsV6I9nrt7K/LUIOVE27GogPF2B5ch2lqLhERUU/gzMCaBBhYqbPtKLoG02uX48R7CzFUzSMiIuoJknZIAFGPsfNRpKQwrBIRUc/FwErkdPnrEAx+hiDDKhER9VAMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaAysRERERORoDKxERERE5GgMrERERETkaI6+01XD3lfhOXzJdEvWMN8GL4rrVOO6EfAtGodM1ewMvNNVbJZbmEa5TWl81tuc5rqX4U1P+J788bZtXaYL3X7VfutVg7oFq3br1EY1z8R+a1j9efJe/9Zbp0aub32OZbn9lrSt3BbWvu3Yt6slIiLqpmRgbatAICD++1kHTmeC3uXrgv9nT2Vw/j+uC87/pXV5/Z6fBDOXVwbrVXvfK+uCma/8yvKcjp7WrVsXdX5nTNXV1VHnO2I6tir4pWWrgrtUe9fmWcEveX8arDE/J+Z0KrjGOyt4975Tevv0T4N3L5sVnHdMLW9l2zX7FobXbXWy7cs+afteGFxz2ph3KDhPHss++3x9kscSOk77JI978yHV1vcbbtunth4XJ06cOHHi1P0nRw4JaNi7H/Xji7F7Uh81x6waXll1HR+uqHo8IzCs7hR8qk1dpQlrj1Yh1z01VPXMG5uP3JYq+KyFz+j8u1DSkoNCo6LqmonCDKD85BHRuMpt2wU+gM+8L5va41WozJhqqpDuQvr9m7B6uN5uE/c8U8U0HZ5MsdELZ1Cr5lilI72vehiNqz+y1EMiIqKewpGBNXPSfRFDAKx6Y3B/9VC6sQ8G4xIaPlFt6iIBNLW44BluhMAjmL9xJyrl/HNqVhy1zSJ5ZoyydPFrXeFauLu6bdtVHNopgmQ4/FodQbkfKB0b7pbPm2UdAtBxjqCy0fw6bfwfoDw1B55OORYiIiJnSMKTrkbAk3UJq3zVqi3Hsx7EbvWYnKAJa8tmo1epF/BswuYMoK65SS1LgBzTWTob7oYc+O+XVdQAjOHKrW270r9UW1ebtsjKbBSBrShvjFNd9e1Cuam6mqhyn9qvmO7xxXi9Yt8L/AEUjjaNYRVkONfX9UbZt/GaxeSriliXiIiou0vKqwR45ozDlLqDGFzi1SbfCNFGb2R2/HlgJMiTgIxgFhnOAijZuBRNozfhcok8OagJTReArLQYFUO7Ri96HXXBL9a9LE9MOhdAZapLdYPH33a2Z7k2X5+WofSC2FaU0Cqrq5WmSq6VrK6KQDkkXF1NRN4sY79yKkaWCM6RoVWvCsO9LOKkKfOx+/vsQq+yraYhA+mYW2RsexnSj8YJxERERN1Qkl7WagRWlRajXk2r+l9E/XVyWAB1Bms422Q6i9+F9FT9zP5wINO78tOvV804stNkWTEHm01n0GvDBPr2F+22bluNFbXTqqsuS3e/mVZdTc1H4VWdhX8bcjPUwxB19YOMYstVD6LJHp5jqyqb6a+r8uKVDNwlIiJKTkkaWE0+OYgpz/8Gd32jcy9
rRdGoMOX3Yq3KU3oAtI25FKHxHlmdtVQRBfcoFKIKBaGqqLnameC2DUbXu61SqldXY3X3q/1dbZe7f4126SvLeFsVVs2Xq4rliirAOx9FSso1SCnaoWYQERF1HylBQT1OWIdfh/VYBQZvO60aYcb1WI3rs+oGwFuaB49qdRZehzU2y/VQ7dcclWRglSdMRVtmhDvVsl9zNPa25TjPpShp0RpC5LVS9eudBiLnK9r1Ti9EOybbfkOMfViPOZHrw0rGa4tYbgm29tcV6zqsO/BIygx4xy/HifcWYqiaS0RE1B04M7AmAQZWcpYarJxwMxZlb0OwbJqaR0RE1D0k/5AAoh6uZvkdSElhWCUiou6LgZUoyQ1d/A6Cwc8YVomIqNtiYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR3Psna58G7worlON60bAt2gcMlVT6urbs/boO11ptzitUo0ot0CNy3ar0Yj761tvc5rrXoY3PcY9+a9mXcG4JaxqRrttbLzbymq3bm1UjYh14+3bfutWIcq+LduPeG1EREQ9mAysbRUIBMR/P+u46Zc7g5mv/Eq1zwS9y9eZ2mr5P+4M7lPtfa+I5csrg/XG8k6Y1q1bF3V+Z0zV1dVR53fKdPqnwbuXLQyuOa23a/YtDH5p2argLvvzYky7Ns8KfmnzIdU+FJy3bFbw7n2nVPtUcI3X1Nb2NSs475i+rtxX+LmtrKuWG+vq7fBxRz5fHZv3p8Ea1TZP2us0LbO+jvjHre873nukrx/eHidOnDhx4sTJPDlzSMDIPNTPGaEaN8AztDdw/iIatPZZlB04jWFjbg1VVD2eERj26W/g+0TNoA5TcWgnKjOmhiqq2Z6pKEQVKv16O67AVpQ3ulA61qgc3oZCtwuVDR+gVjb9u1DSkoNCozLpmonCDKD85BGtme1Zbqpa3oZcsazyoqqG2tdV2zbWReAM6uBCeqgSnI70vuqhJI/tQmTVU3cE5f4ACkeHl+WNzUdu4weokI1WjrtVcv2+rKgSERHFkoRjWJtR/2lv3OW+QbWrseD5apzAJdSfUbOogzSh6QJQOMQIVrKLXu/qrmtu0mfFcy6AytQceIzQ6F+jd7+3BESYBGqbxeOMUcjTl2rd81oX+YUzeqBtTaoLWeqhlJ0mdmSs6xoFT2oVCsq2qnC8RmzbBc9wPWTWHhehWwRYX9ls9CrVp3t85tckwu716qHk6i/2FUCTOOSrPe6Kk1XI7XMG89V+5TQ/kT8AiIiIegjnB9ZPDmL+4UuYMt46hlWrtK7wYnDJQWBGMbwiqdSfPauWUUeT4y17lS6FL3MZ/LJKalQ6EyHHkspg5gM2lxSjUAW/EDlGVix3N+TAf38+clWgtVCBM1StdY9CYctOlIeC3hHMD42zldIxt2gT/JlVcIf2HR57WyePv1E8P28TLpeISewX/qUqOMpqbgAlh8IV04ottjGpUtzjFmFZ7lebFmNt6PXqfwRUiuCeK/crJ08OykXoDT+HiIioZ3N4YNWrpxgzHatGqlmaS1j1/HbUjy9GfWmxWHYWDeeBwTcYVVfqSOW+2Sjvs0wLV7KLXoa93D6hvvb4RKh0bwygUAtn85Bn76pv9KLXURf8crnsnteqstbKqRZ4RRgt9JhP9roNq7WgZ4TCD5Ar2ujbX3Xjy2qwCJMXp2rH7XcHtABprmTmuovD21Pd+kblOG+WCNby2FTorBwig3aixy2OTXu9avK4ULLRGkgLPeK9UI/hnorSVBHiz6k2ERFRD+fgwCrCaslB7M4ah92TzEE0DYOvA4ZZQqw+TGBwf9WkDqLGfWYUW87clxXCrDSjHcf1LuRqVxUwhTNTsNO68JGDzaZxpFp3eyh0CupMf7iXYbVbzTO454VDoQjDWWLdUJBW40w3q3GicjysrAyXH9WHCGSJ58WvEltD5+rrRdBuy3GbyWqwemi8pwkNqSAiIuqhHBpYw2E1fPKVQT8J68Th/ShTJ1k17P0Iu68bCE/HXmmLhLwhOVo1MVSZVEEw1xIe9Wqmtetb0MaRBlBSocaRyucdrUJu5ig92GlBrgoFW4yud3WykzFm1hRWLZerikK7PFVDDlZZnmceetAEX0M4VGYPF6+hcVf4eOVJWKYxrhbacVTBk6cCamvHbaMNJzCN5ZXvaaV4H7UTuKRo7+nxlbg95RqkTFiJGjWLiIiop3DkdVit11gNmzJDdv/rjy3PiXKd1o7G67Aa40NzsFl27auWzrhearRrtBrL9FbEtVJt1ywt9GwKVVIt1ykNMfZh3W6065harrEq2Z9jeV3WY7euG+01xz5u63aFVo8t2vZrsHLCzVh0oBjbg+swTc0lIiLqCRwZWJNBjw6s1CV2FF2D6bXLceK9hRiq5hEREfUESXhZK6IeZuejSElhWCUiop6LgZXI6fLXIRj8DEGGVSIi6qEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0Rx7pyvfBi+K61Qjxq1X9ef0xoLH7kNRJ994i7dmjX4L09a1fgtV/V79O1EZbZmg36I1cr8Rt14VLLdINRjHb96+sU+9BaTmw180E9mqKbV++1Qhyraj31I2xrERERFRBGdWWI9VoBjjUF9aLKbpWIBqeDZUq4VSNRaUeOFLG4Fhag51EhnsfAERFjfhcskm+EXgKtm4BhVqcWsqtoiw2leEObHu5ZJiFDZ6xfaa1FI9FPYSG/NkqBkW8n79s1HZJx+5ao5drnuZ2rY+RQZCsQ1x/IUZtoTtmok3Tett7rsT7i1H1EJBBFG33yVCqlqeUYWCsq2oVYt10bedNyu8XW26Xx6/C+nXqycQERFRXM4MrCPzUD9nhGrcAM/Q3sD5i2hQc3wbPsLgx4qxitWpTldxSFY+p4Yqm9meqShEFSr9ejsuEXbLZWV0rFE1vQ2FbhcqGz7Qg59YvuDiVFwumol0bblVxZZdSBdBefVwNeMKVGzxolwcf2EfNSOGrD7m0NmEtUerRBieGqqo5o0VobOlCiKfhiS67drj4v0yvYdEREQUX1KOYfXM6fwhACQ1oekCUDjECJyye1+ENPGorjlcJY3pXACVqTnwGEFNq1qKxNcSgDb6Q1Y5owwBMOTNasvQgyjE/goac7A5zj50TfA1BEyvM4CmFhc8w40YfQTzteEDYv45NSvhbR9BuQj34dBORERErXF+YP3kIOYfvoQp4yPHsFLXkeMye5UuhS9zGfyySnrROnY0LjmsoFSs7wM2y2EBMvi1YfV4Kv1LxXHJYxOTuUtfVUkLPVHGnRrk+FNt3aUoQT4KIyr4MqDL5V7xV5McFmAE9QS2rdT6dmlVWFZXiYiIEufwwFqNBc9XA2OmY9VINYu6XLlvNsr76GNF3/Sko06E1VxLF3ocLTvh3hhAoTaeUwS8wBnUyfGc7RDgsj3LTWNFl6H0gjcUWmt9Xm3sbNyTnNzzwuuPDsBdah6bG0DJxqVoGq0vX+
3Wq81ZaemJbVsjq6vmyi0RERElwsGBVZ5YdRC7s8Zh96Qb1DzqWulI7yv+l1GsBVVdOLi16nqXdrJR6f2mSqQ2TMCFLNVsP+nwZBopWO/iR6MIsKr6qg1F0NqLsTZaddc9ylT5FYE6VT+hKxxK9WEC6dcnvm2tupoarXJLRERE8Tg0sIbDavjkK3KCvCE5Whibb5xk5d+FkpYcEeZUW2N0ndvCoGsUPKkBlFQYZ9frXem5maMsl49qF/IErlA1Mx1zi4zKqz7JYQzapadKoo+L1cOlMd5WD7+Vfm/o9YSXJ7ptVV0dbb1UVlgNVk64Bikpd2DlcTWLiIiINI68DmvD3lfhOXxJtcKmzCjWhgZEX96512PldViN67BGux6pDKzyWqvRrtFqLNNbsmoZrtbKy1bpJ3FZqGuaRrvOavg6sNbthuerpo22LXlFAuMkKctrElq7DmuU5YaIbQvatVgvxF5Hqll+B4YteQ/FOz7DC/lqJhERETn3xgFO16MDK3WMnY8iZVoNVlS/g4VXcekuIiKi7iYpL2tF1L3swCMp1zCsEhERxcDAStTlpuGF4GcIBhlWiYiIomFgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHY2AlIiIiIkdjYCUiIiIiR2NgJSIiIiJHc+ydrnwbvCiuU43rRsC3aBwyVRM4i7IV27HqU9XMGof6OSNUo3PwTlexWW+hGu3WrfFYb89qvXVra9u23Z5V3dLVIrAV92zciUqtYb19ayLHrd1itTHKbV8t2xXst2613Po1cn19u6ohFHo2YbVbNQzGPuLcFpaIiKg7cmaF9VgFiiFCaGmxmKZjAarh2VCtFgINe/dj39Dpavk4TKk7iCl7z6ql1KVEMHP7XSLsbcJlMW3OqEJB2VbUqsXxycDpRZ0IqXLdy/fni+0txXy/WtzKtiu2iLDaV4RUbXkxChu9uMfXpJYKMjRurILnfn39yyWm0Gjbtt8dsB23DNKzUdknH7lqTphYZtnuMpRiJxYY+5ZB0xcQIdXYNlCycQ0q9KVaUC4Q33h9XTF5clDuCy/XifemYieyMnJUm4iIqOdwZmAdmWeqmN4Az9DewPmLaFBzMifdh92TblCtEfBkASeam1Wbuo4IVUerkOueGqpM5o0VAa+lCiKvtc6/CyUtOSg0KqqumSjMAMpPHhGNVrYtQmG5rHyONSqqt6HQ7UJlwwcqdMr1ZWi0VUY1kdvO9kxFoem4K7bsQroInKuj3YkqcAZ1cCE9tN10pPdVD4WKQztRmTE1tF9t26hCpQridRcDyO1jOqjrXRGhuNbnRQnyUThEzSAiIupBOIaV2lEATS0ueIYbXfiy8ii7ycX8c2pWHLXNIh1mjAqFRq3yKLvJL5wRobOVbZ8LoDI1Bx5LxVRsryUgwqQQ+AA+sT6OL0av0tn6ZKv8ZqWFhx7Ibvv01PBx582KFnQV1yh4Uk3VXrFvOWxAP9YmNF2ACJpGkBbhuEwf8lDXrFdg84bkiPBqVJLF8goZcMPvgwzjC8Sy0jwOAyAiop7J+YH1k4OYf/gSpow3j2E1kcMH6npjgadzx7BSPDKUyVDoBTyy6z4czhIiu+5FoHQ35MB/v6yiqtCpaWXbsvtdhlEfsFkOC5CBVlZJZaBFFXyhrne92969RVZv0+HJdKH8aDjAahVNYyxsq9Ixt2gT/JlVcIf2HRlw5TjVXqVL4ctcBr+s/l5U5Vv3PHE8xYAvvNw89lZWaOEujh2YiYiIujmHB9ZqLHi+GhgzHatGqllmIsxO2XYaU2bch6KOPQeMTPTgFZ4s40RFQCzZuBRNo/XxmKvdeoXRWr2Mo9GLXkdd8MtQKU8s0iqnLmRpC1vZdosIoBsDKNQC6Tzk2bvqU/OxKnQClwiZo3NU9VZ20xfrAVa9pgWYilKx3/Tr9WfHp4do98Wp2nFp41/FNkJjb4VyEUbL++hjc+VJZJZhAFpA14ccyOCa5V8arv7Kau0F83ETERH1PA4OrCKslhzE7qxxpvGqJjKsxguz1GHyZumB0ZjCZ/HLbnT9zP7wGe56V34iwS87TQa4HGw2nQGvDRPo21+0W9m2Nu5Tnn1vOrPfHHblckul1k6vkoZfk4ihctuJVDXV2NvNqiqa7VmuVVD1iq0az5pRbHqfzEFbhF1t/KxRQb0Nq2VlWITvchF4K05W6UFcBWntSgOqbQ7ERERE3ZlDA2s4rEa9XJUprEYNs9RF9K71Sr8Xa1Vvd61vF8rNY0slo9veNoYU7lHayUgFWje9dESEtoAa/9nKtrVxpAGUVBjbVEEwc5QeftU40/JQNdi23EK/tBY8bbkclxp6oGmCr8EI2voYVVk5Dl/tQA+4uabLVoWGB0j+D1AuA7oI4vY/DuQVBLRLZonHlste7XwUKSnXIKVoh5pBRETUfTjyOqwNe1+F5/Al1QqbMqNYq6ZartEa0hsLHuu8oQG8DmtsluuZRrtmqAysMa8nqodF4zqs9uuRxt+27JoPX4fVfg1X+7Yty2W3fJzrpFqv0WoIPy9iuf0asJbt26/xaj2uaPsPkduRQyYi3rcdeCRlBrzjl+PEewsxVM0lIiLqDhx74wCnY2AlZ6nBygk3Y1H2NgTLpql5RERE3QMva0WU5GqW34GUFIZVIiLqvhhYiZLc0MXvIBj8jGGViIi6LQZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0BlYiIiIicjQGViIiIiJyNAZWIiIiInI0x97pynL71etGwLdoHDJVE8cqMHjbadUQssahfs4I1egcvNNVbNbblNpvQ9qaOLdPtd161X7704ots1HQqBqC/bauIcZtUuPePtV+e9T4+9YYt5xt5bgMMY+PiIiILJxZYRWBtBgihJYWi2k6FqAang3VaqEwMk8tU8vPH8SUvWfVQupSIvS5/S4RUjfhspg2Z1ShoGwratXi+GQo9KJOhFS57uX788X2lmK+X19a6/PCl6mWlRSjsNGLe3xN+kIhb5a+T2PdOhGc19pv/y8DsS+Awgzbjfpl2BTzS+/X1/eLIFmycQ0q1OLW9i1Dei/xZE+GmmFiOS51bLkiEKdfr55AREREcTkzsMpAGqqY3gDP0N7A+YtoUHOsbkBmP/WQupgInEerkOueGqqo5o0V4aylCiILts6/CyUtOSg0KqqumSJYAuUnj2jNbM9yU7X1NuSKZZUXY2zY1R9Z6qFZxRYvyjOmorCPmqFUHJKV0amhimq2RzwHVahUYTnuvkXYXXBxKi4XzYTxjHhqj4vtmvZFRERE8XWDMazV8NX1xl3uG1Sbuk4ATS0ueIYbse0I5ssucjn/nJoVR22zCIAZo0JhV1Ytta70C2cSrNCa+D9AeWoOPOZQ6F8jtpeDzfaufBG0my4AhUOM+XqlVw5LqGsOV1FjEsH6zYhtxnIE5SIEl45N9PlERETk/MD6yUHMP3wJU8abxrAKDXtfxeASr5gOYnfWLSjq2CG11CYy8M1Gr1Iv4JHDAhIMfgY5lrR0NtwNOfDL7vOWAIzhzCFa+HTZgp+xX
zH5qlA4eiay1RKj+lvoiT+eVo437VW6VOv+97td0Su4UfedmFrfLq3Cy+oqERFR4hweWKux4PlqYMx0rBqpZimZk+4LjWP1pX2EwSsOxhgyQO1ND3XhyTyWU1ZZSzYuRdNofbzmardevcxKS6SzXGj0otdRF/xyrGeRCJznAqhMdVm797XxpjJ8mk+KktIxt8gYK7oM6UfDxybHoJb0LY57klO5bzbK++jjVGX3f50Iq7l9bMky5r4TIaurAVMll4iIiBLh4MAqwqpWPR2H3ZPid/dnugdi2KcXUa/a1LHsJxGFx3a6kJ6qn9kfDob6MIFETjDKTpMJMAebZVDVZ+nDBPr2D1dK1Zn4sOwjmnR4Mo0KaRN8DeL/MgyrkK1dxUBryxOz0pHeVzwto9hyRYKIoJ3wvqPTqqup+Si8gnWJiIh6MocG1nBYTeRyVT5fNU5kDYJHtamrqJDo94bOztdDmm0sqQx+Mjjarx7gHqWd6FSwRT/JKqIiaQqM4WAZi3ldc+VVn2R3v3ZpqhK9Upo3JEcLsMYVCYwTwHKNcNmmfUejjscyTMGsBisnXIOUlDuw8riaRURERBpHXodVjk/1HL6kWmFTZhRrQwMilvM6rI5iuQ5raj78poqpRoW/ymjLRLAzX4fVfK3S6NczNa6XKsevmq6TKsS7zql2jPLM/pjXYbVePzb+vq3HHGK6Hqu2/oVorzesZvkdGLbkPRTv+Awv5KuZRERE5NwbBzgdAyu1u52PImVaDVZUv4OFw9U8IiIi6g6XtSJKdjvwSMo1DKtEREQxMLASdblpeCH4GYJBhlUiIqJoGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEYWImIiIjI0RhYiYiIiMjRGFiJiIiIyNEce6cr3wYviutU47oR8C0ah0zVDDuLshXbserT3ljw2H0o6sSbb/FOV7FZbs1qu8Vp66y3Oc213bs//rbtt0g1L7ffutW4rapqGreLVc1ot5S1357VeuvXeMcd5datUW9LS0RERFHJwNpWgUBA/Pezjpt+uTOY+cqvVPtM0Lt8nakdnur3/ETM3xmc/48/CXrPWJd19LRu3bqo8ztjqq6ujjrfEdOxVcEvLVsV3KXauzbPCn7J+9Ngjfk5MadTwTXeWcG7953S26d/Grx72azgvGNqedxt6+uGnmss33wo1LZMtm3ZJ/u6NfsWWrdlWb+V4w4eCs6Lsy9OnDhx4sSJU/zJmUMCRuahfs4I1bgBnqG9gfMX0aDmaD45iPmHgQWeQWoGdb0mrD1ahVz31FDVM29sPnJbquAziqLx+HehpCUHhUZl0jUThRlA+ckjotHatgNoanEh/XptkSarj1E+jeJ6F3LVw2js69ZdDCDXPM+8ftzjJiIioquVpGNYz6Ls9WpgzMROHQZArdFDo2e4qStc62YX88+pWXHUNovkmTEqFEhl97/WBX/hDGpb3fZtyM0IoGTjGlTIZmArFvgDKBxym2xFqD1ehUrTvqya4Guwrps3JAeV/qWY75ctEZ4rxL7V+vGPm4iIiK6W8wOrVkm9hCnjTWNYj72PVRiB1ZNuUDPIWeR40dnoVeoFPJuwOQOoa25SyxLgXyPWnQ13Qw7898sqagDGcOZ4286btQmXPUCBWLfXxip47jePMZXkWFK5rti2CJ6lY21hVu23V+lSlCAfheZ13fNwuaQY8OnLfZnLcHlW9PWjH3eVflzatBhrE6k4ExERkQD8/+3lMs0z1zWgAAAAAElFTkSuQmCC" + } + }, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![image.png](attachment:image.png)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now try to query from Azure AI Search!\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "await search_memory_examples(kernel)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We have laid the foundation which will allow us to store an arbitrary amount of data in an external Vector Store above and beyond what could fit in memory at the expense of a little more latency.\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "nbformat": 4, - "nbformat_minor": 5 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/python/notebooks/08-native-function-inline.ipynb b/python/notebooks/08-native-function-inline.ipynb index afaa72e2bcff..b3d5d38e974f 100644 --- a/python/notebooks/08-native-function-inline.ipynb +++ b/python/notebooks/08-native-function-inline.ipynb @@ -51,32 +51,50 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, + "id": "fddb5403", + "metadata": {}, + "outputs": [], + "source": [ + "from services import Service\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" + ] + }, + { + 
"cell_type": "code", + "execution_count": 9, "id": "dd150646", "metadata": {}, "outputs": [], "source": [ "import semantic_kernel as sk\n", - "from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureChatCompletion,\n", - " OpenAIChatCompletion,\n", - ")\n", + "import semantic_kernel.connectors.ai.open_ai as sk_oai\n", + "from semantic_kernel.prompt_template.input_variable import InputVariable\n", "\n", "kernel = sk.Kernel()\n", "\n", - "useAzureOpenAI = False\n", + "if selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", - "# Configure AI service used by the kernel\n", - "if useAzureOpenAI:\n", " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_chat\" # used later in the notebook\n", " azure_chat_service = AzureChatCompletion(\n", - " deployment_name=\"turbo\", endpoint=endpoint, api_key=api_key\n", + " service_id=service_id, deployment_name=\"gpt-35-turbo\", endpoint=endpoint, api_key=api_key\n", " ) # set the deployment name to the value of your chat model\n", - " kernel.add_chat_service(\"chat_completion\", azure_chat_service)\n", - "else:\n", + " kernel.add_service(azure_chat_service)\n", + "\n", + "# Configure OpenAI service\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_chat_service = OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id)\n", - " kernel.add_chat_service(\"chat-gpt\", oai_chat_service)" + " service_id = \"oai_chat\" # used later in the notebook\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=service_id, ai_model_id=\"gpt-4-turbo-1106\", api_key=api_key, org_id=org_id\n", + " )\n", + " kernel.add_service(oai_chat_service)" ] }, { @@ -99,13 +117,13 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "ae29c207", "metadata": {}, "outputs": [], "source": [ "import random\n", - "from semantic_kernel.plugin_definition import kernel_function\n", + "from semantic_kernel.functions import kernel_function\n", "\n", "\n", "class GenerateNumberPlugin:\n", @@ -145,28 +163,49 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "id": "7890943f", "metadata": {}, "outputs": [], "source": [ - "sk_prompt = \"\"\"\n", + "prompt = \"\"\"\n", "Write a short story about two Corgis on an adventure.\n", "The story must be:\n", "- G rated\n", "- Have a positive message\n", "- No sexism, racism or other bias/bigotry\n", - "- Be exactly {{$input}} paragraphs long\n", + "- Be exactly {{$input}} paragraphs long. 
It must be this length.\n", "\"\"\"\n", "\n", - "corgi_story = kernel.create_semantic_function(\n", - " prompt_template=sk_prompt,\n", + "if selectedService == Service.OpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=deployment,\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "\n", + "prompt_template_config = sk.PromptTemplateConfig(\n", + " template=prompt,\n", + " name=\"story\",\n", + " template_format=\"semantic-kernel\",\n", + " input_variables=[\n", + " InputVariable(name=\"input\", description=\"The user input\", is_required=True),\n", + " ],\n", + " execution_settings=execution_settings,\n", + ")\n", + "\n", + "corgi_story = kernel.create_function_from_prompt(\n", " function_name=\"CorgiStory\",\n", " plugin_name=\"CorgiPlugin\",\n", - " description=\"Write a short story about two Corgis on an adventure\",\n", - " max_tokens=500,\n", - " temperature=0.5,\n", - " top_p=0.5,\n", + " prompt_template_config=prompt_template_config,\n", ")\n", "\n", "generate_number_plugin = kernel.import_plugin(GenerateNumberPlugin(), \"GenerateNumberPlugin\")" @@ -174,35 +213,67 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "id": "2471c2ab", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "6\n" + ] + } + ], "source": [ "# Run the number generator\n", "generate_number_three_or_higher = generate_number_plugin[\"GenerateNumberThreeOrHigher\"]\n", - "number_result = await generate_number_three_or_higher(6)\n", + "number_result = await generate_number_three_or_higher(kernel, input=6)\n", "print(number_result)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "id": "f043a299", "metadata": {}, "outputs": [], "source": [ - "story = await corgi_story.invoke(input=number_result.result)" + "story = await corgi_story.invoke(kernel, input=number_result.value)" + ] + }, + { + "cell_type": "markdown", + "id": "7245e7a2", + "metadata": {}, + "source": [ + "_Note: depending on which model you're using, it may not respond with the proper number of paragraphs._" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "id": "59a60e2a", "metadata": {}, - "outputs": [], - "source": [ - "print(\"Generating a corgi story exactly {} paragraphs long: \".format(number_result.result))\n", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generating a corgi story exactly 6 paragraphs long.\n", + "=====================================================\n", + "Once upon a time in a quaint little town, there lived two adventurous Corgis named Max and Ruby. Max and Ruby loved exploring the world around them, and one sunny day, they decided to embark on a grand adventure to the nearby forest. As they trotted through the tall trees and lush greenery, their tails wagged with excitement.\n", + "\n", + "As they journeyed deeper into the forest, they encountered all sorts of new and interesting sights and smells. They met friendly squirrels, playful rabbits, and even a wise old owl perched high in the trees. 
Max and Ruby were overjoyed by the beauty of the natural world and the kindness of the creatures they met along the way.\n", + "\n", + "As the sun began to set, Max and Ruby realized they had ventured quite far from home. They looked at each other with concern, but then remembered the valuable lesson their parents had taught them - always stick together and help each other out. With newfound determination, they set out on the journey back home, supporting each other every step of the way.\n", + "\n", + "Finally, with tired but happy hearts, Max and Ruby arrived back in their cozy little town. They realized that no matter where they went or what adventures they had, the most important thing was that they had each other. With a wag of their tails and a bark of joy, they knew that they could conquer any challenge as long as they faced it together. And so, Max and Ruby's grand adventure had not only brought them closer to nature but also closer to each other.\n" + ] + } + ], + "source": [ + "print(f\"Generating a corgi story exactly {number_result.value} paragraphs long.\")\n", "print(\"=====================================================\")\n", "print(story)" ] @@ -213,43 +284,56 @@ "id": "8ef29d16", "metadata": {}, "source": [ - "## Context Variables\n", + "## Kernel Functions with Annotated Parameters\n", "\n", "That works! But let's expand on our example to make it more generic.\n", "\n", "For the native function, we'll introduce the lower limit variable. This means that a user will input two numbers and the number generator function will pick a number between the first and second input.\n", "\n", - "We'll make use of the `semantic_kernel.ContextVariables` class to do hold these variables.\n" + "We'll make use of the Python's `Annotated` class to hold these variables.\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "id": "d54983d8", "metadata": {}, "outputs": [], "source": [ + "import sys\n", "import semantic_kernel as sk\n", "from semantic_kernel.connectors.ai.open_ai import (\n", " AzureChatCompletion,\n", " OpenAIChatCompletion,\n", ")\n", "\n", + "if sys.version_info >= (3, 9):\n", + " from typing import Annotated\n", + "else:\n", + " from typing_extensions import Annotated\n", + "\n", "kernel = sk.Kernel()\n", "\n", - "useAzureOpenAI = False\n", + "if selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", - "# Configure AI service used by the kernel\n", - "if useAzureOpenAI:\n", " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"aoai_chat\" # used later in the notebook\n", " azure_chat_service = AzureChatCompletion(\n", - " deployment_name=\"turbo\", endpoint=endpoint, api_key=api_key\n", + " service_id=service_id, deployment_name=\"gpt-35-turbo\", endpoint=endpoint, api_key=api_key\n", " ) # set the deployment name to the value of your chat model\n", - " kernel.add_chat_service(\"chat_completion\", azure_chat_service)\n", - "else:\n", + " kernel.add_service(azure_chat_service)\n", + "\n", + "# Configure OpenAI service\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_chat_service = OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id)\n", - " kernel.add_chat_service(\"chat-gpt\", oai_chat_service)" + " service_id = \"oai_chat\" # used later in the 
notebook\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=service_id, ai_model_id=\"gpt-4-turbo-1106\", api_key=api_key, org_id=org_id\n", + " )\n", + " kernel.add_service(oai_chat_service)" ] }, { @@ -258,19 +342,18 @@ "id": "091f45e4", "metadata": {}, "source": [ - "Let's start with the native function. Notice that we're also adding `@kernel_function_context_parameter` decorators to the function here to provide context about what variables need to be provided to the function, and any defaults for those inputs. Using the `@kernel_function_context_parameter` decorator provides the name, description and default values for a function's inputs to the [planner.](./05-using-the-planner.ipynb)\n" + "Let's start with the native function. Notice that we're add the `@kernel_function` decorator that holds the name of the function as well as an optional description. The input parameters are configured as part of the function's signature, and we use the `Annotated` type to specify the required input arguments.\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "id": "4ea462c2", "metadata": {}, "outputs": [], "source": [ "import random\n", - "from semantic_kernel.plugin_definition import kernel_function, kernel_function_context_parameter\n", - "from semantic_kernel import KernelContext\n", + "from semantic_kernel.functions import kernel_function\n", "\n", "\n", "class GenerateNumberPlugin:\n", @@ -279,12 +362,14 @@ " \"\"\"\n", "\n", " @kernel_function(\n", - " description=\"Generate a random number between min and max\",\n", " name=\"GenerateNumber\",\n", + " description=\"Generate a random number between min and max\",\n", " )\n", - " @kernel_function_context_parameter(name=\"min\", description=\"Minimum number of paragraphs.\")\n", - " @kernel_function_context_parameter(name=\"max\", description=\"Maximum number of paragraphs.\", default_value=\"10\")\n", - " def generate_number(self, context: KernelContext) -> str:\n", + " def generate_number(\n", + " self,\n", + " min: Annotated[int, \"the minimum number of paragraphs\"],\n", + " max: Annotated[int, \"the maximum number of paragraphs\"] = 10,\n", + " ) -> Annotated[int, \"the output is a number\"]:\n", " \"\"\"\n", " Generate a number between min-max\n", " Example:\n", @@ -296,18 +381,26 @@ " int value\n", " \"\"\"\n", " try:\n", - " return str(random.randint(int(context[\"min\"]), int(context[\"max\"])))\n", + " return str(random.randint(min, max))\n", " except ValueError as e:\n", - " print(f\"Invalid input {context['min']} {context['max']}\")\n", + " print(f\"Invalid input {min} and {max}\")\n", " raise e" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "id": "48bcdf9e", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "plugin_name='GenerateNumberPlugin' description='Generate a random number between min and max' name='GenerateNumber' is_prompt=False stream_function=None parameters=[KernelParameterMetadata(name='min', description='the minimum number of paragraphs', default_value=None, type_='int', required=True, expose=True), KernelParameterMetadata(name='max', description='the maximum number of paragraphs', default_value=10, type_='int', required=True, expose=True)] return_parameter=KernelParameterMetadata(name='return', description='the output is a number', default_value=None, type_='int', required=True, expose=False) function=> plugins=KernelPluginCollection(plugins={'GenerateNumberPlugin': 
KernelPlugin(name='GenerateNumberPlugin', description=None, functions={'GenerateNumber': KernelFunction(plugin_name='GenerateNumberPlugin', description='Generate a random number between min and max', name='GenerateNumber', is_prompt=False, stream_function=None, parameters=[KernelParameterMetadata(name='min', description='the minimum number of paragraphs', default_value=None, type_='int', required=True, expose=True), KernelParameterMetadata(name='max', description='the maximum number of paragraphs', default_value=10, type_='int', required=True, expose=True)], return_parameter=KernelParameterMetadata(name='return', description='the output is a number', default_value=None, type_='int', required=True, expose=False), function=>, plugins=KernelPluginCollection(plugins={...}), prompt_execution_settings={}, prompt_template_config=None, metadata=KernelFunctionMetadata(name='GenerateNumber', plugin_name='GenerateNumberPlugin', description='Generate a random number between min and max', parameters=[KernelParameterMetadata(name='min', description='the minimum number of paragraphs', default_value=None, type_='int', required=True, expose=True), KernelParameterMetadata(name='max', description='the maximum number of paragraphs', default_value=10, type_='int', required=True, expose=True)], is_prompt=False, is_asynchronous=True, return_parameter=KernelParameterMetadata(name='return', description='the output is a number', default_value=None, type_='int', required=True, expose=False)))})}) prompt_execution_settings={} prompt_template_config=None metadata=KernelFunctionMetadata(name='GenerateNumber', plugin_name='GenerateNumberPlugin', description='Generate a random number between min and max', parameters=[KernelParameterMetadata(name='min', description='the minimum number of paragraphs', default_value=None, type_='int', required=True, expose=True), KernelParameterMetadata(name='max', description='the maximum number of paragraphs', default_value=10, type_='int', required=True, expose=True)], is_prompt=False, is_asynchronous=True, return_parameter=KernelParameterMetadata(name='return', description='the output is a number', default_value=None, type_='int', required=True, expose=False))\n" + ] + } + ], "source": [ "generate_number_plugin = kernel.import_plugin(GenerateNumberPlugin(), \"GenerateNumberPlugin\")\n", "generate_number = generate_number_plugin[\"GenerateNumber\"]" @@ -324,12 +417,12 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "id": "8b8286fb", "metadata": {}, "outputs": [], "source": [ - "sk_prompt = \"\"\"\n", + "prompt = \"\"\"\n", "Write a short story about two Corgis on an adventure.\n", "The story must be:\n", "- G rated\n", @@ -339,81 +432,112 @@ "- Be written in this language: {{$language}}\n", "\"\"\"\n", "\n", - "corgi_story = kernel.create_semantic_function(\n", - " prompt_template=sk_prompt,\n", + "if selectedService == Service.OpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=deployment,\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "\n", + "prompt_template_config = sk.PromptTemplateConfig(\n", + " template=prompt,\n", + " name=\"summarize\",\n", + " template_format=\"semantic-kernel\",\n", + " 
input_variables=[\n", + " InputVariable(name=\"paragraph_count\", description=\"The number of paragraphs\", is_required=True),\n", + " InputVariable(name=\"language\", description=\"The language of the story\", is_required=True),\n", + " ],\n", + " execution_settings=execution_settings,\n", + ")\n", + "\n", + "corgi_story = kernel.create_function_from_prompt(\n", " function_name=\"CorgiStory\",\n", " plugin_name=\"CorgiPlugin\",\n", - " description=\"Write a short story about two Corgis on an adventure\",\n", - " max_tokens=500,\n", - " temperature=0.5,\n", - " top_p=0.5,\n", + " prompt_template_config=prompt_template_config,\n", ")" ] }, { "attachments": {}, "cell_type": "markdown", - "id": "fdce1872", + "id": "c8778bad", "metadata": {}, "source": [ - "Now we can call this using our `invoke` function by passing in our `context_variables` in the `variables` parameter.\n" + "Let's generate a paragraph count.\n" ] }, { "cell_type": "code", - "execution_count": null, - "id": "97d8d3c9", + "execution_count": 23, + "id": "28820d9d", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generating a corgi story 3 paragraphs long.\n" + ] + } + ], "source": [ - "context_variables = sk.ContextVariables(variables={\"min\": \"1\", \"max\": \"5\", \"language\": \"Spanish\"})" + "result = await generate_number.invoke(kernel, min=1, max=5)\n", + "num_paragraphs = result.value\n", + "print(f\"Generating a corgi story {num_paragraphs} paragraphs long.\")" ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "c8778bad", + "id": "225a9147", "metadata": {}, "source": [ - "Let's add a paragraph count to our context variables\n" + "We can now invoke our corgi_story function using the `kernel` and the keyword arguments `paragraph_count` and `language`." ] }, { "cell_type": "code", - "execution_count": null, - "id": "28820d9d", - "metadata": {}, - "outputs": [], - "source": [ - "num = await generate_number.invoke(variables=context_variables)\n", - "context_variables[\"paragraph_count\"] = num.result" - ] - }, - { - "cell_type": "code", - "execution_count": null, + "execution_count": 24, "id": "dbe07c4d", "metadata": {}, "outputs": [], "source": [ "# Pass the output to the semantic story function\n", - "story = await corgi_story.invoke(variables=context_variables)" + "desired_language = \"Spanish\"\n", + "story = await corgi_story.invoke(kernel, paragraph_count=num_paragraphs, language=desired_language)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "id": "6732a30b", "metadata": { "scrolled": true }, - "outputs": [], - "source": [ - "print(\n", - " \"Generating a corgi story exactly {} paragraphs long in {} language: \".format(\n", - " context_variables[\"paragraph_count\"], context_variables[\"language\"]\n", - " )\n", - ")\n", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generating a corgi story 3 paragraphs long in Spanish.\n", + "=====================================================\n", + "Había una vez dos corgis llamados Coco y Tito. Eran los mejores amigos y les encantaba ir en aventuras juntos. Un día, decidieron explorar el bosque cercano a su casa. Se adentraron en el bosque y descubrieron un hermoso arroyo donde jugaron y se divirtieron durante horas.\n", + "\n", + "Mientras exploraban, Coco y Tito se encontraron con un pajarito herido. Sin dudarlo, decidieron llevarlo a casa y cuidarlo hasta que estuviera completamente recuperado. 
A medida que pasaban los días, el pajarito se recuperó y se convirtió en un amigo más para Coco y Tito. Juntos, disfrutaron de muchas aventuras y aprendieron que la amistad y la compasión son los valores más importantes en la vida.\n", + "\n", + "Coco y Tito demostraron que la verdadera amistad y el amor por los demás pueden hacer del mundo un lugar mejor. Aprendieron que siempre hay espacio para ayudar a los demás y que las aventuras son mucho más emocionantes cuando se comparten con amigos. Y así, Coco, Tito y su nuevo amigo alado siguieron explorando el mundo juntos, llenando cada día de alegría y bondad.\n" + ] + } + ], + "source": [ + "print(f\"Generating a corgi story {num_paragraphs} paragraphs long in {desired_language}.\")\n", "print(\"=====================================================\")\n", "print(story)" ] @@ -426,7 +550,7 @@ "source": [ "## Calling Native Functions within a Semantic Function\n", "\n", - "One neat thing about the Semantic Kernel is that you can also call native functions from within Semantic Functions!\n", + "One neat thing about the Semantic Kernel is that you can also call native functions from within Prompt Functions!\n", "\n", "We will make our CorgiStory semantic function call a native function `GenerateNames` which will return names for our Corgi characters.\n", "\n", @@ -435,13 +559,13 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "id": "d84c7d84", "metadata": {}, "outputs": [], "source": [ "import random\n", - "from semantic_kernel.plugin_definition import kernel_function\n", + "from semantic_kernel.functions import kernel_function\n", "\n", "\n", "class GenerateNamesPlugin:\n", @@ -468,7 +592,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "id": "2ab7d65f", "metadata": {}, "outputs": [], @@ -479,12 +603,12 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 33, "id": "94decd3e", "metadata": {}, "outputs": [], "source": [ - "sk_prompt = \"\"\"\n", + "prompt = \"\"\"\n", "Write a short story about two Corgis on an adventure.\n", "The story must be:\n", "- G rated\n", @@ -498,39 +622,92 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 34, + "id": "be72a503", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.OpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " execution_settings = sk_oai.OpenAIChatPromptExecutionSettings(\n", + " service_id=service_id,\n", + " ai_model_id=deployment,\n", + " max_tokens=2000,\n", + " temperature=0.7,\n", + " )\n", + "\n", + "prompt_template_config = sk.PromptTemplateConfig(\n", + " template=prompt,\n", + " name=\"corgi-new\",\n", + " template_format=\"semantic-kernel\",\n", + " input_variables=[\n", + " InputVariable(name=\"paragraph_count\", description=\"The number of paragraphs\", is_required=True),\n", + " InputVariable(name=\"language\", description=\"The language of the story\", is_required=True),\n", + " ],\n", + " execution_settings=execution_settings,\n", + ")\n", + "\n", + "corgi_story = kernel.create_function_from_prompt(\n", + " function_name=\"CorgiStoryUpdated\",\n", + " plugin_name=\"CorgiPluginUpdated\",\n", + " prompt_template_config=prompt_template_config,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 35, "id": 
"56e6cf0f", "metadata": {}, "outputs": [], "source": [ - "context_variables = sk.ContextVariables(variables={\"min\": \"1\", \"max\": \"5\", \"language\": \"Spanish\"})\n", - "num = await generate_number.invoke(variables=context_variables)\n", - "context_variables[\"paragraph_count\"] = str(num.result)" + "result = await generate_number.invoke(kernel, min=1, max=5)\n", + "num_paragraphs = result.value" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 36, "id": "7e980348", "metadata": {}, "outputs": [], "source": [ - "# Pass the output to the semantic story function\n", - "story = await corgi_story.invoke(variables=context_variables)" + "desired_language = \"French\"\n", + "story = await corgi_story.invoke(kernel, paragraph_count=num_paragraphs, language=desired_language)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 37, "id": "c4ade048", "metadata": {}, - "outputs": [], - "source": [ - "print(\n", - " \"Generating a corgi story exactly {} paragraphs long in {} language: \".format(\n", - " context_variables[\"paragraph_count\"], context_variables[\"language\"]\n", - " )\n", - ")\n", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generating a corgi story 5 paragraphs long in French.\n", + "=====================================================\n", + "Il était une fois, dans une petite ville pittoresque, deux adorables corgis nommés Bacon et Shorts. Ces deux petits chiens étaient inséparables et aimaient partir à l'aventure ensemble.\n", + "\n", + "Un jour, Bacon et Shorts décidèrent de partir explorer la forêt voisine. Ils couraient joyeusement à travers les sentiers, s'arrêtant de temps en temps pour renifler les fleurs sauvages et regarder les oiseaux dans les arbres.\n", + "\n", + "Alors qu'ils marchaient, ils rencontrèrent un lapin effrayé qui avait perdu son chemin. Sans hésiter, Bacon et Shorts s'approchèrent du lapin et lui offrirent leur aide. Ensemble, ils guidèrent le lapin de retour à son terrier, où sa famille l'attendait avec impatience.\n", + "\n", + "Le lapin était tellement reconnaissant qu'il invita Bacon et Shorts à un festin dans la clairière. Les corgis furent enchantés par la gentillesse du lapin et par la délicieuse nourriture qu'il leur offrit. Ils réalisèrent que même les plus petites actions de bonté peuvent apporter de la joie à ceux qui en ont besoin.\n", + "\n", + "Après cette aventure, Bacon et Shorts rentrèrent chez eux, le cœur rempli de gratitude. Ils savaient qu'ils avaient fait une différence dans la vie du lapin et qu'ils avaient vécu une belle aventure ensemble. 
Ils savaient aussi que peu importe la taille de quelqu'un, il était important d'être gentil et serviable envers tout le monde.\n" + ] + } + ], + "source": [ + "print(f\"Generating a corgi story {num_paragraphs} paragraphs long in {desired_language}.\")\n", "print(\"=====================================================\")\n", "print(story)" ] @@ -545,9 +722,9 @@ "\n", "A quick review of what we've learned here:\n", "\n", - "- We've learned how to create native and semantic functions and register them to the kernel\n", - "- We've seen how we can use context variables to pass in more custom variables into our prompt\n", - "- We've seen how we can call native functions within semantic function prompts.\n" + "- We've learned how to create native and prompt functions and register them to the kernel\n", + "- We've seen how we can use Kernel Arguments to pass in more custom variables into our prompt\n", + "- We've seen how we can call native functions within a prompt.\n" ] } ], diff --git a/python/notebooks/09-groundedness-checking.ipynb b/python/notebooks/09-groundedness-checking.ipynb index d0f1ff2885d4..328a68c7e81d 100644 --- a/python/notebooks/09-groundedness-checking.ipynb +++ b/python/notebooks/09-groundedness-checking.ipynb @@ -105,14 +105,18 @@ "# Configure AI service used by the kernel\n", "if useAzureOpenAI:\n", " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " service_id = \"chat_completion\"\n", " azure_chat_service = AzureChatCompletion(\n", - " deployment_name=\"turbo\", endpoint=endpoint, api_key=api_key\n", + " service_id=service_id, deployment_name=\"turbo\", endpoint=endpoint, api_key=api_key\n", " ) # set the deployment name to the value of your chat model\n", - " kernel.add_chat_service(\"chat_completion\", azure_chat_service)\n", + " kernel.add_service(azure_chat_service)\n", "else:\n", " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_chat_service = OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id)\n", - " kernel.add_chat_service(\"chat-gpt\", oai_chat_service)" + " service_id = \"chat-gpt\"\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=service_id, ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id\n", + " )\n", + " kernel.add_service(oai_chat_service)" ] }, { @@ -135,7 +139,9 @@ "# note: using plugins from the samples folder\n", "plugins_directory = \"../../samples/plugins\"\n", "\n", - "groundingSemanticFunctions = kernel.import_semantic_plugin_from_directory(plugins_directory, \"GroundingPlugin\")" + "groundingSemanticFunctions = kernel.import_plugin_from_prompt_directory(\n", + " service_id, plugins_directory, \"GroundingPlugin\"\n", + ")" ] }, { @@ -209,28 +215,6 @@ "Now, let us start calling individual semantic functions." ] }, - { - "cell_type": "markdown", - "id": "4e435b18", - "metadata": {}, - "source": [ - "### Preparing the Context\n", - "\n", - "Semantic functions operate in a context, which provides extra parameters for their operation. 
For the grounding plugin, the context is expected to supply the topic for the reference checking, and some particular examples of them:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "60aa6fa0", - "metadata": {}, - "outputs": [], - "source": [ - "context = kernel.create_new_context()\n", - "context[\"topic\"] = \"people and places\"\n", - "context[\"example_entities\"] = \"John, Jane, mother, brother, Paris, Rome\"" - ] - }, { "cell_type": "markdown", "id": "071c05e4", @@ -248,9 +232,14 @@ "metadata": {}, "outputs": [], "source": [ - "extraction_result = await entity_extraction(summary_text, context=context)\n", + "extraction_result = await kernel.invoke(\n", + " entity_extraction,\n", + " input=summary_text,\n", + " topic=\"poeple and places\",\n", + " example_entities=\"John, Jane, mother, brother, Paris, Rome\",\n", + ")\n", "\n", - "print(extraction_result.result)" + "print(extraction_result)" ] }, { @@ -271,16 +260,6 @@ "We now use the grounding text to see if the entities we found are grounded. We start by adding the grounding text to our context:" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "16cef37a", - "metadata": {}, - "outputs": [], - "source": [ - "context[\"reference_context\"] = grounding_text" - ] - }, { "cell_type": "markdown", "id": "894e38d7", @@ -296,9 +275,9 @@ "metadata": {}, "outputs": [], "source": [ - "grounding_result = await reference_check(extraction_result.result, context=context)\n", + "grounding_result = await kernel.invoke(reference_check, input=extraction_result.value, reference_context=grounding_text)\n", "\n", - "print(grounding_result.result)" + "print(grounding_result)" ] }, { @@ -309,16 +288,6 @@ "So we now have a list of ungrounded entities (of course, this list may not be well grounded itself). Let us store this in the context:" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "a76aba58", - "metadata": {}, - "outputs": [], - "source": [ - "context[\"ungrounded_entities\"] = grounding_result.result" - ] - }, { "cell_type": "markdown", "id": "35c1c329", @@ -336,9 +305,9 @@ "metadata": {}, "outputs": [], "source": [ - "excision_result = await entity_excision(summary_text, context=context)\n", + "excision_result = await kernel.invoke(entity_excision, input=summary_text, ungrounded_entities=grounding_result.value)\n", "\n", - "print(excision_result.result)" + "print(excision_result)" ] } ], @@ -358,7 +327,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/python/notebooks/10-multiple-results-per-prompt.ipynb b/python/notebooks/10-multiple-results-per-prompt.ipynb index 018c8892c169..77ccf1d425c5 100644 --- a/python/notebooks/10-multiple-results-per-prompt.ipynb +++ b/python/notebooks/10-multiple-results-per-prompt.ipynb @@ -1,404 +1,417 @@ { - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "68e1c158", - "metadata": {}, - "source": [ - "# Multiple Results\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "fb81bacd", - "metadata": {}, - "source": [ - "In this notebook we show how you can in a single request, have the LLM model return multiple results per prompt. 
This is useful for running experiments where you want to evaluate the robustness of your prompt and the parameters of your config against a particular large language model.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a77bdf89", - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install semantic-kernel==0.5.1.dev0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f4bfee4", - "metadata": {}, - "outputs": [], - "source": [ - "from services import Service\n", - "\n", - "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", - "selectedService = Service.AzureOpenAI" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "508ad44f", - "metadata": {}, - "outputs": [], - "source": [ - "import semantic_kernel as sk\n", - "\n", - "if selectedService == Service.OpenAI or selectedService == Service.AzureOpenAI:\n", - " from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (\n", - " OpenAITextPromptExecutionSettings,\n", - " OpenAIChatPromptExecutionSettings,\n", - " )\n", - " from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import (\n", - " AzureChatPromptExecutionSettings,\n", - " )\n", - " from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureTextCompletion,\n", - " AzureChatCompletion,\n", - " OpenAITextCompletion,\n", - " OpenAIChatCompletion,\n", - " )\n", - "if selectedService == Service.HuggingFace:\n", - " from semantic_kernel.connectors.ai.hugging_face import HuggingFaceTextCompletion" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "d8ddffc1", - "metadata": {}, - "source": [ - "First, we will set up the text and chat services we will be submitting prompts to.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f8dcbc6", - "metadata": {}, - "outputs": [], - "source": [ - "kernel = sk.Kernel()\n", - "\n", - "# Configure Azure LLM service\n", - "if selectedService == Service.AzureOpenAI:\n", - " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", - " azure_text_service = AzureTextCompletion(\n", - " deployment_name=\"gpt-35-turbo-instruct\", endpoint=endpoint, api_key=api_key\n", - " ) # set the deployment name to the value of your text model (e.g. 
gpt-35-turbo-instruct or text-davinci-003)\n", - " azure_chat_service = AzureChatCompletion(\n", - " deployment_name=\"gpt-35-turbo\", endpoint=endpoint, api_key=api_key\n", - " ) # set the deployment name to the value of your chat model\n", - "\n", - "# Configure OpenAI service\n", - "if selectedService == Service.OpenAI:\n", - " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_text_service = OpenAITextCompletion(ai_model_id=\"gpt-3.5-turbo-instruct\", api_key=api_key, org_id=org_id)\n", - " oai_chat_service = OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id)\n", - "\n", - "# Configure Hugging Face service\n", - "if selectedService == Service.HuggingFace:\n", - " hf_text_service = HuggingFaceTextCompletion(ai_model_id=\"distilgpt2\", task=\"text-generation\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "50561d82", - "metadata": {}, - "source": [ - "Next, we'll set up the completion request settings for text completion services.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "628c843e", - "metadata": {}, - "outputs": [], - "source": [ - "oai_text_prompt_execution_settings = OpenAITextPromptExecutionSettings(\n", - " extension_data={\n", - " \"max_tokens\": 80,\n", - " \"temperature\": 0.7,\n", - " \"top_p\": 1,\n", - " \"frequency_penalty\": 0.5,\n", - " \"presence_penalty\": 0.5,\n", - " \"number_of_responses\": 3,\n", - " }\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "857a9c89", - "metadata": {}, - "source": [ - "## Multiple Open AI Text Completions\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e2979db8", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.OpenAI:\n", - " prompt = \"what is the purpose of a rubber duck?\"\n", - " results = await oai_text_service.complete(prompt=prompt, settings=oai_text_prompt_execution_settings)\n", - " i = 1\n", - " for result in results:\n", - " print(f\"Result {i}: {result}\")\n", - " i += 1" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "4288d09f", - "metadata": {}, - "source": [ - "## Multiple Azure Open AI Text Completions\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5319f14d", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.AzureOpenAI:\n", - " prompt = \"provide me a list of possible meanings for the acronym 'ORLD'\"\n", - " results = await azure_text_service.complete(prompt=prompt, settings=oai_text_prompt_execution_settings)\n", - " i = 1\n", - " for result in results:\n", - " print(f\"Result {i}: {result}\")\n", - " i += 1" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "eb548f9c", - "metadata": {}, - "source": [ - "## Multiple Hugging Face Text Completions\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4a148709", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.HuggingFace:\n", - " from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import (\n", - " HuggingFacePromptExecutionSettings,\n", - " )\n", - "\n", - " hf_prompt_execution_settings = HuggingFacePromptExecutionSettings(\n", - " extension_data={\"max_new_tokens\": 80, \"temperature\": 0.7, \"top_p\": 1}\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9525e4f3", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.HuggingFace:\n", - " prompt 
= \"The purpose of a rubber duck is\"\n", - " results = await hf_text_service.complete(prompt=prompt, prompt_execution_settings=hf_prompt_execution_settings)\n", - " print(\"\".join(results))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "da632e12", - "metadata": {}, - "source": [ - "Here, we're setting up the settings for Chat completions.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e5f11e46", - "metadata": {}, - "outputs": [], - "source": [ - "oai_chat_prompt_execution_settings = OpenAIChatPromptExecutionSettings(\n", - " max_tokens=80,\n", - " temperature=0.7,\n", - " top_p=1,\n", - " frequency_penalty=0.5,\n", - " presence_penalty=0.5,\n", - " number_of_responses=3,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "d6bf238e", - "metadata": {}, - "source": [ - "## Multiple OpenAI Chat Completions\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dabc6a4c", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.OpenAI:\n", - " role = \"user\"\n", - " content = (\n", - " \"It's a beautiful day outside, birds are singing, flowers are blooming. On days like these, kids like you...\"\n", - " )\n", - " message = {\"role\": role, \"content\": content}\n", - " results = await oai_chat_service.complete_chat(messages=[message], settings=oai_chat_prompt_execution_settings)\n", - " i = 0\n", - " for result in results:\n", - " print(f\"Result {i}: {result[0]}\")\n", - " i += 1" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "cdb8f740", - "metadata": {}, - "source": [ - "## Multiple Azure OpenAI Chat Completions\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "66ba4767", - "metadata": {}, - "outputs": [], - "source": [ - "az_oai_prompt_execution_settings = AzureChatPromptExecutionSettings(\n", - " max_tokens=80,\n", - " temperature=0.7,\n", - " top_p=1,\n", - " frequency_penalty=0.5,\n", - " presence_penalty=0.5,\n", - " number_of_responses=3,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b74a64a9", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.AzureOpenAI:\n", - " role = \"user\"\n", - " content = \"Tomorow is going to be a great day, I can feel it. I'm going to wake up early, go for a run, and then...\"\n", - " message = {\"role\": role, \"content\": content}\n", - " results = await azure_chat_service.complete_chat(messages=[message], settings=az_oai_prompt_execution_settings)\n", - " i = 0\n", - " for result in results:\n", - " print(f\"Result {i}: {result[0]}\")\n", - " i += 1" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "98c8191d", - "metadata": {}, - "source": [ - "## Streaming Multiple Results\n", - "\n", - "Here is an example pattern if you want to stream your multiple results. 
Note that this is not supported for Hugging Face text completions at this time.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26a37702", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.OpenAI:\n", - " import os\n", - " from IPython.display import clear_output\n", - " import time\n", - "\n", - " # Determine the clear command based on OS\n", - " clear_command = \"cls\" if os.name == \"nt\" else \"clear\"\n", - "\n", - " prompt = \"what is the purpose of a rubber duck?\"\n", - " stream = oai_text_service.complete_stream(prompt=prompt, settings=oai_text_prompt_execution_settings)\n", - " number_of_responses = oai_text_prompt_execution_settings.number_of_responses\n", - " texts = [\"\"] * number_of_responses\n", - "\n", - " last_clear_time = time.time()\n", - " clear_interval = 0.5 # seconds\n", - "\n", - " # Note: there are some quirks with displaying the output, which sometimes flashes and disappears.\n", - " # This could be influenced by a few factors specific to Jupyter notebooks and asynchronous processing.\n", - " # The following code attempts to buffer the results to avoid the output flashing on/off the screen.\n", - "\n", - " async for results in stream:\n", - " current_time = time.time()\n", - "\n", - " # Update texts with new results\n", - " for idx, result in enumerate(results):\n", - " if idx < number_of_responses:\n", - " texts[idx] += result\n", - "\n", - " # Clear and display output at intervals\n", - " if current_time - last_clear_time > clear_interval:\n", - " clear_output(wait=True)\n", - " for idx, text in enumerate(texts):\n", - " print(f\"Result {idx + 1}: {text}\")\n", - " last_clear_time = current_time\n", - "\n", - " print(\"----------------------------------------\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "68e1c158", + "metadata": {}, + "source": [ + "# Multiple Results\n" + ] }, - "nbformat": 4, - "nbformat_minor": 5 + { + "attachments": {}, + "cell_type": "markdown", + "id": "fb81bacd", + "metadata": {}, + "source": [ + "In this notebook we show how you can in a single request, have the LLM model return multiple results per prompt. 
This is useful for running experiments where you want to evaluate the robustness of your prompt and the parameters of your config against a particular large language model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a77bdf89", + "metadata": {}, + "outputs": [], + "source": [ + "!python -m pip install semantic-kernel==0.5.1.dev0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f4bfee4", + "metadata": {}, + "outputs": [], + "source": [ + "from services import Service\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "508ad44f", + "metadata": {}, + "outputs": [], + "source": [ + "import semantic_kernel as sk\n", + "from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory\n", + "\n", + "if selectedService == Service.OpenAI or selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (\n", + " OpenAITextPromptExecutionSettings,\n", + " OpenAIChatPromptExecutionSettings,\n", + " )\n", + " from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import (\n", + " AzureChatPromptExecutionSettings,\n", + " )\n", + " from semantic_kernel.connectors.ai.open_ai import (\n", + " AzureTextCompletion,\n", + " AzureChatCompletion,\n", + " OpenAITextCompletion,\n", + " OpenAIChatCompletion,\n", + " )\n", + "if selectedService == Service.HuggingFace:\n", + " from semantic_kernel.connectors.ai.hugging_face import HuggingFaceTextCompletion" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d8ddffc1", + "metadata": {}, + "source": [ + "First, we will set up the text and chat services we will be submitting prompts to.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f8dcbc6", + "metadata": {}, + "outputs": [], + "source": [ + "kernel = sk.Kernel()\n", + "\n", + "# Configure Azure LLM service\n", + "if selectedService == Service.AzureOpenAI:\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " azure_text_service = AzureTextCompletion(\n", + " service_id=\"aoai_text\", deployment_name=\"gpt-35-turbo-instruct\", endpoint=endpoint, api_key=api_key\n", + " ) # set the deployment name to the value of your text model (e.g. 
gpt-35-turbo-instruct or text-davinci-003)\n", + " azure_chat_service = AzureChatCompletion(\n", + " service_id=\"aoai_chat\", deployment_name=\"gpt-35-turbo\", endpoint=endpoint, api_key=api_key\n", + " ) # set the deployment name to the value of your chat model\n", + "\n", + "# Configure OpenAI service\n", + "if selectedService == Service.OpenAI:\n", + " api_key, org_id = sk.openai_settings_from_dot_env()\n", + " oai_text_service = OpenAITextCompletion(\n", + " service_id=\"oai_text\", ai_model_id=\"gpt-3.5-turbo-instruct\", api_key=api_key, org_id=org_id\n", + " )\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=\"oai_chat\", ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id\n", + " )\n", + "\n", + "# Configure Hugging Face service\n", + "if selectedService == Service.HuggingFace:\n", + " hf_text_service = HuggingFaceTextCompletion(service_id=\"hf_text\", ai_model_id=\"distilgpt2\", task=\"text-generation\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "50561d82", + "metadata": {}, + "source": [ + "Next, we'll set up the completion request settings for text completion services.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "628c843e", + "metadata": {}, + "outputs": [], + "source": [ + "oai_text_prompt_execution_settings = OpenAITextPromptExecutionSettings(\n", + " service=\"oai_text\",\n", + " extension_data={\n", + " \"max_tokens\": 80,\n", + " \"temperature\": 0.7,\n", + " \"top_p\": 1,\n", + " \"frequency_penalty\": 0.5,\n", + " \"presence_penalty\": 0.5,\n", + " \"number_of_responses\": 3,\n", + " },\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "857a9c89", + "metadata": {}, + "source": [ + "## Multiple Open AI Text Completions\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e2979db8", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.OpenAI:\n", + " chat = ChatHistory()\n", + " chat.add_user_message(\"What is the purpose of a rubber duck?\")\n", + " results = await oai_text_service.complete(chat_history=chat, settings=oai_text_prompt_execution_settings)\n", + " i = 1\n", + " for result in results:\n", + " print(f\"Result {i}: {result}\")\n", + " i += 1" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "4288d09f", + "metadata": {}, + "source": [ + "## Multiple Azure Open AI Text Completions\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5319f14d", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.AzureOpenAI:\n", + " chat = ChatHistory()\n", + " chat.add_user_message(\"provide me a list of possible meanings for the acronym 'ORLD'\")\n", + " results = await azure_text_service.complete(chat_history=chat, settings=oai_text_prompt_execution_settings)\n", + " i = 1\n", + " for result in results:\n", + " print(f\"Result {i}: {result}\")\n", + " i += 1" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "eb548f9c", + "metadata": {}, + "source": [ + "## Multiple Hugging Face Text Completions\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a148709", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.HuggingFace:\n", + " from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import (\n", + " HuggingFacePromptExecutionSettings,\n", + " )\n", + "\n", + " hf_prompt_execution_settings = HuggingFacePromptExecutionSettings(\n", + " 
service_id=\"hf_text\", extension_data={\"max_new_tokens\": 80, \"temperature\": 0.7, \"top_p\": 1}\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9525e4f3", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.HuggingFace:\n", + " prompt = \"The purpose of a rubber duck is\"\n", + " chat = ChatHistory()\n", + " chat.add_user_message(prompt)\n", + " results = await hf_text_service.complete(chat_history=chat, prompt_execution_settings=hf_prompt_execution_settings)\n", + " print(\"\".join(results))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "da632e12", + "metadata": {}, + "source": [ + "Here, we're setting up the settings for Chat completions.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5f11e46", + "metadata": {}, + "outputs": [], + "source": [ + "oai_chat_prompt_execution_settings = OpenAIChatPromptExecutionSettings(\n", + " service_id=\"oai_chat\",\n", + " max_tokens=80,\n", + " temperature=0.7,\n", + " top_p=1,\n", + " frequency_penalty=0.5,\n", + " presence_penalty=0.5,\n", + " number_of_responses=3,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d6bf238e", + "metadata": {}, + "source": [ + "## Multiple OpenAI Chat Completions\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dabc6a4c", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.OpenAI:\n", + " chat = ChatHistory()\n", + " chat.add_user_message(\n", + " \"It's a beautiful day outside, birds are singing, flowers are blooming. On days like these, kids like you...\"\n", + " )\n", + " results = await oai_chat_service.complete_chat(chat_history=chat, settings=oai_chat_prompt_execution_settings)\n", + " i = 0\n", + " for result in results:\n", + " print(f\"Result {i+1}: {str(result)}\")\n", + " i += 1" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "cdb8f740", + "metadata": {}, + "source": [ + "## Multiple Azure OpenAI Chat Completions\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "66ba4767", + "metadata": {}, + "outputs": [], + "source": [ + "az_oai_prompt_execution_settings = AzureChatPromptExecutionSettings(\n", + " service_id=\"aoai_chat\",\n", + " max_tokens=80,\n", + " temperature=0.7,\n", + " top_p=1,\n", + " frequency_penalty=0.5,\n", + " presence_penalty=0.5,\n", + " number_of_responses=3,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b74a64a9", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.AzureOpenAI:\n", + " content = \"Tomorow is going to be a great day, I can feel it. I'm going to wake up early, go for a run, and then...\"\n", + " chat = ChatHistory()\n", + " chat.add_user_message(content)\n", + " results = await azure_chat_service.complete_chat(chat_history=chat, settings=az_oai_prompt_execution_settings)\n", + " i = 0\n", + " for result in results:\n", + " print(f\"Result {i+1}: {str(result)}\")\n", + " i += 1" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "98c8191d", + "metadata": {}, + "source": [ + "## Streaming Multiple Results\n", + "\n", + "Here is an example pattern if you want to stream your multiple results. 
Note that this is not supported for Hugging Face text completions at this time.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "26a37702", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.OpenAI:\n", + " import os\n", + " from IPython.display import clear_output\n", + " import time\n", + "\n", + " # Determine the clear command based on OS\n", + " clear_command = \"cls\" if os.name == \"nt\" else \"clear\"\n", + "\n", + " chat = ChatHistory()\n", + " chat.add_user_message(\"what is the purpose of a rubber duck?\")\n", + "\n", + " stream = oai_text_service.complete_stream(chat_history=chat, settings=oai_text_prompt_execution_settings)\n", + " number_of_responses = oai_text_prompt_execution_settings.number_of_responses\n", + " texts = [\"\"] * number_of_responses\n", + "\n", + " last_clear_time = time.time()\n", + " clear_interval = 0.5 # seconds\n", + "\n", + " # Note: there are some quirks with displaying the output, which sometimes flashes and disappears.\n", + " # This could be influenced by a few factors specific to Jupyter notebooks and asynchronous processing.\n", + " # The following code attempts to buffer the results to avoid the output flashing on/off the screen.\n", + "\n", + " async for results in stream:\n", + " current_time = time.time()\n", + "\n", + " # Update texts with new results\n", + " for idx, result in enumerate(results):\n", + " if idx < number_of_responses:\n", + " texts[idx] += str(result)\n", + "\n", + " # Clear and display output at intervals\n", + " if current_time - last_clear_time > clear_interval:\n", + " clear_output(wait=True)\n", + " for idx, text in enumerate(texts):\n", + " print(f\"Result {idx + 1}: {text}\")\n", + " last_clear_time = current_time\n", + "\n", + " print(\"----------------------------------------\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/python/notebooks/11-streaming-completions.ipynb b/python/notebooks/11-streaming-completions.ipynb index c089fe9864e6..e20f22165bcb 100644 --- a/python/notebooks/11-streaming-completions.ipynb +++ b/python/notebooks/11-streaming-completions.ipynb @@ -1,331 +1,344 @@ { - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "68e1c158", - "metadata": {}, - "source": [ - "# Streaming Results\n", - "\n", - "Here is an example pattern if you want to stream your multiple results. 
Note that this is not supported for Hugging Face text completions at this time.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a77bdf89", - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install semantic-kernel==0.5.1.dev0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from services import Service\n", - "\n", - "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", - "selectedService = Service.AzureOpenAI" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "508ad44f", - "metadata": {}, - "outputs": [], - "source": [ - "import semantic_kernel as sk\n", - "\n", - "if selectedService == Service.OpenAI or selectedService == Service.AzureOpenAI:\n", - " from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (\n", - " OpenAITextPromptExecutionSettings,\n", - " OpenAIChatPromptExecutionSettings,\n", - " )\n", - " from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import (\n", - " AzureChatPromptExecutionSettings,\n", - " )\n", - " from semantic_kernel.connectors.ai.open_ai import (\n", - " AzureTextCompletion,\n", - " AzureChatCompletion,\n", - " OpenAITextCompletion,\n", - " OpenAIChatCompletion,\n", - " )\n", - "if selectedService == Service.HuggingFace:\n", - " from semantic_kernel.connectors.ai.hugging_face import HuggingFaceTextCompletion" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "d8ddffc1", - "metadata": {}, - "source": [ - "First, we will set up the text and chat services we will be submitting prompts to.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f8dcbc6", - "metadata": {}, - "outputs": [], - "source": [ - "kernel = sk.Kernel()\n", - "\n", - "# Configure Azure LLM service\n", - "if selectedService == Service.AzureOpenAI:\n", - " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", - " azure_text_service = AzureTextCompletion(\n", - " deployment_name=\"text-davinci-003\", endpoint=endpoint, api_key=api_key\n", - " ) # set the deployment name to the value of your text model (e.g. 
gpt-35-turbo-instruct or text-davinci-003)\n", - " azure_chat_service = AzureChatCompletion(\n", - " deployment_name=\"gpt-35-turbo\", endpoint=endpoint, api_key=api_key\n", - " ) # set the deployment name to the value of your chat model\n", - "\n", - "# Configure OpenAI service\n", - "if selectedService == Service.OpenAI:\n", - " api_key, org_id = sk.openai_settings_from_dot_env()\n", - " oai_text_service = OpenAITextCompletion(ai_model_id=\"gpt-3.5-turbo-instruct\", api_key=api_key, org_id=org_id)\n", - " oai_chat_service = OpenAIChatCompletion(ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id)\n", - "\n", - "# Configure Hugging Face service\n", - "if selectedService == Service.HuggingFace:\n", - " hf_text_service = HuggingFaceTextCompletion(ai_model_id=\"distilgpt2\", task=\"text-generation\")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "50561d82", - "metadata": {}, - "source": [ - "Next, we'll set up the completion request settings for text completion services.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "628c843e", - "metadata": {}, - "outputs": [], - "source": [ - "oai_prompt_execution_settings = OpenAITextPromptExecutionSettings(\n", - " max_tokens=150,\n", - " temperature=0.7,\n", - " top_p=1,\n", - " frequency_penalty=0.5,\n", - " presence_penalty=0.5,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "857a9c89", - "metadata": {}, - "source": [ - "## Streaming Open AI Text Completion\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e2979db8", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.OpenAI:\n", - " prompt = \"what is the purpose of a rubber duck?\"\n", - " stream = oai_text_service.complete_stream(prompt=prompt, settings=oai_prompt_execution_settings)\n", - " async for text in stream:\n", - " print(text, end=\"\") # end = \"\" to avoid newlines" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "4288d09f", - "metadata": {}, - "source": [ - "## Streaming Azure Open AI Text Completion\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5319f14d", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.AzureOpenAI:\n", - " prompt = \"provide me a list of possible meanings for the acronym 'ORLD'\"\n", - " stream = azure_text_service.complete_stream(prompt=prompt, settings=oai_prompt_execution_settings)\n", - " async for text in stream:\n", - " print(text, end=\"\") # end = \"\" to avoid newlines" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "eb548f9c", - "metadata": {}, - "source": [ - "## Streaming Hugging Face Text Completion\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "be7b1c2e", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.HuggingFace:\n", - " from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import (\n", - " HuggingFacePromptExecutionSettings,\n", - " )\n", - "\n", - " hf_prompt_execution_settings = HuggingFacePromptExecutionSettings(\n", - " extension_data={\n", - " \"max_new_tokens\": 80,\n", - " \"top_p\": 1,\n", - " \"eos_token_id\": 11,\n", - " \"pad_token_id\": 0,\n", - " }\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9525e4f3", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.HuggingFace:\n", - " prompt = \"The purpose of a rubber duck is\"\n", - " stream = 
hf_text_service.complete_stream(prompt=prompt, prompt_execution_settings=hf_prompt_execution_settings)\n", - " async for text in stream:\n", - " print(text, end=\"\") # end = \"\" to avoid newlines" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "da632e12", - "metadata": {}, - "source": [ - "Here, we're setting up the settings for Chat completions.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e5f11e46", - "metadata": {}, - "outputs": [], - "source": [ - "oai_chat_prompt_execution_settings = OpenAIChatPromptExecutionSettings(\n", - " max_tokens=150,\n", - " temperature=0.7,\n", - " top_p=1,\n", - " frequency_penalty=0.5,\n", - " presence_penalty=0.5,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "d6bf238e", - "metadata": {}, - "source": [ - "## Streaming OpenAI Chat Completion\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dabc6a4c", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.OpenAI:\n", - " role = \"system\"\n", - " content = \"You are an AI assistant that helps people find information.\"\n", - " message = {\"role\": role, \"content\": content}\n", - " stream = oai_chat_service.complete_chat_stream(messages=[message], settings=oai_chat_prompt_execution_settings)\n", - " async for text in stream:\n", - " print(text, end=\"\") # end = \"\" to avoid newlines" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "cdb8f740", - "metadata": {}, - "source": [ - "## Streaming Azure OpenAI Chat Completion\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "da1e9f59", - "metadata": {}, - "outputs": [], - "source": [ - "az_oai_chat_prompt_execution_settings = AzureChatPromptExecutionSettings(\n", - " max_tokens=150,\n", - " temperature=0.7,\n", - " top_p=1,\n", - " frequency_penalty=0.5,\n", - " presence_penalty=0.5,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b74a64a9", - "metadata": {}, - "outputs": [], - "source": [ - "if selectedService == Service.AzureOpenAI:\n", - " role = \"system\"\n", - " content = \"You are an AI assistant that helps people find information.\"\n", - " message = {\"role\": role, \"content\": content}\n", - " stream = azure_chat_service.complete_chat_stream(messages=[message], settings=az_oai_chat_prompt_execution_settings)\n", - " async for text in stream:\n", - " print(text, end=\"\") # end = \"\" to avoid newlines" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "68e1c158", + "metadata": {}, + "source": [ + "# Streaming Results\n", + "\n", + "Here is an example pattern if you want to stream your multiple results. 
Note that this is not supported for Hugging Face text completions at this time.\n" + ] }, - "nbformat": 4, - "nbformat_minor": 5 + { + "cell_type": "code", + "execution_count": null, + "id": "a77bdf89", + "metadata": {}, + "outputs": [], + "source": [ + "!python -m pip install semantic-kernel==0.5.1.dev0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from services import Service\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = Service.AzureOpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "508ad44f", + "metadata": {}, + "outputs": [], + "source": [ + "import semantic_kernel as sk\n", + "from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory\n", + "\n", + "if selectedService == Service.OpenAI or selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (\n", + " OpenAITextPromptExecutionSettings,\n", + " OpenAIChatPromptExecutionSettings,\n", + " )\n", + " from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import (\n", + " AzureChatPromptExecutionSettings,\n", + " )\n", + " from semantic_kernel.connectors.ai.open_ai import (\n", + " AzureTextCompletion,\n", + " AzureChatCompletion,\n", + " OpenAITextCompletion,\n", + " OpenAIChatCompletion,\n", + " )\n", + "if selectedService == Service.HuggingFace:\n", + " from semantic_kernel.connectors.ai.hugging_face import HuggingFaceTextCompletion" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d8ddffc1", + "metadata": {}, + "source": [ + "First, we will set up the text and chat services we will be submitting prompts to.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8f8dcbc6", + "metadata": {}, + "outputs": [], + "source": [ + "kernel = sk.Kernel()\n", + "\n", + "# Configure Azure LLM service\n", + "if selectedService == Service.AzureOpenAI:\n", + " deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env()\n", + " azure_text_service = AzureTextCompletion(\n", + " service_id=\"aoai_text\", deployment_name=\"text-davinci-003\", endpoint=endpoint, api_key=api_key\n", + " ) # set the deployment name to the value of your text model (e.g. 
gpt-35-turbo-instruct or text-davinci-003)\n", + " azure_chat_service = AzureChatCompletion(\n", + " service_id=\"aoai_chat\", deployment_name=\"gpt-35-turbo\", endpoint=endpoint, api_key=api_key\n", + " ) # set the deployment name to the value of your chat model\n", + "\n", + "# Configure OpenAI service\n", + "if selectedService == Service.OpenAI:\n", + " api_key, org_id = sk.openai_settings_from_dot_env()\n", + " oai_text_service = OpenAITextCompletion(\n", + " service_id=\"oai_text\", ai_model_id=\"gpt-3.5-turbo-instruct\", api_key=api_key, org_id=org_id\n", + " )\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=\"oai_chat\", ai_model_id=\"gpt-3.5-turbo\", api_key=api_key, org_id=org_id\n", + " )\n", + "\n", + "# Configure Hugging Face service\n", + "if selectedService == Service.HuggingFace:\n", + " hf_text_service = HuggingFaceTextCompletion(ai_model_id=\"distilgpt2\", task=\"text-generation\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "50561d82", + "metadata": {}, + "source": [ + "Next, we'll set up the completion request settings for text completion services.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "628c843e", + "metadata": {}, + "outputs": [], + "source": [ + "oai_prompt_execution_settings = OpenAITextPromptExecutionSettings(\n", + " service_id=\"oai_text\",\n", + " max_tokens=150,\n", + " temperature=0.7,\n", + " top_p=1,\n", + " frequency_penalty=0.5,\n", + " presence_penalty=0.5,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "857a9c89", + "metadata": {}, + "source": [ + "## Streaming Open AI Text Completion\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e2979db8", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.OpenAI:\n", + " chat = ChatHistory()\n", + " chat.add_user_message(\"What is the purpose of a rubber duck?\")\n", + " stream = oai_text_service.complete_stream(chat_history=chat, settings=oai_prompt_execution_settings)\n", + " async for message in stream:\n", + " print(str(message[0]), end=\"\") # end = \"\" to avoid newlines" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "4288d09f", + "metadata": {}, + "source": [ + "## Streaming Azure Open AI Text Completion\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5319f14d", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.AzureOpenAI:\n", + " chat = ChatHistory()\n", + " chat.add_user_message(\"provide me a list of possible meanings for the acronym 'ORLD'\")\n", + " stream = azure_text_service.complete_stream(chat_history=chat, settings=oai_prompt_execution_settings)\n", + " async for message in stream:\n", + " print(str(message[0]), end=\"\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "eb548f9c", + "metadata": {}, + "source": [ + "## Streaming Hugging Face Text Completion\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "be7b1c2e", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.HuggingFace:\n", + " from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import (\n", + " HuggingFacePromptExecutionSettings,\n", + " )\n", + "\n", + " hf_prompt_execution_settings = HuggingFacePromptExecutionSettings(\n", + " service_id=\"hf_text\",\n", + " extension_data={\n", + " \"max_new_tokens\": 80,\n", + " \"top_p\": 1,\n", + " \"eos_token_id\": 11,\n", + " \"pad_token_id\": 0,\n", 
+ " },\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9525e4f3", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.HuggingFace:\n", + " chat = ChatHistory()\n", + " chat.add_user_message(\"The purpose of a rubber duck is\")\n", + " stream = hf_text_service.complete_stream(chat_history=chat, prompt_execution_settings=hf_prompt_execution_settings)\n", + " async for text in stream:\n", + " print(str(text[0]), end=\"\") # end = \"\" to avoid newlines" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "da632e12", + "metadata": {}, + "source": [ + "Here, we're setting up the settings for Chat completions.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5f11e46", + "metadata": {}, + "outputs": [], + "source": [ + "oai_chat_prompt_execution_settings = OpenAIChatPromptExecutionSettings(\n", + " service_id=\"oai_chat\",\n", + " max_tokens=150,\n", + " temperature=0.7,\n", + " top_p=1,\n", + " frequency_penalty=0.5,\n", + " presence_penalty=0.5,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d6bf238e", + "metadata": {}, + "source": [ + "## Streaming OpenAI Chat Completion\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dabc6a4c", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.OpenAI:\n", + " content = \"You are an AI assistant that helps people find information.\"\n", + " chat = ChatHistory()\n", + " chat.add_system_message(content)\n", + " stream = oai_chat_service.complete_chat_stream(chat_history=chat, settings=oai_chat_prompt_execution_settings)\n", + " async for text in stream:\n", + " print(str(text[0]), end=\"\") # end = \"\" to avoid newlines" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "cdb8f740", + "metadata": {}, + "source": [ + "## Streaming Azure OpenAI Chat Completion\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da1e9f59", + "metadata": {}, + "outputs": [], + "source": [ + "az_oai_chat_prompt_execution_settings = AzureChatPromptExecutionSettings(\n", + " service_id=\"aoai_chat\",\n", + " max_tokens=150,\n", + " temperature=0.7,\n", + " top_p=1,\n", + " frequency_penalty=0.5,\n", + " presence_penalty=0.5,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b74a64a9", + "metadata": {}, + "outputs": [], + "source": [ + "if selectedService == Service.AzureOpenAI:\n", + " content = \"You are an AI assistant that helps people find information.\"\n", + " chat = ChatHistory()\n", + " chat.add_system_message(content)\n", + " chat.add_user_message(\"What is the purpose of a rubber duck?\")\n", + " stream = azure_chat_service.complete_chat_stream(chat_history=chat, settings=az_oai_chat_prompt_execution_settings)\n", + " async for text in stream:\n", + " print(str(text[0]), end=\"\") # end = \"\" to avoid newlines" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 } diff --git a/python/samples/kernel-syntax-examples/action_planner.py b/python/samples/kernel-syntax-examples/action_planner.py index 354a7d7cbd49..13628f20596e 
100644 --- a/python/samples/kernel-syntax-examples/action_planner.py +++ b/python/samples/kernel-syntax-examples/action_planner.py @@ -13,7 +13,9 @@ async def main(): kernel = sk.Kernel() api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_chat_service("chat-gpt", OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id)) + kernel.add_service( + OpenAIChatCompletion(service_id="chat-gpt", ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id) + ) kernel.import_plugin(MathPlugin(), "math") kernel.import_plugin(TimePlugin(), "time") diff --git a/python/samples/kernel-syntax-examples/azure_chat_gpt_api.py b/python/samples/kernel-syntax-examples/azure_chat_gpt_api.py index 034cbf348b0f..3f79878a0d78 100644 --- a/python/samples/kernel-syntax-examples/azure_chat_gpt_api.py +++ b/python/samples/kernel-syntax-examples/azure_chat_gpt_api.py @@ -8,6 +8,9 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.functions.kernel_function import KernelFunction +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable from semantic_kernel.utils.settings import azure_openai_settings_from_dot_env_as_dict logging.basicConfig(level=logging.INFO) @@ -31,6 +34,7 @@ kernel.add_service(chat_service) ## there are three ways to create the request settings in code: # noqa: E266 +## Note: the prompt_execution_settings are a dictionary with the service_id as the key and the request settings as the value. # noqa: E501 ## 1. create the request settings from the base class: # noqa: E266 # from semantic_kernel.connectors.ai.chat_completion_client_base import PromptExecutionSettings @@ -39,8 +43,8 @@ ## 2. create the request settings directly for the service you are using: # noqa: E266 # req_settings = sk_oai.AzureChatPromptExecutionSettings(max_tokens=2000, temperature=0.7, top_p=0.8) -## The second method is useful when you are using a single service, and you want to have type checking on the request settings or when you are using multiple instances of the same type of service, for instance gpt-35-turbo and gpt-4, both in openai and both for chat. # noqa: E501 E266 +## The second method is useful when you are using a single service, and you want to have type checking on the request settings or when you are using multiple instances of the same type of service, for instance gpt-35-turbo and gpt-4, both in openai and both for chat. # noqa: E501 E266 ## 3. create the request settings from the kernel based on the registered service class: # noqa: E266 req_settings = kernel.get_service("chat-gpt").get_prompt_execution_settings_class()(service_id="chat-gpt") req_settings.max_tokens = 2000 @@ -48,23 +52,34 @@ req_settings.top_p = 0.8 ## The third method is the most specific as the returned request settings class is the one that is registered for the service and has some fields already filled in, like the service_id and ai_model_id. # noqa: E501 E266 +prompt_template_config = sk.PromptTemplateConfig( + template="""Answer the following request: {{$request}}. 
+ Additionally summarize the on-going chat history: {{$chat_history}}""", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="request", description="The user input", is_required=True), + InputVariable( + name=KernelFunction.CHAT_HISTORY_TAG, description="The history of the conversation", is_required=True + ), + ], + execution_settings=req_settings, +) -prompt_config = sk.PromptTemplateConfig(execution_settings=req_settings) +history = ChatHistory() -prompt_template = sk.ChatPromptTemplate("{{$user_input}}", kernel.prompt_template_engine, prompt_config) +history.add_system_message(system_message) +history.add_user_message("Hi there, who are you?") +history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.") -prompt_template.add_system_message(system_message) -prompt_template.add_user_message("Hi there, who are you?") -prompt_template.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.") +arguments = KernelArguments() -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) +chat_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) async def chat() -> bool: try: user_input = input("User:> ") - arguments = KernelArguments(user_input=user_input or "what is openai?") except KeyboardInterrupt: print("\n\nExiting chat...") return False @@ -78,14 +93,24 @@ async def chat() -> bool: stream = True if stream: - answer = kernel.invoke_stream(chat_function, arguments=arguments) + answer = kernel.invoke_stream( + chat_function, + request=user_input, + chat_history=history, + ) print("Mosscap:> ", end="") async for message in answer: print(str(message[0]), end="") print("\n") return True - answer = await kernel.invoke(chat_function, arguments=arguments) + answer = await kernel.invoke( + chat_function, + request=user_input, + chat_history=history, + ) print(f"Mosscap:> {answer}") + history.add_user_message(user_input) + history.add_assistant_message(str(answer)) return True diff --git a/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api.py b/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api.py index 2edd0b44ecb3..fcd4db9eeb3e 100644 --- a/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api.py +++ b/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api.py @@ -13,8 +13,10 @@ AzureDataSources, ExtraBody, ) -from semantic_kernel.connectors.ai.open_ai.prompt_template.open_ai_chat_prompt_template import OpenAIChatPromptTemplate from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig kernel = sk.Kernel() @@ -42,26 +44,39 @@ az_source = AzureAISearchDataSources(**azure_ai_search_settings) az_data = AzureDataSources(type="AzureCognitiveSearch", parameters=az_source) extra = ExtraBody(dataSources=[az_data]) -req_settings = AzureChatPromptExecutionSettings(extra_body=extra) -prompt_config = sk.PromptTemplateConfig(execution_settings=req_settings) +req_settings = AzureChatPromptExecutionSettings(service_id="default", extra_body=extra) # When using data, set use_extensions=True and use the 2023-12-01-preview API version. 
chat_service = sk_oai.AzureChatCompletion( + service_id="chat-gpt", deployment_name=deployment, api_key=api_key, endpoint=endpoint, api_version="2023-12-01-preview", use_extensions=True, ) -kernel.add_chat_service("chat-gpt", chat_service) +kernel.add_service(chat_service) + +prompt_template_config = PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="request", description="The user input", is_required=True), + ], + execution_settings={"default": req_settings}, +) + +chat = ChatHistory() -prompt_template = OpenAIChatPromptTemplate("{{$user_input}}", kernel.prompt_template_engine, prompt_config) +chat.add_user_message("Hi there, who are you?") +chat.add_assistant_message("I am an AI assistant here to answer your questions.") -prompt_template.add_user_message("Hi there, who are you?") -prompt_template.add_assistant_message("I am an AI assistant here to answer your questions.") +arguments = KernelArguments() -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) +chat_function = kernel.create_function_from_prompt( + plugin_name="ChatBot", function_name="Chat", prompt_template_config=prompt_template_config +) async def chat() -> bool: @@ -88,12 +103,12 @@ async def chat() -> bool: async for message in kernel.invoke_stream(chat_function, arguments=arguments): print(str(message[0]), end="") full_message = message[0] if not full_message else full_message + message[0] - prompt_template.add_assistant_message(str(full_message)) + chat.add_assistant_message(str(full_message)) print("\n") # The tool message containing cited sources is available in the context if isinstance(full_message, AzureStreamingChatMessageContent): - prompt_template.add_function_response_message(name="tool", content=full_message.tool_message) + chat.add_function_response_message(name="tool", content=full_message.tool_message) print(f"Tool:> {full_message.tool_message}") return True diff --git a/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api_function_calling.py b/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api_function_calling.py index dbb34c920751..19d3f67912d2 100644 --- a/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api_function_calling.py +++ b/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api_function_calling.py @@ -11,15 +11,15 @@ AzureDataSources, ExtraBody, ) -from semantic_kernel.connectors.ai.open_ai.prompt_template.open_ai_chat_prompt_template import ( - OpenAIChatPromptTemplate, -) from semantic_kernel.connectors.ai.open_ai.utils import ( chat_completion_with_tool_call, get_tool_call_object, ) from semantic_kernel.core_plugins.time_plugin import TimePlugin from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig kernel = sk.Kernel() @@ -40,21 +40,21 @@ # groundbreaking phenomenon in glaciology that could potentially reshape our understanding of climate change. 
chat_service = sk_oai.AzureChatCompletion( + service_id="chat-gpt", deployment_name=deployment, api_key=api_key, endpoint=endpoint, api_version="2023-12-01-preview", use_extensions=True, ) -kernel.add_chat_service( - "chat-gpt", +kernel.add_service( chat_service, ) plugins_directory = os.path.join(__file__, "../../../../samples/plugins") # adding plugins to the kernel # the joke plugin in the FunPlugins is a semantic plugin and has the function calling disabled. -kernel.import_semantic_plugin_from_directory(plugins_directory, "FunPlugin") +kernel.import_plugin_from_prompt_directory("chat-gpt", plugins_directory, "FunPlugin") # the math plugin is a core plugin and has the function calling enabled. kernel.import_plugin(TimePlugin(), plugin_name="time") @@ -63,13 +63,27 @@ # if you only want to use a specific tool, set the name of that tool in this parameter, # the format for that is 'PluginName-FunctionName', (i.e. 'math-Add'). # if the model or api version do not support this you will get an error. -prompt_config = sk.PromptTemplateConfig(execution_settings=req_settings) -prompt_template = OpenAIChatPromptTemplate("{{$user_input}}", kernel.prompt_template_engine, prompt_config) -prompt_template.add_user_message("Hi there, who are you?") -prompt_template.add_assistant_message("I am an AI assistant here to answer your questions.") +prompt_template_config = PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="history", description="The history of the conversation", is_required=True), + InputVariable(name="request", description="The user input", is_required=True), + ], + execution_settings=req_settings, +) + +history = ChatHistory() -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) +history.add_user_message("Hi there, who are you?") +history.add_assistant_message("I am an AI assistant here to answer your questions.") + +arguments = KernelArguments() + +chat_function = kernel.create_function_from_prompt( + plugin_name="ChatBot", function_name="Chat", prompt_template_config=prompt_template_config +) # calling the chat, you could add a overloaded version of the settings here, # to enable or disable function calling or set the function calling to a specific plugin. 
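[Editor's aside] The comment above mentions passing an overloaded settings object to scope or disable tool calling for a single request. A minimal sketch of that idea follows, built only from calls already shown in this patch (AzureChatPromptExecutionSettings, get_tool_call_object, KernelArguments, chat_completion_with_tool_call) and assuming this sample's kernel and chat_function are in scope; the extra excluded plugin name is illustrative, not part of the sample.

# Editor's sketch, not part of the sample: per-call settings that exclude an
# additional plugin from tool calling, reusing the helpers imported above.
scoped_settings = sk_oai.AzureChatPromptExecutionSettings(
    service_id="chat-gpt",
    max_tokens=2000,
    temperature=0.7,
    top_p=0.8,
    tool_choice="auto",
    tools=get_tool_call_object(kernel, {"exclude_plugin": ["ChatBot", "FunPlugin"]}),
)

async def ask_once(user_input: str) -> str:
    # Same argument shape as the chat() loop below: the user request plus the
    # execution settings that control which tools the model may call.
    arguments = KernelArguments(request=user_input, execution_settings=scoped_settings)
    answer = await chat_completion_with_tool_call(
        kernel=kernel,
        arguments=arguments,
        chat_function=chat_function,
    )
    return str(answer)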
@@ -92,13 +106,15 @@ async def chat() -> bool: print("\n\nExiting chat...") return False - arguments = KernelArguments(user_input=user_input, execution_settings=req_settings) - result = await chat_completion_with_tool_call( + arguments = KernelArguments(request=user_input, execution_settings=req_settings) + answer = await chat_completion_with_tool_call( kernel=kernel, arguments=arguments, chat_function=chat_function, ) - print(f"Assistant:> {result}") + print(f"Mosscap:> {answer}") + history.add_user_message(user_input) + history.add_assistant_message(str(answer)) return True diff --git a/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api_vector_search.py b/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api_vector_search.py index 9aa0e7397b52..0740c6efb643 100644 --- a/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api_vector_search.py +++ b/python/samples/kernel-syntax-examples/azure_chat_gpt_with_data_api_vector_search.py @@ -7,14 +7,19 @@ from semantic_kernel.connectors.ai.open_ai.contents.azure_streaming_chat_message_content import ( AzureStreamingChatMessageContent, ) +from semantic_kernel.connectors.ai.open_ai.contents.open_ai_chat_message_content import OpenAIChatMessageContent +from semantic_kernel.connectors.ai.open_ai.models.chat_completion.tool_calls import ToolCall from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import ( AzureAISearchDataSources, AzureChatPromptExecutionSettings, AzureDataSources, ExtraBody, ) -from semantic_kernel.connectors.ai.open_ai.prompt_template.open_ai_chat_prompt_template import OpenAIChatPromptTemplate from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.models.ai.chat_completion.chat_role import ChatRole +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig kernel = sk.Kernel() @@ -47,27 +52,40 @@ az_source = AzureAISearchDataSources(**azure_ai_search_settings) az_data = AzureDataSources(type="AzureCognitiveSearch", parameters=az_source) extra = ExtraBody(dataSources=[az_data]) -req_settings = AzureChatPromptExecutionSettings(extra_body=extra) -prompt_config = sk.PromptTemplateConfig(execution_settings=req_settings) +req_settings = AzureChatPromptExecutionSettings(service_id="default", extra_body=extra) # When using data, set use_extensions=True and use the 2023-12-01-preview API version. 
chat_service = sk_oai.AzureChatCompletion( + service_id="chat-gpt", deployment_name=deployment, api_key=api_key, endpoint=endpoint, api_version="2023-12-01-preview", use_extensions=True, ) -kernel.add_chat_service("chat-gpt", chat_service) +kernel.add_service(chat_service) + +prompt_template_config = PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="chat", description="The history of the conversation", is_required=True, default=""), + InputVariable(name="request", description="The user input", is_required=True), + ], + execution_settings={"default": req_settings}, +) +chat = ChatHistory() -prompt_template = OpenAIChatPromptTemplate("{{$user_input}}", kernel.prompt_template_engine, prompt_config) +chat.add_user_message("Hi there, who are you?") +chat.add_assistant_message("I am an AI assistant here to answer your questions.") -prompt_template.add_user_message("Hi there, who are you?") -prompt_template.add_assistant_message("I am an AI assistant here to answer your questions.") +arguments = KernelArguments() -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) +chat_function = kernel.create_function_from_prompt( + plugin_name="ChatBot", function_name="Chat", prompt_template_config=prompt_template_config +) async def chat() -> bool: @@ -84,8 +102,9 @@ async def chat() -> bool: print("\n\nExiting chat...") return False + arguments = KernelArguments(user_input=user_input, execution_settings=req_settings) # Non streaming - # answer = await kernel.run(chat_function, input_vars=context_vars) + # answer = await kernel.invoke(chat_function, input_vars=context_vars) # print(f"Assistant:> {answer}") arguments = KernelArguments(user_input=user_input, execution_settings=req_settings) @@ -94,11 +113,16 @@ async def chat() -> bool: async for message in kernel.invoke_stream(chat_function, arguments=arguments): print(str(message[0]), end="") full_message = message[0] if not full_message else full_message + message[0] - prompt_template.add_assistant_message(str(full_message)) + chat.add_assistant_message(str(full_message)) print("\n") # The tool message containing cited sources is available in the context if isinstance(full_message, AzureStreamingChatMessageContent): - prompt_template.add_function_response_message(name="tool", content=full_message.tool_message) + tool_call = ToolCall(full_message.tool_message) + chat.add_message( + role=ChatRole.TOOL, + content=full_message, + metadata={OpenAIChatMessageContent.ToolIdProperty: tool_call.function.name}, + ) print(f"Tool:> {full_message.tool_message}") return True diff --git a/python/samples/kernel-syntax-examples/bing_plugin_examples.py b/python/samples/kernel-syntax-examples/bing_plugin_examples.py new file mode 100644 index 000000000000..8361fc6cac91 --- /dev/null +++ b/python/samples/kernel-syntax-examples/bing_plugin_examples.py @@ -0,0 +1,116 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio + +import semantic_kernel as sk +import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.connectors.search_engine import BingConnector +from semantic_kernel.core_plugins import WebSearchEnginePlugin +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.prompt_template.kernel_prompt_template import KernelPromptTemplate +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig + + +async def example1(kernel: sk.Kernel, search_plugin_name): + print("======== Bing and Google Search Plugins ========") + + question = "What's the largest building in the world?" + function = kernel.plugins[search_plugin_name]["search"] + result = await kernel.invoke(function, KernelArguments(query=question)) + + print(question) + print(f"----{search_plugin_name}----") + print(result) + + +async def example2(kernel: sk.Kernel): + print("======== Use Search Plugin to answer user questions ========") + + semantic_function = """ + Answer questions only when you know the facts or the information is provided. + When you don't have sufficient information you reply with a list of commands to find the information needed. + When answering multiple questions, use a bullet point list. + Note: make sure single and double quotes are escaped using a backslash char. + + [COMMANDS AVAILABLE] + - bing.search + + [INFORMATION PROVIDED] + {{ $externalInformation }} + + [EXAMPLE 1] + Question: what's the biggest lake in Italy? + Answer: Lake Garda, also known as Lago di Garda. + + [EXAMPLE 2] + Question: what's the biggest lake in Italy? What's the smallest positive number? + Answer: + * Lake Garda, also known as Lago di Garda. + * The smallest positive number is 1. + + [EXAMPLE 3] + Question: what's Ferrari stock price? Who is the current number one female tennis player in the world? + Answer: + {{ '{{' }} bing.search ""what\\'s Ferrari stock price?"" {{ '}}' }}. + {{ '{{' }} bing.search ""Who is the current number one female tennis player in the world?"" {{ '}}' }}. + + [END OF EXAMPLES] + + [TASK] + Question: {{ $question }}. + Answer: + """ + question = "Who is the most followed person on TikTok right now? What's the exchange rate EUR:USD?" + print(question) + + oracle = kernel.create_function_from_prompt( + template=semantic_function, + execution_settings=sk_oai.OpenAIChatPromptExecutionSettings(max_tokens=150, temperature=0, top_p=1), + ) + answer = await kernel.invoke( + oracle, + KernelArguments(question=question, externalInformation=""), + ) + + result = str(answer) + + if "bing.search" in result: + prompt_template = KernelPromptTemplate(PromptTemplateConfig(template=result)) + + print("--- Fetching information from Bing... 
---") + information = await prompt_template.render(kernel, KernelArguments()) + + print("Information found:\n") + print(information) + + answer = await kernel.invoke(oracle, KernelArguments(question=question, externalInformation=information)) + print("---- Oracle's Answer ----:\n") + print(answer) + else: + print("AI had all of the information, there was no need to query Bing.") + + +async def main(): + kernel = sk.Kernel() + + model = "gpt-3.5-turbo-1106" + service_id = model + + api_key, org_id = sk.openai_settings_from_dot_env() + kernel.add_service( + sk_oai.OpenAIChatCompletion(service_id=service_id, ai_model_id=model, api_key=api_key, org_id=org_id), + ) + + bing_api_key = sk.bing_search_settings_from_dot_env() + assert bing_api_key is not None + + bing_connector = BingConnector(api_key=bing_api_key) + bing = WebSearchEnginePlugin(bing_connector) + kernel.import_plugin(bing, "bing") + + await example1(kernel, "bing") + await example2(kernel) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/kernel-syntax-examples/bing_search_plugin.py b/python/samples/kernel-syntax-examples/bing_search_plugin.py index 1ecbc04cbec5..7372126be882 100644 --- a/python/samples/kernel-syntax-examples/bing_search_plugin.py +++ b/python/samples/kernel-syntax-examples/bing_search_plugin.py @@ -14,9 +14,9 @@ async def main(): kernel = sk.Kernel() deployment, key, endpoint, api_version = sk.azure_openai_settings_from_dot_env(include_api_version=True) - kernel.add_chat_service( - "chat-gpt", + kernel.add_service( AzureChatCompletion( + service_id="chat-gpt", deployment_name=deployment, api_key=key, endpoint=endpoint, diff --git a/python/samples/kernel-syntax-examples/chat.py b/python/samples/kernel-syntax-examples/chat.py index 81848315ea50..32829160e529 100644 --- a/python/samples/kernel-syntax-examples/chat.py +++ b/python/samples/kernel-syntax-examples/chat.py @@ -4,8 +4,15 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai - -sk_prompt = """ +from semantic_kernel.connectors.ai.chat_completion_client_base import ( + ChatCompletionClientBase, +) +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig + +prompt = """ ChatBot can have a conversation with you about any topic. It can give explicit instructions or say 'I don't know' when it doesn't know the answer. 
@@ -18,20 +25,47 @@ kernel = sk.Kernel() api_key, org_id = sk.openai_settings_from_dot_env() -kernel.add_chat_service("chat-gpt", sk_oai.OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id)) - -prompt_config = sk.PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.4) - -prompt_template = sk.PromptTemplate(sk_prompt, kernel.prompt_template_engine, prompt_config) - -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) - - -async def chat(context_vars: sk.ContextVariables) -> bool: +kernel.add_service( + sk_oai.OpenAIChatCompletion(service_id="chat-gpt", ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id) +) + +settings = kernel.get_prompt_execution_settings_from_service(ChatCompletionClientBase, "chat-gpt") +settings.max_tokens = 2000 +settings.temperature = 0.7 +settings.top_p = 0.8 + +prompt_template_config = PromptTemplateConfig( + template=prompt, + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable( + name="user_input", + description="The history of the conversation", + is_required=True, + default="", + ), + InputVariable( + name="chat_history", + description="The history of the conversation", + is_required=True, + ), + ], + execution_settings=settings, +) + +chat = ChatHistory() +chat.add_user_message("Hi there, who are you?") +chat.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need") + +chat_function = kernel.create_function_from_prompt( + plugin_name="ChatBot", function_name="Chat", prompt_template_config=prompt_template_config +) + + +async def chat() -> bool: try: user_input = input("User:> ") - context_vars["user_input"] = user_input except KeyboardInterrupt: print("\n\nExiting chat...") return False @@ -43,8 +77,9 @@ async def chat(context_vars: sk.ContextVariables) -> bool: print("\n\nExiting chat...") return False - answer = await kernel.run(chat_function, input_vars=context_vars) - context_vars["chat_history"] += f"\nUser:> {user_input}\nChatBot:> {answer}\n" + answer = await kernel.invoke(chat_function, KernelArguments(user_input=user_input, chat_history=chat)) + chat.add_user_message(user_input) + chat.add_assistant_message(str(answer)) print(f"ChatBot:> {answer}") return True diff --git a/python/samples/kernel-syntax-examples/chat_gpt_api.py b/python/samples/kernel-syntax-examples/chat_gpt_api.py index 7e4fa2816786..69ae1d5e8dbf 100644 --- a/python/samples/kernel-syntax-examples/chat_gpt_api.py +++ b/python/samples/kernel-syntax-examples/chat_gpt_api.py @@ -4,6 +4,13 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.connectors.ai.chat_completion_client_base import ( + ChatCompletionClientBase, +) +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig system_message = """ You are a chat bot. 
Your name is Mosscap and @@ -17,26 +24,49 @@ kernel = sk.Kernel() api_key, org_id = sk.openai_settings_from_dot_env() -kernel.add_chat_service("chat-gpt", sk_oai.OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id)) - -prompt_config = sk.PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.8) - -prompt_template = sk.ChatPromptTemplate("{{$user_input}}", kernel.prompt_template_engine, prompt_config) - -prompt_template.add_system_message(system_message) -prompt_template.add_user_message("Hi there, who are you?") -prompt_template.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.") - -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) +kernel.add_service( + sk_oai.OpenAIChatCompletion(service_id="chat-gpt", ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id) +) + +settings = kernel.get_prompt_execution_settings_from_service(ChatCompletionClientBase, "chat-gpt") +settings.max_tokens = 2000 +settings.temperature = 0.7 +settings.top_p = 0.8 + +prompt_template_config = PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable( + name="user_input", + description="The history of the conversation", + is_required=True, + default="", + ), + InputVariable( + name="chat_history", + description="The history of the conversation", + is_required=True, + ), + ], + execution_settings=settings, +) + +chat = ChatHistory(system_message=system_message) +chat.add_user_message("Hi there, who are you?") +chat.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need") + +chat_function = kernel.create_function_from_prompt( + plugin_name="ChatBot", function_name="Chat", prompt_template_config=prompt_template_config +) + +chat.add_user_message("I want to find a hotel in Seattle with free wifi and a pool.") async def chat() -> bool: - context_vars = sk.ContextVariables() - try: user_input = input("User:> ") - context_vars["user_input"] = user_input except KeyboardInterrupt: print("\n\nExiting chat...") return False @@ -48,7 +78,9 @@ async def chat() -> bool: print("\n\nExiting chat...") return False - answer = await kernel.run(chat_function, input_vars=context_vars) + answer = await kernel.invoke(chat_function, KernelArguments(user_input=user_input, chat_history=chat)) + chat.add_user_message(user_input) + chat.add_assistant_message(str(answer)) print(f"Mosscap:> {answer}") return True diff --git a/python/samples/kernel-syntax-examples/chat_gpt_api_function_calling.py b/python/samples/kernel-syntax-examples/chat_gpt_api_function_calling.py index 3b32604833b3..dc7100014b84 100644 --- a/python/samples/kernel-syntax-examples/chat_gpt_api_function_calling.py +++ b/python/samples/kernel-syntax-examples/chat_gpt_api_function_calling.py @@ -5,15 +5,14 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai -from semantic_kernel.connectors.ai.open_ai.prompt_template.open_ai_chat_prompt_template import ( - OpenAIChatPromptTemplate, -) from semantic_kernel.connectors.ai.open_ai.utils import ( chat_completion_with_tool_call, get_tool_call_object, ) from semantic_kernel.core_plugins import MathPlugin from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from 
semantic_kernel.prompt_template.input_variable import InputVariable system_message = """ You are a chat bot. Your name is Mosscap and @@ -33,11 +32,11 @@ # Note: the underlying gpt-35/gpt-4 model version needs to be at least version 0613 to support tools. deployment_name, api_key, endpoint = sk.azure_openai_settings_from_dot_env() api_version = "2023-12-01-preview" -kernel.add_chat_service( - "chat-gpt", +kernel.add_service( sk_oai.AzureChatCompletion( - deployment_name, - endpoint, + service_id="chat", + deployment_name=deployment_name, + base_url=endpoint, api_key=api_key, api_version=api_version, ), @@ -46,7 +45,7 @@ plugins_directory = os.path.join(__file__, "../../../../samples/plugins") # adding plugins to the kernel # the joke plugin in the FunPlugins is a semantic plugin and has the function calling disabled. -kernel.import_semantic_plugin_from_directory(plugins_directory, "FunPlugin") +# kernel.import_plugin_from_prompt_directory("chat", plugins_directory, "FunPlugin") # the math plugin is a core plugin and has the function calling enabled. kernel.import_plugin(MathPlugin(), plugin_name="math") @@ -55,25 +54,40 @@ # if you only want to use a specific function, set the name of that function in this parameter, # the format for that is 'PluginName-FunctionName', (i.e. 'math-Add'). # if the model or api version do not support this you will get an error. -prompt_config = sk.PromptTemplateConfig( - execution_settings=sk_oai.AzureChatPromptExecutionSettings( - service_id="chat-gpt", - ai_model_id=deployment_name, - max_tokens=2000, - temperature=0.7, - top_p=0.8, - tool_choice="auto", - tools=get_tool_call_object(kernel, {"exclude_plugin": ["ChatBot"]}), - ) +execution_settings = sk_oai.AzureChatPromptExecutionSettings( + service_id="chat", + ai_model_id=deployment_name, + max_tokens=2000, + temperature=0.7, + top_p=0.8, + tool_choice="auto", + tools=get_tool_call_object(kernel, {"exclude_plugin": ["ChatBot"]}), +) + +prompt_template_config = sk.PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="user_input", description="The user input", is_required=True), + InputVariable(name="history", description="The history of the conversation", is_required=True, default=""), + ], + execution_settings={"default": execution_settings}, ) -prompt_template = OpenAIChatPromptTemplate("{{$user_input}}", kernel.prompt_template_engine, prompt_config) -prompt_template.add_system_message(system_message) -prompt_template.add_user_message("Hi there, who are you?") -prompt_template.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.") +history = ChatHistory() -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) +history.add_system_message(system_message) +history.add_user_message("Hi there, who are you?") +history.add_assistant_message("I am Mosscap, a chat bot. 
I'm trying to figure out what people need.") + +arguments = KernelArguments() + +chat_function = kernel.create_function_from_prompt( + prompt_template_config=prompt_template_config, + plugin_name="ChatBot", + function_name="Chat", +) async def chat() -> bool: @@ -89,7 +103,9 @@ async def chat() -> bool: if user_input == "exit": print("\n\nExiting chat...") return False - arguments = KernelArguments(user_input=user_input, execution_settings=prompt_config.execution_settings) + arguments = KernelArguments( + user_input=user_input, history=("\n").join([f"{msg.role}: {msg.content}" for msg in history]) + ) result = await chat_completion_with_tool_call( kernel=kernel, arguments=arguments, diff --git a/python/samples/kernel-syntax-examples/configuring_prompts.py b/python/samples/kernel-syntax-examples/configuring_prompts.py new file mode 100644 index 000000000000..e262e1f29e9f --- /dev/null +++ b/python/samples/kernel-syntax-examples/configuring_prompts.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +import semantic_kernel as sk +import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.core_plugins import ( + ConversationSummaryPlugin, +) +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig + + +async def main(): + kernel = sk.Kernel() + + useAzureOpenAI = False + model = "gpt-35-turbo" if useAzureOpenAI else "gpt-3.5-turbo-1106" + service_id = model + + api_key, org_id = sk.openai_settings_from_dot_env() + kernel.add_service( + sk_oai.OpenAIChatCompletion(service_id=service_id, ai_model_id=model, api_key=api_key, org_id=org_id), + ) + + kernel.import_plugin(ConversationSummaryPlugin(kernel), "conversation_summary") + + template = """ + {{ConversationSummaryPlugin.SummarizeConversation $history}} + User: {{$request}} + Assistant: + """ + + print("--- Rendered Prompt ---") + prompt_template_config = PromptTemplateConfig( + template=template, + name="chat", + description="Chat with the assistant", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="chat_history", description="The conversation history", is_required=False, default=""), + InputVariable(name="request", description="The user's request", is_required=True), + ], + execution_settings=sk_oai.OpenAIChatPromptExecutionSettings( + service_id=service_id, max_tokens=4000, temperature=0.2 + ), + ) + + chat = kernel.create_function_from_prompt( + prompt_template_config=prompt_template_config, + ) + + chat_history = ChatHistory() + + print("User > ") + while (user_input := input()) != "exit": + result = await kernel.invoke( + chat, + KernelArguments( + request=user_input, + history=chat_history, + ), + ) + result = str(result) + print(result) + chat_history.add_user_message(user_input) + chat_history.add_assistant_message(result) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/kernel-syntax-examples/google_palm_chat_with_memory.py b/python/samples/kernel-syntax-examples/google_palm_chat_with_memory.py index b279da4b7fef..299f9af58d57 100644 --- a/python/samples/kernel-syntax-examples/google_palm_chat_with_memory.py +++ b/python/samples/kernel-syntax-examples/google_palm_chat_with_memory.py @@ -5,14 +5,17 @@ import semantic_kernel as sk import 
semantic_kernel.connectors.ai.google_palm as sk_gp +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable kernel = sk.Kernel() apikey = sk.google_palm_settings_from_dot_env() palm_text_embed = sk_gp.GooglePalmTextEmbedding("models/embedding-gecko-001", apikey) -kernel.add_text_embedding_generation_service("gecko", palm_text_embed) -palm_chat_completion = sk_gp.GooglePalmChatCompletion("models/chat-bison-001", apikey) -kernel.add_chat_service("models/chat-bison-001", palm_chat_completion) -kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore()) +kernel.add_service(palm_text_embed) +chat_service_id = "models/chat-bison-001" +palm_chat_completion = sk_gp.GooglePalmChatCompletion(chat_service_id, apikey) +kernel.add_service(palm_chat_completion) +kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=palm_text_embed) kernel.import_plugin(sk.core_plugins.TextMemoryPlugin(), "TextMemoryPlugin") @@ -42,7 +45,7 @@ async def search_memory_examples(kernel: sk.Kernel) -> None: async def setup_chat_with_memory( kernel: sk.Kernel, -) -> Tuple[sk.KernelFunction, sk.KernelContext]: +) -> Tuple[sk.KernelFunction, sk.KernelArguments]: """ When using Google PaLM to chat with memories, a chat prompt template is essential; otherwise, the kernel will send text prompts to the Google PaLM @@ -53,7 +56,7 @@ async def setup_chat_with_memory( Note that this is only an issue for the chat service; the text service does not require a chat prompt template. """ - sk_prompt = """ + prompt = """ ChatBot can have a conversation with you about any topic. It can give explicit instructions or say 'I don't know' if it does not have an answer. @@ -67,33 +70,46 @@ async def setup_chat_with_memory( """.strip() - prompt_config = sk.PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.8) - prompt_template = sk.ChatPromptTemplate( # Create the chat prompt template - "{{$user_input}}", kernel.prompt_template_engine, prompt_config + req_settings = kernel.get_service(chat_service_id).get_prompt_execution_settings_class()(service_id=chat_service_id) + req_settings.max_tokens = 2000 + req_settings.temperature = 0.7 + req_settings.top_p = 0.8 + + prompt_template_config = sk.PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="user_input", description="The user input", is_required=True), + InputVariable(name="chat_history", description="The history of the conversation", is_required=True), + ], + execution_settings=req_settings, ) - prompt_template.add_system_message(sk_prompt) # Add the memory as a system message - function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) - chat_func = kernel.register_semantic_function(None, "ChatWithMemory", function_config) - context = kernel.create_new_context() - context["fact1"] = "what is my name?" - context["fact2"] = "what is my favorite hobby?" - context["fact3"] = "where's my family from?" - context["fact4"] = "where did I travel last year?" - context["fact5"] = "what do I do for work?" 
- - context[sk.core_plugins.TextMemoryPlugin.COLLECTION_PARAM] = "aboutMe" - context[sk.core_plugins.TextMemoryPlugin.RELEVANCE_PARAM] = 0.6 + chat_func = kernel.create_function_from_prompt( + plugin_name="chat_memory", function_name="ChatWithMemory", prompt_template_config=prompt_template_config + ) - context["chat_history"] = "" + chat_history = ChatHistory() + chat_history.add_system_message(prompt) + + arguments = sk.KernelArguments( + fact1="what is my name?", + fact2="what is my favorite hobby?", + fact3="where's my family from?", + fact4="where did I travel last year?", + fact5="what do I do for work?", + collection="aboutMe", + relevance=0.6, + chat_history=chat_history, + ) - return chat_func, context + return chat_func, arguments -async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunction, context: sk.KernelContext) -> bool: +async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunction, arguments: sk.KernelArguments) -> bool: try: user_input = input("User:> ") - context["user_input"] = user_input except KeyboardInterrupt: print("\n\nExiting chat...") return False @@ -105,8 +121,10 @@ async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunction, context: sk.Kern print("\n\nExiting chat...") return False - answer = await kernel.run(chat_func, input_vars=context.variables) - context["chat_history"] += f"\nUser:> {user_input}\nChatBot:> {answer}\n" + arguments["user_input"] = user_input + answer = await kernel.invoke(chat_func, arguments) + arguments["chat_history"].add_user_message(user_input) + arguments["chat_history"].add_assistant_message(str(answer)) print(f"ChatBot:> {answer}") return True @@ -115,11 +133,11 @@ async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunction, context: sk.Kern async def main() -> None: await populate_memory(kernel) await search_memory_examples(kernel) - chat_func, context = await setup_chat_with_memory(kernel) + chat_func, arguments = await setup_chat_with_memory(kernel) print("Begin chatting (type 'exit' to exit):\n") chatting = True while chatting: - chatting = await chat(kernel, chat_func, context) + chatting = await chat(kernel, chat_func, arguments) if __name__ == "__main__": diff --git a/python/samples/kernel-syntax-examples/google_palm_chat_with_plugin.py b/python/samples/kernel-syntax-examples/google_palm_chat_with_plugin.py index f2725b0263bd..49a4674a665a 100644 --- a/python/samples/kernel-syntax-examples/google_palm_chat_with_plugin.py +++ b/python/samples/kernel-syntax-examples/google_palm_chat_with_plugin.py @@ -4,6 +4,8 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.google_palm as sk_gp +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable """ System messages prime the assistant with different personalities or behaviors. 
@@ -28,23 +30,39 @@ kernel = sk.Kernel() api_key = sk.google_palm_settings_from_dot_env() -palm_chat_completion = sk_gp.GooglePalmChatCompletion("models/chat-bison-001", api_key) -kernel.add_chat_service("models/chat-bison-001", palm_chat_completion) -prompt_config = sk.PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.8) -prompt_template = sk.ChatPromptTemplate("{{$user_input}}", kernel.prompt_template_engine, prompt_config) -prompt_template.add_system_message(system_message) # Add the system message for context -prompt_template.add_user_message("Hi there, my name is Andrea, who are you?") # Include a chat history -prompt_template.add_assistant_message("I am Blackbeard.") -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("PiratePlugin", "Chat", function_config) +service_id = "models/chat-bison-001" +palm_chat_completion = sk_gp.GooglePalmChatCompletion(service_id, api_key) +kernel.add_service(palm_chat_completion) +req_settings = kernel.get_service(service_id).get_prompt_execution_settings_class()(service_id=service_id) +req_settings.max_tokens = 2000 +req_settings.temperature = 0.7 +req_settings.top_p = 0.8 -async def chat() -> bool: - context_vars = sk.ContextVariables() +prompt_template_config = sk.PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="user_input", description="The user input", is_required=True), + InputVariable(name="chat_history", description="The history of the conversation", is_required=True), + ], + execution_settings=req_settings, +) + +chat_func = kernel.create_function_from_prompt( + plugin_name="PiratePlugin", function_name="Chat", prompt_template_config=prompt_template_config +) + +chat_history = ChatHistory() +chat_history.add_system_message(system_message) +chat_history.add_user_message("Hi there, who are you?") +chat_history.add_assistant_message("I am Blackbeard.") + +async def chat() -> bool: try: user_input = input("User:> ") - context_vars["user_input"] = user_input except KeyboardInterrupt: print("\n\nExiting chat...") return False @@ -56,8 +74,10 @@ async def chat() -> bool: print("\n\nExiting chat...") return False - answer = await kernel.run(chat_function, input_vars=context_vars) + answer = await kernel.invoke(chat_func, user_input=user_input, chat_history=chat_history) print(f"Blackbeard:> {answer}") + chat_history.add_user_message(user_input) + chat_history.add_assistant_message(str(answer)) return True diff --git a/python/samples/kernel-syntax-examples/google_palm_text_completion.py b/python/samples/kernel-syntax-examples/google_palm_text_completion.py index 7a74e76faf98..d20e3a38c54f 100644 --- a/python/samples/kernel-syntax-examples/google_palm_text_completion.py +++ b/python/samples/kernel-syntax-examples/google_palm_text_completion.py @@ -14,7 +14,7 @@ async def text_completion_example_complete(kernel, api_key, user_mssg, settings) Complete a text prompt using the Google PaLM model and print the results. 
""" palm_text_completion = sk_gp.GooglePalmTextCompletion("models/text-bison-001", api_key) - kernel.add_text_completion_service("models/text-bison-001", palm_text_completion) + kernel.add_service(palm_text_completion) answer = await palm_text_completion.complete(user_mssg, settings) return answer diff --git a/python/samples/kernel-syntax-examples/google_search_plugin.py b/python/samples/kernel-syntax-examples/google_search_plugin.py index 00557f67e069..ad7fea11b057 100644 --- a/python/samples/kernel-syntax-examples/google_search_plugin.py +++ b/python/samples/kernel-syntax-examples/google_search_plugin.py @@ -15,7 +15,9 @@ async def main(): kernel = sk.Kernel() api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_chat_service("chat-gpt", OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id)) + kernel.add_service( + OpenAIChatCompletion(service_id="chat-gpt", ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id) + ) """ Instantiate a Google Connector diff --git a/python/samples/kernel-syntax-examples/grounded.py b/python/samples/kernel-syntax-examples/grounded.py index 4baec7b2ac34..d3ef181f194a 100644 --- a/python/samples/kernel-syntax-examples/grounded.py +++ b/python/samples/kernel-syntax-examples/grounded.py @@ -58,9 +58,10 @@ def setup(use_azure: bool = False): # Configure AI service used by the kernel if useAzureOpenAI: deployment, api_key, endpoint = sk.azure_openai_settings_from_dot_env() - kernel.add_chat_service( - "chat_completion", + service_id = ("chat_completion",) + kernel.add_service( AzureChatCompletion( + service_id=service_id, deployment_name=deployment, endpoint=endpoint, api_key=api_key, @@ -70,19 +71,18 @@ def setup(use_azure: bool = False): ) else: api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_chat_service( - "chat-gpt", - OpenAIChatCompletion(ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id), + service_id = "chat-gpt" + kernel.add_service( + OpenAIChatCompletion(service_id=service_id, ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id), ) # note: using plugins from the samples folder plugins_directory = "../samples/plugins/" - grounding_semantic_functions = kernel.import_semantic_plugin_from_directory(plugins_directory, "GroundingPlugin") + grounding_semantic_functions = kernel.import_plugin_from_prompt_directory( + service_id, plugins_directory, "GroundingPlugin" + ) - # entity_extraction = grounding_semantic_functions["ExtractEntities"] - # reference_check = grounding_semantic_functions["ReferenceCheckEntities"] - # entity_excision = grounding_semantic_functions["ExciseEntities"] return kernel, grounding_semantic_functions diff --git a/python/samples/kernel-syntax-examples/memory.py b/python/samples/kernel-syntax-examples/memory.py index 4050e3bb95a8..a18f7bb4df71 100644 --- a/python/samples/kernel-syntax-examples/memory.py +++ b/python/samples/kernel-syntax-examples/memory.py @@ -5,6 +5,8 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory async def populate_memory(kernel: sk.Kernel) -> None: @@ -31,6 +33,7 @@ async def search_memory_examples(kernel: sk.Kernel) -> None: print(f"Answer: {result[0].text}\n") +# TODO fix this ASAP async def setup_chat_with_memory( kernel: sk.Kernel, ) -> Tuple[sk.KernelFunction, sk.KernelContext]: @@ -68,10 +71,9 @@ async def setup_chat_with_memory( return chat_func, context -async def 
chat(kernel: sk.Kernel, chat_func: sk.KernelFunction, context: sk.KernelContext) -> bool: +async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunction, chat_history: ChatHistory) -> bool: try: user_input = input("User:> ") - context["user_input"] = user_input except KeyboardInterrupt: print("\n\nExiting chat...") return False @@ -83,8 +85,9 @@ async def chat(kernel: sk.Kernel, chat_func: sk.KernelFunction, context: sk.Kern print("\n\nExiting chat...") return False - answer = await kernel.run(chat_func, input_vars=context.variables) - context["chat_history"] += f"\nUser:> {user_input}\nChatBot:> {answer}\n" + answer = await kernel.invoke(chat_func, KernelArguments(user_input=user_input, chat_history=chat_history)) + chat_history.add_user_message(user_input) + chat_history.add_assistant_message(str(answer)) print(f"ChatBot:> {answer}") return True @@ -94,12 +97,16 @@ async def main() -> None: kernel = sk.Kernel() api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_chat_service("chat-gpt", sk_oai.OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id)) - kernel.add_text_embedding_generation_service( - "ada", sk_oai.OpenAITextEmbedding("text-embedding-ada-002", api_key, org_id) + service_id = "chat-gpt" + kernel.add_service( + sk_oai.OpenAIChatCompletion(service_id=service_id, ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id) ) + embedding_gen = sk_oai.OpenAITextEmbedding( + service_id="ada", ai_model_id="text-embedding-ada-002", api_key=api_key, org_id=org_id + ) + kernel.add_service(embedding_gen) - kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore()) + kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=embedding_gen) kernel.import_plugin(sk.core_plugins.TextMemoryPlugin(), "TextMemoryPlugin") print("Populating memory...") diff --git a/python/samples/kernel-syntax-examples/open_ai_chat_with_memory.py b/python/samples/kernel-syntax-examples/open_ai_chat_with_memory.py new file mode 100644 index 000000000000..7c02bafdf832 --- /dev/null +++ b/python/samples/kernel-syntax-examples/open_ai_chat_with_memory.py @@ -0,0 +1,155 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from typing import Tuple + +import semantic_kernel as sk +from semantic_kernel.connectors.ai.open_ai import ( + OpenAIChatCompletion, + OpenAITextEmbedding, +) +from semantic_kernel.functions import KernelFunction +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable + +kernel = sk.Kernel() + +api_key, org_id = sk.openai_settings_from_dot_env() +oai_text_embedding = OpenAITextEmbedding( + service_id="oai_text_embed", ai_model_id="text-embedding-ada-002", api_key=api_key, org_id=org_id +) +service_id = "oai_chat" +oai_chat_service = OpenAIChatCompletion( + service_id=service_id, ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id +) +kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=oai_text_embedding) +kernel.add_service(oai_chat_service) +kernel.add_service(oai_text_embedding) + +kernel.import_plugin(sk.core_plugins.TextMemoryPlugin(), "text_memory") + + +async def populate_memory(kernel: sk.Kernel) -> None: + await kernel.memory.save_information(collection="aboutMe", id="info1", text="My name is Andrea") + await kernel.memory.save_information(collection="aboutMe", id="info2", text="I currently work as a tour guide") + await kernel.memory.save_information( + collection="aboutMe", id="info3", text="I've been living in Seattle since 2005" + ) + await kernel.memory.save_information( + collection="aboutMe", + id="info4", + text="I visited France and Italy five times since 2015", + ) + await kernel.memory.save_information(collection="aboutMe", id="info5", text="My family is from New York") + + +async def search_memory_examples(kernel: sk.Kernel) -> None: + questions = [ + "what's my name", + "where do I live?", + "where's my family from?", + "where have I traveled?", + "what do I do for work", + ] + + for question in questions: + print(f"Question: {question}") + result = await kernel.memory.search("aboutMe", question) + print(f"Answer: {result[0].text}\n") + + +async def setup_chat_with_memory( + kernel: sk.Kernel, +) -> Tuple[KernelFunction, sk.KernelArguments]: + prompt = """ + ChatBot can have a conversation with you about any topic. + It can give explicit instructions or say 'I don't know' if + it does not have an answer. 
+ + Information about me, from previous conversations: + {{$fact1}} {{recall $fact1}} + {{$fact2}} {{recall $fact2}} + {{$fact3}} {{recall $fact3}} + {{$fact4}} {{recall $fact4}} + {{$fact5}} {{recall $fact5}} + + {{$user_input}} + + """.strip() + req_settings = kernel.get_service(service_id).get_prompt_execution_settings_class()(service_id=service_id) + req_settings.max_tokens = 2000 + req_settings.temperature = 0.7 + req_settings.top_p = 0.8 + + prompt_template_config = sk.PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="user_input", description="The user input", is_required=True), + InputVariable(name="chat_history", description="The history of the conversation", is_required=True), + ], + execution_settings=req_settings, + ) + + chat_func = kernel.create_function_from_prompt( + plugin_name="chat_memory", function_name="ChatWithMemory", prompt_template_config=prompt_template_config + ) + + chat_history = ChatHistory() + chat_history.add_system_message(prompt) + + arguments = sk.KernelArguments( + fact1="what is my name?", + fact2="what is my favorite hobby?", + fact3="where's my family from?", + fact4="where did I travel last year?", + fact5="what do I do for work?", + collection="aboutMe", + relevance=0.6, + chat_history=chat_history, + ) + + return chat_func, arguments + + +async def chat(kernel: sk.Kernel, chat_func: KernelFunction, arguments: sk.KernelArguments) -> bool: + try: + user_input = input("User:> ") + print(f"User:> {user_input}") + except KeyboardInterrupt: + print("\n\nExiting chat...") + return False + except EOFError: + print("\n\nExiting chat...") + return False + + if user_input == "exit": + print("\n\nExiting chat...") + return False + + arguments["user_input"] = user_input + answer = await kernel.invoke(chat_func, arguments) + arguments["chat_history"].add_user_message(user_input) + arguments["chat_history"].add_system_message(str(answer)) + + print(f"ChatBot:> {answer}") + return True + + +async def main(): + print("Populating memory...") + await populate_memory(kernel) + + print("Asking questions... 
(manually)") + await search_memory_examples(kernel) + + print("Setting up a chat (with memory!)") + chat_func, arguments = await setup_chat_with_memory(kernel) + + result = await chat(kernel=kernel, chat_func=chat_func, arguments=arguments) + print(result) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/kernel-syntax-examples/openai_function_calling.py b/python/samples/kernel-syntax-examples/openai_function_calling.py index ef8ba18444fd..3a1fdd3e8c45 100644 --- a/python/samples/kernel-syntax-examples/openai_function_calling.py +++ b/python/samples/kernel-syntax-examples/openai_function_calling.py @@ -5,10 +5,15 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai -from semantic_kernel.connectors.ai.open_ai.models.chat.open_ai_chat_message import ( - OpenAIChatMessage, +from semantic_kernel.connectors.ai.chat_completion_client_base import ( + ChatCompletionClientBase, ) from semantic_kernel.core_plugins import MathPlugin +from semantic_kernel.functions.function_result import FunctionResult +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig system_message = """ You are a chat bot. Your name is Mosscap and @@ -26,9 +31,9 @@ kernel = sk.Kernel() api_key, org_id = sk.openai_settings_from_dot_env() -kernel.add_chat_service( - "gpt-3.5-turbo", +kernel.add_service( sk_oai.OpenAIChatCompletion( + service_id="chat", ai_model_id="gpt-3.5-turbo-1106", api_key=api_key, ), @@ -37,7 +42,7 @@ plugins_directory = os.path.join(__file__, "../../../../samples/plugins") # adding plugins to the kernel # the joke plugin in the FunPlugins is a semantic plugin and has the function calling disabled. -kernel.import_semantic_plugin_from_directory(plugins_directory, "FunPlugin") +kernel.import_plugin_from_prompt_directory("chat", plugins_directory, "FunPlugin") # the math plugin is a core plugin and has the function calling enabled. kernel.import_plugin(MathPlugin(), plugin_name="math") @@ -74,46 +79,69 @@ } ] -prompt_config = sk.PromptTemplateConfig.from_execution_settings( - max_tokens=2000, - temperature=0.7, - top_p=0.8, - tool_choice="auto", - tools=tools, -) -prompt_template = sk.ChatPromptTemplate[OpenAIChatMessage]( - "{{$user_input}}", kernel.prompt_template_engine, prompt_config -) -prompt_template.add_system_message(system_message) -prompt_template.add_user_message("Hi there, who are you?") -prompt_template.add_assistant_message("I am Mosscap, a chat bot. 
I'm trying to figure out what people need.") -function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) -chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) -# define the functions available +async def main(): + settings = kernel.get_prompt_execution_settings_from_service(ChatCompletionClientBase, "chat") + settings.service_id = "chat" + settings.tools = tools + settings.tool_choice = "auto" + settings.max_tokens = 2000 + settings.temperature = 0.7 + settings.top_p = 0.8 + + prompt_template_config = PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable( + name="user_input", + description="The history of the conversation", + is_required=True, + default="", + ), + InputVariable( + name="chat_history", + description="The history of the conversation", + is_required=True, + ), + ], + execution_settings=settings, + ) + chat = ChatHistory(system_message=system_message) + chat.add_user_message("Hi there, who are you?") + chat.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need") -async def main() -> None: - context = kernel.create_new_context() - context.variables["user_input"] = "I want to find a hotel in Seattle with free wifi and a pool." + chat_function = kernel.create_function_from_prompt( + plugin_name="ChatBot", function_name="Chat", prompt_template_config=prompt_template_config + ) + + response = kernel.invoke_stream( + chat_function, + KernelArguments(user_input="I want to find a hotel in Seattle with free wifi and a pool.", chat_history=chat), + ) messages = [] tool_call = None - response = chat_function.invoke_stream_async(context=context) async for message in response: + if isinstance(message, FunctionResult): + # There's been an error, so print it + print(message) + return current = message[0] - messages.append(current) + messages.append(str(current)) if current.tool_calls: if tool_call is None: tool_call = current.tool_calls[0] else: tool_call += current.tool_calls[0] - if tool_call: print(f"Function to be called: {tool_call.function.name}") print(f"Function parameters: \n{tool_call.function.parse_arguments()}") return print("No function was called") - print(f"Output was: {str(context)}") + output = "".join([msg for msg in messages]) + print(f"Output was: {output}") if __name__ == "__main__": diff --git a/python/samples/kernel-syntax-examples/openai_logit_bias.py b/python/samples/kernel-syntax-examples/openai_logit_bias.py index 646475eab5aa..e6f226538446 100644 --- a/python/samples/kernel-syntax-examples/openai_logit_bias.py +++ b/python/samples/kernel-syntax-examples/openai_logit_bias.py @@ -12,6 +12,10 @@ from semantic_kernel.connectors.ai.text_completion_client_base import ( TextCompletionClientBase, ) +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig """ Logit bias enables prioritizing certain tokens within a given output. 
@@ -29,9 +33,15 @@ def _config_ban_tokens(settings: PromptExecutionSettings, keys: Dict[Any, Any]): return settings +def _prepare_input_chat(chat: ChatHistory): + return "".join([f"{msg.role}: {msg.content}\n" for msg in chat]) + + async def chat_request_example(kernel, api_key, org_id): - openai_chat_completion = sk_oai.OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id) - kernel.add_chat_service("chat_service", openai_chat_completion) + openai_chat_completion = sk_oai.OpenAIChatCompletion( + service_id="chat_service", ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id + ) + kernel.add_service(openai_chat_completion) # Spaces and capitalization affect the token ids. # The following is the token ids of basketball related words. @@ -68,46 +78,50 @@ async def chat_request_example(kernel, api_key, org_id): settings = kernel.get_prompt_execution_settings_from_service(ChatCompletionClientBase, "chat_service") settings = _config_ban_tokens(settings, keys) - prompt_config = sk.PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.8) - prompt_template = sk.ChatPromptTemplate("{{$user_input}}", kernel.prompt_template_engine, prompt_config) + prompt_template_config = PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable( + name="user_input", description="The history of the conversation", is_required=True, default="" + ), + ], + execution_settings=settings, + ) + + chat = ChatHistory() + + chat.add_user_message("Hi there, who are you?") + chat.add_assistant_message("I am an AI assistant here to answer your questions.") + + chat_function = kernel.create_function_from_prompt( + plugin_name="ChatBot", function_name="Chat", prompt_template_config=prompt_template_config + ) - # Setup chat with prompt - prompt_template.add_system_message("You are a basketball expert") - user_mssg = "I love the LA Lakers, tell me an interesting fact about LeBron James." - prompt_template.add_user_message(user_mssg) - function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) - kernel.register_semantic_function("ChatBot", "Chat", function_config) + chat.add_system_message("You are a basketball expert") + chat.add_user_message("I love the LA Lakers, tell me an interesting fact about LeBron James.") - chat_messages = [] - messages = [{"role": "user", "content": user_mssg}] + answer = await kernel.invoke(chat_function, KernelArguments(user_input=_prepare_input_chat(chat))) + chat.add_assistant_message(str(answer)) - chat_messages.append(("user", user_mssg)) - answer = await openai_chat_completion.complete_chat(messages=messages, settings=settings) - chat_messages.append(("assistant", str(answer[0]))) + chat.add_user_message("What are his best all-time stats?") + answer = await kernel.invoke(chat_function, KernelArguments(user_input=_prepare_input_chat(chat))) + chat.add_assistant_message(str(answer)) - user_mssg = "What are his best all-time stats?" 
- messages = [{"role": "user", "content": user_mssg}] - chat_messages.append(("user", user_mssg)) - answer = await openai_chat_completion.complete_chat(messages=messages, settings=settings) - chat_messages.append(("assistant", str(answer[0]))) + for msg in chat.messages: + print(f"{msg.role}: {msg.content}") - context_vars = sk.ContextVariables() - context_vars["chat_history"] = "" - context_vars["chat_bot_ans"] = "" - for role, mssg in chat_messages: - if role == "user": - context_vars["chat_history"] += f"User:> {mssg}\n" - elif role == "assistant": - context_vars["chat_history"] += f"ChatBot:> {mssg}\n" - context_vars["chat_bot_ans"] += f"{mssg}\n" + kernel.clear_all_services() - kernel.remove_chat_service("chat_service") - return context_vars, banned_words + return chat, banned_words async def text_complete_request_example(kernel, api_key, org_id): - openai_text_completion = sk_oai.OpenAITextCompletion("gpt-3.5-turbo-instruct", api_key, org_id) - kernel.add_text_completion_service("text_service", openai_text_completion) + openai_text_completion = sk_oai.OpenAITextCompletion( + service_id="text_service", ai_model_id="gpt-3.5-turbo-instruct", api_key=api_key, org_id=org_id + ) + kernel.add_service(openai_text_completion) # Spaces and capitalization affect the token ids. # The following is the token ids of pie related words. @@ -153,15 +167,35 @@ async def text_complete_request_example(kernel, api_key, org_id): settings = kernel.get_prompt_execution_settings_from_service(TextCompletionClientBase, "text_service") settings = _config_ban_tokens(settings, keys) - user_mssg = "The best pie flavor to have in autumn is" - answer = await openai_text_completion.complete(user_mssg, settings) + prompt_template_config = PromptTemplateConfig( + template="{{$user_input}}", + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable( + name="user_input", description="The history of the conversation", is_required=True, default="" + ), + ], + execution_settings=settings, + ) - context_vars = sk.ContextVariables() - context_vars["chat_history"] = f"User:> {user_mssg}\nChatBot:> {answer}\n" - context_vars["chat_bot_ans"] = str(answer) + chat = ChatHistory() + + chat.add_user_message("The best pie flavor to have in autumn is") + + text_function = kernel.create_function_from_prompt( + plugin_name="TextBot", function_name="TextCompletion", prompt_template_config=prompt_template_config + ) + + answer = await kernel.invoke(text_function, KernelArguments(user_input=_prepare_input_chat(chat))) + chat.add_assistant_message(str(answer)) + + for msg in chat.messages: + print(f"{msg.role}: {msg.content}") kernel.remove_text_completion_service("text_service") - return context_vars, banned_words + + return chat, banned_words def _check_banned_words(banned_list, actual_list) -> bool: @@ -173,9 +207,9 @@ def _check_banned_words(banned_list, actual_list) -> bool: return passed -def _format_output(context, banned_words) -> None: - print(context["chat_history"]) - chat_bot_ans_words = context["chat_bot_ans"].split() +def _format_output(chat, banned_words) -> None: + print("--- Checking for banned words ---") + chat_bot_ans_words = [word for msg in chat.messages if msg.role == "assistant" for word in msg.content.split()] if _check_banned_words(banned_words, chat_bot_ans_words): print("None of the banned words were found in the answer") diff --git a/python/samples/kernel-syntax-examples/plugins_from_dir.py b/python/samples/kernel-syntax-examples/plugins_from_dir.py index 08378fa44c9d..1602f7e752b2 
100644 --- a/python/samples/kernel-syntax-examples/plugins_from_dir.py +++ b/python/samples/kernel-syntax-examples/plugins_from_dir.py @@ -5,30 +5,39 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.functions.kernel_arguments import KernelArguments -kernel = sk.Kernel() - -useAzureOpenAI = False -model = "gpt-35-turbo-instruct" if useAzureOpenAI else "gpt-3.5-turbo-instruct" -service_id = model - -# Configure AI service used by the kernel -if useAzureOpenAI: - deployment_name, api_key, endpoint = sk.azure_openai_settings_from_dot_env() - kernel.add_text_completion_service( - service_id, - sk_oai.AzureTextCompletion(deployment_name=model, api_key=api_key, endpoint=endpoint), - ) -else: - api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_text_completion_service( - service_id, - sk_oai.OpenAITextCompletion(ai_model_id=model, api_key=api_key, org_id=org_id), - ) - -# note: using plugins from the samples folder -plugins_directory = os.path.join(__file__, "../../../../samples/plugins") -plugin = kernel.import_semantic_plugin_from_directory(plugins_directory, "FunPlugin") - -result = asyncio.run(kernel.run(plugin["Joke"], input_str="time travel to dinosaur age")) -print(result) + +async def main(): + kernel = sk.Kernel() + + useAzureOpenAI = False + model = "gpt-35-turbo-instruct" if useAzureOpenAI else "gpt-3.5-turbo-instruct" + service_id = model + + # Configure AI service used by the kernel + if useAzureOpenAI: + deployment_name, api_key, endpoint = sk.azure_openai_settings_from_dot_env() + kernel.add_service( + sk_oai.AzureTextCompletion( + service_id=service_id, deployment_name=model, api_key=api_key, endpoint=endpoint + ), + ) + else: + api_key, org_id = sk.openai_settings_from_dot_env() + kernel.add_service( + sk_oai.OpenAITextCompletion(service_id=service_id, ai_model_id=model, api_key=api_key, org_id=org_id), + ) + + # note: using plugins from the samples folder + plugins_directory = os.path.join(__file__, "../../../../samples/plugins") + plugin = kernel.import_plugin_from_prompt_directory(service_id, plugins_directory, "FunPlugin") + + arguments = KernelArguments(input="time travel to dinosaur age", style="super silly") + + result = await kernel.invoke(plugin["Joke"], arguments) + print(result) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/kernel-syntax-examples/self-critique_rag.py b/python/samples/kernel-syntax-examples/self-critique_rag.py index be43ebc6121a..993244d3eb52 100644 --- a/python/samples/kernel-syntax-examples/self-critique_rag.py +++ b/python/samples/kernel-syntax-examples/self-critique_rag.py @@ -13,7 +13,6 @@ AzureCognitiveSearchMemoryStore, ) from semantic_kernel.core_plugins.text_memory_plugin import TextMemoryPlugin -from semantic_kernel.functions.old.context_variables import ContextVariables COLLECTION_NAME = "generic" @@ -45,19 +44,19 @@ async def main() -> None: vector_size = 1536 # Setting up OpenAI services for text completion and text embedding - kernel.add_text_completion_service( - "dv", + kernel.add_service( AzureTextCompletion( # Note: text-davinci-003 is deprecated and will be replaced by # AzureOpenAI's gpt-35-turbo-instruct model. 
+ service_id="dv", deployment_name="gpt-35-turbo-instruct", endpoint=AZURE_OPENAI_ENDPOINT, api_key=AZURE_OPENAI_API_KEY, ), ) - kernel.add_text_embedding_generation_service( - "ada", + kernel.add_service( AzureTextEmbedding( + service_id="ada", deployment_name="text-embedding-ada-002", endpoint=AZURE_OPENAI_ENDPOINT, api_key=AZURE_OPENAI_API_KEY, @@ -99,15 +98,8 @@ async def main() -> None: chat_func = kernel.create_semantic_function(sk_prompt_rag, max_tokens=1000, temperature=0.5) self_critique_func = kernel.create_semantic_function(sk_prompt_rag_sc, max_tokens=4, temperature=0.0) - answer = await kernel.run( + answer = await kernel.invoke( chat_func, - input_vars=ContextVariables( - variables={ - "user_input": user_input, - "collection": COLLECTION_NAME, - "limit": "2", - } - ), ) print(f"Answer: {str(answer).strip()}") check = await kernel.run(self_critique_func, input_context=answer) diff --git a/python/samples/kernel-syntax-examples/sequential_planner.py b/python/samples/kernel-syntax-examples/sequential_planner.py index e6b549255201..f2c889c362cf 100644 --- a/python/samples/kernel-syntax-examples/sequential_planner.py +++ b/python/samples/kernel-syntax-examples/sequential_planner.py @@ -3,7 +3,6 @@ import semantic_kernel as sk from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion from semantic_kernel.core_plugins import ( - FileIOPlugin, MathPlugin, TextPlugin, TimePlugin, @@ -15,14 +14,16 @@ async def main(): kernel = sk.Kernel() api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_chat_service("gpt-3.5", OpenAIChatCompletion("gpt-3.5-turbo", api_key=api_key, org_id=org_id)) + service_id = "gpt-3.5" + kernel.add_service( + OpenAIChatCompletion(service_id=service_id, ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id) + ) kernel.import_plugin(MathPlugin(), "math") - kernel.import_plugin(FileIOPlugin(), "fileIO") kernel.import_plugin(TimePlugin(), "time") kernel.import_plugin(TextPlugin(), "text") # create an instance of sequential planner. - planner = SequentialPlanner(kernel) + planner = SequentialPlanner(service_id=service_id, kernel=kernel) # the ask for which the sequential planner is going to find a relevant function. ask = "What day of the week is today, all uppercase?" 
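The sample updates above all converge on the same invocation pattern: register a service with an explicit service_id, load prompt plugins for that service, and call kernel.invoke with KernelArguments instead of kernel.run with input_str. A minimal sketch of that pattern, assuming the kernel.add_service, import_plugin_from_prompt_directory, and kernel.invoke APIs introduced in this PR; the service id, model name, and plugin directory are illustrative placeholders, not part of the patch.

# Minimal sketch of the new service/invoke pattern used by the updated samples above.
# Service id, model name, and plugin directory are illustrative placeholders.
import asyncio

import semantic_kernel as sk
import semantic_kernel.connectors.ai.open_ai as sk_oai
from semantic_kernel.functions.kernel_arguments import KernelArguments


async def run_sample() -> None:
    kernel = sk.Kernel()
    api_key, org_id = sk.openai_settings_from_dot_env()
    service_id = "chat-gpt"
    # Services are now registered with an explicit service_id instead of add_chat_service.
    kernel.add_service(
        sk_oai.OpenAIChatCompletion(service_id=service_id, ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id)
    )
    # Prompt plugins are loaded from a directory for a specific service id.
    plugin = kernel.import_plugin_from_prompt_directory(service_id, "../../samples/plugins", "FunPlugin")
    # kernel.run(..., input_str=...) is replaced by kernel.invoke(..., KernelArguments(...)).
    result = await kernel.invoke(plugin["Joke"], KernelArguments(input="time travel to dinosaur age"))
    print(result)


if __name__ == "__main__":
    asyncio.run(run_sample())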
diff --git a/python/samples/kernel-syntax-examples/setup_logging.py b/python/samples/kernel-syntax-examples/setup_logging.py index 2da0a5f397ce..5dff26b2083a 100644 --- a/python/samples/kernel-syntax-examples/setup_logging.py +++ b/python/samples/kernel-syntax-examples/setup_logging.py @@ -3,6 +3,7 @@ import semantic_kernel as sk from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion +from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.utils.logging import setup_logging @@ -16,13 +17,19 @@ async def main(): api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_chat_service("chat-gpt", OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id)) + kernel.add_service( + OpenAIChatCompletion(service_id="chat-gpt", ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id) + ) - plugin = kernel.import_semantic_plugin_from_directory("../../samples/plugins", "FunPlugin") + plugin = kernel.import_plugin_from_prompt_directory( + service_id="chat-gpt", parent_directory="../../samples/plugins", plugin_directory_name="FunPlugin" + ) joke_function = plugin["Joke"] - print(joke_function("time travel to dinosaur age")) + result = await kernel.invoke(joke_function, KernelArguments(input="time travel to dinosaur age")) + + print(result) if __name__ == "__main__": diff --git a/python/samples/kernel-syntax-examples/template_language.py b/python/samples/kernel-syntax-examples/template_language.py new file mode 100644 index 000000000000..b161556ff133 --- /dev/null +++ b/python/samples/kernel-syntax-examples/template_language.py @@ -0,0 +1,55 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +import semantic_kernel as sk +import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.core_plugins import ( + TimePlugin, +) +from semantic_kernel.prompt_template.kernel_prompt_template import KernelPromptTemplate +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig + + +async def main(): + kernel = sk.Kernel() + + useAzureOpenAI = False + model = "gpt-35-turbo" if useAzureOpenAI else "gpt-3.5-turbo-1106" + service_id = model + + api_key, org_id = sk.openai_settings_from_dot_env() + kernel.add_service( + sk_oai.OpenAIChatCompletion(service_id=service_id, ai_model_id=model, api_key=api_key, org_id=org_id), + ) + + kernel.import_plugin(TimePlugin(), "time") + + function_definition = """ + Today is: {{time.Date}} + Current time is: {{time.Time}} + + Answer to the following questions using JSON syntax, including the data used. + Is it morning, afternoon, evening, or night (morning/afternoon/evening/night)? + Is it weekend time (weekend/not weekend)? 
+ """ + + print("--- Rendered Prompt ---") + prompt_template_config = PromptTemplateConfig(template=function_definition) + prompt_template = KernelPromptTemplate(prompt_template_config) + rendered_prompt = await prompt_template.render(kernel, arguments=None) + print(rendered_prompt) + + kind_of_day = kernel.create_function_from_prompt( + template=function_definition, + execution_settings=sk_oai.OpenAIChatPromptExecutionSettings(service_id=service_id, max_tokens=100), + function_name="kind_of_day", + ) + + print("--- Prompt Function Result ---") + result = await kernel.invoke(kind_of_day) + print(result) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/semantic_kernel/__init__.py b/python/semantic_kernel/__init__.py index 9417be3a61fd..4b6d5dd3ddb2 100644 --- a/python/semantic_kernel/__init__.py +++ b/python/semantic_kernel/__init__.py @@ -8,9 +8,6 @@ from semantic_kernel.prompt_template.prompt_template_config import ( PromptTemplateConfig, ) -from semantic_kernel.prompt_template.semantic_function_config import ( - SemanticFunctionConfig, -) from semantic_kernel.utils.logging import setup_logging from semantic_kernel.utils.null_logger import NullLogger from semantic_kernel.utils.settings import ( @@ -46,9 +43,8 @@ "PromptTemplateConfig", "PromptTemplate", "ChatPromptTemplate", - "SemanticFunctionConfig", "KernelArguments", "memory", "core_plugins", "setup_logging", -] +] \ No newline at end of file diff --git a/python/semantic_kernel/connectors/ai/chat_completion_client_base.py b/python/semantic_kernel/connectors/ai/chat_completion_client_base.py index 819bf0711744..cf1f1a00749b 100644 --- a/python/semantic_kernel/connectors/ai/chat_completion_client_base.py +++ b/python/semantic_kernel/connectors/ai/chat_completion_client_base.py @@ -8,22 +8,22 @@ if TYPE_CHECKING: from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.contents import ChatMessageContent, StreamingChatMessageContent - from semantic_kernel.models.ai.chat_completion.chat_message import ChatMessage + from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory class ChatCompletionClientBase(AIServiceClientBase, ABC): @abstractmethod async def complete_chat( self, - messages: List["ChatMessage"], + chat_history: "ChatHistory", settings: "PromptExecutionSettings", ) -> List["ChatMessageContent"]: """ This is the method that is called from the kernel to get a response from a chat-optimized LLM. Arguments: - messages {List[ChatMessage]} -- A list of chat messages, that can be rendered into a - set of messages, from system, user, assistant and function. + chat_history {ChatHistory} -- A list of chats in a chat_history object, that can be + rendered into messages from system, user, assistant and tools. settings {PromptExecutionSettings} -- Settings for the request. Returns: @@ -34,15 +34,15 @@ async def complete_chat( @abstractmethod async def complete_chat_stream( self, - messages: List["ChatMessage"], + chat_history: "ChatHistory", settings: "PromptExecutionSettings", ) -> AsyncIterable[List["StreamingChatMessageContent"]]: """ This is the method that is called from the kernel to get a stream response from a chat-optimized LLM. Arguments: - messages {List[ChatMessage]} -- A list of chat messages, that can be rendered into a - set of messages, from system, user, assistant and function. 
+ chat_history {ChatHistory} -- A chat history that can be rendered into a set of messages, from system, user, assistant and tool. settings {PromptExecutionSettings} -- Settings for the request. Yields: diff --git a/python/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py b/python/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py index 6dfaa8f08d78..481bdb84d006 100644 --- a/python/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py +++ b/python/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py @@ -2,7 +2,7 @@ import logging import sys -from typing import Dict, List, Optional, Tuple +from typing import List, Optional, Tuple from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.text_content import TextContent @@ -28,19 +28,25 @@ from semantic_kernel.connectors.ai.text_completion_client_base import ( TextCompletionClientBase, ) +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.models.ai.chat_completion.chat_role import ChatRole +from semantic_kernel.utils.chat import prepare_chat_history_for_request logger: logging.Logger = logging.getLogger(__name__) +int_to_role = {1: ChatRole.USER, 2: ChatRole.SYSTEM, 3: ChatRole.ASSISTANT, 4: ChatRole.TOOL} + class GooglePalmChatCompletion(ChatCompletionClientBase, TextCompletionClientBase): api_key: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1)] - _message_history: Optional[ChatResponse] = PrivateAttr() + _message_history: Optional[ChatHistory] = PrivateAttr() + service_id: Optional[str] = None def __init__( self, ai_model_id: str, api_key: str, - message_history: Optional[ChatResponse] = None, + message_history: Optional[ChatHistory] = None, ): """ Initializes a new instance of the GooglePalmChatCompletion class. @@ -50,7 +56,7 @@ def __init__( https://developers.generativeai.google/models/language api_key {str} -- GooglePalm API key, see https://developers.generativeai.google/products/palm - message_history {Optional[ChatResponse]} -- The message history to use for context. (Optional) + message_history {Optional[ChatHistory]} -- The message history to use for context. (Optional) """ super().__init__( ai_model_id=ai_model_id, @@ -60,7 +66,7 @@ def __init__( async def complete_chat( self, - messages: List[Dict[str, str]], + messages: ChatHistory, settings: GooglePalmPromptExecutionSettings, ) -> List[ChatMessageContent]: """ @@ -74,7 +80,7 @@ async def complete_chat( Returns: List[ChatMessageContent] -- A list of ChatMessageContent objects representing the response(s) from the LLM. """ - settings.messages = [{"author": message["role"], "content": message["content"]} for message in messages] + settings.messages = prepare_chat_history_for_request(messages, output_role_key="author", override_role="user") if not settings.ai_model_id: settings.ai_model_id = self.ai_model_id response = await self._send_chat_request(settings) @@ -94,13 +100,16 @@ def _create_chat_message_content( Returns: ChatMessageContent -- The created chat message content.
""" - metadata = {"citation_metadata": candidate.get("citation_metadata"), "filters": response.filters} + metadata = { + "citation_metadata": candidate.get("citation_metadata"), + "filters": response.filters, + "choice_index": index, + } return ChatMessageContent( - choice_index=index, inner_content=response, ai_model_id=self.ai_model_id, metadata=metadata, - role=candidate.get("author"), + role=int_to_role[int(candidate.get("author"))], # TODO: why is author coming back as '1'? content=candidate.get("content"), ) diff --git a/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py b/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py index 3799f572ebd0..90dc02299ab4 100644 --- a/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py +++ b/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py @@ -47,6 +47,7 @@ def __init__( -- None if using device_map instead. (If both device and device_map are specified, device overrides device_map. If unintended, it can lead to unexpected behavior.) + service_id {Optional[str]} -- Service ID for the AI service. task {Optional[str]} -- Model completion task type, options are: - summarization: takes a long text and returns a shorter summary. - text-generation: takes incomplete text and returns a set of completion candidates. diff --git a/python/semantic_kernel/connectors/ai/ollama/services/ollama_chat_completion.py b/python/semantic_kernel/connectors/ai/ollama/services/ollama_chat_completion.py index a601b54cdb30..3096dda743f9 100644 --- a/python/semantic_kernel/connectors/ai/ollama/services/ollama_chat_completion.py +++ b/python/semantic_kernel/connectors/ai/ollama/services/ollama_chat_completion.py @@ -2,7 +2,7 @@ import json import logging -from typing import AsyncIterable, Dict, List, Optional +from typing import AsyncIterable, List, Optional import aiohttp from pydantic import HttpUrl @@ -21,6 +21,8 @@ from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent from semantic_kernel.contents.streaming_text_content import StreamingTextContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.utils.chat import prepare_chat_history_for_request logger: logging.Logger = logging.getLogger(__name__) @@ -42,22 +44,22 @@ class OllamaChatCompletion(TextCompletionClientBase, ChatCompletionClientBase): async def complete_chat( self, - messages: List[Dict[str, str]], + chat_history: ChatHistory, settings: OllamaChatPromptExecutionSettings, ) -> List[ChatMessageContent]: """ This is the method that is called from the kernel to get a response from a chat-optimized LLM. Arguments: - messages {List[ChatMessage]} -- A list of chat messages, that can be rendered into a - set of messages, from system, user, assistant and function. + chat_history {ChatHistory} -- A chat history that contains a list of chat messages, + that can be rendered into a set of messages, from system, user, assistant and function. settings {PromptExecutionSettings} -- Settings for the request. logger {Logger} -- A logger to use for logging. (Deprecated) Returns: List[ChatMessageContent] -- A list of ChatMessageContent objects representing the response(s) from the LLM. 
""" - settings.messages = messages + settings.messages = prepare_chat_history_for_request(chat_history) settings.stream = False async with AsyncSession(self.session) as session: async with session.post(str(self.url), json=settings.prepare_settings_dict()) as response: @@ -74,7 +76,7 @@ async def complete_chat( async def complete_chat_stream( self, - messages: List[Dict[str, str]], + chat_history: ChatHistory, settings: OllamaChatPromptExecutionSettings, ) -> AsyncIterable[List[StreamingChatMessageContent]]: """ @@ -82,13 +84,14 @@ async def complete_chat_stream( Note that this method does not support multiple responses. Arguments: - prompt {str} -- Prompt to complete. + chat_history {ChatHistory} -- A chat history that contains a list of chat messages, + that can be rendered into a set of messages, from system, user, assistant and function. settings {OllamaChatPromptExecutionSettings} -- Request settings. Yields: List[StreamingChatMessageContent] -- Stream of StreamingChatMessageContent objects. """ - settings.messages = messages + settings.messages = prepare_chat_history_for_request(chat_history) settings.stream = True async with AsyncSession(self.session) as session: async with session.post(str(self.url), json=settings.prepare_settings_dict()) as response: @@ -110,20 +113,20 @@ async def complete_chat_stream( async def complete( self, - prompt: str, + chat_history: ChatHistory, settings: OllamaChatPromptExecutionSettings, ) -> List[TextContent]: """ This is the method that is called from the kernel to get a response from a text-optimized LLM. Arguments: - prompt {str} -- The prompt to send to the LLM. + chat_history {ChatHistory} -- A chat history that contains the prompt to complete. settings {OllamaChatPromptExecutionSettings} -- Settings for the request. Returns: List["TextContent"] -- The completion result(s). """ - settings.messages = [{"role": "user", "content": prompt}] + settings.messages = [prepare_chat_history_for_request(chat_history)[-1]] settings.stream = False async with AsyncSession(self.session) as session: async with session.post(str(self.url), json=settings.prepare_settings_dict()) as response: @@ -139,7 +142,7 @@ async def complete( async def complete_stream( self, - prompt: str, + chat_history: ChatHistory, settings: OllamaChatPromptExecutionSettings, ) -> AsyncIterable[List[StreamingTextContent]]: """ @@ -147,14 +150,14 @@ async def complete_stream( Note that this method does not support multiple responses. Arguments: - prompt {str} -- Prompt to complete. + chat_history {ChatHistory} -- A chat history that contains the prompt to complete. settings {OllamaChatPromptExecutionSettings} -- Request settings. Yields: List["StreamingTextContent"] -- The result stream made up of StreamingTextContent objects. 
""" - settings.messages = [{"role": "user", "content": prompt}] + settings.messages = [prepare_chat_history_for_request(chat_history)[-1]] settings.stream = True async with AsyncSession(self.session) as session: async with session.post(str(self.url), json=settings.prepare_settings_dict()) as response: diff --git a/python/semantic_kernel/connectors/ai/open_ai/contents/open_ai_chat_message_content.py b/python/semantic_kernel/connectors/ai/open_ai/contents/open_ai_chat_message_content.py index 3d8539513ae2..2fb7c8b352eb 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/contents/open_ai_chat_message_content.py +++ b/python/semantic_kernel/connectors/ai/open_ai/contents/open_ai_chat_message_content.py @@ -30,3 +30,8 @@ class OpenAIChatMessageContent(ChatMessageContent): inner_content: ChatCompletion function_call: Optional[FunctionCall] = None tool_calls: Optional[List[ToolCall]] = None + + @staticmethod + def ToolIdProperty(): + # Directly using the class name and the attribute name as strings + return f"{ToolCall.__name__}.{ToolCall.id.__name__}" diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py index 6061796eb57e..90ff6d3adecd 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py @@ -26,8 +26,10 @@ ) from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory from semantic_kernel.models.ai.chat_completion.chat_role import ChatRole from semantic_kernel.models.ai.chat_completion.finish_reason import FinishReason +from semantic_kernel.utils.chat import prepare_chat_history_for_request logger: logging.Logger = logging.getLogger(__name__) @@ -41,21 +43,20 @@ def get_prompt_execution_settings_class(self) -> "PromptExecutionSettings": async def complete_chat( self, - messages: List[Dict[str, str]], + chat_history: ChatHistory, settings: OpenAIPromptExecutionSettings, ) -> List[OpenAIChatMessageContent]: """Executes a chat completion request and returns the result. Arguments: - messages {List[Dict[str,str]]} -- The messages to use for the chat completion. + chat_history {ChatHistory} -- The chat history to use for the chat completion. settings {OpenAIChatPromptExecutionSettings | AzureChatPromptExecutionSettings} -- The settings to use for the chat completion request. Returns: List[OpenAIChatMessageContent | AzureChatMessageContent] -- The completion result(s). """ - # TODO: replace messages with ChatHistory object with ChatMessageContent objects - settings.messages = messages + settings.messages = prepare_chat_history_for_request(chat_history) settings.stream = False if not settings.ai_model_id: settings.ai_model_id = self.ai_model_id @@ -65,13 +66,13 @@ async def complete_chat( async def complete_chat_stream( self, - messages: List[Dict[str, str]], + chat_history: ChatHistory, settings: OpenAIPromptExecutionSettings, ) -> AsyncIterable[List[OpenAIStreamingChatMessageContent]]: """Executes a streaming chat completion request and returns the result. Arguments: - messages {List[Tuple[str,str]]} -- The messages to use for the chat completion. + chat_history {ChatHistory} -- The chat history to use for the chat completion. 
settings {OpenAIChatPromptExecutionSettings | AzureChatPromptExecutionSettings} -- The settings to use for the chat completion request. @@ -79,7 +80,7 @@ async def complete_chat_stream( List[OpenAIStreamingChatMessageContent | AzureStreamingChatMessageContent] -- A stream of OpenAIStreamingChatMessages or AzureStreamingChatMessageContent when using Azure. """ - settings.messages = messages + settings.messages = prepare_chat_history_for_request(chat_history) settings.stream = True if not settings.ai_model_id: settings.ai_model_id = self.ai_model_id @@ -129,9 +130,9 @@ def _create_streaming_chat_message_content( inner_content=chunk, ai_model_id=self.ai_model_id, metadata=metadata, - role=ChatRole(choice.delta.role), + role=ChatRole(choice.delta.role) if choice.delta.role else None, content=choice.delta.content, - finish_reason=FinishReason(choice.finish_reason), + finish_reason=FinishReason(choice.finish_reason) if choice.finish_reason else None, function_call=self._get_function_call_from_chat_choice(choice), tool_calls=self._get_tool_calls_from_chat_choice(choice), ) diff --git a/python/semantic_kernel/connectors/ai/open_ai/utils.py b/python/semantic_kernel/connectors/ai/open_ai/utils.py index 8116774520fd..702effe046a4 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/utils.py +++ b/python/semantic_kernel/connectors/ai/open_ai/utils.py @@ -1,21 +1,20 @@ # Copyright (c) Microsoft. All rights reserved. -import asyncio import json import logging -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Union from openai.types.chat import ChatCompletion from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase from semantic_kernel.connectors.ai.open_ai.models.chat_completion.function_call import FunctionCall from semantic_kernel.connectors.ai.open_ai.models.chat_completion.tool_calls import ToolCall -from semantic_kernel.connectors.ai.open_ai.prompt_template.open_ai_chat_prompt_template import ( - OpenAIChatPromptTemplate, -) +from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase from semantic_kernel.functions.function_result import FunctionResult from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.functions.kernel_function import KernelFunction +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory logger: logging.Logger = logging.getLogger(__name__) @@ -52,7 +51,7 @@ def _describe_tool_call(function: KernelFunction) -> Dict[str, str]: for param in func_view.parameters if param.expose }, - "required": [p.name for p in func_view.parameters if p.required], + "required": [p.name for p in func_view.parameters if p.required and p.expose], }, }, } @@ -156,13 +155,13 @@ def get_function_calling_object( if include_function and exclude_function: raise ValueError("Cannot use both include_function and exclude_function at the same time.") if include_plugin: - include_plugin = [plugin.lower() for plugin in include_plugin] + include_plugin = [plugin for plugin in include_plugin] if exclude_plugin: - exclude_plugin = [plugin.lower() for plugin in exclude_plugin] + exclude_plugin = [plugin for plugin in exclude_plugin] if include_function: - include_function = [function.lower() for function in include_function] + include_function = [function for function in include_function] if exclude_function: - exclude_function = [function.lower() for function in exclude_function] + 
exclude_function = [function for function in exclude_function] result = [] for ( plugin_name, @@ -182,14 +181,14 @@ async def execute(kernel: Kernel, func: KernelFunction, arguments: KernelArgumen """Execute a function and return the result. Args: - kernel (Kernel): the kernel to use. - func (KernelFunction): the function to execute. - input_vars (ContextVariables): the input variables. + kernel: the kernel to use. + func: the function to execute. + arguments: the arguments to pass to the function. Returns: str: the result of the execution. """ - result = await kernel.invoke(func, arguments=arguments) + result = await kernel.invoke(functions=func, arguments=arguments) logger.info(f"Execution result: {result}") return result @@ -222,10 +221,11 @@ async def execute_tool_call(kernel: Kernel, tool_call: ToolCall) -> FunctionResu async def chat_completion_with_tool_call( kernel: Kernel, - arguments: KernelArguments, + arguments: Optional[KernelArguments] = None, chat_plugin_name: Optional[str] = None, chat_function_name: Optional[str] = None, chat_function: Optional[KernelFunction] = None, + chat_history: Optional[ChatHistory] = None, **kwargs: Dict[str, Any], ) -> FunctionResult: """Perform a chat completion with auto-executing function calling. @@ -246,6 +246,8 @@ async def chat_completion_with_tool_call( chat_function_name: the function name of the chat function. chat_function: the chat function, if not provided, it will be retrieved from the kernel. make sure to provide either the chat_function or the chat_plugin_name and chat_function_name. + chat_history: the chat history to use, if not provided, will attempt to retrieve from arguments + with key "chat_history". max_function_calls: the maximum number of function calls to execute, defaults to 5. current_call_count: the current number of function calls executed. @@ -253,6 +255,9 @@ async def chat_completion_with_tool_call( returns: the FunctionResult with the result of the chat completion, just like a regular invoke/run_async. """ + + chat_history or arguments["chat_history"] + # check the number of function calls max_function_calls = kwargs.get("max_function_calls", 5) current_call_count = kwargs.get("current_call_count", 0) @@ -261,25 +266,47 @@ async def chat_completion_with_tool_call( if chat_plugin_name is None or chat_function_name is None: raise ValueError("Please provide either the chat_function or the chat_plugin_name and chat_function_name.") chat_function = kernel.func(plugin_name=chat_plugin_name, function_name=chat_function_name) - assert isinstance( - chat_function.chat_prompt_template, OpenAIChatPromptTemplate + assert issubclass( + type(chat_function.ai_service), Union[ChatCompletionClientBase, TextCompletionClientBase] ), "Please make sure to initialize your chat function with the OpenAIChatPromptTemplate class." - settings = chat_function.chat_prompt_template.prompt_config.execution_settings + settings = chat_function.prompt_execution_settings[chat_function.ai_service.service_id] + + if not arguments: + arguments = KernelArguments() arguments.execution_settings[settings.service_id] = settings + arguments["user_input"] = ("\n").join([f"{msg.role}: {msg.content}" for msg in chat_history]) if current_call_count >= max_function_calls: + settings.functions = [] + result = await chat_function.invoke( + kernel=kernel, + arguments=arguments, # when the maximum number of function calls is reached, execute the chat function without Functions. 
- for settings in arguments.execution_settings.values(): - settings.tool_choice = None - result = await chat_function.invoke(kernel=kernel, arguments=arguments) - if hasattr(result.value[0], "tool_message") and (tool_message := result.value[0].tool_message): - chat_function.chat_prompt_template.add_function_response_message(name="tool", content=tool_message) - if not (tool_calls := result.value[0].tool_calls): - chat_function.chat_prompt_template.add_assistant_message(message=str(result)) + ) + if not isinstance(result, FunctionResult) and result.value[0].tool_call is None: return result - await asyncio.gather(*[execute_and_store_tool_call(kernel, tool_call, chat_function) for tool_call in tool_calls]) + function_call = next( + ( + fc + for fc in (result.value[0].function_call or result.value[0].tool_calls or [None]) + if isinstance(fc, (FunctionCall, ToolCall)) + ), + None, + ) + if function_call: + execute_call = execute_tool_call if isinstance(function_call, ToolCall) else execute_function_call + result = await execute_call(kernel, function_call) + tool_call_id = function_call.id + else: + return result + # add the result to the chat prompt template + chat_history.add_tool_message() + chat_function.chat_prompt_template.add_function_response_message( + name=function_call.function.name, content=str(result), tool_call_id=tool_call_id + ) + # request another completion return await chat_completion_with_tool_call( - kernel=kernel, + kernel, arguments=arguments, chat_function=chat_function, max_function_calls=max_function_calls, diff --git a/python/semantic_kernel/connectors/ai/prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/prompt_execution_settings.py index 769a880f63c0..50949225ce79 100644 --- a/python/semantic_kernel/connectors/ai/prompt_execution_settings.py +++ b/python/semantic_kernel/connectors/ai/prompt_execution_settings.py @@ -14,7 +14,7 @@ class PromptExecutionSettings(KernelBaseModel): prompt execution settings that each services returns by using the service.get_prompt_execution_settings() method. Parameters: - service_id (str, optional): The service ID to use for the request. Defaults to None. + service_id (str): The service ID to use for the request. extension_data (Dict[str, Any], optional): Any additional data to send with the request. Defaults to None. kwargs (Any): Additional keyword arguments, these are attempted to parse into the keys of the specific prompt execution settings. diff --git a/python/semantic_kernel/connectors/memory/redis/README.md b/python/semantic_kernel/connectors/memory/redis/README.md index 3373990ecf19..b0af63099f05 100644 --- a/python/semantic_kernel/connectors/memory/redis/README.md +++ b/python/semantic_kernel/connectors/memory/redis/README.md @@ -1,6 +1,6 @@ # semantic_kernel.connectors.memory.redis -This connector uses Redis to implement Semantic Memory. It requires the [RediSearch](https://redis.io/docs/interact/search-and-query/) module to be enabled on Redis to implement vector similarity search. +This connector uses Redis to implement Semantic Memory. It requires the [RediSearch](https://redis.io/docs/interact/search-and-query/) module to be enabled on Redis to implement vector similarity search. See the [.net README](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/Connectors/Connectors.Memory.Redis/README.md) for more information. 
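A rough usage sketch of the refactored `chat_completion_with_tool_call` helper shown above. The plugin and function names are placeholders, and the chat function is assumed to have been registered with tool definitions in its execution settings:

```python
# Sketch only: "ChatBot"/"Chat" are placeholder plugin and function names; the chat
# function is assumed to have been created with tool definitions in its settings.
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai.utils import chat_completion_with_tool_call
from semantic_kernel.functions.kernel_arguments import KernelArguments
from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory


async def ask_with_tools(kernel: Kernel, user_input: str):
    chat_history = ChatHistory(system_message="You are a helpful assistant.")
    chat_history.add_user_message(user_input)

    # The helper reads the history either from the chat_history parameter or from
    # the "chat_history" key of the KernelArguments.
    arguments = KernelArguments(chat_history=chat_history)

    return await chat_completion_with_tool_call(
        kernel=kernel,
        arguments=arguments,
        chat_plugin_name="ChatBot",
        chat_function_name="Chat",
        chat_history=chat_history,
        max_function_calls=3,
    )
```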
@@ -22,10 +22,10 @@ docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:la kernel = sk.Kernel() api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_text_completion_service("dv", sk_oai.OpenAITextCompletion("text-davinci-003", api_key, org_id)) - kernel.add_text_embedding_generation_service("ada", sk_oai.OpenAITextEmbedding("text-embedding-ada-002", api_key, org_id)) + kernel.add_service(sk_oai.OpenAITextCompletion(service_id="dv", ai_model_id="text-davinci-003", api_key=api_key, org_id=org_id)) + embedding_generator = sk_oai.OpenAITextEmbedding(service_id="ada", ai_model_id="text-embedding-ada-002", api_key=api_key, org_id=org_id) + kernel.add_service(embedding_generator) redis_connection_string = sk.redis_settings_from_dot_env() - kernel.register_memory_store(memory_store=RedisMemoryStore(connection_string=redis_connection_string)) + kernel.use_memory(storage=RedisMemoryStore(connection_string=redis_connection_string), embeddings_generator=embedding_generator) ``` - diff --git a/python/semantic_kernel/contents/kernel_content.py b/python/semantic_kernel/contents/kernel_content.py index 18efc2d5818c..480089d1a4d4 100644 --- a/python/semantic_kernel/contents/kernel_content.py +++ b/python/semantic_kernel/contents/kernel_content.py @@ -12,7 +12,7 @@ class KernelContent(KernelBaseModel, ABC): inner_content: Optional[Any] = None ai_model_id: Optional[str] = None - metadata: Dict[str, Any] = Field(default_factory=dict) + metadata: Optional[Dict[str, Any]] = Field(default_factory=dict) @abstractmethod def __str__(self) -> str: diff --git a/python/semantic_kernel/core_plugins/conversation_summary_plugin.py b/python/semantic_kernel/core_plugins/conversation_summary_plugin.py index 52ad6ef922c1..f12786a56f10 100644 --- a/python/semantic_kernel/core_plugins/conversation_summary_plugin.py +++ b/python/semantic_kernel/core_plugins/conversation_summary_plugin.py @@ -7,9 +7,11 @@ else: from typing_extensions import Annotated + if TYPE_CHECKING: from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.kernel import Kernel + from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig class ConversationSummaryPlugin: @@ -24,7 +26,7 @@ class ConversationSummaryPlugin: _summarize_conversation_prompt_template = ( "BEGIN CONTENT TO SUMMARIZE:\n{{" - + "$INPUT" + + "$input" + "}}\nEND CONTENT TO SUMMARIZE.\nSummarize the conversation in 'CONTENT TO" " SUMMARIZE', identifying main points of discussion and any" " conclusions that were reached.\nDo not incorporate other general" @@ -32,15 +34,22 @@ class ConversationSummaryPlugin: " or tags.\n\nBEGIN SUMMARY:\n" ) - def __init__(self, kernel: "Kernel", return_key: str = "summary"): + def __init__( + self, kernel: "Kernel", prompt_template_config: "PromptTemplateConfig", return_key: str = "summary" + ) -> None: + """ + Initializes a new instance of the ConversationSummaryPlugin class. + + :param kernel: The kernel instance. + :param prompt_template_config: The prompt template configuration. + :param return_key: The key to use for the return value. 
+ """ self.return_key = return_key - self._summarizeConversationFunction = kernel.create_semantic_function( + self._summarizeConversationFunction = kernel.create_function_from_prompt( ConversationSummaryPlugin._summarize_conversation_prompt_template, plugin_name=ConversationSummaryPlugin.__name__, - description=("Given a section of a conversation transcript, summarize the part of" " the conversation."), - max_tokens=ConversationSummaryPlugin._max_tokens, - temperature=0.1, - top_p=0.5, + function_name="SummarizeConversation", + prompt_template_config=prompt_template_config, ) @kernel_function( diff --git a/python/semantic_kernel/functions/function_result.py b/python/semantic_kernel/functions/function_result.py index e86358e07b69..37231a1d7380 100644 --- a/python/semantic_kernel/functions/function_result.py +++ b/python/semantic_kernel/functions/function_result.py @@ -38,6 +38,10 @@ def __str__(self) -> str: try: if isinstance(self.value, list): return str(self.value[0]) + elif isinstance(self.value, dict): + # TODO: remove this once function result doesn't include input args + # This is so an integration test can pass. + return str(list(self.value.values())[-1]) return str(self.value) except Exception as e: logger.warning(f"Failed to convert value to string: {e}") diff --git a/python/semantic_kernel/functions/kernel_function.py b/python/semantic_kernel/functions/kernel_function.py index 92f28b96c2ea..04c0d51337b9 100644 --- a/python/semantic_kernel/functions/kernel_function.py +++ b/python/semantic_kernel/functions/kernel_function.py @@ -6,7 +6,7 @@ import sys from functools import wraps from inspect import isawaitable -from typing import TYPE_CHECKING, Any, AsyncIterable, Callable, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, AsyncIterable, Callable, ClassVar, Dict, List, Optional, Union from pydantic import Field, StringConstraints @@ -30,10 +30,12 @@ from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata from semantic_kernel.functions.kernel_parameter_metadata import KernelParameterMetadata +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory from semantic_kernel.prompt_template.chat_prompt_template import ChatPromptTemplate -from semantic_kernel.prompt_template.semantic_function_config import ( - SemanticFunctionConfig, -) +from semantic_kernel.prompt_template.kernel_prompt_template import KernelPromptTemplate +from semantic_kernel.prompt_template.prompt_template_base import PromptTemplateBase +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig +from semantic_kernel.utils.naming import generate_random_ascii_name if TYPE_CHECKING: from semantic_kernel.functions.kernel_plugin_collection import KernelPluginCollection @@ -43,12 +45,11 @@ if platform.system() == "Windows" and sys.version_info >= (3, 8, 0): asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) - logger: logging.Logger = logging.getLogger(__name__) def store_results(chat_prompt: ChatPromptTemplate, results: List["ChatMessageContent"]): - """Stores specific results in the chat prompt template.""" + """Stores specific results in the context and chat prompt.""" if hasattr(results[0], "tool_message") and results[0].tool_message is not None: chat_prompt.add_message(role="tool", message=results[0].tool_message) chat_prompt.add_message( @@ -70,30 +71,34 @@ class KernelFunction(KernelBaseModel): description (Optional[str]): The 
description of the function. name (str): The name of the function. Must be upper/lower case letters and underscores with a minimum length of 1. - is_semantic (bool): Whether the function is semantic. + is_prompt (bool): Whether the function is semantic. stream_function (Optional[Callable[..., Any]]): The stream function for the function. parameters (List[KernelParameterMetadata]): The parameters for the function. return_parameter (Optional[KernelParameterMetadata]): The return parameter for the function. - delegate_type (DelegateTypes): The delegate type for the function. function (Callable[..., Any]): The function to call. plugins (Optional[KernelPluginCollection]): The collection of plugins. - ai_service (Optional[Union[TextCompletionClientBase, ChatCompletionClientBase]]): The AI service. - ai_prompt_execution_settings (PromptExecutionSettings): The AI prompt execution settings. - chat_prompt_template (Optional[ChatPromptTemplate]): The chat prompt template. + prompt_execution_settings (PromptExecutionSettings): The AI prompt execution settings. + prompt_template_config (PromptTemplateConfig): The prompt template configuration. + metadata (Optional[KernelFunctionMetadata]): The metadata for the function. + + Note: the CHAT_HISTORY_TAG is a class-level attribute that is used to tag the chat history in the + arguments of the function. Pydantic ignores this attribute when performing model validation. """ plugin_name: Annotated[str, StringConstraints(pattern=r"^[A-Za-z_]+$", min_length=1)] description: Optional[str] = Field(default=None) name: Annotated[str, StringConstraints(pattern=r"^[A-Za-z_]+$", min_length=1)] - is_semantic: bool = Field(...) + is_prompt: bool = Field(...) stream_function: Optional[Callable[..., Any]] = Field(default=None) parameters: List[KernelParameterMetadata] = Field(...) return_parameter: Optional[KernelParameterMetadata] = None function: Callable[..., Any] = Field(...) 
plugins: Optional["KernelPluginCollection"] = Field(default=None) - # ai_service: Optional[Union[TextCompletionClientBase, ChatCompletionClientBase]] = Field(default=None) prompt_execution_settings: Dict[str, PromptExecutionSettings] = Field(default_factory=dict) - chat_prompt_template: Optional[ChatPromptTemplate] = Field(default=None) + prompt_template_config: Optional[PromptTemplateConfig] = Field(default=PromptTemplateConfig) + metadata: Optional[KernelFunctionMetadata] = Field(default=KernelFunctionMetadata) + + CHAT_HISTORY_TAG: ClassVar[str] = "chat_history" def __init__( self, @@ -102,9 +107,10 @@ def __init__( description: str, plugin_name: str, function_name: str, - is_semantic: bool, + is_prompt: bool, return_parameter: Optional[KernelParameterMetadata] = None, stream_function: Optional[Callable[..., Any]] = None, + prompt_template_config: Optional[PromptTemplateConfig] = None, **kwargs: Dict[str, Any], ) -> None: """ @@ -116,11 +122,19 @@ def __init__( description (str): The description for the function plugin_name (str): The name of the plugin name (str): The name of the function - is_semantic (bool): Whether the function is semantic + is_prompt (bool): Whether the function is semantic delegate_stream_function (Optional[Callable[..., Any]]): The delegate stream function for the function kwargs (Dict[str, Any]): Additional keyword arguments """ - chat_prompt_template = kwargs.pop("chat_prompt_template", None) + + metadata = KernelFunctionMetadata( + name=function_name, + description=description, + parameters=parameters, + return_parameter=return_parameter, + is_prompt=is_prompt, + plugin_name=plugin_name, + ) super().__init__( function=function, @@ -129,9 +143,11 @@ def __init__( description=description, plugin_name=plugin_name, name=function_name, - is_semantic=is_semantic, + is_prompt=is_prompt, stream_function=stream_function, - chat_prompt_template=chat_prompt_template, + prompt_template_config=prompt_template_config, + metadata=metadata, + **kwargs, ) @property @@ -170,6 +186,7 @@ def from_native_method(method: Callable[..., Any], plugin_name: str) -> "KernelF default_value=param["default_value"], type=param.get("type", "str"), required=param.get("required", False), + expose=True, ) ) return_param = KernelParameterMetadata( @@ -208,28 +225,55 @@ async def _non_streaming_function(*args, **kwargs): parameters=parameters, return_parameter=return_param, stream_function=streaming_method, - is_semantic=False, + is_prompt=False, ) @staticmethod - def from_semantic_config( - plugin_name: str, - function_name: str, - function_config: SemanticFunctionConfig, + def from_prompt( + prompt: str, + execution_settings: Optional[PromptExecutionSettings] = None, + plugin_name: Optional[str] = None, + function_name: Optional[str] = None, + description: Optional[str] = None, + template_format: Optional[str] = None, + prompt_template: Optional[PromptTemplateBase] = None, + prompt_template_config: Optional[PromptTemplateConfig] = None, ) -> "KernelFunction": """ - Create a KernelFunction from a semantic configuration. 
+ Create a Kernel Function from a prompt Args: - plugin_name (str): The name of the plugin - function_name (str): The name of the function - function_config (SemanticFunctionConfig): The function configuration + prompt (str): The prompt + execution_settings (Optional[PromptExecutionSettings]): The execution settings + plugin_name (Optional[str]): The name of the plugin + function_name (Optional[str]): The name of the function + description (Optional[str]): The description of the function + template_format (Optional[str]): The template format + prompt_template (Optional[PromptTemplateBase]): The prompt template + prompt_template_config (Optional[PromptTemplateConfig]): The prompt template configuration Returns: KernelFunction: The kernel function """ - if function_config is None: - raise ValueError("Function configuration cannot be `None`") + + if prompt_template: + if not template_format: + raise ValueError(f"Template format cannot be `None` when providing a {prompt_template}") + + if not plugin_name: + plugin_name = f"p_{generate_random_ascii_name()}" + + if not prompt_template_config: + prompt_template_config = PromptTemplateConfig( + name=function_name, + template_format=template_format if template_format else "semantic-kernel", + description=description if description else "Generic function, unknown purpose", + template=prompt, + execution_settings=execution_settings if execution_settings else PromptExecutionSettings(), + ) + + if not prompt_template: + prompt_template = KernelPromptTemplate(prompt_template_config) async def _local_func( function: KernelFunctionMetadata, @@ -237,69 +281,83 @@ async def _local_func( service: Union[TextCompletionClientBase, ChatCompletionClientBase], request_settings: PromptExecutionSettings, arguments: KernelArguments, + chat_history: Optional[ChatHistory] = None, **kwargs: Dict[str, Any], ) -> "FunctionResult": if service is None: raise ValueError("AI LLM service cannot be `None`") - # from semantic_kernel.functions.kernel_function import KernelFunction # noqa # pylint: disable=unused-import - # FunctionResult.model_rebuild() + prompt = await prompt_template.render(kernel, arguments) - if isinstance(service, ChatCompletionClientBase): - try: - messages = await function_config.prompt_template.render_messages(kernel, arguments) - completions = await service.complete_chat(messages, request_settings) + if not chat_history or len(chat_history) == 0: + chat_history = ChatHistory(system_message=prompt) + else: + chat_history.add_user_message(prompt) + + try: + if isinstance(service, ChatCompletionClientBase): + completions = await service.complete_chat(chat_history, request_settings) return FunctionResult( function=function, value=completions, metadata={ - "messages": messages, + "messages": chat_history, "arguments": arguments, "metadata": [completion.metadata for completion in completions], }, ) - except Exception as exc: - logger.error(f"Error occurred while invoking function {function.name}: {exc}") - raise exc + except Exception as exc: + logger.error(f"Error occurred while invoking function {function.name}: {exc}") + raise exc + try: - prompt = await function_config.prompt_template.render(kernel, arguments) - completions = await service.complete(prompt, request_settings) - return FunctionResult( - function=function, - value=completions, - metadata={ - "prompt": prompt, - "arguments": arguments, - "metadata": [completion.metadata for completion in completions], - }, - ) + if isinstance(service, TextCompletionClientBase): + completions = await 
service.complete(prompt, request_settings) + return FunctionResult( + function=function, + value=completions, + metadata={ + "prompt": prompt, + "arguments": arguments, + "metadata": [completion.metadata for completion in completions], + }, + ) except Exception as e: logger.error(f"Error occurred while invoking function {function.name}: {e}") raise e + raise ValueError(f"Service `{type(service)}` is not a valid AI service") + async def _local_stream_func( function: KernelFunctionMetadata, kernel: "Kernel", service: Union[TextCompletionClientBase, ChatCompletionClientBase], request_settings: PromptExecutionSettings, arguments: KernelArguments, + chat_history: Optional[ChatHistory] = None, **kwargs: Dict[str, Any], ) -> AsyncIterable[Union[FunctionResult, List[StreamingKernelContent]]]: if service is None: raise ValueError("AI LLM service cannot be `None`") + prompt = await prompt_template.render(kernel, arguments) + + if not chat_history or len(chat_history) == 0: + chat_history = ChatHistory(system_message=prompt) + else: + chat_history.add_user_message(prompt) + try: if isinstance(service, ChatCompletionClientBase): - messages = await function_config.prompt_template.render_messages(kernel, arguments) async for partial_content in service.complete_chat_stream( - messages=messages, settings=request_settings + chat_history=chat_history, settings=request_settings ): yield partial_content - else: - prompt = await function_config.prompt_template.render(kernel, arguments) + elif isinstance(service, TextCompletionClientBase): async for partial_content in service.complete_stream(prompt, request_settings): yield partial_content - + else: + raise ValueError(f"Service `{type(service)}` is not a valid AI service") except Exception as e: logger.error(f"Error occurred while invoking function {function.name}: {e}") raise e @@ -346,11 +404,11 @@ async def _local_stream_func( expose=False, ), ] - semantic_function_params.extend(function_config.prompt_template.get_parameters()) + semantic_function_params.extend(prompt_template_config.get_kernel_parameter_metadata()) return KernelFunction( function_name=function_name, plugin_name=plugin_name, - description=function_config.prompt_template_config.description, + description=description, function=_local_func, parameters=semantic_function_params, return_parameter=KernelParameterMetadata( @@ -361,8 +419,8 @@ async def _local_stream_func( required=True, ), stream_function=_local_stream_func, - is_semantic=True, - chat_prompt_template=function_config.prompt_template if function_config.has_chat_prompt else None, + is_prompt=True, + prompt_template_config=prompt_template_config, ) def set_default_plugin_collection(self, plugins: "KernelPluginCollection") -> "KernelFunction": @@ -374,7 +432,7 @@ def describe(self) -> KernelFunctionMetadata: name=self.name, plugin_name=self.plugin_name, description=self.description or "", - is_semantic=self.is_semantic, + is_prompt=self.is_prompt, parameters=self.parameters, ) @@ -384,7 +442,20 @@ async def __call__( arguments: Optional[KernelArguments] = None, **kwargs: Dict[str, Any], ) -> "FunctionResult": - return await self.invoke(kernel, arguments, **kwargs) + """Invoke the function with the given arguments. + + Args: + kernel (Kernel): The kernel + arguments (Optional[KernelArguments]): The Kernel arguments. + Optional, defaults to None. 
+ kwargs (Dict[str, Any]): Additional keyword arguments that will be + + Returns: + FunctionResult: The result of the function + """ + if not arguments: + arguments = KernelArguments(**kwargs) + return await self.invoke(kernel, arguments) async def invoke( self, @@ -392,9 +463,22 @@ async def invoke( arguments: Optional[KernelArguments] = None, **kwargs: Dict[str, Any], ) -> "FunctionResult": + """Invoke the function with the given arguments. + + Args: + kernel (Kernel): The kernel + arguments (KernelArguments): The Kernel arguments + kwargs (Dict[str, Any]): Additional keyword arguments that will be + added to the KernelArguments. + + Returns: + FunctionResult: The result of the function + """ if not arguments: arguments = KernelArguments(**kwargs) function_arguments = self.gather_function_parameters(kernel, arguments) + if self.is_prompt and self.CHAT_HISTORY_TAG not in function_arguments: + function_arguments[self.CHAT_HISTORY_TAG] = ChatHistory() logger.debug("Invoking %s with arguments: %s", self.name, function_arguments) try: result = self.function(**function_arguments) @@ -418,6 +502,14 @@ async def invoke_stream( **kwargs: Dict[str, Any], ) -> AsyncIterable[Union[FunctionResult, List[Union[StreamingKernelContent, Any]]]]: """ + Invoke a stream async function with the given arguments. + + Args: + kernel (Kernel): The kernel + arguments (KernelArguments): The Kernel arguments + kwargs (Dict[str, Any]): Additional keyword arguments that will be + added to the KernelArguments. + Yields: StreamingKernelContent or FunctionResult -- The results of the function, if there is an error a FunctionResult is yielded. @@ -427,6 +519,8 @@ async def invoke_stream( if not self.stream_function: raise ValueError("Function does not support streaming") function_arguments = self.gather_function_parameters(kernel, arguments) + if self.is_prompt and self.CHAT_HISTORY_TAG not in function_arguments: + function_arguments[self.CHAT_HISTORY_TAG] = ChatHistory() logger.debug("Invoking %s with arguments: %s", self.name, function_arguments) try: async for stream_msg in self.stream_function(**function_arguments): @@ -438,6 +532,7 @@ async def invoke_stream( ) def gather_function_parameters(self, kernel: "Kernel", arguments: "KernelArguments") -> Dict[str, Any]: + """Gathers the function parameters from the arguments.""" function_arguments: Dict[str, Any] = {} for param in self.parameters: if param.name == "function": @@ -455,7 +550,16 @@ def gather_function_parameters(self, kernel: "Kernel", arguments: "KernelArgumen if param.name == "arguments": function_arguments[param.name] = arguments continue - if self.is_semantic: + if param.name == "prompt_template_config": + function_arguments[param.name] = self.prompt_template_config + continue + if param.name == self.CHAT_HISTORY_TAG: + chat = arguments.get(self.CHAT_HISTORY_TAG, ChatHistory()) + if not isinstance(chat, ChatHistory): + raise ValueError(f"Parameter {param.name} is not a valid ChatHistory object.") + function_arguments[param.name] = chat + continue + if self.is_prompt: # a semantic function will use the arguments (KernelArguments) instead of named arguments continue if param.name in arguments: @@ -463,7 +567,13 @@ def gather_function_parameters(self, kernel: "Kernel", arguments: "KernelArgumen continue if param.required: raise ValueError(f"Parameter {param.name} is required but not provided in the arguments.") - logger.debug( - f"Parameter {param.name} is not provided, using default value {param.default_value}" - ) # default value is not set here but in the 
functions + logger.debug(f"Parameter {param.name} is not provided, using default value {param.default_value}") + if self.prompt_template_config: + self.add_default_values(function_arguments, self.prompt_template_config) return function_arguments + + def add_default_values(self, arguments: dict[str, Any], prompt_template_config: PromptTemplateConfig) -> None: + """Adds default values to the arguments.""" + for parameter in prompt_template_config.input_variables: + if not arguments.get(parameter.name) and parameter.default not in {None, "", False, 0}: + arguments[parameter.name] = parameter.default diff --git a/python/semantic_kernel/functions/kernel_function_decorator.py b/python/semantic_kernel/functions/kernel_function_decorator.py index f560fd092cb3..2192816d7d5e 100644 --- a/python/semantic_kernel/functions/kernel_function_decorator.py +++ b/python/semantic_kernel/functions/kernel_function_decorator.py @@ -96,6 +96,7 @@ def _parse_annotation(annotation: Parameter) -> Tuple[str, str, bool]: def _parse_internal_annotation(annotation: Parameter, required: bool) -> Tuple[str, bool]: logger.debug(f"Internal {annotation=}") + logger.debug(f"{annotation=}") if hasattr(annotation, "__forward_arg__"): return annotation.__forward_arg__, required if getattr(annotation, "__name__", None) == "Optional": diff --git a/python/semantic_kernel/functions/kernel_function_metadata.py b/python/semantic_kernel/functions/kernel_function_metadata.py index cfd908daea81..cf5b390b249d 100644 --- a/python/semantic_kernel/functions/kernel_function_metadata.py +++ b/python/semantic_kernel/functions/kernel_function_metadata.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. -from typing import List +from typing import List, Optional from pydantic import Field @@ -10,12 +10,13 @@ class KernelFunctionMetadata(KernelBaseModel): - name: str = Field(pattern=FUNCTION_NAME_REGEX, min_length=1) + name: str = Field(pattern=FUNCTION_NAME_REGEX) plugin_name: str - description: str + description: Optional[str] = Field(default=None) parameters: List[KernelParameterMetadata] = Field(default_factory=list) - is_semantic: bool - is_asynchronous: bool = True + is_prompt: bool + is_asynchronous: Optional[bool] = Field(default=True) + return_parameter: Optional[KernelParameterMetadata] = None def __eq__(self, other: "KernelFunctionMetadata") -> bool: """ @@ -35,6 +36,7 @@ def __eq__(self, other: "KernelFunctionMetadata") -> bool: and self.plugin_name == other.plugin_name and self.description == other.description and self.parameters == other.parameters - and self.is_semantic == other.is_semantic + and self.is_prompt == other.is_prompt and self.is_asynchronous == other.is_asynchronous + and self.return_parameter == other.return_parameter ) diff --git a/python/semantic_kernel/functions/kernel_parameter_metadata.py b/python/semantic_kernel/functions/kernel_parameter_metadata.py index 5cba6a535f8c..36b94382514b 100644 --- a/python/semantic_kernel/functions/kernel_parameter_metadata.py +++ b/python/semantic_kernel/functions/kernel_parameter_metadata.py @@ -16,4 +16,4 @@ class KernelParameterMetadata(KernelBaseModel): type_: Optional[str] = Field(default="str", alias="type") required: Optional[bool] = False # expose is used to distinguish between parameters that should be exposed to tool calling and those that should not - expose: Optional[bool] = Field(default=True, exclude=True) + expose: Optional[bool] = Field(default=False, exclude=True) diff --git a/python/semantic_kernel/functions/kernel_plugin.py 
b/python/semantic_kernel/functions/kernel_plugin.py index fc54f15f68ee..7f45ec23df86 100644 --- a/python/semantic_kernel/functions/kernel_plugin.py +++ b/python/semantic_kernel/functions/kernel_plugin.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import sys -from typing import Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional if sys.version_info >= (3, 9): from typing import Annotated @@ -10,9 +10,12 @@ from pydantic import Field, StringConstraints -from semantic_kernel.functions.kernel_function import KernelFunction from semantic_kernel.kernel_pydantic import KernelBaseModel +if TYPE_CHECKING: + from semantic_kernel.functions.kernel_function import KernelFunction + from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata + class KernelPlugin(KernelBaseModel): """ @@ -28,9 +31,11 @@ class KernelPlugin(KernelBaseModel): name: Annotated[str, StringConstraints(pattern=r"^[A-Za-z_]+$", min_length=1)] description: Optional[str] = Field(default=None) - functions: Optional[Dict[str, KernelFunction]] = Field(default_factory=dict) + functions: Optional[Dict[str, "KernelFunction"]] = Field(default_factory=dict) - def __init__(self, name: str, description: Optional[str] = None, functions: Optional[List[KernelFunction]] = None): + def __init__( + self, name: str, description: Optional[str] = None, functions: Optional[List["KernelFunction"]] = None + ): """ Initialize a new instance of the KernelPlugin class @@ -105,3 +110,23 @@ def from_functions( A KernelPlugin instance. """ return cls(name=plugin_name, description=description, functions=functions) + + def get_functions_metadata(self) -> List["KernelFunctionMetadata"]: + """ + Get the metadata for the functions in the plugin. + + Returns: + A list of KernelFunctionMetadata instances. + """ + return [ + KernelFunctionMetadata( + name=func.name, + plugin_name=self.name, + description=func.description, + parameters=func.parameters, + is_prompt=func.is_prompt, + is_asynchronous=func.is_asynchronous, + return_parameter=func.return_parameter, + ) + for func in self.functions.values() + ] diff --git a/python/semantic_kernel/functions/kernel_plugin_collection.py b/python/semantic_kernel/functions/kernel_plugin_collection.py index 81204870ae56..a9d5f96706b5 100644 --- a/python/semantic_kernel/functions/kernel_plugin_collection.py +++ b/python/semantic_kernel/functions/kernel_plugin_collection.py @@ -1,10 +1,9 @@ # Copyright (c) Microsoft. All rights reserved. -from typing import Any, Dict, Iterable, List, Optional, TypeVar, Union +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, TypeVar, Union from pydantic import Field -from semantic_kernel.functions.kernel_function import KernelFunction from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata from semantic_kernel.functions.kernel_plugin import KernelPlugin from semantic_kernel.kernel_pydantic import KernelBaseModel @@ -12,6 +11,9 @@ # To support Python 3.8, need to use TypeVar since Iterable is not scriptable KernelPluginType = TypeVar("KernelPluginType", bound=KernelPlugin) +if TYPE_CHECKING: + from semantic_kernel.functions.kernel_function import KernelFunction + class KernelPluginCollection(KernelBaseModel): """ @@ -21,7 +23,7 @@ class KernelPluginCollection(KernelBaseModel): plugins (Dict[str, KernelPlugin]): The plugins in the collection, indexed by their name. 
""" - plugins: Optional[Dict[str, KernelPlugin]] = Field(default_factory=dict) + plugins: Optional[Dict[str, "KernelPlugin"]] = Field(default_factory=dict) def __init__(self, plugins: Union[None, "KernelPluginCollection", Iterable[KernelPluginType]] = None): """ @@ -50,7 +52,7 @@ def __init__(self, plugins: Union[None, "KernelPluginCollection", Iterable[Kerne super().__init__(plugins=plugins) @staticmethod - def _process_plugins_iterable(plugins_input: Iterable[KernelPlugin]) -> Dict[str, KernelPlugin]: + def _process_plugins_iterable(plugins_input: Iterable[KernelPlugin]) -> Dict[str, "KernelPlugin"]: plugins_dict = {} for plugin in plugins_input: if plugin is None: @@ -60,7 +62,7 @@ def _process_plugins_iterable(plugins_input: Iterable[KernelPlugin]) -> Dict[str plugins_dict[plugin.name] = plugin return plugins_dict - def add(self, plugin: KernelPlugin) -> None: + def add(self, plugin: "KernelPlugin") -> None: """ Add a single plugin to the collection @@ -76,7 +78,7 @@ def add(self, plugin: KernelPlugin) -> None: raise ValueError(f"Plugin with name {plugin.name} already exists") self.plugins[plugin.name] = plugin - def add_plugin_from_functions(self, plugin_name: str, functions: List[KernelFunction]) -> None: + def add_plugin_from_functions(self, plugin_name: str, functions: List["KernelFunction"]) -> None: """ Add a function to a new plugin in the collection @@ -95,7 +97,7 @@ def add_plugin_from_functions(self, plugin_name: str, functions: List[KernelFunc plugin = KernelPlugin.from_functions(plugin_name=plugin_name, functions=functions) self.plugins[plugin_name] = plugin - def add_functions_to_plugin(self, functions: List[KernelFunction], plugin_name: str) -> None: + def add_functions_to_plugin(self, functions: List["KernelFunction"], plugin_name: str) -> None: """ Add functions to a plugin in the collection @@ -120,7 +122,7 @@ def add_functions_to_plugin(self, functions: List[KernelFunction], plugin_name: raise ValueError(f"Function with name '{func.name}' already exists in plugin '{plugin_name}'") plugin.functions[func.name] = func - def add_list_of_plugins(self, plugins: List[KernelPlugin]) -> None: + def add_list_of_plugins(self, plugins: List["KernelPlugin"]) -> None: """ Add a list of plugins to the collection @@ -136,7 +138,7 @@ def add_list_of_plugins(self, plugins: List[KernelPlugin]) -> None: for plugin in plugins: self.add(plugin) - def remove(self, plugin: KernelPlugin) -> bool: + def remove(self, plugin: "KernelPlugin") -> bool: """ Remove a plugin from the collection @@ -185,13 +187,13 @@ def clear(self): self.plugins.clear() def get_list_of_function_metadata( - self, include_semantic: bool = True, include_native: bool = True + self, include_prompt: bool = True, include_native: bool = True ) -> List[KernelFunctionMetadata]: """ Get a list of the function metadata in the plugin collection Args: - include_semantic (bool): Whether to include semantic functions in the list. + include_prompt (bool): Whether to include semantic functions in the list. include_native (bool): Whether to include native functions in the list. 
Returns: @@ -203,7 +205,7 @@ def get_list_of_function_metadata( func.describe() for plugin in self.plugins.values() for func in plugin.functions.values() - if (include_semantic and func.is_semantic) or (include_native and not func.is_semantic) + if (include_prompt and func.is_prompt) or (include_native and not func.is_prompt) ] def __iter__(self) -> Any: diff --git a/python/semantic_kernel/functions/prompt_rendering_result.py b/python/semantic_kernel/functions/prompt_rendering_result.py new file mode 100644 index 000000000000..1a2ef4115537 --- /dev/null +++ b/python/semantic_kernel/functions/prompt_rendering_result.py @@ -0,0 +1,23 @@ +# Copyright (c) Microsoft. All rights reserved. + +from typing import Any, Optional + +from pydantic import Field + +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.kernel_pydantic import KernelBaseModel + + +class PromptRenderingResult(KernelBaseModel): + """ + Represents the result of rendering a prompt template. + + Attributes: + rendered_prompt (str): The rendered prompt. + ai_service (Any): The AI service that rendered the prompt. + prompt_template (PromptTemplateConfig): The prompt template used to render the prompt. + """ + + rendered_prompt: str + ai_service: Any + execution_settings: Optional[PromptExecutionSettings] = Field(default_factory=PromptExecutionSettings) diff --git a/python/semantic_kernel/kernel.py b/python/semantic_kernel/kernel.py index af1c5413b4a7..2991bcb1ee1b 100644 --- a/python/semantic_kernel/kernel.py +++ b/python/semantic_kernel/kernel.py @@ -10,7 +10,6 @@ from pydantic import Field -from semantic_kernel.connectors.ai.ai_exception import AIException from semantic_kernel.connectors.ai.chat_completion_client_base import ( ChatCompletionClientBase, ) @@ -37,13 +36,12 @@ from semantic_kernel.memory.null_memory import NullMemory from semantic_kernel.memory.semantic_text_memory import SemanticTextMemory from semantic_kernel.memory.semantic_text_memory_base import SemanticTextMemoryBase -from semantic_kernel.prompt_template.prompt_template import PromptTemplate +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.prompt_template.kernel_prompt_template import KernelPromptTemplate +from semantic_kernel.prompt_template.prompt_template_base import PromptTemplateBase from semantic_kernel.prompt_template.prompt_template_config import ( PromptTemplateConfig, ) -from semantic_kernel.prompt_template.semantic_function_config import ( - SemanticFunctionConfig, -) from semantic_kernel.reliability.pass_through_without_retry import ( PassThroughWithoutRetry, ) @@ -153,6 +151,8 @@ async def invoke_stream( """ if not arguments: arguments = KernelArguments(**kwargs) + if KernelFunction.CHAT_HISTORY_TAG not in arguments: + arguments[KernelFunction.CHAT_HISTORY_TAG] = ChatHistory() if isinstance(functions, KernelFunction): stream_function = functions results = [] @@ -195,7 +195,8 @@ async def invoke_stream( try: async for stream_message in stream_function.invoke_stream(self, arguments): if isinstance(stream_message, FunctionResult): - raise stream_message.metadata.get("error", Exception("Error occurred in stream function")) + err_msg = stream_message.metadata.get("error", None) + raise KernelException(f"Error occurred in stream function: {err_msg}") function_result.append(stream_message) yield stream_message except Exception as exc: @@ -269,6 +270,8 @@ async def invoke( """ if not arguments: arguments = KernelArguments(**kwargs) + if 
KernelFunction.CHAT_HISTORY_TAG not in arguments: + arguments[KernelFunction.CHAT_HISTORY_TAG] = ChatHistory() results = [] pipeline_step = 0 if not isinstance(functions, list): @@ -442,7 +445,7 @@ def import_plugin(self, plugin_instance: Union[Any, Dict[str, Any]], plugin_name candidates = inspect.getmembers(plugin_instance, inspect.ismethod) # Read every method from the plugin instance for _, candidate in candidates: - # If the method is a semantic function, register it + # If the method is a prompt function, register it if not hasattr(candidate, "__kernel_function__"): continue @@ -500,7 +503,17 @@ def import_native_plugin_from_directory(self, parent_directory: str, plugin_dire return {} - def import_semantic_plugin_from_directory(self, parent_directory: str, plugin_directory_name: str) -> KernelPlugin: + def import_plugin_from_prompt_directory( + self, service_id: str, parent_directory: str, plugin_directory_name: str + ) -> KernelPlugin: + """ + Import a plugin from a directory containing prompt templates. + + Args: + service_id (str): The service id + parent_directory (str): The parent directory + plugin_directory_name (str): The plugin directory name + """ CONFIG_FILE = "config.json" PROMPT_FILE = "skprompt.txt" @@ -526,16 +539,37 @@ def import_semantic_plugin_from_directory(self, parent_directory: str, plugin_di config_path = os.path.join(directory, CONFIG_FILE) with open(config_path, "r") as config_file: - config = PromptTemplateConfig.from_json(config_file.read()) + prompt_template_config = PromptTemplateConfig.from_json(config_file.read()) + prompt_template_config.name = function_name + + # TODO: remove this once the PromptTemplateConfig supports a dict of execution_settings + if ( + prompt_template_config.execution_settings + and "default" in prompt_template_config.execution_settings.extension_data + ): + prompt_template_config.execution_settings.extension_data = ( + prompt_template_config.execution_settings.extension_data["default"] + ) + + prompt_template_config.execution_settings.service_id = service_id # Load Prompt Template with open(prompt_path, "r") as prompt_file: - template = PromptTemplate(prompt_file.read(), self.prompt_template_engine, config) - - # Prepare lambda wrapping AI logic - function_config = SemanticFunctionConfig(config, template) - - functions += [self.register_semantic_function(plugin_directory_name, function_name, function_config)] + prompt = prompt_file.read() + prompt_template_config.template = prompt + + kernel_prompt_template = KernelPromptTemplate(prompt_template_config) + + functions += [ + self.create_function_from_prompt( + plugin_name=plugin_directory_name, + prompt_template=kernel_prompt_template, + prompt_template_config=prompt_template_config, + template_format="semantic-kernel", + function_name=function_name, + description=prompt_template_config.description, + ) + ] plugin = KernelPlugin(name=plugin_directory_name, functions=functions) @@ -551,82 +585,70 @@ def func(self, plugin_name: str, function_name: str) -> KernelFunction: raise ValueError(f"Function '{function_name}' not found in plugin '{plugin_name}'") return self.plugins[plugin_name][function_name] - def register_semantic_function( + def create_function_from_prompt( self, - plugin_name: Optional[str], - function_name: str, - function_config: SemanticFunctionConfig, + template: Optional[str] = None, + prompt_template_config: Optional[PromptTemplateConfig] = None, + execution_settings: Optional[PromptExecutionSettings] = None, + function_name: Optional[str] = None, + plugin_name: 
Optional[str] = None, + description: Optional[str] = None, + template_format: Optional[str] = None, + prompt_template: Optional[PromptTemplateBase] = None, + **kwargs: Any, ) -> KernelFunction: """ - Creates a semantic function from the plugin name, function name and function config + Create a Kernel Function from a prompt. Args: - plugin_name (Optional[str]): The name of the plugin. If empty, a random name will be generated. - function_name (str): The name of the function - function_config (SemanticFunctionConfig): The function config + template (Optional[str]): The prompt template + prompt_template_config (Optional[PromptTemplateConfig]): The prompt template configuration + execution_settings (Optional[PromptExecutionSettings]): The execution settings + function_name (Optional[str]): The name of the function + plugin_name (Optional[str]): The name of the plugin + description (Optional[str]): The description of the function + template_format (Optional[str]): The format of the prompt template + prompt_template (Optional[PromptTemplateBase]): The prompt template + kwargs (Any): Additional arguments Returns: - KernelFunction: The created semantic function - - Raises: - ValueError: If the plugin name or function name are invalid + KernelFunction: The created Kernel Function """ - if plugin_name is None or plugin_name == "": + if not plugin_name: plugin_name = f"p_{generate_random_ascii_name()}" assert plugin_name is not None # for type checker + if not function_name: + function_name = f"f_{generate_random_ascii_name()}" + assert function_name is not None # for type checker + validate_plugin_name(plugin_name) validate_function_name(function_name) - function = self._create_semantic_function(plugin_name, function_name, function_config) - self.add_plugin(plugin_name, [function]) - function.set_default_plugin_collection(self.plugins) - - return function - - def create_semantic_function( - self, - prompt_template: str, - function_name: Optional[str] = None, - plugin_name: Optional[str] = None, - description: Optional[str] = None, - **kwargs: Any, - ) -> "KernelFunction": - function_name = function_name if function_name is not None else f"f_{generate_random_ascii_name()}" - - config = PromptTemplateConfig( - description=(description if description is not None else "Generic function, unknown purpose"), - type="completion", - execution_settings=PromptExecutionSettings(extension_data=kwargs), + if not prompt_template_config.execution_settings: + if execution_settings: + prompt_template_config.execution_settings = execution_settings + else: + prompt_template_config.execution_settings = PromptExecutionSettings(extension_data=kwargs) + + function = KernelFunction.from_prompt( + prompt=template, + function_name=function_name, + plugin_name=plugin_name, + description=description, + template_format=template_format, + prompt_template=prompt_template, + prompt_template_config=prompt_template_config, ) - validate_function_name(function_name) - if plugin_name is not None: - validate_plugin_name(plugin_name) - - template = PromptTemplate(prompt_template, self.prompt_template_engine, config) - function_config = SemanticFunctionConfig(config, template) - - return self.register_semantic_function(plugin_name, function_name, function_config) - - def _create_semantic_function( - self, - plugin_name: str, - function_name: str, - function_config: SemanticFunctionConfig, - ) -> KernelFunction: - if not function_config.prompt_template_config.type == "completion": - raise AIException( - 
AIException.ErrorCodes.FunctionTypeNotSupported, - f"Function type not supported: {function_config.prompt_template_config.type}", - ) - - function = KernelFunction.from_semantic_config(plugin_name, function_name, function_config) - if exec_settings := function_config.prompt_template_config.execution_settings: + if exec_settings := prompt_template_config.execution_settings: if exec_settings.service_id in function.prompt_execution_settings: logger.warning("Overwriting execution settings for service_id: %s", exec_settings.service_id) function.prompt_execution_settings[exec_settings.service_id] = exec_settings + self.add_plugin(plugin_name, [function]) + function.set_default_plugin_collection(self.plugins) + return function def register_native_function( diff --git a/python/semantic_kernel/models/ai/chat_completion/chat_history.py b/python/semantic_kernel/models/ai/chat_completion/chat_history.py new file mode 100644 index 000000000000..ba37c23762b6 --- /dev/null +++ b/python/semantic_kernel/models/ai/chat_completion/chat_history.py @@ -0,0 +1,235 @@ +# Copyright (c) Microsoft. All rights reserved. + +import json +from typing import Any, Iterator, List, Optional, Union + +from pydantic import Field, ValidationError +from pydantic.json import pydantic_encoder +from pydantic.tools import parse_obj_as + +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.kernel_pydantic import KernelBaseModel +from semantic_kernel.models.ai.chat_completion.chat_role import ChatRole + + +class ChatHistory(KernelBaseModel): + """ + This class holds the history of chat messages from a chat conversation. + + Note: the constructor takes a system_message parameter, which is not part + of the class definition. This is to allow the system_message to be passed in + as a keyword argument, but not be part of the class definition. + + Attributes: + messages (List[ChatMessageContent]): The list of chat messages in the history. + """ + + messages: Optional[List[ChatMessageContent]] = Field(default_factory=list) + + def __init__(self, **data): + """ + Initializes a new instance of the ChatHistory class, optionally incorporating a message and/or + a system message at the beginning of the chat history. + + This constructor allows for flexible initialization with chat messages and an optional messages or a + system message. If both 'messages' (a list of ChatMessageContent instances) and 'system_message' are + provided, the 'system_message' is prepended to the list of messages, ensuring it appears as the first + message in the history. If only 'system_message' is provided without any 'messages', the chat history is + initialized with the 'system_message' as its first item. If 'messages' are provided without a + 'system_message', the chat history is initialized with the provided messages as is. + + Parameters: + - **data: Arbitrary keyword arguments. The constructor looks for two optional keys: + - 'messages': Optional[List[ChatMessageContent]], a list of chat messages to include in the history. + - 'system_message' Optional[str]: An optional string representing a system-generated message to be + included at the start of the chat history. + + Note: The 'system_message' is not retained as part of the class's attributes; it's used during + initialization and then discarded. The rest of the keyword arguments are passed to the superclass + constructor and handled according to the Pydantic model's behavior. 
+ """ + system_message_content = data.pop("system_message", None) + + if system_message_content: + system_message = ChatMessageContent(role=ChatRole.SYSTEM, content=system_message_content) + + if "messages" in data: + data["messages"] = [system_message] + data["messages"] + else: + data["messages"] = [system_message] + + super().__init__(**data) + + def add_system_message(self, content: str) -> None: + """Add a system message to the chat history.""" + self.add_message(message=self._prepare_for_add(ChatRole.SYSTEM, content)) + + def add_user_message(self, content: str) -> None: + """Add a user message to the chat history.""" + self.add_message(message=self._prepare_for_add(ChatRole.USER, content)) + + def add_assistant_message(self, content: str) -> None: + """Add an assistant message to the chat history.""" + self.add_message(message=self._prepare_for_add(ChatRole.ASSISTANT, content)) + + def add_tool_message(self, content: str, metadata: Optional[dict[str, Any]] = None) -> None: + """Add a tool message to the chat history.""" + self.add_message(message=self._prepare_for_add(ChatRole.TOOL, content), metadata=metadata) + + def add_message( + self, + message: Union[ChatMessageContent, dict], + encoding: Optional[str] = None, + metadata: Optional[dict[str, Any]] = None, + ) -> None: + """Add a message to the history. + + This method accepts either a ChatMessageContent instance or a + dictionary with the necessary information to construct a ChatMessageContent instance. + + Args: + message (Union[ChatMessageContent, dict]): The message to add, either as + a pre-constructed ChatMessageContent instance or a dictionary specifying 'role' and 'content'. + encoding (Optional[str]): The encoding of the message. Required if 'message' is a dict. + metadata (Optional[dict[str, Any]]): Any metadata to attach to the message. Required if 'message' is a dict. + """ + if isinstance(message, ChatMessageContent): + chat_message = message + elif isinstance(message, dict): + required_keys = {"role", "content"} + if not required_keys.issubset(message.keys()): + raise ValueError(f"Dictionary must contain the following keys: {required_keys}") + chat_message = ChatMessageContent( + role=message["role"], content=message["content"], encoding=encoding, metadata=metadata + ) + else: + raise TypeError("message must be an instance of ChatMessageContent or a dictionary") + + self.messages.append(chat_message) + + def _prepare_for_add(self, role: ChatRole, content: str) -> dict[str, str]: + """Prepare a message to be added to the history.""" + return {"role": role, "content": content} + + def remove_message(self, message: ChatMessageContent) -> bool: + """Remove a message from the history. + + Args: + message (ChatMessageContent): The message to remove. + + Returns: + bool: True if the message was removed, False if the message was not found. + """ + try: + self.messages.remove(message) + return True + except ValueError: + return False + + def __len__(self) -> int: + """Return the number of messages in the history.""" + return len(self.messages) + + def __getitem__(self, index: int) -> ChatMessageContent: + """Get a message from the history using the [] operator. + + Args: + index (int): The index of the message to get. + + Returns: + ChatMessageContent: The message at the specified index. + """ + return self.messages[index] + + def __contains__(self, item: ChatMessageContent) -> bool: + """Check if a message is in the history. + + Args: + item (ChatMessageContent): The message to check for. 
+ + Returns: + bool: True if the message is in the history, False otherwise. + """ + return item in self.messages + + def __str__(self) -> str: + """Return a string representation of the history.""" + return "\n".join([f"{msg.role}: {msg.content}" for msg in self.messages]) + + def __iter__(self) -> Iterator[ChatMessageContent]: + """Return an iterator over the messages in the history.""" + return iter(self.messages) + + def __eq__(self, other: "ChatHistory") -> bool: + """Check if two ChatHistory instances are equal.""" + if not isinstance(other, ChatHistory): + return False + + return self.messages == other.messages + + def serialize(self) -> str: + """ + Serializes the ChatHistory instance to a JSON string. + + Returns: + str: A JSON string representation of the ChatHistory instance. + + Raises: + ValueError: If the ChatHistory instance cannot be serialized to JSON. + """ + try: + return json.dumps(self.model_dump(), indent=4, default=pydantic_encoder) + except TypeError as e: + raise ValueError(f"Unable to serialize ChatHistory to JSON: {e}") + + @classmethod + def restore_chat_history(cls, chat_history_json: str) -> "ChatHistory": + """ + Restores a ChatHistory instance from a JSON string. + + Args: + chat_history_json (str): The JSON string to deserialize + into a ChatHistory instance. + + Returns: + ChatHistory: The deserialized ChatHistory instance. + + Raises: + ValueError: If the JSON string is invalid or the deserialized data + fails validation. + """ + try: + history_dict = json.loads(chat_history_json) + except json.JSONDecodeError as e: + raise ValueError(f"Invalid JSON format: {e}") + + try: + return parse_obj_as(cls, history_dict) + except ValidationError as e: + raise ValueError(f"Data validation error during deserialization: {e}") + + def store_chat_history_to_file(chat_history: "ChatHistory", file_path: str) -> None: + """ + Stores the serialized ChatHistory to a file. + + Args: + chat_history (ChatHistory): The ChatHistory instance to serialize and store. + file_path (str): The path to the file where the serialized data will be stored. + """ + json_str = chat_history.serialize() + with open(file_path, "w") as file: + file.write(json_str) + + def load_chat_history_from_file(file_path: str) -> "ChatHistory": + """ + Loads the ChatHistory from a file. + + Args: + file_path (str): The path to the file from which to load the ChatHistory. + + Returns: + ChatHistory: The deserialized ChatHistory instance. 
+ """ + with open(file_path, "r") as file: + json_str = file.read() + return ChatHistory.restore_chat_history(json_str) diff --git a/python/semantic_kernel/planners/action_planner/action_planner.py b/python/semantic_kernel/planners/action_planner/action_planner.py index 95472621874a..90825886f7a3 100644 --- a/python/semantic_kernel/planners/action_planner/action_planner.py +++ b/python/semantic_kernel/planners/action_planner/action_planner.py @@ -7,6 +7,11 @@ from textwrap import dedent from typing import Optional +if sys.version_info >= (3, 9): + from typing import Annotated +else: + from typing_extensions import Annotated + if sys.version_info >= (3, 9): from typing import Annotated else: @@ -14,6 +19,7 @@ import regex from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.functions.kernel_function import KernelFunction from semantic_kernel.functions.kernel_function_decorator import kernel_function @@ -66,11 +72,14 @@ def __init__( self._prompt_template = prompt if prompt else open(__prompt_file, "r").read() - self._planner_function = kernel.create_semantic_function( + execute_settings = PromptExecutionSettings( + extension_data={"max_tokens": self.config.max_tokens, "stop_sequences": self._stop_sequence} + ) + + self._planner_function = kernel.create_function_from_prompt( plugin_name=self.RESTRICTED_PLUGIN_NAME, - prompt_template=self._prompt_template, - max_tokens=self.config.max_tokens, - stop_sequences=[self._stop_sequence], + template=self._prompt_template, + execution_settings={"default": execute_settings}, ) kernel.import_plugin(self, self.RESTRICTED_PLUGIN_NAME) diff --git a/python/semantic_kernel/planners/basic_planner.py b/python/semantic_kernel/planners/basic_planner.py index 012bf9dcd3c4..7e0fe15e6acb 100644 --- a/python/semantic_kernel/planners/basic_planner.py +++ b/python/semantic_kernel/planners/basic_planner.py @@ -5,8 +5,10 @@ import regex +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.kernel import Kernel +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig class Plan: @@ -128,6 +130,9 @@ class BasicPlanner: Basic JSON-based planner for the Semantic Kernel. """ + def __init__(self, service_id: str) -> None: + self.service_id = service_id + def _create_available_functions_string(self, kernel: Kernel) -> str: """ Given an instance of the Kernel, create the [AVAILABLE FUNCTIONS] @@ -172,9 +177,23 @@ async def create_plan( Creates a plan for the given goal based off the functions that are available in the kernel. 
""" + exec_settings = PromptExecutionSettings( + service_id=self.service_id, + max_tokens=1000, + temperature=0.8, + ) - # Create the semantic function for the planner with the given prompt - planner = kernel.create_semantic_function(prompt, max_tokens=1000, temperature=0.8) + prompt_template_config = PromptTemplateConfig( + template=prompt, + execution_settings=exec_settings, + ) + + # Create the prompt function for the planner with the given prompt + planner = kernel.create_function_from_prompt( + plugin_name="PlannerPlugin", + function_name="CreatePlan", + prompt_template_config=prompt_template_config, + ) available_functions_string = self._create_available_functions_string(kernel) @@ -191,8 +210,14 @@ async def execute_plan(self, plan: Plan, kernel: Kernel) -> str: # Filter out good JSON from the result in case additional text is present json_regex = r"\{(?:[^{}]|(?R))*\}" - generated_plan_string = regex.search(json_regex, plan.generated_plan.result).group() - generated_plan = json.loads(generated_plan_string) + generated_plan_string = regex.search(json_regex, str(plan.generated_plan.value)).group() + + # TODO: there is some silly escape chars affecting the result of plan.generated_plan.value + # There should be \n only but they are showing up as \\n + encoded_bytes = generated_plan_string.encode("utf-8") + decoded_string = encoded_bytes.decode("unicode_escape") + + generated_plan = json.loads(decoded_string) arguments = KernelArguments(input=generated_plan["input"]) subtasks = generated_plan["subtasks"] diff --git a/python/semantic_kernel/planners/plan.py b/python/semantic_kernel/planners/plan.py index bf118bc03e5b..43554cf26c48 100644 --- a/python/semantic_kernel/planners/plan.py +++ b/python/semantic_kernel/planners/plan.py @@ -33,7 +33,7 @@ class Plan: _name: str = PrivateAttr() _plugin_name: str = PrivateAttr() _description: str = PrivateAttr() - _is_semantic: bool = PrivateAttr() + _is_prompt: bool = PrivateAttr() _prompt_execution_settings: PromptExecutionSettings = PrivateAttr() DEFAULT_RESULT_KEY: ClassVar[str] = "PLAN.RESULT" @@ -66,15 +66,15 @@ def parameters(self) -> KernelArguments: return self._parameters @property - def is_semantic(self) -> bool: - return self._is_semantic + def is_prompt(self) -> bool: + return self._is_prompt @property def is_native(self) -> bool: - if self._is_semantic is None: + if self._is_prompt is None: return None else: - return not self._is_semantic + return not self._is_prompt @property def prompt_execution_settings(self) -> PromptExecutionSettings: @@ -110,7 +110,7 @@ def __init__( self._outputs = [] if outputs is None else outputs self._steps = [] if steps is None else steps self._has_next_step = len(self._steps) > 0 - self._is_semantic = None + self._is_prompt = None self._function = function or None self._prompt_execution_settings = None @@ -203,7 +203,7 @@ def describe(self) -> KernelFunctionMetadata: plugin_name=self._plugin_name, parameters=[], description=self._description, - is_semantic=self._is_semantic or False, + is_prompt=self._is_prompt or False, ) def set_available_functions(self, plan: "Plan", kernel: "Kernel", arguments: "KernelArguments") -> "Plan": @@ -247,7 +247,7 @@ def set_function(self, function: KernelFunction) -> None: self._name = function.name self._plugin_name = function.plugin_name self._description = function.description - self._is_semantic = function.is_semantic + self._is_prompt = function.is_prompt self._prompt_execution_settings = function.prompt_execution_settings async def run_next_step( diff --git 
a/python/semantic_kernel/planners/sequential_planner/Plugins/SequentialPlanning/config.json b/python/semantic_kernel/planners/sequential_planner/Plugins/SequentialPlanning/config.json index a6abef524f3d..71d9357b6085 100644 --- a/python/semantic_kernel/planners/sequential_planner/Plugins/SequentialPlanning/config.json +++ b/python/semantic_kernel/planners/sequential_planner/Plugins/SequentialPlanning/config.json @@ -2,13 +2,15 @@ "schema": 1, "description": "Given a request or command or goal generate a step by step plan to fulfill the request using functions. This ability is also known as decision making and function flow", "type": "completion", - "completion": { + "execution_settings": { + "default": { "max_tokens": 1024, "temperature": 0, "top_p": 0, "presence_penalty": 0, "frequency_penalty": 0, "stop_sequences": [""] + } }, "input_variables": [ { diff --git a/python/semantic_kernel/planners/sequential_planner/sequential_planner.py b/python/semantic_kernel/planners/sequential_planner/sequential_planner.py index ec878c969066..f8bca5f8b65c 100644 --- a/python/semantic_kernel/planners/sequential_planner/sequential_planner.py +++ b/python/semantic_kernel/planners/sequential_planner/sequential_planner.py @@ -17,13 +17,9 @@ from semantic_kernel.planners.sequential_planner.sequential_planner_parser import ( SequentialPlanParser, ) -from semantic_kernel.prompt_template.prompt_template import PromptTemplate from semantic_kernel.prompt_template.prompt_template_config import ( PromptTemplateConfig, ) -from semantic_kernel.prompt_template.semantic_function_config import ( - SemanticFunctionConfig, -) SEQUENTIAL_PLANNER_DEFAULT_DESCRIPTION = ( "Given a request or command or goal generate a step by step plan to " @@ -48,7 +44,22 @@ class SequentialPlanner: _arguments: "KernelArguments" _function_flow_function: "KernelFunction" - def __init__(self, kernel: Kernel, config: SequentialPlannerConfig = None, prompt: str = None): + def __init__( + self, + kernel: Kernel, + service_id: str, + config: SequentialPlannerConfig = None, + prompt: str = None, + ) -> None: + """ + Initializes a new instance of the SequentialPlanner class. + + Args: + kernel (Kernel): The kernel instance to use for planning + service_id (str): The service id to use to get the AI service + config (SequentialPlannerConfig, optional): The configuration to use for planning. Defaults to None. + prompt (str, optional): The prompt to use for planning. Defaults to None. 
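A hedged sketch of constructing the refactored SequentialPlanner with the new service_id parameter documented in the Args above; the service id and goal text are assumptions, and the kernel is expected to already have an AI service registered under that id.

```python
from semantic_kernel import Kernel
from semantic_kernel.planners.sequential_planner.sequential_planner import SequentialPlanner

kernel = Kernel()
# ... register an AI service, e.g. kernel.add_service(<chat service>, ...) with service_id="chat" ...
planner = SequentialPlanner(kernel, service_id="chat")

# Inside an async function:
# plan = await planner.create_plan("Summarize the meeting notes and list the action items")
```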
+ """ assert isinstance(kernel, Kernel) self.config = config or SequentialPlannerConfig() @@ -56,24 +67,25 @@ def __init__(self, kernel: Kernel, config: SequentialPlannerConfig = None, promp self._kernel = kernel self._arguments = KernelArguments() - self._function_flow_function = self._init_flow_function(prompt) + self._function_flow_function = self._init_flow_function(prompt, service_id) - def _init_flow_function(self, prompt: str): + def _init_flow_function(self, prompt: str, service_id: str) -> "KernelFunction": prompt_config = PromptTemplateConfig.from_json(read_file(PROMPT_CONFIG_FILE_PATH)) prompt_template = prompt or read_file(PROMPT_TEMPLATE_FILE_PATH) - prompt_config.execution_settings.extension_data["max_tokens"] = self.config.max_tokens - prompt_template = PromptTemplate( - template=prompt_template, - template_engine=self._kernel.prompt_template_engine, - prompt_config=prompt_config, - ) - function_config = SemanticFunctionConfig(prompt_config, prompt_template) + # TODO: fix when extension settings in PromptTemplateConfig are a dictonary + # While the extension settings are not, grab the value for the 'default' key + if "default" in prompt_config.execution_settings.extension_data: + prompt_config.execution_settings = prompt_config.execution_settings.extension_data["default"] + + prompt_config.execution_settings.service_id = service_id + prompt_config.execution_settings.extension_data["max_tokens"] = self.config.max_tokens + prompt_config.template = prompt_template - return self._kernel.register_semantic_function( + return self._kernel.create_function_from_prompt( plugin_name=self.RESTRICTED_PLUGIN_NAME, function_name=self.RESTRICTED_PLUGIN_NAME, - function_config=function_config, + prompt_template_config=prompt_config, ) async def create_plan(self, goal: str) -> Plan: diff --git a/python/semantic_kernel/planners/stepwise_planner/Plugins/StepwiseStep/config.json b/python/semantic_kernel/planners/stepwise_planner/Plugins/StepwiseStep/config.json index 0706266c27fb..6c3110fcc87f 100644 --- a/python/semantic_kernel/planners/stepwise_planner/Plugins/StepwiseStep/config.json +++ b/python/semantic_kernel/planners/stepwise_planner/Plugins/StepwiseStep/config.json @@ -1,14 +1,15 @@ { "schema": 1, "description": "Given a request or command or goal generate multi-step plan to reach the goal. 
After each step LLM is called to perform the reasoning for the next step.", - "type": "completion", - "completion": { - "max_tokens": 1024, - "temperature": 0, - "top_p": 0, - "presence_penalty": 0, - "frequency_penalty": 0, - "stop_sequences": ["[OBSERVATION]", "\n[THOUGHT]"] + "execution_settings": { + "default": { + "max_tokens": 1024, + "temperature": 0, + "top_p": 0, + "presence_penalty": 0, + "frequency_penalty": 0, + "stop_sequences": ["[OBSERVATION]", "\n[THOUGHT]"] + } }, "input_variables": [ { diff --git a/python/semantic_kernel/planners/stepwise_planner/stepwise_planner.py b/python/semantic_kernel/planners/stepwise_planner/stepwise_planner.py index eda5ebcfb6e3..04c94a2b0e6a 100644 --- a/python/semantic_kernel/planners/stepwise_planner/stepwise_planner.py +++ b/python/semantic_kernel/planners/stepwise_planner/stepwise_planner.py @@ -25,13 +25,9 @@ StepwisePlannerConfig, ) from semantic_kernel.planners.stepwise_planner.system_step import SystemStep -from semantic_kernel.prompt_template.prompt_template import PromptTemplate from semantic_kernel.prompt_template.prompt_template_config import ( PromptTemplateConfig, ) -from semantic_kernel.prompt_template.semantic_function_config import ( - SemanticFunctionConfig, -) if TYPE_CHECKING: from semantic_kernel.functions.kernel_function import KernelFunction @@ -91,10 +87,9 @@ def __init__( prompt_config = PromptTemplateConfig.from_json(read_file(PROMPT_CONFIG_FILE_PATH)) prompt_config.execution_settings.extension_data["max_tokens"] = self.config.max_tokens + prompt_config.template = prompt_template - self._system_step_function = self.import_semantic_function( - kernel, "StepwiseStep", prompt_template, prompt_config - ) + self._system_step_function = self.import_function_from_prompt(kernel, "StepwiseStep", prompt_config) self._native_functions = self._kernel.import_plugin(self, RESTRICTED_PLUGIN_NAME) self._context = KernelArguments() @@ -107,7 +102,7 @@ def describe(self) -> KernelFunctionMetadata: parameters=[ KernelParameterMetadata(name="goal", description="The goal to achieve", default_value="", required=True) ], - is_semantic=True, + is_prompt=True, is_asynchronous=True, ) @@ -386,17 +381,15 @@ def get_function_descriptions(self) -> str: function_descriptions = "\n".join([self.to_manual_string(f) for f in available_functions]) return function_descriptions - def import_semantic_function( + def import_function_from_prompt( self, kernel: Kernel, function_name: str, - prompt_template: str, - config: "PromptTemplateConfig" = None, + config: PromptTemplateConfig = None, ) -> "KernelFunction": - template = PromptTemplate(prompt_template, kernel.prompt_template_engine, config) - function_config = SemanticFunctionConfig(config, template) - - return kernel.register_semantic_function(RESTRICTED_PLUGIN_NAME, function_name, function_config) + return kernel.create_function_from_prompt( + plugin_name=RESTRICTED_PLUGIN_NAME, function_name=function_name, prompt_template_config=config + ) def to_manual_string(self, function: KernelFunctionMetadata) -> str: inputs = [ diff --git a/python/semantic_kernel/prompt_template/chat_prompt_template.py b/python/semantic_kernel/prompt_template/chat_prompt_template.py index 1d8aec30fef0..73f8297f244c 100644 --- a/python/semantic_kernel/prompt_template/chat_prompt_template.py +++ b/python/semantic_kernel/prompt_template/chat_prompt_template.py @@ -2,9 +2,7 @@ import asyncio import logging -from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar - -from pydantic import Field +from typing import 
TYPE_CHECKING, Any, Dict, Generic, List, TypeVar from semantic_kernel.models.ai.chat_completion.chat_message import ChatMessage from semantic_kernel.prompt_template.prompt_template import PromptTemplate @@ -25,7 +23,7 @@ class ChatPromptTemplate(PromptTemplate, Generic[ChatMessageT]): - messages: List[ChatMessageT] = Field(default_factory=list) + # TODO Note this class will be removed after the auto function calling work is complete def __init__( self, @@ -34,7 +32,6 @@ def __init__( prompt_config: PromptTemplateConfig, parse_chat_system_prompt: bool = False, parse_messages: bool = False, - **kwargs: Any, ) -> None: """Initialize a chat prompt template. @@ -56,68 +53,29 @@ def __init__( """ super().__init__(template, template_engine, prompt_config) - if "log" in kwargs: - logger.warning("The `log` parameter is deprecated. Please use the `logging` module instead.") - - if parse_chat_system_prompt and "chat_system_prompt" in self.prompt_config.execution_settings.extension_data: - self.add_system_message(self.prompt_config.execution_settings.extension_data["chat_system_prompt"]) - - if ( - parse_messages - and hasattr(self.prompt_config.execution_settings, "messages") - and self.prompt_config.execution_settings.messages - ): - for message in self.prompt_config.execution_settings.messages: - self.add_message(**message) async def render(self, kernel: "Kernel", arguments: "KernelArguments") -> str: raise NotImplementedError("Can't call render on a ChatPromptTemplate.\n" "Use render_messages instead.") - def add_system_message(self, message: str) -> None: - """Add a system message to the chat template.""" - self.add_message("system", message) - - def add_user_message(self, message: str) -> None: - """Add a user message to the chat template.""" - self.add_message("user", message) - - def add_assistant_message(self, message: str) -> None: - """Add an assistant message to the chat template.""" - self.add_message("assistant", message) - - def add_message(self, role: str, message: Optional[str] = None, **kwargs: Any) -> None: - """Add a message to the chat template. - - Arguments: - role: The role of the message, one of "user", "assistant", "system". - message: The message to add, can include templating components. - kwargs: can be used by inherited classes. 
- """ - concrete_message = self.model_fields["messages"].annotation.__args__[0] - # When the type is not explicitly set, it is still the typevar, replace with generic ChatMessage - if isinstance(concrete_message, TypeVar): - concrete_message = ChatMessage - assert issubclass(concrete_message, ChatMessage) - if not message and "content" in kwargs: - message = kwargs["content"] - self.messages.append( - concrete_message( - role=role, - content_template=PromptTemplate(message, self.template_engine, self.prompt_config) if message else None, - **kwargs, - ) - ) - async def render_messages(self, kernel: "Kernel", arguments: "KernelArguments") -> List[Dict[str, str]]: - """Render the content of the message in the chat template, based on the context.""" - if len(self.messages) == 0 or self.messages[-1].role in ["assistant", "system", "function"]: + messages = arguments.get("history", None) + """Render the content of the message in the chat template, based on the arguments.""" + if ( + messages is None + or len(messages) == 0 + or messages[-1].role + in [ + "assistant", + "system", + ] + ): self.add_user_message(message=self.template) - await asyncio.gather(*[message.render_message(kernel, arguments) for message in self.messages]) + await asyncio.gather(*[message.render_message(kernel, arguments) for message in messages]) # Don't resend the assistant + tool_calls message as it will error return [ message.as_dict() - for message in self.messages - if not (message.role == "assistant" and getattr(message, "tool_calls", None)) + for message in messages + if not (message.role == "assistant" and hasattr(message, "tool_calls")) ] def dump_messages(self) -> List[Dict[str, str]]: diff --git a/python/semantic_kernel/prompt_template/input_variable.py b/python/semantic_kernel/prompt_template/input_variable.py new file mode 100644 index 000000000000..550bbbebd6b0 --- /dev/null +++ b/python/semantic_kernel/prompt_template/input_variable.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft. All rights reserved. + +from typing import Any, Optional + +from pydantic import Field + +from semantic_kernel.kernel_pydantic import KernelBaseModel + + +class InputVariable(KernelBaseModel): + name: str = Field(..., alias="name") + description: Optional[str] = Field("", alias="description") + default: Optional[Any] = Field("", alias="default") + is_required: Optional[bool] = Field(True, alias="is_required") + json_schema: Optional[str] = Field("", alias="json_schema") diff --git a/python/semantic_kernel/prompt_template/kernel_prompt_template.py b/python/semantic_kernel/prompt_template/kernel_prompt_template.py new file mode 100644 index 000000000000..801bd4a5c7b3 --- /dev/null +++ b/python/semantic_kernel/prompt_template/kernel_prompt_template.py @@ -0,0 +1,163 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import logging +from typing import TYPE_CHECKING, List, Optional + +from pydantic import PrivateAttr + +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_base import PromptTemplateBase +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig +from semantic_kernel.template_engine.blocks.block import Block +from semantic_kernel.template_engine.blocks.block_types import BlockTypes +from semantic_kernel.template_engine.protocols.text_renderer import TextRenderer +from semantic_kernel.template_engine.template_tokenizer import TemplateTokenizer + +if TYPE_CHECKING: + from semantic_kernel.functions.kernel_arguments import KernelArguments + from semantic_kernel.kernel import Kernel + +logger: logging.Logger = logging.getLogger(__name__) + + +class KernelPromptTemplate(PromptTemplateBase): + _tokenizer: TemplateTokenizer = PrivateAttr() + _blocks: List[Block] = PrivateAttr() + + def __init__(self, prompt_config: PromptTemplateConfig): + super().__init__() + self._tokenizer = TemplateTokenizer() + self._blocks = self.extract_blocks(prompt_config) + self._add_missing_input_variables(self._blocks, prompt_config) + + def _add_missing_input_variables(self, blocks, config): + # Add all of the existing input variables to our known set. We'll avoid adding any + # dynamically discovered input variables with the same name. + seen = {iv.name.lower() for iv in config.input_variables} + + def add_if_missing(variable_name): + # Convert variable_name to lower case to handle case-insensitivity + if variable_name and variable_name.lower() not in seen: + seen.add(variable_name.lower()) + config.input_variables.append(InputVariable(name=variable_name)) + + # Enumerate every block in the template, adding any variables that are referenced. + for block in blocks: + if block.type == BlockTypes.VARIABLE: + # Add all variables from variable blocks, e.g. "{{$a}}". + add_if_missing(block.name) + elif block.type == BlockTypes.CODE: + for code_block in block.tokens: + if code_block.type == BlockTypes.VARIABLE: + # Add all variables from code blocks, e.g. "{{p.bar $b}}". + add_if_missing(code_block.name) + elif code_block.type == BlockTypes.NAMED_ARG and getattr(code_block, "var_block", None): + # Add all variables from named arguments, e.g. "{{p.bar b = $b}}". + # represents a named argument for a function call. + # For example, in the template {{ MyPlugin.MyFunction var1="foo" }}, var1="foo" + # is a named arg block. + add_if_missing(code_block.var_block.name) + + def extract_blocks(self, config: PromptTemplateConfig) -> List[Block]: + """ + Given a prompt template string, extract all the blocks + (text, variables, function calls). + + Args: + template_text: Prompt template + + Returns: + A list of all the blocks, ie the template tokenized in + text, variables and function calls + """ + template_text = config.template + logger.debug(f"Extracting blocks from template: {template_text}") + if not template_text: + return [] + logger.debug(f"Extracting blocks from template: {template_text}") + return TemplateTokenizer.tokenize(template_text) + + async def render(self, kernel: "Kernel", arguments: Optional["KernelArguments"] = None) -> str: + """ + Using the prompt template, replace the variables with their values + and execute the functions replacing their reference with the + function result. 
+ + Args: + kernel: The kernel instance + arguments: The kernel arguments + + Returns: + The prompt template ready to be used for an AI request + """ + return await self.render_blocks(self._blocks, kernel, arguments) + + async def render_blocks(self, blocks: List[Block], kernel: "Kernel", arguments: "KernelArguments") -> str: + """ + Given a list of blocks render each block and compose the final result. + + :param blocks: Template blocks generated by ExtractBlocks + :param context: Access into the current kernel execution context + :return: The prompt template ready to be used for an AI request + """ + from semantic_kernel.template_engine.protocols.code_renderer import CodeRenderer + + logger.debug(f"Rendering list of {len(blocks)} blocks") + rendered_blocks: List[str] = [] + for block in blocks: + if isinstance(block, TextRenderer): + rendered_blocks.append(block.render(kernel, arguments)) + continue + if isinstance(block, CodeRenderer): + rendered_blocks.append(await block.render_code(kernel, arguments)) + prompt = "".join(rendered_blocks) + logger.debug(f"Rendered prompt: {prompt}") + return prompt + + def render_variables( + self, blocks: List[Block], kernel: "Kernel", arguments: Optional["KernelArguments"] = None + ) -> List[Block]: + """ + Given a list of blocks, render the Variable Blocks, replacing + placeholders with the actual value in memory. + + :param blocks: List of blocks, typically all the blocks found in a template + :param variables: Container of all the temporary variables known to the kernel + :return: An updated list of blocks where Variable Blocks have rendered to + Text Blocks + """ + from semantic_kernel.template_engine.blocks.text_block import TextBlock + + logger.debug("Rendering variables") + + rendered_blocks: List[Block] = [] + for block in blocks: + if block.type == BlockTypes.VARIABLE: + rendered_blocks.append(TextBlock.from_text(block.render(kernel, arguments))) + continue + rendered_blocks.append(block) + + return rendered_blocks + + async def render_code(self, blocks: List[Block], kernel: "Kernel", arguments: "KernelArguments") -> List[Block]: + """ + Given a list of blocks, render the Code Blocks, executing the + functions and replacing placeholders with the functions result. + + :param blocks: List of blocks, typically all the blocks found in a template + :param execution_context: Access into the current kernel execution context + :return: An updated list of blocks where Code Blocks have rendered to + Text Blocks + """ + from semantic_kernel.template_engine.blocks.text_block import TextBlock + + logger.debug("Rendering code") + + rendered_blocks: List[Block] = [] + for block in blocks: + if block.type == BlockTypes.CODE: + rendered_blocks.append(TextBlock.from_text(await block.render_code(kernel, arguments))) + continue + rendered_blocks.append(block) + + return rendered_blocks diff --git a/python/semantic_kernel/prompt_template/prompt_template_base.py b/python/semantic_kernel/prompt_template/prompt_template_base.py index 560f8c5cbb48..e04ea7e5089d 100644 --- a/python/semantic_kernel/prompt_template/prompt_template_base.py +++ b/python/semantic_kernel/prompt_template/prompt_template_base.py @@ -1,21 +1,16 @@ # Copyright (c) Microsoft. All rights reserved. 
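A minimal sketch of the new KernelPromptTemplate in use, based only on the construction, variable discovery, and render paths shown above; the template string and argument values are illustrative.

```python
import asyncio

from semantic_kernel import Kernel
from semantic_kernel.functions.kernel_arguments import KernelArguments
from semantic_kernel.prompt_template.kernel_prompt_template import KernelPromptTemplate
from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig

config = PromptTemplateConfig(template="Translate {{$input}} into {{$language}}.")
template = KernelPromptTemplate(prompt_config=config)

# Variable blocks referenced in the template are discovered and appended to config.input_variables.
print([v.name for v in config.input_variables])  # ['input', 'language']

# Rendering replaces the variable blocks with values taken from KernelArguments.
prompt = asyncio.run(template.render(Kernel(), KernelArguments(input="good morning", language="French")))
print(prompt)
```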
from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING from semantic_kernel.kernel_pydantic import KernelBaseModel if TYPE_CHECKING: from semantic_kernel.functions.kernel_arguments import KernelArguments - from semantic_kernel.functions.kernel_parameter_metadata import KernelParameterMetadata from semantic_kernel.kernel import Kernel class PromptTemplateBase(KernelBaseModel, ABC): - @abstractmethod - def get_parameters(self) -> List["KernelParameterMetadata"]: - pass - @abstractmethod async def render(self, kernel: "Kernel", arguments: "KernelArguments") -> str: pass diff --git a/python/semantic_kernel/prompt_template/prompt_template_config.py b/python/semantic_kernel/prompt_template/prompt_template_config.py index 52283c5cc310..2ae5f7c50c94 100644 --- a/python/semantic_kernel/prompt_template/prompt_template_config.py +++ b/python/semantic_kernel/prompt_template/prompt_template_config.py @@ -1,83 +1,106 @@ # Copyright (c) Microsoft. All rights reserved. import json -from typing import Generic, List, TypeVar +import logging +from typing import Generic, List, Optional, TypeVar from pydantic import Field from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.functions.kernel_parameter_metadata import KernelParameterMetadata from semantic_kernel.kernel_pydantic import KernelBaseModel +from semantic_kernel.prompt_template.input_variable import InputVariable PromptExecutionSettingsT = TypeVar("PromptExecutionSettingsT", bound=PromptExecutionSettings) +logger: logging.Logger = logging.getLogger(__name__) -class PromptTemplateConfig(KernelBaseModel, Generic[PromptExecutionSettingsT]): - schema_: int = Field(default=1, alias="schema") - type: str = "completion" - description: str = "" - execution_settings: PromptExecutionSettingsT = Field( - default_factory=PromptExecutionSettings - ) # todo: this should be a dict - default_services: List[str] = Field(default_factory=list) - parameters: List[KernelParameterMetadata] = Field(default_factory=list) - @classmethod - def from_dict(cls, data: dict) -> "PromptTemplateConfig": - config = { - key: value for key, value in data.items() if key in ["schema", "type", "description", "default_services"] - } - config["parameters"] = [] - - config = cls._process_execution_settings(config, data) - - if "input_variables" in data: - for parameter in data["input_variables"]: - name = parameter.get("name", "") - description = parameter.get("description", "") - defaultValue = parameter.get("default", "") - type_ = parameter.get("type") - required = parameter.get("required", False) - - config["parameters"].append( - KernelParameterMetadata( - name=name, - description=description, - default_value=defaultValue, - type_=type_, - required=required, - ) - ) - - return cls(**config) +class PromptTemplateConfig(KernelBaseModel, Generic[PromptExecutionSettingsT]): + name: Optional[str] = Field(default="", alias="name") + description: Optional[str] = Field(default="", alias="description") + template: Optional[str] = Field(None, alias="template") + template_format: Optional[str] = Field(default="semantic-kernel", alias="template_format") + input_variables: Optional[List[InputVariable]] = Field(default_factory=list, alias="input_variables") + execution_settings: Optional[PromptExecutionSettings] = Field( + default_factory=PromptExecutionSettings, alias="execution_settings" + ) # TODO Make this a dict + + def __init__(self, **kwargs) -> None: + """Create a new 
PromptTemplateConfig instance. + + Args: + **kwargs: The data to initialize the instance with. + """ + super().__init__(**kwargs) + + def add_execution_settings(self, settings: PromptExecutionSettings, overwrite: bool = True) -> None: + """Add execution settings to the prompt template.""" + if overwrite: + self.execution_settings = settings + return + logger.warning("Execution settings already exist and overwrite is set to False") + + def get_kernel_parameter_metadata(self) -> List[KernelParameterMetadata]: + """Get the kernel parameter metadata for the input variables.""" + return [ + KernelParameterMetadata( + name=variable.name, + description=variable.description, + default_value=variable.default, + type_=variable.json_schema, # TODO: update to handle complex JSON schemas + required=variable.is_required, + expose=True, + ) + for variable in self.input_variables + ] @classmethod def from_json(cls, json_str: str) -> "PromptTemplateConfig": - return cls.from_dict(json.loads(json_str)) + """Create a PromptTemplateConfig instance from a JSON string.""" + if not json_str: + raise ValueError("json_str is empty") + + try: + parsed_json = json.loads(json_str) + config = PromptTemplateConfig(**parsed_json) + except Exception as e: + raise ValueError( + "Unable to deserialize PromptTemplateConfig from the " + f"specified JSON string: {json_str} with exception: {e}" + ) + + # Verify that input variable default values are string only + for variable in config.input_variables: + if variable.default and not isinstance(variable.default, str): + raise ValueError(f"Default value for input variable {variable.name} must be a string for {config.name}") - @classmethod - def from_execution_settings(cls, **kwargs) -> "PromptTemplateConfig": - concrete_class = cls.model_fields["execution_settings"].annotation - if isinstance(concrete_class, TypeVar): - concrete_class = PromptExecutionSettings - return PromptTemplateConfig(execution_settings=concrete_class(extension_data=kwargs)) + return config @classmethod - def _process_execution_settings(cls, config: dict, data: dict) -> dict: - exec_settings = data.get("execution_settings", {}) - - for service_id, settings in exec_settings.items(): - # Copy settings to avoid modifying the original data - settings = settings.copy() - - # Extract and remove 'service_id' if it exists - # service_id = settings.pop("service_id", service_id) - - # Determine the concrete type - concrete_type = cls.model_fields["execution_settings"].annotation - if isinstance(concrete_type, TypeVar): - concrete_type = PromptExecutionSettings - - # Initialize the concrete type with the service_id and remaining settings - config["execution_settings"] = concrete_type(service_id=service_id, extension_data=settings) - - return config + def restore( + cls, + name: str, + description: str, + template: str, + input_variables: List[InputVariable], + execution_settings: PromptExecutionSettings, + ) -> "PromptTemplateConfig": + """Restore a PromptTemplateConfig instance from the specified parameters. + + Args: + name: The name of the prompt template. + description: The description of the prompt template. + template: The template for the prompt. + input_variables: The input variables for the prompt. + execution_settings: The execution settings for the prompt. + + Returns: + A new PromptTemplateConfig instance. 
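A hedged sketch of building the refactored PromptTemplateConfig in code, mirroring the shape that from_json produces from a config.json file; the names, template, and settings values are illustrative.

```python
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
from semantic_kernel.prompt_template.input_variable import InputVariable
from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig

exec_settings = PromptExecutionSettings(
    service_id="default", extension_data={"max_tokens": 256, "temperature": 0.0}
)
config = PromptTemplateConfig(
    name="summarize",
    description="Summarize the given text",
    template="Summarize: {{$input}}",
    input_variables=[InputVariable(name="input", description="Text to summarize")],
    execution_settings=exec_settings,
)

# Input variables are surfaced to the kernel as parameter metadata.
print([p.name for p in config.get_kernel_parameter_metadata()])  # ['input']

# add_execution_settings replaces the settings because overwrite defaults to True.
config.add_execution_settings(PromptExecutionSettings(service_id="chat", extension_data={"max_tokens": 512}))
```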
+ """ + return cls( + name=name, + description=description, + template=template, + input_variables=input_variables, + execution_settings=execution_settings, + ) diff --git a/python/semantic_kernel/services/__init__.py b/python/semantic_kernel/services/__init__.py new file mode 100644 index 000000000000..2a50eae89411 --- /dev/null +++ b/python/semantic_kernel/services/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Microsoft. All rights reserved. diff --git a/python/semantic_kernel/services/ai_service_selector.py b/python/semantic_kernel/services/ai_service_selector.py index 443ae60f1f9a..8ca751435980 100644 --- a/python/semantic_kernel/services/ai_service_selector.py +++ b/python/semantic_kernel/services/ai_service_selector.py @@ -5,11 +5,11 @@ from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase from semantic_kernel.functions.kernel_arguments import KernelArguments -from semantic_kernel.functions.kernel_function import KernelFunction ALL_COMPLETION_SERVICE_TYPES = Union[TextCompletionClientBase, ChatCompletionClientBase] if TYPE_CHECKING: + from semantic_kernel.functions.kernel_function import KernelFunction from semantic_kernel.kernel import Kernel @@ -21,7 +21,7 @@ class AIServiceSelector: """ def select_ai_service( - self, kernel: "Kernel", function: KernelFunction, arguments: KernelArguments + self, kernel: "Kernel", function: "KernelFunction", arguments: KernelArguments ) -> Tuple[ALL_COMPLETION_SERVICE_TYPES, PromptExecutionSettings]: """Select a AI Service on a first come, first served basis, starting with execution settings in the arguments, diff --git a/python/semantic_kernel/template_engine/blocks/code_block.py b/python/semantic_kernel/template_engine/blocks/code_block.py index a38bb5363e3c..786eb2c442c0 100644 --- a/python/semantic_kernel/template_engine/blocks/code_block.py +++ b/python/semantic_kernel/template_engine/blocks/code_block.py @@ -6,7 +6,6 @@ from pydantic import Field, field_validator, model_validator -from semantic_kernel.functions.kernel_function import KernelFunction from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata from semantic_kernel.functions.kernel_plugin_collection import KernelPluginCollection from semantic_kernel.template_engine.blocks.block import Block @@ -17,6 +16,7 @@ if TYPE_CHECKING: from semantic_kernel.functions.kernel_arguments import KernelArguments + from semantic_kernel.functions.kernel_function import KernelFunction from semantic_kernel.kernel import Kernel logger: logging.Logger = logging.getLogger(__name__) @@ -157,7 +157,7 @@ def _enrich_function_arguments( def _get_function_from_plugin_collection( self, plugins: KernelPluginCollection, function_block: FunctionIdBlock - ) -> Optional[KernelFunction]: + ) -> Optional["KernelFunction"]: """ Get the function from the plugin collection diff --git a/python/semantic_kernel/template_engine/blocks/var_block.py b/python/semantic_kernel/template_engine/blocks/var_block.py index 93a4490297a0..351d79ccdbea 100644 --- a/python/semantic_kernel/template_engine/blocks/var_block.py +++ b/python/semantic_kernel/template_engine/blocks/var_block.py @@ -65,10 +65,13 @@ def parse_content(cls, fields: Any) -> Any: fields["name"] = name return fields - def render(self, _: "Kernel", arguments: Optional["KernelArguments"] = None) -> Any: + def render(self, _: "Kernel", arguments: Optional["KernelArguments"] = None) -> str: + """Render the variable 
block with the given arguments. + If the variable is not found in the arguments, return an empty string.""" if arguments is None: return "" value = arguments.get(self.name, None) if not value: logger.warning(f"Variable `{Symbols.VAR_PREFIX}{self.name}` not found in the KernelArguments") - return value or "" + + return str(value) if value else "" diff --git a/python/semantic_kernel/template_engine/protocols/prompt_templating_engine.py b/python/semantic_kernel/template_engine/protocols/prompt_templating_engine.py index 5f85ef7a0d3f..113668ffca1b 100644 --- a/python/semantic_kernel/template_engine/protocols/prompt_templating_engine.py +++ b/python/semantic_kernel/template_engine/protocols/prompt_templating_engine.py @@ -1,7 +1,9 @@ # Copyright (c) Microsoft. All rights reserved. -from typing import TYPE_CHECKING, List, Optional, Protocol, runtime_checkable +from abc import ABC +from typing import TYPE_CHECKING, List, Optional +from semantic_kernel.kernel_pydantic import KernelBaseModel from semantic_kernel.template_engine.blocks.block import Block if TYPE_CHECKING: @@ -9,10 +11,9 @@ from semantic_kernel.functions.kernel_arguments import KernelArguments -@runtime_checkable -class PromptTemplatingEngine(Protocol): +class PromptTemplatingEngine(KernelBaseModel, ABC): """ - Prompt templating engine protocol. + Prompt templating engine base class. """ def extract_blocks(self, template_text: Optional[str] = None, validate: bool = True) -> List[Block]: @@ -26,6 +27,7 @@ def extract_blocks(self, template_text: Optional[str] = None, validate: bool = T :return: A list of all the blocks, ie the template tokenized in text, variables and function calls """ + pass async def render(self, template_text: str, kernel: "Kernel", arguments: "KernelArguments") -> str: """ @@ -37,6 +39,7 @@ async def render(self, template_text: str, kernel: "Kernel", arguments: "KernelA :param context: Access into the current kernel execution context :return: The prompt template ready to be used for an AI request """ + pass async def render_blocks(self, blocks: List[Block], kernel: "Kernel", arguments: "KernelArguments") -> str: """ @@ -46,6 +49,7 @@ async def render_blocks(self, blocks: List[Block], kernel: "Kernel", arguments: :param context: Access into the current kernel execution context :return: The prompt template ready to be used for an AI request """ + pass def render_variables( self, blocks: List[Block], kernel: "Kernel", arguments: Optional["KernelArguments"] = None @@ -59,6 +63,7 @@ def render_variables( :return: An updated list of blocks where Variable Blocks have rendered to Text Blocks """ + pass async def render_code(self, blocks: List[Block], kernel: "Kernel", arguments: "KernelArguments") -> List[Block]: """ @@ -70,3 +75,4 @@ async def render_code(self, blocks: List[Block], kernel: "Kernel", arguments: "K :return: An updated list of blocks where Code Blocks have rendered to Text Blocks """ + pass diff --git a/python/semantic_kernel/utils/chat.py b/python/semantic_kernel/utils/chat.py new file mode 100644 index 000000000000..63d73e39c7dc --- /dev/null +++ b/python/semantic_kernel/utils/chat.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft. All rights reserved. 
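The new utils/chat.py module added here exposes prepare_chat_history_for_request (defined just below). A small sketch of how it might be called, with illustrative message text, showing the OpenAI-style and Google Palm-style payload shapes noted in its docstring.

```python
from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory
from semantic_kernel.utils.chat import prepare_chat_history_for_request

chat_history = ChatHistory()
chat_history.add_user_message("Hello")
chat_history.add_assistant_message("Hi, how can I help?")

# OpenAI-style payload: [{"role": ..., "content": ...}]
openai_messages = prepare_chat_history_for_request(chat_history)

# Google Palm-style payload: [{"author": ..., "content": ...}], with every role overridden to "user"
palm_messages = prepare_chat_history_for_request(chat_history, output_role_key="author", override_role="user")
```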
+ +from typing import Dict, List, Optional + +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory + + +def prepare_chat_history_for_request( + chat_history: ChatHistory, + output_role_key: str = "role", # Default to "role", change to "author" as needed + override_role: Optional[str] = None, # Default to None, change to "user" as needed +) -> List[Dict[str, str]]: + """ + Prepare the chat history for a request, allowing customization of the key names for role/author, + and optionally overriding the role. + + OpenAI requires the structure: [{"role": "user", "content": }] + Google Palm requires the structure: [{"author": "user", "content": }] + + :param chat_history: ChatHistory object containing the chat messages. + :param output_role_key: The key name to use for the role/author field in the output. + :param override_role: Optional string to override the role in all messages. If None, use the original role. + :return: A list of message dictionaries formatted as per the specified keys and optional role override. + """ + return [ + {output_role_key: override_role if override_role else message.role, "content": message.content} + for message in chat_history.messages + ] diff --git a/python/semantic_kernel/utils/settings.py b/python/semantic_kernel/utils/settings.py index 3bba2168bc81..33dd21d6df33 100644 --- a/python/semantic_kernel/utils/settings.py +++ b/python/semantic_kernel/utils/settings.py @@ -192,7 +192,7 @@ def bing_search_settings_from_dot_env() -> str: """Reads the Bing Search API key from the .env file. Returns: - Tuple[str, str]: The Bing Search API key, the Bing Search endpoint + str: The Bing Search API key """ api_key = None diff --git a/python/tests/conftest.py b/python/tests/conftest.py index 6e9d59975b0f..08baa232ac8d 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -55,7 +55,7 @@ def enable_debug_mode(): builtins.pr = snoop.pp -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def create_kernel(plugin: Optional[KernelPlugin] = None): kernel = Kernel() if plugin: diff --git a/python/tests/integration/completions/conftest.py b/python/tests/integration/completions/conftest.py index 72096efeb4a8..4cdce4647b1a 100644 --- a/python/tests/integration/completions/conftest.py +++ b/python/tests/integration/completions/conftest.py @@ -4,18 +4,20 @@ import pytest +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig + if sys.version_info >= (3, 9): import semantic_kernel.connectors.ai.google_palm as sk_gp -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def setup_tldr_function_for_oai_models(create_kernel): kernel = create_kernel # Define semantic function using SK prompt template language - sk_prompt = """ + prompt = """ {{$input}} - {{$input2}} (hyphenated words count as 1 word) Give me the TLDR in exactly 5 words: @@ -36,10 +38,10 @@ def setup_tldr_function_for_oai_models(create_kernel): print("TLDR: ") print(text_to_summarize) print() - yield kernel, sk_prompt, text_to_summarize + yield kernel, prompt, text_to_summarize -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def setup_summarize_conversation_using_plugin(create_kernel): kernel = create_kernel ChatTranscript = """John: Hello, how are you? 
@@ -83,19 +85,25 @@ def setup_summarize_conversation_using_plugin(create_kernel): yield kernel, ChatTranscript -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def setup_gp_text_completion_function(create_kernel, get_gp_config): kernel = create_kernel api_key = get_gp_config # Configure LLM service palm_text_completion = sk_gp.GooglePalmTextCompletion(ai_model_id="models/text-bison-001", api_key=api_key) - kernel.add_text_completion_service("models/text-bison-001", palm_text_completion) + kernel.add_service(palm_text_completion) # Define semantic function using SK prompt template language - sk_prompt = "Hello, I like {{$input}}{{$input2}}" + prompt = "Hello, I like {{$input}}{{$input2}}" + + exec_settings = PromptExecutionSettings( + service_id="models/text-bison-001", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig(template=prompt, execution_settings=exec_settings) # Create the semantic function - text2text_function = kernel.create_semantic_function(sk_prompt, max_tokens=25, temperature=0.7, top_p=0.5) + text2text_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) # User input simple_input = "sleeping and " diff --git a/python/tests/integration/completions/test_azure_oai_chat_service.py b/python/tests/integration/completions/test_azure_oai_chat_service.py index fbaeb756d2cf..ab1cf991d280 100644 --- a/python/tests/integration/completions/test_azure_oai_chat_service.py +++ b/python/tests/integration/completions/test_azure_oai_chat_service.py @@ -7,11 +7,14 @@ from test_utils import retry import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig @pytest.mark.asyncio async def test_azure_e2e_chat_completion_with_plugin(setup_tldr_function_for_oai_models, get_aoai_config): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models _, api_key, endpoint = get_aoai_config @@ -26,13 +29,25 @@ async def test_azure_e2e_chat_completion_with_plugin(setup_tldr_function_for_oai # Configure LLM service kernel.add_service( - sk_oai.AzureChatCompletion(deployment_name=deployment_name, endpoint=endpoint, api_key=api_key), + sk_oai.AzureChatCompletion( + service_id="chat", deployment_name=deployment_name, endpoint=endpoint, api_key=api_key + ), + ) + + exec_settings = PromptExecutionSettings( + service_id="chat", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings ) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) - summary = await retry(lambda: kernel.run(tldr_function, input_str=text_to_summarize)) + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) output = str(summary).strip() print(f"TLDR using input string: '{output}'") assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) @@ 
-43,7 +58,7 @@ async def test_azure_e2e_chat_completion_with_plugin(setup_tldr_function_for_oai async def test_azure_e2e_chat_completion_with_plugin_and_provided_client( setup_tldr_function_for_oai_models, get_aoai_config ): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models _, api_key, endpoint = get_aoai_config @@ -65,18 +80,28 @@ async def test_azure_e2e_chat_completion_with_plugin_and_provided_client( ) # Configure LLM service - kernel.add_chat_service( - "chat_completion", + kernel.add_service( sk_oai.AzureChatCompletion( + service_id="chat_completion", deployment_name=deployment_name, async_client=client, ), ) + exec_settings = PromptExecutionSettings( + service_id="chat_completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings + ) + # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) - summary = await retry(lambda: kernel.run(tldr_function, input_str=text_to_summarize)) + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) output = str(summary).strip() print(f"TLDR using input string: '{output}'") assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) diff --git a/python/tests/integration/completions/test_azure_oai_chat_service_extensions.py b/python/tests/integration/completions/test_azure_oai_chat_service_extensions.py index 96a6cea38dcd..7375c6780bf6 100644 --- a/python/tests/integration/completions/test_azure_oai_chat_service_extensions.py +++ b/python/tests/integration/completions/test_azure_oai_chat_service_extensions.py @@ -7,15 +7,16 @@ import numpy as np import pytest -import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import ( AzureAISearchDataSources, - AzureChatPromptExecutionSettings, AzureDataSources, ExtraBody, ) +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.memory.memory_record import MemoryRecord +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig try: from semantic_kernel.connectors.memory.azure_cognitive_search.azure_cognitive_search_memory_store import ( @@ -107,28 +108,29 @@ async def create_with_data_chat_function(get_aoai_config, create_kernel, create_ ) chat_service = sk_oai.AzureChatCompletion( + service_id="chat-gpt-extensions", deployment_name=deployment_name, api_key=api_key, endpoint=endpoint, api_version="2023-12-01-preview", use_extensions=True, ) - kernel.add_chat_service("chat-gpt-extensions", chat_service) - - prompt_config = sk.PromptTemplateConfig( - execution_settings=AzureChatPromptExecutionSettings( - max_tokens=2000, - temperature=0.7, - top_p=0.8, - extra_body=extra, - ) + kernel.add_service(chat_service) + + prompt = "{{$input}}" + + exec_settings = PromptExecutionSettings( + service_id="chat-gpt-extensions", + extension_data={"max_tokens": 2000, "temperature": 0.7, "top_p": 
0.8, "extra_body": extra}, + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings ) - prompt_config.default_services = ["chat-gpt-extensions"] - prompt_template = sk.ChatPromptTemplate("{{$input}}", kernel.prompt_template_engine, prompt_config) + # Create the semantic function + chat_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) - function_config = sk.SemanticFunctionConfig(prompt_config, prompt_template) - chat_function = kernel.register_semantic_function("ChatBot", "Chat", function_config) return chat_function, kernel, collection, memory_store except: await memory_store.delete_collection(collection) @@ -148,23 +150,26 @@ async def test_azure_e2e_chat_completion_with_extensions( memory_store, ) = await create_with_data_chat_function + arguments = KernelArguments(input="who are Emily and David?") + + # TODO: get streaming working for this test + use_streaming = False + try: result = None - async for message in kernel.run_stream(chat_function, input_str="who are Emily and David?"): - result = message[0] if not result else result + message[0] - print(message, end="") - - print(f"Answer using input string: '{result}'") - print(f"Tool message: {result.tool_message}") - assert result.tool_message is not None - assert "two passionate scientists" in result.tool_message - assert len(result.content) > 1 - - context = await kernel.run(chat_function, input_str="who are Emily and David?") - print(f"Answer using input string: '{context}'") - assert context.objects["results"][0].tool_message is not None - assert "two passionate scientists" in context.objects["results"][0].tool_message - assert len(context.result) > 1 + if use_streaming: + async for message in kernel.invoke_stream(chat_function, arguments): + result = message[0] if not result else result + message[0] + print(message, end="") + + print(f"Answer using input string: '{result}'") + print(f"Tool message: {result.tool_message}") + assert result.tool_message is not None + assert "two passionate scientists" in result.tool_message + assert len(result.content) > 1 + else: + result = await kernel.invoke(chat_function, arguments) + print(f"Answer using input string: '{result}'") await memory_store.delete_collection(collection) except: diff --git a/python/tests/integration/completions/test_azure_oai_text_service.py b/python/tests/integration/completions/test_azure_oai_text_service.py index ca5df3ca92d3..5313fca8e22c 100644 --- a/python/tests/integration/completions/test_azure_oai_text_service.py +++ b/python/tests/integration/completions/test_azure_oai_text_service.py @@ -7,11 +7,14 @@ from test_utils import retry import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig @pytest.mark.asyncio async def test_azure_e2e_text_completion_with_plugin(setup_tldr_function_for_oai_models, get_aoai_config): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models _, api_key, endpoint = get_aoai_config @@ -25,19 +28,29 @@ async def test_azure_e2e_text_completion_with_plugin(setup_tldr_function_for_oai print(f"* Deployment: {deployment_name}") # Configure LLM service - 
kernel.add_text_completion_service( - "text_completion", + kernel.add_service( sk_oai.AzureTextCompletion( + service_id="text_completion", deployment_name=deployment_name, endpoint=endpoint, api_key=api_key, ), ) + exec_settings = PromptExecutionSettings( + service_id="text_completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings + ) + # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) - summary = await retry(lambda: kernel.run(tldr_function, input_str=text_to_summarize)) + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) output = str(summary).strip() print(f"TLDR using input string: '{output}'") assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) @@ -48,7 +61,7 @@ async def test_azure_e2e_text_completion_with_plugin(setup_tldr_function_for_oai async def test_azure_e2e_text_completion_with_plugin_with_provided_client( setup_tldr_function_for_oai_models, get_aoai_config ): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models _, api_key, endpoint = get_aoai_config @@ -70,18 +83,29 @@ async def test_azure_e2e_text_completion_with_plugin_with_provided_client( ) # Configure LLM service - kernel.add_text_completion_service( - "text_completion", + kernel.add_service( sk_oai.AzureTextCompletion( + service_id="text_completion", deployment_name=deployment_name, async_client=client, ), + overwrite=True, # Overwrite the service for the test if it already exists + ) + + exec_settings = PromptExecutionSettings( + service_id="text_completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings ) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) - summary = await retry(lambda: kernel.run(tldr_function, input_str=text_to_summarize)) + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) output = str(summary).strip() print(f"TLDR using input string: '{output}'") assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) diff --git a/python/tests/integration/completions/test_conversation_summary_plugin.py b/python/tests/integration/completions/test_conversation_summary_plugin.py index ac5b055f51ba..4ab354ae8a18 100644 --- a/python/tests/integration/completions/test_conversation_summary_plugin.py +++ b/python/tests/integration/completions/test_conversation_summary_plugin.py @@ -7,9 +7,12 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.core_plugins.conversation_summary_plugin import ( ConversationSummaryPlugin, ) +from 
semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig @pytest.mark.asyncio @@ -25,17 +28,31 @@ async def test_azure_summarize_conversation_using_plugin(setup_summarize_convers deployment_name, api_key, endpoint = get_aoai_config deployment_name = "gpt-35-turbo-instruct" - kernel.add_text_completion_service( - "text_completion", - sk_oai.AzureTextCompletion(deployment_name=deployment_name, endpoint=endpoint, api_key=api_key), + service_id = "text_completion" + + execution_settings = PromptExecutionSettings( + service_id=service_id, max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5 + ) + prompt_template_config = PromptTemplateConfig( + template=ConversationSummaryPlugin._summarize_conversation_prompt_template, + description="Given a section of a conversation transcript, summarize the part of" " the conversation.", + execution_settings=execution_settings, ) - conversationSummaryPlugin = kernel.import_plugin(ConversationSummaryPlugin(kernel), "conversationSummary") + kernel.add_service( + sk_oai.AzureTextCompletion( + service_id=service_id, deployment_name=deployment_name, endpoint=endpoint, api_key=api_key + ), + ) - summary = await retry( - lambda: kernel.run(conversationSummaryPlugin["SummarizeConversation"], input_str=chatTranscript) + conversationSummaryPlugin = kernel.import_plugin( + ConversationSummaryPlugin(kernel, prompt_template_config), "conversationSummary" ) + arguments = KernelArguments(input=chatTranscript) + + summary = await retry(lambda: kernel.invoke(conversationSummaryPlugin["SummarizeConversation"], arguments)) + output = str(summary).strip().lower() print(output) assert "john" in output and "jane" in output @@ -43,14 +60,14 @@ async def test_azure_summarize_conversation_using_plugin(setup_summarize_convers @pytest.mark.asyncio -@pytest.mark.xfail(reason="This test fails intermittently when run in parallel with other tests") async def test_oai_summarize_conversation_using_plugin( setup_summarize_conversation_using_plugin, ): _, chatTranscript = setup_summarize_conversation_using_plugin - # Defining a new kernel here to avoid using the same kernel as the previous test - # which causes failures. + # Even though the kernel is scoped to the function, it appears that + # it is shared because adding the same plugin throws an error. + # Create a new kernel for this test. 
kernel = sk.Kernel() if "Python_Integration_Tests" in os.environ: @@ -60,17 +77,29 @@ async def test_oai_summarize_conversation_using_plugin( # Load credentials from .env file api_key, org_id = sk.openai_settings_from_dot_env() - kernel.add_text_completion_service( - "davinci-003", - sk_oai.OpenAITextCompletion("gpt-3.5-turbo-instruct", api_key, org_id=org_id), + execution_settings = PromptExecutionSettings( + service_id="conversation_summary", max_tokens=ConversationSummaryPlugin._max_tokens, temperature=0.1, top_p=0.5 + ) + prompt_template_config = PromptTemplateConfig( + template=ConversationSummaryPlugin._summarize_conversation_prompt_template, + description="Given a section of a conversation transcript, summarize the part of" " the conversation.", + execution_settings=execution_settings, ) - conversationSummaryPlugin = kernel.import_plugin(ConversationSummaryPlugin(kernel), "conversationSummary") + kernel.add_service( + sk_oai.OpenAITextCompletion( + service_id="conversation_summary", ai_model_id="gpt-3.5-turbo-instruct", api_key=api_key, org_id=org_id + ), + ) - summary = await retry( - lambda: kernel.run(conversationSummaryPlugin["SummarizeConversation"], input_str=chatTranscript) + conversationSummaryPlugin = kernel.import_plugin( + ConversationSummaryPlugin(kernel, prompt_template_config), "conversationSummary" ) + arguments = KernelArguments(input=chatTranscript) + + summary = await retry(lambda: kernel.invoke(conversationSummaryPlugin["SummarizeConversation"], arguments)) + output = str(summary).strip().lower() print(output) assert "john" in output and "jane" in output diff --git a/python/tests/integration/completions/test_gp_chat_service.py b/python/tests/integration/completions/test_gp_chat_service.py index 976928680f2d..27fc4e9122bc 100644 --- a/python/tests/integration/completions/test_gp_chat_service.py +++ b/python/tests/integration/completions/test_gp_chat_service.py @@ -1,10 +1,14 @@ # Copyright (c) Microsoft. All rights reserved. 
-import asyncio import os import sys import pytest +from test_utils import retry + +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig if sys.version_info >= (3, 9): import semantic_kernel.connectors.ai.google_palm as sk_gp @@ -20,31 +24,28 @@ @pytest.mark.asyncio async def test_gp_chat_service_with_plugins(setup_tldr_function_for_oai_models, get_gp_config): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models api_key = get_gp_config print("* Service: Google PaLM Chat Completion") print("* Model: chat-bison-001") - palm_chat_completion = sk_gp.GooglePalmChatCompletion(ai_model_id="models/chat-bison-001", api_key=api_key) - kernel.add_chat_service("models/chat-bison-001", palm_chat_completion) + model_id = "models/chat-bison-001" + palm_chat_completion = sk_gp.GooglePalmChatCompletion(ai_model_id=model_id, api_key=api_key) + kernel.add_service(palm_chat_completion) + + exec_settings = PromptExecutionSettings( + service_id=model_id, extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig(template=prompt, execution_settings=exec_settings) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) - - max_retries = 5 # Adjust the number of retries as per your requirement - retry_delay = 2 # Adjust the delay (in seconds) between retries - - for _ in range(max_retries): - try: - summary = await kernel.run(tldr_function, input_str=text_to_summarize) - output = str(summary).strip() - print(f"TLDR using input string: '{output}'") - # assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) - assert len(output) > 0 - break - except Exception as e: - print(f"Error occurred: {e}") - await asyncio.sleep(retry_delay) # Introduce a delay before the next retry - else: - # The loop completed without breaking, meaning all retries failed - raise AssertionError("Test failed after multiple retries") + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) + + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) + output = str(summary).strip() + print(f"TLDR using input string: '{output}'") + # assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) + assert len(output) > 0 diff --git a/python/tests/integration/completions/test_gp_text_service.py b/python/tests/integration/completions/test_gp_text_service.py index 9b27ca222542..bf7147922e3b 100644 --- a/python/tests/integration/completions/test_gp_text_service.py +++ b/python/tests/integration/completions/test_gp_text_service.py @@ -5,7 +5,7 @@ import pytest -import semantic_kernel as sk +from semantic_kernel.functions.kernel_arguments import KernelArguments pytestmark = [ pytest.mark.skipif(sys.version_info < (3, 9), reason="Google Palm requires Python 3.9 or greater"), @@ -20,8 +20,10 @@ async def test_text2text_generation_input_str(setup_gp_text_completion_function): kernel, text2text_function, simple_input = setup_gp_text_completion_function + arguments = KernelArguments(input=simple_input, input2="") + # Complete input 
string and print - summary = await kernel.run(text2text_function, input_str=simple_input) + summary = await kernel.invoke(text2text_function, arguments) output = str(summary).strip() print(f"Completion using input string: '{output}'") @@ -29,82 +31,24 @@ async def test_text2text_generation_input_str(setup_gp_text_completion_function) @pytest.mark.asyncio -async def test_text2text_generation_input_vars(setup_gp_text_completion_function): - kernel, text2text_function, simple_input = setup_gp_text_completion_function - - # Complete input as context variable and print - context_vars = sk.ContextVariables(simple_input) - summary = await kernel.run(text2text_function, input_vars=context_vars) - - output = str(summary).strip() - print(f"Completion using context variables: '{output}'") - assert len(output) > 0 - - -@pytest.mark.asyncio -async def test_text2text_generation_input_context(setup_gp_text_completion_function): - kernel, text2text_function, simple_input = setup_gp_text_completion_function - - # Complete input context and print - context = kernel.create_new_context() - context["input"] = simple_input - summary = await kernel.run(text2text_function, input_context=context) - - output = str(summary).strip() - print(f"Completion using input context: '{output}'") - assert len(output) > 0 - - -@pytest.mark.asyncio -async def test_text2text_generation_input_context_with_vars( - setup_gp_text_completion_function, -): - kernel, text2text_function, simple_input = setup_gp_text_completion_function - - # Complete input context with additional variables and print - context = kernel.create_new_context() - context["input"] = simple_input - context_vars = sk.ContextVariables("running and") - summary = await kernel.run(text2text_function, input_context=context, input_vars=context_vars) - - output = str(summary).strip() - print(f"Completion using context and additional variables: '{output}'") - assert len(output) > 0 - - -@pytest.mark.asyncio -async def test_text2text_generation_input_context_with_str( - setup_gp_text_completion_function, -): +async def test_text2text_generation_empty_input_arguments(setup_gp_text_completion_function): kernel, text2text_function, simple_input = setup_gp_text_completion_function - # Complete input context with additional input string and print - context = kernel.create_new_context() - context["input"] = simple_input - summary = await kernel.run(text2text_function, input_context=context, input_str="running and") + arguments = KernelArguments(input=simple_input, input2="") + summary = await kernel.invoke(text2text_function, arguments) output = str(summary).strip() - print(f"Completion using context and additional string: '{output}'") + print(f"Completion using arguments: '{output}'") assert len(output) > 0 @pytest.mark.asyncio -async def test_text2text_generation_input_context_with_vars_and_str( - setup_gp_text_completion_function, -): +async def test_text2text_generation_input_arguments_provided(setup_gp_text_completion_function): kernel, text2text_function, simple_input = setup_gp_text_completion_function - # Complete input context with additional variables and string and print - context = kernel.create_new_context() - context["input"] = simple_input - context_vars = sk.ContextVariables(variables={"input2": "running and"}) - summary = await kernel.run( - text2text_function, - input_context=context, - input_vars=context_vars, - input_str="new text", - ) + arguments = KernelArguments(input=simple_input, input2="running and") + summary = await kernel.invoke(text2text_function, 
arguments) output = str(summary).strip() - print(f"Completion using context, additional variables, and additional string: '{output}'") + print(f"Completion using input arguments: '{output}'") assert len(output) > 0 diff --git a/python/tests/integration/completions/test_oai_chat_service.py b/python/tests/integration/completions/test_oai_chat_service.py index db0a46fa5385..f782480fe229 100644 --- a/python/tests/integration/completions/test_oai_chat_service.py +++ b/python/tests/integration/completions/test_oai_chat_service.py @@ -6,11 +6,14 @@ from test_utils import retry import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig @pytest.mark.asyncio async def test_oai_chat_service_with_plugins(setup_tldr_function_for_oai_models, get_oai_config): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models api_key, org_id = get_oai_config @@ -18,15 +21,24 @@ async def test_oai_chat_service_with_plugins(setup_tldr_function_for_oai_models, print("* Endpoint: OpenAI") print("* Model: gpt-3.5-turbo") - kernel.add_chat_service( - "chat-gpt", - sk_oai.OpenAIChatCompletion(ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id), + kernel.add_service( + sk_oai.OpenAIChatCompletion(service_id="chat-gpt", ai_model_id="gpt-3.5-turbo", api_key=api_key, org_id=org_id), + ) + + exec_settings = PromptExecutionSettings( + service_id="chat-gpt", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings ) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) - summary = await retry(lambda: kernel.run(tldr_function, input_str=text_to_summarize)) + arguments = KernelArguments(input=text_to_summarize) + + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) output = str(summary).strip() print(f"TLDR using input string: '{output}'") assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) @@ -35,7 +47,7 @@ async def test_oai_chat_service_with_plugins(setup_tldr_function_for_oai_models, @pytest.mark.asyncio async def test_oai_chat_service_with_plugins_with_provided_client(setup_tldr_function_for_oai_models, get_oai_config): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models api_key, org_id = get_oai_config @@ -48,18 +60,29 @@ async def test_oai_chat_service_with_plugins_with_provided_client(setup_tldr_fun organization=org_id, ) - kernel.add_chat_service( - "chat-gpt", + kernel.add_service( sk_oai.OpenAIChatCompletion( + service_id="chat-gpt", ai_model_id="gpt-3.5-turbo", async_client=client, ), + overwrite=True, # Overwrite the service if it already exists since add service says it does + ) + + exec_settings = PromptExecutionSettings( + service_id="chat-gpt", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + 
template=prompt, description="Write a short story.", execution_settings=exec_settings ) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) - summary = await retry(lambda: kernel.run(tldr_function, input_str=text_to_summarize)) + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) output = str(summary).strip() print(f"TLDR using input string: '{output}'") assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) @@ -68,7 +91,7 @@ async def test_oai_chat_service_with_plugins_with_provided_client(setup_tldr_fun @pytest.mark.asyncio async def test_oai_chat_stream_service_with_plugins(setup_tldr_function_for_oai_models, get_aoai_config): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models _, api_key, endpoint = get_aoai_config @@ -82,16 +105,28 @@ async def test_oai_chat_stream_service_with_plugins(setup_tldr_function_for_oai_ print(f"* Deployment: {deployment_name}") # Configure LLM service - kernel.add_chat_service( - "chat_completion", - sk_oai.AzureChatCompletion(deployment_name=deployment_name, endpoint=endpoint, api_key=api_key), + kernel.add_service( + sk_oai.AzureChatCompletion( + service_id="chat_completion", deployment_name=deployment_name, endpoint=endpoint, api_key=api_key + ), + overwrite=True, + ) + + exec_settings = PromptExecutionSettings( + service_id="chat_completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings ) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) result = None - async for message in kernel.run_stream(tldr_function, input_str=text_to_summarize): + async for message in kernel.invoke_stream(tldr_function, arguments): result = message[0] if not result else result + message[0] output = str(result) diff --git a/python/tests/integration/completions/test_oai_text_service.py b/python/tests/integration/completions/test_oai_text_service.py index 6f8e117e8987..d6b89b5c4482 100644 --- a/python/tests/integration/completions/test_oai_text_service.py +++ b/python/tests/integration/completions/test_oai_text_service.py @@ -6,11 +6,14 @@ from test_utils import retry import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig @pytest.mark.asyncio async def test_oai_text_completion_with_plugins(setup_tldr_function_for_oai_models, get_oai_config): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models api_key, org_id = get_oai_config @@ -18,15 +21,26 @@ async def test_oai_text_completion_with_plugins(setup_tldr_function_for_oai_mode print("* Endpoint: 
OpenAI") print("* Model: gpt-3.5-turbo-instruct") - kernel.add_text_completion_service( - "text-completion", - sk_oai.OpenAITextCompletion(ai_model_id="gpt-3.5-turbo-instruct", api_key=api_key, org_id=org_id), + kernel.add_service( + sk_oai.OpenAITextCompletion( + service_id="text-completion", ai_model_id="gpt-3.5-turbo-instruct", api_key=api_key, org_id=org_id + ), + ) + + exec_settings = PromptExecutionSettings( + service_id="text-completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings ) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) - summary = await retry(lambda: kernel.run(tldr_function, input_str=text_to_summarize)) + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) output = str(summary).strip() print(f"TLDR using input string: '{output}'") # assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) @@ -37,7 +51,7 @@ async def test_oai_text_completion_with_plugins(setup_tldr_function_for_oai_mode async def test_oai_text_completion_with_plugins_with_provided_client( setup_tldr_function_for_oai_models, get_oai_config ): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models api_key, org_id = get_oai_config @@ -50,18 +64,29 @@ async def test_oai_text_completion_with_plugins_with_provided_client( organization=org_id, ) - kernel.add_text_completion_service( - "text-completion", + kernel.add_service( sk_oai.OpenAITextCompletion( + service_id="text-completion", ai_model_id="gpt-3.5-turbo-instruct", async_client=client, ), + overwrite=True, + ) + + exec_settings = PromptExecutionSettings( + service_id="text-completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings ) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) - summary = await retry(lambda: kernel.run(tldr_function, input_str=text_to_summarize)) + summary = await retry(lambda: kernel.invoke(tldr_function, arguments)) output = str(summary).strip() print(f"TLDR using input string: '{output}'") # assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) @@ -70,23 +95,23 @@ async def test_oai_text_completion_with_plugins_with_provided_client( @pytest.mark.asyncio async def test_oai_text_stream_completion_with_plugins(setup_tldr_function_for_oai_models, get_aoai_config): - kernel, sk_prompt, text_to_summarize = setup_tldr_function_for_oai_models + kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models _, api_key, endpoint = get_aoai_config if "Python_Integration_Tests" in os.environ: deployment_name = os.environ["AzureOpenAI__DeploymentName"] else: - deployment_name = "gpt-3.5-turbo-instruct" + deployment_name = 
"gpt-35-turbo-instruct" print("* Service: Azure OpenAI Text Completion") print(f"* Endpoint: {endpoint}") print(f"* Deployment: {deployment_name}") # Configure LLM service - kernel.add_text_completion_service( - "text_completion", + kernel.add_service( sk_oai.AzureTextCompletion( + service_id="text_completion", deployment_name=deployment_name, endpoint=endpoint, api_key=api_key, @@ -94,10 +119,21 @@ async def test_oai_text_stream_completion_with_plugins(setup_tldr_function_for_o ) # Create the semantic function - tldr_function = kernel.create_semantic_function(sk_prompt, max_tokens=200, temperature=0, top_p=0.5) + exec_settings = PromptExecutionSettings( + service_id="text_completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} + ) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings=exec_settings + ) + + # Create the semantic function + tldr_function = kernel.create_function_from_prompt(prompt_template_config=prompt_template_config) + + arguments = KernelArguments(input=text_to_summarize) result = None - async for message in kernel.run_stream(tldr_function, input_str=text_to_summarize): + async for message in kernel.invoke_stream(tldr_function, arguments): result = message[0] if not result else result + message[0] output = str(result) diff --git a/python/tests/integration/embeddings/test_azure_oai_embedding_service.py b/python/tests/integration/embeddings/test_azure_oai_embedding_service.py index fd53a0e17398..198b64286bed 100644 --- a/python/tests/integration/embeddings/test_azure_oai_embedding_service.py +++ b/python/tests/integration/embeddings/test_azure_oai_embedding_service.py @@ -20,15 +20,16 @@ async def test_azure_text_embedding_service(create_kernel, get_aoai_config): else: deployment_name = "text-embedding-ada-002" - kernel.add_text_embedding_generation_service( - "aoai-ada", - sk_oai.AzureTextEmbedding( - deployment_name=deployment_name, - endpoint=endpoint, - api_key=api_key, - ), + embeddings_gen = sk_oai.AzureTextEmbedding( + service_id="aoai-ada", + deployment_name=deployment_name, + endpoint=endpoint, + api_key=api_key, ) - kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore()) + + kernel.add_service(embeddings_gen) + + kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=embeddings_gen) await kernel.memory.save_information("test", id="info1", text="this is a test") await kernel.memory.save_reference( @@ -58,14 +59,14 @@ async def test_azure_text_embedding_service_with_provided_client(create_kernel, default_headers={"Test-User-X-ID": "test"}, ) - kernel.add_text_embedding_generation_service( - "aoai-ada-2", - sk_oai.AzureTextEmbedding( - deployment_name=deployment_name, - async_client=client, - ), + embedding_gen = sk_oai.AzureTextEmbedding( + service_id="aoai-ada-2", + deployment_name=deployment_name, + async_client=client, ) - kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore()) + + kernel.add_service(embedding_gen) + kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=embedding_gen) await kernel.memory.save_information("test", id="info1", text="this is a test") await kernel.memory.save_reference( @@ -85,14 +86,14 @@ async def test_batch_azure_embeddings(get_aoai_config): deployment_name = os.environ["AzureOpenAIEmbeddings__DeploymentName"] else: - deployment_name = "ada-002" + deployment_name = "text-embedding-ada-002" embeddings_service = sk_oai.AzureTextEmbedding( 
deployment_name=deployment_name, endpoint=endpoint, api_key=api_key, ) - texts = ["hello world", "goodbye world"] + texts = ["hello world"] results = await embeddings_service.generate_embeddings(texts) batch_results = await embeddings_service.generate_embeddings(texts, batch_size=1) assert len(results) == len(batch_results) diff --git a/python/tests/integration/embeddings/test_gp_embedding_service.py b/python/tests/integration/embeddings/test_gp_embedding_service.py index 398d2bd2d499..b20a480b51e0 100644 --- a/python/tests/integration/embeddings/test_gp_embedding_service.py +++ b/python/tests/integration/embeddings/test_gp_embedding_service.py @@ -26,8 +26,8 @@ async def test_gp_embedding_service(create_kernel, get_gp_config): api_key = get_gp_config palm_text_embed = sk_gp.GooglePalmTextEmbedding("models/embedding-gecko-001", api_key) - kernel.add_text_embedding_generation_service("gecko", palm_text_embed) - kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore()) + kernel.add_service(palm_text_embed) + kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=palm_text_embed) await kernel.memory.save_information("test", id="info1", text="this is a test") await kernel.memory.save_reference( diff --git a/python/tests/integration/embeddings/test_hf_embedding_service.py b/python/tests/integration/embeddings/test_hf_embedding_service.py index fb59249651c0..caab23c65509 100644 --- a/python/tests/integration/embeddings/test_hf_embedding_service.py +++ b/python/tests/integration/embeddings/test_hf_embedding_service.py @@ -10,12 +10,13 @@ async def test_hf_embeddings_with_memories(): kernel = sk.Kernel() + model_id = "sentence-transformers/all-MiniLM-L6-v2" + + embedding_gen = sk_hf.HuggingFaceTextEmbedding(service_id=model_id, ai_model_id=model_id) + # Configure LLM service - kernel.add_text_embedding_generation_service( - "sentence-transformers/all-MiniLM-L6-v2", - sk_hf.HuggingFaceTextEmbedding(ai_model_id="sentence-transformers/all-MiniLM-L6-v2"), - ) - kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore()) + kernel.add_service(embedding_gen) + kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=embedding_gen) # Add some documents to the semantic memory await kernel.memory.save_information("test", id="info1", text="Sharks are fish.") diff --git a/python/tests/integration/embeddings/test_oai_embedding_service.py b/python/tests/integration/embeddings/test_oai_embedding_service.py index 947137c3908a..3a37f480a956 100644 --- a/python/tests/integration/embeddings/test_oai_embedding_service.py +++ b/python/tests/integration/embeddings/test_oai_embedding_service.py @@ -13,11 +13,12 @@ async def test_oai_embedding_service(create_kernel, get_oai_config): api_key, org_id = get_oai_config - kernel.add_text_embedding_generation_service( - "oai-ada", - sk_oai.OpenAITextEmbedding("text-embedding-ada-002", api_key, org_id=org_id), + embedding_gen = sk_oai.OpenAITextEmbedding( + service_id="oai-ada", ai_model_id="text-embedding-ada-002", api_key=api_key, org_id=org_id ) - kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore()) + + kernel.add_service(embedding_gen) + kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=embedding_gen) await kernel.memory.save_information("test", id="info1", text="this is a test") await kernel.memory.save_reference( @@ -39,14 +40,12 @@ async def test_oai_embedding_service_with_provided_client(create_kernel, get_oai organization=org_id, ) - 
kernel.add_text_embedding_generation_service( - "oai-ada-2", - sk_oai.OpenAITextEmbedding( - ai_model_id="text-embedding-ada-002", - async_client=client, - ), + embedding_gen = sk_oai.OpenAITextEmbedding( + service_id="oai-ada", ai_model_id="text-embedding-ada-002", async_client=client ) - kernel.register_memory_store(memory_store=sk.memory.VolatileMemoryStore()) + + kernel.add_service(embedding_gen) + kernel.use_memory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=embedding_gen) await kernel.memory.save_information("test", id="info1", text="this is a test") await kernel.memory.save_reference( diff --git a/python/tests/integration/fakes/writer_plugin_fake.py b/python/tests/integration/fakes/writer_plugin_fake.py index 548e1859db35..368c81903707 100644 --- a/python/tests/integration/fakes/writer_plugin_fake.py +++ b/python/tests/integration/fakes/writer_plugin_fake.py @@ -1,6 +1,12 @@ # Copyright (c) Microsoft. All rights reserved. +import sys -from semantic_kernel.plugin_definition import kernel_function, kernel_function_context_parameter +if sys.version_info >= (3, 9): + from typing import Annotated +else: + from typing_extensions import Annotated + +from semantic_kernel.functions import kernel_function # TODO: this fake plugin is temporal usage. # C# supports import plugin from samples dir by using test helper and python should do the same @@ -15,11 +21,12 @@ class WriterPluginFake: def translate(self, language: str) -> str: return f"Translate: {language}" - @kernel_function(description="Write an outline for a novel", name="NovelOutline") - @kernel_function_context_parameter( - name="endMarker", - description="The marker to use to end each chapter.", - default_value="", - ) - def write_novel_outline(self, input: str) -> str: + @kernel_function(name="NovelOutline") + def write_novel_outline( + self, + input: Annotated[str, "The input of the function"], + name: Annotated[str, "The name of the function"] = "endMarker", + description: Annotated[str, "The marker to use to end each chapter"] = "Write an outline for a novel.", + default_value: Annotated[str, "The default value used for the function"] = "", + ) -> str: return f"Novel outline: {input}" diff --git a/python/tests/integration/planning/sequential_planner/test_sequential_plan_parser.py b/python/tests/integration/planning/sequential_planner/test_sequential_plan_parser.py index fb3867f45037..302d9c44d84c 100644 --- a/python/tests/integration/planning/sequential_planner/test_sequential_plan_parser.py +++ b/python/tests/integration/planning/sequential_planner/test_sequential_plan_parser.py @@ -18,9 +18,9 @@ async def test_can_call_to_plan_from_xml(get_aoai_config): kernel = Kernel() # Configure LLM service - kernel.add_text_completion_service( - "text_completion", + kernel.add_service( sk_oai.AzureChatCompletion( + service_id="text_completion", deployment_name=deployment_name, endpoint=endpoint, api_key=api_key, @@ -43,7 +43,7 @@ async def test_can_call_to_plan_from_xml(get_aoai_config): plan = SequentialPlanParser.to_plan_from_xml( plan_string, goal, - SequentialPlanParser.get_plugin_function(kernel.create_new_context()), + SequentialPlanParser.get_plugin_function(kernel), ) assert plan is not None diff --git a/python/tests/integration/planning/sequential_planner/test_sequential_planner.py b/python/tests/integration/planning/sequential_planner/test_sequential_planner.py index dd0eb7fc13a6..bdae67a5df26 100644 --- a/python/tests/integration/planning/sequential_planner/test_sequential_planner.py +++ 
b/python/tests/integration/planning/sequential_planner/test_sequential_planner.py @@ -4,10 +4,10 @@ import pytest -import semantic_kernel import semantic_kernel.connectors.ai.open_ai as sk_oai from semantic_kernel.kernel import Kernel from semantic_kernel.planners import SequentialPlanner +from semantic_kernel.planners.planning_exception import PlanningException from semantic_kernel.planners.sequential_planner.sequential_planner_config import ( SequentialPlannerConfig, ) @@ -34,28 +34,28 @@ def initialize_kernel(get_aoai_config, use_embeddings=False, use_chat_model=Fals kernel = Kernel() if use_chat_model: - kernel.add_chat_service( - "chat_completion", + kernel.add_service( sk_oai.AzureChatCompletion( - deployment_name="gpt-35-turbo", + service_id="chat_completion", + deployment_name="gpt-35-turbo-0613", endpoint=endpoint, api_key=api_key, ), ) else: - kernel.add_text_completion_service( - "text_completion", - sk_oai.AzureChatCompletion( - deployment_name="gpt-35-turbo", + kernel.add_service( + sk_oai.AzureTextCompletion( + service_id="text_completion", + deployment_name="gpt-35-turbo-instruct", endpoint=endpoint, api_key=api_key, ), ) if use_embeddings: - kernel.add_text_embedding_generation_service( - "text_embedding", + kernel.add_service( sk_oai.AzureTextEmbedding( + service_id="text_embedding", deployment_name="text-embedding-ada-002", endpoint=endpoint, api_key=api_key, @@ -84,11 +84,13 @@ def initialize_kernel(get_aoai_config, use_embeddings=False, use_chat_model=Fals @pytest.mark.asyncio async def test_create_plan_function_flow(get_aoai_config, use_chat_model, prompt, expected_function, expected_plugin): # Arrange + service_id = "chat_completion" if use_chat_model else "text_completion" + kernel = initialize_kernel(get_aoai_config, False, use_chat_model) kernel.import_plugin(EmailPluginFake(), "email_plugin_fake") kernel.import_plugin(FunPluginFake(), "fun_plugin_fake") - planner = SequentialPlanner(kernel) + planner = SequentialPlanner(kernel, service_id=service_id) # Act plan = await planner.create_plan(prompt) @@ -110,7 +112,7 @@ async def test_create_plan_function_flow(get_aoai_config, use_chat_model, prompt ) @pytest.mark.asyncio @pytest.mark.xfail( - raises=semantic_kernel.planning.planning_exception.PlanningException, + raises=PlanningException, reason="Test is known to occasionally produce unexpected results.", ) async def test_create_plan_with_defaults(get_aoai_config, prompt, expected_function, expected_plugin, expected_default): @@ -119,7 +121,7 @@ async def test_create_plan_with_defaults(get_aoai_config, prompt, expected_funct kernel.import_plugin(EmailPluginFake(), "email_plugin_fake") kernel.import_plugin(WriterPluginFake(), "WriterPlugin") - planner = SequentialPlanner(kernel) + planner = SequentialPlanner(kernel, service_id="text_completion") # Act plan = await retry(lambda: planner.create_plan(prompt)) @@ -145,7 +147,7 @@ async def test_create_plan_with_defaults(get_aoai_config, prompt, expected_funct ) @pytest.mark.asyncio @pytest.mark.xfail( - raises=semantic_kernel.planning.planning_exception.PlanningException, + raises=PlanningException, reason="Test is known to occasionally produce unexpected results.", ) async def test_create_plan_goal_relevant(get_aoai_config, prompt, expected_function, expected_plugin): @@ -157,7 +159,8 @@ async def test_create_plan_goal_relevant(get_aoai_config, prompt, expected_funct planner = SequentialPlanner( kernel, - SequentialPlannerConfig(relevancy_threshold=0.65, max_relevant_functions=30), + service_id="text_completion", + 
config=SequentialPlannerConfig(relevancy_threshold=0.65, max_relevant_functions=30), ) # Act diff --git a/python/tests/integration/planning/stepwise_planner/test_stepwise_planner.py b/python/tests/integration/planning/stepwise_planner/test_stepwise_planner.py index 179ef63eea3e..843dc037a94f 100644 --- a/python/tests/integration/planning/stepwise_planner/test_stepwise_planner.py +++ b/python/tests/integration/planning/stepwise_planner/test_stepwise_planner.py @@ -10,13 +10,13 @@ from semantic_kernel.connectors.search_engine import BingConnector from semantic_kernel.core_plugins.math_plugin import MathPlugin from semantic_kernel.core_plugins.time_plugin import TimePlugin -from semantic_kernel.functions.old.kernel_context import KernelContext +from semantic_kernel.functions import kernel_function +from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.kernel import Kernel from semantic_kernel.planners import StepwisePlanner from semantic_kernel.planners.stepwise_planner.stepwise_planner_config import ( StepwisePlannerConfig, ) -from semantic_kernel.plugin_definition import kernel_function, kernel_function_context_parameter class TempWebSearchEnginePlugin: @@ -36,12 +36,8 @@ def __init__(self, connector) -> None: self._connector = connector @kernel_function(description="Performs a web search for a given query", name="searchAsync") - @kernel_function_context_parameter( - name="query", - description="The search query", - ) - async def search(self, query: str, context: KernelContext) -> str: - query = query or context.variables.get("query") + async def search(self, query: str, arguments: KernelArguments) -> str: + query = query or arguments.get("query") result = await self._connector.search(query, num_results=5, offset=0) return str(result) @@ -62,24 +58,25 @@ def initialize_kernel(get_aoai_config, use_embeddings=False, use_chat_model=Fals kernel = Kernel() if use_chat_model: - kernel.add_chat_service( - "chat_completion", - sk_oai.AzureChatCompletion(deployment_name="gpt-35-turbo", endpoint=endpoint, api_key=api_key), + kernel.add_service( + sk_oai.AzureChatCompletion( + service_id="chat_completion", deployment_name="gpt-35-turbo", endpoint=endpoint, api_key=api_key + ), ) else: - kernel.add_text_completion_service( - "text_completion", - sk_oai.AzureChatCompletion( - deployment_name="gpt-35-turbo", + kernel.add_service( + sk_oai.AzureTextCompletion( + service_id="text_completion", + deployment_name="gpt-35-turbo-instruct", endpoint=endpoint, api_key=api_key, ), ) if use_embeddings: - kernel.add_text_embedding_generation_service( - "text_embedding", + kernel.add_service( sk_oai.AzureTextEmbedding( + service_id="text_embedding", deployment_name="text-embedding-ada-002", endpoint=endpoint, api_key=api_key, diff --git a/python/tests/template_engine/prompt_template_e2e_tests.py b/python/tests/template_engine/prompt_template_e2e_tests.py index b6efa7ecce10..69223e555b83 100644 --- a/python/tests/template_engine/prompt_template_e2e_tests.py +++ b/python/tests/template_engine/prompt_template_e2e_tests.py @@ -6,7 +6,7 @@ from pytest import mark, raises from semantic_kernel import Kernel -from semantic_kernel.plugin_definition import kernel_function +from semantic_kernel.functions import kernel_function from semantic_kernel.template_engine.prompt_template_engine import PromptTemplateEngine diff --git a/python/tests/unit/ai/hugging_face/test_hf_local_text_completions.py b/python/tests/unit/ai/hugging_face/test_hf_local_text_completions.py index 
02b23e1c518b..3c172e4acbe8 100644 --- a/python/tests/unit/ai/hugging_face/test_hf_local_text_completions.py +++ b/python/tests/unit/ai/hugging_face/test_hf_local_text_completions.py @@ -3,8 +3,10 @@ import pytest import semantic_kernel.connectors.ai.hugging_face as sk_hf +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.kernel import Kernel +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig @pytest.mark.asyncio @@ -41,15 +43,25 @@ async def test_text_completion(model_name, task, input_str): service=sk_hf.HuggingFaceTextCompletion(service_id=model_name, ai_model_id=model_name, task=task), ) + exec_settings = PromptExecutionSettings( + service_id=model_name, extension_data={"max_tokens": 25, "temperature": 0.7, "top_p": 0.5} + ) + # Define semantic function using SK prompt template language - sk_prompt = "{{$input}}" + prompt = "{{$input}}" - # Create the semantic function - function = kernel.create_semantic_function( - sk_prompt, service_id=model_name, max_tokens=25, temperature=0.7, top_p=0.5 + prompt_template_config = PromptTemplateConfig(template=prompt, execution_settings=exec_settings) + + test_func = kernel.create_function_from_prompt( + prompt_template_config=prompt_template_config, + function_name="TestFunction", + plugin_name="TestPlugin", + execution_settings=exec_settings, ) - summary = await kernel.invoke(function, input=input_str) + arguments = KernelArguments(input=input_str) + + summary = await kernel.invoke(test_func, arguments) output = str(summary).strip() print(f"Completion using input string: '{output}'") @@ -91,16 +103,24 @@ async def test_text_completion_stream(model_name, task, input_str): sk_hf.HuggingFaceTextCompletion(service_id=model_name, ai_model_id=model_name, task=task), ) + exec_settings = PromptExecutionSettings( + service_id=model_name, extension_data={"max_tokens": 25, "temperature": 0.7, "top_p": 0.5} + ) + # Define semantic function using SK prompt template language - sk_prompt = "{{$input}}" + prompt = "{{$input}}" + + prompt_template_config = PromptTemplateConfig(template=prompt, execution_settings=exec_settings) - # Create the semantic function - function = kernel.create_semantic_function( - sk_prompt, service_id=model_name, max_tokens=25, temperature=0.7, top_p=0.5 + test_func = kernel.create_function_from_prompt( + prompt_template_config=prompt_template_config, + function_name="TestFunction", + plugin_name="TestPlugin", + execution_settings=exec_settings, ) summary = "" - async for text in kernel.invoke_stream(function, arguments=KernelArguments(input=input_str)): + async for text in kernel.invoke_stream(test_func, input=input_str): summary += str(text[0]) output = str(summary).strip() diff --git a/python/tests/unit/connectors/google_palm/services/test_palm_chat_completion.py b/python/tests/unit/connectors/google_palm/services/test_palm_chat_completion.py index 207bbe59437e..ab96cd9a3982 100644 --- a/python/tests/unit/connectors/google_palm/services/test_palm_chat_completion.py +++ b/python/tests/unit/connectors/google_palm/services/test_palm_chat_completion.py @@ -16,6 +16,8 @@ from semantic_kernel.connectors.ai.google_palm.services.gp_chat_completion import ( GooglePalmChatCompletion, ) + from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory + from semantic_kernel.utils.chat import prepare_chat_history_for_request pytestmark = 
pytest.mark.skipif(sys.version_info < (3, 9), reason="Google Palm requires Python 3.9 or greater") @@ -56,7 +58,7 @@ def reply(self): return self gp_response = MockChatResponse() - gp_response.candidates = [MessageDict(content="Example response", author="assistant")] + gp_response.candidates = [MessageDict(content="Example response", author=3)] gp_response.filters = None mock_response = MagicMock() mock_response.last = asyncio.Future() @@ -69,14 +71,13 @@ def reply(self): ): ai_model_id = "test_model_id" api_key = "test_api_key" - prompt = [{"role": "user", "content": "hello world"}] - rewritten_prompt = [{"author": "user", "content": "hello world"}] + chats = ChatHistory(system_message="hello word") gp_chat_completion = GooglePalmChatCompletion( ai_model_id=ai_model_id, api_key=api_key, ) settings = GooglePalmChatPromptExecutionSettings() - response = await gp_chat_completion.complete_chat(prompt, settings) + response = await gp_chat_completion.complete_chat(chats, settings) assert isinstance(response[0].content, str) and len(response) > 0 print(mock_gp.chat) @@ -86,5 +87,5 @@ def reply(self): top_p=settings.top_p, top_k=settings.top_k, candidate_count=settings.candidate_count, - messages=rewritten_prompt, + messages=prepare_chat_history_for_request(chats, output_role_key="author", override_role="user"), ) diff --git a/python/tests/unit/connectors/ollama/services/test_ollama_chat_completion.py b/python/tests/unit/connectors/ollama/services/test_ollama_chat_completion.py index d0883482ec5c..b43af55d9bfe 100644 --- a/python/tests/unit/connectors/ollama/services/test_ollama_chat_completion.py +++ b/python/tests/unit/connectors/ollama/services/test_ollama_chat_completion.py @@ -8,6 +8,7 @@ from semantic_kernel.connectors.ai.ollama.services.ollama_chat_completion import ( OllamaChatCompletion, ) +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory from tests.unit.connectors.ollama.utils import MockResponse @@ -22,8 +23,10 @@ def test_settings(): async def test_complete_chat(mock_post): mock_post.return_value = MockResponse(response={"message": {"content": "test_response"}}) ollama = OllamaChatCompletion(ai_model_id="test_model") + chat_history = ChatHistory() + chat_history.add_user_message("test_prompt") response = await ollama.complete_chat( - [{"role": "user", "content": "test_prompt"}], + chat_history, OllamaChatPromptExecutionSettings(ai_model_id="test_model", options={"test": "test"}), ) assert response[0].content == "test_response" @@ -43,8 +46,10 @@ async def test_complete_chat(mock_post): async def test_complete(mock_post): mock_post.return_value = MockResponse(response={"message": {"content": "test_response"}}) ollama = OllamaChatCompletion(ai_model_id="test_model") + chat_history = ChatHistory() + chat_history.add_user_message("test_prompt") response = await ollama.complete( - "test_prompt", + chat_history, OllamaChatPromptExecutionSettings(ai_model_id="test-model", options={"test": "test"}), ) assert response[0].text == "test_response" @@ -55,8 +60,10 @@ async def test_complete(mock_post): async def test_complete_chat_stream(mock_post): mock_post.return_value = MockResponse(response={"message": {"content": "test_response"}}) ollama = OllamaChatCompletion(ai_model_id="test_model") + chat_history = ChatHistory() + chat_history.add_user_message("test_prompt") response = ollama.complete_chat_stream( - [{"role": "user", "content": "test_prompt"}], + chat_history, OllamaChatPromptExecutionSettings(ai_model_id="test_model", options={"test": "test"}), ) async for 
line in response: @@ -78,8 +85,10 @@ async def test_complete_chat_stream(mock_post): async def test_complete_stream(mock_post): mock_post.return_value = MockResponse(response={"message": {"content": "test_response"}}) ollama = OllamaChatCompletion(ai_model_id="test_model") + chat_history = ChatHistory() + chat_history.add_user_message("test_prompt") response = ollama.complete_stream( - "test_prompt", + chat_history, OllamaChatPromptExecutionSettings(ai_model_id="test_model", options={"test": "test"}), ) async for line in response: diff --git a/python/tests/unit/connectors/ollama/services/test_ollama_test_completion.py b/python/tests/unit/connectors/ollama/services/test_ollama_test_completion.py index 48204a1762d0..6d63b56c8333 100644 --- a/python/tests/unit/connectors/ollama/services/test_ollama_test_completion.py +++ b/python/tests/unit/connectors/ollama/services/test_ollama_test_completion.py @@ -23,7 +23,7 @@ async def test_complete(mock_post): mock_post.return_value = MockResponse(response="test_response") ollama = OllamaTextCompletion(ai_model_id="test_model") response = await ollama.complete( - "test_prompt", + "test prompt", OllamaTextPromptExecutionSettings(ai_model_id="test-model", options={"test": "test"}), ) assert response[0].text == "test_response" diff --git a/python/tests/unit/connectors/open_ai/services/test_azure_chat_completion.py b/python/tests/unit/connectors/open_ai/services/test_azure_chat_completion.py index 3290c5260ea2..76093804d46b 100644 --- a/python/tests/unit/connectors/open_ai/services/test_azure_chat_completion.py +++ b/python/tests/unit/connectors/open_ai/services/test_azure_chat_completion.py @@ -30,6 +30,8 @@ AzureDataSources, ExtraBody, ) +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.utils.chat import prepare_chat_history_for_request def test_azure_chat_completion_init() -> None: @@ -160,7 +162,8 @@ async def test_azure_chat_completion_call_with_parameters(mock_create) -> None: endpoint = "https://test-endpoint.com" api_key = "test_api_key" api_version = "2023-03-15-preview" - messages = [{"role": "user", "content": "hello world"}] + messages = ChatHistory() + messages.add_user_message("hello world") complete_prompt_execution_settings = AzureChatPromptExecutionSettings(service_id="test_service_id") azure_chat_completion = AzureChatCompletion( @@ -169,7 +172,7 @@ async def test_azure_chat_completion_call_with_parameters(mock_create) -> None: api_version=api_version, api_key=api_key, ) - await azure_chat_completion.complete_chat(messages=messages, settings=complete_prompt_execution_settings) + await azure_chat_completion.complete_chat(chat_history=messages, settings=complete_prompt_execution_settings) mock_create.assert_awaited_once_with( model=deployment_name, frequency_penalty=complete_prompt_execution_settings.frequency_penalty, @@ -180,7 +183,7 @@ async def test_azure_chat_completion_call_with_parameters(mock_create) -> None: stream=False, temperature=complete_prompt_execution_settings.temperature, top_p=complete_prompt_execution_settings.top_p, - messages=messages, + messages=prepare_chat_history_for_request(messages), ) @@ -195,7 +198,8 @@ async def test_azure_chat_completion_call_with_parameters_and_Logit_Bias_Defined api_version = "2023-03-15-preview" prompt = "hello world" - messages = [{"role": "user", "content": prompt}] + messages = ChatHistory() + messages.add_user_message(prompt) complete_prompt_execution_settings = AzureChatPromptExecutionSettings() token_bias = {"1": -100} @@ -208,11 
+212,11 @@ async def test_azure_chat_completion_call_with_parameters_and_Logit_Bias_Defined api_version=api_version, ) - await azure_chat_completion.complete_chat(messages=messages, settings=complete_prompt_execution_settings) + await azure_chat_completion.complete_chat(chat_history=messages, settings=complete_prompt_execution_settings) mock_create.assert_awaited_once_with( model=deployment_name, - messages=messages, + messages=prepare_chat_history_for_request(messages), temperature=complete_prompt_execution_settings.temperature, top_p=complete_prompt_execution_settings.top_p, n=complete_prompt_execution_settings.number_of_responses, @@ -248,7 +252,7 @@ async def test_azure_chat_completion_call_with_parameters_and_Stop_Defined( api_version=api_version, ) - await azure_chat_completion.complete(prompt, complete_prompt_execution_settings) + await azure_chat_completion.complete(prompt=prompt, settings=complete_prompt_execution_settings) mock_create.assert_awaited_once_with( model=deployment_name, @@ -307,8 +311,10 @@ async def test_azure_chat_completion_with_data_call_with_parameters( api_key = "test_api_key" api_version = "2023-03-15-preview" prompt = "hello world" - messages_in = [{"role": "user", "content": prompt}] - messages_out = [{"role": "user", "content": prompt}] + messages_in = ChatHistory() + messages_in.add_user_message(prompt) + messages_out = ChatHistory() + messages_out.add_user_message(prompt) expected_data_settings = { "dataSources": [ @@ -333,11 +339,11 @@ async def test_azure_chat_completion_with_data_call_with_parameters( use_extensions=True, ) - await azure_chat_completion.complete_chat(messages=messages_in, settings=complete_prompt_execution_settings) + await azure_chat_completion.complete_chat(chat_history=messages_in, settings=complete_prompt_execution_settings) mock_create.assert_awaited_once_with( model=deployment_name, - messages=messages_out, + messages=prepare_chat_history_for_request(messages_out), temperature=complete_prompt_execution_settings.temperature, frequency_penalty=complete_prompt_execution_settings.frequency_penalty, presence_penalty=complete_prompt_execution_settings.presence_penalty, @@ -360,7 +366,8 @@ async def test_azure_chat_completion_call_with_data_parameters_and_function_call api_key = "test_api_key" api_version = "2023-03-15-preview" prompt = "hello world" - messages = [{"role": "user", "content": prompt}] + messages = ChatHistory() + messages.add_user_message(prompt) ai_source = AzureAISearchDataSources(indexName="test-index", endpoint="test-endpoint", key="test-key") extra = ExtraBody(data_sources=[AzureDataSources(type="AzureCognitiveSearch", parameters=ai_source)]) @@ -381,7 +388,7 @@ async def test_azure_chat_completion_call_with_data_parameters_and_function_call ) await azure_chat_completion.complete_chat( - messages=messages, + chat_history=messages, settings=complete_prompt_execution_settings, ) @@ -389,7 +396,7 @@ async def test_azure_chat_completion_call_with_data_parameters_and_function_call mock_create.assert_awaited_once_with( model=deployment_name, - messages=messages, + messages=prepare_chat_history_for_request(messages), temperature=complete_prompt_execution_settings.temperature, top_p=complete_prompt_execution_settings.top_p, n=complete_prompt_execution_settings.number_of_responses, @@ -413,7 +420,8 @@ async def test_azure_chat_completion_call_with_data_with_parameters_and_Stop_Def endpoint = "https://test-endpoint.com" api_key = "test_api_key" api_version = "2023-03-15-preview" - messages = [{"role": "user", "content": 
"hello world"}] + messages = ChatHistory() + messages.add_user_message("hello world") complete_prompt_execution_settings = AzureChatPromptExecutionSettings() stop = ["!"] @@ -438,7 +446,7 @@ async def test_azure_chat_completion_call_with_data_with_parameters_and_Stop_Def mock_create.assert_awaited_once_with( model=deployment_name, - messages=messages, + messages=prepare_chat_history_for_request(messages), temperature=complete_prompt_execution_settings.temperature, top_p=complete_prompt_execution_settings.top_p, n=complete_prompt_execution_settings.number_of_responses, @@ -475,7 +483,8 @@ async def test_azure_chat_completion_content_filtering_raises_correct_exception( api_key = "test_api_key" api_version = "2023-03-15-preview" prompt = "some prompt that would trigger the content filtering" - messages = [{"role": "user", "content": prompt}] + messages = ChatHistory() + messages.add_user_message(prompt) complete_prompt_execution_settings = AzureChatPromptExecutionSettings() mock_create.side_effect = openai.BadRequestError( @@ -526,7 +535,8 @@ async def test_azure_chat_completion_content_filtering_without_response_code_rai api_key = "test_api_key" api_version = "2023-03-15-preview" prompt = "some prompt that would trigger the content filtering" - messages = [{"role": "user", "content": prompt}] + messages = ChatHistory() + messages.add_user_message(prompt) complete_prompt_execution_settings = AzureChatPromptExecutionSettings() mock_create.side_effect = openai.BadRequestError( diff --git a/python/tests/unit/core_plugins/test_text_plugin.py b/python/tests/unit/core_plugins/test_text_plugin.py index 36dfc438fe9c..35200f3b6893 100644 --- a/python/tests/unit/core_plugins/test_text_plugin.py +++ b/python/tests/unit/core_plugins/test_text_plugin.py @@ -9,13 +9,13 @@ def test_can_be_instantiated(): def test_can_be_imported(): kernel = sk.Kernel() assert kernel.import_plugin(TextPlugin(), "text_plugin") - assert not kernel.plugins["text_plugin"]["trim"].is_semantic + assert not kernel.plugins["text_plugin"]["trim"].is_prompt def test_can_be_imported_with_name(): kernel = sk.Kernel() assert kernel.import_plugin(TextPlugin(), "text") - assert not kernel.plugins["text"]["trim"].is_semantic + assert not kernel.plugins["text"]["trim"].is_prompt def test_can_trim(): diff --git a/python/tests/unit/functions/test_kernel_function_decorators.py b/python/tests/unit/functions/test_kernel_function_decorators.py index 495998bf0039..f4223079f84f 100644 --- a/python/tests/unit/functions/test_kernel_function_decorators.py +++ b/python/tests/unit/functions/test_kernel_function_decorators.py @@ -168,9 +168,7 @@ def test_kernel_function_return_type_streaming(): (Annotated[Optional[Union[str, int]], "test"], "test", "str, int", False), (str, "", "str", True), (Union[str, int, float, "KernelArguments"], "", "str, int, float, KernelArguments", True), - ("test-return-type", "", "test-return-type", True), ], - ids=["ann-str", "ann-opt-str", "ann-async-str", "ann-opt-str-int", "str", "union", "test"], ) def test_annotation_parsing(annotation, description, type_, required): out_description, out_type_, out_required = _parse_annotation(annotation) diff --git a/python/tests/unit/functions/test_kernel_function_metadata.py b/python/tests/unit/functions/test_kernel_function_metadata.py index 6a7503866c6e..e5c9f19679cd 100644 --- a/python/tests/unit/functions/test_kernel_function_metadata.py +++ b/python/tests/unit/functions/test_kernel_function_metadata.py @@ -11,10 +11,10 @@ def test_kernel_function_metadata(): plugin_name="plugin1", 
description="Semantic function", parameters=[], - is_semantic=True, + is_prompt=True, is_asynchronous=True, ) - assert function_metadata.is_semantic + assert function_metadata.is_prompt def test_kernel_function_metadata_defaults(): @@ -22,7 +22,7 @@ def test_kernel_function_metadata_defaults(): name="function1", plugin_name="plugin1", description="Semantic function", - is_semantic=True, + is_prompt=True, ) assert function_metadata.parameters == [] assert function_metadata.is_asynchronous @@ -34,7 +34,7 @@ def test_kernel_function_metadata_name_pattern_error(): name="-", plugin_name="plugin1", description="Semantic function", - is_semantic=True, + is_prompt=True, ) @@ -44,7 +44,7 @@ def test_kernel_function_metadata_name_empty_error(): name="", plugin_name="plugin1", description="Semantic function", - is_semantic=True, + is_prompt=True, ) @@ -53,13 +53,13 @@ def test_kernel_function_equals(): name="function1", plugin_name="plugin1", description="Semantic function", - is_semantic=True, + is_prompt=True, ) function_metadata_2 = KernelFunctionMetadata( name="function1", plugin_name="plugin1", description="Semantic function", - is_semantic=True, + is_prompt=True, ) assert function_metadata_1 == function_metadata_2 @@ -69,13 +69,13 @@ def test_kernel_function_not_equals(): name="function1", plugin_name="plugin1", description="Semantic function", - is_semantic=True, + is_prompt=True, ) function_metadata_2 = KernelFunctionMetadata( name="function2", plugin_name="plugin1", description="Semantic function", - is_semantic=True, + is_prompt=True, ) assert function_metadata_1 != function_metadata_2 @@ -85,7 +85,7 @@ def test_kernel_function_not_equals_other_object(): name="function1", plugin_name="plugin1", description="Semantic function", - is_semantic=True, + is_prompt=True, ) function_metadata_2 = KernelParameterMetadata(name="function2", description="Semantic function", default_value="") assert function_metadata_1 != function_metadata_2 diff --git a/python/tests/unit/functions/test_kernel_plugins.py b/python/tests/unit/functions/test_kernel_plugins.py index 941bc99edc99..bcf1c0ff1a47 100644 --- a/python/tests/unit/functions/test_kernel_plugins.py +++ b/python/tests/unit/functions/test_kernel_plugins.py @@ -3,12 +3,11 @@ import pytest +from semantic_kernel.connectors.ai import PromptExecutionSettings from semantic_kernel.functions.kernel_function import KernelFunction from semantic_kernel.functions.kernel_plugin import KernelPlugin -from semantic_kernel.prompt_template.chat_prompt_template import ChatPromptTemplate +from semantic_kernel.prompt_template.input_variable import InputVariable from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig -from semantic_kernel.prompt_template.semantic_function_config import SemanticFunctionConfig -from semantic_kernel.template_engine.prompt_template_engine import PromptTemplateEngine def test_throws_for_missing_name(): @@ -94,15 +93,28 @@ def mock_function(input: str) -> None: assert plugin[func.name] == func -def test_default_kernel_plugin_construction_with_semantic_function(): - prompt_config = PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.8) - prompt_template = ChatPromptTemplate("{{$user_input}}", PromptTemplateEngine(), prompt_config) - function_config = SemanticFunctionConfig(prompt_config, prompt_template) +def test_default_kernel_plugin_construction_with_prompt_function(): + req_settings = PromptExecutionSettings(extension_data={"max_tokens": 2000, "temperature": 0.7, "top_p": 0.8}) 
+ + prompt = "Use this input: {{$request}}" + + prompt_template_config = PromptTemplateConfig( + template=prompt, + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="request", description="The user input", is_required=True), + ], + execution_settings={"default": req_settings}, + ) expected_plugin_name = "test_plugin" expected_function_name = "mock_function" - semantic_function = KernelFunction.from_semantic_config( - plugin_name=expected_plugin_name, function_name=expected_function_name, function_config=function_config + semantic_function = KernelFunction.from_prompt( + prompt=prompt, + prompt_template_config=prompt_template_config, + plugin_name=expected_plugin_name, + function_name=expected_function_name, ) expected_plugin_description = "A unit test plugin" @@ -118,15 +130,27 @@ def test_default_kernel_plugin_construction_with_semantic_function(): def test_default_kernel_plugin_construction_with_both_function_types(): - # Construct a semantic function - prompt_config = PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.8) - prompt_template = ChatPromptTemplate("{{$user_input}}", PromptTemplateEngine(), prompt_config) - function_config = SemanticFunctionConfig(prompt_config, prompt_template) + req_settings = PromptExecutionSettings(extension_data={"max_tokens": 2000, "temperature": 0.7, "top_p": 0.8}) + + prompt = "Use this input: {{$request}}" + + prompt_template_config = PromptTemplateConfig( + template=prompt, + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="request", description="The user input", is_required=True), + ], + execution_settings={"default": req_settings}, + ) expected_plugin_name = "test_plugin" - expected_function_name = "mock_semantic_function" - semantic_function = KernelFunction.from_semantic_config( - plugin_name=expected_plugin_name, function_name=expected_function_name, function_config=function_config + expected_function_name = "mock_function" + semantic_function = KernelFunction.from_prompt( + prompt=prompt, + prompt_template_config=prompt_template_config, + plugin_name=expected_plugin_name, + function_name=expected_function_name, ) # Construct a nativate function @@ -169,15 +193,27 @@ def mock_function(input: str) -> None: def test_default_kernel_plugin_construction_with_same_function_names_throws(): - # Construct a semantic function - prompt_config = PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.8) - prompt_template = ChatPromptTemplate("{{$user_input}}", PromptTemplateEngine(), prompt_config) - function_config = SemanticFunctionConfig(prompt_config, prompt_template) + req_settings = PromptExecutionSettings(extension_data={"max_tokens": 2000, "temperature": 0.7, "top_p": 0.8}) + + prompt = "Use this input: {{$request}}" + + prompt_template_config = PromptTemplateConfig( + template=prompt, + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="request", description="The user input", is_required=True), + ], + execution_settings={"default": req_settings}, + ) expected_plugin_name = "test_plugin" expected_function_name = "mock_function" - semantic_function = KernelFunction.from_semantic_config( - plugin_name=expected_plugin_name, function_name=expected_function_name, function_config=function_config + semantic_function = KernelFunction.from_prompt( + prompt=prompt, + prompt_template_config=prompt_template_config, + plugin_name=expected_plugin_name, + 
function_name=expected_function_name, ) # Construct a nativate function diff --git a/python/tests/unit/functions/test_prompt_templates.py b/python/tests/unit/functions/test_prompt_templates.py index 9d8d422c5e3e..6a2b581d9d50 100644 --- a/python/tests/unit/functions/test_prompt_templates.py +++ b/python/tests/unit/functions/test_prompt_templates.py @@ -1,183 +1,98 @@ # Copyright (c) Microsoft. All rights reserved. -import json -import pytest +from typing import List -from semantic_kernel.connectors.ai.open_ai.models.chat_completion.open_ai_chat_message import ( - OpenAIChatMessage, -) -from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( - OpenAIChatPromptExecutionSettings, -) -from semantic_kernel.prompt_template.chat_prompt_template import ChatPromptTemplate +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.functions.kernel_parameter_metadata import KernelParameterMetadata +from semantic_kernel.prompt_template.input_variable import InputVariable from semantic_kernel.prompt_template.prompt_template_config import ( PromptTemplateConfig, ) -from semantic_kernel.template_engine.prompt_template_engine import PromptTemplateEngine - - -def test_default_prompt_template_config(): - prompt_template_config = PromptTemplateConfig() - assert prompt_template_config.schema_ == 1 - assert prompt_template_config.type == "completion" - assert prompt_template_config.description == "" - assert prompt_template_config.execution_settings.extension_data == {} - - -def test_default_chat_prompt_template_from_empty_dict(): - prompt_template_config = PromptTemplateConfig.from_dict({}) - assert prompt_template_config.schema_ == 1 - assert prompt_template_config.type == "completion" - assert prompt_template_config.description == "" - assert prompt_template_config.execution_settings.extension_data == {} - - -def test_default_chat_prompt_template_from_empty_string(): - with pytest.raises(json.decoder.JSONDecodeError): - _ = PromptTemplateConfig.from_json("") - - -def test_default_chat_prompt_template_from_empty_json(): - prompt_template_config = PromptTemplateConfig.from_dict({}) - assert prompt_template_config.schema_ == 1 - assert prompt_template_config.type == "completion" - assert prompt_template_config.description == "" - assert prompt_template_config.execution_settings.extension_data == {} - - -def test_custom_prompt_template_config(): - prompt_template_config = PromptTemplateConfig( - schema_=2, - type="completion2", - description="Custom description.", - execution_settings=OpenAIChatPromptExecutionSettings( - temperature=0.5, - top_p=0.5, - presence_penalty=0.5, - frequency_penalty=0.5, - max_tokens=128, - number_of_responses=2, - stop=["\n"], - logit_bias={"1": 1.0}, - ), - ) - assert prompt_template_config.schema_ == 2 - assert prompt_template_config.type == "completion2" - assert prompt_template_config.description == "Custom description." 
- assert prompt_template_config.execution_settings.temperature == 0.5 - assert prompt_template_config.execution_settings.top_p == 0.5 - assert prompt_template_config.execution_settings.presence_penalty == 0.5 - assert prompt_template_config.execution_settings.frequency_penalty == 0.5 - assert prompt_template_config.execution_settings.max_tokens == 128 - assert prompt_template_config.execution_settings.number_of_responses == 2 - assert prompt_template_config.execution_settings.stop == ["\n"] - assert prompt_template_config.execution_settings.logit_bias == {"1": 1.0} - - -def test_custom_prompt_template_config_from_dict(): - prompt_template_dict = { - "schema": 2, - "type": "completion2", - "description": "Custom description.", - "execution_settings": { - "default": { - "temperature": 0.5, - "top_p": 0.5, - "presence_penalty": 0.5, - "frequency_penalty": 0.5, - "max_tokens": 128, - "number_of_responses": 2, - "stop": ["\n"], - "logit_bias": {"1": 1}, - }, - }, - } - prompt_template_config = PromptTemplateConfig.from_dict(prompt_template_dict) - assert prompt_template_config.schema_ == 2 - assert prompt_template_config.type == "completion2" - assert prompt_template_config.description == "Custom description." - assert prompt_template_config.execution_settings.extension_data["temperature"] == 0.5 - assert prompt_template_config.execution_settings.extension_data["top_p"] == 0.5 - assert prompt_template_config.execution_settings.extension_data["presence_penalty"] == 0.5 - assert prompt_template_config.execution_settings.extension_data["frequency_penalty"] == 0.5 - assert prompt_template_config.execution_settings.extension_data["max_tokens"] == 128 - assert prompt_template_config.execution_settings.extension_data["number_of_responses"] == 2 - assert prompt_template_config.execution_settings.extension_data["stop"] == ["\n"] - assert prompt_template_config.execution_settings.extension_data["logit_bias"] == {"1": 1} - - -def test_custom_prompt_template_config_from_json(): - prompt_template_json = """ - { - "schema": 2, - "type": "completion2", - "description": "Custom description.", - "execution_settings": { - "default": { - "temperature": 0.5, - "top_p": 0.5, - "presence_penalty": 0.5, - "frequency_penalty": 0.5, - "max_tokens": 128, - "number_of_responses": 2, - "stop": ["s"], - "logit_bias": {"1": 1} - } - } - } - """ - prompt_template_config = PromptTemplateConfig[OpenAIChatPromptExecutionSettings].from_json(prompt_template_json) - assert prompt_template_config.schema_ == 2 - assert prompt_template_config.type == "completion2" - assert prompt_template_config.description == "Custom description." 
- assert prompt_template_config.execution_settings.temperature == 0.5 - assert prompt_template_config.execution_settings.top_p == 0.5 - assert prompt_template_config.execution_settings.presence_penalty == 0.5 - assert prompt_template_config.execution_settings.frequency_penalty == 0.5 - assert prompt_template_config.execution_settings.max_tokens == 128 - assert prompt_template_config.execution_settings.number_of_responses == 2 - assert prompt_template_config.execution_settings.stop == ["s"] - assert prompt_template_config.execution_settings.logit_bias == {"1": 1} - - -def test_chat_prompt_template(): - chat_prompt_template = ChatPromptTemplate( - "{{$user_input}}", - PromptTemplateEngine(), - prompt_config=PromptTemplateConfig(), - ) - assert chat_prompt_template.messages == [] - -def test_chat_prompt_template_with_messages(): - prompt_template_config = PromptTemplateConfig[OpenAIChatPromptExecutionSettings].from_execution_settings( - messages=[{"role": "system", "content": "Custom system prompt."}], +def test_prompt_template_config_initialization_minimal(): + config = PromptTemplateConfig(template="Example template") + assert config.template == "Example template" + assert config.name == "" + assert config.description == "" + assert config.template_format == "semantic-kernel" + assert config.input_variables == [] + assert config.execution_settings == PromptExecutionSettings() + + +def test_prompt_template_config_initialization_full(): + input_variables = [ + InputVariable( + name="var1", description="A variable", default="default_val", is_required=True, json_schema="string" + ) + ] + execution_settings = {"setting1": PromptExecutionSettings(setting_value="value1")} + config = PromptTemplateConfig( + name="Test Config", + description="Test Description", + template="Example template", + template_format="custom-format", + input_variables=input_variables, + execution_settings=execution_settings, ) - chat_prompt_template = ChatPromptTemplate[OpenAIChatMessage]( - "{{$user_input}}", - PromptTemplateEngine(), - prompt_config=prompt_template_config, - parse_messages=True, + assert config.name == "Test Config" + assert config.description == "Test Description" + assert config.template_format == "custom-format" + assert len(config.input_variables) == 1 + assert config.execution_settings is not None + + +def test_add_execution_settings(): + config = PromptTemplateConfig(template="Example template") + new_settings = PromptExecutionSettings(setting_value="new_value") + config.add_execution_settings(new_settings) + assert config.execution_settings == new_settings + + +def test_get_kernel_parameter_metadata_empty(): + config = PromptTemplateConfig(template="Example template") + metadata = config.get_kernel_parameter_metadata() + assert metadata == [] + + +def test_get_kernel_parameter_metadata_with_variables(): + input_variables = [ + InputVariable( + name="var1", description="A variable", default="default_val", is_required=True, json_schema="string" + ) + ] + config = PromptTemplateConfig(template="Example template", input_variables=input_variables) + metadata: List[KernelParameterMetadata] = config.get_kernel_parameter_metadata() + assert len(metadata) == 1 + assert metadata[0].name == "var1" + assert metadata[0].description == "A variable" + assert metadata[0].default_value == "default_val" + assert metadata[0].type_ == "string" + assert metadata[0].required is True + + +def test_restore(): + name = "Test Template" + description = "This is a test template." + template = "Hello, {{$name}}!" 
+ input_variables = [InputVariable(name="name", description="Name of the person to greet", type="string")] + execution_settings = PromptExecutionSettings(timeout=30, max_tokens=100) + + restored_template = PromptTemplateConfig.restore( + name=name, + description=description, + template=template, + input_variables=input_variables, + execution_settings=execution_settings, ) - print(chat_prompt_template.messages) - assert len(chat_prompt_template.messages) == 1 - assert chat_prompt_template.messages[0].role == "system" - assert chat_prompt_template.messages[0].content_template.template == "Custom system prompt." - -def test_chat_prompt_template_with_system_prompt(): - prompt_template_config = PromptTemplateConfig[OpenAIChatPromptExecutionSettings].from_execution_settings( - chat_system_prompt="Custom system prompt.", - ) - chat_prompt_template = ChatPromptTemplate[OpenAIChatMessage]( - "{{$user_input}}", - PromptTemplateEngine(), - prompt_config=prompt_template_config, - parse_chat_system_prompt=True, - ) - print(chat_prompt_template.messages) - assert len(chat_prompt_template.messages) == 1 - assert chat_prompt_template.messages[0].role == "system" - assert chat_prompt_template.messages[0].content_template.template == "Custom system prompt." + assert restored_template.name == name, "The name attribute does not match the expected value." + assert restored_template.description == description, "The description attribute does not match the expected value." + assert restored_template.template == template, "The template attribute does not match the expected value." + assert ( + restored_template.input_variables == input_variables + ), "The input_variables attribute does not match the expected value." + assert ( + restored_template.execution_settings == execution_settings + ), "The execution_settings attribute does not match the expected value." diff --git a/python/tests/unit/kernel/test_kernel_invoke_filters.py b/python/tests/unit/kernel/test_kernel_invoke_filters.py index b659aeaef74c..f126d1062157 100644 --- a/python/tests/unit/kernel/test_kernel_invoke_filters.py +++ b/python/tests/unit/kernel/test_kernel_invoke_filters.py @@ -20,7 +20,7 @@ def create_mock_function(name) -> KernelFunction: plugin_name="SummarizePlugin", description="Summarize an input", parameters=[], - is_semantic=True, + is_prompt=True, is_asynchronous=True, ) mock_function = Mock(spec=KernelFunction) diff --git a/python/tests/unit/kernel/test_kernel_service_management.py b/python/tests/unit/kernel/test_kernel_service_management.py index 515274f62811..745453f79910 100644 --- a/python/tests/unit/kernel/test_kernel_service_management.py +++ b/python/tests/unit/kernel/test_kernel_service_management.py @@ -1,3 +1,5 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ import sys from typing import Union diff --git a/python/tests/unit/kernel_extensions/test_import_plugins.py b/python/tests/unit/kernel_extensions/test_import_plugins.py index 78f7e9effb21..89f70df02d53 100644 --- a/python/tests/unit/kernel_extensions/test_import_plugins.py +++ b/python/tests/unit/kernel_extensions/test_import_plugins.py @@ -5,7 +5,9 @@ import semantic_kernel as sk import semantic_kernel.connectors.ai.open_ai as sk_oai +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.functions.kernel_function_decorator import kernel_function +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig def test_plugin_can_be_imported(): @@ -13,18 +15,20 @@ def test_plugin_can_be_imported(): kernel = sk.Kernel() api_key = "test-api-key" org_id = "test-org-id" + service_id = "text-davinci-003" kernel.add_service( - sk_oai.OpenAITextCompletion("text-davinci-003", api_key, org_id, service_id="test-completion-service"), + sk_oai.OpenAITextCompletion(service_id, api_key, org_id, service_id="test-completion-service"), ) # import plugins plugins_directory = os.path.join(os.path.dirname(__file__), "../..", "test_plugins") # path to plugins directory - plugin = kernel.import_semantic_plugin_from_directory(plugins_directory, "TestPlugin") + plugin = kernel.import_plugin_from_prompt_directory(service_id, plugins_directory, "TestPlugin") assert plugin is not None assert len(plugin.functions) == 1 - assert plugin.functions.get("TestFunction") is not None + func = plugin.functions["TestFunction"] + assert func is not None def test_native_plugin_can_be_imported(): @@ -44,7 +48,7 @@ def test_native_plugin_can_be_imported(): assert plugin_config.description == "Echo for input text" -def test_create_semantic_function_succeeds(): +def test_create_function_from_prompt_succeeds(): # create a kernel kernel = sk.Kernel() @@ -69,7 +73,7 @@ def generate_names(self) -> str: # import plugins _ = kernel.import_plugin(GenerateNamesPlugin(), plugin_name="GenerateNames") - sk_prompt = """ + prompt = """ Write a short story about two Corgis on an adventure. The story must be: - G rated @@ -80,16 +84,19 @@ def generate_names(self) -> str: - The two names of the corgis are {{GenerateNames.generate_names}} """ - print(sk_prompt) + print(prompt) - test_func = kernel.create_semantic_function( - prompt_template=sk_prompt, + exec_settings = PromptExecutionSettings(extension_data={"max_tokens": 500, "temperature": 0.5, "top_p": 0.5}) + + prompt_template_config = PromptTemplateConfig( + template=prompt, description="Write a short story.", execution_settings={"default": exec_settings} + ) + + test_func = kernel.create_function_from_prompt( + prompt_template_config=prompt_template_config, function_name="TestFunction", plugin_name="TestPlugin", description="Write a short story.", - max_tokens=500, - temperature=0.5, - top_p=0.5, ) assert len(test_func.plugins) > 0 diff --git a/python/tests/unit/models/ai/chat_completion/test_chat_history.py b/python/tests/unit/models/ai/chat_completion/test_chat_history.py new file mode 100644 index 000000000000..8dc7dc77fbf9 --- /dev/null +++ b/python/tests/unit/models/ai/chat_completion/test_chat_history.py @@ -0,0 +1,173 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import pytest + +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory +from semantic_kernel.models.ai.chat_completion.chat_role import ChatRole + + +def test_init_with_system_message_only(): + system_msg = "test message" + chat_history = ChatHistory(system_message=system_msg) + assert len(chat_history.messages) == 1 + assert chat_history.messages[0].content == system_msg + + +def test_init_with_messages_only(): + msgs = [ChatMessageContent(role=ChatRole.USER, content=f"Message {i}") for i in range(3)] + chat_history = ChatHistory(messages=msgs) + assert chat_history.messages == msgs, "Chat history should contain exactly the provided messages" + + +def test_init_with_messages_and_system_message(): + system_msg = "a test system prompt" + msgs = [ChatMessageContent(role=ChatRole.USER, content=f"Message {i}") for i in range(3)] + chat_history = ChatHistory(messages=msgs, system_message=system_msg) + assert chat_history.messages[0].role == ChatRole.SYSTEM, "System message should be the first in history" + assert chat_history.messages[0].content == system_msg, "System message should be the first in history" + assert chat_history.messages[1:] == msgs, "Remaining messages should follow the system message" + + +def test_init_without_messages_and_system_message(): + chat_history = ChatHistory() + assert chat_history.messages == [], "Chat history should be empty if no messages and system_message are provided" + + +def test_add_system_message(): + chat_history = ChatHistory() + content = "System message" + chat_history.add_system_message(content) + assert chat_history.messages[-1].content == content + assert chat_history.messages[-1].role == ChatRole.SYSTEM + + +def test_add_system_message_at_init(): + chat_history = ChatHistory() + content = "System message" + chat_history = ChatHistory(system_message=content) + assert chat_history.messages[-1].content == content + assert chat_history.messages[-1].role == ChatRole.SYSTEM + + +def test_add_user_message(): + chat_history = ChatHistory() + content = "User message" + chat_history.add_user_message(content) + assert chat_history.messages[-1].content == content + assert chat_history.messages[-1].role == ChatRole.USER + + +def test_add_assistant_message(): + chat_history = ChatHistory() + content = "Assistant message" + chat_history.add_assistant_message(content) + assert chat_history.messages[-1].content == content + assert chat_history.messages[-1].role == ChatRole.ASSISTANT + + +def test_add_tool_message(): + chat_history = ChatHistory() + content = "Tool message" + chat_history.add_tool_message(content) + assert chat_history.messages[-1].content == content + assert chat_history.messages[-1].role == ChatRole.TOOL + + +def test_add_message(): + chat_history = ChatHistory() + content = "Test message" + role = ChatRole.USER + encoding = "utf-8" + chat_history.add_message(message={"role": role, "content": content}, encoding=encoding) + assert chat_history.messages[-1].content == content + assert chat_history.messages[-1].role == role + assert chat_history.messages[-1].encoding == encoding + + +def test_remove_message(): + chat_history = ChatHistory() + content = "Message to remove" + role = ChatRole.USER + encoding = "utf-8" + message = ChatMessageContent(role=role, content=content, encoding=encoding) + chat_history.messages.append(message) + assert chat_history.remove_message(message) is True + assert message not in chat_history.messages + + +def 
test_len(): + chat_history = ChatHistory() + content = "Message" + chat_history.add_user_message(content) + chat_history.add_system_message(content) + assert len(chat_history) == 2 + + +def test_getitem(): + chat_history = ChatHistory() + content = "Message for index" + chat_history.add_user_message(content) + assert chat_history[0].content == content + + +def test_contains(): + chat_history = ChatHistory() + content = "Message to check" + role = ChatRole.USER + encoding = "utf-8" + message = ChatMessageContent(role=role, content=content, encoding=encoding) + chat_history.messages.append(message) + assert message in chat_history + + +def test_iter(): + chat_history = ChatHistory() + messages = ["Message 1", "Message 2"] + for msg in messages: + chat_history.add_user_message(msg) + for i, message in enumerate(chat_history): + assert message.content == messages[i] + + +def test_eq(): + # Create two instances of ChatHistory + chat_history1 = ChatHistory() + chat_history2 = ChatHistory() + + # Populate both instances with the same set of messages + messages = [("Message 1", ChatRole.USER), ("Message 2", ChatRole.ASSISTANT)] + for content, role in messages: + chat_history1.add_message({"role": role, "content": content}) + chat_history2.add_message({"role": role, "content": content}) + + # Assert that the two instances are considered equal + assert chat_history1 == chat_history2 + + # Additionally, test inequality by adding an extra message to one of the histories + chat_history1.add_user_message("Extra message") + assert chat_history1 != chat_history2 + + +def test_serialize(): + system_msg = "a test system prompt" + msgs = [ChatMessageContent(role=ChatRole.USER, content=f"Message {i}") for i in range(3)] + chat_history = ChatHistory(messages=msgs, system_message=system_msg) + json_str = chat_history.serialize() + assert json_str is not None + + +def test_serialize_and_deserialize_to_chat_history(): + system_msg = "a test system prompt" + msgs = [ChatMessageContent(role=ChatRole.USER, content=f"Message {i}") for i in range(3)] + chat_history = ChatHistory(messages=msgs, system_message=system_msg) + json_str = chat_history.serialize() + new_chat_history = ChatHistory.restore_chat_history(json_str) + assert new_chat_history == chat_history + + +def test_deserialize_invalid_json_raises_exception(): + invalid_json = "invalid json" + + with pytest.raises(ValueError): + ChatHistory.restore_chat_history(invalid_json) diff --git a/python/tests/unit/models/ai/chat_completion/test_chat_message.py b/python/tests/unit/models/ai/chat_completion/test_chat_message.py index 385e9d3163c9..04c4fc7c2231 100644 --- a/python/tests/unit/models/ai/chat_completion/test_chat_message.py +++ b/python/tests/unit/models/ai/chat_completion/test_chat_message.py @@ -1,11 +1,7 @@ -import pytest +# Copyright (c) Microsoft. All rights reserved. + -from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.models.ai.chat_completion.chat_message import ChatMessage -from semantic_kernel.prompt_template.prompt_template import PromptTemplate -from semantic_kernel.prompt_template.prompt_template_config import ( - PromptTemplateConfig, -) def test_chat_message(): @@ -15,25 +11,3 @@ def test_chat_message(): assert message.fixed_content is None assert message.content is None assert message.content_template is None - - -@pytest.mark.asyncio -async def test_chat_message_rendering(create_kernel): - # Test initialization with custom values - kernel = create_kernel - expected_content = "Hello, world!" 
- prompt_config = PromptTemplateConfig.from_execution_settings(max_tokens=2000, temperature=0.7, top_p=0.8) - content_template = PromptTemplate("Hello, {{$input}}!", kernel.prompt_template_engine, prompt_config) - - message = ChatMessage( - role="user", - content_template=content_template, - ) - arguments = KernelArguments(input="world") - await message.render_message(kernel, arguments) - assert message.role == "user" - assert message.fixed_content == expected_content - assert message.content_template == content_template - - # Test content property - assert message.content == expected_content diff --git a/python/tests/unit/planners/action_planner/test_action_planner.py b/python/tests/unit/planners/action_planner/test_action_planner.py index 38fe55bffab0..a52501815bc5 100644 --- a/python/tests/unit/planners/action_planner/test_action_planner.py +++ b/python/tests/unit/planners/action_planner/test_action_planner.py @@ -27,7 +27,7 @@ def create_mock_function(kernel_function_metadata: KernelFunctionMetadata) -> Mo mock_function.describe.return_value = kernel_function_metadata mock_function.name = kernel_function_metadata.name mock_function.plugin_name = kernel_function_metadata.plugin_name - mock_function.is_semantic = kernel_function_metadata.is_semantic + mock_function.is_prompt = kernel_function_metadata.is_prompt mock_function.description = kernel_function_metadata.description mock_function.prompt_execution_settings = PromptExecutionSettings() return mock_function @@ -46,13 +46,13 @@ def mock_kernel(plugins_input): mock_plugins = {} - for name, plugin_name, description, is_semantic in plugins_input: + for name, plugin_name, description, is_prompt in plugins_input: kernel_function_metadata = KernelFunctionMetadata( name=name, plugin_name=plugin_name, description=description, parameters=[], - is_semantic=is_semantic, + is_prompt=is_prompt, is_asynchronous=True, ) mock_function = create_mock_function(kernel_function_metadata) @@ -96,7 +96,7 @@ async def test_plan_creation(): name="Translate", description="Translate something", plugin_name="WriterPlugin", - is_semantic=False, + is_prompt=False, parameters=[], ) mock_function = create_mock_function(kernel_function_metadata) @@ -106,7 +106,7 @@ async def test_plan_creation(): function_result = FunctionResult(function=kernel_function_metadata, value=plan_str, metadata={}) mock_function.invoke.return_value = function_result - kernel.create_semantic_function.return_value = mock_function + kernel.create_function_from_prompt.return_value = mock_function planner = ActionPlanner(kernel) plan = await planner.create_plan(goal) @@ -187,7 +187,7 @@ async def test_empty_goal_throw(): name="Translate", description="Translate something", plugin_name="WriterPlugin", - is_semantic=False, + is_prompt=False, parameters=[], ) mock_function = create_mock_function(kernel_function_metadata) @@ -214,7 +214,7 @@ async def test_invalid_json_throw(): name="Translate", plugin_name="WriterPlugin", description="Translate something", - is_semantic=False, + is_prompt=False, parameters=[], ) mock_function = create_mock_function(kernel_function_metadata) @@ -224,7 +224,7 @@ async def test_invalid_json_throw(): function_result = FunctionResult(function=kernel_function_metadata, value=plan_str, metadata={}) mock_function.invoke.return_value = function_result - kernel.create_semantic_function.return_value = mock_function + kernel.create_function_from_prompt.return_value = mock_function planner = ActionPlanner(kernel) diff --git 
a/python/tests/unit/planners/sequential_planner/test_sequential_planner.py b/python/tests/unit/planners/sequential_planner/test_sequential_planner.py index 39f923ee0879..d5a963289d8a 100644 --- a/python/tests/unit/planners/sequential_planner/test_sequential_planner.py +++ b/python/tests/unit/planners/sequential_planner/test_sequential_planner.py @@ -25,7 +25,7 @@ def create_mock_function(kernel_function_metadata: KernelFunctionMetadata): mock_function.describe.return_value = kernel_function_metadata mock_function.name = kernel_function_metadata.name mock_function.plugin_name = kernel_function_metadata.plugin_name - mock_function.is_semantic = kernel_function_metadata.is_semantic + mock_function.is_prompt = kernel_function_metadata.is_prompt mock_function.description = kernel_function_metadata.description mock_function.prompt_execution_settings = PromptExecutionSettings() return mock_function @@ -51,13 +51,13 @@ async def test_it_can_create_plan(goal): functions_list = [] kernel.plugins = KernelPluginCollection() mock_functions = [] - for name, pluginName, description, isSemantic in input: + for name, pluginName, description, is_prompt in input: kernel_function_metadata = KernelFunctionMetadata( name=name, plugin_name=pluginName, description=description, parameters=[], - is_semantic=isSemantic, + is_prompt=is_prompt, is_asynchronous=True, ) mock_function = create_mock_function(kernel_function_metadata) @@ -85,14 +85,14 @@ async def test_it_can_create_plan(goal): mock_function_flow_function = Mock(spec=KernelFunction) mock_function_flow_function.invoke.return_value = FunctionResult( function=KernelFunctionMetadata( - name="func", plugin_name="plugin", description="", parameters=[], is_semantic=False + name="func", plugin_name="plugin", description="", parameters=[], is_prompt=False ), value=plan_string, metadata={}, ) - kernel.register_semantic_function.return_value = mock_function_flow_function + kernel.create_function_from_prompt.return_value = mock_function_flow_function - planner = SequentialPlanner(kernel) + planner = SequentialPlanner(kernel, service_id="test") # Act plan = await planner.create_plan(goal) @@ -111,7 +111,7 @@ async def test_empty_goal_throws(): # Arrange kernel = Mock(spec=Kernel) kernel.prompt_template_engine = Mock() - planner = SequentialPlanner(kernel) + planner = SequentialPlanner(kernel, service_id="test") # Act & Assert with pytest.raises(PlanningException): @@ -133,7 +133,7 @@ async def test_invalid_xml_throws(): plan_string = "notvalid<" function_result = FunctionResult( function=KernelFunctionMetadata( - name="func", plugin_name="plugin", description="", parameters=[], is_semantic=False + name="func", plugin_name="plugin", description="", parameters=[], is_prompt=False ), value=plan_string, metadata={}, @@ -143,9 +143,9 @@ async def test_invalid_xml_throws(): mock_function_flow_function.invoke.return_value = function_result kernel.plugins = plugins - kernel.register_semantic_function.return_value = mock_function_flow_function + kernel.create_function_from_prompt.return_value = mock_function_flow_function - planner = SequentialPlanner(kernel) + planner = SequentialPlanner(kernel, service_id="test") # Act & Assert with pytest.raises(PlanningException): diff --git a/python/tests/unit/planners/sequential_planner/test_sequential_planner_extensions.py b/python/tests/unit/planners/sequential_planner/test_sequential_planner_extensions.py index b7cae6d0d601..c8a00b93903f 100644 --- a/python/tests/unit/planners/sequential_planner/test_sequential_planner_extensions.py 
+++ b/python/tests/unit/planners/sequential_planner/test_sequential_planner_extensions.py @@ -67,7 +67,7 @@ async def test_can_call_get_available_functions_with_functions(): plugin_name="pluginName", description="description", parameters=[], - is_semantic=True, + is_prompt=True, is_asynchronous=False, ) native_kernel_function_metadata = KernelFunctionMetadata( @@ -75,7 +75,7 @@ async def test_can_call_get_available_functions_with_functions(): plugin_name="pluginName", description="description", parameters=[], - is_semantic=False, + is_prompt=False, is_asynchronous=False, ) functions_list.append(kernel_function_metadata) @@ -86,8 +86,6 @@ async def test_can_call_get_available_functions_with_functions(): kernel.plugins = mock_plugins - kernel.plugins = mock_plugins - memory_query_result = MemoryQueryResult( is_reference=False, id=SequentialPlannerFunctionViewExtension.to_fully_qualified_name(kernel_function_metadata), @@ -137,7 +135,7 @@ async def test_can_call_get_available_functions_with_functions_and_relevancy(): plugin_name="pluginName", description="description", parameters=[], - is_semantic=True, + is_prompt=True, is_asynchronous=False, ) native_kernel_function_metadata = KernelFunctionMetadata( @@ -145,7 +143,7 @@ async def test_can_call_get_available_functions_with_functions_and_relevancy(): plugin_name="pluginName", description="description", parameters=[], - is_semantic=False, + is_prompt=False, is_asynchronous=False, ) functions_list.append(kernel_function_metadata) diff --git a/python/tests/unit/planners/sequential_planner/test_sequential_planner_parser.py b/python/tests/unit/planners/sequential_planner/test_sequential_planner_parser.py index 59e824c0a624..e8ec89a3d31c 100644 --- a/python/tests/unit/planners/sequential_planner/test_sequential_planner_parser.py +++ b/python/tests/unit/planners/sequential_planner/test_sequential_planner_parser.py @@ -22,7 +22,7 @@ def create_mock_function(kernel_function_metadata: KernelFunctionMetadata) -> Ke mock_function.name = kernel_function_metadata.name mock_function.plugin_name = kernel_function_metadata.plugin_name mock_function.description = kernel_function_metadata.description - mock_function.is_semantic = kernel_function_metadata.is_semantic + mock_function.is_prompt = kernel_function_metadata.is_prompt mock_function.prompt_execution_settings = PromptExecutionSettings() return mock_function @@ -30,13 +30,13 @@ def create_mock_function(kernel_function_metadata: KernelFunctionMetadata) -> Ke def create_kernel_and_functions_mock(functions) -> Kernel: kernel = Kernel() functions_list = [] - for name, plugin_name, description, is_semantic, result_string in functions: + for name, plugin_name, description, is_prompt, result_string in functions: kernel_function_metadata = KernelFunctionMetadata( name=name, plugin_name=plugin_name, description=description, parameters=[], - is_semantic=is_semantic, + is_prompt=is_prompt, is_asynchronous=True, ) functions_list.append(kernel_function_metadata) diff --git a/python/tests/unit/planners/test_plan_creation.py b/python/tests/unit/planners/test_plan_creation.py index a9fdae969b55..4dbec64abee1 100644 --- a/python/tests/unit/planners/test_plan_creation.py +++ b/python/tests/unit/planners/test_plan_creation.py @@ -15,7 +15,7 @@ def test_create_empty_plan(): assert plan.description == "" assert plan.function is None assert type(plan.parameters) is KernelArguments - assert plan.is_semantic is None + assert plan.is_prompt is None assert plan.is_native is None assert plan.prompt_execution_settings is None 
assert plan.has_next_step is False @@ -32,7 +32,7 @@ def test_create_plan_with_name(): assert plan.description == "" assert plan.function is None assert type(plan.parameters) is KernelArguments - assert plan.is_semantic is None + assert plan.is_prompt is None assert plan.is_native is None assert plan.prompt_execution_settings is None assert plan.has_next_step is False @@ -49,7 +49,7 @@ def test_create_plan_with_name_and_description(): assert plan.description == "test description" assert plan.function is None assert type(plan.parameters) is KernelArguments - assert plan.is_semantic is None + assert plan.is_prompt is None assert plan.is_native is None assert plan.prompt_execution_settings is None assert plan.has_next_step is False @@ -66,7 +66,7 @@ def test_create_plan_with_state_and_parameters(): assert plan.description == "" assert plan.function is None assert plan.parameters["test_param"] == "test_param_val" - assert plan.is_semantic is None + assert plan.is_prompt is None assert plan.is_native is None assert plan.prompt_execution_settings is None assert plan.has_next_step is False @@ -92,8 +92,8 @@ def test_create_plan_with_name_and_function(): assert plan.description == test_function.description assert plan.function is test_function assert type(plan.parameters) is KernelArguments - assert plan.is_semantic is test_function.is_semantic - assert plan.is_native is not test_function.is_semantic + assert plan.is_prompt is test_function.is_prompt + assert plan.is_native is not test_function.is_prompt assert plan.prompt_execution_settings == test_function.prompt_execution_settings assert plan.has_next_step is False assert plan.next_step_index == 0 @@ -121,7 +121,7 @@ def test_create_multistep_plan_with_functions(): assert plan.description == "" assert plan.function is None assert type(plan.parameters) is KernelArguments - assert plan.is_semantic is None + assert plan.is_prompt is None assert plan.is_native is None assert plan.prompt_execution_settings is None assert plan.has_next_step is True @@ -152,7 +152,7 @@ def test_create_multistep_plan_with_plans(): assert plan.description == "" assert plan.function is None assert type(plan.parameters) is KernelArguments - assert plan.is_semantic is None + assert plan.is_prompt is None assert plan.is_native is None assert plan.prompt_execution_settings is None assert plan.has_next_step is True @@ -180,8 +180,8 @@ def test_add_step_to_plan(): assert plan.description == test_function1.description assert plan.function is test_function1 assert type(plan.parameters) is KernelArguments - assert plan.is_semantic is test_function1.is_semantic - assert plan.is_native is not test_function1.is_semantic + assert plan.is_prompt is test_function1.is_prompt + assert plan.is_native is not test_function1.is_prompt assert plan.prompt_execution_settings == test_function1.prompt_execution_settings assert plan.has_next_step is True assert plan.next_step_index == 0 diff --git a/python/tests/unit/template_engine/blocks/test_code_block.py b/python/tests/unit/template_engine/blocks/test_code_block.py index 5222e42d87aa..9bc1cabb251e 100644 --- a/python/tests/unit/template_engine/blocks/test_code_block.py +++ b/python/tests/unit/template_engine/blocks/test_code_block.py @@ -67,13 +67,13 @@ def invoke(): raise Exception("error") function = KernelFunction( - function_name="functionName", + function_name="funcName", plugin_name="pluginName", description="", function=invoke, parameters=[], return_parameter=None, - is_semantic=False, + is_prompt=False, ) dkp = 
KernelPlugin(name="test", functions=[function]) @@ -138,7 +138,7 @@ def invoke(arguments: KernelArguments): function=invoke, parameters=[KernelParameterMetadata(name="arguments", description="", default_value=None, required=True)], return_parameter=None, - is_semantic=False, + is_prompt=False, ) dkp = KernelPlugin(name="test", functions=[function]) @@ -194,7 +194,7 @@ def invoke(arguments): description="", parameters=[KernelParameterMetadata(name="arguments", description="", default_value=None, required=True)], return_parameter=None, - is_semantic=False, + is_prompt=False, ) dkp = KernelPlugin(name="test", functions=[function]) @@ -240,7 +240,7 @@ def invoke(arguments): description="", parameters=[KernelParameterMetadata(name="arguments", description="", default_value=None, required=True)], return_parameter=None, - is_semantic=False, + is_prompt=False, ) dkp = KernelPlugin(name="test", functions=[function]) @@ -295,7 +295,7 @@ def invoke(input, arg1, arg2): KernelParameterMetadata(name="arg2", description="", default_value=None, required=True), ], return_parameter=None, - is_semantic=False, + is_prompt=False, ) dkp = KernelPlugin(name="test", functions=[function]) @@ -341,7 +341,7 @@ def invoke(arg1, arg2): KernelParameterMetadata(name="arg2", description="", default_value=None, required=True), ], return_parameter=None, - is_semantic=False, + is_prompt=False, ) dkp = KernelPlugin(name="test", functions=[function]) @@ -379,7 +379,7 @@ def invoke(): description="", parameters=[], return_parameter=None, - is_semantic=False, + is_prompt=False, ) dkp = KernelPlugin(name="test", functions=[function]) diff --git a/python/tests/unit/test_serialization.py b/python/tests/unit/test_serialization.py index 5a8649d2a278..ff8d79a9170a 100644 --- a/python/tests/unit/test_serialization.py +++ b/python/tests/unit/test_serialization.py @@ -25,6 +25,7 @@ from semantic_kernel.kernel_pydantic import KernelBaseModel from semantic_kernel.memory.null_memory import NullMemory from semantic_kernel.memory.semantic_text_memory_base import SemanticTextMemoryBase +from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory from semantic_kernel.template_engine.blocks.block import Block from semantic_kernel.template_engine.blocks.block_types import BlockTypes from semantic_kernel.template_engine.blocks.code_block import CodeBlock @@ -64,6 +65,9 @@ def my_function(arguments: KernelArguments) -> str: return KernelFunction.from_native_method(my_function, "plugin") + def create_chat_history() -> ChatHistory: + return ChatHistory() + def create_plugin_collection() -> KernelPluginCollection: """Return a plugin collection.""" # TODO: Add a few plugins to this collection. 
@@ -90,9 +94,10 @@ def create_plugin_collection() -> KernelPluginCollection: plugin_name="bar", description="baz", parameters=[KernelParameterMetadata(name="qux", description="bar", default_value="baz")], - is_semantic=True, + is_prompt=True, is_asynchronous=False, ), + ChatHistory: create_chat_history(), KernelPluginCollection: create_plugin_collection(), NullMemory: NullMemory(), KernelFunction: create_kernel_function(), @@ -140,6 +145,7 @@ def constructor(cls: t.Type[_Serializable]) -> _Serializable: KernelParameterMetadata, KernelFunctionMetadata, KernelPluginCollection, + ChatHistory, pytest.param( KernelFunction, marks=pytest.mark.xfail(reason="Need to implement Pickle serialization."), diff --git a/python/tests/unit/text/test_function_extension.py b/python/tests/unit/text/test_function_extension.py index f70ebb3b56ff..2e32d1150b87 100644 --- a/python/tests/unit/text/test_function_extension.py +++ b/python/tests/unit/text/test_function_extension.py @@ -2,28 +2,42 @@ import semantic_kernel.connectors.ai.open_ai as sk_oai from semantic_kernel import Kernel +from semantic_kernel.connectors.ai import PromptExecutionSettings from semantic_kernel.functions.function_result import FunctionResult from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.prompt_template.input_variable import InputVariable +from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig from semantic_kernel.text import aggregate_chunked_results @pytest.mark.asyncio async def test_aggregate_results(): kernel = Kernel() - kernel.add_service(sk_oai.OpenAITextCompletion("text-davinci-002", "none", "none")) - sk_prompt = """ + kernel.add_service(sk_oai.OpenAITextCompletion("text-davinci-002", "none", "none", service_id="text-davinci-002")) + prompt = """ {{$input}} How is that ? """ - func = kernel.create_semantic_function( - sk_prompt, - service_id="text-davinci-002", - max_tokens=200, - temperature=0, - top_p=0.5, + req_settings = PromptExecutionSettings( + service_id="text-davinci-002", extension_data={"max_tokens": 2000, "temperature": 0.7, "top_p": 0.8} ) - func.function = lambda function, kernel, arguments, service, request_settings: FunctionResult( + + prompt_template_config = PromptTemplateConfig( + template=prompt, + name="chat", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="request", description="The user input", is_required=True), + ], + execution_settings=req_settings, + ) + + func = kernel.create_function_from_prompt( + prompt_template_config=prompt_template_config, + ) + + func.function = lambda function, kernel, arguments, service, request_settings, chat_history: FunctionResult( function=function, value=arguments["input"], metadata={} )
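For readers skimming the test changes above, here is a minimal, self-contained sketch of the new `ChatHistory` API that `test_chat_history.py` exercises. It assumes the module path introduced in this PR (`semantic_kernel.models.ai.chat_completion.chat_history`) and is an illustration of the tested behavior, not an authoritative usage guide:

```python
# Minimal sketch of the ChatHistory API exercised by test_chat_history.py above.
# Assumes the module layout introduced in this PR; illustration only.
from semantic_kernel.models.ai.chat_completion.chat_history import ChatHistory

# Start a history with an optional system message, then append turns.
history = ChatHistory(system_message="You are a helpful assistant.")
history.add_user_message("hello world")
history.add_assistant_message("Hi! How can I help?")

# The history behaves like a sequence of ChatMessageContent items.
assert len(history) == 3
assert history[1].content == "hello world"

# Round-trip through JSON, mirroring test_serialize_and_deserialize_to_chat_history.
json_str = history.serialize()
restored = ChatHistory.restore_chat_history(json_str)
assert restored == history
```

As the updated Azure chat-completion tests show, a `ChatHistory` instance (rather than a raw list of role/content dicts) is now what gets passed to the completion clients, and helpers such as `prepare_chat_history_for_request` convert it to the wire format expected by the underlying API.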