Python: #6499 Mistral AI Function Calling #8016

Merged
Changes from 35 commits
Commits
38 commits
ec3ebb6
added non streaming function calling
nmoeller Aug 9, 2024
8f69361
adding streaming function calling
nmoeller Aug 9, 2024
453fac6
cleaned up unit tests for function calling
nmoeller Aug 10, 2024
c88472a
added integration tests and sample for testing
nmoeller Aug 11, 2024
c9031bf
fixed comment in sample
nmoeller Aug 12, 2024
8d1777c
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Aug 13, 2024
9c62cab
fixed function choice required
nmoeller Aug 13, 2024
74692d5
adjust terminate behavior to new solution
nmoeller Aug 14, 2024
6578865
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Aug 14, 2024
62fd4fe
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
eavanvalkenburg Aug 14, 2024
165e839
removed function call behavior backwards compatibility
nmoeller Aug 16, 2024
56a6068
Merge branch 'issue-6499-Mistral-Ai-Function-Calling' of https://gith…
nmoeller Aug 16, 2024
f047a2e
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Aug 16, 2024
da3b3ef
Merge remote-tracking branch 'origin/main' into issue-6499-Mistral-Ai…
nmoeller Aug 22, 2024
b306ba3
implemented PR feedback
nmoeller Aug 22, 2024
482a261
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Aug 22, 2024
0869be1
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Aug 23, 2024
20effd7
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Aug 26, 2024
f29a7af
Merge remote-tracking branch 'origin/main' into issue-6499-Mistral-Ai…
nmoeller Sep 1, 2024
24597f0
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Sep 4, 2024
0585edb
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Sep 5, 2024
26d0d86
Merge remote-tracking branch 'origin/main' into issue-6499-Mistral-Ai…
nmoeller Sep 10, 2024
471fdd2
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Sep 10, 2024
cc9332b
Update python/tests/integration/completions/test_chat_completion_with…
nmoeller Sep 10, 2024
ced4c3d
fixed mypy issue
nmoeller Sep 10, 2024
0ee355d
Merge branch 'issue-6499-Mistral-Ai-Function-Calling' of https://gith…
nmoeller Sep 10, 2024
5bbd053
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Sep 10, 2024
b94f673
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Sep 10, 2024
b99a1a3
skipped integration tests
nmoeller Sep 11, 2024
ea02046
Merge branch 'issue-6499-Mistral-Ai-Function-Calling' of https://gith…
nmoeller Sep 11, 2024
527a0f2
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Sep 11, 2024
591d961
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
moonbox3 Sep 11, 2024
0ad5530
fixed wrong env variable for vertex ai
nmoeller Sep 11, 2024
7f808ce
Merge branch 'issue-6499-Mistral-Ai-Function-Calling' of https://gith…
nmoeller Sep 11, 2024
60a541c
Merge branch 'main' into issue-6499-Mistral-Ai-Function-Calling
nmoeller Sep 11, 2024
d8627e2
added ServiceInitializationError when ai_model_id is missing
nmoeller Sep 11, 2024
ebfd35a
Merge branch 'issue-6499-Mistral-Ai-Function-Calling' of https://gith…
nmoeller Sep 11, 2024
172d1bf
resolve conflicts
nmoeller Sep 11, 2024
New file: Mistral AI function calling concept sample (Python)
@@ -0,0 +1,212 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio
import os
from functools import reduce
from typing import TYPE_CHECKING

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion, MistralAIChatPromptExecutionSettings
from semantic_kernel.contents import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.function_call_content import FunctionCallContent
from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent
from semantic_kernel.core_plugins.math_plugin import MathPlugin
from semantic_kernel.core_plugins.time_plugin import TimePlugin
from semantic_kernel.functions import KernelArguments

if TYPE_CHECKING:
from semantic_kernel.functions import KernelFunction


system_message = """
You are a chat bot. Your name is Mosscap and
you have one goal: figure out what people need.
Your full name, should you need to know it, is
Splendid Speckled Mosscap. You communicate
effectively, but you tend to answer with long
flowery prose. You are also a math wizard,
especially for adding and subtracting.
You also excel at joke telling, where your tone is often sarcastic.
Once you have the answer I am looking for,
you will return a full answer to me as soon as possible.
"""

# This concept example shows how to handle both streaming and non-streaming responses
# To toggle the behavior, set the following flag accordingly:
stream = True

kernel = Kernel()

# Note: the underlying model must be Mistral Small, Mistral Large, Mixtral 8x22B, or Mistral Nemo.
# You can use MISTRALAI_API_KEY and MISTRALAI_CHAT_MODEL_ID environment variables to set the API key and model ID.
# Or just set it here in the Constructor for testing
kernel.add_service(
MistralAIChatCompletion(
service_id="chat",
# api_key=XXXXXXX,
# ai_model_id="mistral-large",
)
)

plugins_directory = os.path.join(__file__, "../../../../../prompt_template_samples/")
# adding plugins to the kernel
kernel.add_plugin(MathPlugin(), plugin_name="math")
kernel.add_plugin(TimePlugin(), plugin_name="time")

chat_function = kernel.add_function(
prompt="{{$chat_history}}{{$user_input}}",
plugin_name="ChatBot",
function_name="Chat",
)

# Enabling or disabling function calling is done by setting the `function_choice_behavior` attribute for the
# prompt execution settings. When the function_call parameter is set to "auto" the model will decide which
# function to use, if any.
#
# There are two ways to define the `function_choice_behavior` parameter:
# 1. Using the type string as `"auto"`, `"required"`, or `"none"`. For example:
# configure `function_choice_behavior="auto"` parameter directly in the execution settings.
# 2. Using the FunctionChoiceBehavior class. For example:
# `function_choice_behavior=FunctionChoiceBehavior.Auto()`.
# Both of these configure the `auto` tool_choice and all of the available plugins/functions
# registered on the kernel. If you want to limit the available plugins/functions, you must
# configure the `filters` dictionary attribute for each type of function choice behavior.
# For example:
#
# from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior

# function_choice_behavior = FunctionChoiceBehavior.Auto(
# filters={"included_functions": ["time-date", "time-time", "math-Add"]}
# )
#
# The filters attribute allows you to specify either: `included_functions`, `excluded_functions`,
# `included_plugins`, or `excluded_plugins`.

# Note: the number of responses for auto invoking tool calls is limited to 1.
# If configured to be greater than one, this value will be overridden to 1.
execution_settings = MistralAIChatPromptExecutionSettings(
service_id="chat",
max_tokens=2000,
temperature=0.7,
top_p=0.8,
function_choice_behavior=FunctionChoiceBehavior.Auto(auto_invoke=True),
)

history = ChatHistory()

history.add_system_message(system_message)
history.add_user_message("Hi there, who are you?")
history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need.")

arguments = KernelArguments(settings=execution_settings)


def print_tool_calls(message: ChatMessageContent) -> None:
# A helper method to pretty print the tool calls from the message.
# This is only triggered if auto invoke tool calls is disabled.
items = message.items
formatted_tool_calls = []
for i, item in enumerate(items, start=1):
if isinstance(item, FunctionCallContent):
tool_call_id = item.id
function_name = item.name
function_arguments = item.arguments
formatted_str = (
f"tool_call {i} id: {tool_call_id}\n"
f"tool_call {i} function name: {function_name}\n"
f"tool_call {i} arguments: {function_arguments}"
)
formatted_tool_calls.append(formatted_str)
if len(formatted_tool_calls) > 0:
print("Tool calls:\n" + "\n\n".join(formatted_tool_calls))
else:
print("The model used its own knowledge and didn't return any tool calls.")


async def handle_streaming(
kernel: Kernel,
chat_function: "KernelFunction",
arguments: KernelArguments,
) -> str | None:
response = kernel.invoke_stream(
chat_function,
return_function_results=False,
arguments=arguments,
)

print("Mosscap:> ", end="")
streamed_chunks: list[StreamingChatMessageContent] = []
result_content = []
async for message in response:
if not execution_settings.function_choice_behavior.auto_invoke_kernel_functions and isinstance(
message[0], StreamingChatMessageContent
):
streamed_chunks.append(message[0])
else:
result_content.append(message[0])
print(str(message[0]), end="")

if streamed_chunks:
streaming_chat_message = reduce(lambda first, second: first + second, streamed_chunks)
if hasattr(streaming_chat_message, "content"):
print(streaming_chat_message.content)
print("Auto tool calls is disabled, printing returned tool calls...")
print_tool_calls(streaming_chat_message)

print("\n")
if result_content:
return "".join([str(content) for content in result_content])
return None


async def chat() -> bool:
try:
user_input = input("User:> ")
except KeyboardInterrupt:
print("\n\nExiting chat...")
return False
except EOFError:
print("\n\nExiting chat...")
return False

if user_input == "exit":
print("\n\nExiting chat...")
return False
arguments["user_input"] = user_input
arguments["chat_history"] = history

if stream:
result = await handle_streaming(kernel, chat_function, arguments=arguments)
else:
result = await kernel.invoke(chat_function, arguments=arguments)

# If tools are used, and auto invoke tool calls is False, the response will be of type
# ChatMessageContent with information about the tool calls, which need to be sent
# back to the model to get the final response.
function_calls = [item for item in result.value[-1].items if isinstance(item, FunctionCallContent)]
if not execution_settings.function_choice_behavior.auto_invoke_kernel_functions and len(function_calls) > 0:
print_tool_calls(result.value[0])
return True

print(f"Mosscap:> {result}")

history.add_user_message(user_input)
history.add_assistant_message(str(result))
return True


async def main() -> None:
chatting = True
print(
"Welcome to the chat bot!\
\n Type 'exit' to exit.\
\n Try a math question to see the function calling in action (i.e. what is 3+3?)."
)
while chatting:
chatting = await chat()


if __name__ == "__main__":
asyncio.run(main())
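The comment block earlier in this sample describes limiting which plugins/functions are exposed to the model through the `filters` dictionary. As a minimal sketch (not part of this diff), reusing the sample's kernel, plugins, and the function names from its own comments:

from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatPromptExecutionSettings
from semantic_kernel.functions import KernelArguments

# Same execution settings as above, but only three functions are offered to the
# model via the `included_functions` filter.
filtered_settings = MistralAIChatPromptExecutionSettings(
    service_id="chat",
    max_tokens=2000,
    temperature=0.7,
    function_choice_behavior=FunctionChoiceBehavior.Auto(
        auto_invoke=True,
        filters={"included_functions": ["time-date", "time-time", "math-Add"]},
    ),
)
arguments = KernelArguments(settings=filtered_settings)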
mistral_ai_prompt_execution_settings.py
@@ -1,9 +1,15 @@
# Copyright (c) Microsoft. All rights reserved.

import logging
import sys
from typing import Any, Literal

from pydantic import Field, model_validator
if sys.version_info >= (3, 11):
pass # pragma: no cover
else:
pass # pragma: no cover

from pydantic import Field

from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings

@@ -28,11 +34,14 @@ class MistralAIChatPromptExecutionSettings(MistralAIPromptExecutionSettings):
temperature: float | None = Field(None, ge=0.0, le=2.0)
top_p: float | None = Field(None, ge=0.0, le=1.0)
random_seed: int | None = None

@model_validator(mode="after")
def check_function_call_behavior(self) -> "MistralAIChatPromptExecutionSettings":
"""Check if the user is requesting function call behavior."""
if self.function_choice_behavior is not None:
raise NotImplementedError("MistralAI does not support function call behavior.")

return self
tools: list[dict[str, Any]] | None = Field(
None,
max_length=64,
description="Do not set this manually. It is set by the service based on the function choice configuration.",
)
tool_choice: str | None = Field(
None,
description="Do not set this manually. It is set by the service based on the function choice configuration.",
)
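The new `tools` and `tool_choice` fields are not meant to be set by callers; the connector fills them in from the resolved function choice configuration (see the service changes below). The following sketch only illustrates the rough shape they take; the exact dictionary layout produced by `kernel_function_metadata_to_function_call_format` and the `math-Add` function are assumptions for illustration:

from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatPromptExecutionSettings

# Illustration only: the service populates these fields after resolving the
# function choice behavior against the kernel's registered functions.
settings = MistralAIChatPromptExecutionSettings(service_id="chat")
settings.tool_choice = "auto"  # overridden to "any" when the behavior is REQUIRED
settings.tools = [
    {
        "type": "function",
        "function": {
            "name": "math-Add",
            "description": "Adds two numbers together.",
            "parameters": {"type": "object", "properties": {}},  # argument schema elided
        },
    }
]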


MistralAIChatCompletion service (semantic_kernel/connectors/ai/mistral_ai/services/)
@@ -2,7 +2,7 @@

import logging
import sys
from collections.abc import AsyncGenerator
from collections.abc import AsyncGenerator, Callable
from typing import Any, ClassVar

if sys.version_info >= (3, 12):
@@ -22,21 +22,31 @@
from pydantic import ValidationError

from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
from semantic_kernel.connectors.ai.function_call_choice_configuration import FunctionCallChoiceConfiguration
from semantic_kernel.connectors.ai.function_calling_utils import (
kernel_function_metadata_to_function_call_format,
)
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceType
from semantic_kernel.connectors.ai.mistral_ai.prompt_execution_settings.mistral_ai_prompt_execution_settings import (
MistralAIChatPromptExecutionSettings,
)
from semantic_kernel.connectors.ai.mistral_ai.services.mistral_ai_base import MistralAIBase
from semantic_kernel.connectors.ai.mistral_ai.settings.mistral_ai_settings import MistralAISettings
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
from semantic_kernel.contents import (
ChatMessageContent,
FunctionCallContent,
StreamingChatMessageContent,
StreamingTextContent,
TextContent,
)
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.function_call_content import FunctionCallContent
from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent
from semantic_kernel.contents.streaming_text_content import StreamingTextContent
from semantic_kernel.contents.text_content import TextContent
from semantic_kernel.contents.utils.author_role import AuthorRole
from semantic_kernel.contents.utils.finish_reason import FinishReason
from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError, ServiceResponseException
from semantic_kernel.exceptions.service_exceptions import (
ServiceInitializationError,
ServiceResponseException,
)
from semantic_kernel.utils.experimental_decorator import experimental_class
from semantic_kernel.utils.telemetry.model_diagnostics.decorators import trace_chat_completion

@@ -47,7 +57,7 @@
class MistralAIChatCompletion(MistralAIBase, ChatCompletionClientBase):
"""Mistral Chat completion class."""

SUPPORTS_FUNCTION_CALLING: ClassVar[bool] = False
SUPPORTS_FUNCTION_CALLING: ClassVar[bool] = True

def __init__(
self,
@@ -248,3 +258,40 @@ def _get_tool_calls_from_chat_choice(
]

# endregion

def update_settings_from_function_call_configuration_mistral(
self,
function_choice_configuration: "FunctionCallChoiceConfiguration",
settings: "PromptExecutionSettings",
type: "FunctionChoiceType",
) -> None:
"""Update the settings from a FunctionChoiceConfiguration."""
if (
function_choice_configuration.available_functions
and hasattr(settings, "tool_choice")
and hasattr(settings, "tools")
):
settings.tool_choice = type
settings.tools = [
kernel_function_metadata_to_function_call_format(f)
for f in function_choice_configuration.available_functions
]
# Function Choice behavior required maps to MistralAI any
if (
settings.function_choice_behavior
and settings.function_choice_behavior.type_ == FunctionChoiceType.REQUIRED
):
settings.tool_choice = "any"

@override
def _update_function_choice_settings_callback(
self,
) -> Callable[[FunctionCallChoiceConfiguration, "PromptExecutionSettings", FunctionChoiceType], None]:
return self.update_settings_from_function_call_configuration_mistral

@override
def _reset_function_choice_settings(self, settings: "PromptExecutionSettings") -> None:
if hasattr(settings, "tool_choice"):
settings.tool_choice = None
if hasattr(settings, "tools"):
settings.tools = None
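From the caller's perspective nothing Mistral-specific is required to use forced function calling; a brief sketch, assuming `FunctionChoiceBehavior.Required()` from the base connectors package, of how the new callback comes into play:

from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatPromptExecutionSettings

# Sketch: "required" function calling from the caller's side. During a request,
# _update_function_choice_settings_callback fills `tools` with the available
# functions and, because the behavior type is REQUIRED, sets `tool_choice` to
# Mistral's "any"; _reset_function_choice_settings clears both fields again when
# the settings are reset.
required_settings = MistralAIChatPromptExecutionSettings(
    service_id="chat",
    function_choice_behavior=FunctionChoiceBehavior.Required(),
)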