-
Notifications
You must be signed in to change notification settings - Fork 3.1k
/
Gemini_FunctionCalling.cs
229 lines (199 loc) · 9.9 KB
/
Gemini_FunctionCalling.cs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
// Copyright (c) Microsoft. All rights reserved.
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.Google;
using xRetry;
namespace FunctionCalling;
/// <summary>
/// These examples demonstrate two ways functions called by the Gemini LLM can be invoked using the SK streaming and non-streaming AI API:
///
/// 1. Automatic Invocation by SK:
/// Functions called by the LLM are invoked automatically by SK. The results of these function invocations
/// are automatically added to the chat history and returned to the LLM. The LLM reasons about the chat history
/// and generates the final response.
/// This approach is fully automated and requires no manual intervention from the caller.
///
/// 2. Manual Invocation by a Caller:
/// Functions called by the LLM are returned to the AI API caller. The caller controls the invocation phase where
/// they may decide which function to call, when to call them, how to handle exceptions, call them in parallel or sequentially, etc.
/// The caller then adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
/// and generates the final response.
/// This approach is manual and provides more control over the function invocation phase to the caller.
/// </summary>
public sealed class Gemini_FunctionCalling(ITestOutputHelper output) : BaseTest(output)
{
    /// <summary>
    /// Runs the function-calling sample against the Google AI (API-key based) Gemini endpoint.
    /// Skips the example when the Google AI credentials are not configured.
    /// </summary>
    [RetryFact]
    public async Task GoogleAIAsync()
    {
        Console.WriteLine("============= Google AI - Gemini Chat Completion with function calling =============");

        string geminiApiKey = TestConfiguration.GoogleAI.ApiKey;
        string geminiModelId = TestConfiguration.GoogleAI.Gemini.ModelId;

        if (geminiApiKey is null || geminiModelId is null)
        {
            Console.WriteLine("Gemini credentials not found. Skipping example.");
            return;
        }

        Kernel kernel = Kernel.CreateBuilder()
            .AddGoogleAIGeminiChatCompletion(
                modelId: geminiModelId,
                apiKey: geminiApiKey)
            .Build();

        await this.RunSampleAsync(kernel);
    }

    /// <summary>
    /// Runs the function-calling sample against the Vertex AI Gemini endpoint using a bearer token.
    /// Skips the example when any of the Vertex AI credentials are not configured.
    /// </summary>
    [RetryFact]
    public async Task VertexAIAsync()
    {
        Console.WriteLine("============= Vertex AI - Gemini Chat Completion with function calling =============");

        string geminiApiKey = TestConfiguration.VertexAI.BearerKey;
        string geminiModelId = TestConfiguration.VertexAI.Gemini.ModelId;
        string geminiLocation = TestConfiguration.VertexAI.Location;
        string geminiProject = TestConfiguration.VertexAI.ProjectId;

        if (geminiApiKey is null || geminiModelId is null || geminiLocation is null || geminiProject is null)
        {
            Console.WriteLine("Gemini vertex ai credentials not found. Skipping example.");
            return;
        }

        Kernel kernel = Kernel.CreateBuilder()
            .AddVertexAIGeminiChatCompletion(
                modelId: geminiModelId,
                bearerKey: geminiApiKey,
                location: geminiLocation,
                projectId: geminiProject)
            .Build();

        // To generate bearer key, you need installed google sdk or use google web console with command:
        //
        //   gcloud auth print-access-token
        //
        // Above code pass bearer key as string, it is not recommended way in production code,
        // especially if IChatCompletionService will be long lived, tokens generated by google sdk lives for 1 hour.
        // You should use bearer key provider, which will be used to generate token on demand:
        //
        // Example:
        //
        // Kernel kernel = Kernel.CreateBuilder()
        //     .AddVertexAIGeminiChatCompletion(
        //         modelId: TestConfiguration.VertexAI.Gemini.ModelId,
        //         bearerKeyProvider: () =>
        //         {
        //             // This is just example, in production we recommend using Google SDK to generate your BearerKey token.
        //             // This delegate will be called on every request,
        //             // when providing the token consider using caching strategy and refresh token logic when it is expired or close to expiration.
        //             return GetBearerKey();
        //         },
        //         location: TestConfiguration.VertexAI.Location,
        //         projectId: TestConfiguration.VertexAI.ProjectId);

        await this.RunSampleAsync(kernel);
    }

    /// <summary>
    /// Demonstrates automatic (Examples 1-2) and manual (Example 3) function invocation
    /// against the supplied <paramref name="kernel"/>.
    /// </summary>
    /// <param name="kernel">A kernel already configured with a Gemini chat completion service.</param>
    private async Task RunSampleAsync(Kernel kernel)
    {
        // Add a plugin with some helper functions we want to allow the model to utilize.
        kernel.ImportPluginFromFunctions("HelperFunctions",
        [
            kernel.CreateFunctionFromMethod(() => DateTime.UtcNow.ToString("R"), "GetCurrentUtcTime", "Retrieves the current time in UTC."),
            kernel.CreateFunctionFromMethod((string cityName) =>
                cityName switch
                {
                    "Boston" => "61 and rainy",
                    "London" => "55 and cloudy",
                    "Miami" => "80 and sunny",
                    "Paris" => "60 and rainy",
                    "Tokyo" => "50 and sunny",
                    "Sydney" => "75 and sunny",
                    "Tel Aviv" => "80 and sunny",
                    _ => "31 and snowing",
                }, "Get_Weather_For_City", "Gets the current weather for the specified city"),
        ]);

        Console.WriteLine("======== Example 1: Use automated function calling with a non-streaming prompt ========");
        {
            // AutoInvokeKernelFunctions: SK invokes the functions requested by the model and feeds
            // the results back into the chat history automatically.
            GeminiPromptExecutionSettings settings = new() { ToolCallBehavior = GeminiToolCallBehavior.AutoInvokeKernelFunctions };

            Console.WriteLine(await kernel.InvokePromptAsync(
                "Check current UTC time, and return current weather in Paris city", new(settings)));
            Console.WriteLine();
        }

        Console.WriteLine("======== Example 2: Use automated function calling with a streaming prompt ========");
        {
            GeminiPromptExecutionSettings settings = new() { ToolCallBehavior = GeminiToolCallBehavior.AutoInvokeKernelFunctions };

            await foreach (var update in kernel.InvokePromptStreamingAsync(
                "Check current UTC time, and return current weather in Boston city", new(settings)))
            {
                Console.Write(update);
            }
            Console.WriteLine();
        }

        Console.WriteLine("======== Example 3: Use manual function calling with a non-streaming prompt ========");
        {
            var chat = kernel.GetRequiredService<IChatCompletionService>();
            var chatHistory = new ChatHistory();

            // EnableKernelFunctions: the model may request function calls, but SK returns them
            // to the caller instead of invoking them; the loop below performs the invocation.
            GeminiPromptExecutionSettings settings = new() { ToolCallBehavior = GeminiToolCallBehavior.EnableKernelFunctions };

            chatHistory.AddUserMessage("Check current UTC time, and return current weather in London city");

            while (true)
            {
                var result = (GeminiChatMessageContent)await chat.GetChatMessageContentAsync(chatHistory, settings, kernel);

                if (result.Content is not null)
                {
                    Console.Write(result.Content);
                }

                // No pending tool calls means the model produced its final answer; stop looping.
                if (result.ToolCalls is not { Count: > 0 })
                {
                    break;
                }

                // The assistant message containing the tool calls must be part of the history
                // before the corresponding tool results are appended.
                chatHistory.Add(result);

                foreach (var toolCall in result.ToolCalls)
                {
                    KernelArguments? arguments = null;
                    if (kernel.Plugins.TryGetFunction(toolCall.PluginName, toolCall.FunctionName, out var function))
                    {
                        // Add parameters to arguments
                        if (toolCall.Arguments is not null)
                        {
                            arguments = [];
                            foreach (var parameter in toolCall.Arguments)
                            {
                                arguments[parameter.Key] = parameter.Value?.ToString();
                            }
                        }
                    }
                    else
                    {
                        Console.WriteLine("Unable to find function. Please try again!");
                        continue;
                    }

                    var functionResponse = await function.InvokeAsync(kernel, arguments);
                    Assert.NotNull(functionResponse);

                    // Wrap the result so the connector serializes it as a Gemini function response message.
                    var calledToolResult = new GeminiFunctionToolResult(toolCall, functionResponse);
                    chatHistory.Add(new GeminiChatMessageContent(calledToolResult));
                }
            }

            Console.WriteLine();
        }

        /* Uncomment this to try in a console chat loop.
        Console.WriteLine("======== Example 4: Use automated function calling with a streaming chat ========");
        {
            // NOTE: must use GeminiToolCallBehavior here — the OpenAI connector's ToolCallBehavior
            // type is not valid for GeminiPromptExecutionSettings and would not compile.
            GeminiPromptExecutionSettings settings = new() { ToolCallBehavior = GeminiToolCallBehavior.AutoInvokeKernelFunctions };

            var chat = kernel.GetRequiredService<IChatCompletionService>();
            var chatHistory = new ChatHistory();

            while (true)
            {
                Console.Write("Question (Type \"quit\" to leave): ");

                string question = Console.ReadLine() ?? string.Empty;
                if (question == "quit")
                {
                    break;
                }

                chatHistory.AddUserMessage(question);
                System.Text.StringBuilder sb = new();

                await foreach (var update in chat.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel))
                {
                    if (update.Content is not null)
                    {
                        Console.Write(update.Content);
                        sb.Append(update.Content);
                    }
                }

                chatHistory.AddAssistantMessage(sb.ToString());
                Console.WriteLine();
            }
        }
        */
    }
}