From a1d70d22121ff8de39aba4bbc3388fc9f632e7b0 Mon Sep 17 00:00:00 2001
From: Chris <66376200+crickman@users.noreply.github.com>
Date: Wed, 16 Jul 2025 03:09:17 -0700
Subject: [PATCH 01/10] .Net Fix - Display thread type name in error message
(#12723)
### Motivation and Context
Error message confusing
### Description
Error message does not display thread type and instead displays the name
of the generic parameter:
`Microsoft.SemanticKernel.KernelException: OpenAIResponseAgent currently
only supports agent threads of type TThreadType.`
(Customer reported an error using the erroneous error message.)
### Contribution Checklist
- [X] The code builds clean without any errors or warnings
- [X] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [X] All unit tests pass, and I have added new tests where possible
- [X] I didn't break anyone :smile:
---
dotnet/src/Agents/Abstractions/Agent.cs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/dotnet/src/Agents/Abstractions/Agent.cs b/dotnet/src/Agents/Abstractions/Agent.cs
index e35fbc5738a6..0413475be286 100644
--- a/dotnet/src/Agents/Abstractions/Agent.cs
+++ b/dotnet/src/Agents/Abstractions/Agent.cs
@@ -350,7 +350,7 @@ protected virtual async Task EnsureThreadExistsWithMessagesAsync
Date: Wed, 16 Jul 2025 11:10:50 +0100
Subject: [PATCH 02/10] .Net: Google Gemini - Move API key from the URL to
`x-goog-api-key` HTTP header (#12717)
### Motivation and Context
- Resolves #12666
The implementation successfully addresses the security concern raised in
GitHub issue #12666 by moving the Google API key from the URL query
parameter to the secure x-goog-api-key HTTP header, preventing sensitive
information from being logged or traced.
- Modified ClientBase class to support API key in headers
- Updated all Google AI clients (Chat, Streaming, Token Counter,
Embeddings) to use header-based authentication
- Removed API key from URLs to prevent exposure in logs and OTEL traces
- Comprehensive Testing:
---
.../Clients/GeminiChatGenerationTests.cs | 32 +++++++++++++++++++
.../Clients/GeminiChatStreamingTests.cs | 32 +++++++++++++++++++
.../Clients/GeminiCountingTokensTests.cs | 30 +++++++++++++++++
...GoogleAIClientEmbeddingsGenerationTests.cs | 32 +++++++++++++++++++
.../Connectors.Google/Core/ClientBase.cs | 9 +++++-
.../Clients/GeminiChatCompletionClient.cs | 7 ++--
.../Clients/GeminiTokenCounterClient.cs | 5 +--
.../Core/GoogleAI/GoogleAIEmbeddingClient.cs | 5 +--
8 files changed, 144 insertions(+), 8 deletions(-)
diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatGenerationTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatGenerationTests.cs
index 579ba5656cb5..2c19b210b2c8 100644
--- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatGenerationTests.cs
+++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatGenerationTests.cs
@@ -422,6 +422,38 @@ public async Task ItCreatesPostRequestWithSemanticKernelVersionHeaderAsync()
Assert.Equal(expectedVersion, header);
}
+ [Fact]
+ public async Task ItCreatesPostRequestWithApiKeyInHeaderAsync()
+ {
+ // Arrange
+ var client = this.CreateChatCompletionClient();
+ var chatHistory = CreateSampleChatHistory();
+
+ // Act
+ await client.GenerateChatMessageAsync(chatHistory);
+
+ // Assert
+ Assert.NotNull(this._messageHandlerStub.RequestHeaders);
+ var apiKeyHeader = this._messageHandlerStub.RequestHeaders.GetValues("x-goog-api-key").SingleOrDefault();
+ Assert.NotNull(apiKeyHeader);
+ Assert.Equal("fake-key", apiKeyHeader);
+ }
+
+ [Fact]
+ public async Task ItCreatesPostRequestWithoutApiKeyInUrlAsync()
+ {
+ // Arrange
+ var client = this.CreateChatCompletionClient();
+ var chatHistory = CreateSampleChatHistory();
+
+ // Act
+ await client.GenerateChatMessageAsync(chatHistory);
+
+ // Assert
+ Assert.NotNull(this._messageHandlerStub.RequestUri);
+ Assert.DoesNotContain("key=", this._messageHandlerStub.RequestUri.ToString());
+ }
+
[Fact]
public async Task ItCreatesPostRequestWithResponseSchemaPropertyAsync()
{
diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatStreamingTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatStreamingTests.cs
index f5fb92803f5f..692da9146b04 100644
--- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatStreamingTests.cs
+++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatStreamingTests.cs
@@ -392,6 +392,38 @@ public async Task ItCreatesPostRequestWithSemanticKernelVersionHeaderAsync()
Assert.Equal(expectedVersion, header);
}
+ [Fact]
+ public async Task ItCreatesPostRequestWithApiKeyInHeaderAsync()
+ {
+ // Arrange
+ var client = this.CreateChatCompletionClient();
+ var chatHistory = CreateSampleChatHistory();
+
+ // Act
+ await client.StreamGenerateChatMessageAsync(chatHistory).ToListAsync();
+
+ // Assert
+ Assert.NotNull(this._messageHandlerStub.RequestHeaders);
+ var apiKeyHeader = this._messageHandlerStub.RequestHeaders.GetValues("x-goog-api-key").SingleOrDefault();
+ Assert.NotNull(apiKeyHeader);
+ Assert.Equal("fake-key", apiKeyHeader);
+ }
+
+ [Fact]
+ public async Task ItCreatesPostRequestWithoutApiKeyInUrlAsync()
+ {
+ // Arrange
+ var client = this.CreateChatCompletionClient();
+ var chatHistory = CreateSampleChatHistory();
+
+ // Act
+ await client.StreamGenerateChatMessageAsync(chatHistory).ToListAsync();
+
+ // Assert
+ Assert.NotNull(this._messageHandlerStub.RequestUri);
+ Assert.DoesNotContain("key=", this._messageHandlerStub.RequestUri.ToString());
+ }
+
private static ChatHistory CreateSampleChatHistory()
{
var chatHistory = new ChatHistory();
diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiCountingTokensTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiCountingTokensTests.cs
index 560642875d46..dd28b46ddebe 100644
--- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiCountingTokensTests.cs
+++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiCountingTokensTests.cs
@@ -116,6 +116,36 @@ public async Task ItCreatesPostRequestWithSemanticKernelVersionHeaderAsync()
Assert.Equal(expectedVersion, header);
}
+ [Fact]
+ public async Task ItCreatesPostRequestWithApiKeyInHeaderAsync()
+ {
+ // Arrange
+ var client = this.CreateTokenCounterClient();
+
+ // Act
+ await client.CountTokensAsync("fake-text");
+
+ // Assert
+ Assert.NotNull(this._messageHandlerStub.RequestHeaders);
+ var apiKeyHeader = this._messageHandlerStub.RequestHeaders.GetValues("x-goog-api-key").SingleOrDefault();
+ Assert.NotNull(apiKeyHeader);
+ Assert.Equal("fake-key", apiKeyHeader);
+ }
+
+ [Fact]
+ public async Task ItCreatesPostRequestWithoutApiKeyInUrlAsync()
+ {
+ // Arrange
+ var client = this.CreateTokenCounterClient();
+
+ // Act
+ await client.CountTokensAsync("fake-text");
+
+ // Assert
+ Assert.NotNull(this._messageHandlerStub.RequestUri);
+ Assert.DoesNotContain("key=", this._messageHandlerStub.RequestUri.ToString());
+ }
+
[Theory]
[InlineData("https://malicious-site.com")]
[InlineData("http://internal-network.local")]
diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/GoogleAI/GoogleAIClientEmbeddingsGenerationTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/GoogleAI/GoogleAIClientEmbeddingsGenerationTests.cs
index 855740b7421f..24b095874cb0 100644
--- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/GoogleAI/GoogleAIClientEmbeddingsGenerationTests.cs
+++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/GoogleAI/GoogleAIClientEmbeddingsGenerationTests.cs
@@ -142,6 +142,38 @@ public async Task ItCreatesPostRequestWithSemanticKernelVersionHeaderAsync()
Assert.Equal(expectedVersion, header);
}
+ [Fact]
+ public async Task ItCreatesPostRequestWithApiKeyInHeaderAsync()
+ {
+ // Arrange
+ var client = this.CreateEmbeddingsClient();
+ IList data = ["sample data"];
+
+ // Act
+ await client.GenerateEmbeddingsAsync(data);
+
+ // Assert
+ Assert.NotNull(this._messageHandlerStub.RequestHeaders);
+ var apiKeyHeader = this._messageHandlerStub.RequestHeaders.GetValues("x-goog-api-key").SingleOrDefault();
+ Assert.NotNull(apiKeyHeader);
+ Assert.Equal("fake-key", apiKeyHeader);
+ }
+
+ [Fact]
+ public async Task ItCreatesPostRequestWithoutApiKeyInUrlAsync()
+ {
+ // Arrange
+ var client = this.CreateEmbeddingsClient();
+ IList data = ["sample data"];
+
+ // Act
+ await client.GenerateEmbeddingsAsync(data);
+
+ // Assert
+ Assert.NotNull(this._messageHandlerStub.RequestUri);
+ Assert.DoesNotContain("key=", this._messageHandlerStub.RequestUri.ToString());
+ }
+
[Fact]
public async Task ShouldIncludeDimensionsInAllRequestsAsync()
{
diff --git a/dotnet/src/Connectors/Connectors.Google/Core/ClientBase.cs b/dotnet/src/Connectors/Connectors.Google/Core/ClientBase.cs
index b94ca9eeebc6..ed31204ea67e 100644
--- a/dotnet/src/Connectors/Connectors.Google/Core/ClientBase.cs
+++ b/dotnet/src/Connectors/Connectors.Google/Core/ClientBase.cs
@@ -15,6 +15,7 @@ namespace Microsoft.SemanticKernel.Connectors.Google.Core;
internal abstract class ClientBase
{
private readonly Func>? _bearerTokenProvider;
+ private readonly string? _apiKey;
protected ILogger Logger { get; }
@@ -32,12 +33,14 @@ protected ClientBase(
protected ClientBase(
HttpClient httpClient,
- ILogger? logger)
+ ILogger? logger,
+ string? apiKey = null)
{
Verify.NotNull(httpClient);
this.HttpClient = httpClient;
this.Logger = logger ?? NullLogger.Instance;
+ this._apiKey = apiKey;
}
protected static void ValidateMaxTokens(int? maxTokens)
@@ -96,6 +99,10 @@ protected async Task CreateHttpRequestAsync(object requestDa
httpRequestMessage.Headers.Authorization =
new AuthenticationHeaderValue("Bearer", bearerKey);
}
+ else if (!string.IsNullOrWhiteSpace(this._apiKey))
+ {
+ httpRequestMessage.Headers.Add("x-goog-api-key", this._apiKey);
+ }
return httpRequestMessage;
}
diff --git a/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiChatCompletionClient.cs b/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiChatCompletionClient.cs
index b62384718ad7..3d52a92f8825 100644
--- a/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiChatCompletionClient.cs
+++ b/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiChatCompletionClient.cs
@@ -100,7 +100,8 @@ public GeminiChatCompletionClient(
ILogger? logger = null)
: base(
httpClient: httpClient,
- logger: logger)
+ logger: logger,
+ apiKey: apiKey)
{
Verify.NotNullOrWhiteSpace(modelId);
Verify.NotNullOrWhiteSpace(apiKey);
@@ -108,8 +109,8 @@ public GeminiChatCompletionClient(
string versionSubLink = GetApiVersionSubLink(apiVersion);
this._modelId = modelId;
- this._chatGenerationEndpoint = new Uri($"https://generativelanguage.googleapis.com/{versionSubLink}/models/{this._modelId}:generateContent?key={apiKey}");
- this._chatStreamingEndpoint = new Uri($"https://generativelanguage.googleapis.com/{versionSubLink}/models/{this._modelId}:streamGenerateContent?key={apiKey}&alt=sse");
+ this._chatGenerationEndpoint = new Uri($"https://generativelanguage.googleapis.com/{versionSubLink}/models/{this._modelId}:generateContent");
+ this._chatStreamingEndpoint = new Uri($"https://generativelanguage.googleapis.com/{versionSubLink}/models/{this._modelId}:streamGenerateContent?alt=sse");
}
///
diff --git a/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiTokenCounterClient.cs b/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiTokenCounterClient.cs
index ceddaffada02..057bd8bd86b0 100644
--- a/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiTokenCounterClient.cs
+++ b/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiTokenCounterClient.cs
@@ -33,7 +33,8 @@ public GeminiTokenCounterClient(
ILogger? logger = null)
: base(
httpClient: httpClient,
- logger: logger)
+ logger: logger,
+ apiKey: apiKey)
{
Verify.NotNullOrWhiteSpace(modelId);
Verify.NotNullOrWhiteSpace(apiKey);
@@ -41,7 +42,7 @@ public GeminiTokenCounterClient(
string versionSubLink = GetApiVersionSubLink(apiVersion);
this._modelId = modelId;
- this._tokenCountingEndpoint = new Uri($"https://generativelanguage.googleapis.com/{versionSubLink}/models/{this._modelId}:countTokens?key={apiKey}");
+ this._tokenCountingEndpoint = new Uri($"https://generativelanguage.googleapis.com/{versionSubLink}/models/{this._modelId}:countTokens");
}
///
diff --git a/dotnet/src/Connectors/Connectors.Google/Core/GoogleAI/GoogleAIEmbeddingClient.cs b/dotnet/src/Connectors/Connectors.Google/Core/GoogleAI/GoogleAIEmbeddingClient.cs
index ff2542549c78..6a801acff76e 100644
--- a/dotnet/src/Connectors/Connectors.Google/Core/GoogleAI/GoogleAIEmbeddingClient.cs
+++ b/dotnet/src/Connectors/Connectors.Google/Core/GoogleAI/GoogleAIEmbeddingClient.cs
@@ -37,7 +37,8 @@ public GoogleAIEmbeddingClient(
int? dimensions = null)
: base(
httpClient: httpClient,
- logger: logger)
+ logger: logger,
+ apiKey: apiKey)
{
Verify.NotNullOrWhiteSpace(modelId);
Verify.NotNullOrWhiteSpace(apiKey);
@@ -45,7 +46,7 @@ public GoogleAIEmbeddingClient(
string versionSubLink = GetApiVersionSubLink(apiVersion);
this._embeddingModelId = modelId;
- this._embeddingEndpoint = new Uri($"https://generativelanguage.googleapis.com/{versionSubLink}/models/{this._embeddingModelId}:batchEmbedContents?key={apiKey}");
+ this._embeddingEndpoint = new Uri($"https://generativelanguage.googleapis.com/{versionSubLink}/models/{this._embeddingModelId}:batchEmbedContents");
this._dimensions = dimensions;
}
From d96cadcebe75e4d21705b125d1d5db88bb645d88 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 17 Jul 2025 15:01:11 +0900
Subject: [PATCH 03/10] Python: Update sentence-transformers requirement from
<5.0,>=2.2 to >=2.2,<6.0 in /python (#12679)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Updates the requirements on
[sentence-transformers](https://github.com/UKPLab/sentence-transformers)
to permit the latest version.
Release notes
Sourced from sentence-transformers's
releases.
v5.0.0 - SparseEncoder support; encode_query & encode_document;
multi-processing in encode; Router; and more
This release consists of significant updates including the
introduction of Sparse Encoder models, new methods `encode_query`
and `encode_document`, multi-processing support in `encode`,
the `Router` module for asymmetric models, custom learning rates for
parameter groups, composite loss logging, and various small
improvements and bug fixes.
Install this version with
# Training + Inference
pip install sentence-transformers[train]==5.0.0
Inference only, use one of:
pip install sentence-transformers==5.0.0
pip install sentence-transformers[onnx-gpu]==5.0.0
pip install sentence-transformers[onnx]==5.0.0
pip install sentence-transformers[openvino]==5.0.0
[!TIP]
Our "Training and Finetuning Sparse Embedding Models with Sentence
Transformers v5" blogpost is an excellent place to learn about
finetuning sparse embedding models!
[!NOTE]
This release is designed to be fully backwards compatible, meaning that
you should be able to upgrade from older versions to v5.x without any
issues. If you are running into issues when upgrading, feel free to open
an
issue. Also see the Migration Guide
for changes that we would recommend.
Sparse Encoder models
The Sentence Transformers v5.0 release introduces Sparse Embedding
models, also known as Sparse Encoders. These models generate
high-dimensional embeddings, often with 30,000+ dimensions, where often
only <1% of dimensions are non-zero. This is in contrast to the
standard dense embedding models, which produce low-dimensional
embeddings (e.g., 384, 768, or 1024 dimensions) where all values are
non-zero.
Usually, each active dimension (i.e. the dimension with a non-zero
value) in a sparse embedding corresponds to a specific token in the
model's vocabulary, allowing for interpretability. This means that you
can e.g. see exactly which words/tokens are important in an embedding,
and that you can inspect exactly because of which words/tokens two texts
are deemed similar.
Let's have a look at naver/splade-v3, a
strong sparse embedding model, as an example:
from sentence_transformers import SparseEncoder
Download from the 🤗 Hub
model = SparseEncoder("naver/splade-v3")
Run inference
sentences = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
embeddings = model.encode(sentences)
print(embeddings.shape)
(3, 30522)
Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities)
tensor([[ 32.4323,  5.8528,  0.0258],
... (truncated)
Commits
8dc0fca
Release v5.0.0
e91af6a
Update links for SPLADE and Inference-Free SPLADE models collections in
docum...
4c00aea
[fix] Remove `hub_kwargs` in `SparseStaticEmbedding.from_json` in favor of more...
28685bb
Clean up gitignore (#3409)
85dd175
Fix formatting of docstring arguments in
SpladeRegularizerWeightSchedulerCall...
14afc4b
Merge PR #3401: [v5] Add support for Sparse Embedding models
2d24841
Update tip phrasing and fix links
ed043c5
typo
b2679d1
fix broken link
d30341e
Update tips to prepared for v5.0
- Additional commits viewable in compare
view
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
---
python/pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/python/pyproject.toml b/python/pyproject.toml
index 6f977994dd27..34f486050680 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -93,7 +93,7 @@ google = [
]
hugging_face = [
"transformers[torch] ~= 4.28",
- "sentence-transformers >= 2.2,< 5.0",
+ "sentence-transformers >= 2.2,< 6.0",
"torch == 2.7.1"
]
mcp = [
From f369170a15506efb48426e2a3e3f3e30cd599e8d Mon Sep 17 00:00:00 2001
From: Alexander Batishchev
Date: Thu, 17 Jul 2025 05:51:06 -0700
Subject: [PATCH 04/10] Fixed typo in README.md (#12735)
### Description
Fixed the casing of word Core in the name of the package.
### Contribution Checklist
- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone :smile:
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index a5747ea5428e..039e60f62982 100644
--- a/README.md
+++ b/README.md
@@ -55,7 +55,7 @@ pip install semantic-kernel
```bash
dotnet add package Microsoft.SemanticKernel
-dotnet add package Microsoft.SemanticKernel.Agents.core
+dotnet add package Microsoft.SemanticKernel.Agents.Core
```
### Java
From bbe179f05041c0df792610a18358211845dac8bb Mon Sep 17 00:00:00 2001
From: Damien Guard
Date: Thu, 17 Jul 2025 22:12:34 +0100
Subject: [PATCH 05/10] .Net: Ensure MongoDB filter/offset tests don't conflict
(#12739)
### Motivation and Context
Two of the MongoDB tests intermittently fail when run on our CI (once
they are enabled).
This is, I believe, because two different tests are using and setting up
the same collection name.
### Description
Rename the Filter test to not use the Offset test collection name.
### Contribution Checklist
- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone :smile:
---
.../Memory/CosmosMongoDB/CosmosMongoCollectionTests.cs | 2 +-
.../Memory/MongoDB/MongoDBVectorStoreRecordCollectionTests.cs | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/CosmosMongoDB/CosmosMongoCollectionTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/CosmosMongoDB/CosmosMongoCollectionTests.cs
index 059d4705f02f..14c01630c3c5 100644
--- a/dotnet/src/IntegrationTests/Connectors/Memory/CosmosMongoDB/CosmosMongoCollectionTests.cs
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/CosmosMongoDB/CosmosMongoCollectionTests.cs
@@ -390,7 +390,7 @@ public async Task SearchReturnsValidResultsWithFilterAsync()
var hotel3 = this.CreateTestHotel(hotelId: "key3", embedding: new[] { 20f, 20f, 20f, 20f });
var hotel4 = this.CreateTestHotel(hotelId: "key4", embedding: new[] { -1000f, -1000f, -1000f, -1000f });
- using var sut = new CosmosMongoCollection(fixture.MongoDatabase, "TestVectorizedSearchWithOffset");
+ using var sut = new CosmosMongoCollection(fixture.MongoDatabase, "TestVectorizedSearchWithFilter");
await sut.EnsureCollectionExistsAsync();
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/MongoDB/MongoDBVectorStoreRecordCollectionTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/MongoDB/MongoDBVectorStoreRecordCollectionTests.cs
index 8ee2ccc2374a..aac0713dc366 100644
--- a/dotnet/src/IntegrationTests/Connectors/Memory/MongoDB/MongoDBVectorStoreRecordCollectionTests.cs
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/MongoDB/MongoDBVectorStoreRecordCollectionTests.cs
@@ -389,7 +389,7 @@ public async Task SearchReturnsValidResultsWithFilterAsync()
var hotel3 = this.CreateTestHotel(hotelId: "key3", embedding: new[] { 20f, 20f, 20f, 20f });
var hotel4 = this.CreateTestHotel(hotelId: "key4", embedding: new[] { -1000f, -1000f, -1000f, -1000f });
- using var sut = new MongoCollection(fixture.MongoDatabase, "TestVectorizedSearchWithOffset");
+ using var sut = new MongoCollection(fixture.MongoDatabase, "TestVectorizedSearchWithFilter");
await sut.EnsureCollectionExistsAsync();
From 48f71b1f2ea9e83d56ae9ad88b37dd6839d6560e Mon Sep 17 00:00:00 2001
From: Evan Mattson <35585003+moonbox3@users.noreply.github.com>
Date: Fri, 18 Jul 2025 16:08:00 +0900
Subject: [PATCH 06/10] Python: Support AzureAI agent MCP tools for streaming
and non-streaming invocations (#12736)
### Motivation and Context
MCP tool support was released for the AzureAI Agent and it has yet to be
added to the SK Python AzureAIAgent. This PR adds functionality to
handle the MCP tools for both streaming and non-streaming invocations.
### Description
Adds MCP support for the Python AzureAI Agent.
- Upgrades dependent packages to be able to use the mcp tool from the
Azure SDK.
### Contribution Checklist
- [X] The code builds clean without any errors or warnings
- [X] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [X] All unit tests pass, and I have added new tests where possible
- [X] I didn't break anyone :smile:
---
python/pyproject.toml | 4 +-
python/samples/concepts/README.md | 1 +
.../azure_ai_agent_mcp_streaming.py | 121 ++++++++++
...e_ai_agent.py => step01_azure_ai_agent.py} | 0
...gin.py => step02_azure_ai_agent_plugin.py} | 0
...py => step03_azure_ai_agent_group_chat.py} | 0
...step04_azure_ai_agent_code_interpreter.py} | 0
...y => step05_azure_ai_agent_file_search.py} | 0
...pi.py => step06_azure_ai_agent_openapi.py} | 0
....py => step07_azure_ai_agent_retrieval.py} | 0
...y => step08_azure_ai_agent_declarative.py} | 0
.../step09_azure_ai_agent_mcp.py | 119 ++++++++++
.../azure_ai/agent_content_generation.py | 139 +++++++++++
.../agents/azure_ai/agent_thread_actions.py | 220 ++++++++++++++----
.../services/test_azure_text_to_image.py | 6 +-
.../services/test_openai_text_to_image.py | 11 +-
python/uv.lock | 20 +-
17 files changed, 570 insertions(+), 71 deletions(-)
create mode 100644 python/samples/concepts/agents/azure_ai_agent/azure_ai_agent_mcp_streaming.py
rename python/samples/getting_started_with_agents/azure_ai_agent/{step1_azure_ai_agent.py => step01_azure_ai_agent.py} (100%)
rename python/samples/getting_started_with_agents/azure_ai_agent/{step2_azure_ai_agent_plugin.py => step02_azure_ai_agent_plugin.py} (100%)
rename python/samples/getting_started_with_agents/azure_ai_agent/{step3_azure_ai_agent_group_chat.py => step03_azure_ai_agent_group_chat.py} (100%)
rename python/samples/getting_started_with_agents/azure_ai_agent/{step4_azure_ai_agent_code_interpreter.py => step04_azure_ai_agent_code_interpreter.py} (100%)
rename python/samples/getting_started_with_agents/azure_ai_agent/{step5_azure_ai_agent_file_search.py => step05_azure_ai_agent_file_search.py} (100%)
rename python/samples/getting_started_with_agents/azure_ai_agent/{step6_azure_ai_agent_openapi.py => step06_azure_ai_agent_openapi.py} (100%)
rename python/samples/getting_started_with_agents/azure_ai_agent/{step7_azure_ai_agent_retrieval.py => step07_azure_ai_agent_retrieval.py} (100%)
rename python/samples/getting_started_with_agents/azure_ai_agent/{step8_azure_ai_agent_declarative.py => step08_azure_ai_agent_declarative.py} (100%)
create mode 100644 python/samples/getting_started_with_agents/azure_ai_agent/step09_azure_ai_agent_mcp.py
diff --git a/python/pyproject.toml b/python/pyproject.toml
index 34f486050680..5a8cd960f85c 100644
--- a/python/pyproject.toml
+++ b/python/pyproject.toml
@@ -24,8 +24,8 @@ classifiers = [
]
dependencies = [
# azure agents
- "azure-ai-projects >= 1.0.0b11",
- "azure-ai-agents >= 1.1.0b1",
+ "azure-ai-projects >= 1.0.0b12",
+ "azure-ai-agents >= 1.1.0b4",
"aiohttp ~= 3.8",
"cloudevents ~=1.0",
"pydantic >=2.0,!=2.10.0,!=2.10.1,!=2.10.2,!=2.10.3,<2.12",
diff --git a/python/samples/concepts/README.md b/python/samples/concepts/README.md
index 8daa34c3d0fb..683c37e068f8 100644
--- a/python/samples/concepts/README.md
+++ b/python/samples/concepts/README.md
@@ -21,6 +21,7 @@
- [Azure AI Agent Declarative with OpenAPI Interpreter](./agents/azure_ai_agent/azure_ai_agent_declarative_openapi.py)
- [Azure AI Agent Declarative with Existing Agent ID](./agents/azure_ai_agent/azure_ai_agent_declarative_with_existing_agent_id.py)
- [Azure AI Agent File Manipulation](./agents/azure_ai_agent/azure_ai_agent_file_manipulation.py)
+- [Azure AI Agent MCP Streaming](./agents/azure_ai_agent/azure_ai_agent_mcp_streaming.py)
- [Azure AI Agent Prompt Templating](./agents/azure_ai_agent/azure_ai_agent_prompt_templating.py)
- [Azure AI Agent Message Callback Streaming](./agents/azure_ai_agent/azure_ai_agent_message_callback_streaming.py)
- [Azure AI Agent Message Callback](./agents/azure_ai_agent/azure_ai_agent_message_callback.py)
diff --git a/python/samples/concepts/agents/azure_ai_agent/azure_ai_agent_mcp_streaming.py b/python/samples/concepts/agents/azure_ai_agent/azure_ai_agent_mcp_streaming.py
new file mode 100644
index 000000000000..3e06a87257c0
--- /dev/null
+++ b/python/samples/concepts/agents/azure_ai_agent/azure_ai_agent_mcp_streaming.py
@@ -0,0 +1,121 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+
+from azure.ai.agents.models import McpTool
+from azure.identity.aio import DefaultAzureCredential
+
+from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings, AzureAIAgentThread
+from semantic_kernel.contents import ChatMessageContent, FunctionCallContent, FunctionResultContent
+
+"""
+The following sample demonstrates how to create a simple, Azure AI agent that
+uses the mcp tool to connect to an mcp server with streaming responses.
+"""
+
+TASK = "Please summarize the Azure REST API specifications Readme"
+
+
+async def handle_intermediate_messages(message: ChatMessageContent) -> None:
+ for item in message.items or []:
+ if isinstance(item, FunctionResultContent):
+ print(f"Function Result:> {item.result} for function: {item.name}")
+ elif isinstance(item, FunctionCallContent):
+ print(f"Function Call:> {item.name} with arguments: {item.arguments}")
+ else:
+ print(f"{item}")
+
+
+async def main() -> None:
+ async with (
+ DefaultAzureCredential() as creds,
+ AzureAIAgent.create_client(credential=creds) as client,
+ ):
+ # 1. Define the MCP tool with the server URL
+ mcp_tool = McpTool(
+ server_label="github",
+ server_url="https://gitmcp.io/Azure/azure-rest-api-specs",
+ allowed_tools=[], # Specify allowed tools if needed
+ )
+
+ # Optionally you may configure to require approval
+ # Allowed values are "never" or "always"
+ mcp_tool.set_approval_mode("never")
+
+ # 2. Create an agent with the MCP tool on the Azure AI agent service
+ agent_definition = await client.agents.create_agent(
+ model=AzureAIAgentSettings().model_deployment_name,
+ tools=mcp_tool.definitions,
+ instructions="You are a helpful agent that can use MCP tools to assist users.",
+ )
+
+ # 3. Create a Semantic Kernel agent for the Azure AI agent
+ agent = AzureAIAgent(
+ client=client,
+ definition=agent_definition,
+ )
+
+ # 4. Create a thread for the agent
+ # If no thread is provided, a new thread will be
+ # created and returned with the initial response
+ thread: AzureAIAgentThread | None = None
+
+ try:
+ print(f"# User: '{TASK}'")
+ # 5. Invoke the agent for the specified thread for response
+ async for response in agent.invoke_stream(
+ messages=TASK,
+ thread=thread,
+ on_intermediate_message=handle_intermediate_messages,
+ ):
+ print(f"{response}", end="", flush=True)
+ thread = response.thread
+ finally:
+ # 6. Cleanup: Delete the thread, agent, and file
+ await thread.delete() if thread else None
+ await client.agents.delete_agent(agent.id)
+
+ """
+ Sample Output:
+
+ # User: 'Please summarize the Azure REST API specifications Readme'
+ Function Call:> fetch_azure_rest_api_docs with arguments: {}
+ The Azure REST API specifications Readme provides comprehensive documentation and guidelines for designing,
+ authoring, validating, and evolving Azure REST APIs. It covers key areas including:
+
+ 1. Breaking changes and versioning: Guidelines to manage API changes that break backward compatibility, when to
+ increment API versions, and how to maintain smooth API evolution.
+
+ 2. OpenAPI/Swagger specifications: How to author REST APIs using OpenAPI specification 2.0 (Swagger), including
+ structure, conventions, validation tools, and extensions used by AutoRest for generating client SDKs.
+
+ 3. TypeSpec language: Introduction to TypeSpec, a powerful language for describing and generating REST API
+ specifications and client SDKs with extensibility to other API styles.
+
+ 4. Directory structure and uniform versioning: Organizing service specifications by teams, resource provider
+ namespaces, and following uniform versioning to keep API versions consistent across documentation and SDKs.
+
+ 5. Validation and tooling: Tools and processes like OAV, AutoRest, RESTler, and CI checks used to validate API
+ specs, generate SDKs, detect breaking changes, lint specifications, and test service contract accuracy.
+
+ 6. Authoring best practices: Manual and automated guidelines for quality API spec authoring, including writing
+ effective descriptions, resource modeling, naming conventions, and examples.
+
+ 7. Code generation configurations: How to configure readme files to generate SDKs for various languages
+ including .NET, Java, Python, Go, Typescript, and Azure CLI using AutoRest.
+
+ 8. API Scenarios and testing: Defining API scenario test files for end-to-end REST API workflows, including
+ variables, ARM template integration, and usage of test-proxy for recording traffic.
+
+ 9. SDK automation and release requests: Workflows for SDK generation validation, suppressing breaking change
+ warnings, and requesting official Azure SDK releases.
+
+ Overall, the Readme acts as a central hub providing references, guidelines, examples, and tools for maintaining
+ high-quality Azure REST API specifications and seamless SDK generation across multiple languages and
+ platforms. It ensures consistent API design, versioning, validation, and developer experience in the Azure
+ ecosystem.
+ """
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step1_azure_ai_agent.py b/python/samples/getting_started_with_agents/azure_ai_agent/step01_azure_ai_agent.py
similarity index 100%
rename from python/samples/getting_started_with_agents/azure_ai_agent/step1_azure_ai_agent.py
rename to python/samples/getting_started_with_agents/azure_ai_agent/step01_azure_ai_agent.py
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step2_azure_ai_agent_plugin.py b/python/samples/getting_started_with_agents/azure_ai_agent/step02_azure_ai_agent_plugin.py
similarity index 100%
rename from python/samples/getting_started_with_agents/azure_ai_agent/step2_azure_ai_agent_plugin.py
rename to python/samples/getting_started_with_agents/azure_ai_agent/step02_azure_ai_agent_plugin.py
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step3_azure_ai_agent_group_chat.py b/python/samples/getting_started_with_agents/azure_ai_agent/step03_azure_ai_agent_group_chat.py
similarity index 100%
rename from python/samples/getting_started_with_agents/azure_ai_agent/step3_azure_ai_agent_group_chat.py
rename to python/samples/getting_started_with_agents/azure_ai_agent/step03_azure_ai_agent_group_chat.py
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step4_azure_ai_agent_code_interpreter.py b/python/samples/getting_started_with_agents/azure_ai_agent/step04_azure_ai_agent_code_interpreter.py
similarity index 100%
rename from python/samples/getting_started_with_agents/azure_ai_agent/step4_azure_ai_agent_code_interpreter.py
rename to python/samples/getting_started_with_agents/azure_ai_agent/step04_azure_ai_agent_code_interpreter.py
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step5_azure_ai_agent_file_search.py b/python/samples/getting_started_with_agents/azure_ai_agent/step05_azure_ai_agent_file_search.py
similarity index 100%
rename from python/samples/getting_started_with_agents/azure_ai_agent/step5_azure_ai_agent_file_search.py
rename to python/samples/getting_started_with_agents/azure_ai_agent/step05_azure_ai_agent_file_search.py
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step6_azure_ai_agent_openapi.py b/python/samples/getting_started_with_agents/azure_ai_agent/step06_azure_ai_agent_openapi.py
similarity index 100%
rename from python/samples/getting_started_with_agents/azure_ai_agent/step6_azure_ai_agent_openapi.py
rename to python/samples/getting_started_with_agents/azure_ai_agent/step06_azure_ai_agent_openapi.py
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step7_azure_ai_agent_retrieval.py b/python/samples/getting_started_with_agents/azure_ai_agent/step07_azure_ai_agent_retrieval.py
similarity index 100%
rename from python/samples/getting_started_with_agents/azure_ai_agent/step7_azure_ai_agent_retrieval.py
rename to python/samples/getting_started_with_agents/azure_ai_agent/step07_azure_ai_agent_retrieval.py
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step8_azure_ai_agent_declarative.py b/python/samples/getting_started_with_agents/azure_ai_agent/step08_azure_ai_agent_declarative.py
similarity index 100%
rename from python/samples/getting_started_with_agents/azure_ai_agent/step8_azure_ai_agent_declarative.py
rename to python/samples/getting_started_with_agents/azure_ai_agent/step08_azure_ai_agent_declarative.py
diff --git a/python/samples/getting_started_with_agents/azure_ai_agent/step09_azure_ai_agent_mcp.py b/python/samples/getting_started_with_agents/azure_ai_agent/step09_azure_ai_agent_mcp.py
new file mode 100644
index 000000000000..5317cbf43bbc
--- /dev/null
+++ b/python/samples/getting_started_with_agents/azure_ai_agent/step09_azure_ai_agent_mcp.py
@@ -0,0 +1,119 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+
+from azure.ai.agents.models import McpTool
+from azure.identity.aio import DefaultAzureCredential
+
+from semantic_kernel.agents import AzureAIAgent, AzureAIAgentSettings, AzureAIAgentThread
+from semantic_kernel.contents import ChatMessageContent, FunctionCallContent, FunctionResultContent
+
+"""
+The following sample demonstrates how to create a simple Azure AI agent that
+uses the MCP tool to connect to an MCP server.
+"""
+
+TASK = "Please summarize the Azure REST API specifications Readme"
+
+
+async def handle_intermediate_messages(message: ChatMessageContent) -> None:
+ for item in message.items or []:
+ if isinstance(item, FunctionResultContent):
+ print(f"Function Result:> {item.result} for function: {item.name}")
+ elif isinstance(item, FunctionCallContent):
+ print(f"Function Call:> {item.name} with arguments: {item.arguments}")
+ else:
+ print(f"{item}")
+
+
+async def main() -> None:
+ async with (
+ DefaultAzureCredential() as creds,
+ AzureAIAgent.create_client(credential=creds) as client,
+ ):
+ # 1. Define the MCP tool with the server URL
+ mcp_tool = McpTool(
+ server_label="github",
+ server_url="https://gitmcp.io/Azure/azure-rest-api-specs",
+ allowed_tools=[], # Specify allowed tools if needed
+ )
+
+        # Optionally, configure whether tool calls require approval.
+        # Allowed values are "never" or "always".
+ mcp_tool.set_approval_mode("never")
+
+ # 2. Create an agent with the MCP tool on the Azure AI agent service
+ agent_definition = await client.agents.create_agent(
+ model=AzureAIAgentSettings().model_deployment_name,
+ tools=mcp_tool.definitions,
+ instructions="You are a helpful agent that can use MCP tools to assist users.",
+ )
+
+ # 3. Create a Semantic Kernel agent for the Azure AI agent
+ agent = AzureAIAgent(
+ client=client,
+ definition=agent_definition,
+ )
+
+ # 4. Create a thread for the agent
+ # If no thread is provided, a new thread will be
+ # created and returned with the initial response
+ thread: AzureAIAgentThread | None = None
+
+ try:
+ print(f"# User: '{TASK}'")
+ # 5. Invoke the agent for the specified thread for response
+ async for response in agent.invoke(
+ messages=TASK, thread=thread, on_intermediate_message=handle_intermediate_messages
+ ):
+ print(f"# Agent: {response}")
+ thread = response.thread
+ finally:
+            # 6. Cleanup: Delete the thread and agent
+ await thread.delete() if thread else None
+ await client.agents.delete_agent(agent.id)
+
+ """
+ Sample Output:
+
+ # User: 'Please summarize the Azure REST API specifications Readme'
+ Function Call:> fetch_azure_rest_api_docs with arguments: {}
+ The Azure REST API specifications Readme provides comprehensive documentation and guidelines for designing,
+ authoring, validating, and evolving Azure REST APIs. It covers key areas including:
+
+ 1. Breaking changes and versioning: Guidelines to manage API changes that break backward compatibility, when to
+ increment API versions, and how to maintain smooth API evolution.
+
+ 2. OpenAPI/Swagger specifications: How to author REST APIs using OpenAPI specification 2.0 (Swagger), including
+ structure, conventions, validation tools, and extensions used by AutoRest for generating client SDKs.
+
+ 3. TypeSpec language: Introduction to TypeSpec, a powerful language for describing and generating REST API
+ specifications and client SDKs with extensibility to other API styles.
+
+ 4. Directory structure and uniform versioning: Organizing service specifications by teams, resource provider
+ namespaces, and following uniform versioning to keep API versions consistent across documentation and SDKs.
+
+ 5. Validation and tooling: Tools and processes like OAV, AutoRest, RESTler, and CI checks used to validate API
+ specs, generate SDKs, detect breaking changes, lint specifications, and test service contract accuracy.
+
+ 6. Authoring best practices: Manual and automated guidelines for quality API spec authoring, including writing
+ effective descriptions, resource modeling, naming conventions, and examples.
+
+ 7. Code generation configurations: How to configure readme files to generate SDKs for various languages
+ including .NET, Java, Python, Go, Typescript, and Azure CLI using AutoRest.
+
+ 8. API Scenarios and testing: Defining API scenario test files for end-to-end REST API workflows, including
+ variables, ARM template integration, and usage of test-proxy for recording traffic.
+
+ 9. SDK automation and release requests: Workflows for SDK generation validation, suppressing breaking change
+ warnings, and requesting official Azure SDK releases.
+
+ Overall, the Readme acts as a central hub providing references, guidelines, examples, and tools for maintaining
+ high-quality Azure REST API specifications and seamless SDK generation across multiple languages and
+ platforms. It ensures consistent API design, versioning, validation, and developer experience in the Azure
+ ecosystem.
+ """
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/python/semantic_kernel/agents/azure_ai/agent_content_generation.py b/python/semantic_kernel/agents/azure_ai/agent_content_generation.py
index c2152e894c3d..7e389be21c47 100644
--- a/python/semantic_kernel/agents/azure_ai/agent_content_generation.py
+++ b/python/semantic_kernel/agents/azure_ai/agent_content_generation.py
@@ -15,6 +15,7 @@
MessageTextFilePathAnnotation,
MessageTextUrlCitationAnnotation,
RequiredFunctionToolCall,
+ RequiredMcpToolCall,
RunStep,
RunStepAzureAISearchToolCall,
RunStepBingGroundingToolCall,
@@ -25,6 +26,7 @@
RunStepDeltaFunctionToolCall,
RunStepFileSearchToolCall,
RunStepFunctionToolCall,
+ RunStepMcpToolCall,
RunStepOpenAPIToolCall,
ThreadMessage,
ThreadRun,
@@ -774,3 +776,140 @@ def generate_streaming_annotation_content(
title=title,
citation_type=citation_type,
)
+
+
+@experimental
+def generate_mcp_content(agent_name: str, mcp_tool_call: RunStepMcpToolCall) -> ChatMessageContent:
+ """Generate MCP tool content.
+
+ Args:
+ agent_name: The name of the agent.
+ mcp_tool_call: The MCP tool call.
+
+ Returns:
+ The generated content.
+ """
+ mcp_result = FunctionResultContent(
+ function_name=mcp_tool_call.name,
+ id=mcp_tool_call.id,
+ result=mcp_tool_call.output,
+ )
+
+ return ChatMessageContent(
+ role=AuthorRole.ASSISTANT,
+ name=agent_name,
+ items=[mcp_result],
+ inner_content=mcp_tool_call, # type: ignore
+ )
+
+
+@experimental
+def generate_mcp_call_content(agent_name: str, mcp_tool_calls: list[RequiredMcpToolCall]) -> ChatMessageContent:
+ """Generate MCP tool call content.
+
+ Args:
+ agent_name: The name of the agent.
+ mcp_tool_calls: The MCP tool calls.
+
+ Returns:
+ The generated content.
+ """
+ content_items: list[FunctionCallContent] = []
+ for mcp_call in mcp_tool_calls:
+ content_items.append(
+ FunctionCallContent(
+ id=mcp_call.id,
+ name=mcp_call.name,
+ function_name=mcp_call.name,
+ arguments=mcp_call.arguments,
+ server_label=mcp_call.server_label,
+ )
+ )
+
+ return ChatMessageContent(
+ role=AuthorRole.ASSISTANT,
+ name=agent_name,
+ items=content_items, # type: ignore
+ )
+
+
+@experimental
+def generate_streaming_mcp_call_content(
+ agent_name: str, mcp_tool_calls: list["RequiredMcpToolCall"]
+) -> "StreamingChatMessageContent | None":
+ """Generate streaming MCP content.
+
+ Args:
+ agent_name: The name of the agent.
+        mcp_tool_calls: The MCP tool call details.
+
+ Returns:
+ The generated streaming content.
+ """
+ items: list[FunctionCallContent] = []
+ for index, tool in enumerate(mcp_tool_calls or []):
+ if isinstance(tool, RequiredMcpToolCall):
+ items.append(
+ FunctionCallContent(
+ id=tool.id,
+ index=index,
+ name=tool.name,
+ function_name=tool.name,
+ arguments=tool.arguments,
+ server_label=tool.server_label,
+ )
+ )
+
+ return (
+ StreamingChatMessageContent(
+ role=AuthorRole.ASSISTANT,
+ name=agent_name,
+ items=items, # type: ignore
+ choice_index=0,
+ )
+ if items
+ else None
+ )
+
+
+@experimental
+def generate_streaming_mcp_content(
+ agent_name: str, step_details: "RunStepDeltaToolCallObject"
+) -> StreamingChatMessageContent | None:
+ """Generate MCP tool content.
+
+ Args:
+ agent_name: The name of the agent.
+        step_details: The step details containing MCP tool calls.
+
+ Returns:
+ The generated content.
+ """
+ if not step_details.tool_calls:
+ return None
+
+ items: list[FunctionResultContent] = []
+
+ for _, tool in enumerate(step_details.tool_calls):
+ if tool.type == "mcp":
+ mcp_tool_call = cast(RunStepMcpToolCall, tool)
+ if not mcp_tool_call.get("output"):
+ continue
+ mcp_result = FunctionResultContent(
+ function_name=mcp_tool_call.get("name"),
+ id=mcp_tool_call.get("id"),
+ result=mcp_tool_call.get("output"),
+ )
+ items.append(mcp_result)
+
+ return (
+ StreamingChatMessageContent(
+ role=AuthorRole.ASSISTANT,
+ name=agent_name,
+ items=items, # type: ignore
+ inner_content=mcp_tool_call, # type: ignore
+ choice_index=0,
+ )
+ if items
+ else None
+ ) # type: ignore
diff --git a/python/semantic_kernel/agents/azure_ai/agent_thread_actions.py b/python/semantic_kernel/agents/azure_ai/agent_thread_actions.py
index 3ade5ff35282..7f8f900e9395 100644
--- a/python/semantic_kernel/agents/azure_ai/agent_thread_actions.py
+++ b/python/semantic_kernel/agents/azure_ai/agent_thread_actions.py
@@ -12,6 +12,7 @@
AsyncAgentRunStream,
BaseAsyncAgentEventHandler,
FunctionToolDefinition,
+ RequiredMcpToolCall,
ResponseFormatJsonSchemaType,
RunStep,
RunStepAzureAISearchToolCall,
@@ -20,13 +21,16 @@
RunStepDeltaChunk,
RunStepDeltaToolCallObject,
RunStepFileSearchToolCall,
+ RunStepMcpToolCall,
RunStepMessageCreationDetails,
RunStepOpenAPIToolCall,
RunStepToolCallDetails,
RunStepType,
+ SubmitToolApprovalAction,
SubmitToolOutputsAction,
ThreadMessage,
ThreadRun,
+ ToolApproval,
ToolDefinition,
TruncationObject,
)
@@ -40,12 +44,16 @@
generate_function_call_content,
generate_function_call_streaming_content,
generate_function_result_content,
+ generate_mcp_call_content,
+ generate_mcp_content,
generate_message_content,
generate_openapi_content,
generate_streaming_azure_ai_search_content,
generate_streaming_bing_grounding_content,
generate_streaming_code_interpreter_content,
generate_streaming_file_search_content,
+ generate_streaming_mcp_call_content,
+ generate_streaming_mcp_content,
generate_streaming_message_content,
generate_streaming_openapi_content,
get_function_call_contents,
@@ -199,31 +207,73 @@ async def invoke(
)
# Check if function calling is required
- if run.status == "requires_action" and isinstance(run.required_action, SubmitToolOutputsAction):
- logger.debug(f"Run [{run.id}] requires tool action for agent `{agent.name}` and thread `{thread_id}`")
- fccs = get_function_call_contents(run, function_steps)
- if fccs:
+ if run.status == "requires_action":
+ if isinstance(run.required_action, SubmitToolOutputsAction):
logger.debug(
- f"Yielding generate_function_call_content for agent `{agent.name}` and "
- f"thread `{thread_id}`, visibility False"
+ f"Run [{run.id}] requires tool action for agent `{agent.name}` and thread `{thread_id}`"
)
- yield False, generate_function_call_content(agent_name=agent.name, fccs=fccs)
+ fccs = get_function_call_contents(run, function_steps)
+ if fccs:
+ logger.debug(
+ f"Yielding generate_function_call_content for agent `{agent.name}` and "
+ f"thread `{thread_id}`, visibility False"
+ )
+ yield False, generate_function_call_content(agent_name=agent.name, fccs=fccs)
- from semantic_kernel.contents.chat_history import ChatHistory
+ from semantic_kernel.contents.chat_history import ChatHistory
- chat_history = ChatHistory() if kwargs.get("chat_history") is None else kwargs["chat_history"]
- _ = await cls._invoke_function_calls(
- kernel=kernel, fccs=fccs, chat_history=chat_history, arguments=arguments
- )
+ chat_history = ChatHistory() if kwargs.get("chat_history") is None else kwargs["chat_history"]
+ _ = await cls._invoke_function_calls(
+ kernel=kernel, fccs=fccs, chat_history=chat_history, arguments=arguments
+ )
- tool_outputs = cls._format_tool_outputs(fccs, chat_history)
- await agent.client.agents.runs.submit_tool_outputs(
- run_id=run.id,
- thread_id=thread_id,
- tool_outputs=tool_outputs, # type: ignore
+ tool_outputs = cls._format_tool_outputs(fccs, chat_history)
+ await agent.client.agents.runs.submit_tool_outputs(
+ run_id=run.id,
+ thread_id=thread_id,
+ tool_outputs=tool_outputs, # type: ignore
+ )
+ logger.debug(f"Submitted tool outputs for agent `{agent.name}` and thread `{thread_id}`")
+ continue
+
+ # Check if MCP tool approval is required
+ elif isinstance(run.required_action, SubmitToolApprovalAction):
+ logger.debug(
+ f"Run [{run.id}] requires MCP tool approval for agent `{agent.name}` and thread `{thread_id}`"
)
- logger.debug(f"Submitted tool outputs for agent `{agent.name}` and thread `{thread_id}`")
- continue
+ tool_calls = run.required_action.submit_tool_approval.tool_calls
+ if not tool_calls:
+ logger.warning(f"No tool calls provided for MCP approval - cancelling run [{run.id}]")
+ await agent.client.agents.runs.cancel(run_id=run.id, thread_id=thread_id)
+ continue
+
+ mcp_tool_calls = [tc for tc in tool_calls if isinstance(tc, RequiredMcpToolCall)]
+ if mcp_tool_calls:
+ logger.debug(
+ f"Yielding generate_mcp_call_content for agent `{agent.name}` and "
+ f"thread `{thread_id}`, visibility False"
+ )
+ yield False, generate_mcp_call_content(agent_name=agent.name, mcp_tool_calls=mcp_tool_calls)
+
+ # Create tool approvals for MCP calls
+ tool_approvals = []
+ for mcp_call in mcp_tool_calls:
+ tool_approvals.append(
+ ToolApproval(
+ tool_call_id=mcp_call.id,
+ # TODO(evmattso): we don't support manual tool calling yet
+ # so we always approve
+ approve=True,
+ )
+ )
+
+ await agent.client.agents.runs.submit_tool_outputs(
+ run_id=run.id,
+ thread_id=thread_id,
+ tool_approvals=tool_approvals, # type: ignore
+ )
+ logger.debug(f"Submitted MCP tool approvals for agent `{agent.name}` and thread `{thread_id}`")
+ continue
steps: list[RunStep] = []
async for steps_response in agent.client.agents.run_steps.list(thread_id=thread_id, run_id=run.id):
@@ -331,6 +381,16 @@ def sort_key(step: RunStep):
agent_name=agent.name,
openapi_tool_call=openapi_tool_call,
)
+ case AgentsNamedToolChoiceType.MCP:
+ logger.debug(
+ f"Entering tool_calls (mcp) for run [{run.id}], agent "
+ f" `{agent.name}` and thread `{thread_id}`"
+ )
+ mcp_tool_call: RunStepMcpToolCall = cast(RunStepMcpToolCall, tool_call)
+ content = generate_mcp_content(
+ agent_name=agent.name,
+ mcp_tool_call=mcp_tool_call,
+ )
if content:
message_count += 1
@@ -552,6 +612,10 @@ async def _process_stream_events(
content = generate_streaming_openapi_content(
agent_name=agent.name, step_details=details
)
+ case AgentsNamedToolChoiceType.MCP:
+ content = generate_streaming_mcp_content(
+ agent_name=agent.name, step_details=details
+ )
if content:
if output_messages is not None:
output_messages.append(content)
@@ -564,41 +628,95 @@ async def _process_stream_events(
f"thread `{thread_id}` with event data: {event_data}"
)
run = cast(ThreadRun, event_data)
- action_result = await cls._handle_streaming_requires_action(
- agent_name=agent.name,
- kernel=kernel,
- run=run,
- function_steps=function_steps,
- arguments=arguments,
- )
- if action_result is None:
- raise RuntimeError(
- f"Function call required but no function steps found for agent `{agent.name}` "
- f"thread: {thread_id}."
+
+ # Check if this is a function call request
+ if isinstance(run.required_action, SubmitToolOutputsAction):
+ action_result = await cls._handle_streaming_requires_action(
+ agent_name=agent.name,
+ kernel=kernel,
+ run=run,
+ function_steps=function_steps,
+ arguments=arguments,
)
+ if action_result is None:
+ raise RuntimeError(
+ f"Function call required but no function steps found for agent `{agent.name}` "
+ f"thread: {thread_id}."
+ )
- for content in (
- action_result.function_call_streaming_content,
- action_result.function_result_streaming_content,
- ):
- if content and output_messages is not None:
- output_messages.append(content)
-
- handler: BaseAsyncAgentEventHandler = AsyncAgentEventHandler()
- await agent.client.agents.runs.submit_tool_outputs_stream(
- run_id=run.id,
- thread_id=thread_id,
- tool_outputs=action_result.tool_outputs, # type: ignore
- event_handler=handler,
- )
- # Pass the handler to the stream to continue processing
- stream = handler # type: ignore
+ for content in (
+ action_result.function_call_streaming_content,
+ action_result.function_result_streaming_content,
+ ):
+ if content and output_messages is not None:
+ output_messages.append(content)
- logger.debug(
- f"Submitted tool outputs stream for agent `{agent.name}` and "
- f"thread `{thread_id}` and run id `{run.id}`"
- )
- break
+ handler: BaseAsyncAgentEventHandler = AsyncAgentEventHandler()
+ await agent.client.agents.runs.submit_tool_outputs_stream(
+ run_id=run.id,
+ thread_id=thread_id,
+ tool_outputs=action_result.tool_outputs, # type: ignore
+ event_handler=handler,
+ )
+ # Pass the handler to the stream to continue processing
+ stream = handler # type: ignore
+
+ logger.debug(
+ f"Submitted tool outputs stream for agent `{agent.name}` and "
+ f"thread `{thread_id}` and run id `{run.id}`"
+ )
+ break
+
+ # Check if this is an MCP tool approval request
+ elif isinstance(run.required_action, SubmitToolApprovalAction):
+ tool_calls = run.required_action.submit_tool_approval.tool_calls
+ if not tool_calls:
+ logger.warning(f"No tool calls provided for MCP approval - cancelling run [{run.id}]")
+ await agent.client.agents.runs.cancel(run_id=run.id, thread_id=thread_id)
+ break
+
+ mcp_tool_calls = [tc for tc in tool_calls if isinstance(tc, RequiredMcpToolCall)]
+ if mcp_tool_calls:
+ logger.debug(
+ f"Processing MCP tool approvals for agent `{agent.name}` and "
+ f"thread `{thread_id}` and run id `{run.id}`"
+ )
+
+ if output_messages is not None:
+ content = generate_streaming_mcp_call_content(
+ agent_name=agent.name, mcp_tool_calls=mcp_tool_calls
+ )
+ if content:
+ output_messages.append(content)
+
+ # Create tool approvals for MCP calls
+ tool_approvals = []
+ for mcp_call in mcp_tool_calls:
+ tool_approvals.append(
+ ToolApproval(
+ tool_call_id=mcp_call.id,
+ approve=True,
+ # Note: headers would need to be provided by the MCP tool configuration
+ # This is a simplified implementation
+ headers={},
+ )
+ )
+
+ handler: BaseAsyncAgentEventHandler = AsyncAgentEventHandler() # type: ignore
+ await agent.client.agents.runs.submit_tool_outputs_stream(
+ run_id=run.id,
+ thread_id=thread_id,
+ tool_approvals=tool_approvals, # type: ignore
+ event_handler=handler,
+ )
+ # Pass the handler to the stream to continue processing
+ stream = handler # type: ignore
+
+ logger.debug(
+ f"Submitted MCP tool approvals stream for agent `{agent.name}` and "
+ f"thread `{thread_id}` and run id `{run.id}`"
+ )
+ break
elif event_type == AgentStreamEvent.THREAD_RUN_COMPLETED:
logger.debug(
diff --git a/python/tests/unit/connectors/ai/open_ai/services/test_azure_text_to_image.py b/python/tests/unit/connectors/ai/open_ai/services/test_azure_text_to_image.py
index 20e46f27fcc0..5e5ab9a108f8 100644
--- a/python/tests/unit/connectors/ai/open_ai/services/test_azure_text_to_image.py
+++ b/python/tests/unit/connectors/ai/open_ai/services/test_azure_text_to_image.py
@@ -78,10 +78,10 @@ def test_azure_text_to_image_init_with_from_dict(azure_openai_unit_test_env) ->
assert azure_text_to_image.client.default_headers[key] == value
-@patch.object(AsyncImages, "generate", return_value=AsyncMock(spec=ImagesResponse))
+@patch.object(AsyncImages, "generate", new_callable=AsyncMock)
async def test_azure_text_to_image_calls_with_parameters(mock_generate, azure_openai_unit_test_env) -> None:
- mock_generate.return_value.data = [Image(url="abc")]
- mock_generate.return_value.usage = None
+ mock_response = ImagesResponse(created=1, data=[Image(url="abc")], usage=None)
+ mock_generate.return_value = mock_response
prompt = "A painting of a vase with flowers"
width = 512
diff --git a/python/tests/unit/connectors/ai/open_ai/services/test_openai_text_to_image.py b/python/tests/unit/connectors/ai/open_ai/services/test_openai_text_to_image.py
index 18ff4b749d0f..6d5845994fda 100644
--- a/python/tests/unit/connectors/ai/open_ai/services/test_openai_text_to_image.py
+++ b/python/tests/unit/connectors/ai/open_ai/services/test_openai_text_to_image.py
@@ -32,10 +32,11 @@ def test_init(openai_unit_test_env):
assert openai_text_to_image.ai_model_id == openai_unit_test_env["OPENAI_TEXT_TO_IMAGE_MODEL_ID"]
-def test_init_validation_fail() -> None:
+@pytest.mark.parametrize("exclude_list", [["OPENAI_TEXT_TO_IMAGE_MODEL_ID"]], indirect=True)
+def test_init_validation_fail(openai_unit_test_env) -> None:
"""Test that initialization fails when required parameters are missing."""
with pytest.raises(ServiceInitializationError):
- OpenAITextToImage(api_key="34523", ai_model_id=None)
+ OpenAITextToImage(api_key="34523", ai_model_id=None, env_file_path="test.env")
def test_init_to_from_dict(openai_unit_test_env):
@@ -77,11 +78,11 @@ def test_prompt_execution_settings_class(openai_unit_test_env) -> None:
assert openai_text_to_image.get_prompt_execution_settings_class() == OpenAITextToImageExecutionSettings
-@patch.object(AsyncImages, "generate", return_value=AsyncMock(spec=ImagesResponse))
+@patch.object(AsyncImages, "generate", new_callable=AsyncMock)
async def test_generate_calls_with_parameters(mock_generate, openai_unit_test_env) -> None:
"""Test that generate_image calls the OpenAI API with correct parameters."""
- mock_generate.return_value.data = [Image(url="abc")]
- mock_generate.return_value.usage = None
+ mock_response = ImagesResponse(created=1, data=[Image(url="abc")], usage=None)
+ mock_generate.return_value = mock_response
ai_model_id = "test_model_id"
prompt = "painting of flowers in vase"
diff --git a/python/uv.lock b/python/uv.lock
index 46f5f6d4c1d7..4627973d356c 100644
--- a/python/uv.lock
+++ b/python/uv.lock
@@ -350,16 +350,16 @@ wheels = [
[[package]]
name = "azure-ai-agents"
-version = "1.1.0b2"
+version = "1.1.0b4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "azure-core", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "isodate", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/18/7b/fa5452b4cf7559ac827140edf38097026da66bfe78df70783057aea68238/azure_ai_agents-1.1.0b2.tar.gz", hash = "sha256:432ce359c3d02e05873d1d670bd91c1393bbe51f734ec4ce3f76fcafd8104c75", size = 302724, upload-time = "2025-06-09T16:53:47.994Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/30/8f/741c57202f4e4b6a7782f5a7ce7b18fd607696a154f5c9f2c716b207fa61/azure_ai_agents-1.1.0b4.tar.gz", hash = "sha256:126007543e3e9b9a4be017287e230e911fa126081f05b1c593e0d75702d01cd5", size = 331198, upload-time = "2025-07-11T19:55:35.776Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/2c/34/3f47d9cb320b672c8482b15b0431fc546aea5b00104e405042e75d2f1397/azure_ai_agents-1.1.0b2-py3-none-any.whl", hash = "sha256:4e48aba6ac2cdb4955adae5a5c94324b51005d2360ef5bc30a4d2ed86d7a9bde", size = 189945, upload-time = "2025-06-09T16:53:49.721Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/40/7a1bd4b98c7c16b863782b07c9c3e98a1b531068a3b5462c935a468db5ce/azure_ai_agents-1.1.0b4-py3-none-any.whl", hash = "sha256:2256cdddd6176ac3855c9b1fb7174e156d1a2f7538cfb6bd80a743e2b205d775", size = 200368, upload-time = "2025-07-11T19:55:37.512Z" },
]
[[package]]
@@ -378,7 +378,7 @@ wheels = [
[[package]]
name = "azure-ai-projects"
-version = "1.0.0b11"
+version = "1.0.0b12"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "azure-ai-agents", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
@@ -387,9 +387,9 @@ dependencies = [
{ name = "isodate", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
{ name = "typing-extensions", marker = "sys_platform == 'darwin' or sys_platform == 'linux' or sys_platform == 'win32'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/01/4b/0a879eb66b5d9a08ab09292ff9a36a4e9c855b458d0e843b5e838fc6f6fd/azure_ai_projects-1.0.0b11.tar.gz", hash = "sha256:68a115c48cde7d5f9c29aee61c7fbf0b6de69aecbd1dc749b847a1e1348216b5", size = 133087, upload-time = "2025-05-16T00:33:32.286Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/a5/57/9a89c1978ec9ce29a3be454b83b66885982261762d7a436cad73c47c9225/azure_ai_projects-1.0.0b12.tar.gz", hash = "sha256:1a3784e4be6af3b0fc76e9e4a64158a38f6679fe3a1f8b9c33f12bc8914ae36c", size = 144358, upload-time = "2025-06-27T04:12:48.334Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/3e/2d/5502377ecc07677365a1e86be64d8cb9959eb6e9b605fcc28f1f68d3777a/azure_ai_projects-1.0.0b11-py3-none-any.whl", hash = "sha256:3572f2989627e896ecfebe2fa7326d5b940f920cc581e98809b244af7a38cbf0", size = 130983, upload-time = "2025-05-16T00:33:33.789Z" },
+ { url = "https://files.pythonhosted.org/packages/73/e4/50cd2c3bd5ab745e85a4a1bd591bf4343d6e3470580f1eadceed55fd57c0/azure_ai_projects-1.0.0b12-py3-none-any.whl", hash = "sha256:4e3d3ef275f7409ea8030e474626968848055d4b3717ff7ef03681da809c096f", size = 129783, upload-time = "2025-06-27T04:12:49.837Z" },
]
[[package]]
@@ -5669,14 +5669,14 @@ requires-dist = [
{ name = "aiortc", marker = "extra == 'realtime'", specifier = ">=1.9.0" },
{ name = "anthropic", marker = "extra == 'anthropic'", specifier = "~=0.32" },
{ name = "autogen-agentchat", marker = "extra == 'autogen'", specifier = ">=0.2,<0.4" },
- { name = "azure-ai-agents", specifier = ">=1.1.0b1" },
+ { name = "azure-ai-agents", specifier = ">=1.1.0b4" },
{ name = "azure-ai-inference", marker = "extra == 'azure'", specifier = ">=1.0.0b6" },
- { name = "azure-ai-projects", specifier = ">=1.0.0b11" },
+ { name = "azure-ai-projects", specifier = ">=1.0.0b12" },
{ name = "azure-core-tracing-opentelemetry", marker = "extra == 'azure'", specifier = ">=1.0.0b11" },
{ name = "azure-cosmos", marker = "extra == 'azure'", specifier = "~=4.7" },
{ name = "azure-identity", specifier = ">=1.13" },
{ name = "azure-search-documents", marker = "extra == 'azure'", specifier = ">=11.6.0b4" },
- { name = "boto3", marker = "extra == 'aws'", specifier = ">=1.36.4,<1.39.0" },
+ { name = "boto3", marker = "extra == 'aws'", specifier = ">=1.36.4,<1.40.0" },
{ name = "chromadb", marker = "extra == 'chroma'", specifier = ">=0.5,<1.1" },
{ name = "cloudevents", specifier = "~=1.0" },
{ name = "dapr", marker = "extra == 'dapr'", specifier = ">=1.14.0" },
@@ -5711,7 +5711,7 @@ requires-dist = [
{ name = "pydantic", specifier = ">=2.0,!=2.10.0,!=2.10.1,!=2.10.2,!=2.10.3,<2.12" },
{ name = "pydantic-settings", specifier = "~=2.0" },
{ name = "pymilvus", marker = "extra == 'milvus'", specifier = ">=2.3,<2.6" },
- { name = "pymongo", marker = "extra == 'mongo'", specifier = ">=4.8.0,<4.13" },
+ { name = "pymongo", marker = "extra == 'mongo'", specifier = ">=4.8.0,<4.14" },
{ name = "pyodbc", marker = "extra == 'sql'", specifier = ">=5.2" },
{ name = "qdrant-client", marker = "extra == 'qdrant'", specifier = "~=1.9" },
{ name = "redis", extras = ["hiredis"], marker = "extra == 'redis'", specifier = "~=6.0" },
From d2b22ef3e9cf9aa0dd864d65f055f19467a97128 Mon Sep 17 00:00:00 2001
From: Stephen Toub
Date: Fri, 18 Jul 2025 04:57:18 -0400
Subject: [PATCH 07/10] .Net: Update to latest M.E.AI{.Abstractions} + OpenAI
Related Packages (#12685)
This pull request updates package dependencies, refactors the OpenAI
Realtime demo to align with API changes, and modifies related unit tests
to reflect these updates. The most significant changes include updating
package versions, transitioning from the `RealtimeConversation` API to
the `Realtime` API, and adjusting test logic to accommodate updated data
structures.
### Dependency Updates:
* Updated `Azure.AI.OpenAI` to version `2.2.0-beta.5` and `OpenAI` to
version `2.2.0`.
[[1]](diffhunk://#diff-21abc2ac38e0ade95299a2450724507fe1d080c383a3024337f9177278c64186L22-R22)
[[2]](diffhunk://#diff-21abc2ac38e0ade95299a2450724507fe1d080c383a3024337f9177278c64186L73-R73)
* Updated `System.Text.Json` to version `8.0.6`.
* Upgraded `Microsoft.Extensions.AI` and related libraries to version
`9.7.1`.
### OpenAI Realtime Demo Refactor:
* Replaced `RealtimeConversationClient` and related classes with
`RealtimeClient` and its updated API. This includes changes to session
initialization, configuration, and item handling in `Program.cs`.
[[1]](diffhunk://#diff-fa8b1514e6458341bf08441b1dc14550fb9bb75d2a0c108c75e1615c2e2ba09dL11-R19)
[[2]](diffhunk://#diff-fa8b1514e6458341bf08441b1dc14550fb9bb75d2a0c108c75e1615c2e2ba09dL36-R45)
[[3]](diffhunk://#diff-fa8b1514e6458341bf08441b1dc14550fb9bb75d2a0c108c75e1615c2e2ba09dL378-R400)
* Adjusted method calls and data types to align with the new API, such
as replacing `ConversationItem` with `RealtimeItem` and updating event
types like `ConversationUpdate` to `RealtimeUpdate`.
[[1]](diffhunk://#diff-fa8b1514e6458341bf08441b1dc14550fb9bb75d2a0c108c75e1615c2e2ba09dL65-R70)
[[2]](diffhunk://#diff-fa8b1514e6458341bf08441b1dc14550fb9bb75d2a0c108c75e1615c2e2ba09dL85-R84)
[[3]](diffhunk://#diff-fa8b1514e6458341bf08441b1dc14550fb9bb75d2a0c108c75e1615c2e2ba09dL229-R228)
### Unit Test Updates:
* Updated test cases to use new data structures, such as replacing
`IReadOnlyList<string>` with `IReadOnlyList<ReasoningSummaryPart>` and
adapting related assertions.
[[1]](diffhunk://#diff-3e486c31e4424c1a3ea61e23b1f7a266d186a4016ad57d7187dacf7b4bc8140fL198-R206)
[[2]](diffhunk://#diff-8ea1809160a96b9a3ebca5c1695353800f302aa2c5aabb4a2be51c6494f6f315L105-R105)
* Added `OPENAI001` to `NoWarn` lists in several `.csproj` files to
suppress warnings related to the OpenAI SDK.
[[1]](diffhunk://#diff-ae520434a4ad683f6736397dfbd0b308dda10dbce75af0c5064efbb3b0059dc0L11-R11)
[[2]](diffhunk://#diff-5703c2716d4b9753ae67ce3c1434595c4adbff1eb6564dd2fa0a0882c3925551L8-R8)
[[3]](diffhunk://#diff-8788687b51a5a626ca287b5c18f667bf1f5fa9b13af4ee425765f8e84b765ac3L10-R10)
---------
Co-authored-by: Mark Wallace <127216156+markwallace-microsoft@users.noreply.github.com>
Co-authored-by: Roger Barreto <19890735+rogerbarreto@users.noreply.github.com>
---
dotnet/Directory.Packages.props | 14 ++--
.../OpenAIRealtime/OpenAIRealtime.csproj | 2 +-
.../samples/Demos/OpenAIRealtime/Program.cs | 49 +++++++------
.../Extensions/OpenAIResponseExtensions.cs | 13 ++--
.../Extensions/ResponseItemExtensionsTests.cs | 4 +-
.../OpenAIResponseExtensionsTests.cs | 68 +++++++++++++------
.../Connectors.AzureOpenAI.UnitTests.csproj | 2 +-
.../Connectors.AzureOpenAI.csproj | 2 +-
.../KernelFunctionMetadataExtensionsTests.cs | 2 +-
.../Connectors.OpenAI.UnitTests.csproj | 2 +-
.../Core/OpenAIJsonSchemaTransformerTests.cs | 2 +-
.../KernelFunctionMetadataExtensionsTests.cs | 2 +-
.../OpenAIChatResponseFormatBuilderTests.cs | 29 ++++++--
.../Connectors.OpenAI.csproj | 2 +-
.../AIFunctionKernelFunction.cs | 2 +-
.../SemanticKernel.UnitTests.csproj | 2 +-
16 files changed, 120 insertions(+), 77 deletions(-)
diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props
index c60c6aedd669..22779c89ef7c 100644
--- a/dotnet/Directory.Packages.props
+++ b/dotnet/Directory.Packages.props
@@ -19,7 +19,7 @@
-
+
@@ -70,7 +70,7 @@
-
+
@@ -88,7 +88,7 @@
-
+
@@ -97,10 +97,10 @@
-
-
-
-
+
+
+
+
diff --git a/dotnet/samples/Demos/OpenAIRealtime/OpenAIRealtime.csproj b/dotnet/samples/Demos/OpenAIRealtime/OpenAIRealtime.csproj
index 7aaa8d7e8c4c..79a0672716e0 100644
--- a/dotnet/samples/Demos/OpenAIRealtime/OpenAIRealtime.csproj
+++ b/dotnet/samples/Demos/OpenAIRealtime/OpenAIRealtime.csproj
@@ -5,7 +5,7 @@
net8.0
enable
enable
- $(NoWarn);VSTHRD111,CA2007,CS8618,CS1591,CA1052,SKEXP0001
+ $(NoWarn);VSTHRD111,CA2007,CS8618,CS1591,CA1052,CA1810,SKEXP0001
5ee045b0-aea3-4f08-8d31-32d1a6f8fed0
diff --git a/dotnet/samples/Demos/OpenAIRealtime/Program.cs b/dotnet/samples/Demos/OpenAIRealtime/Program.cs
index fb17b4bbfd3e..2de269dd1609 100644
--- a/dotnet/samples/Demos/OpenAIRealtime/Program.cs
+++ b/dotnet/samples/Demos/OpenAIRealtime/Program.cs
@@ -8,7 +8,7 @@
using Microsoft.Extensions.Configuration;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Connectors.OpenAI;
-using OpenAI.RealtimeConversation;
+using OpenAI.Realtime;
namespace OpenAIRealtime;
@@ -16,7 +16,7 @@ namespace OpenAIRealtime;
///
/// Demonstrates the use of the OpenAI Realtime API with function calling and Semantic Kernel.
-/// For conversational experiences, it is recommended to use from the Azure/OpenAI SDK.
+/// For conversational experiences, it is recommended to use from the Azure/OpenAI SDK.
/// Since the OpenAI Realtime API supports function calling, the example shows how to combine it with Semantic Kernel plugins and functions.
///
internal sealed class Program
@@ -33,7 +33,7 @@ public static async Task Main(string[] args)
kernel.ImportPluginFromType();
// Start a new conversation session.
- using RealtimeConversationSession session = await realtimeConversationClient.StartConversationSessionAsync();
+ using RealtimeSession session = await realtimeConversationClient.StartConversationSessionAsync("gpt-4o-realtime-preview");
// Initialize session options.
// Session options control connection-wide behavior shared across all conversations,
@@ -41,8 +41,8 @@ public static async Task Main(string[] args)
ConversationSessionOptions sessionOptions = new()
{
Voice = ConversationVoice.Alloy,
- InputAudioFormat = ConversationAudioFormat.Pcm16,
- OutputAudioFormat = ConversationAudioFormat.Pcm16,
+ InputAudioFormat = RealtimeAudioFormat.Pcm16,
+ OutputAudioFormat = RealtimeAudioFormat.Pcm16,
InputTranscriptionOptions = new()
{
Model = "whisper-1",
@@ -62,13 +62,12 @@ public static async Task Main(string[] args)
}
// Configure session with defined options.
- await session.ConfigureSessionAsync(sessionOptions);
+ await session.ConfigureConversationSessionAsync(sessionOptions);
// Items such as user, assistant, or system messages, as well as input audio, can be sent to the session.
// An example of sending user message to the session.
// ConversationItem can be constructed from Microsoft.SemanticKernel.ChatMessageContent if needed by mapping the relevant fields.
- await session.AddItemAsync(
- ConversationItem.CreateUserMessage(["I'm trying to decide what to wear on my trip."]));
+ await session.AddItemAsync(RealtimeItem.CreateUserMessage(["I'm trying to decide what to wear on my trip."]));
// Use audio file that contains a recorded question: "What's the weather like in San Francisco, California?"
string inputAudioPath = FindFile("Assets\\realtime_whats_the_weather_pcm16_24khz_mono.wav");
@@ -82,7 +81,7 @@ await session.AddItemAsync(
Dictionary functionArgumentBuildersById = [];
// Define a loop to receive conversation updates in the session.
- await foreach (ConversationUpdate update in session.ReceiveUpdatesAsync())
+ await foreach (RealtimeUpdate update in session.ReceiveUpdatesAsync())
{
// Notification indicating the start of the conversation session.
if (update is ConversationSessionStartedUpdate sessionStartedUpdate)
@@ -92,21 +91,21 @@ await session.AddItemAsync(
}
// Notification indicating the start of detected voice activity.
- if (update is ConversationInputSpeechStartedUpdate speechStartedUpdate)
+ if (update is InputAudioSpeechStartedUpdate speechStartedUpdate)
{
Console.WriteLine(
$" -- Voice activity detection started at {speechStartedUpdate.AudioStartTime}");
}
// Notification indicating the end of detected voice activity.
- if (update is ConversationInputSpeechFinishedUpdate speechFinishedUpdate)
+ if (update is InputAudioSpeechFinishedUpdate speechFinishedUpdate)
{
Console.WriteLine(
$" -- Voice activity detection ended at {speechFinishedUpdate.AudioEndTime}");
}
// Notification indicating the start of item streaming, such as a function call or response message.
- if (update is ConversationItemStreamingStartedUpdate itemStreamingStartedUpdate)
+ if (update is OutputStreamingStartedUpdate itemStreamingStartedUpdate)
{
Console.WriteLine(" -- Begin streaming of new item");
if (!string.IsNullOrEmpty(itemStreamingStartedUpdate.FunctionName))
@@ -116,7 +115,7 @@ await session.AddItemAsync(
}
// Notification about item streaming delta, which may include audio transcript, audio bytes, or function arguments.
- if (update is ConversationItemStreamingPartDeltaUpdate deltaUpdate)
+ if (update is OutputDeltaUpdate deltaUpdate)
{
Console.Write(deltaUpdate.AudioTranscript);
Console.Write(deltaUpdate.Text);
@@ -148,7 +147,7 @@ await session.AddItemAsync(
// Notification indicating the end of item streaming, such as a function call or response message.
// At this point, audio transcript can be displayed on console, or a function can be called with aggregated arguments.
- if (update is ConversationItemStreamingFinishedUpdate itemStreamingFinishedUpdate)
+ if (update is OutputStreamingFinishedUpdate itemStreamingFinishedUpdate)
{
Console.WriteLine();
Console.WriteLine($" -- Item streaming finished, item_id={itemStreamingFinishedUpdate.ItemId}");
@@ -176,7 +175,7 @@ await session.AddItemAsync(
var resultContent = await functionCallContent.InvokeAsync(kernel);
// Create a function call output conversation item with function call result.
- ConversationItem functionOutputItem = ConversationItem.CreateFunctionCallOutput(
+ RealtimeItem functionOutputItem = RealtimeItem.CreateFunctionCallOutput(
callId: itemStreamingFinishedUpdate.FunctionCallId,
output: ProcessFunctionResult(resultContent.Result));
@@ -198,7 +197,7 @@ await session.AddItemAsync(
}
// Notification indicating the completion of transcription from input audio.
- if (update is ConversationInputTranscriptionFinishedUpdate transcriptionCompletedUpdate)
+ if (update is InputAudioTranscriptionFinishedUpdate transcriptionCompletedUpdate)
{
Console.WriteLine();
Console.WriteLine($" -- User audio transcript: {transcriptionCompletedUpdate.Transcript}");
@@ -206,7 +205,7 @@ await session.AddItemAsync(
}
// Notification about completed model response turn.
- if (update is ConversationResponseFinishedUpdate turnFinishedUpdate)
+ if (update is ResponseFinishedUpdate turnFinishedUpdate)
{
Console.WriteLine($" -- Model turn generation finished. Status: {turnFinishedUpdate.Status}");
@@ -226,7 +225,7 @@ await session.AddItemAsync(
}
// Notification about error in conversation session.
- if (update is ConversationErrorUpdate errorUpdate)
+ if (update is RealtimeErrorUpdate errorUpdate)
{
Console.WriteLine();
Console.WriteLine($"ERROR: {errorUpdate.Message}");
@@ -375,24 +374,22 @@ private static string FindFile(string fileName)
}
///
- /// Helper method to get an instance of based on provided
+ /// Helper method to get an instance of based on provided
/// OpenAI or Azure OpenAI configuration.
///
- private static RealtimeConversationClient GetRealtimeConversationClient()
+ private static RealtimeClient GetRealtimeConversationClient()
{
var config = new ConfigurationBuilder()
.AddUserSecrets()
.AddEnvironmentVariables()
.Build();
- var openAIOptions = config.GetSection(OpenAIOptions.SectionName).Get();
- var azureOpenAIOptions = config.GetSection(AzureOpenAIOptions.SectionName).Get();
+ var openAIOptions = config.GetSection(OpenAIOptions.SectionName).Get()!;
+ var azureOpenAIOptions = config.GetSection(AzureOpenAIOptions.SectionName).Get()!;
if (openAIOptions is not null && openAIOptions.IsValid)
{
- return new RealtimeConversationClient(
- model: "gpt-4o-realtime-preview",
- credential: new ApiKeyCredential(openAIOptions.ApiKey));
+ return new RealtimeClient(new ApiKeyCredential(openAIOptions.ApiKey));
}
else if (azureOpenAIOptions is not null && azureOpenAIOptions.IsValid)
{
@@ -400,7 +397,7 @@ private static RealtimeConversationClient GetRealtimeConversationClient()
endpoint: new Uri(azureOpenAIOptions.Endpoint),
credential: new ApiKeyCredential(azureOpenAIOptions.ApiKey));
- return client.GetRealtimeConversationClient(azureOpenAIOptions.DeploymentName);
+ return client.GetRealtimeClient();
}
else
{
diff --git a/dotnet/src/Agents/OpenAI/Extensions/OpenAIResponseExtensions.cs b/dotnet/src/Agents/OpenAI/Extensions/OpenAIResponseExtensions.cs
index d65e0f940fff..dc1ec795cae0 100644
--- a/dotnet/src/Agents/OpenAI/Extensions/OpenAIResponseExtensions.cs
+++ b/dotnet/src/Agents/OpenAI/Extensions/OpenAIResponseExtensions.cs
@@ -52,7 +52,7 @@ public static ChatMessageContent ToChatMessageContent(this OpenAIResponse respon
}
else if (item is ReasoningResponseItem reasoningResponseItem)
{
- if (reasoningResponseItem.SummaryTextParts is not null && reasoningResponseItem.SummaryTextParts.Count > 0)
+ if (reasoningResponseItem.SummaryParts is not null && reasoningResponseItem.SummaryParts.Count > 0)
{
return new ChatMessageContent(AuthorRole.Assistant, item.ToChatMessageContentItemCollection(), innerContent: reasoningResponseItem);
}
@@ -77,7 +77,7 @@ public static ChatMessageContentItemCollection ToChatMessageContentItemCollectio
}
else if (item is ReasoningResponseItem reasoningResponseItem)
{
- return reasoningResponseItem.SummaryTextParts.ToChatMessageContentItemCollection();
+ return reasoningResponseItem.SummaryParts.ToChatMessageContentItemCollection();
}
else if (item is FunctionCallResponseItem functionCallResponseItem)
{
@@ -195,12 +195,15 @@ private static ChatMessageContentItemCollection ToChatMessageContentItemCollecti
return collection;
}
- private static ChatMessageContentItemCollection ToChatMessageContentItemCollection(this IReadOnlyList texts)
+ private static ChatMessageContentItemCollection ToChatMessageContentItemCollection(this IReadOnlyList parts)
{
var collection = new ChatMessageContentItemCollection();
- foreach (var text in texts)
+ foreach (var part in parts)
{
- collection.Add(new TextContent(text, innerContent: null));
+ if (part is ReasoningSummaryTextPart text)
+ {
+ collection.Add(new TextContent(text.Text, innerContent: text));
+ }
}
return collection;
}
diff --git a/dotnet/src/Agents/UnitTests/Extensions/ResponseItemExtensionsTests.cs b/dotnet/src/Agents/UnitTests/Extensions/ResponseItemExtensionsTests.cs
index 8683a19f379f..d547b27be0fa 100644
--- a/dotnet/src/Agents/UnitTests/Extensions/ResponseItemExtensionsTests.cs
+++ b/dotnet/src/Agents/UnitTests/Extensions/ResponseItemExtensionsTests.cs
@@ -66,7 +66,7 @@ public void VerifyToChatMessageContentFromInputFile()
{
// Arrange
var fileBytes = new ReadOnlyMemory([1, 2, 3, 4, 5]);
- IEnumerable contentParts = [ResponseContentPart.CreateInputFilePart("fileId", "fileName", new(fileBytes))];
+ IEnumerable contentParts = [ResponseContentPart.CreateInputFilePart(BinaryData.FromBytes(fileBytes), "text/plain", "fileName")];
MessageResponseItem responseItem = ResponseItem.CreateUserMessageItem(contentParts);
// Act
@@ -102,7 +102,7 @@ public void VerifyToChatMessageContentFromRefusal()
public void VerifyToChatMessageContentFromReasoning()
{
// Arrange
- IEnumerable summaryParts = ["Foo"];
+ IEnumerable summaryParts = [ReasoningSummaryPart.CreateTextPart("Foo")];
ReasoningResponseItem responseItem = ResponseItem.CreateReasoningItem(summaryParts);
// Act
diff --git a/dotnet/src/Agents/UnitTests/OpenAI/Extensions/OpenAIResponseExtensionsTests.cs b/dotnet/src/Agents/UnitTests/OpenAI/Extensions/OpenAIResponseExtensionsTests.cs
index 6fa230534427..8bd86c950e7b 100644
--- a/dotnet/src/Agents/UnitTests/OpenAI/Extensions/OpenAIResponseExtensionsTests.cs
+++ b/dotnet/src/Agents/UnitTests/OpenAI/Extensions/OpenAIResponseExtensionsTests.cs
@@ -2,6 +2,7 @@
using System;
using System.Collections.Generic;
+using System.Linq;
using System.Reflection;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Agents.OpenAI;
@@ -178,25 +179,39 @@ private OpenAIResponse CreateMockOpenAIResponse(string model, IEnumerable tools, float topP, IDictionary metadata, ResponseIncompleteStatusDetails incompleteStatusDetails, IEnumerable outputItems, bool parallelToolCallsEnabled, ResponseToolChoice toolChoice)
{
Type type = typeof(OpenAIResponse);
+ var assembly = type.Assembly;
+ var internalServiceTierType = assembly.GetType("OpenAI.Internal.InternalServiceTier");
+ var nullableInternalServiceTierType = typeof(Nullable<>).MakeGenericType(internalServiceTierType!);
ConstructorInfo? constructor = type.GetConstructor(
BindingFlags.Instance | BindingFlags.NonPublic,
null,
[
+ typeof(IDictionary),
+ typeof(float?),
+ typeof(float?),
+ nullableInternalServiceTierType,
typeof(string),
- typeof(DateTimeOffset),
- typeof(ResponseError),
+ typeof(bool?),
typeof(string),
+ typeof(IList),
typeof(string),
+ typeof(ResponseStatus?),
+ typeof(DateTimeOffset),
+ typeof(ResponseError),
+ typeof(ResponseTokenUsage),
typeof(string),
- typeof(float),
- typeof(IEnumerable),
- typeof(float),
- typeof(IDictionary),
+ typeof(ResponseReasoningOptions),
+ typeof(int?),
+ typeof(ResponseTextOptions),
+ typeof(ResponseTruncationMode?),
typeof(ResponseIncompleteStatusDetails),
- typeof(IEnumerable),
+ typeof(IList),
typeof(bool),
- typeof(ResponseToolChoice)
+ typeof(ResponseToolChoice),
+ typeof(string),
+ typeof(string),
+ typeof(IDictionary)
],
null);
@@ -204,20 +219,31 @@ private OpenAIResponse CreateMockOpenAIResponse(string id, DateTimeOffset create
{
return (OpenAIResponse)constructor.Invoke(
[
- id,
- createdAt,
- error,
- instructions,
- model,
- previousResponseId,
- temperature,
- tools,
- topP,
metadata,
- incompleteStatusDetails,
- outputItems,
- parallelToolCallsEnabled,
- toolChoice
+ (float?)temperature,
+ (float?)topP,
+ null, // serviceTier
+ previousResponseId,
+ null, // background
+ instructions,
+ tools.ToList(),
+ id,
+ null, // status
+ createdAt,
+ error,
+ null, // usage
+ null, // endUserId
+ null, // reasoningOptions
+ null, // maxOutputTokenCount
+ null, // textOptions
+ null, // truncationMode
+ incompleteStatusDetails,
+ outputItems.ToList(),
+ parallelToolCallsEnabled,
+ toolChoice,
+ model,
+ "response",
+ null // additionalBinaryDataProperties
]
);
}
diff --git a/dotnet/src/Connectors/Connectors.AzureOpenAI.UnitTests/Connectors.AzureOpenAI.UnitTests.csproj b/dotnet/src/Connectors/Connectors.AzureOpenAI.UnitTests/Connectors.AzureOpenAI.UnitTests.csproj
index a0a695a6719c..efae2b241d3c 100644
--- a/dotnet/src/Connectors/Connectors.AzureOpenAI.UnitTests/Connectors.AzureOpenAI.UnitTests.csproj
+++ b/dotnet/src/Connectors/Connectors.AzureOpenAI.UnitTests/Connectors.AzureOpenAI.UnitTests.csproj
@@ -8,7 +8,7 @@
true
enable
false
- $(NoWarn);SKEXP0001;SKEXP0010;CA2007,CA1806,CA1869,CA1861,IDE0300,VSTHRD111,IDE1006
+ $(NoWarn);SKEXP0001;SKEXP0010;CA2007,CA1806,CA1869,CA1861,IDE0300,VSTHRD111,IDE1006,OPENAI001
diff --git a/dotnet/src/Connectors/Connectors.AzureOpenAI/Connectors.AzureOpenAI.csproj b/dotnet/src/Connectors/Connectors.AzureOpenAI/Connectors.AzureOpenAI.csproj
index d5e590afabbe..47d0ed0a85e5 100644
--- a/dotnet/src/Connectors/Connectors.AzureOpenAI/Connectors.AzureOpenAI.csproj
+++ b/dotnet/src/Connectors/Connectors.AzureOpenAI/Connectors.AzureOpenAI.csproj
@@ -5,7 +5,7 @@
Microsoft.SemanticKernel.Connectors.AzureOpenAI
$(AssemblyName)
net8.0;netstandard2.0
- $(NoWarn);NU5104;SKEXP0001,SKEXP0010
+ $(NoWarn);NU5104;SKEXP0001,SKEXP0010,OPENAI001
true
diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/KernelFunctionMetadataExtensionsTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/KernelFunctionMetadataExtensionsTests.cs
index a87816bfb949..360d5173cab8 100644
--- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/KernelFunctionMetadataExtensionsTests.cs
+++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/KernelFunctionMetadataExtensionsTests.cs
@@ -200,7 +200,7 @@ public void ItCanCreateValidGeminiFunctionManualForPlugin()
// Assert
Assert.NotNull(result);
Assert.Equal(
- """{"type":"object","required":["parameter1","parameter2","parameter3"],"properties":{"parameter1":{"description":"String parameter","type":"string"},"parameter2":{"description":"Enum parameter","type":"string","enum":["Value1","Value2"]},"parameter3":{"description":"DateTime parameter","type":"string"}}}""",
+ """{"type":"object","required":["parameter1","parameter2","parameter3"],"properties":{"parameter1":{"description":"String parameter","type":"string"},"parameter2":{"description":"Enum parameter","type":"string","enum":["Value1","Value2"]},"parameter3":{"description":"DateTime parameter","type":"string","format":"date-time"}}}""",
JsonSerializer.Serialize(result.Parameters)
);
}
diff --git a/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Connectors.OpenAI.UnitTests.csproj b/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Connectors.OpenAI.UnitTests.csproj
index 0a7171bbcd0d..0366175e98f8 100644
--- a/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Connectors.OpenAI.UnitTests.csproj
+++ b/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Connectors.OpenAI.UnitTests.csproj
@@ -7,7 +7,7 @@
true
enable
false
- $(NoWarn);SKEXP0001;SKEXP0010;CS1591;IDE1006;RCS1261;CA1031;CA1308;CA1861;CA2007;CA2234;VSTHRD111;CA1812
+ $(NoWarn);SKEXP0001;SKEXP0010;CS1591;IDE1006;RCS1261;CA1031;CA1308;CA1861;CA2007;CA2234;VSTHRD111;CA1812;OPENAI001
diff --git a/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Core/OpenAIJsonSchemaTransformerTests.cs b/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Core/OpenAIJsonSchemaTransformerTests.cs
index 2c15249a3ca6..e41408e2b36c 100644
--- a/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Core/OpenAIJsonSchemaTransformerTests.cs
+++ b/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Core/OpenAIJsonSchemaTransformerTests.cs
@@ -73,7 +73,7 @@ public void ItTransformsJsonSchemaCorrectly()
"null"
],
"items": {
- "type": "object",
+ "type": ["object","null"],
"properties": {
"TextProperty": {
"type": [
diff --git a/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Extensions/KernelFunctionMetadataExtensionsTests.cs b/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Extensions/KernelFunctionMetadataExtensionsTests.cs
index ec64801c51b0..3029777f56a1 100644
--- a/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Extensions/KernelFunctionMetadataExtensionsTests.cs
+++ b/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Extensions/KernelFunctionMetadataExtensionsTests.cs
@@ -208,7 +208,7 @@ public void ItCanCreateValidAzureOpenAIFunctionManualForPlugin(bool strict)
else
{
Assert.Equal(
- """{"type":"object","required":["parameter1","parameter2","parameter3"],"properties":{"parameter1":{"description":"String parameter","type":"string"},"parameter2":{"description":"Enum parameter","type":"string","enum":["Value1","Value2"]},"parameter3":{"description":"DateTime parameter","type":"string"}}}""",
+ """{"type":"object","required":["parameter1","parameter2","parameter3"],"properties":{"parameter1":{"description":"String parameter","type":"string"},"parameter2":{"description":"Enum parameter","type":"string","enum":["Value1","Value2"]},"parameter3":{"description":"DateTime parameter","type":"string","format":"date-time"}}}""",
parametersResult
);
}
diff --git a/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Helpers/OpenAIChatResponseFormatBuilderTests.cs b/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Helpers/OpenAIChatResponseFormatBuilderTests.cs
index 13a5862b19b7..419a97c6b500 100644
--- a/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Helpers/OpenAIChatResponseFormatBuilderTests.cs
+++ b/dotnet/src/Connectors/Connectors.OpenAI.UnitTests/Helpers/OpenAIChatResponseFormatBuilderTests.cs
@@ -1,6 +1,7 @@
// Copyright (c) Microsoft. All rights reserved.
using System;
+using System.Reflection;
using System.Text.Json;
using System.Text.Json.Serialization;
using Microsoft.SemanticKernel.Connectors.OpenAI;
@@ -34,11 +35,9 @@ public void GetJsonSchemaResponseFormatReturnsChatResponseFormatByDefault(
// Act
var chatResponseFormat = OpenAIChatResponseFormatBuilder.GetJsonSchemaResponseFormat(jsonElement);
- var responseFormat = this.GetResponseFormat(chatResponseFormat);
+ var (jsonSchema, schema) = this.GetResponseFormatJsonSchema(chatResponseFormat);
// Assert
- Assert.True(responseFormat.TryGetProperty("JsonSchema", out var jsonSchema));
- Assert.True(jsonSchema.TryGetProperty("Schema", out var schema));
Assert.True(jsonSchema.TryGetProperty("Name", out var name));
Assert.True(jsonSchema.TryGetProperty("Strict", out var strict));
@@ -145,10 +144,28 @@ public void GetJsonSchemaResponseFormatThrowsExceptionWhenSchemaDoesNotExist()
#region private
- private JsonElement GetResponseFormat(ChatResponseFormat chatResponseFormat)
+ private (JsonElement JsonSchema, JsonElement JsonSchemaSchema) GetResponseFormatJsonSchema(ChatResponseFormat chatResponseFormat)
{
- var settings = new OpenAIPromptExecutionSettings { ResponseFormat = chatResponseFormat };
- return JsonDocument.Parse(JsonSerializer.Serialize(settings, this._options)).RootElement.GetProperty("response_format");
+ var jsonSchemaProperty = chatResponseFormat.GetType().GetProperty("JsonSchema", BindingFlags.NonPublic | BindingFlags.Instance);
+
+ // Assert
+ Assert.NotNull(jsonSchemaProperty);
+ var jsonSchemaPropertyValue = jsonSchemaProperty.GetValue(chatResponseFormat);
+
+ Assert.NotNull(jsonSchemaPropertyValue);
+ var schemaProperty = jsonSchemaPropertyValue.GetType().GetProperty("Schema", BindingFlags.Public | BindingFlags.Instance);
+
+ Assert.NotNull(schemaProperty);
+ var schemaPropertyValue = schemaProperty.GetValue(jsonSchemaPropertyValue);
+
+ Assert.NotNull(schemaPropertyValue);
+
+ var jsonSchema = JsonSerializer.Deserialize(JsonSerializer.Serialize(jsonSchemaProperty.GetValue(chatResponseFormat)));
+
+ // Schema property gets serialized into a non-readable pattern in the jsonSchema JsonElement variable and needs to be returned separately.
+ var schema = JsonSerializer.Deserialize(schemaPropertyValue.ToString()!);
+
+ return (jsonSchema, schema);
}
private sealed class BinaryDataJsonConverter : JsonConverter
diff --git a/dotnet/src/Connectors/Connectors.OpenAI/Connectors.OpenAI.csproj b/dotnet/src/Connectors/Connectors.OpenAI/Connectors.OpenAI.csproj
index 2f280b843e10..aab81a532403 100644
--- a/dotnet/src/Connectors/Connectors.OpenAI/Connectors.OpenAI.csproj
+++ b/dotnet/src/Connectors/Connectors.OpenAI/Connectors.OpenAI.csproj
@@ -5,7 +5,7 @@
Microsoft.SemanticKernel.Connectors.OpenAI
$(AssemblyName)
net8.0;netstandard2.0
- $(NoWarn);NU5104;SKEXP0001,SKEXP0010
+ $(NoWarn);NU5104;SKEXP0001,SKEXP0010,OPENAI001
true
diff --git a/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/AIFunctionKernelFunction.cs b/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/AIFunctionKernelFunction.cs
index 374ddc1a5fe6..a319f6b1c85d 100644
--- a/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/AIFunctionKernelFunction.cs
+++ b/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/AIFunctionKernelFunction.cs
@@ -28,7 +28,7 @@ public AIFunctionKernelFunction(AIFunction aiFunction) :
{
Description = aiFunction.UnderlyingMethod?.ReturnParameter.GetCustomAttribute()?.Description,
ParameterType = aiFunction.UnderlyingMethod?.ReturnParameter.ParameterType,
- Schema = new KernelJsonSchema(AIJsonUtilities.CreateJsonSchema(aiFunction.UnderlyingMethod?.ReturnParameter.ParameterType)),
+ Schema = new KernelJsonSchema(aiFunction.ReturnJsonSchema ?? AIJsonUtilities.CreateJsonSchema(aiFunction.UnderlyingMethod?.ReturnParameter.ParameterType)),
})
{
// Kernel functions created from AI functions are always fully qualified
diff --git a/dotnet/src/SemanticKernel.UnitTests/SemanticKernel.UnitTests.csproj b/dotnet/src/SemanticKernel.UnitTests/SemanticKernel.UnitTests.csproj
index 512cbf00ad91..6c4d9765fd02 100644
--- a/dotnet/src/SemanticKernel.UnitTests/SemanticKernel.UnitTests.csproj
+++ b/dotnet/src/SemanticKernel.UnitTests/SemanticKernel.UnitTests.csproj
@@ -6,7 +6,7 @@
net8.0
true
false
- $(NoWarn);CA2007,CA1861,IDE1006,VSTHRD111,SKEXP0001,SKEXP0010,SKEXP0050,SKEXP0110,SKEXP0120,SKEXP0130,MEVD9000
+ $(NoWarn);CA2007,CA1861,IDE1006,VSTHRD111,SKEXP0001,SKEXP0010,SKEXP0050,SKEXP0110,SKEXP0120,SKEXP0130,MEVD9000,OPENAI001
From 1269d3c3aeb77ddc9758c0fc431716f63b102a4b Mon Sep 17 00:00:00 2001
From: Roger Barreto <19890735+rogerbarreto@users.noreply.github.com>
Date: Fri, 18 Jul 2025 12:11:13 +0100
Subject: [PATCH 08/10] .Net: Update GettingStarted to use M.E.AI.ChatClient
(#12740)
### Motivation and Context
This pull request updates the `GettingStarted` samples to replace the
use of `AddOpenAIChatCompletion` with `AddOpenAIChatClient` for creating
kernels, aligning the code with the newer `ChatClient` API.
Additionally, it updates documentation and examples to reflect this
change and introduces minor improvements to the dependency injection
setup.
---
.../GettingStarted/Step1_Create_Kernel.cs | 8 +++----
.../GettingStarted/Step2_Add_Plugins.cs | 22 +++++++++++--------
.../GettingStarted/Step3_Yaml_Prompt.cs | 2 +-
.../Step4_Dependency_Injection.cs | 8 +++++--
.../GettingStarted/Step5_Chat_Prompt.cs | 2 +-
.../GettingStarted/Step6_Responsible_AI.cs | 2 +-
.../GettingStarted/Step7_Observability.cs | 2 +-
.../GettingStarted/Step8_Pipelining.cs | 2 +-
.../GettingStarted/Step9_OpenAPI_Plugins.cs | 12 ++++++----
9 files changed, 36 insertions(+), 24 deletions(-)
diff --git a/dotnet/samples/GettingStarted/Step1_Create_Kernel.cs b/dotnet/samples/GettingStarted/Step1_Create_Kernel.cs
index bcb704b6654d..132d2b830639 100644
--- a/dotnet/samples/GettingStarted/Step1_Create_Kernel.cs
+++ b/dotnet/samples/GettingStarted/Step1_Create_Kernel.cs
@@ -6,19 +6,19 @@
namespace GettingStarted;
/// <summary>
-/// This example shows how to create and use a <see cref="Kernel"/>.
+/// This example shows how to create and use a <see cref="Kernel"/> with ChatClient.
/// </summary>
public sealed class Step1_Create_Kernel(ITestOutputHelper output) : BaseTest(output)
{
/// <summary>
- /// Show how to create a <see cref="Kernel"/> and use it to execute prompts.
+ /// Show how to create a <see cref="Kernel"/> using ChatClient and use it to execute prompts.
/// </summary>
[Fact]
public async Task CreateKernel()
{
- // Create a kernel with OpenAI chat completion
+ // Create a kernel with OpenAI chat completion using ChatClient
Kernel kernel = Kernel.CreateBuilder()
- .AddOpenAIChatCompletion(
+ .AddOpenAIChatClient(
modelId: TestConfiguration.OpenAI.ChatModelId,
apiKey: TestConfiguration.OpenAI.ApiKey)
.Build();
diff --git a/dotnet/samples/GettingStarted/Step2_Add_Plugins.cs b/dotnet/samples/GettingStarted/Step2_Add_Plugins.cs
index 3f6b277fe5f3..5b233f734b2f 100644
--- a/dotnet/samples/GettingStarted/Step2_Add_Plugins.cs
+++ b/dotnet/samples/GettingStarted/Step2_Add_Plugins.cs
@@ -9,36 +9,40 @@
namespace GettingStarted;
///
-/// This example shows how to load a instances.
+/// This example shows how to load a instances with ChatClient.
///
public sealed class Step2_Add_Plugins(ITestOutputHelper output) : BaseTest(output)
{
///
- /// Shows different ways to load a instances.
+ /// Shows different ways to load a instances with ChatClient.
///
[Fact]
public async Task AddPlugins()
{
- // Create a kernel with OpenAI chat completion
+ // Create a kernel with ChatClient and plugins
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
- kernelBuilder.AddOpenAIChatCompletion(
- modelId: TestConfiguration.OpenAI.ChatModelId,
- apiKey: TestConfiguration.OpenAI.ApiKey);
+ kernelBuilder.AddOpenAIChatClient(
+ modelId: TestConfiguration.OpenAI.ChatModelId,
+ apiKey: TestConfiguration.OpenAI.ApiKey);
kernelBuilder.Plugins.AddFromType<TimeInformation>();
kernelBuilder.Plugins.AddFromType<WidgetFactory>();
Kernel kernel = kernelBuilder.Build();
// Example 1. Invoke the kernel with a prompt that asks the AI for information it cannot provide and may hallucinate
+ Console.WriteLine("Example 1: Asking the AI for information it cannot provide:");
Console.WriteLine(await kernel.InvokePromptAsync("How many days until Christmas?"));
- // Example 2. Invoke the kernel with a templated prompt that invokes a plugin and display the result
+ // Example 2. Use kernel for templated prompts that invoke plugins directly
+ Console.WriteLine("\nExample 2: Using templated prompts that invoke plugins directly:");
Console.WriteLine(await kernel.InvokePromptAsync("The current time is {{TimeInformation.GetCurrentUtcTime}}. How many days until Christmas?"));
- // Example 3. Invoke the kernel with a prompt and allow the AI to automatically invoke functions
+ // Example 3. Use kernel with function calling for automatic plugin invocation
OpenAIPromptExecutionSettings settings = new() { FunctionChoiceBehavior = FunctionChoiceBehavior.Auto() };
+ Console.WriteLine("\nExample 3: Using function calling for automatic plugin invocation:");
Console.WriteLine(await kernel.InvokePromptAsync("How many days until Christmas? Explain your thinking.", new(settings)));
- // Example 4. Invoke the kernel with a prompt and allow the AI to automatically invoke functions that use enumerations
+ // Example 4. Use kernel with function calling for complex scenarios with enumerations
+ Console.WriteLine("\nExample 4: Using function calling for complex scenarios with enumerations:");
Console.WriteLine(await kernel.InvokePromptAsync("Create a handy lime colored widget for me.", new(settings)));
Console.WriteLine(await kernel.InvokePromptAsync("Create a beautiful scarlet colored widget for me.", new(settings)));
Console.WriteLine(await kernel.InvokePromptAsync("Create an attractive maroon and navy colored widget for me.", new(settings)));
diff --git a/dotnet/samples/GettingStarted/Step3_Yaml_Prompt.cs b/dotnet/samples/GettingStarted/Step3_Yaml_Prompt.cs
index 911933b0909c..2c8090e28beb 100644
--- a/dotnet/samples/GettingStarted/Step3_Yaml_Prompt.cs
+++ b/dotnet/samples/GettingStarted/Step3_Yaml_Prompt.cs
@@ -19,7 +19,7 @@ public async Task CreatePromptFromYaml()
{
// Create a kernel with OpenAI chat completion
Kernel kernel = Kernel.CreateBuilder()
- .AddOpenAIChatCompletion(
+ .AddOpenAIChatClient(
modelId: TestConfiguration.OpenAI.ChatModelId,
apiKey: TestConfiguration.OpenAI.ApiKey)
.Build();
diff --git a/dotnet/samples/GettingStarted/Step4_Dependency_Injection.cs b/dotnet/samples/GettingStarted/Step4_Dependency_Injection.cs
index 4ee22ba39261..11a819791342 100644
--- a/dotnet/samples/GettingStarted/Step4_Dependency_Injection.cs
+++ b/dotnet/samples/GettingStarted/Step4_Dependency_Injection.cs
@@ -38,7 +38,7 @@ public async Task GetKernelUsingDependencyInjection()
[Fact]
public async Task PluginUsingDependencyInjection()
{
- // If an application follows DI guidelines, the following line is unnecessary because DI will inject an instance of the KernelClient class to a class that references it.
+ // If an application follows DI guidelines, the following line is unnecessary because DI will inject an instance of the Kernel class to a class that references it.
// DI container guidelines - https://learn.microsoft.com/en-us/dotnet/core/extensions/dependency-injection-guidelines#recommendations
var serviceProvider = BuildServiceProvider();
var kernel = serviceProvider.GetRequiredService<Kernel>();
@@ -57,8 +57,12 @@ private ServiceProvider BuildServiceProvider()
collection.AddSingleton(new XunitLogger(this.Output));
collection.AddSingleton(new FakeUserService());
+ // Add ChatClient using OpenAI
+ collection.AddOpenAIChatClient(
+ modelId: TestConfiguration.OpenAI.ChatModelId,
+ apiKey: TestConfiguration.OpenAI.ApiKey);
+
var kernelBuilder = collection.AddKernel();
- kernelBuilder.Services.AddOpenAIChatCompletion(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey);
kernelBuilder.Plugins.AddFromType();
kernelBuilder.Plugins.AddFromType();
diff --git a/dotnet/samples/GettingStarted/Step5_Chat_Prompt.cs b/dotnet/samples/GettingStarted/Step5_Chat_Prompt.cs
index dc7eb4206592..b4dd6c951f20 100644
--- a/dotnet/samples/GettingStarted/Step5_Chat_Prompt.cs
+++ b/dotnet/samples/GettingStarted/Step5_Chat_Prompt.cs
@@ -14,7 +14,7 @@ public async Task InvokeChatPrompt()
{
// Create a kernel with OpenAI chat completion
Kernel kernel = Kernel.CreateBuilder()
- .AddOpenAIChatCompletion(
+ .AddOpenAIChatClient(
modelId: TestConfiguration.OpenAI.ChatModelId,
apiKey: TestConfiguration.OpenAI.ApiKey)
.Build();
diff --git a/dotnet/samples/GettingStarted/Step6_Responsible_AI.cs b/dotnet/samples/GettingStarted/Step6_Responsible_AI.cs
index 255e9d2bc619..836732abd6c6 100644
--- a/dotnet/samples/GettingStarted/Step6_Responsible_AI.cs
+++ b/dotnet/samples/GettingStarted/Step6_Responsible_AI.cs
@@ -15,7 +15,7 @@ public async Task AddPromptFilter()
{
// Create a kernel with OpenAI chat completion
var builder = Kernel.CreateBuilder()
- .AddOpenAIChatCompletion(
+ .AddOpenAIChatClient(
modelId: TestConfiguration.OpenAI.ChatModelId,
apiKey: TestConfiguration.OpenAI.ApiKey);
diff --git a/dotnet/samples/GettingStarted/Step7_Observability.cs b/dotnet/samples/GettingStarted/Step7_Observability.cs
index 1504097cbbf6..765f7051421e 100644
--- a/dotnet/samples/GettingStarted/Step7_Observability.cs
+++ b/dotnet/samples/GettingStarted/Step7_Observability.cs
@@ -17,7 +17,7 @@ public async Task ObservabilityWithFilters()
{
// Create a kernel with OpenAI chat completion
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
- kernelBuilder.AddOpenAIChatCompletion(
+ kernelBuilder.AddOpenAIChatClient(
modelId: TestConfiguration.OpenAI.ChatModelId,
apiKey: TestConfiguration.OpenAI.ApiKey);
diff --git a/dotnet/samples/GettingStarted/Step8_Pipelining.cs b/dotnet/samples/GettingStarted/Step8_Pipelining.cs
index 96f305c37a17..c136a21538e6 100644
--- a/dotnet/samples/GettingStarted/Step8_Pipelining.cs
+++ b/dotnet/samples/GettingStarted/Step8_Pipelining.cs
@@ -17,7 +17,7 @@ public sealed class Step8_Pipelining(ITestOutputHelper output) : BaseTest(output
public async Task CreateFunctionPipeline()
{
IKernelBuilder builder = Kernel.CreateBuilder();
- builder.AddOpenAIChatCompletion(
+ builder.AddOpenAIChatClient(
TestConfiguration.OpenAI.ChatModelId,
TestConfiguration.OpenAI.ApiKey);
builder.Services.AddLogging(c => c.AddConsole().SetMinimumLevel(LogLevel.Trace));
diff --git a/dotnet/samples/GettingStarted/Step9_OpenAPI_Plugins.cs b/dotnet/samples/GettingStarted/Step9_OpenAPI_Plugins.cs
index 5bff73bab0ca..15d500af582a 100644
--- a/dotnet/samples/GettingStarted/Step9_OpenAPI_Plugins.cs
+++ b/dotnet/samples/GettingStarted/Step9_OpenAPI_Plugins.cs
@@ -19,7 +19,7 @@ public async Task AddOpenAPIPlugins()
{
// Create a kernel with OpenAI chat completion
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
- kernelBuilder.AddOpenAIChatCompletion(
+ kernelBuilder.AddOpenAIChatClient(
modelId: TestConfiguration.OpenAI.ChatModelId,
apiKey: TestConfiguration.OpenAI.ApiKey);
Kernel kernel = kernelBuilder.Build();
@@ -33,12 +33,12 @@ public async Task AddOpenAPIPlugins()
}
///
- /// Shows how to transform an Open API instance to support dependency injection.
+ /// Shows how to transform an Open API instance to support dependency injection with ChatClient.
///
[Fact]
public async Task TransformOpenAPIPlugins()
{
- // Create a kernel with OpenAI chat completion
+ // Create a kernel with ChatClient and dependency injection
var serviceProvider = BuildServiceProvider();
var kernel = serviceProvider.GetRequiredService<Kernel>();
@@ -61,8 +61,12 @@ private ServiceProvider BuildServiceProvider()
var collection = new ServiceCollection();
collection.AddSingleton(new FakeMechanicService());
+ // Add ChatClient using OpenAI
+ collection.AddOpenAIChatClient(
+ modelId: TestConfiguration.OpenAI.ChatModelId,
+ apiKey: TestConfiguration.OpenAI.ApiKey);
+
var kernelBuilder = collection.AddKernel();
- kernelBuilder.Services.AddOpenAIChatCompletion(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey);
return collection.BuildServiceProvider();
}
From d2e6e3c4f16b80e28fc62301446c099c2c949bb0 Mon Sep 17 00:00:00 2001
From: Ben Thomas
Date: Fri, 18 Jul 2025 05:01:57 -0700
Subject: [PATCH 09/10] Removing Java related extensions and settings. (#11290)
Removing unused extensions and VS Code settings.
### Contribution Checklist
- [ ] The code builds clean without any errors or warnings
- [ ] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [ ] All unit tests pass, and I have added new tests where possible
- [ ] I didn't break anyone :smile:
Co-authored-by: Ben Thomas
Co-authored-by: Roger Barreto <19890735+rogerbarreto@users.noreply.github.com>
---
.vscode/extensions.json | 1 -
.vscode/settings.json | 10 ----------
2 files changed, 11 deletions(-)
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
index 318937cc4ef7..11b26b34101f 100644
--- a/.vscode/extensions.json
+++ b/.vscode/extensions.json
@@ -8,7 +8,6 @@
"esbenp.prettier-vscode",
"dbaeumer.vscode-eslint",
"ms-semantic-kernel.semantic-kernel",
- "emeraldwalk.RunOnSave",
"ms-java.vscode-java-pack",
"ms-azuretools.vscode-dapr"
]
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 8be83c425f2f..f1cc14ec42ad 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -82,16 +82,6 @@
"source.fixAll": "never"
}
},
- "emeraldwalk.runonsave": {
- "commands": [
- {
- "match": "\\.java$",
- "cmd": "java -Xmx128m -jar ${workspaceFolder}/java/utilities/google-java-format-1.17.0-all-deps.jar --replace --aosp ${file}"
- }
- ]
- },
- "java.debug.settings.onBuildFailureProceed": true,
- "java.compile.nullAnalysis.mode": "disabled",
"dotnet.defaultSolution": "dotnet\\SK-dotnet.sln",
"python.testing.pytestArgs": ["python/tests"],
"python.testing.unittestEnabled": false,
From 79d3dde556e4cdc482d83c9f5f0a459c5cc79a48 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 18 Jul 2025 13:04:46 +0100
Subject: [PATCH 10/10] .Net: Bump danielpalme/ReportGenerator-GitHub-Action
from 5.4.7 to 5.4.8 (#12497)
Bumps
[danielpalme/ReportGenerator-GitHub-Action](https://github.com/danielpalme/reportgenerator-github-action)
from 5.4.7 to 5.4.8.
Release notes
Sourced from danielpalme/ReportGenerator-GitHub-Action's
releases.
5.4.8
- #737
Improved lcov support (take FNDA elements into account to determine
whether a code element has been covered)
- #741
Charts does not render "Full method coverage" elements if
coverage information is not available
- Added new setting "applyMaximumGroupingLevel". This allows
to apply the maximum grouping level instead of the default 'By assembly'
grouping in HTML reports.
Commits
[](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/dotnet-build-and-test.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/dotnet-build-and-test.yml b/.github/workflows/dotnet-build-and-test.yml
index 60e2b1887b3b..f6c4255da8f8 100644
--- a/.github/workflows/dotnet-build-and-test.yml
+++ b/.github/workflows/dotnet-build-and-test.yml
@@ -184,7 +184,7 @@ jobs:
# Generate test reports and check coverage
- name: Generate test reports
- uses: danielpalme/ReportGenerator-GitHub-Action@5.4.7
+ uses: danielpalme/ReportGenerator-GitHub-Action@5.4.8
with:
reports: "./TestResults/Coverage/**/coverage.cobertura.xml"
targetdir: "./TestResults/Reports"