
Commit 893eac0

Add basic/multi-turn/streaming examples
1 parent a37737c commit 893eac0

File tree

9 files changed: +244 -0 lines changed

samples/curl/basic.sh

Lines changed: 16 additions & 0 deletions

curl -X POST "https://models.inference.ai.azure.com/chat/completions" \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer $GITHUB_TOKEN" \
    -d '{
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ],
        "model": "gpt-4o"
    }'
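
The script authenticates with whatever token is in the GITHUB_TOKEN environment variable. A minimal way to run it from the repository root, assuming a GitHub personal access token that is allowed to call GitHub Models (token creation is not covered by this commit):

# Assumed invocation; substitute a real token value.
export GITHUB_TOKEN="<your-github-token>"
bash samples/curl/basic.sh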

samples/curl/multi_turn.sh

Lines changed: 24 additions & 0 deletions

curl -X POST "https://models.inference.ai.azure.com/chat/completions" \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer $GITHUB_TOKEN" \
    -d '{
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": "What is the capital of France?"
            },
            {
                "role": "assistant",
                "content": "The capital of France is Paris."
            },
            {
                "role": "user",
                "content": "What about Spain?"
            }
        ],
        "model": "gpt-4o"
    }'

samples/curl/streaming.sh

Lines changed: 17 additions & 0 deletions

curl -X POST "https://models.inference.ai.azure.com/chat/completions" \
    -H "Content-Type: application/json" \
    -H "Authorization: Bearer $GITHUB_TOKEN" \
    -d '{
        "messages": [
            {
                "role": "system",
                "content": "You are a helpful assistant."
            },
            {
                "role": "user",
                "content": "Give me 5 good reasons why I should exercise every day."
            }
        ],
        "stream": true,
        "model": "gpt-4o"
    }'

samples/js/basic.js

Lines changed: 30 additions & 0 deletions

import ModelClient from "@azure-rest/ai-inference";
import { AzureKeyCredential } from "@azure/core-auth";

const token = process.env["GITHUB_TOKEN"];
const endpoint = "https://models.inference.ai.azure.com";
const modelName = "gpt-4o";

export async function main() {

  const client = new ModelClient(endpoint, new AzureKeyCredential(token));

  const response = await client.path("/chat/completions").post({
    body: {
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "What is the capital of France?" }
      ],
      model: modelName
    }
  });

  if (response.status !== "200") {
    throw response.body.error;
  }
  console.log(response.body.choices[0].message.content);
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

samples/js/multi_turn.js

Lines changed: 35 additions & 0 deletions

import ModelClient from "@azure-rest/ai-inference";
import { AzureKeyCredential } from "@azure/core-auth";

const token = process.env["GITHUB_TOKEN"];
const endpoint = "https://models.inference.ai.azure.com";
const modelName = "gpt-4o";

export async function main() {

  const client = new ModelClient(endpoint, new AzureKeyCredential(token));

  const response = await client.path("/chat/completions").post({
    body: {
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "What is the capital of France?" },
        { role: "assistant", content: "The capital of France is Paris." },
        { role: "user", content: "What about Spain?" },
      ],
      model: modelName,
    }
  });

  if (response.status !== "200") {
    throw response.body.error;
  }

  for (const choice of response.body.choices) {
    console.log(choice.message.content);
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});

samples/js/streaming.js

Lines changed: 47 additions & 0 deletions

import ModelClient from "@azure-rest/ai-inference";
import { AzureKeyCredential } from "@azure/core-auth";
import { createSseStream } from "@azure/core-sse";

const token = process.env["GITHUB_TOKEN"];
const endpoint = "https://models.inference.ai.azure.com";
const modelName = "gpt-4o";

export async function main() {

  const client = new ModelClient(endpoint, new AzureKeyCredential(token));

  const response = await client.path("/chat/completions").post({
    body: {
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "Give me 5 good reasons why I should exercise every day." },
      ],
      model: modelName,
      stream: true
    }
  }).asNodeStream();

  const stream = response.body;
  if (!stream) {
    throw new Error("The response stream is undefined");
  }

  if (response.status !== "200") {
    throw new Error(`Failed to get chat completions: ${response.body.error}`);
  }

  const sseStream = createSseStream(stream);

  for await (const event of sseStream) {
    if (event.data === "[DONE]") {
      return;
    }
    for (const choice of (JSON.parse(event.data)).choices) {
      process.stdout.write(choice.delta?.content ?? ``);
    }
  }
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});
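
The JavaScript samples import @azure-rest/ai-inference, @azure/core-auth, and @azure/core-sse, but this commit does not add a package manifest. One possible setup sketch, assuming npm and a project configured for ES modules:

# Assumed setup; the package names come straight from the import statements above.
npm install @azure-rest/ai-inference @azure/core-auth @azure/core-sse
node samples/js/streaming.js   # assumes "type": "module" in package.json, or rename the files to .mjs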

samples/python/basic.py

Lines changed: 23 additions & 0 deletions

import os
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential

endpoint = "https://models.inference.ai.azure.com"
model_name = "gpt-4o"
token = os.environ["GITHUB_TOKEN"]

client = ChatCompletionsClient(
    endpoint=endpoint,
    credential=AzureKeyCredential(token),
)

response = client.complete(
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="What is the capital of France?"),
    ],
    model=model_name,
)

print(response.choices[0].message.content)

samples/python/multi_turn.py

Lines changed: 24 additions & 0 deletions

import os
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import AssistantMessage, SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential

token = os.environ["GITHUB_TOKEN"]
endpoint = "https://models.inference.ai.azure.com"
model_name = "gpt-4o"

client = ChatCompletionsClient(
    endpoint=endpoint,
    credential=AzureKeyCredential(token),
)

messages = [
    SystemMessage(content="You are a helpful assistant."),
    UserMessage(content="What is the capital of France?"),
    AssistantMessage(content="The capital of France is Paris."),
    UserMessage(content="What about Spain?"),
]

response = client.complete(messages=messages, model=model_name)

print(response.choices[0].message.content)

samples/python/streaming.py

Lines changed: 28 additions & 0 deletions

import os
from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import SystemMessage, UserMessage
from azure.core.credentials import AzureKeyCredential

token = os.environ["GITHUB_TOKEN"]
endpoint = "https://models.inference.ai.azure.com"
model_name = "gpt-4o"

client = ChatCompletionsClient(
    endpoint=endpoint,
    credential=AzureKeyCredential(token),
)

response = client.complete(
    stream=True,
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        UserMessage(content="Give me 5 good reasons why I should exercise every day."),
    ],
    model=model_name,
)

for update in response:
    if update.choices:
        print(update.choices[0].delta.content or "", end="")

client.close()
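
The Python samples import azure.ai.inference and azure.core.credentials; the azure-ai-inference package on PyPI provides the former and pulls in azure-core for the latter. A setup sketch under those assumptions, with Python 3 and pip available:

# Assumed setup; the dependency is inferred from the imports, not declared by this commit.
pip install azure-ai-inference
export GITHUB_TOKEN="<your-github-token>"
python samples/python/streaming.py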

0 commit comments
