
Commit dcfaeaa

Switch base endpoint to github.ai
1 parent: a8fb712


49 files changed, +195 -412 lines changed

cookbooks/python/langchain/lc_openai_getting_started.ipynb

Lines changed: 3 additions & 3 deletions

@@ -50,9 +50,9 @@
  " raise ValueError(\"GITHUB_TOKEN is not set\")\n",
  "\n",
  "os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
- "os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
+ "os.environ[\"OPENAI_BASE_URL\"] = \"https://models.github.ai/inference\"\n",
  "\n",
- "GPT_MODEL = \"gpt-4o-mini\"\n",
+ "GPT_MODEL = \"openai/gpt-4o-mini\"\n",
  "\n",
  "llm = ChatOpenAI(model=GPT_MODEL)"
  ]

@@ -359,7 +359,7 @@
  ],
  "metadata": {
  "kernelspec": {
- "display_name": "gh-cookbook",
+ "display_name": "Python 3",
  "language": "python",
  "name": "python3"
  },
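Put together, the changed configuration cell now reads roughly as follows. This is a minimal runnable sketch based only on the lines visible in the hunk; the langchain_openai import and the final invoke() call are assumptions about the surrounding notebook, not part of this commit:

import os
from langchain_openai import ChatOpenAI  # assumed import; the hunk only shows ChatOpenAI(...) being called

if not os.getenv("GITHUB_TOKEN"):
    raise ValueError("GITHUB_TOKEN is not set")

# Route the OpenAI-compatible client to the GitHub Models endpoint
os.environ["OPENAI_API_KEY"] = os.getenv("GITHUB_TOKEN")
os.environ["OPENAI_BASE_URL"] = "https://models.github.ai/inference"

# Model ids on models.github.ai carry a publisher prefix
GPT_MODEL = "openai/gpt-4o-mini"

llm = ChatOpenAI(model=GPT_MODEL)
print(llm.invoke("Say hello in one word").content)  # illustrative usage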

cookbooks/python/llamaindex/rag_getting_started.ipynb

Lines changed: 1 addition & 1 deletion

@@ -244,7 +244,7 @@
  ],
  "metadata": {
  "kernelspec": {
- "display_name": "gh-cookbook",
+ "display_name": "Python 3",
  "language": "python",
  "name": "python3"
  },

cookbooks/python/mistralai/evaluation.ipynb

Lines changed: 7 additions & 7 deletions

@@ -33,8 +33,8 @@
  "github_token = os.environ[\"GITHUB_TOKEN\"]\n",
  "\n",
  "# We can use some defaults for the other two variables\n",
- "endpoint = \"https://models.inference.ai.azure.com\"\n",
- "model_name = \"mistral-large\""
+ "endpoint = \"https://models.github.ai/inference\"\n",
+ "model_name = \"mistral-ai/mistral-large\""
  ]
  },
  {

@@ -100,7 +100,7 @@
  "from mistralai.models.chat_completion import ChatMessage\n",
  "\n",
  "\n",
- "def run_mistral(user_message, model=\"mistral-small\"):\n",
+ "def run_mistral(user_message, model=\"mistral-ai/mistral-small\"):\n",
  " client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
  " messages = [ChatMessage(role=\"user\", content=user_message)]\n",
  " chat_response = client.chat(\n",

@@ -225,7 +225,7 @@
  "from mistralai.models.chat_completion import ChatMessage\n",
  "\n",
  "\n",
- "def run_mistral(user_message, model=\"mistral-small\"):\n",
+ "def run_mistral(user_message, model=\"mistral-ai/mistral-small\"):\n",
  " client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
  " messages = [ChatMessage(role=\"user\", content=user_message)]\n",
  " chat_response = client.chat(model=model, messages=messages)\n",

@@ -379,7 +379,7 @@
  "from mistralai.models.chat_completion import ChatMessage\n",
  "\n",
  "\n",
- "def run_mistral(user_message, model=\"mistral-small\", is_json=False):\n",
+ "def run_mistral(user_message, model=\"mistral-ai/mistral-small\", is_json=False):\n",
  " client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
  " messages = [ChatMessage(role=\"user\", content=user_message)]\n",
  "\n",

@@ -502,7 +502,7 @@
  " scoring_prompt.format(\n",
  " news=news, summary=summary, metric=i[\"metric\"], rubrics=i[\"rubrics\"]\n",
  " ),\n",
- " model=\"mistral-small\",\n",
+ " model=\"mistral-ai/mistral-small\",\n",
  " is_json=True,\n",
  " )\n",
  " print(eval_output)"

@@ -511,7 +511,7 @@
  ],
  "metadata": {
  "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
+ "display_name": "Python 3",
  "language": "python",
  "name": "python3"
  },
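For reference, the run_mistral helper this notebook defines in several cells now looks roughly like the sketch below. It only combines the client calls already visible in the hunks (MistralClient, ChatMessage, client.chat) with the new endpoint and publisher-prefixed model id; the choices[0].message.content access is an assumption about the legacy mistralai response object:

import os
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

github_token = os.environ["GITHUB_TOKEN"]
endpoint = "https://models.github.ai/inference"


def run_mistral(user_message, model="mistral-ai/mistral-small"):
    # Same client usage as in the notebook, pointed at the GitHub Models endpoint
    client = MistralClient(api_key=github_token, endpoint=endpoint)
    messages = [ChatMessage(role="user", content=user_message)]
    chat_response = client.chat(model=model, messages=messages)
    return chat_response.choices[0].message.content  # assumed response shape


print(run_mistral("Summarize: GitHub Models now serves inference at models.github.ai."))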

cookbooks/python/mistralai/function_calling.ipynb

Lines changed: 4 additions & 4 deletions

@@ -48,8 +48,8 @@
  "github_token = os.environ[\"GITHUB_TOKEN\"]\n",
  "\n",
  "# We can use some defaults for the other two variables\n",
- "endpoint = \"https://models.inference.ai.azure.com\"\n",
- "model_name = \"mistral-large\"\n",
+ "endpoint = \"https://models.github.ai/inference\"\n",
+ "model_name = \"mistral-ai/mistral-small\"\n",
  "\n",
  "\n",
  "# Assuming we have the following data\n",

@@ -216,7 +216,7 @@
  "source": [
  "from mistralai.client import MistralClient\n",
  "\n",
- "model = \"mistral-large\"\n",
+ "model = \"mistral-ai/mistral-small\"\n",
  "\n",
  "client = MistralClient(api_key=github_token, endpoint=endpoint)\n",
  "\n",

@@ -335,7 +335,7 @@
  ],
  "metadata": {
  "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
+ "display_name": "Python 3",
  "language": "python",
  "name": "python3"
  },
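As a hedged sketch of what the renamed default model means for a tool call with the legacy mistralai client: the tools payload, the get_payment_status name, and the tool_calls access below are illustrative assumptions, not content from this commit; only the endpoint and the mistral-ai/mistral-small id come from the diff:

import json
import os
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

github_token = os.environ["GITHUB_TOKEN"]
endpoint = "https://models.github.ai/inference"
model = "mistral-ai/mistral-small"

client = MistralClient(api_key=github_token, endpoint=endpoint)

# Hypothetical tool definition, for illustration only
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_payment_status",
            "description": "Get the payment status of a transaction",
            "parameters": {
                "type": "object",
                "properties": {
                    "transaction_id": {"type": "string", "description": "The transaction id."}
                },
                "required": ["transaction_id"],
            },
        },
    }
]

messages = [ChatMessage(role="user", content="What's the status of transaction T1001?")]
response = client.chat(model=model, messages=messages, tools=tools, tool_choice="auto")

# If the model decides to call the tool, the call shows up on the returned message
tool_call = response.choices[0].message.tool_calls[0]
print(tool_call.function.name, json.loads(tool_call.function.arguments))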
