
Merge remote-tracking branch 'upstream/main' into add_t5_tts · NielsRogge/huggingface.js@376ef04

Commit 376ef04

Merge remote-tracking branch 'upstream/main' into add_t5_tts
2 parents: 2d5ef28 + 0b05b68 · commit 376ef04

File tree

2 files changed (+44, -3 lines)


.github/workflows/lint.yml

Lines changed: 6 additions & 3 deletions
@@ -18,12 +18,15 @@ jobs:
       - name: "Extracting the merge base into 'SINCE'"
         id: since
         run: |
-          if [ -z "${{ github.event.pull_request.head.ref }}" ]
+          if [ -z $PR_REF ]
           then
-            echo "SINCE=${{ github.sha }}^1" >> $GITHUB_OUTPUT
+            echo "SINCE=$SHA^1" >> $GITHUB_OUTPUT
           else
-            echo "SINCE=$(git merge-base origin/${{ github.event.pull_request.base.ref }} ${{ github.sha }})" >> $GITHUB_OUTPUT
+            echo "SINCE=$(git merge-base origin/$PR_REF $SHA)" >> $GITHUB_OUTPUT
           fi
+        env:
+          PR_REF: ${{ github.event.pull_request.head.ref }}
+          SHA: ${{ github.sha }}

       - run: corepack enable
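Note on this hunk: the `${{ … }}` expressions for the pull-request head ref and the commit SHA are no longer interpolated directly into the `run:` script. They are exposed through the step's `env:` block as `PR_REF` and `SHA`, and the shell reads them as ordinary environment variables. Passing workflow expressions through `env:` rather than inlining them into shell commands is the pattern GitHub's security-hardening guidance recommends for untrusted values such as branch names, since it avoids script injection into the step's command text.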

packages/tasks/src/local-apps.ts

Lines changed: 38 additions & 0 deletions
@@ -77,6 +77,9 @@ function isMarlinModel(model: ModelData): boolean {
 function isTransformersModel(model: ModelData): boolean {
 	return model.tags.includes("transformers");
 }
+function isTgiModel(model: ModelData): boolean {
+	return model.tags.includes("text-generation-inference");
+}

 function isLlamaCppGgufModel(model: ModelData) {
 	return !!model.gguf?.context_length;
@@ -184,6 +187,34 @@ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
 		},
 	];
 };
+const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
+	const runCommand = [
+		"# Call the server using curl:",
+		`curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+		`	-H "Content-Type: application/json" \\`,
+		`	--data '{`,
+		`		"model": "${model.id}",`,
+		`		"messages": [`,
+		`			{"role": "user", "content": "What is the capital of France?"}`,
+		`		]`,
+		`	}'`,
+	];
+	return [
+		{
+			title: "Use Docker images",
+			setup: [
+				"# Deploy with docker on Linux:",
+				`docker run --gpus all \\`,
+				`	-v ~/.cache/huggingface:/root/.cache/huggingface \\`,
+				`	-e HF_TOKEN="<secret>" \\`,
+				`	-p 8000:80 \\`,
+				`	ghcr.io/huggingface/text-generation-inference:latest \\`,
+				`	--model-id ${model.id}`,
+			].join("\n"),
+			content: [runCommand.join("\n")],
+		},
+	];
+};

 /**
  * Add your new local app here.
@@ -218,6 +249,13 @@ export const LOCAL_APPS = {
 			(model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
 		snippet: snippetVllm,
 	},
+	tgi: {
+		prettyLabel: "TGI",
+		docsUrl: "https://huggingface.co/docs/text-generation-inference/",
+		mainTask: "text-generation",
+		displayOnModelPage: isTgiModel,
+		snippet: snippetTgi,
+	},
 	lmstudio: {
 		prettyLabel: "LM Studio",
 		docsUrl: "https://lmstudio.ai",
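For illustration, below is a minimal, self-contained TypeScript sketch of what the new snippetTgi helper renders once a model carries the "text-generation-inference" tag. The trimmed-down ModelDataLite/LocalAppSnippetLite types, the "Lite" function names, and the demo model id are assumptions made only for this sketch; they are not part of the package, which uses its own ModelData and LocalAppSnippet types.

// Sketch only: approximates the behaviour of the isTgiModel / snippetTgi
// additions in the diff above, with simplified local types.
interface ModelDataLite {
	id: string;
	tags: string[];
}

interface LocalAppSnippetLite {
	title: string;
	setup: string;
	content: string[];
}

function isTgiModelLite(model: ModelDataLite): boolean {
	// Same gate as the diff: TGI is only offered for models tagged
	// "text-generation-inference".
	return model.tags.includes("text-generation-inference");
}

function snippetTgiLite(model: ModelDataLite): LocalAppSnippetLite[] {
	// Docker command shown as the "setup" step.
	const setup = [
		"# Deploy with docker on Linux:",
		"docker run --gpus all \\",
		"	-v ~/.cache/huggingface:/root/.cache/huggingface \\",
		'	-e HF_TOKEN="<secret>" \\',
		"	-p 8000:80 \\",
		"	ghcr.io/huggingface/text-generation-inference:latest \\",
		`	--model-id ${model.id}`,
	].join("\n");
	// curl call against the OpenAI-compatible chat completions route.
	const runCommand = [
		"# Call the server using curl:",
		'curl -X POST "http://localhost:8000/v1/chat/completions" \\',
		'	-H "Content-Type: application/json" \\',
		"	--data '{",
		`		"model": "${model.id}",`,
		'		"messages": [',
		'			{"role": "user", "content": "What is the capital of France?"}',
		"		]",
		"	}'",
	].join("\n");
	return [{ title: "Use Docker images", setup, content: [runCommand] }];
}

// Hypothetical model id, used only to show the rendered snippet.
const demo: ModelDataLite = { id: "some-org/some-model", tags: ["text-generation-inference"] };
if (isTgiModelLite(demo)) {
	console.log(snippetTgiLite(demo)[0].setup);
}

Running the sketch (for example with ts-node) prints the docker command with the demo id substituted into --model-id, which mirrors how the model page interpolates model.id into the generated snippet.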

0 commit comments
