snippets "showToken" feature

src/lib/components/InferencePlayground/InferencePlayground.svelte

@@ -196,6 +196,7 @@
 			{conversation}
 			index={0}
 			{viewCode}
+			{hfToken}
 			on:addMessage={addMessage}
 			on:deleteMessage={e => deleteMessage(e.detail)}
 		/>

src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte

@@ -15,6 +15,7 @@
 	hljs.registerLanguage("http", http);
 
 	export let conversation: Conversation;
+	export let hfToken: string;
 
 	const lanuages = ["javascript", "python", "http"];
 	type Language = (typeof lanuages)[number];
@@ -28,6 +29,7 @@
 		label: string;
 		code: string;
 		language?: Language;
+		needsToken?: boolean;
 	}
 
 	interface MessagesJoiner {
@@ -36,14 +38,24 @@
 		end: string;
 	}
 
+	let selectedLanguage: Language = "javascript";
+	let timeout: ReturnType<typeof setTimeout>;
+	let showToken = false;
+
+	$: tokenStr = getTokenStr(showToken);
+
 	$: snippetsByLanguage = {
-		javascript: getJavascriptSnippets(conversation),
-		python: getPythonSnippets(conversation),
-		http: getHttpSnippets(conversation),
+		javascript: getJavascriptSnippets(conversation, tokenStr),
+		python: getPythonSnippets(conversation, tokenStr),
+		http: getHttpSnippets(conversation, tokenStr),
 	};
 
-	let selectedLanguage: Language = "javascript";
-	let timeout: ReturnType<typeof setTimeout>;
+	function getTokenStr(showToken: boolean){
+		if(hfToken && showToken){
+			return hfToken;
+		}
+		return "YOUR_HF_TOKEN";
+	}
 
 	function getMessages() {
 		const placeholder = [{ role: "user", content: "Tell me a story" }];
@@ -70,7 +82,7 @@
 		return hljs.highlight(code, { language }).value;
 	}
 
-	function getJavascriptSnippets(conversation: Conversation) {
+	function getJavascriptSnippets(conversation: Conversation, tokenStr: string) {
 		const formattedMessages = ({ sep, start, end }: MessagesJoiner) =>
 			start +
 			getMessages()
@@ -94,9 +106,10 @@
 		if (conversation.streaming) {
 			snippets.push({
 				label: "Streaming API",
+				needsToken: true,
 				code: `import { HfInference } from "@huggingface/inference"
 
-const inference = new HfInference("YOUR_HF_TOKEN")
+const inference = new HfInference("${tokenStr}")
 
 let out = "";
 
@@ -116,9 +129,10 @@ for await (const chunk of inference.chatCompletionStream({
 		// non-streaming
 		snippets.push({
 			label: "Non-Streaming API",
+			needsToken: true,
 			code: `import { HfInference } from '@huggingface/inference'
 
-const inference = new HfInference("YOUR_HF_TOKEN")
+const inference = new HfInference("${tokenStr}")
 
 const out = await inference.chatCompletion({
 	model: "${conversation.model.id}",
@@ -133,7 +147,7 @@ console.log(out.choices[0].message);`,
 		return snippets;
 	}
 
-	function getPythonSnippets(conversation: Conversation) {
+	function getPythonSnippets(conversation: Conversation, tokenStr: string) {
 		const formattedMessages = ({ sep, start, end }: MessagesJoiner) =>
 			start +
 			getMessages()
@@ -157,9 +171,10 @@ console.log(out.choices[0].message);`,
 		if (conversation.streaming) {
 			snippets.push({
 				label: "Streaming API",
+				needsToken: true,
 				code: `from huggingface_hub import InferenceClient
 
-client = InferenceClient(api_key="YOUR_HF_TOKEN")
+client = InferenceClient(api_key="${tokenStr}")
 
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
@@ -177,10 +192,11 @@ for chunk in output:
 		// non-streaming
 		snippets.push({
 			label: "Non-Streaming API",
+			needsToken: true,
 			code: `from huggingface_hub import InferenceClient
 
 model_id="${conversation.model.id}"
-client = InferenceClient(api_key="YOUR_HF_TOKEN")
+client = InferenceClient(api_key="${tokenStr}")
 
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
@@ -197,7 +213,10 @@ print(output.choices[0].message)`,
 		return snippets;
 	}
 
-	function getHttpSnippets(conversation: Conversation) {
+	function getHttpSnippets(conversation: Conversation, tokenStr: string) {
+		if(tokenStr === "YOUR_HF_TOKEN"){
+			tokenStr = "{YOUR_HF_TOKEN}";
+		}
 		const formattedMessages = ({ sep, start, end }: MessagesJoiner) =>
 			start +
 			getMessages()
@@ -217,8 +236,9 @@ print(output.choices[0].message)`,
 		if (conversation.streaming) {
 			snippets.push({
 				label: "Streaming API",
+				needsToken: true,
 				code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \\
---header "Authorization: Bearer {YOUR_HF_TOKEN}" \\
+--header "Authorization: Bearer ${tokenStr}" \\
 --header 'Content-Type: application/json' \\
 --data '{
 	"model": "${conversation.model.id}",
@@ -231,8 +251,9 @@ print(output.choices[0].message)`,
 		// non-streaming
 		snippets.push({
 			label: "Non-Streaming API",
+			needsToken: true,
 			code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \\
---header "Authorization: Bearer {YOUR_HF_TOKEN}" \\
+--header "Authorization: Bearer ${tokenStr}" \\
 --header 'Content-Type: application/json' \\
 --data '{
 	"model": "${conversation.model.id}",
@@ -271,9 +292,16 @@ print(output.choices[0].message)`,
 	</ul>
 </div>
 
-{#each snippetsByLanguage[selectedLanguage] as { label, code, language }}
+{#each snippetsByLanguage[selectedLanguage] as { label, code, language, needsToken }}
 	<div class="flex items-center justify-between px-2 md:px-4 pb-4 pt-6">
 		<h2 class="font-semibold">{label}</h2>
+		<div class="flex items-center gap-x-4">
+			{#if needsToken && hfToken}
+				<label class="flex items-center gap-x-1.5">
+					<input type="checkbox" bind:checked={showToken}>
+					<p class="leading-none">show token</p>
+				</label>
+			{/if}
 		<button
 			class="flex items-center gap-x-1.5 rounded-md bg-gray-200 px-1.5 py-0.5 text-sm transition dark:bg-gray-950/80"
 			on:click={e => {
@@ -291,6 +319,7 @@ print(output.choices[0].message)`,
 			<IconCopyCode /> Copy code
 		</button>
 	</div>
+	</div>
 	<pre
 		class="overflow-x-auto rounded-lg border border-gray-200/80 bg-white px-4 py-6 text-sm shadow-sm dark:border-gray-800 dark:bg-gray-800/50">{@html highlight(
 			code,
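
The heart of the feature is in the snippets component above: every generated snippet interpolates `tokenStr`, which is the real `hfToken` only when a token exists and the user ticks "show token", and the `YOUR_HF_TOKEN` placeholder otherwise. A minimal standalone sketch of that pattern in plain TypeScript (names and values here are illustrative, not the component's actual exports):

// Sketch of the token-masking pattern implemented above (illustrative, not the real module).

function getTokenStr(hfToken: string, showToken: boolean): string {
	// Reveal the real token only when one is set and the user has opted in.
	if (hfToken && showToken) {
		return hfToken;
	}
	return "YOUR_HF_TOKEN";
}

// The snippet templates interpolate tokenStr, so toggling showToken swaps the value in place.
function pythonSnippet(modelId: string, tokenStr: string): string {
	return [
		"from huggingface_hub import InferenceClient",
		"",
		`client = InferenceClient(api_key="${tokenStr}")`,
		`# model: ${modelId}`,
	].join("\n");
}

// Illustrative values only.
const token = "hf_xxx";
console.log(pythonSnippet("some-org/some-model", getTokenStr(token, false))); // placeholder shown
console.log(pythonSnippet("some-org/some-model", getTokenStr(token, true)));  // real token shown

In the component this is wired up reactively: `$: tokenStr = getTokenStr(showToken)` re-runs whenever the checkbox toggles `showToken`, and because `$: snippetsByLanguage = {...}` depends on `tokenStr`, every snippet is rebuilt with the new value without any manual re-render.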

src/lib/components/InferencePlayground/InferencePlaygroundConversation.svelte

@@ -10,6 +10,7 @@
 	export let conversation: Conversation;
 	export let loading: boolean;
 	export let viewCode: boolean;
+	export let hfToken: string;
 
 	const dispatch = createEventDispatcher<{
 		addMessage: void;
@@ -57,6 +58,6 @@
 		</div>
 	</button>
 {:else}
-	<CodeSnippets {conversation} />
+	<CodeSnippets {conversation} {hfToken} />
 {/if}
 </div>
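
One detail of the snippets component worth calling out: `getHttpSnippets` rewrites the bare `YOUR_HF_TOKEN` placeholder to `{YOUR_HF_TOKEN}`, presumably so the curl examples present it as an obvious fill-me-in slot rather than something that looks like a real value. A condensed restatement of that branch, again with illustrative names:

// Condensed restatement of the curl authorization handling above (illustrative only).
function httpAuthHeader(hfToken: string, showToken: boolean): string {
	let tokenStr = hfToken && showToken ? hfToken : "YOUR_HF_TOKEN";
	if (tokenStr === "YOUR_HF_TOKEN") {
		// Brace the placeholder so the generated curl command clearly marks it as a value to replace.
		tokenStr = "{YOUR_HF_TOKEN}";
	}
	return `--header "Authorization: Bearer ${tokenStr}"`;
}

console.log(httpAuthHeader("hf_xxx", false)); // --header "Authorization: Bearer {YOUR_HF_TOKEN}"
console.log(httpAuthHeader("hf_xxx", true));  // --header "Authorization: Bearer hf_xxx"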