update
src/lib/components/Playground/Playground.svelte
CHANGED

@@ -175,7 +175,12 @@
 			<div class="!p-0 text-sm font-semibold">Add message</div>
 		</button>
 	{:else}
-		<PlaygroundCode
+		<PlaygroundCode
+			model={$currentModel}
+			streaming={$streaming}
+			temperature={$temperature}
+			maxTokens={$maxTokens}
+		/>
 	{/if}
 </div>
 
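The `$`-prefixed values passed to `<PlaygroundCode>` above are Svelte store subscriptions, so Playground.svelte must have `currentModel`, `streaming`, `temperature`, and `maxTokens` stores in scope. A minimal sketch of that setup, assuming plain `writable` stores; the initial values below are illustrative placeholders, not part of this commit:

<script lang="ts">
	// Hypothetical store setup: only the store names come from the diff above.
	import { writable } from "svelte/store";

	const currentModel = writable("model-id-placeholder"); // assumed default
	const streaming = writable(true);
	const temperature = writable(0.5);
	const maxTokens = writable(500);
</script>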
src/lib/components/Playground/PlaygroundCode.svelte
CHANGED
@@ -1,29 +1,33 @@
-<script>
-	export let model
+<script lang="ts">
+	export let model: string;
+	export let streaming: Boolean;
+	export let temperature: number;
+	export let maxTokens: number;
+	export let messages: Array;
 
 	const npmSnippet = `import { HfInference } from '@huggingface/inference'
 
 const hf = new HfInference('your access token')`;
 
-	$: nonStreaming = `await hf.chatCompletion({
+	$: nonStreamingSnippet = `await hf.chatCompletion({
 	model: "${model}",
 	messages: [
 		{ role: "user", content: "Complete the this sentence with words one plus one is equal " }
 	],
-	max_tokens:
-	temperature:
+	max_tokens: ${maxTokens},
+	temperature: ${temperature},
 	seed: 0,
 });`;
 
-	$:
+	$: streamingSnippet = `let out = "";
 
 for await (const chunk of hf.chatCompletionStream({
-	model: "
+	model: "${model}",
 	messages: [
 		{ role: "user", content: "Complete the equation 1+1= ,just the answer" },
 	],
-	max_tokens:
-	temperature:
+	max_tokens: ${maxTokens},
+	temperature: ${temperature},
 	seed: 0,
 })) {
 	if (chunk.choices && chunk.choices.length > 0) {

@@ -59,14 +63,13 @@ for await (const chunk of hf.chatCompletionStream({
 	</div>
 	<pre
 		class="overflow-x-auto border-y border-y-gray-100 bg-gray-50 px-4 py-6 text-sm">{npmSnippet}</pre>
+
 	<div class="px-4 pb-4 pt-6">
-		<h2 class="font-semibold">Non-Streaming API</h2>
-	</div>
-	<pre
-		class="overflow-x-auto border-y border-y-gray-100 bg-gray-50 px-4 py-6 text-sm">{nonStreaming}</pre>
-	<div class="px-4 pb-4 pt-6">
-		<h2 class="font-semibold">Streaming API</h2>
+		<h2 class="font-semibold">{streaming ? 'Streaming API' : 'Non-Streaming API'}</h2>
 	</div>
-
-
+
+	<pre class="overflow-x-auto border-y border-gray-100 bg-gray-50 px-4 py-6 text-sm">{streaming
+		? streamingSnippet
+		: nonStreamingSnippet}
+	</pre>
 </div>
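Two of the prop types introduced above deserve a second look now that `lang="ts"` is active: `Boolean` names the wrapper-object interface rather than the primitive `boolean`, and a bare `Array` is a generic missing its required type argument, which the TypeScript compiler rejects. Note also that `messages` is declared but never passed by Playground.svelte in this commit. A sketch of tightened declarations, where the `ChatMessage` shape is an assumption inferred from the snippet strings rather than taken from the commit:

// Hypothetical stricter prop types for PlaygroundCode.svelte.
interface ChatMessage {
	role: "system" | "user" | "assistant";
	content: string;
}

export let model: string;
export let streaming: boolean; // primitive `boolean`, not the `Boolean` wrapper
export let temperature: number;
export let maxTokens: number;
export let messages: ChatMessage[] = []; // bare `Array` lacks its type argument; a default avoids an undefined prop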