Improve snippets content (#52)
src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte
CHANGED
@@ -122,20 +122,22 @@
 	needsToken: true,
 	code: `import { HfInference } from "@huggingface/inference"
 
-const inference = new HfInference("${tokenStr}")
+const client = new HfInference("${tokenStr}")
 
 let out = "";
 
-for await (const chunk of inference.chatCompletionStream({
-	model: "${conversation.model.id}",
-	messages: ${formattedMessages({ sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" })},
-	${formattedConfig({ sep: ",\n\t", start: "", end: "" })}
-})) {
-	if (chunk.choices && chunk.choices.length > 0) {
-		const newContent = chunk.choices[0].delta.content;
-		out += newContent;
-		console.log(newContent);
-	}
+const stream = client.chatCompletionStream({
+	model: "${conversation.model.id}",
+	messages: ${formattedMessages({ sep: ",\n\t", start: "[\n\t", end: "\n ]" })},
+	${formattedConfig({ sep: ",\n ", start: "", end: "" })}
+});
+
+for await (const chunk of stream) {
+	if (chunk.choices && chunk.choices.length > 0) {
+		const newContent = chunk.choices[0].delta.content;
+		out += newContent;
+		console.log(newContent);
+	}
 }`,
 });
 } else {
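For reference, the updated streaming snippet renders to roughly the following once the playground interpolates ${tokenStr}, ${conversation.model.id}, formattedMessages, and formattedConfig. The token, model id, message, and max_tokens value below are placeholder examples, not values taken from this PR:

import { HfInference } from "@huggingface/inference";

// Placeholder token and model id; any valid pair works the same way.
const client = new HfInference("hf_xxx");

let out = "";

const stream = client.chatCompletionStream({
	model: "meta-llama/Meta-Llama-3-8B-Instruct",
	messages: [
		{ role: "user", content: "What is the capital of France?" }
	],
	max_tokens: 500
});

for await (const chunk of stream) {
	if (chunk.choices && chunk.choices.length > 0) {
		// Each chunk carries the newly generated text in choices[0].delta.content.
		const newContent = chunk.choices[0].delta.content;
		out += newContent;
		console.log(newContent);
	}
}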
@@ -145,15 +147,15 @@ for await (const chunk of inference.chatCompletionStream({
 	needsToken: true,
 	code: `import { HfInference } from '@huggingface/inference'
 
-const inference = new HfInference("${tokenStr}")
+const client = new HfInference("${tokenStr}")
 
-const out = await inference.chatCompletion({
+const chatCompletion = await client.chatCompletion({
 	model: "${conversation.model.id}",
 	messages: ${formattedMessages({ sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" })},
 	${formattedConfig({ sep: ",\n\t", start: "", end: "" })}
 });
 
-console.log(out.choices[0].message);`,
+console.log(chatCompletion.choices[0].message);`,
 });
 }
 
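The formattedMessages and formattedConfig calls in these templates are helpers defined elsewhere in this component, outside the hunks shown here. A minimal sketch of what they might look like, assuming they serialize the conversation's messages and generation config with configurable separators; the names, signatures, and types below are illustrative, not the component's actual code:

interface Message {
	role: string;
	content: string;
}

interface FormatOpts {
	sep: string;
	start: string;
	end: string;
	connector?: string;
}

// Joins messages as `{ role: "...", content: "..." }` entries between `start` and `end`.
function formattedMessages({ sep, start, end }: FormatOpts, messages: Message[]): string {
	const body = messages.map(m => `{ role: "${m.role}", content: "${m.content}" }`).join(sep);
	return start + body + end;
}

// Joins config entries as `key: value` (JS) or `key=value` (Python, via connector: "=").
function formattedConfig({ sep, start, end, connector = ": " }: FormatOpts, config: Record<string, number>): string {
	const body = Object.entries(config).map(([key, value]) => `${key}${connector}${value}`).join(sep);
	return start + body + end;
}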
@@ -188,23 +190,25 @@ console.log(out.choices[0].message);`,
 	code: `import { OpenAI } from "openai"
 
 const client = new OpenAI({
-	baseURL: "https://api-inference.huggingface.co/
+	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${tokenStr}"
 })
 
 let out = "";
 
-for await (const chunk of await client.chat.completions.create({
-	model: "${conversation.model.id}",
-	messages: ${formattedMessages({ sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" })},
-	${formattedConfig({ sep: ",\n\t", start: "", end: "" })},
-	stream: true,
-})) {
-	if (chunk.choices && chunk.choices.length > 0) {
-		const newContent = chunk.choices[0].delta.content;
-		out += newContent;
-		console.log(newContent);
-	}
+const stream = await client.chat.completions.create({
+	model: "${conversation.model.id}",
+	messages: ${formattedMessages({ sep: ",\n\t", start: "[\n\t", end: "\n ]" })},
+	${formattedConfig({ sep: ",\n ", start: "", end: "" })},
+	stream: true,
+});
+
+for await (const chunk of stream) {
+	if (chunk.choices && chunk.choices.length > 0) {
+		const newContent = chunk.choices[0].delta.content;
+		out += newContent;
+		console.log(newContent);
+	}
 }`,
 });
 } else {
@@ -215,17 +219,17 @@ for await (const chunk of await client.chat.completions.create({
 	code: `import { OpenAI } from "openai"
 
 const client = new OpenAI({
-	baseURL: "https://api-inference.huggingface.co/
+	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${tokenStr}"
 })
 
-const out = await client.chat.completions.create({
+const chatCompletion = await client.chat.completions.create({
 	model: "${conversation.model.id}",
 	messages: ${formattedMessages({ sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" })},
 	${formattedConfig({ sep: ",\n\t", start: "", end: "" })}
 });
 
-console.log(out.choices[0].message);`,
+console.log(chatCompletion.choices[0].message);`,
 });
 }
 
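For comparison, the non-streaming OpenAI snippet renders to roughly the following; the model id, token, and max_tokens are again placeholders. The notable fix in these two hunks is the baseURL, which the PR updates to end in /v1/ so the openai client targets the OpenAI-compatible route of the Inference API:

import { OpenAI } from "openai";

const client = new OpenAI({
	// baseURL updated in this PR to include the /v1/ suffix.
	baseURL: "https://api-inference.huggingface.co/v1/",
	apiKey: "hf_xxx"
});

const chatCompletion = await client.chat.completions.create({
	model: "meta-llama/Meta-Llama-3-8B-Instruct",
	messages: [
		{ role: "user", content: "What is the capital of France?" }
	],
	max_tokens: 500
});

console.log(chatCompletion.choices[0].message);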
@@ -263,14 +267,14 @@ client = InferenceClient(api_key="${tokenStr}")
 
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
-
+stream = client.chat.completions.create(
 	model="${conversation.model.id}",
 	messages=messages,
-
-
+	${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })},
+	stream=True
 )
 
-for chunk in
+for chunk in stream:
 	print(chunk.choices[0].delta.content)`,
 });
 } else {
@@ -284,13 +288,13 @@ client = InferenceClient(api_key="${tokenStr}")
 
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
-
+completion = client.chat.completions.create(
 	model="${conversation.model.id}",
 	messages=messages,
 	${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })}
 )
 
-print(
+print(completion.choices[0].message)`,
 });
 }
 
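The Python hunks pass connector: "=" to the same formattedConfig helper, which is what turns the shared config into Python keyword arguments instead of JS object properties. Continuing the sketch above with assumed sample values:

const config = { temperature: 0.5, max_tokens: 2048 };

// Default connector (": ") produces JS object syntax:
//   temperature: 0.5,
//   max_tokens: 2048
console.log(formattedConfig({ sep: ",\n", start: "", end: "" }, config));

// connector: "=" produces Python keyword arguments:
//   temperature=0.5,
//   max_tokens=2048
console.log(formattedConfig({ sep: ",\n", start: "", end: "", connector: "=" }, config));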
@@ -331,14 +335,14 @@ client = OpenAI(
 
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
-
+stream = client.chat.completions.create(
 	model="${conversation.model.id}",
 	messages=messages,
-
-
+	${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })},
+	stream=True
 )
 
-for chunk in
+for chunk in stream:
 	print(chunk.choices[0].delta.content)`,
 });
 } else {
@@ -355,13 +359,13 @@ client = OpenAI(
 
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
-
+completion = client.chat.completions.create(
 	model="${conversation.model.id}",
 	messages=messages,
 	${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })}
 )
 
-print(
+print(completion.choices[0].message)`,
 });
 }