Improve snippet messages format (#33)
src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte
CHANGED
```diff
@@ -39,7 +39,7 @@
 		if (messages.length === 1 && messages[0].role === 'user' && !messages[0].content) {
 			messages = placeholder;
 		}
-		return
+		return messages;
 	}
 
 	function highlight(code: string, language: Language) {
```
```diff
@@ -47,7 +47,10 @@
 	}
 
 	function getJavascriptSnippets(conversation: Conversation) {
-
+		let messages = getMessages()
+			.map(({ role, content }) => `{ role: "${role}", content: "${content}" }`)
+			.join(',\n    ');
+		messages = `[\n    ${messages}\n  ]`;
 		const snippets: Snippet[] = [];
 		snippets.push({
 			label: 'Install @huggingface/inference',
```
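For reference, here is roughly what the new JavaScript formatting renders; the `ChatMessage` type and the `sample` conversation are hypothetical stand-ins for whatever the component's `getMessages()` returns:

```ts
type ChatMessage = { role: string; content: string };

// Hypothetical stand-in for the component's getMessages().
const sample: ChatMessage[] = [
	{ role: "user", content: "Hello!" },
	{ role: "assistant", content: "Hi there, how can I help?" },
];

// Same map/join pattern as in the hunk above.
let messages = sample
	.map(({ role, content }) => `{ role: "${role}", content: "${content}" }`)
	.join(',\n    ');
messages = `[\n    ${messages}\n  ]`;

console.log(messages);
// [
//     { role: "user", content: "Hello!" },
//     { role: "assistant", content: "Hi there, how can I help?" }
//   ]
```

Each message becomes one line of the generated snippet, which is presumably the formatting improvement the PR title refers to.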
```diff
@@ -67,7 +70,7 @@ let out = "";
 
 for await (const chunk of inference.chatCompletionStream({
 	model: "${conversation.model.id}",
-	messages: ${
+	messages: ${messages},
 	temperature: ${conversation.config.temperature},
 	max_tokens: ${conversation.config.maxTokens},
 	seed: 0,
@@ -90,7 +93,7 @@ const inference = new HfInference("your access token")
 
 const out = await inference.chatCompletion({
 	model: "${conversation.model.id}",
-	messages: ${
+	messages: ${messages},
 	temperature: ${conversation.config.temperature},
 	max_tokens: ${conversation.config.maxTokens},
 	seed: 0,
```
```diff
@@ -104,7 +107,10 @@ console.log(out.choices[0].message);`
 	}
 
 	function getPythonSnippets(conversation: Conversation) {
-
+		let messages = getMessages()
+			.map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
+			.join(',\n    ');
+		messages = `[\n    ${messages}\n]`;
 		const snippets: Snippet[] = [];
 		snippets.push({
 			label: 'Install huggingface_hub',
```
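The Python and Bash builders use the same pattern but quote the keys, so each rendered entry is simultaneously a valid Python dict literal and a valid JSON object. A quick sketch with the same hypothetical sample:

```ts
const sample = [
	{ role: "user", content: "Hello!" },
	{ role: "assistant", content: "Hi there, how can I help?" },
];

// Quoted keys make each entry valid JSON as well as valid Python syntax.
let messages = sample
	.map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
	.join(',\n    ');
messages = `[\n    ${messages}\n]`;

JSON.parse(messages); // would throw if the rendered list were malformed
```

Note that content is interpolated verbatim, so a message containing double quotes or newlines would break the rendered snippet; escaping is outside the scope of this diff.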
```diff
@@ -122,7 +128,7 @@ inference_client = InferenceClient(model_id, token=hf_token)
 
 output = ""
 
-messages = ${
+messages = ${messages}
 
 for token in client.chat_completion(messages, stream=True, temperature=${conversation.config.temperature}, max_tokens=${conversation.config.maxTokens}):
     new_content = token.choices[0].delta.content
@@ -139,7 +145,7 @@ model_id="${conversation.model.id}"
 hf_token = "your HF token"
 inference_client = InferenceClient(model_id, token=hf_token)
 
-messages = ${
+messages = ${messages}
 
 output = inference_client.chat_completion(messages, temperature=${conversation.config.temperature}, max_tokens=${conversation.config.maxTokens})
 
```
```diff
@@ -151,7 +157,10 @@ print(output.choices[0].message)`
 	}
 
 	function getBashSnippets(conversation: Conversation) {
-
+		let messages = getMessages()
+			.map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
+			.join(',\n    ');
+		messages = `[\n    ${messages}\n]`;
 		const snippets: Snippet[] = [];
 
 		if (conversation.streaming) {
```
```diff
@@ -162,7 +171,7 @@ print(output.choices[0].message)`
 --header 'Content-Type: application/json' \
 --data '{
 	"model": "meta-llama/Meta-Llama-3-8B-Instruct",
-	"messages": ${
+	"messages": ${messages},
 	"temperature": ${conversation.config.temperature},
 	"max_tokens": ${conversation.config.maxTokens},
 	"stream": true
@@ -177,7 +186,7 @@ print(output.choices[0].message)`
 --header 'Content-Type: application/json' \
 --data '{
 	"model": "meta-llama/Meta-Llama-3-8B-Instruct",
-	"messages": ${
+	"messages": ${messages},
 	"temperature": ${conversation.config.temperature},
 	"max_tokens": ${conversation.config.maxTokens}
 }'`
```
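Putting it together for the curl snippets: the JSON-style `messages` string drops into the `--data` template to form a well-formed JSON body. A minimal check, with placeholder config values (0.5 and 2048 are made up, not the playground's defaults):

```ts
const messages = `[\n    { "role": "user", "content": "Hello!" }\n]`;

// Mirrors the --data template from the hunks above.
const data = `{
	"model": "meta-llama/Meta-Llama-3-8B-Instruct",
	"messages": ${messages},
	"temperature": 0.5,
	"max_tokens": 2048,
	"stream": true
}`;

JSON.parse(data); // would throw if the generated payload were malformed
```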