Fix typo: `maxTokens` → `max_tokens` in inference request payload
Browse files
src/lib/components/InferencePlayground/inferencePlaygroundUtils.ts
CHANGED
@@ -24,7 +24,7 @@ export async function handleStreamingResponse(
|
|
24 |
model: model.id,
|
25 |
messages,
|
26 |
temperature: conversation.config.temperature,
|
27 |
-
max_tokens: conversation.config.maxTokens,
|
28 |
},
|
29 |
{ signal: abortController.signal, use_cache: false }
|
30 |
)) {
|
@@ -50,7 +50,7 @@ export async function handleNonStreamingResponse(
|
|
50 |
model: model.id,
|
51 |
messages,
|
52 |
temperature: conversation.config.temperature,
|
53 |
-
max_tokens: conversation.config.maxTokens,
|
54 |
},
|
55 |
{ use_cache: false }
|
56 |
);
|
|
|
24 |
model: model.id,
|
25 |
messages,
|
26 |
temperature: conversation.config.temperature,
|
27 |
+
max_tokens: conversation.config.max_tokens,
|
28 |
},
|
29 |
{ signal: abortController.signal, use_cache: false }
|
30 |
)) {
|
|
|
50 |
model: model.id,
|
51 |
messages,
|
52 |
temperature: conversation.config.temperature,
|
53 |
+
max_tokens: conversation.config.max_tokens,
|
54 |
},
|
55 |
{ use_cache: false }
|
56 |
);
|