mishig (HF staff) committed · commit dcf3974 · 1 parent: 279de18

Stronger typing: derive GenerationConfig from { ChatCompletionInput } in "@huggingface/tasks"

src/lib/components/InferencePlayground/generationConfigSettings.ts CHANGED

@@ -1,3 +1,11 @@
+import type { ChatCompletionInput } from "@huggingface/tasks";
+
+export const GENERATION_CONFIG_KEYS = ["temperature", "max_tokens", "top_p"] as const;
+
+export type GenerationConfigKey = (typeof GENERATION_CONFIG_KEYS)[number];
+
+export type GenerationConfig = Pick<ChatCompletionInput, GenerationConfigKey>;
+
 interface GenerationKeySettings {
 	default: number;
 	step: number;
@@ -6,7 +14,7 @@ interface GenerationKeySettings {
 	label: string;
 }
 
-export const GENERATION_CONFIG_SETTINGS: Record<string, GenerationKeySettings> = {
+export const GENERATION_CONFIG_SETTINGS: Record<GenerationConfigKey, GenerationKeySettings> = {
 	temperature: {
 		default: 0.5,
 		step: 0.1,
@@ -30,12 +38,6 @@ export const GENERATION_CONFIG_SETTINGS: Record<string, GenerationKeySettings> =
 	},
 };
 
-export type GenerationConfigKey = keyof typeof GENERATION_CONFIG_SETTINGS;
-
-export const GENERATION_CONFIG_KEYS: GenerationConfigKey[] = ["temperature", "max_tokens", "top_p"];
-
-export type GenerationConfig = Record<GenerationConfigKey, number>;
-
 export const defaultGenerationConfig = GENERATION_CONFIG_KEYS.reduce((acc, key) => {
 	acc[key] = GENERATION_CONFIG_SETTINGS[key].default;
 	return acc;
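
For context, a brief sketch of how the new declarations compose; the local names and the literal values at the end are illustrative, not from the commit:

import type { ChatCompletionInput } from "@huggingface/tasks";

const KEYS = ["temperature", "max_tokens", "top_p"] as const;

// `as const` keeps the literal types, so indexing by `number` yields the
// union "temperature" | "max_tokens" | "top_p" rather than plain string.
type Key = (typeof KEYS)[number];

// Pick reuses the field types declared upstream in @huggingface/tasks; if a
// field is renamed or retyped there, this module stops compiling instead of
// silently drifting, which is the point of the stronger type.
type Config = Pick<ChatCompletionInput, Key>;

// Illustrative value (the numbers are made up):
const example: Config = { temperature: 0.5, max_tokens: 2048, top_p: 0.7 };

This also reverses the direction of inference compared with the old code: the key list is now the source of truth, and both the key union and the settings record are derived from it.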
src/lib/components/InferencePlayground/inferencePlaygroundUtils.ts CHANGED

@@ -23,8 +23,7 @@ export async function handleStreamingResponse(
 		{
 			model: model.id,
 			messages,
-			temperature: conversation.config.temperature,
-			max_tokens: conversation.config.max_tokens,
+			...conversation.config,
 		},
 		{ signal: abortController.signal, use_cache: false }
 	)) {
@@ -49,8 +48,7 @@ export async function handleNonStreamingResponse(
 		{
 			model: model.id,
 			messages,
-			temperature: conversation.config.temperature,
-			max_tokens: conversation.config.max_tokens,
+			...conversation.config,
 		},
 		{ use_cache: false }
 	);
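
A short sketch of what the spread changes in practice; the model, messages, and conversation values are hypothetical stand-ins for the playground state:

import type { ChatCompletionInput } from "@huggingface/tasks";

// Hypothetical stand-ins for the playground state.
const model = { id: "example-org/example-model" };
const messages = [{ role: "user", content: "Hello" }];
const conversation = {
	config: { temperature: 0.5, max_tokens: 2048, top_p: 0.7 },
};

// Before this commit, temperature and max_tokens were copied field by field,
// so top_p sat in the config but never reached the request. Spreading the
// whole config forwards every GenerationConfigKey, present and future.
const request: ChatCompletionInput = {
	model: model.id,
	messages,
	...conversation.config,
};

Because GenerationConfig is now a Pick of ChatCompletionInput, the spread is type-safe by construction: every key in conversation.config is known to be a valid request field.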