Commit d4fcb0f by mishig (HF staff)
Parent: 0bcf467
src/lib/components/InferencePlayground/InferencePlaygroundHFTokenModal.svelte CHANGED
@@ -11,7 +11,7 @@
 	const dispatch = createEventDispatcher<{ close: void }>();
 
 	function handleKeydown(event: KeyboardEvent) {
-		const { key } = event;
+		const { key } = event;
 		if (key === "Escape") {
 			event.preventDefault();
 			dispatch("close");
src/lib/components/InferencePlayground/InferencePlaygroundModelSelectorModal.svelte CHANGED
@@ -13,7 +13,7 @@
 	const dispatch = createEventDispatcher<{ modelSelected: string; close: void }>();
 
 	function handleKeydown(event: KeyboardEvent) {
-		const { key } = event;
+		const { key } = event;
 		if (key === "Escape") {
 			event.preventDefault();
 			dispatch("close");
src/lib/components/InferencePlayground/inferencePlaygroundUtils.ts CHANGED
@@ -3,12 +3,10 @@ import type { Conversation, ModelEntryWithTokenizer } from "$lib/types";
 
 import { HfInference } from "@huggingface/inference";
 
-
 export function createHfInference(token: string): HfInference {
 	return new HfInference(token);
 }
 
-
 export async function handleStreamingResponse(
 	hf: HfInference,
 	conversation: Conversation,
@@ -37,7 +35,6 @@ export async function handleStreamingResponse(
 	}
 }
 
-
 export async function handleNonStreamingResponse(
 	hf: HfInference,
 	conversation: Conversation,
@@ -61,7 +58,6 @@ export async function handleNonStreamingResponse(
 	throw new Error("No response from the model");
 }
 
-
 export function isSystemPromptSupported(model: ModelEntryWithTokenizer) {
 	return model.tokenizerConfig?.chat_template?.includes("system");
 }
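
These hunks only remove duplicate blank lines; the exported API is unchanged. The diff shows just the first two parameters of handleStreamingResponse, so the sketch below does not guess its full signature and instead combines the visible helpers directly. The message contents, the use of model.id as the repo id, and the hf.chatCompletionStream call are assumptions for illustration, not code from this commit:

	import { createHfInference, isSystemPromptSupported } from "./inferencePlaygroundUtils";
	import type { ModelEntryWithTokenizer } from "$lib/types";

	async function streamDemo(model: ModelEntryWithTokenizer, token: string) {
		// Client built from the user-supplied token (collected by the token modal above).
		const hf = createHfInference(token);

		// Only include a system message when the model's chat template supports one.
		const messages = isSystemPromptSupported(model)
			? [
					{ role: "system", content: "You are a helpful assistant." },
					{ role: "user", content: "Hello!" },
				]
			: [{ role: "user", content: "Hello!" }];

		// Consume the stream chunk by chunk; each chunk carries a content delta.
		let output = "";
		for await (const chunk of hf.chatCompletionStream({ model: model.id, messages })) {
			output += chunk.choices[0]?.delta?.content ?? "";
		}
		return output;
	}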