gyulukeyi committed on
Commit 7aedff4 • 1 Parent(s): 6401f5b

updated question guard

Files changed (4)
  1. .gitattributes +1 -0
  2. app.py +33 -19
  3. question_undetector.pkl +3 -0
  4. requirements.txt +2 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+question_undetector.pkl filter=lfs diff=lfs merge=lfs -text
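For reference, this is the pattern line that `git lfs track "question_undetector.pkl"` appends: it routes the ~1.2 MB pickle (added below) through Git LFS rather than storing its bytes directly in the repository history.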
app.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import pickle
 import gradio as gr
 from openai import OpenAI
 
@@ -7,6 +8,16 @@ client = OpenAI(
     base_url="https://yo4x63mj3sbmgpwc.us-east-1.aws.endpoints.huggingface.cloud/v1/",
     api_key=os.environ.get("hf_token"),
 )
+with open("./question_undetector.pkl", "rb") as f:
+    (vectorizer, model) = pickle.load(f)
+
+
+def guard_question(question):
+    pred = model.predict(vectorizer.transform([question]))
+    if pred[0] == 1:
+        return True
+    else:
+        return False
 
 
 def respond(
@@ -17,25 +28,28 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "municipality", "content": ์ง€์ž์ฒด}]
-    messages.append({"role": "title", "content": ์ œ๋ชฉ})
-    messages.append({"role": "question", "content": ์งˆ๋ฌธ})
-    response = ""
-
-    chat_completion = client.chat.completions.create(
-        model="tgi",
-        messages=messages,
-        stream=True,
-        max_tokens=max_tokens,
-        temperature=temperature,
-        top_p=top_p,
-    )
-
-    for message in chat_completion:
-        token = message.choices[0].delta.content
-        if token:
-            response += token
-            yield response
+    if guard_question(์งˆ๋ฌธ):
+        messages = [{"role": "municipality", "content": ์ง€์ž์ฒด}]
+        messages.append({"role": "title", "content": ์ œ๋ชฉ})
+        messages.append({"role": "question", "content": ์งˆ๋ฌธ})
+        response = ""
+
+        chat_completion = client.chat.completions.create(
+            model="tgi",
+            messages=messages,
+            stream=True,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+        )
+
+        for message in chat_completion:
+            token = message.choices[0].delta.content
+            if token:
+                response += token
+                yield response
+    else:
+        yield "์ œ๊ฐ€ ๋‹ตํ•  ์ˆ˜ ์žˆ๋Š” ์งˆ๋ฌธ์ด ์•„๋‹Œ ๊ฒƒ ๊ฐ™์Šต๋‹ˆ๋‹ค. ์ €๋Š” ๋ฏผ์› ๊ฒŒ์‹œ๊ธ€์„ ์ฒ˜๋ฆฌํ•  ์ˆ˜ ์žˆ์–ด์š”."
 
 
 demo = gr.Interface(
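The guard unpickles a (vectorizer, model) pair and treats prediction 1 as "answerable"; otherwise respond() yields the fallback message (roughly: "This doesn't look like a question I can answer. I can handle civil-complaint posts."). The commit ships only the pickle, so the actual estimator classes, tokenizer, and training data are unknown; the sketch below is one plausible way such a pair could be produced with the two dependencies this commit adds (scikit-learn and kiwipiepy, a Korean morphological analyzer). The classes, tokenizer, and sample data are all assumptions.

```python
# A minimal training sketch (assumptions throughout): TfidfVectorizer +
# LogisticRegression are stand-ins; the real classes inside
# question_undetector.pkl are not recorded in this commit.
import pickle

from kiwipiepy import Kiwi  # Korean morphological analyzer (added to requirements.txt)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

kiwi = Kiwi()


def tokenize_korean(text):
    # Morpheme-level tokens; TfidfVectorizer's default token pattern splits
    # Korean poorly, which is presumably why kiwipiepy is a new dependency.
    return [token.form for token in kiwi.tokenize(text)]


# Hypothetical labels: 1 = an answerable civil-complaint question, 0 = off-topic.
texts = [
    "๋„๋กœ ํฌ์žฅ์ด ํŒŒ์†๋˜์–ด ๋ณด์ˆ˜๋ฅผ ์š”์ฒญํ•ฉ๋‹ˆ๋‹ค.",  # "The road surface is damaged; requesting repairs."
    "์˜ค๋Š˜ ์ €๋… ๋ฉ”๋‰ด ์ข€ ์ถ”์ฒœํ•ด ์ฃผ์„ธ์š”.",      # "Please recommend a dinner menu." (off-topic)
]
labels = [1, 0]

vectorizer = TfidfVectorizer(tokenizer=tokenize_korean, token_pattern=None)
model = LogisticRegression().fit(vectorizer.fit_transform(texts), labels)

with open("question_undetector.pkl", "wb") as f:
    pickle.dump((vectorizer, model), f)

# Same call shape as guard_question() in the diff above:
print(model.predict(vectorizer.transform(["๋ถˆ๋ฒ• ์ฃผ์ •์ฐจ๋ฅผ ์‹ ๊ณ ํ•ฉ๋‹ˆ๋‹ค."]))[0])
```

One wrinkle worth noting: pickling a vectorizer whose tokenizer is a plain function ties the pickle to that function's import path, so app.py could only unpickle such an artifact if the same function were importable there; the real pickle may therefore rely on a built-in analyzer or pre-tokenized input instead.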
question_undetector.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dc7997100b3e24f5683560e117b15a96eced62eebb0fca245c927d3c4f01f8f
+size 1194927
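What got committed is a Git LFS pointer, not the pickle itself; the pointer records the object's sha256 and byte size, and `git lfs pull` materializes the real file. A quick, hypothetical integrity check against the pointer's values:

```python
# Verify a locally materialized question_undetector.pkl against the LFS pointer above.
import hashlib
import os

PATH = "question_undetector.pkl"
EXPECTED_OID = "2dc7997100b3e24f5683560e117b15a96eced62eebb0fca245c927d3c4f01f8f"
EXPECTED_SIZE = 1194927

digest = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 16), b""):
        digest.update(chunk)

print(digest.hexdigest() == EXPECTED_OID)      # False means you still have the pointer file
print(os.path.getsize(PATH) == EXPECTED_SIZE)
```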
requirements.txt CHANGED
@@ -1,2 +1,4 @@
 huggingface_hub==0.22.2
 openai
+kiwipiepy
+scikit-learn
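Both new packages are load-time requirements, not just training-time ones: pickle serializes scikit-learn estimators by reference to their classes, so unpickling question_undetector.pkl re-imports them, and kiwipiepy would be needed on the same basis if the pickled vectorizer's tokenizer uses it. A minimal sanity check, assuming the app's working directory and that the pickle has been materialized:

```python
# Confirms the unpickled objects' classes resolve after `pip install -r requirements.txt`.
import pickle

with open("./question_undetector.pkl", "rb") as f:
    vectorizer, model = pickle.load(f)

# The module names printed here are not recorded in the commit; this simply
# shows which classes the pickle actually references.
print(type(vectorizer).__module__, type(vectorizer).__name__)
print(type(model).__module__, type(model).__name__)
```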