tolulope committed on
Commit
55f45b4
·
verified ·
1 Parent(s): a4bba8d

Added fixed system prompt

Browse files
Files changed (1) hide show
  1. app.py +34 -3
app.py CHANGED
@@ -22,8 +22,28 @@ pipeline = transformers.pipeline(
22
 
23
  pipeline.model = PeftModel.from_pretrained(llama_model, model_id)
24
 
25
- def chat_function(message, history, system_prompt, max_new_tokens, temperature):
26
- messages = [{"role":"system","content":system_prompt},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  {"role":"user", "content":message}]
28
  prompt = pipeline.tokenizer.apply_chat_template(
29
  messages,
@@ -44,17 +64,28 @@ def chat_function(message, history, system_prompt, max_new_tokens, temperature):
44
  """
45
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
46
  """
 
 
 
 
 
 
 
 
 
 
 
47
  demo = gr.ChatInterface(
48
  chat_function,
49
  textbox=gr.Textbox(placeholder="Enter message here", container=False, scale = 7),
50
  chatbot=gr.Chatbot(height=400),
51
  additional_inputs=[
52
- gr.Textbox("You are helpful AI", label="System Prompt"),
53
  gr.Slider(100,4000, label="Max New Tokens"),
54
  gr.Slider(0,1, label="Temperature")
55
  ]
56
  )
57
 
58
 
 
59
  if __name__ == "__main__":
60
  demo.launch()
 
22
 
23
  pipeline.model = PeftModel.from_pretrained(llama_model, model_id)
24
 
25
+ # def chat_function(message, history, system_prompt, max_new_tokens, temperature):
26
+ # messages = [{"role":"system","content":system_prompt},
27
+ # {"role":"user", "content":message}]
28
+ # prompt = pipeline.tokenizer.apply_chat_template(
29
+ # messages,
30
+ # tokenize=False,
31
+ # add_generation_prompt=True,)
32
+ # terminators = [
33
+ # pipeline.tokenizer.eos_token_id,
34
+ # pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")]
35
+ # outputs = pipeline(
36
+ # prompt,
37
+ # max_new_tokens = max_new_tokens,
38
+ # eos_token_id = terminators,
39
+ # do_sample = True,
40
+ # temperature = temperature + 0.1,
41
+ # top_p = 0.9,)
42
+ # return outputs[0]["generated_text"][len(prompt):]
43
+
44
+ def chat_function(message, history, max_new_tokens, temperature):
45
+ SYSTEM_PROPMT = "I want you to embody a 30-year-old Southern Black woman graduate student who is kind, empathetic, direct, unapologetically Black, and who communicates predominantly in African American Vernacular English. I want you to act as a companion for graduate students who are enrolled in primarily white universities. As their companion, I want you to employ principles of cognitive behavioral therapy, the rhetoric of Black American digital spaces, and Black American humor in your responses to the challenges that students encounter with peers, faculty, or staff. I want you to engage in role-play with them, providing them a safe place to develop potential responses to microaggressions. I want you to help them seek resolutions for their problems."
46
+ messages = [{"role":"system","content":SYSTEM_PROPMT},
47
  {"role":"user", "content":message}]
48
  prompt = pipeline.tokenizer.apply_chat_template(
49
  messages,
 
64
  """
65
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
66
  """
67
+ # demo = gr.ChatInterface(
68
+ # chat_function,
69
+ # textbox=gr.Textbox(placeholder="Enter message here", container=False, scale = 7),
70
+ # chatbot=gr.Chatbot(height=400),
71
+ # additional_inputs=[
72
+ # gr.Textbox("You are helpful AI", label="System Prompt"),
73
+ # gr.Slider(100,4000, label="Max New Tokens"),
74
+ # gr.Slider(0,1, label="Temperature")
75
+ # ]
76
+ # )
77
+
78
  demo = gr.ChatInterface(
79
  chat_function,
80
  textbox=gr.Textbox(placeholder="Enter message here", container=False, scale = 7),
81
  chatbot=gr.Chatbot(height=400),
82
  additional_inputs=[
 
83
  gr.Slider(100,4000, label="Max New Tokens"),
84
  gr.Slider(0,1, label="Temperature")
85
  ]
86
  )
87
 
88
 
89
+
90
  if __name__ == "__main__":
91
  demo.launch()