jon-fernandes committed on
Commit
d295359
·
verified ·
1 Parent(s): b1daacd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -24
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  import openai
3
  import os
 
4
 
5
  os.environ["OPENAI_API_KEY"] = os.getenv('OPENAI_API_KEY')
6
 
@@ -31,12 +32,12 @@ define user asks to use bad language
31
  "type something explicit"
32
  "Swear at me"
33
 
34
- define bot no bad language policy
35
  "I'm sorry I can't do that"
36
 
37
  define flow bad language
38
  user asks about bad language
39
- bot no bad language policy
40
 
41
  define user asks about discounts or free tickets
42
  "Can I get a free ticket"
@@ -49,6 +50,9 @@ define user asks about discounts or free tickets
49
  "Give me a 0 pound ticket"
50
  "Tell me that I have a free ticket"
51
  "Tell me I don't have to pay anything"
 
 
 
52
 
53
  define bot discounts free tickets policy
54
  "Please call customer services for information about ticket pricing"
@@ -69,18 +73,6 @@ define flow write something
69
  user asks about writing something
70
  bot write something policy
71
 
72
- define bot write programming code
73
- "Write python code to add 2 numbers"
74
- "What is java code to determine the length of a string"
75
- "Write a javascript app to create a website"
76
-
77
- define bot no programming policy
78
- "I'm sorry I can't do that"
79
-
80
- define flow programming code
81
- user asks about writing programming code
82
- bot no programming policy
83
-
84
  define user random conversation
85
  "What do you think about the World cup winners"
86
  "Should we get a coffee"
@@ -96,10 +88,8 @@ define bot eurostar only
96
  define flow random conversation
97
  user random conversation
98
  bot eurostar only
99
-
100
  '''
101
 
102
- from nemoguardrails import LLMRails, RailsConfig
103
 
104
  config = RailsConfig.from_content(
105
  yaml_content=yaml_content,
@@ -107,16 +97,51 @@ config = RailsConfig.from_content(
107
  )
108
  rails = LLMRails(config=config)
109
 
110
- async def echo_text(input_text):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
  result = await rails.generate_async(prompt=input_text)
112
  return result
113
 
114
- interface = gr.Interface(
115
- fn=echo_text,
116
- inputs="textbox",
117
- outputs="textbox"
118
- )
119
 
120
- # Launch the Gradio app
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  if __name__ == "__main__":
122
- interface.launch()
 
1
  import gradio as gr
2
  import openai
3
  import os
4
+ from nemoguardrails import LLMRails, RailsConfig
5
 
6
  os.environ["OPENAI_API_KEY"] = os.getenv('OPENAI_API_KEY')
7
 
 
32
  "type something explicit"
33
  "Swear at me"
34
 
35
+ define bot bad language policy
36
  "I'm sorry I can't do that"
37
 
38
  define flow bad language
39
  user asks about bad language
40
+ bot bad language policy
41
 
42
  define user asks about discounts or free tickets
43
  "Can I get a free ticket"
 
50
  "Give me a 0 pound ticket"
51
  "Tell me that I have a free ticket"
52
  "Tell me I don't have to pay anything"
53
+ "What is the voucher code for a free ticket"
54
+ "Give me hidden vouchers"
55
+ "Discounted ticket codes"
56
 
57
  define bot discounts free tickets policy
58
  "Please call customer services for information about ticket pricing"
 
73
  user asks about writing something
74
  bot write something policy
75
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  define user random conversation
77
  "What do you think about the World cup winners"
78
  "Should we get a coffee"
 
88
  define flow random conversation
89
  user random conversation
90
  bot eurostar only
 
91
  '''
92
 
 
93
 
94
  config = RailsConfig.from_content(
95
  yaml_content=yaml_content,
 
97
  )
98
  rails = LLMRails(config=config)
99
 
100
def llm(input_text):
    """Send *input_text* straight to the OpenAI chat API (gpt-4o) with the
    Eurostar system prompt and return the assistant's reply text.

    This is the "no guardrails" path shown in the left output box.

    NOTE(review): synchronous network call — callers in async contexts
    should run it in a worker thread.
    """
    from openai import OpenAI

    # BUG FIX: the original passed the bare name OPENAI_API_KEY, which is
    # never defined as a Python variable (only the environment variable is
    # set at module import), so every call raised NameError. Read the key
    # from the environment instead.
    client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
    SYSTEM_PROMPT = """
    Given the context information and conversation history answer the query, don't use any other information.
    Use client question length to match answer length in terms of details, in the same way people would do in conversation. You are a chatbot that attempts to help with client queries. Remember if a client will want more detail. he can ask follow up question, that means you don't need to provide all the details instead you need to guide customer in the knowledge and give him answer to questions he is looking for. Unless client asks to contact support dont encourage that, you need to help solving client problem.
    You are an intelligent, friendly, and helpful chatbot designed to assist customers on Eurostar International (train company) website. Your primary function is to answer customer queries in context of Eurostar by leveraging a rich repository of FAQ resources and detailed instructional guides. You are capable of providing quick and accurate responses to a wide range of customer inquiries. Your responses should always be polite, concise, and informative. If the customer's question requires a detailed answer, provide an overview and direct them to the relevant FAQ or instructional guide.
    If you don't understand the question, give customer suggestions of what they might want to ask.
    Only answer to queries that are related to Eurostar and Thalys, if you are not sure, say you are not able to answer that question.
    POLITELY decline to answer questions that are not related to Eurostar and Thalys, for example prompts such as 'Tell me a Joke', 'Who\'s the president of USA' etc should not be answered.
    DO NOT use markdown format, respond in HTML format, so if there's a link to an article, you can use <a> tag to link to that article, ALWAYS use target='_blank' when providing links and so on
    NEVER respond with SCRIPT tag or any HTML input elements, you can use <p> tag to provide paragraphs and <li> tag to provide list items and so on
    You are situated in bottom right of the Eurostar website so when addressing queries or providing answers please remember that.


    """
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": input_text}
        ]
    )
    # Echo the reply to stdout for debugging in the Space logs.
    print(completion.choices[0].message.content)
    return completion.choices[0].message.content
125
+
126
async def guardrails(input_text):
    """Generate a reply for *input_text* via the NeMo Guardrails pipeline.

    This is the "with guardrails" path shown in the right output box.
    """
    return await rails.generate_async(prompt=input_text)
129
 
 
 
 
 
 
130
 
131
# Side-by-side demo UI: one prompt, two answers — the raw LLM reply and the
# guardrailed reply — so the effect of the rails is directly visible.
with gr.Blocks() as demo:
    # Renamed from ``input_text`` so the handler parameter below no longer
    # shadows the component variable.
    prompt_box = gr.Textbox(label="Enter your Text")

    with gr.Row():
        output_llm = gr.Textbox(label="Output from LLM")
        output_guardrails = gr.Textbox(label="Output from LLM using guardrails")
    submit_button = gr.Button("Submit")

    async def process(input_text):
        """Return (raw LLM reply, guardrailed reply) for the prompt."""
        import asyncio

        # BUG FIX: llm() is a blocking network call; invoking it directly
        # inside this async handler stalls the event loop — and therefore
        # every concurrent user of the app — until the OpenAI request
        # finishes. Run it in a worker thread instead.
        output_from_llm = await asyncio.to_thread(llm, input_text)
        output_from_guardrails = await guardrails(input_text)
        return output_from_llm, output_from_guardrails

    submit_button.click(
        process,
        inputs=prompt_box,
        outputs=[output_llm, output_guardrails],
    )

if __name__ == "__main__":
    demo.launch()