amirmmahdavikia committed on
Commit
4639678
·
verified ·
1 Parent(s): 198dec9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -10
app.py CHANGED
@@ -1,4 +1,5 @@
1
  # Load the packages and configurations
 
2
  import pandas as pd
3
  from retriever import BM25Plus
4
  import streamlit as st
@@ -6,7 +7,7 @@ from groq import Groq
6
 
7
  # Configure GROQ API_KEY
8
  client = Groq(
9
- api_key="GROQ_API_KEY",
10
  )
11
 
12
  # RTL formatting of the streamlit
@@ -87,18 +88,11 @@ if prompt := st.chat_input():
87
 
88
  prompt = get_prompt(prompt, topics)
89
  response = client.chat.completions.create(
90
- #
91
- # Required parameters
92
- #
93
  messages=[
94
- # Set an optional system message. This sets the behavior of the
95
- # assistant and can be used to provide specific instructions for
96
- # how it should behave throughout the conversation.
97
  {
98
  "role": "system",
99
  "content": "تو یک دستیار سودمند هستی."
100
  },
101
- # Set a user message for the assistant to respond to.
102
  {
103
  "role": "user",
104
  "content": prompt,
@@ -107,8 +101,6 @@ if prompt := st.chat_input():
107
 
108
  # The language model which will generate the completion.
109
  model="llama3-70b-8192",
110
- #
111
- # Optional parameters
112
  )
113
  msg = response.choices[0].message.content
114
  st.session_state.messages.append({"role": "assistant", "content": msg})
 
1
  # Load the packages and configurations
2
+ import os
3
  import pandas as pd
4
  from retriever import BM25Plus
5
  import streamlit as st
 
7
 
8
  # Configure GROQ API_KEY
9
  client = Groq(
10
+ api_key=os.environ.get("GROQ_API_KEY"),
11
  )
12
 
13
  # RTL formatting of the streamlit
 
88
 
89
  prompt = get_prompt(prompt, topics)
90
  response = client.chat.completions.create(
 
 
 
91
  messages=[
 
 
 
92
  {
93
  "role": "system",
94
  "content": "تو یک دستیار سودمند هستی."
95
  },
 
96
  {
97
  "role": "user",
98
  "content": prompt,
 
101
 
102
  # The language model which will generate the completion.
103
  model="llama3-70b-8192",
 
 
104
  )
105
  msg = response.choices[0].message.content
106
  st.session_state.messages.append({"role": "assistant", "content": msg})