andreped committed
Commit 14bade9 · 1 Parent(s): ff2d6c7

Added linting; format workflow; formatted code

Files changed (5)
  1. .github/workflows/format.yml +26 -0
  2. app.py +22 -19
  3. setup.cfg +14 -0
  4. shell/format.sh +4 -0
  5. shell/lint.sh +23 -0
.github/workflows/format.yml ADDED
@@ -0,0 +1,26 @@
+name: Check formatting
+
+on:
+  push:
+    branches:
+      - '*'
+  pull_request:
+    branches:
+      - '*'
+  workflow_dispatch:
+
+jobs:
+  test:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v1
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.9
+
+      - name: Install lint dependencies
+        run: pip install black==22.3.0 isort==5.10.1 flake8==4.0.1
+
+      - name: Lint the code
+        run: sh shell/lint.sh
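
The workflow pins exact tool versions and delegates the actual checks to shell/lint.sh, so CI and a local run stay in sync. A minimal sketch of reproducing the CI check locally, assuming Python 3.9 to match the workflow:

```bash
# Same pinned versions the workflow installs
pip install black==22.3.0 isort==5.10.1 flake8==4.0.1

# Same entry point CI runs; exits non-zero on the first violation
sh shell/lint.sh
```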
app.py CHANGED
@@ -1,22 +1,22 @@
-import streamlit as st
-from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context
-from llama_index.llms import AzureOpenAI
-from llama_index.embeddings import OpenAIEmbedding
 import json
 import os
-from llama_index import SimpleDirectoryReader
-
+
+import streamlit as st
+from llama_index import ServiceContext
+from llama_index import SimpleDirectoryReader
+from llama_index import VectorStoreIndex
+from llama_index import set_global_service_context
+from llama_index.embeddings import OpenAIEmbedding
+from llama_index.llms import AzureOpenAI
 
 # Initialize message history
 st.header("Chat with André's research 💬 📚")
 
-if "messages" not in st.session_state.keys(): # Initialize the chat message history
-    st.session_state.messages = [
-        {"role": "assistant", "content": "Ask me a question about André's research!"}
-    ]
+if "messages" not in st.session_state.keys():  # Initialize the chat message history
+    st.session_state.messages = [{"role": "assistant", "content": "Ask me a question about André's research!"}]
 
 # Load config values
-with open(r'config.json') as config_file:
+with open(r"config.json") as config_file:
     config_details = json.load(config_file)
 
 
@@ -29,23 +29,26 @@ def load_data():
         engine=config_details["ENGINE"],
         temperature=0.5,
         api_key=os.getenv("OPENAI_API_KEY"),
-        api_base=config_details['OPENAI_API_BASE'],
+        api_base=config_details["OPENAI_API_BASE"],
         api_type="azure",
-        api_version=config_details['OPENAI_API_VERSION'],
-        system_prompt="You are an expert on André's research and your job is to answer technical questions. Assume that all questions are related to André's research. Keep your answers technical and based on facts – do not hallucinate features."
+        api_version=config_details["OPENAI_API_VERSION"],
+        system_prompt="You are an expert on André's research and your job is to answer"
+        "technical questions. Assume that all questions are related to"
+        "André's research. Keep your answers technical and based on facts"
+        " – do not hallucinate features.",
     )
     # You need to deploy your own embedding model as well as your own chat completion model
     embed_model = OpenAIEmbedding(
         model="text-embedding-ada-002",
         deployment_name=config_details["ENGINE_EMBEDDING"],
         api_key=os.getenv("OPENAI_API_KEY"),
-        api_base=config_details['OPENAI_API_BASE'],
+        api_base=config_details["OPENAI_API_BASE"],
         api_type="azure",
-        api_version=config_details['OPENAI_API_VERSION'],
+        api_version=config_details["OPENAI_API_VERSION"],
     )
     service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
     set_global_service_context(service_context)
-    index = VectorStoreIndex.from_documents(documents) #, service_context=service_context)
+    index = VectorStoreIndex.from_documents(documents)  # , service_context=service_context)
     return index
 
 
@@ -53,10 +56,10 @@ def main():
     index = load_data()
     chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
 
-    if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
+    if prompt := st.chat_input("Your question"):  # Prompt for user input and save to chat history
         st.session_state.messages.append({"role": "user", "content": prompt})
 
-    for message in st.session_state.messages: # Display the prior chat messages
+    for message in st.session_state.messages:  # Display the prior chat messages
         with st.chat_message(message["role"]):
             st.write(message["content"])
 
@@ -67,7 +70,7 @@ def main():
                 response = chat_engine.chat(prompt)
                 st.write(response.response)
                 message = {"role": "assistant", "content": response.response}
-                st.session_state.messages.append(message) # Add response to message history
+                st.session_state.messages.append(message)  # Add response to message history
 
 
 if __name__ == "__main__":
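
Two details in this diff are worth spelling out. First, the reflowed system_prompt relies on Python's implicit string-literal concatenation, and as wrapped above the segments join without separating spaces (e.g. "...to answer" + "technical questions..." yields "answertechnical"), so the wrapped pieces need leading or trailing spaces to reproduce the original sentence. Second, the app pulls its Azure deployment details from config.json, with only OPENAI_API_KEY coming from the environment. A hypothetical example covering the four keys the code reads — the file name and key names come from app.py, every value is a placeholder:

```bash
# Hypothetical values only - substitute your own Azure OpenAI deployment details
cat > config.json <<'EOF'
{
    "ENGINE": "my-chat-deployment",
    "ENGINE_EMBEDDING": "my-embedding-deployment",
    "OPENAI_API_BASE": "https://my-resource.openai.azure.com/",
    "OPENAI_API_VERSION": "2023-05-15"
}
EOF
```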
setup.cfg ADDED
@@ -0,0 +1,14 @@
+[metadata]
+description-file = README.md
+
+[isort]
+force_single_line=True
+known_first_party=gradient_accumulator
+line_length=120
+profile=black
+
+[flake8]
+# imported but unused in __init__.py, that's ok.
+per-file-ignores=*__init__.py:F401
+ignore=E203,W503,W605,F632,E266,E731,E712,E741
+max-line-length=120
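
A side note on how these settings are picked up (an observation, not part of the commit): flake8 and isort both discover their sections in setup.cfg automatically, but black does not read setup.cfg at all — it only reads pyproject.toml — which is presumably why the shell scripts below pass --line-length 120 explicitly:

```bash
# flake8 and isort find their settings in setup.cfg on their own
flake8 app.py
isort --check app.py

# black ignores setup.cfg, so the line length must go on the command line
black --check --line-length 120 app.py
```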
shell/format.sh ADDED
@@ -0,0 +1,4 @@
+#!/bin/bash
+isort --sl app.py
+black --line-length 120 app.py
+flake8 app.py
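
Unlike lint.sh below, this script rewrites app.py in place: isort fixes the import order, black reformats, and the final flake8 pass reports anything the formatters cannot fix automatically. It is the remedy lint.sh points to on failure:

```bash
sh shell/format.sh
```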
shell/lint.sh ADDED
@@ -0,0 +1,23 @@
+#!/bin/bash
+isort --check --sl -c app.py
+if ! [ $? -eq 0 ]
+then
+    echo "Please run \"sh shell/format.sh\" to format the code."
+    exit 1
+fi
+echo "no issues with isort"
+flake8 app.py
+if ! [ $? -eq 0 ]
+then
+    echo "Please fix the code style issue."
+    exit 1
+fi
+echo "no issues with flake8"
+black --check --line-length 120 app.py
+if ! [ $? -eq 0 ]
+then
+    echo "Please run \"sh shell/format.sh\" to format the code."
+    exit 1
+fi
+echo "no issues with black"
+echo "linting success!"