Datasculptor cheesyFishes committed

Commit bfc7fc8 (0 parents)

Duplicate from llamaindex/llama_agi_auto

Co-authored-by: Logan Markewich <cheesyFishes@users.noreply.huggingface.co>

Files changed (4):
  1. .gitattributes +34 -0
  2. README.md +14 -0
  3. app.py +90 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Llama AGI Auto
+ emoji: 🤖 🦙
+ colorFrom: gray
+ colorTo: red
+ sdk: streamlit
+ sdk_version: 1.19.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: llamaindex/llama_agi_auto
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,90 @@
+ import os
+ import streamlit as st
+ from langchain.agents import load_tools
+ from langchain.chat_models import ChatOpenAI
+ from langchain.llms import OpenAI
+
+ from llama_agi.execution_agent import ToolExecutionAgent
+ from llama_agi.runners import AutoStreamlitAGIRunner
+ from llama_agi.task_manager import LlamaTaskManager
+
+ from llama_index import ServiceContext, LLMPredictor
+
+
+ st.set_page_config(layout="wide")
+ st.header("🤖 Llama AGI 🦙")
+ st.markdown("This demo uses the [llama-agi](https://github.com/run-llama/llama-lab/tree/main/llama_agi) package to create an AutoGPT-like agent, powered by [LlamaIndex](https://github.com/jerryjliu/llama_index) and Langchain. The AGI has access to tools that search the web and record notes, as it works to achieve an objective. Use the setup tab to configure your LLM settings and initial objective+tasks. Then use the Launch tab to run the AGI. Kill the AGI by refreshing the page.")
+
+ setup_tab, launch_tab = st.tabs(["Setup", "Launch"])
+
+ with setup_tab:
+     if 'init' in st.session_state:
+         st.success("Initialized!")
+
+     st.subheader("LLM Setup")
+     col1, col2, col3 = st.columns(3)
+
+     with col1:
+         openai_api_key = st.text_input("Enter your OpenAI API key here", type="password")
+         llm_name = st.selectbox(
+             "Which LLM?", ["text-davinci-003", "gpt-3.5-turbo", "gpt-4"]
+         )
+
+     with col2:
+         google_api_key = st.text_input("Enter your Google API key here", type="password")
+         model_temperature = st.slider(
+             "LLM Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.0
+         )
+
+     with col3:
+         google_cse_id = st.text_input("Enter your Google CSE ID key here", type="password")
+         max_tokens = st.slider(
+             "LLM Max Tokens", min_value=256, max_value=1024, step=8, value=512
+         )
+
+     st.subheader("AGI Setup")
+     objective = st.text_input("Objective:", value="Solve world hunger")
+     initial_task = st.text_input("Initial Task:", value="Create a list of tasks")
+     max_iterations = st.slider("Iterations until pause", value=1, min_value=1, max_value=10, step=1)
+
+     if st.button('Initialize?'):
+         os.environ['OPENAI_API_KEY'] = openai_api_key
+         os.environ['GOOGLE_API_KEY'] = google_api_key
+         os.environ['GOOGLE_CSE_ID'] = google_cse_id
+         if llm_name == "text-davinci-003":
+             llm = OpenAI(
+                 temperature=model_temperature, model_name=llm_name, max_tokens=max_tokens
+             )
+         else:
+             llm = ChatOpenAI(
+                 temperature=model_temperature, model_name=llm_name, max_tokens=max_tokens
+             )
+
+         service_context = ServiceContext.from_defaults(
+             llm_predictor=LLMPredictor(llm=llm), chunk_size_limit=512
+         )
+
+         st.session_state['task_manager'] = LlamaTaskManager(
+             [initial_task], task_service_context=service_context
+         )
+
+         from llama_agi.tools import search_notes, record_note, search_webpage
+         tools = load_tools(["google-search-results-json"])
+         tools = tools + [search_notes, record_note, search_webpage]
+         st.session_state['execution_agent'] = ToolExecutionAgent(llm=llm, tools=tools)
+
+         st.session_state['initial_task'] = initial_task
+         st.session_state['objective'] = objective
+
+         st.session_state['init'] = True
+         st.experimental_rerun()
+
+ with launch_tab:
+     st.subheader("AGI Status")
+     if st.button(f"Continue for {max_iterations} Steps"):
+         if st.session_state.get('init', False):
+             # launch the auto runner
+             with st.spinner("Running!"):
+                 runner = AutoStreamlitAGIRunner(st.session_state['task_manager'], st.session_state['execution_agent'])
+                 runner.run(st.session_state['objective'], st.session_state['initial_task'], 2, max_iterations=max_iterations)
+
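For reference, the core wiring in app.py reduces to a few constructor calls once the Streamlit form handling is stripped away. The sketch below is not part of the commit: it reuses only the classes and call signatures that appear in app.py above, picks one of the LLM options and the Space's default objective/task, and assumes the API keys are already exported as environment variables (OPENAI_API_KEY, GOOGLE_API_KEY, GOOGLE_CSE_ID) instead of being collected through the UI.

```python
# Minimal sketch, assuming API keys are already set in the environment.
# Only classes and call signatures from app.py above are used here.
from langchain.agents import load_tools
from langchain.chat_models import ChatOpenAI

from llama_agi.execution_agent import ToolExecutionAgent
from llama_agi.runners import AutoStreamlitAGIRunner
from llama_agi.task_manager import LlamaTaskManager
from llama_agi.tools import search_notes, record_note, search_webpage

from llama_index import ServiceContext, LLMPredictor

# Same defaults as the Space's setup tab.
objective = "Solve world hunger"
initial_task = "Create a list of tasks"

# Chat model path from app.py; text-davinci-003 would use langchain's OpenAI instead.
llm = ChatOpenAI(temperature=0.0, model_name="gpt-3.5-turbo", max_tokens=512)
service_context = ServiceContext.from_defaults(
    llm_predictor=LLMPredictor(llm=llm), chunk_size_limit=512
)

# Task manager seeded with the initial task; execution agent gets the search/note tools.
task_manager = LlamaTaskManager([initial_task], task_service_context=service_context)
tools = load_tools(["google-search-results-json"]) + [search_notes, record_note, search_webpage]
execution_agent = ToolExecutionAgent(llm=llm, tools=tools)

# AutoStreamlitAGIRunner renders progress through Streamlit, so this script still
# needs to be launched with `streamlit run`; the third positional argument mirrors
# the value passed in app.py.
runner = AutoStreamlitAGIRunner(task_manager, execution_agent)
runner.run(objective, initial_task, 2, max_iterations=1)
```

The Space itself does the same thing, but defers LLM construction and key handling to the Setup tab and keeps the task manager and execution agent in st.session_state so that repeated "Continue" clicks resume the same run.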
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ google-api-python-client
+ llama-agi==0.1.1