acecalisto3 committed on
Commit
bd89f4a
1 Parent(s): 80dbb6b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -212
app.py CHANGED
@@ -1,213 +1,34 @@
1
# config.py
import os
from huggingface_hub import InferenceClient
from transformers import pipeline

# Initialize clients and models.
# Remote inference client for Mixtral; all chat generation below streams
# through this client (see generate()).
MIXTRAL_CLIENT = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# NOTE(review): this model id points at a GGUF-quantized repo — transformers'
# pipeline() generally cannot load GGUF weights directly, and this pipeline is
# never used elsewhere in the file; confirm it loads / is needed.
LLAMA_PIPELINE = pipeline("text-generation", model="bartowski/Llama-3-8B-Instruct-Coder-GGUF")

# Names of the selectable AI agent personas offered by both UIs.
AGENTS = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
    "CODE_REVIEW_ASSISTANT",
    "CONTENT_WRITER_EDITOR",
    "QUESTION_GENERATOR",
    "HUGGINGFACE_FILE_DEV",
]
19
-
20
- # ai_agent.py
21
- import random
22
- from typing import List, Dict, Tuple
23
-
24
class AIAgent:
    """A named agent persona carrying a description and a skill list."""

    def __init__(self, name: str, description: str, skills: List[str]):
        self.name = name
        self.description = description
        self.skills = skills

    def create_agent_prompt(self) -> str:
        """Render this agent's self-introduction system prompt."""
        bullet_lines = '\n'.join(f"* {skill}" for skill in self.skills)
        return f"""
As an elite expert developer, my name is {self.name}.
I possess a comprehensive understanding of the following areas:
{bullet_lines}
I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications.
Please feel free to ask any questions or present any challenges you may encounter.
"""

    def autonomous_build(self, chat_history: List[Tuple[str, str]], workspace_projects: Dict[str, Dict]) -> Tuple[str, str]:
        """Summarize chat history plus workspace state and suggest a next step."""
        chat_lines = [f"User: {u}\nAgent: {a}" for u, a in chat_history]
        summary = "Chat History:\n" + "\n".join(chat_lines)
        project_lines = [f"{p}: {details}" for p, details in workspace_projects.items()]
        summary += "\n\nWorkspace Projects:\n" + "\n".join(project_lines)
        next_step = "Based on the current state, the next logical step is to implement the main application logic."
        return summary, next_step
45
-
46
- # utils.py
47
- import os
48
- import subprocess
49
- from typing import List, Tuple
50
-
51
def format_prompt(message: str, history: List[Tuple[str, str]]) -> str:
    """Build a Mixtral-style ``[INST]`` prompt from history plus the new message."""
    pieces = ["<s>"]
    for question, answer in history:
        pieces.append(f"[INST] {question} [/INST] {answer}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
57
-
58
def generate(prompt: str, history: List[Tuple[str, str]], agent_name: str = AGENTS[0], sys_prompt: str = "",
             temperature: float = 0.9, max_new_tokens: int = 256, top_p: float = 0.95, repetition_penalty: float = 1.0):
    """Stream a completion from the Mixtral client, yielding the growing text.

    Yields the accumulated output after every streamed token.
    NOTE(review): ``agent_name`` is accepted but never used in the body —
    presumably it was meant to select a persona prompt; confirm with callers.
    """
    sampling_kwargs = {
        "temperature": temperature,
        "max_new_tokens": max_new_tokens,
        "top_p": top_p,
        "repetition_penalty": repetition_penalty,
        "do_sample": True,
        # Fresh random seed per call so repeated prompts vary.
        "seed": random.randint(1, 1111111111111111),
    }
    full_prompt = format_prompt(f"{sys_prompt}, {prompt}", history)
    stream = MIXTRAL_CLIENT.text_generation(full_prompt, **sampling_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for chunk in stream:
        output += chunk.token.text
        yield output
    return output
76
-
77
def terminal_interface(command: str, project_name: str) -> str:
    """Run a shell command inside the project directory and return its output.

    Returns stdout on success, stderr on a non-zero exit code, and the
    exception text if the command could not be launched at all.
    """
    try:
        completed = subprocess.run(
            command, shell=True, capture_output=True, text=True, cwd=project_name
        )
        # Success path keeps stdout; failures surface stderr to the caller.
        return completed.stdout if completed.returncode == 0 else completed.stderr
    except Exception as exc:
        return str(exc)
83
-
84
def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
    """Write ``code`` to ``file_name`` inside the named project directory.

    Creates the project directory under the current working directory if
    needed; an existing file of the same name is overwritten.
    """
    workspace_dir = os.path.join(os.getcwd(), project_name)
    os.makedirs(workspace_dir, exist_ok=True)
    target_path = os.path.join(workspace_dir, file_name)
    with open(target_path, 'w') as handle:
        handle.write(code)
    return f"Added {file_name} to {project_name}"
91
-
92
- # main.py
93
  import streamlit as st
94
- import gradio as gr
95
- from config import AGENTS
96
- from ai_agent import AIAgent
97
- from utils import generate, terminal_interface, add_code_to_workspace
98
-
99
- # Streamlit UI
100
# Streamlit UI
def main():
    """Render the DevToolKit Streamlit page.

    Fix: the original read ``st.session_state.workspace_projects``,
    ``chat_history``, ``terminal_history`` and ``current_state`` without ever
    initializing them, which raises AttributeError on the very first run.
    Defaults are now seeded before any widget touches them. Executed
    terminal commands and added files are also recorded in session state so
    the "Terminal History" and project-file listings are actually populated
    (previously they could never show anything).
    """
    # Seed session defaults so every later read is safe.
    st.session_state.setdefault('workspace_projects', {})
    st.session_state.setdefault('chat_history', [])
    st.session_state.setdefault('terminal_history', [])
    st.session_state.setdefault('current_state', {})

    st.title("DevToolKit: AI-Powered Development Environment")

    # Project Management
    st.header("Project Management")
    project_name = st.text_input("Enter project name:")
    if st.button("Create Project"):
        if project_name not in st.session_state.workspace_projects:
            st.session_state.workspace_projects[project_name] = {'files': []}
            st.success(f"Created project: {project_name}")
        else:
            st.warning(f"Project {project_name} already exists")

    # Code Addition
    st.subheader("Add Code to Workspace")
    code_to_add = st.text_area("Enter code to add to workspace:")
    file_name = st.text_input("Enter file name (e.g. 'app.py'):")
    if st.button("Add Code"):
        add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
        # Register the file so the "Workspace Projects" listing reflects it.
        if project_name in st.session_state.workspace_projects:
            st.session_state.workspace_projects[project_name]['files'].append(file_name)
        st.success(add_code_status)

    # Terminal Interface
    st.subheader("Terminal (Workspace Context)")
    terminal_input = st.text_input("Enter a command within the workspace:")
    if st.button("Run Command"):
        terminal_output = terminal_interface(terminal_input, project_name)
        # Record the command so the "Terminal History" section has content.
        st.session_state.terminal_history.append((terminal_input, terminal_output))
        st.code(terminal_output, language="bash")

    # Chat Interface
    st.subheader("Chat with DevToolKit for Guidance")
    chat_input = st.text_area("Enter your message for guidance:")
    if st.button("Get Guidance"):
        # generate() is a generator; next() pulls the first streamed chunk.
        chat_response = next(generate(chat_input, st.session_state.chat_history))
        st.session_state.chat_history.append((chat_input, chat_response))
        st.write(f"DevToolKit: {chat_response}")

    # Display Chat History
    st.subheader("Chat History")
    for user_input, response in st.session_state.chat_history:
        st.write(f"User: {user_input}")
        st.write(f"DevToolKit: {response}")

    # Display Terminal History
    st.subheader("Terminal History")
    for command, output in st.session_state.terminal_history:
        st.write(f"Command: {command}")
        st.code(output, language="bash")

    # Display Projects and Files
    st.subheader("Workspace Projects")
    for project, details in st.session_state.workspace_projects.items():
        st.write(f"Project: {project}")
        for file in details['files']:
            st.write(f"  - {file}")

    # Chat with AI Agents
    st.subheader("Chat with AI Agents")
    selected_agent = st.selectbox("Select an AI agent", AGENTS)
    agent_chat_input = st.text_area("Enter your message for the agent:")
    if st.button("Send to Agent"):
        agent_chat_response = next(generate(agent_chat_input, st.session_state.chat_history, agent_name=selected_agent))
        st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
        st.write(f"{selected_agent}: {agent_chat_response}")

    # Automate Build Process
    st.subheader("Automate Build Process")
    if st.button("Automate"):
        agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
        summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
        st.write("Autonomous Build Summary:")
        st.write(summary)
        st.write("Next Step:")
        st.write(next_step)

    # Display current state for debugging
    st.sidebar.subheader("Current State")
    st.sidebar.json(st.session_state.current_state)

if __name__ == "__main__":
    main()
180
-
181
- # gradio_interface.py
182
- import gradio as gr
183
- from config import AGENTS
184
- from utils import generate
185
-
186
def create_gradio_interface():
    """Build the Gradio ChatInterface for the DevToolKit assistant.

    Fix: the "Max new tokens" slider's default was ``1048*10`` (10480),
    above its own ``maximum=1000*10`` (10000) — an out-of-range default.
    The default is now clamped to the slider maximum.
    """
    additional_inputs = [
        gr.Dropdown(label="Agents", choices=list(AGENTS), value=AGENTS[0], interactive=True),
        gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
        gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
        gr.Slider(label="Max new tokens", value=1000*10, minimum=0, maximum=1000*10, step=64, interactive=True, info="The maximum numbers of new tokens"),
        gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
        gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
    ]

    # Example rows: [message, agent, sys_prompt, temperature, max_new_tokens, top_p]
    examples = [
        ["Create a simple web application using Flask", AGENTS[0], None, None, None, None],
        ["Generate a Python script to perform a linear regression analysis", AGENTS[2], None, None, None, None],
        ["Create a Dockerfile for a Node.js application", AGENTS[1], None, None, None, None],
    ]

    return gr.ChatInterface(
        fn=generate,
        chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
        additional_inputs=additional_inputs,
        title="DevToolKit AI Assistant",
        examples=examples,
        concurrency_limit=20,
    )

if __name__ == "__main__":
    interface = create_gradio_interface()
    interface.launch(show_api=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from git import Repo
from git_monitor import GitMonitor
from huggingface_models import HuggingFaceModels

# Initialize GitHub and Hugging Face modules
github_monitor = GitMonitor()
huggingface_models = HuggingFaceModels()

# Title and sidebar
st.title('GitHub-HF Monitor')
# Fix: ``st.sidebar`` is a container object, not a callable — calling it as
# ``st.sidebar('Select a repository')`` raises a TypeError at startup.
st.sidebar.header('Select a repository')

# Repository selection
selected_repo = st.sidebar.selectbox('', ['enricoros/big-agi', 'Ig0tU/miagiii'])

# Repository monitoring
if st.button('Monitor'):
    if selected_repo == 'enricoros/big-agi':
        issues = github_monitor.get_issues(selected_repo)
        for issue in issues:
            st.write(f"Issue {issue.number}: {issue.title}")
            st.write(issue.body)
            # Replicate and resolve issues.
            # Fix: the original created one st.button('Replicate & Resolve')
            # per issue with no key, which raises DuplicateWidgetID as soon as
            # more than one issue is listed; each button now has a unique key.
            # NOTE(review): a button nested inside another button's block is
            # reset by the rerun its own click triggers, so this handler may
            # never fire — consider restructuring with st.session_state.
            if st.button('Replicate & Resolve', key=f"resolve_{issue.number}"):
                github_monitor.clone_repo(selected_repo)
                github_monitor.replicate_issue(issue)
                code_changes = huggingface_models.resolve_issue(issue)
                github_monitor.apply_code_changes(code_changes)
                github_monitor.push_changes()
                github_monitor.create_pull_request(selected_repo)
                st.write('Issue resolved and pull request created!')
    elif selected_repo == 'Ig0tU/miagiii':
        st.write('Monitoring the Ig0tU/miagiii repository. No issues to display.')