MakcukBobrov committed on
Commit 211451d · verified · 1 Parent(s): 4affb23

Update app.py

Files changed (1)
  1. app.py +79 -41
app.py CHANGED
@@ -1,49 +1,87 @@
  import os
  import gradio as gr
- import requests
-
- ollama_url = '0.0.0.0'
-
- console_output = []
-
- def call_ollama_api(prompt):
-     try:
-         response = requests.post(
-             f'{ollama_url}/generate',
-             json={"prompt": prompt}
-         )
-         response.raise_for_status()  # Raise an exception for HTTP errors
-         result = response.json()
-         output = result.get('text', '')
-         console_output.append(f"Prompt: {prompt}\nResponse: {output}")
-         return output
-     except requests.exceptions.RequestException as e:
-         error_message = f"Error: {e}"
-         console_output.append(error_message)
-         return error_message
-
- def chat(prompt):
-     output = call_ollama_api(prompt)
-     return output
-
- def show_console_output():
-     return "\n\n".join(console_output)
  iface = gr.Interface(
-     fn=chat,
      inputs="text",
      outputs="text",
-     title="Ollama Chat",
-     description="Chat with Ollama API and see the responses."
- )
-
- console_iface = gr.Interface(
-     fn=show_console_output,
-     inputs=None,
-     outputs="text",
-     title="Console Output",
-     description="See the console outputs here."
  )

- # Launch a tabbed interface
- gr.TabbedInterface([iface, console_iface], ["Chat", "Console"]).launch()

  import os
+ import threading
+ import time
+ import subprocess
+
+ print("Expanding user path for Ollama")
+ OLLAMA = os.path.expanduser("~/ollama")
+
+ print("Checking if Ollama exists at the path")
+ if not os.path.exists(OLLAMA):
+     print("Ollama not found, downloading it")
+     subprocess.run("curl -L https://ollama.com/download/ollama-linux-amd64 -o ~/ollama", shell=True)
+     os.chmod(OLLAMA, 0o755)
+
+ def ollama_service_thread():
+     print("Starting Ollama service thread")
+     subprocess.run("~/ollama serve", shell=True)
+
+ print("Creating and starting Ollama service thread")
+ OLLAMA_SERVICE_THREAD = threading.Thread(target=ollama_service_thread)
+ OLLAMA_SERVICE_THREAD.start()
+
+ print("Giving Ollama serve a moment to start")
+ time.sleep(10)
+
+ print("Setting model to 'gemma2'")
+ model = "gemma2"
+
+ print(f"Pulling model {model}")
+ subprocess.run(f"~/ollama pull {model}", shell=True)
+
+ ################################################
+ ################################################
+ import copy
  import gradio as gr
+ from ollama import Client
+
+ print("Initializing Ollama client")
+ client = Client(host='http://localhost:11434', timeout=120)
+
+ print("Getting Hugging Face token and model ID from environment variables")
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+ MODEL_ID = os.environ.get("MODEL_ID", "google/gemma-2-9b-it")
+ MODEL_NAME = MODEL_ID.split("/")[-1]

+ print("Setting up title and description for Gradio interface")
+ TITLE = "<h1><center>ollama-Chat</center></h1>"
+ DESCRIPTION = f"""
+ <h3>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></h3>
+ <p>Running on Ollama backend.</p>
+ """
+
+
+ CSS = """
+ .duplicate-button {
+     margin: auto !important;
+     color: white !important;
+     background: black !important;
+     border-radius: 100vh !important;
+ }
+ h3 {
+     text-align: center;
+ }
+ """
+ import gradio as gr
+ from llama_index.llms.ollama import Ollama
+
+ # Initialize the Ollama model
+ llm = Ollama(model="llama3", request_timeout=120.0)
+
+ # Define the function to get the response from Ollama
+ def get_response(question):
+     resp = llm.complete(question)
+     return resp
+
+ # Create the Gradio interface
  iface = gr.Interface(
+     fn=get_response,
      inputs="text",
      outputs="text",
+     title="Ask Paul Graham",
+     description="Enter a question to learn more about Paul Graham."
  )

+ # Launch the Gradio app
+ if __name__ == "__main__":
+     iface.launch()
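
For context, a minimal sketch of querying the local Ollama server that the updated app.py starts, using the `ollama` client the new code initializes. It assumes the defaults visible in the diff above (server at http://localhost:11434, `gemma2` already pulled); the prompt text is illustrative, and the exact response layout can vary with the installed `ollama` client version.

```python
from ollama import Client

# Assumption: the Ollama server launched by app.py is reachable on the default port
# and the gemma2 model has already been pulled, as done in the diff above.
client = Client(host="http://localhost:11434", timeout=120)

# Send one question through the chat endpoint and print the model's reply.
response = client.chat(
    model="gemma2",
    messages=[{"role": "user", "content": "Who is Paul Graham?"}],
)
print(response["message"]["content"])
```

The Gradio handler in the diff talks to the same server through `llama_index.llms.ollama.Ollama`, where `llm.complete(question)` plays the role of the `chat` call shown here.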