# setup_llama.py
import os
import subprocess
import time
import requests
os.environ["OLLAMA_HOST"] = "0.0.0.0"
os.environ["NGROK_AUTHTOKEN"] = None

def run_command(command, check=True):
    """Run a shell command and handle output/errors."""
    try:
        subprocess.run(command, shell=True, check=check)
    except subprocess.CalledProcessError as e:
        print(f"Error running command: {command}\n{e}")
        exit(1)

def download_model():
    """Download the Llama model from HuggingFace."""
    model_url = "https://huggingface.co/Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2-GGUF/resolve/main/Llama-3.1-8B-Lexi-Uncensored_V2_Q4.gguf"
    print(f"Downloading model from {model_url}...")
    run_command(f"wget {model_url} -O llama.gguf")

def start_ollama_service():
    """Start `ollama serve` in the background and wait until it responds."""
    print("Starting Ollama service...")
    # The trailing '&' backgrounds the server in the spawned shell, so
    # subprocess.run returns immediately.
    run_command("ollama serve &")
    # Poll the Ollama root endpoint until the server is reachable.
    while True:
        try:
            response = requests.get("http://localhost:11434")
            if response.status_code == 200:
                break
        except requests.ConnectionError:
            time.sleep(2)

def create_model():
    """Create the model in Ollama from a Modelfile in the working directory."""
    print("Creating the model in Ollama...")
    run_command("ollama create llama -f Modelfile")

def setup_ngrok():
    """Install pyngrok, register the auth token, and tunnel port 11434."""
    run_command("pip install pyngrok")
    ngrok_authtoken = os.getenv("NGROK_AUTHTOKEN")
    if not ngrok_authtoken:
        print("Error: NGROK_AUTHTOKEN secret not found.")
        exit(1)
    # Assumes an ngrok agent is available on PATH (pyngrok, installed above,
    # provides a launcher). The command below uses ngrok v3 syntax; the v2
    # equivalent was `ngrok authtoken <token>`.
    run_command(f"ngrok config add-authtoken {ngrok_authtoken}")
    run_command("ngrok http 11434 &")
    # Wait for the public URL from ngrok's local inspection API
    public_url = None
    while not public_url:
        try:
            tunnels_response = requests.get("http://127.0.0.1:4040/api/tunnels")
            public_url = tunnels_response.json()['tunnels'][0]['public_url']
        except (requests.ConnectionError, KeyError, IndexError):
            time.sleep(2)
    print(f"Ngrok public URL: {public_url}")
    return public_url
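
# Note: since pyngrok is installed above, an alternative to shelling out to the
# ngrok CLI is its Python API. A minimal sketch (assumed, not what setup_ngrok()
# does) would be:
#
#     from pyngrok import ngrok
#     ngrok.set_auth_token(os.environ["NGROK_AUTHTOKEN"])
#     tunnel = ngrok.connect(11434, "http")
#     print(tunnel.public_url)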

if __name__ == "__main__":
    # The `ollama` package on PyPI is the Python client library; the Ollama
    # server binary itself is assumed to be installed already, since
    # `ollama serve` and `ollama create` call the CLI.
    run_command("pip install ollama")
    download_model()
    start_ollama_service()
    # `ollama create` talks to the running server, so it must come after startup.
    create_model()
    setup_ngrok()
    print("Ollama service is running and accessible through ngrok.")