import gradio as gr
from huggingface_hub import InferenceClient
import torch
from transformers import pipeline

# Inference client setup
client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
pipe = pipeline(
    "text-generation",
    "microsoft/Phi-3.5-mini-instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Global flag to handle cancellation
stop_inference = False


def respond(
    message,
    history: list[tuple[str, str]],
    system_message="You are a friendly Chatbot. Your job is to assist users in emergencies, so reply quickly but accurately.",
    max_tokens=2048,
    temperature=0.7,
    top_p=0.95,
    use_local_model=False,
):
    global stop_inference
    stop_inference = False  # Reset cancellation flag

    # Initialize history if it's None
    if history is None:
        history = []

    # Build the chat messages from the system prompt and prior turns
    # (identical for the local and API code paths).
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""

    if use_local_model:
        # Local inference. Note: the pipeline returns the finished completion
        # in a single pass, so this loop yields once rather than token by token.
        for output in pipe(
            messages,
            max_new_tokens=max_tokens,
            temperature=temperature,
            do_sample=True,
            top_p=top_p,
        ):
            if stop_inference:
                response = "Inference cancelled."
                yield history + [(message, response)]
                return
            response += output["generated_text"][-1]["content"]
            yield history + [(message, response)]  # Yield history + new response
    else:
        # API-based inference, streamed chunk by chunk
        for message_chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            if stop_inference:
                response = "Inference cancelled."
                yield history + [(message, response)]
                return
            token = message_chunk.choices[0].delta.content
            if token:  # Stream chunks may carry an empty/None delta; skip them
                response += token
                yield history + [(message, response)]  # Yield history + new response


def cancel_inference():
    global stop_inference
    stop_inference = True


def clear_conversation():
    # Returning None resets the chatbot component
    return None


# Custom CSS for an enhanced look
custom_css = """
body { font-family: 'Roboto', sans-serif; background-color: #f5f7fa; }
#main-container { max-width: 900px; margin: 0 auto; padding: 20px; background: white; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1); border-radius: 15px; }
.gradio-container { margin-top: 20px; }
.gr-button { background-color: #4a90e2; color: white; border: none; border-radius: 5px; padding: 10px 20px; cursor: pointer; transition: all 0.3s ease; font-weight: bold; }
.gr-button:hover { background-color: #357abd; transform: translateY(-2px); box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); }
.gr-button.secondary { background-color: #f0f0f0; color: #333; }
.gr-button.secondary:hover { background-color: #e0e0e0; }
.gr-button.cancel { background-color: #e74c3c; }
.gr-button.cancel:hover { background-color: #c0392b; }
.gr-form { border: 1px solid #e0e0e0; padding: 15px; border-radius: 10px; background-color: #f9f9f9; }
.gr-box { border-radius: 8px; border: 1px solid #e0e0e0; }
.gr-padded { padding: 15px; }
.gr-chat { font-size: 16px; border: 1px solid #e0e0e0; border-radius: 10px; overflow: hidden; }
.gr-chat .message { padding: 10px 15px; border-bottom: 1px solid #f0f0f0; }
.gr-chat .user { background-color: #e8f0fe; }
.gr-chat .bot { background-color: #ffffff; }
#title { text-align: center; font-size: 2.5em; margin-bottom: 20px; color: #2c3e50; text-shadow: 1px 1px 2px rgba(0,0,0,0.1); }
"""

# Define the interface
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("