import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image, ImageEnhance
import torch
import torch.nn as nn  # needed for the BatchNorm2d check in build_model
import os
import numpy as np
from torchvision import transforms
import torch.nn.functional as F

# --- Model 1: AI Chatbot Setup ---
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Personalities for AI Chatbot
PERSONALITIES = {
    "Friendly": "You are a friendly and helpful assistant.",
    "Professional": "You are a professional and concise assistant.",
    "Humorous": "You are a witty and humorous assistant.",
    "Empathetic": "You are a compassionate and empathetic assistant.",
}


# Chatbot Functions
def respond(message, history, personality):
    """Send the chat history plus the new message to the model and append its reply."""
    system_message = PERSONALITIES[personality]
    messages = [{"role": "system", "content": system_message}]
    for user_message, bot_message in history:
        messages.append({"role": "user", "content": user_message})
        messages.append({"role": "assistant", "content": bot_message})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=1024)
    bot_message = response["choices"][0]["message"]["content"]
    history.append((message, bot_message))
    return history, ""


def generate_fun_fact(history):
    message = "Give me a fun fact."
    system_message = "You are a helpful assistant that shares fun facts when asked."
    messages = [{"role": "system", "content": system_message}]
    for user_message, bot_message in history:
        messages.append({"role": "user", "content": user_message})
        messages.append({"role": "assistant", "content": bot_message})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=256)
    fun_fact = response["choices"][0]["message"]["content"]
    history.append((message, fun_fact))
    return history


def generate_daily_challenge(history):
    message = "Give me a daily challenge."
    system_message = "You are a helpful assistant that gives fun or motivational daily challenges."
    messages = [{"role": "system", "content": system_message}]
    for user_message, bot_message in history:
        messages.append({"role": "user", "content": user_message})
        messages.append({"role": "assistant", "content": bot_message})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=256)
    challenge = response["choices"][0]["message"]["content"]
    history.append((message, challenge))
    return history


def generate_inspiration(history):
    message = "Give me an inspirational quote or motivational message."
    system_message = "You are a helpful assistant that provides inspiring or motivational quotes when asked."
    messages = [{"role": "system", "content": system_message}]
    for user_message, bot_message in history:
        messages.append({"role": "user", "content": user_message})
        messages.append({"role": "assistant", "content": bot_message})
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(messages, max_tokens=256)
    inspiration = response["choices"][0]["message"]["content"]
    history.append((message, inspiration))
    return history


def clear_conversation():
    return [], ""


# --- Model 2: Background Remover (DIS IS-Net) Setup ---
# Fetch the DIS repo so its model code and helpers are importable.
os.system("git clone https://github.com/xuebinqin/DIS")
os.system("mv DIS/IS-Net/* .")

from data_loader_cache import normalize, im_reader, im_preprocess
from models import *  # provides ISNetDIS

device = 'cuda' if torch.cuda.is_available() else 'cpu'

if not os.path.exists("saved_models"):
    os.mkdir("saved_models")
    os.system("mv isnet.pth saved_models/")


class GOSNormalize(object):
    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std

    def __call__(self, image):
        image = normalize(image, self.mean, self.std)
        return image


transform = transforms.Compose([GOSNormalize([0.5, 0.5, 0.5], [1.0, 1.0, 1.0])])


def load_image(im_path, hypar):
    im = im_reader(im_path)
    im, im_shp = im_preprocess(im, hypar["cache_size"])
    im = torch.divide(im, 255.0)
    shape = torch.from_numpy(np.array(im_shp))
    return transform(im).unsqueeze(0), shape.unsqueeze(0)


def build_model(hypar, device):
    net = hypar["model"]
    # For half-precision inference, keep BatchNorm layers in float32 for numerical stability.
    if hypar["model_digit"] == "half":
        net.half()
        for layer in net.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.float()
    net.to(device)
    if hypar["restore_model"] != "":
        net.load_state_dict(
            torch.load(hypar["model_path"] + "/" + hypar["restore_model"], map_location=device)
        )
    net.eval()
    return net


def predict(net, inputs_val, shapes_val, hypar, device):
    """Run the network and return the predicted mask at the original image size."""
    net.eval()
    if hypar["model_digit"] == "full":
        inputs_val = inputs_val.type(torch.FloatTensor)
    else:
        inputs_val = inputs_val.type(torch.HalfTensor)
    # torch.autograd.Variable is deprecated; a plain tensor under no_grad is equivalent here.
    inputs_val = inputs_val.to(device)
    with torch.no_grad():
        ds_val = net(inputs_val)[0]
    pred_val = ds_val[0][0, :, :, :]
    # Resize back to the original image size (F.upsample is deprecated; use F.interpolate).
    pred_val = torch.squeeze(
        F.interpolate(
            torch.unsqueeze(pred_val, 0),
            (shapes_val[0][0], shapes_val[0][1]),
            mode='bilinear',
        )
    )
    # Min-max normalize the mask to [0, 1].
    ma = torch.max(pred_val)
    mi = torch.min(pred_val)
    pred_val = (pred_val - mi) / (ma - mi)
    if device == 'cuda':
        torch.cuda.empty_cache()
    return (pred_val.detach().cpu().numpy() * 255).astype(np.uint8)


hypar = {
    "model_path": "./saved_models",
    "restore_model": "isnet.pth",
    "interm_sup": False,
    "model_digit": "full",  # "full" = float32, "half" = float16
    "seed": 0,
    "cache_size": [1024, 1024],
    "input_size": [1024, 1024],
    "crop_size": [1024, 1024],
    "model": ISNetDIS(),
}

net = build_model(hypar, device)


def inference(image):
    image_path = image
    image_tensor, orig_size = load_image(image_path, hypar)
    mask = predict(net, image_tensor, orig_size, hypar, device)
    pil_mask = Image.fromarray(mask).convert('L')
    im_rgb = Image.open(image).convert("RGB")
    im_rgba = im_rgb.copy()
    im_rgba.putalpha(pil_mask)  # use the predicted mask as the alpha channel
    return [im_rgba, pil_mask]


# Functions Added From Team
def rotate_image(image, degrees):
    img = Image.open(image).rotate(degrees)
    return img


def resize_image(image, width, height):
    # PIL requires integer dimensions; Gradio number inputs may arrive as floats.
    img = Image.open(image).resize((int(width), int(height)))
    return img


def convert_to_grayscale(image):
    img = Image.open(image).convert('L')
    return img


def adjust_brightness(image, brightness_factor):
    img = Image.open(image)
    enhancer = ImageEnhance.Brightness(img)
    img_enhanced = enhancer.enhance(brightness_factor)
    return img_enhanced


# Custom CSS Added From Team
custom_css = """
body {
    background-color: #f0f0f0;
}
.gradio-container {
    max-width: 900px;
    margin: auto;
    background-color: #ffffff;
    padding: 20px;
    border-radius: 12px;
    box-shadow: 0px 4px 16px rgba(0, 0, 0, 0.2);
}
button.lg {
    background-color: #4CAF50;
    color: white;
    border: none;
    padding: 10px 20px;
    text-align: center;
    text-decoration: none;
    display: inline-block;
    font-size: 16px;
    margin: 4px 2px;
    transition-duration: 0.4s;
    cursor: pointer;
    border-radius: 8px;
}
button.lg:hover {
    background-color: #45a049;
    color: white;
}
"""

# Used Some Codes From Yang's Chatbot
with gr.Blocks(css=custom_css) as background_remover_interface:
    gr.Markdown("