import torch
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from transformers import pipeline
import gradio as gr
from sklearn.cluster import KMeans
from colorsys import rgb_to_hsv

# Emotion detection pipeline for text (if any text is included in assets)
emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    return_all_scores=True,
)


# Function to analyze colors in an image
def analyze_colors(image):
    try:
        # Ensure the image is in RGB format
        if image.mode != "RGB":
            image = image.convert("RGB")

        # Resize the image for faster processing
        image = image.resize((150, 150))

        # Convert to numpy array
        img_array = np.array(image)

        # Reshape image to be a list of pixels
        pixels = img_array.reshape((-1, 3))
        kmeans = KMeans(n_clusters=5, random_state=0)
        kmeans.fit(pixels)
        dominant_colors = kmeans.cluster_centers_

        # Plot the dominant colors for visualization (only useful with a GUI backend)
        plt.figure(figsize=(8, 6))
        plt.imshow([dominant_colors.astype(int)])
        plt.axis('off')
        plt.show()
        plt.close()  # free the figure so repeated calls don't accumulate memory

        return dominant_colors
    except Exception as e:
        print(f"Error in analyze_colors: {e}")
        return None


# Function to analyze emotions based on color (hue, brightness, saturation) and stress with weights
def color_emotion_analysis(dominant_colors):
    try:
        emotions = []
        stress_levels = []

        # Weight coefficients for each factor
        brightness_weight = 0.5
        hue_weight = 0.3
        saturation_weight = 0.2

        for color in dominant_colors:
            # Normalize RGB values to 0-1 range
            r, g, b = color / 255.0

            # Convert RGB to HSV
            h, s, v = rgb_to_hsv(r, g, b)  # Hue, Saturation, Value (brightness)

            # Calculate weighted emotion and stress levels
            weighted_brightness = v * brightness_weight
            weighted_hue = h * hue_weight
            weighted_saturation = s * saturation_weight

            # Combine weighted factors
            score = weighted_brightness + weighted_hue + weighted_saturation

            # Analyze emotion and stress based on combined score
            if score < 0.3:  # Lower combined score, less rigid "high stress"
                emotions.append("Sadness")
                stress_levels.append("Moderate-High Stress")
            elif 0.3 <= score < 0.5:
                emotions.append("Neutral")
                stress_levels.append("Moderate Stress")
            elif 0.5 <= score < 0.7:
                emotions.append("Okay")
                stress_levels.append("Low Stress")
            elif 0.7 <= score < 0.85:
                emotions.append("Happiness")
                stress_levels.append("Very Low Stress")
            else:
                emotions.append("Very Happy")
                stress_levels.append("No Stress")

        return emotions, stress_levels
    except Exception as e:
        print(f"Error in color_emotion_analysis: {e}")
        return ["Error analyzing emotions"], ["Error analyzing stress levels"]


# Function to analyze patterns and shapes using OpenCV
def analyze_patterns(image):
    try:
        # Convert to grayscale for edge detection
        gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray_image, 100, 200)

        # Count edge pixels (chaos metric)
        num_edges = np.sum(edges > 0)

        if num_edges > 10000:  # Arbitrary threshold for "chaos"
            return "Chaotic patterns - possibly distress"
        else:
            return "Orderly patterns - possibly calm"
    except Exception as e:
        print(f"Error in analyze_patterns: {e}")
        return "Error analyzing patterns"


# Main function to process image and analyze emotional expression
def analyze_emotion_from_image(image):
    try:
        # Ensure the input image is a PIL image
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)  # Convert to PIL Image if it's a NumPy array

        # Analyze colors
        dominant_colors = analyze_colors(image)
        if dominant_colors is None:
            return "Error analyzing colors"
        color_emotions, stress_levels = color_emotion_analysis(dominant_colors)

        # Analyze patterns
        pattern_analysis = analyze_patterns(image)

        return (
            f"Color-based emotions: {color_emotions}\n"
            f"Stress levels: {stress_levels}\n"
            f"Pattern analysis: {pattern_analysis}"
        )
    except Exception as e:
        return f"Error processing image: {str(e)}"


# Gradio interface to upload image files and perform analysis
iface = gr.Interface(fn=analyze_emotion_from_image, inputs="image", outputs="text")

# Launch the interface
if __name__ == "__main__":
    iface.launch()
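
# Optional local sanity check (a minimal sketch, not part of the Gradio app above):
# it builds a random RGB array in memory instead of relying on an uploaded file, so
# analyze_emotion_from_image can be exercised without opening the web UI. The image
# size and random content here are illustrative assumptions only.
def _demo_without_ui():
    demo_array = np.random.randint(0, 256, size=(200, 200, 3), dtype=np.uint8)
    print(analyze_emotion_from_image(demo_array))

# Uncomment to run the check directly (e.g. before launching the interface):
# _demo_without_ui()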