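"""Gradio app that gives a rough estimate of the emotional tone of an uploaded image.

It clusters the image's dominant colors with KMeans and measures edge density with
OpenCV; a text emotion classifier is also loaded, though the current interface only
analyzes images.
"""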
import torch
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from transformers import pipeline
import gradio as gr
from sklearn.cluster import KMeans

# Text emotion classifier (loaded for potential use on text assets; not used by the
# image-only interface below)
emotion_classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True)

# Function to analyze colors in an image
def analyze_colors(image):
    try:
        # Convert image to RGB if not already
        if image.mode != "RGB":
            image = image.convert("RGB")
        # Resize the image for faster processing (small but still sufficient for color analysis)
        image = image.resize((150, 150))
        # Convert to numpy array
        img_array = np.array(image)
        # Reshape image to be a list of pixels
        pixels = img_array.reshape((-1, 3))

        # Check if there are enough pixels to perform KMeans
        if len(pixels) < 5:
            return "Image has too few pixels for analysis"

        # Use KMeans to find the dominant colors
        kmeans = KMeans(n_clusters=5, random_state=0)
        kmeans.fit(pixels)
        dominant_colors = kmeans.cluster_centers_
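        # Each cluster center is an (R, G, B) triple of floats in the 0-255 range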

        # Build a small swatch plot of the dominant colors; the figure is not returned,
        # so close it rather than show it to avoid blocking in a headless server
        plt.figure(figsize=(8, 6))
        plt.imshow([dominant_colors.astype(np.uint8)])
        plt.axis('off')
        plt.close()

        return dominant_colors

    except Exception as e:
        print(f"Error in analyze_colors: {e}")
        return None


# Function to detect emotions from colors (simplified emotion-color mapping)
def color_emotion_analysis(dominant_colors):
    try:
        emotions = []
        for color in dominant_colors:
            # Use the mean channel value as a rough brightness measure (0-255 scale):
            # dark tones map to sadness, bright tones to happiness
            brightness = np.mean(color)
            if brightness < 85:
                emotions.append("Sadness")
            elif brightness > 170:
                emotions.append("Happiness")
            else:
                emotions.append("Neutral")
        return emotions
    except Exception as e:
        print(f"Error in color_emotion_analysis: {e}")
        return ["Error analyzing emotions"]

# Function to analyze patterns and shapes using OpenCV
def analyze_patterns(image):
    try:
        # Convert to grayscale for edge detection
        gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
        edges = cv2.Canny(gray_image, 100, 200)

        # Calculate the number of edges (chaos metric)
        num_edges = np.sum(edges > 0)
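        # NOTE: the raw edge count scales with image resolution, so the threshold
        # below is only meaningful for similarly sized inputs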

        if num_edges > 10000:  # Arbitrary threshold for "chaos"
            return "Chaotic patterns - possibly distress"
        else:
            return "Orderly patterns - possibly calm"
    except Exception as e:
        print(f"Error in analyze_patterns: {e}")
        return "Error analyzing patterns"

# Main function to process image and analyze emotional expression
def analyze_emotion_from_image(image):
    try:
        # Analyze colors
        dominant_colors = analyze_colors(image)
        if dominant_colors is None:
            return "Error analyzing colors"
        # analyze_colors returns a message string when the image is too small to cluster
        if isinstance(dominant_colors, str):
            return dominant_colors

        color_emotions = color_emotion_analysis(dominant_colors)

        # Analyze patterns
        pattern_analysis = analyze_patterns(image)

        return f"Color-based emotions: {color_emotions}\nPattern analysis: {pattern_analysis}"
    except Exception as e:
        return f"Error processing image: {str(e)}"

# Gradio interface to upload image files and perform analysis
# (type="pil" so the handler receives a PIL.Image, which analyze_colors expects)
iface = gr.Interface(fn=analyze_emotion_from_image, inputs=gr.Image(type="pil"), outputs="text")

# Launch the interface
if __name__ == "__main__":
    iface.launch()