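"""Gradio demo: virtual try-on with the Kwai-Kolors/Kolors-Virtual-Try-On model.

Upload a photo of a person and a photo of a garment; the app sends both to the
model through huggingface_hub's InferenceClient and displays the generated result.
"""
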
import gradio as gr
from huggingface_hub import InferenceClient
import PIL.Image
import io
import base64

client = InferenceClient(
    model="Kwai-Kolors/Kolors-Virtual-Try-On"
)


def virtual_try_on(person_image, garment_image):
    """
    Process a virtual try-on request.

    Args:
        person_image: PIL Image of the person
        garment_image: PIL Image of the garment

    Returns:
        Tuple of (result PIL Image or None, status message string)
    """
    try:
        # Convert images to base64
        person_bytes = io.BytesIO()
        garment_bytes = io.BytesIO()
        person_image.save(person_bytes, format='PNG')
        garment_image.save(garment_bytes, format='PNG')
        person_base64 = base64.b64encode(person_bytes.getvalue()).decode('utf-8')
        garment_base64 = base64.b64encode(garment_bytes.getvalue()).decode('utf-8')
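        # Note: the JSON schema used below (a list of base64-encoded images under
        # "inputs") is what this script assumes the Space's endpoint accepts;
        # adjust it if the Kolors-Virtual-Try-On API expects a different format.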
        # Make API request
        response = client.post(
            json={
                "inputs": [
                    {"image": person_base64},
                    {"image": garment_base64}
                ]
            }
        )
        # If the response is already raw bytes, use it directly; otherwise decode it from base64
        if isinstance(response, bytes):
            result_bytes = response
        else:
            result_bytes = base64.b64decode(response)
        # Convert response to image
        result_image = PIL.Image.open(io.BytesIO(result_bytes))
        return result_image, "Success"
    except Exception as e:
        return None, f"Error: {str(e)}"


# Create Gradio interface
demo = gr.Interface(
    fn=virtual_try_on,
    inputs=[
        gr.Image(type="pil", label="Person Image"),
        gr.Image(type="pil", label="Garment Image")
    ],
    outputs=[
        gr.Image(type="pil", label="Result"),
        gr.Text(label="Status")
    ],
    title="Virtual Try-On API",
    description="Upload a person image and a garment image to see how the garment would look on the person."
)

if __name__ == "__main__":
    demo.launch()