import base64

import gradio as gr
import requests
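
# Helper: read the uploaded file from Gradio and return its bytes as a
# base64-encoded string, ready to embed in a data: URL.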
def encode_image(image_file):
    with open(image_file.name, "rb") as img_file:
        return base64.b64encode(img_file.read()).decode('utf-8')
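
# Two-pass pipeline: first ask the model which part of the paddy crop is
# visible, then send a follow-up prompt specific to that part.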
def send_to_openai(api_key, image_file):
    base64_image = encode_image(image_file)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    # First request: classify which part of the plant is visible.
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Answer with only one of the following options - Leaf, Sheath, Question. You are given a picture of a rice paddy; which part of the paddy crop is visible?"
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
# Extract words from the assistant's response
assistant_response = response.json()['choices'][0]['message']['content']
words = assistant_response.split('\n')
checkresponse_lower = words.lower()
if "leaf" in checkresponse_lower:
        # Leaf is visible: ask three yes/no questions about leaf health.
        payload = {
            "model": "gpt-4-vision-preview",
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Answer in three words only: does the uploaded image show a healthy rice leaf - Yes or No; does it show a rice leaf with major (not small) circular spots - Yes or No; does it show a rice leaf with major yellowish discoloration in some areas (ignore spots) - Yes or No. DO NOT RESPOND IN MORE THAN THREE WORDS."
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            }
                        }
                    ]
                }
            ],
            "max_tokens": 300
        }
elif "sheath" in checkresponse_lower:
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "ANSWER IN ONLY ONE WORD , does the sheath part of the paddy in the image have sheath rot "
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}"
}
}
]
}
],
"max_tokens": 300
}
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
assistant_response = response.json()['choices'][0]['message']['content']
recognition = assistant_response.split('\n')
return ' '.join(words), ' '.join(recognition)
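
# Gradio UI: a textbox for the OpenAI API key, a file upload for the image,
# and two text outputs (part classification, diagnosis).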
iface = gr.Interface(send_to_openai, ["text", "file"], ["text", "text"])
iface.launch()