Hang Nguyen committed
Commit a16a609
1 Parent(s): b43dd91
recipe
- app.py +76 -3
- requirements.txt +5 -0
app.py
CHANGED
@@ -1,4 +1,77 @@
 import streamlit as st
-
-
-
+from audiorecorder import audiorecorder
+import os
+import openai
+import requests
+import re
+def create_recipe(path):
+    import whisper
+    # small model for transcription
+    model = whisper.load_model("base")
+    result = model.transcribe(path)
+    # print(result["text"])
+    openai.api_type = "azure"
+    openai.api_version = "2023-05-15"
+    openai.api_base = "https://futurice-data-day-2023.openai.azure.com/"
+    openai.api_key = "d2cb77316cee4feb9d96d70ed77ef27d"
+    response = openai.ChatCompletion.create(
+        engine="gpt-35-16k",  # engine options = ["gpt-35-16k", "gpt-4", "gpt-4-32k"]
+        messages=[
+            {"role": "system", "content": "You are a Recipe generator."},
+            {"role": "user", "content": result["text"]},
+            {"role": "assistant", "content": "Create a recipe based on the user's dietary restrictions and personalize it. Write the recipe name after the word 'Recipe:', then give the ingredients after the word 'Ingredients:' and the instructions after the word 'Instruction:'"},
+        ],
+    )
+    # print(response)
+    # print(response["choices"][0]["message"]["content"])
+    recipe_text = response["choices"][0]["message"]["content"]
+    return recipe_text
+def create_recipe_image(recipe_text):
+    openai.api_base = "https://futurice-data-day-2023.openai.azure.com/"
+    openai.api_key = "d2cb77316cee4feb9d96d70ed77ef27d"
+    # Assign the API version (DALL-E is currently supported for the 2023-06-01-preview API version only)
+    openai.api_version = "2023-06-01-preview"
+    openai.api_type = "azure"
+    pattern = re.compile(r'Recipe:(.*?)Ingredients:', re.DOTALL)
+    # Use re.search() to find the matching portion of the text.
+    match = pattern.search(recipe_text)
+    if match:
+        extracted_text = match.group(1).strip()
+        generation_response = openai.Image.create(
+            prompt=extracted_text, size="1024x1024", n=2  # use the extracted recipe name as the image prompt
+        )
+        # Set the directory for the stored image
+        image_dir = os.path.join(os.curdir, "images")
+        # If the directory doesn't exist, create it
+        if not os.path.isdir(image_dir):
+            os.mkdir(image_dir)
+        # Initialize the image path (note the filetype should be png)
+        image_path = os.path.join(image_dir, "generated_image.png")
+        # Retrieve the generated image
+        image_url = generation_response["data"][0]["url"]  # extract image URL from response
+        generated_image = requests.get(image_url).content  # download the image
+        with open(image_path, "wb") as image_file:
+            image_file.write(generated_image)
+        from PIL import Image
+        image = Image.open(image_path)
+        st.image(image, caption='Recipe')
+        st.write(recipe_text)
+st.markdown(""" <style> .font {
+font-size:50px ; font-family: 'Cooper Black'; color: #FF9633;}
+</style> """, unsafe_allow_html=True)
+st.markdown('<p class="font">Cooking with a Dash of AI: Recipe Generator Delivers Delicious Delights!</p>', unsafe_allow_html=True)
+# st.title("Cooking with a Dash of AI: Recipe Generator Delivers Delicious Delights!")
+st.markdown(""" <style> .font2 {
+font-size:30px ; font-family: 'Cooper Black'; color: #000000;}
+</style> """, unsafe_allow_html=True)
+st.markdown('<p class="font2">Could you please share your favorite dish and any dietary constraints you have?</p>', unsafe_allow_html=True)
+audio = audiorecorder("Click to record your voice", "Click to stop recording")
+if len(audio) > 0:
+    # To play the audio in the frontend:
+    # st.audio(audio.export().read())
+    # To save the audio to a file, use the pydub export method:
+    audio.export("audio.wav", format="wav")
+    recipe_text = create_recipe("audio.wav")
+    create_recipe_image(recipe_text)
+    # To get audio properties, use pydub AudioSegment properties:
+    # st.write(f"Frame rate: {audio.frame_rate}, Frame width: {audio.frame_width}, Duration: {audio.duration_seconds} seconds")
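For reference, a minimal standalone sketch of the title-extraction step used in create_recipe_image above; the sample recipe text and variable names are illustrative only and not part of the commit.

import re

# Same pattern as in create_recipe_image: grab everything between
# 'Recipe:' and 'Ingredients:' in the model output.
sample_recipe_text = (
    "Recipe: Vegan Lentil Curry\n"
    "Ingredients: lentils, coconut milk, curry paste\n"
    "Instruction: Simmer everything for 25 minutes."
)
pattern = re.compile(r'Recipe:(.*?)Ingredients:', re.DOTALL)
match = pattern.search(sample_recipe_text)
if match:
    print(match.group(1).strip())  # prints: Vegan Lentil Curry

The extracted title is what the app passes to openai.Image.create as the DALL-E prompt.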
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+streamlit-audiorecorder
+openai
+requests
+pillow
+openai-whisper
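With these dependencies in place (for example via pip install -r requirements.txt), the app would typically be launched with streamlit run app.py. Two caveats worth noting: openai-whisper expects the ffmpeg binary to be available on the system for audio decoding, and since app.py uses the pre-1.0 openai.ChatCompletion / openai.Image interfaces, the unpinned openai entry may need to be constrained (e.g. openai<1.0) for the code to run unchanged.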