yatinece committed
Commit ff05b59 • 1 Parent(s): f89c69c

Upload app.py

Files changed (1)
  1. app.py +117 -60
app.py CHANGED
@@ -5,29 +5,35 @@ import streamlit as st
  import numpy as np
  from PIL import Image
  import os
-
- st.sidebar.write("# FashionCLIP Resources")
- st.sidebar.write("We have several resources related to FashionCLIP.")
- st.sidebar.write("## Documentation")
- st.sidebar.write("* 📚 [Blog Post](https://towardsdatascience.com/teaching-clip-some-fashion-3005ac3fdcc3)")
- st.sidebar.write("* 📚 [Paper](https://www.nature.com/articles/s41598-022-23052-9)")
-
- st.sidebar.write("## Code")
- st.sidebar.write("* 📚 [Repo](https://github.com/patrickjohncyh/fashion-clip)")
- st.sidebar.write("* 📚 [Colab](https://colab.research.google.com/drive/1Z1hAxBnWjF76bEi9KQ6CMBBEmI_FVDrW#scrollTo=FzUQGwS1lhGS)")
- st.sidebar.write("* 📚 [HuggingFace Weights](https://huggingface.co/patrickjohncyh/fashion-clip)")
-
-
- st.write("# FashionCLIP. A Foundation Model for Fashion.")
-
- st.write("[![Youtube Video](https://img.shields.io/badge/youtube-video-red)](https://www.youtube.com/watch?v=uqRSc-KSA1Y) [![HuggingFace Model](https://img.shields.io/badge/HF%20Model-Weights-yellow)](https://huggingface.co/patrickjohncyh/fashion-clip) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1Z1hAxBnWjF76bEi9KQ6CMBBEmI_FVDrW?usp=sharing) [![Medium Blog Post](https://raw.githubusercontent.com/aleen42/badges/master/src/medium.svg)](https://towardsdatascience.com/teaching-clip-some-fashion-3005ac3fdcc3) [![Open in Streamlit](https://static.streamlit.io/badges/streamlit_badge_black_white.svg)](https://huggingface.co/spaces/vinid/fashion-clip-app)")
-
-
- st.write("This web app uses FashionCLIP, our new foundation model for fashion, to find clothing items based on a query of the item you want to find.")
-
- st.write("The model is going to find the most similar item to your query, given a list of 5000 items that have been released by Alexey Grigorev [here](https://github.com/alexeygrigorev/clothing-dataset).")
- st.write("Note that some queries might not return anything useful. This could be due both to the model's limitations and to the fact that the item you are looking for is missing from the collection.")
- st.write("You can find more about FashionCLIP on the [repo](https://github.com/patrickjohncyh/fashion-clip) or in our [paper](https://www.nature.com/articles/s41598-022-23052-9)")
+ from streamlit_image_select import image_select
+ os.environ["CUDA_VISIBLE_DEVICES"] = ""
+ import torch
+ torch.cuda.is_available = lambda: False  # force CPU even if CUDA is present
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ st.sidebar.write("# Shopping Muse")
+
+ #query = st.sidebar.text_input("Enter some text", "A red dress")
+ #prompt = st.chat_input("Say something")
+ st.write("Shopping Muse")
+
+ def horizontal_scroll_images(images, image_width=300):
+     # Lay the result images out side by side, one per column
+     # (image_width is currently unused; each image fills its column)
+     cols = st.columns(len(images))
+     for col, img_path in zip(cols, images):
+         col.image(img_path, use_column_width=True)
+
+ new_size = (800, 600)  # Set your desired width and height
 
  @st.cache_resource
  def load_embedding_file():
@@ -43,43 +49,94 @@ fclip = FashionCLIP('fashion-clip')
  if not os.path.exists("clothing-dataset"):
      subprocess.run("git clone https://github.com/alexeygrigorev/clothing-dataset", shell=True)
 
- st.write("## Simple FashionCLIP search engine")
- query = st.text_input("Enter a description of the clothing item you want to find", "a red dress")
-
+ #st.write("## Simple FashionCLIP search engine")
+ #query = st.text_input("Enter a description of the clothing item you want to find", "a red dress")
+ #query = prompt
  images, image_embeddings = load_embedding_file()
-
- text_embedding = fclip.encode_text([query], 32)[0]
-
- id_of_matched_object = np.argmax(text_embedding.dot(image_embeddings.T))
-
- image = Image.open(images[id_of_matched_object])
-
- st.image(image)
-
-
- st.write("If you use FashionCLIP in your work, please cite our paper:")
- st.write("""
- ```
- @Article{Chia2022,
-   title="Contrastive language and vision learning of general fashion concepts",
-   author="Chia, Patrick John
-     and Attanasio, Giuseppe
-     and Bianchi, Federico
-     and Terragni, Silvia
-     and Magalh{\~a}es, Ana Rita
-     and Goncalves, Diogo
-     and Greco, Ciro
-     and Tagliabue, Jacopo",
-   journal="Scientific Reports",
-   year="2022",
-   month="Nov",
-   day="08",
-   volume="12",
-   number="1",
-   pages="18958",
-   issn="2045-2322",
-   doi="10.1038/s41598-022-23052-9",
-   url="https://doi.org/10.1038/s41598-022-23052-9"
- }
- ```
- """)
+ image_cnt = 8
+ def append_message(sender, message):
+     chat_history.append((sender, message))
+
+ def chatbot_interface():
+     st.sidebar.title("Chatbot Interface")
+
+     user_input = st.sidebar.text_input("You:", key="user_input")
+
+     if st.sidebar.button("Send"):
+         append_message("You", user_input)
+         # Replace the following line with your chatbot logic to generate a response
+         append_message("Chatbot", f"Bot response to: {user_input}")
+
+     query = user_input
+     text_embedding = fclip.encode_text([query], 32)[0]
+     arr = text_embedding.dot(image_embeddings.T)
+     # Indices of the top image_cnt matches, best first
+     id_of_matched_object1 = (-arr).argsort()[:image_cnt]
+     id_of_matched_object = np.argmax(arr)
+
+     image = Image.open(images[id_of_matched_object])
+     #st.image(image)
+     image = []
+     for k in id_of_matched_object1:
+         image.append(Image.open(images[k]).resize(new_size))
+     img = image_select(
+         label="Results",
+         images=image,
+         captions=[str(query) + " result"] * image_cnt,
+     )
+
+     st.sidebar.markdown("---")
+
+     # Display the chat history
+     st.sidebar.title("Chat History")
+
+     for sender, message in chat_history:
+         st.sidebar.text(f"{sender}: {message}")
+
+ # Initialize the chat history
+ chat_history = []
+
+ # Main content area
+ st.title("Muse Chatbot")
+
+ # Display the chatbot interface inside a box in the sidebar
+ st.sidebar.markdown("## Chatbot Box")
+
+ chatbot_interface()
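The retrieval core that this commit wraps in the chatbot flow is the same FashionCLIP pattern as the original app: embed the query text, score it against every precomputed image embedding with a dot product, and take the highest-scoring indices. A minimal sketch of that step in isolation (the helper name `top_k_matches` is ours, not part of the commit; it assumes `image_embeddings` is an (N, D) NumPy array and `fclip` is an already-loaded `FashionCLIP` model):

```python
import numpy as np

def top_k_matches(fclip, query, image_embeddings, k=8, batch_size=32):
    # Embed the query into the shared text-image embedding space
    text_embedding = fclip.encode_text([query], batch_size)[0]
    # Dot-product similarity of the query against every image embedding
    scores = text_embedding.dot(image_embeddings.T)
    # Indices of the k best-scoring images, best match first
    return (-scores).argsort()[:k]
```

`(-scores).argsort()[:k]` sorts all N scores; for a 5000-item collection that is instant, though on a much larger catalog `np.argpartition(-scores, k)` would avoid the full sort.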
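`load_embedding_file()` itself is outside this diff; it is cached with `@st.cache_resource`, so the image paths and embeddings are loaded once per server process rather than on every rerun. A hedged sketch of what such a loader could look like; the file names `images.npy` and `image_embeddings.npy`, the glob pattern, and the use of `encode_images` to precompute the matrix are our assumptions, not code from the commit:

```python
import glob
import os

import numpy as np

def load_embedding_file():
    # Hypothetical cache files; names are not taken from the commit
    if os.path.exists("image_embeddings.npy"):
        images = list(np.load("images.npy", allow_pickle=True))
        image_embeddings = np.load("image_embeddings.npy")
    else:
        # One-time precompute: embed every image in the cloned dataset
        images = sorted(glob.glob("clothing-dataset/images/*.jpg"))
        image_embeddings = fclip.encode_images(images, batch_size=32)
        np.save("images.npy", np.array(images, dtype=object))
        np.save("image_embeddings.npy", image_embeddings)
    return images, image_embeddings
```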
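One behavior worth knowing about the committed version: Streamlit re-executes the whole script on every widget interaction, so the module-level `chat_history = []` resets before each `chatbot_interface()` call, and the Chat History panel can only ever show the current exchange. The standard way to persist state across reruns is `st.session_state`; a minimal sketch of that change:

```python
import streamlit as st

# Create the history once and reuse it across reruns
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

def append_message(sender, message):
    st.session_state.chat_history.append((sender, message))

for sender, message in st.session_state.chat_history:
    st.sidebar.text(f"{sender}: {message}")
```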