abdullah2 committed
Commit
6b1d376
1 Parent(s): 6ceaa12
Files changed (1)
  1. app.py +184 -0
app.py ADDED
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+"""Untitled31 (2).ipynb
+
+Automatically generated by Colab.
+
+Original file is located at
+    https://colab.research.google.com/drive/1jx1zW74zl2vFolee01ukC1b11uyTJDZ4
+"""
+
+# pip install -r requirements.txt  # shell commands, not Python; run from a terminal before launching
+
+# pip install gradio
+
+import os
+
+from datasets import load_dataset
+
+# download dataset
+dataset = load_dataset("neuralwork/fashion-style-instruct")
+print(dataset)
+
+# print a sample triplet
+print(dataset["train"][0])
+
+def format_instruction(sample):
+    return f"""You are a personal stylist recommending fashion advice and clothing combinations. Use the self body and style description below, combined with the event described in the context to generate 5 self-contained and complete outfit combinations.
+### Input:
+{sample["input"]}
+
+### Context:
+{sample["context"]}
+
+### Response:
+{sample["completion"]}
+"""
+
+sample = dataset["train"][0]
+print(format_instruction(sample))
+
+import os
+import random
+
+import torch
+import gradio as gr
+from peft import AutoPeftModelForCausalLM
+from transformers import AutoTokenizer
+
+
+events = [
+    "nature retreat",
+    "work / office event",
+    "wedding as a guest",
+    "tropical vacation",
+    "conference",
+    "sports event",
+    "winter vacation",
+    "beach",
+    "play / concert",
+    "picnic",
+    "night club",
+    "national parks",
+    "music festival",
+    "job interview",
+    "city tour",
+    "halloween party",
+    "graduation",
+    "gala / exhibition opening",
+    "fancy date",
+    "cruise",
+    "casual gathering",
+    "concert",
+    "cocktail party",
+    "casual date",
+    "business meeting",
+    "camping / hiking",
+    "birthday party",
+    "bar",
+    "business lunch",
+    "bachelorette / bachelor party",
+    "semi-casual event",
+]
+
+
+def format_instruction(input, context):
+    return f"""You are a personal stylist recommending fashion advice and clothing combinations. Use the self body and style description below, combined with the event described in the context to generate 5 self-contained and complete outfit combinations.
+### Input:
+{input}
+
+### Context:
+I'm going to a {context}.
+
+### Response:
+"""
+
+
+def main():
+    # load base LLM model, LoRA params and tokenizer
+    model = AutoPeftModelForCausalLM.from_pretrained(
+        "neuralwork/mistral-7b-style-instruct",
+        low_cpu_mem_usage=True,
+        torch_dtype=torch.float16,
+        load_in_4bit=True,
+    )
+    tokenizer = AutoTokenizer.from_pretrained("neuralwork/mistral-7b-style-instruct")
+
+    def postprocess(outputs, prompt):
+        outputs = outputs.detach().cpu().numpy()
+        output = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
+        output = output[len(prompt):]
+        return output
+
+    def generate(
+        prompt: str,
+        event: str,
+
+    ):
+        torch.manual_seed(1347)
+        prompt = format_instruction(str(prompt), str(event))
+        input_ids = tokenizer(
+            prompt, return_tensors="pt", truncation=True
+        ).input_ids.cuda()
+
+        with torch.inference_mode():
+            outputs = model.generate(
+                input_ids=input_ids,
+                max_new_tokens=1500,
+                min_new_tokens=10,
+                do_sample=True,
+                top_p=0.9,
+                temperature=0.9,
+            )
+
+        output = postprocess(outputs, prompt)
+        return output
+
+    with gr.Blocks() as demo:
+        gr.HTML(
+            """
+            <h1 style="font-weight: 900; margin-bottom: 7px;">
+            Instruct Fine-tune Mistral-7B-v0
+            </h1>
+            <p>Mistral-7B-v0 fine-tuned on the <a href="https://huggingface.co/datasets/neuralwork/fashion-style-instruct">neuralwork/fashion-style-instruct</a> dataset.
+            To use the model, simply describe your body type and personal style and select the type of event you're planning to attend.
+            <br/>
+            See our <a href="https://neuralwork.ai/">blog post</a> for a detailed tutorial on fine-tuning Mistral on your own dataset.
+            </p>"""
+        )
+        with gr.Row():
+            with gr.Column(scale=1):
+                prompt = gr.Textbox(
+                    lines=4,
+                    label="Style prompt, describe your body type and fashion style.",
+                    interactive=True,
+                    value="I'm an above average height athletic woman with slightly broad shoulders and a medium sized bust. I generally prefer a casual but sleek look with dark colors and jeans.",
+                )
+                event = gr.Dropdown(
+                    choices=events, value="semi-casual event", label="Event type"
+                )
+
+                generate_button = gr.Button("Get outfit suggestions")
+
+            with gr.Column(scale=2):
+                response = gr.Textbox(
+                    lines=6, label="Outfit suggestions", interactive=False
+                )
+
+        gr.Markdown("From [neuralwork](https://neuralwork.ai/) with :heart:")
+
+        generate_button.click(
+            fn=generate,
+            inputs=[
+                prompt,
+                event,
+
+            ],
+            outputs=response,
+        )
+
+    demo.launch(share=True)
+
+
+if __name__ == "__main__":
+    main()
+
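
Note on the 4-bit loading above: passing load_in_4bit=True directly to from_pretrained still works on many transformers versions, but recent releases deprecate the bare flag in favor of an explicit BitsAndBytesConfig. A minimal sketch of the equivalent loading path, assuming a recent transformers / peft / bitsandbytes stack (this snippet is not part of the commit):

import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, BitsAndBytesConfig

# Equivalent of load_in_4bit=True, expressed as an explicit quantization config.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoPeftModelForCausalLM.from_pretrained(
    "neuralwork/mistral-7b-style-instruct",
    low_cpu_mem_usage=True,
    torch_dtype=torch.float16,
    quantization_config=bnb_config,
)
tokenizer = AutoTokenizer.from_pretrained("neuralwork/mistral-7b-style-instruct")

Either way, the app assumes a CUDA GPU at runtime (it calls .cuda() on the tokenized inputs and loads the adapter in 4-bit). The requirements.txt referenced at the top of app.py is not included in this commit; judging from the imports it would at least need datasets, torch, gradio, peft and transformers, and presumably bitsandbytes and accelerate for the 4-bit path.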