# -*- coding: utf-8 -*-
"""Untitled31 (2).ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1jx1zW74zl2vFolee01ukC1b11uyTJDZ4
"""

# install the libraries used below (accelerate for device placement,
# peft + bitsandbytes for 4-bit LoRA inference, gradio for the demo UI)
!pip install accelerate peft bitsandbytes transformers datasets gradio

from datasets import load_dataset

# download the dataset from the Hugging Face Hub
dataset = load_dataset("neuralwork/fashion-style-instruct")
print(dataset)

# print a sample (input, context, completion) triplet
print(dataset["train"][0])


def format_instruction(sample: dict) -> str:
    # reassemble a dataset triplet into the prompt format used for fine-tuning
    return f"""You are a personal stylist recommending fashion advice and clothing combinations. Use the self body and style description below, combined with the event described in the context to generate 5 self-contained and complete outfit combinations.

### Input:
{sample["input"]}

### Context:
{sample["context"]}

### Response:
{sample["completion"]}
"""


sample = dataset["train"][0]
print(format_instruction(sample))

import torch
import gradio as gr
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, BitsAndBytesConfig

# event types the demo lets the user pick from
events = [
    "nature retreat",
    "work / office event",
    "wedding as a guest",
    "tropical vacation",
    "conference",
    "sports event",
    "winter vacation",
    "beach",
    "play / concert",
    "picnic",
    "night club",
    "national parks",
    "music festival",
    "job interview",
    "city tour",
    "halloween party",
    "graduation",
    "gala / exhibition opening",
    "fancy date",
    "cruise",
    "casual gathering",
    "concert",
    "cocktail party",
    "casual date",
    "business meeting",
    "camping / hiking",
    "birthday party",
    "bar",
    "business lunch",
    "bachelorette / bachelor party",
    "semi-casual event",
]


def format_instruction(input: str, context: str) -> str:
    # redefines the helper above for inference: same prompt format, but the
    # response section is left empty for the model to complete
    return f"""You are a personal stylist recommending fashion advice and clothing combinations. Use the self body and style description below, combined with the event described in the context to generate 5 self-contained and complete outfit combinations.

### Input:
{input}

### Context:
I'm going to a {context}.

### Response:
"""


def main():
    # load base LLM model with LoRA params in 4-bit, plus the tokenizer;
    # quantization_config replaces the deprecated load_in_4bit=True kwarg
    # used in older transformers versions
    model = AutoPeftModelForCausalLM.from_pretrained(
        "neuralwork/mistral-7b-style-instruct",
        low_cpu_mem_usage=True,
        torch_dtype=torch.float16,
        quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    )
    tokenizer = AutoTokenizer.from_pretrained("neuralwork/mistral-7b-style-instruct")

    def postprocess(outputs, prompt):
        # decode the generated ids and strip the echoed prompt prefix
        outputs = outputs.detach().cpu().numpy()
        output = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
        output = output[len(prompt):]
        return output

    def generate(prompt: str, event: str):
        # fixed seed so repeated runs with the same inputs give the same outfits
        torch.manual_seed(1347)
        prompt = format_instruction(str(prompt), str(event))
        # assumes a CUDA GPU is available
        input_ids = tokenizer(
            prompt, return_tensors="pt", truncation=True
        ).input_ids.cuda()

        with torch.inference_mode():
            outputs = model.generate(
                input_ids=input_ids,
                max_new_tokens=1500,
                min_new_tokens=10,
                do_sample=True,
                top_p=0.9,
                temperature=0.9,
            )

        output = postprocess(outputs, prompt)
        return output

    with gr.Blocks() as demo:
        gr.HTML(
            """
Mistral-7B-v0 fine-tuned on the neuralwork/fashion-style-instruct dataset.
To use the model, simply describe your body type and personal style, then select the type of event you're planning to attend.
See our blog post for a detailed tutorial on fine-tuning Mistral on your own dataset.
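            """
        )

        # -- The notebook is truncated at this point; below is a minimal,
        # hedged sketch of the remaining demo wiring, assuming a textbox for
        # the body/style description, a dropdown over `events`, and a button
        # bound to `generate`. Component names here are illustrative, not
        # taken from the original file. --
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(
                    lines=4,
                    label="Describe your body type and personal style",
                )
                event = gr.Dropdown(choices=events, label="Event type")
                generate_btn = gr.Button("Generate")
            output = gr.Textbox(lines=12, label="Outfit recommendations")

        generate_btn.click(fn=generate, inputs=[prompt, event], outputs=output)

    demo.launch()


if __name__ == "__main__":
    main()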