beginner guide

#2
by chengyiming - opened

I am a beginner — can this model generate prompt words? For example, if I input 1girl, can it randomly generate the remaining tags? Thanks!

yes

qwopqwop changed discussion status to closed

my code:
# Default system prompt for the tag-completion assistant node.
system_content = (
    "You are an assistant that provides complete tags based on partial tags."
    " When given a partial tag, provide a list of related tags."
)

class Danbooru_llama:
    """ComfyUI node that completes Danbooru-style tag lists with a llama model.

    Given a partial tag string (e.g. ``"1girl"``), loads the model from
    ``repo_id`` and samples additional related tags.
    """

    def __init__(self):  # fixed: was `def init`, which is never called by Python
        pass

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the ComfyUI input sockets/widgets for this node."""
        return {
            "required": {
                "repo_id": ("STRING", {"forceInput": True, }),
                "max_new_tokens": ("INT", {"default": 256, "min": 32, "max": 4096, "step": 32, "display": "number"}),
                "temperature": (
                    "FLOAT",
                    {"default": 0.6, "min": 0.01, "max": 0.99, "step": 0.01, "round": False, "display": "number"}),

                "top_p": (
                    "FLOAT",
                    {"default": 0.95, "min": 0.01, "max": 0.99, "step": 0.01, "round": False, "display": "number"}),

                "top_k": (
                    "INT",
                    {"default": 90, "min": 1, "max": 1000, "step": 1, "display": "number"}),
                # -1 means "pick a random seed each run" (see do_danbooru).
                "seed": (
                    "INT",
                    {"default": -1, "min": -1, "max": 2**32 - 1, "step": 1, "display": "number"}),

                "reply_language": (["english", "chinese", "russian", "german", "french", "spanish", "japanese","Original_language"],),

                "system_content": (
                    "STRING", {"multiline": True, "default": system_content}),

                "user_content": ("STRING", {"multiline": True, "default": "1girl"}),

            },

        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("prompt",)
    FUNCTION = "do_danbooru"
    CATEGORY = "πŸ’― Illustrious Assistant/danbooru"

    def do_danbooru(self, repo_id, max_new_tokens, temperature, top_p, top_k, seed,
                    reply_language, system_content, user_content):
        """Generate a completed tag prompt from the partial `user_content` tags.

        Returns a one-tuple ``(generated_text,)`` as required by ComfyUI.
        """
        # NOTE(review): trans_reply is defined elsewhere in this project;
        # presumably it rewrites/annotates the user text for the target language.
        user_content = trans_reply(reply_language, user_content)

        if seed == -1:
            seed = random.randint(0, 2**32 - 1)
        torch.manual_seed(seed)

        model = LlamaForCausalLM.from_pretrained(
            repo_id,
            device_map="cuda",
            torch_dtype="auto",
        )
        tokenizer = LlamaTokenizer.from_pretrained(repo_id)
        text_generator = transformers.pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
        )
        outputs = text_generator(
            # Bug fix: wrap the input in the chat template so the system
            # prompt actually reaches the model (was passing raw user text).
            generate_prompt(system_content, user_content),
            max_new_tokens=max_new_tokens,
            # Bug fix: do_sample=False forced greedy decoding, silently
            # ignoring the temperature/top_p/top_k inputs exposed by the node.
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )

        print(outputs)
        prompt_output = outputs[0]["generated_text"]

        return (prompt_output,)

def generate_prompt(system_content, user_content):
    """Build a TinyLlama-style chat prompt from system and user text.

    Both inputs are stripped of surrounding whitespace; the returned string
    ends with the ``<|assistant|>`` header so the model continues from there.
    """
    # Fixed: the pasted original had the body unindented, which is a
    # SyntaxError; logic is otherwise unchanged.
    prompt = ''
    prompt += f"<|system|>\n{system_content.strip()}</s>\n"
    prompt += f"<|user|>\n{user_content.strip()}</s>\n"
    prompt += f"<|assistant|>\n"
    return prompt.strip()

It did not respond with anything that seemed useful.

Sign up or log in to comment