## Usage

The snippet below loads the model with the transformers `text-generation` pipeline, builds a Llama 3 chat prompt, and generates a reply with greedy decoding.

```python
import torch
from transformers import pipeline

# Load the model in bfloat16; device_map="auto" places it on the available GPU(s)
pipe = pipeline(
    "text-generation",
    model="datapaf/llama3_8b_instruct_hn_rus_it",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Chat messages in Russian, matching the model's target language
# (system: "You are an AI assistant, answer the question"; user: "Hi! How are you?")
messages = [
    {"role": "system", "content": "Ты AI-помощник, ответь на вопрос"},
    {"role": "user", "content": "Привет! Как дела?"},
]

# Render the conversation with the model's chat template and append
# the assistant header so generation starts a fresh reply
prompt = pipe.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)

# Llama 3 marks the end of a turn with <|eot_id|>, so stop on it as well as on eos
terminators = [
    pipe.tokenizer.eos_token_id,
    pipe.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

# Greedy decoding with a mild repetition penalty; return only the generated text
outputs = pipe(
    prompt,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=False,
    repetition_penalty=1.2,
    return_full_text=False,
)

print(outputs[0]["generated_text"])
```
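
If you prefer to call `generate` directly instead of going through the pipeline, the same flow looks roughly like this. This is a minimal sketch under the same assumptions as the snippet above (the checkpoint ships the standard Llama 3 chat template and stop tokens); it decodes only the newly generated tokens.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "datapaf/llama3_8b_instruct_hn_rus_it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

messages = [
    {"role": "system", "content": "Ты AI-помощник, ответь на вопрос"},
    {"role": "user", "content": "Привет! Как дела?"},
]

# apply_chat_template can tokenize directly and append the assistant header
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(
    input_ids,
    max_new_tokens=256,
    eos_token_id=[tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")],
    do_sample=False,
    repetition_penalty=1.2,
)

# Strip the prompt tokens and decode only the model's reply
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```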