---
datasets:
- yahma/alpaca-cleaned
- Nebulous/gpt4all_pruned
language:
- en
---

## Inference Example
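
The snippet below loads the PEFT adapter from `edu-linguistic/opt-1.3b-edu-sft` on top of the `facebook/opt-1.3b` base model, tokenizes a `<|prompter|>`-formatted question, and samples a completion.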

```python
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "edu-linguistic/opt-1.3b-edu-sft"
model_name = "facebook/opt-1.3b"

# Load the adapter config (it also records the base model name),
# the base model, the adapter weights, and the tokenizer.
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(model_name)
model = PeftModel.from_pretrained(model, peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(model_name)

question = "<|prompter|> Consider the following function: f(x1, x2) = ln(x1). This function is…"

# Tokenize the prompt into a batch of input ids.
input_ids = tokenizer.encode(question, return_tensors="pt")

generation_kwargs = {
    "do_sample": True,
    "top_k": 0,  # 0 disables top-k filtering, leaving pure nucleus sampling
    "top_p": 0.9,
    "bos_token_id": tokenizer.bos_token_id,
    "pad_token_id": tokenizer.pad_token_id,
    "eos_token_id": tokenizer.eos_token_id,
    "num_return_sequences": 1,
    "min_new_tokens": 10,
    "max_new_tokens": 512,
}

response = model.generate(input_ids=input_ids, **generation_kwargs)
response = tokenizer.decode(
    response[0],
    skip_special_tokens=False,
    clean_up_tokenization_spaces=False,
)
print(response)
```
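
With `do_sample=True`, setting `top_k` to 0 disables top-k filtering in 🤗 Transformers, so decoding is pure nucleus (top-p) sampling: at each step the model samples from the smallest set of tokens whose cumulative probability exceeds `top_p=0.9`.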