import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the tokenizer and model
model_name = "synCAI-144k-gpt2.5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Check if GPU is available and move model to GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
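# Switch to evaluation mode for inference (assumes the script is used for generation only; disables dropout)
model.eval()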
def generate_text(prompt, model, tokenizer, device, max_length=100, temperature=0.7, top_p=0.9, top_k=50):
    try:
        # Tokenize the input prompt and move the tensors to the target device
        inputs = tokenizer(prompt, return_tensors="pt")
        inputs = {key: value.to(device) for key, value in inputs.items()}

        # Generate text; sampling must be enabled for temperature/top_p/top_k to take effect
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs.get("attention_mask"),
            max_length=max_length,
            do_sample=True,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            pad_token_id=tokenizer.eos_token_id,
        )

        # Decode and return the generated text
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return generated_text
    except Exception as e:
        print(f"Error generating text for prompt '{prompt}': {e}")
        return None
# Example input prompts
input_prompts = [
    "Explain the significance of the project:",
    "What methodologies were used in the research?",
    "What are the future implications of the findings?"
]
# Generate and print texts for each prompt
for prompt in input_prompts:
    generated_text = generate_text(prompt, model, tokenizer, device)
    if generated_text:
        print(f"Prompt: {prompt}")
        print(f"Generated Text: {generated_text}\n")