Correct typos in usage

#7
by mabrowning - opened
Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -41,7 +41,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 
  # Load the tokenizer and model
  tokenizer = AutoTokenizer.from_pretrained("cerebras/btlm-3b-8k-base")
- model = AutoModelForCausalLM.from_pretrained("cerebras/-3b-8k-base", trust_remote_code=True, torch_dtype="auto")
+ model = AutoModelForCausalLM.from_pretrained("cerebras/btlm-3b-8k-base", trust_remote_code=True, torch_dtype="auto")
 
  # Set the prompt for generating text
  prompt = "Albert Einstein was known for "
@@ -59,7 +59,7 @@ outputs = model.generate(
  )
 
  # Convert the generated token IDs back to text
- generated_text = tokenizer.batch_decode(outputs skip_special_tokens=True)
+ generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
 
  # Print the generated text
  print(generated_text[0])
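
The two hunks above only show the changed lines and their surrounding context. For reference, a self-contained sketch of the corrected usage is given below; the tokenization step and the generate() arguments (max_new_tokens, do_sample) are assumptions filled in around the diff, not necessarily the README's actual settings.

# Sketch of the corrected end-to-end usage from this PR.
# The generate() arguments below are placeholders; the README's real values
# fall outside the diff hunks and are not shown here.
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model (repo id fixed to "cerebras/btlm-3b-8k-base")
tokenizer = AutoTokenizer.from_pretrained("cerebras/btlm-3b-8k-base")
model = AutoModelForCausalLM.from_pretrained("cerebras/btlm-3b-8k-base", trust_remote_code=True, torch_dtype="auto")

# Set the prompt for generating text
prompt = "Albert Einstein was known for "

# Tokenize the prompt and generate a completion (placeholder arguments)
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=50,
    do_sample=False,
)

# Convert the generated token IDs back to text (missing comma fixed)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)

# Print the generated text
print(generated_text[0])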