machineteacher committed
Commit e8654f2
1 Parent(s): ad76c72

Update README.md

Files changed (1)
  1. README.md +3 -8
README.md CHANGED
@@ -66,17 +66,12 @@ Given an edit instruction and an original text, our model can generate the edited
  ```python
  from transformers import AutoTokenizer, T5ForConditionalGeneration

- tokenizer = AutoTokenizer.from_pretrained("grammarly/coedit-xl")
- model = T5ForConditionalGeneration.from_pretrained("grammarly/coedit-xl")
- input_text =
+ tokenizer = AutoTokenizer.from_pretrained("grammarly/coedit-large")
+ model = T5ForConditionalGeneration.from_pretrained("grammarly/coedit-large")
+ input_text = 'Fix grammatical errors in this sentence: New kinds of vehicles will be invented with new technology than today.'
  input_ids = tokenizer(input_text, return_tensors="pt").input_ids
  outputs = model.generate(input_ids, max_length=256)
  edited_text = tokenizer.decode(outputs[0], skip_special_tokens=True)[0]
-
- before_input = 'Fix grammatical errors in this sentence: New kinds of vehicles will be invented with new technology than today.'
- model_input = tokenizer(before_input, return_tensors='pt')
- model_outputs = model.generate(**model_input, num_beams=8, max_length=1024)
- after_text = tokenizer.batch_decode(model_outputs, skip_special_tokens=True)[0]
  ```

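For reference, a minimal runnable sketch of the usage snippet as it reads after this commit, assuming the `transformers` library is installed and the `grammarly/coedit-large` checkpoint can be fetched from the Hub. One caveat carried over from the README: `tokenizer.decode` already returns a string, so the trailing `[0]` in the diff line above would keep only its first character; the sketch below drops it.

```python
# Sketch of the post-commit usage example (not part of the diff itself).
from transformers import AutoTokenizer, T5ForConditionalGeneration

# Load the CoEdIT-large checkpoint referenced by the updated README.
tokenizer = AutoTokenizer.from_pretrained("grammarly/coedit-large")
model = T5ForConditionalGeneration.from_pretrained("grammarly/coedit-large")

# The edit instruction is prepended to the sentence to be edited.
input_text = "Fix grammatical errors in this sentence: New kinds of vehicles will be invented with new technology than today."
input_ids = tokenizer(input_text, return_tensors="pt").input_ids

# Generate and decode the edited sentence; decode() already returns a str,
# so no extra [0] indexing is needed here.
outputs = model.generate(input_ids, max_length=256)
edited_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(edited_text)
```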