Upload instruct_pipeline.py
instruct_pipeline.py +0 -3
instruct_pipeline.py CHANGED
@@ -105,7 +105,6 @@ class InstructionTextGenerationPipeline(Pipeline):
             **generate_kwargs,
         )[0].cpu()
         instruction_text = model_inputs.pop("instruction_text")
-        print(instruction_text)
         return {"generated_sequence": generated_sequence, "input_ids": input_ids, "instruction_text": instruction_text}
 
     def postprocess(self, model_outputs, response_key_token_id, end_key_token_id, return_instruction_text):
@@ -142,8 +141,6 @@ class InstructionTextGenerationPipeline(Pipeline):
 
         fully_decoded = self.tokenizer.decode(sequence)
 
-        print(fully_decoded)
-
         # The response appears after "### Response:". The model has been trained to append "### End" at the
         # end.
         m = re.search(r"#+\s*Response:\s*(.+?)#+\s*End", fully_decoded, flags=re.DOTALL)
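For reference, the regex in the second hunk is what isolates the model's answer once the debug printing is removed: the decoded generation is expected to contain "### Response:" followed by the answer and a trained "### End" marker. Below is a minimal standalone sketch of that extraction step; the pattern is copied from the diff, while the helper name and sample string are illustrative and not part of instruct_pipeline.py.

import re
from typing import Optional

def extract_response(fully_decoded: str) -> Optional[str]:
    # Same pattern as in postprocess(): the answer sits between
    # "### Response:" and the "### End" marker the model was trained to emit.
    m = re.search(r"#+\s*Response:\s*(.+?)#+\s*End", fully_decoded, flags=re.DOTALL)
    return m.group(1).strip() if m else None

# Illustrative decoded output; in the pipeline this text comes from self.tokenizer.decode(sequence).
sample = "### Instruction:\nName a primary color.\n\n### Response:\nBlue.\n\n### End"
print(extract_response(sample))  # -> Blue.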