typo fix for get_probabilities
#5
by
mnagired
- opened
README.md
CHANGED
|
@@ -79,7 +79,7 @@ def parse_output(output, input_len):
|
|
| 79 |
list_index_logprobs_i = [torch.topk(token_i, k=nlogprobs, largest=True, sorted=True)
|
| 80 |
for token_i in list(output.scores)[:-1]]
|
| 81 |
if list_index_logprobs_i is not None:
|
| 82 |
-
prob = get_probablities(list_index_logprobs_i)
|
| 83 |
prob_of_risk = prob[1]
|
| 84 |
|
| 85 |
res = tokenizer.decode(output.sequences[:,input_len:][0],skip_special_tokens=True).strip()
|
|
@@ -92,7 +92,7 @@ def parse_output(output, input_len):
|
|
| 92 |
|
| 93 |
return label, prob_of_risk.item()
|
| 94 |
|
| 95 |
-
def get_probablities(logprobs):
|
| 96 |
safe_token_prob = 1e-50
|
| 97 |
unsafe_token_prob = 1e-50
|
| 98 |
for gen_token_i in logprobs:
|
|
|
|
| 79 |
list_index_logprobs_i = [torch.topk(token_i, k=nlogprobs, largest=True, sorted=True)
|
| 80 |
for token_i in list(output.scores)[:-1]]
|
| 81 |
if list_index_logprobs_i is not None:
|
| 82 |
+
prob = get_probabilities(list_index_logprobs_i)
|
| 83 |
prob_of_risk = prob[1]
|
| 84 |
|
| 85 |
res = tokenizer.decode(output.sequences[:,input_len:][0],skip_special_tokens=True).strip()
|
|
|
|
| 92 |
|
| 93 |
return label, prob_of_risk.item()
|
| 94 |
|
| 95 |
+
def get_probabilities(logprobs):
|
| 96 |
safe_token_prob = 1e-50
|
| 97 |
unsafe_token_prob = 1e-50
|
| 98 |
for gen_token_i in logprobs:
|