Datasets: Added an example

README.md CHANGED
@@ -37,5 +37,46 @@ generation that supports 18 programming languages. It takes the OpenAI
"HumanEval" Python benchmarks and uses little compilers to translate them
to other languages. It is easy to add support for new languages and benchmarks.

## Example

The following script uses the Salesforce/codegen model to generate Lua
code and MultiPL-E to produce a script with unit tests for luaunit.

```python
import datasets
from transformers import AutoTokenizer, AutoModelForCausalLM

LANG = "lua"
MODEL_NAME = "Salesforce/codegen-350M-multi"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).half().cuda()  # fp16 on GPU
problems = datasets.load_dataset("nuprl/MultiPL-E", LANG)

def stop_at_stop_token(decoded_string, problem):
    """
    Truncates the output at stop tokens, taking care to skip the prompt,
    which may itself contain stop tokens.
    """
    min_stop_index = len(decoded_string)
    for stop_token in problem["stop_tokens"]:
        stop_index = decoded_string.find(stop_token)
        if stop_index != -1 and stop_index > len(problem["prompt"]) and stop_index < min_stop_index:
            min_stop_index = stop_index
    return decoded_string[:min_stop_index]

# Generate one completion per problem and write it out with its tests.
for problem in problems["test"]:
    input_ids = tokenizer(
        problem["prompt"],
        return_tensors="pt",
    ).input_ids.cuda()
    generated_ids = model.generate(
        input_ids, max_length=256, pad_token_id=tokenizer.eos_token_id + 2
    )
    truncated_string = stop_at_stop_token(tokenizer.decode(generated_ids[0]), problem)
    filename = problem["name"] + "." + LANG
    with open(filename, "w") as f:
        print(f"Created {filename}")
        f.write(truncated_string)
        f.write("\n")
        f.write(problem["tests"])
```
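
The truncation step matters because the model does not stop on its own at the
end of the target function; the decoded string usually runs on into unrelated
code. A small illustration of `stop_at_stop_token`, using a made-up problem
dict (the prompt and stop tokens below are invented for this example, not
taken from the dataset):

```python
fake_problem = {
    "prompt": "-- add two numbers\nlocal function add(a, b)\n",
    "stop_tokens": ["\n\n", "\n--"],
}
# Pretend the model completed the function and then kept generating.
decoded = fake_problem["prompt"] + "  return a + b\nend\n\n-- unrelated continuation\n"
print(stop_at_stop_token(decoded, fake_problem))
# Prints the prompt plus "  return a + b\nend": everything from the first
# stop token that appears after the prompt is dropped.
```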
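
Each generated file ends with the problem's luaunit tests, so it can be run
directly with a Lua interpreter. A minimal sketch of that final step, assuming
`lua` is on the PATH, luaunit is installed (for example via
`luarocks install luaunit`), and the test harness exits non-zero on failure:

```python
import glob
import subprocess

# Execute every generated .lua file and count how many pass their tests.
# A non-zero exit code or a timeout is treated as a failure.
results = {}
for filename in sorted(glob.glob("*.lua")):
    try:
        proc = subprocess.run(["lua", filename], capture_output=True, timeout=30)
        results[filename] = proc.returncode == 0
    except subprocess.TimeoutExpired:
        results[filename] = False

passed = sum(results.values())
print(f"{passed}/{len(results)} generated programs passed their tests")
```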