deniskokosss committed 9d9f5da (parent: dfe2dea)

Added reproduction script
README.md CHANGED

````diff
@@ -88,11 +88,98 @@ accelerate launch main.py \
 | CodeLlama-13b-Python-hf | 0.4380 | 0.5796 | 0.4301 | 0.6226 |
 
 <details>
-<summary> Generation
-
-
-
-
-
-
+<summary> Generation scripts in the table </summary>
+```console
+#!/bin/bash
+# use with https://github.com/NLP-Core-Team/bigcode-evaluation-harness
+
+# RU
+mkdir -p ./outs/humaneval_ru
+mkdir -p ./results/humaneval_ru
+MODELS_PATH="bigcode"
+echo $MODELS_PATH
+declare -A bs=( ["starcoderbase-1b"]=16 ["starcoderbase-3b"]=8 ["starcoderbase-7b"]=4 ["starcoderbase"]=1 ["starcoder"]=1 )
+for model_name in starcoder  # starcoderbase-1b starcoderbase-3b starcoderbase-7b starcoderbase codellama_2_13b
+do
+    echo $MODELS_PATH/$model_name
+    conda run -p /home/jovyan/kokosinskiy/envs/codegen310 --no-capture-output accelerate launch --mixed_precision="fp16" main.py \
+        --model $MODELS_PATH/$model_name \
+        --max_length_generation 512 \
+        --tasks humaneval_ru \
+        --use_auth_token \
+        --temperature 0.2 \
+        --n_samples 20 \
+        --precision fp16 \
+        --batch_size ${bs[$model_name]} \
+        --allow_code_execution \
+        --save_generations_path ./outs/humaneval_ru/$model_name.json \
+        --metric_output_path ./results/humaneval_ru/$model_name.metrics
+done
+
+MODELS_PATH="codellama"
+echo $MODELS_PATH
+declare -A bs=( ["CodeLlama-7b-Python-hf"]=8 ["CodeLlama-7b-hf"]=16 ["CodeLlama-13b-Python-hf"]=4 ["CodeLlama-13b-hf"]=4 )
+for model_name in CodeLlama-7b-hf CodeLlama-7b-Python-hf CodeLlama-13b-hf CodeLlama-13b-Python-hf  # codellama_2_13b
+do
+    echo $MODELS_PATH/$model_name
+    conda run -p /home/jovyan/kokosinskiy/envs/codegen310 --no-capture-output accelerate launch --mixed_precision="fp16" main.py \
+        --model $MODELS_PATH/$model_name \
+        --max_length_generation 512 \
+        --tasks humaneval_ru \
+        --use_auth_token \
+        --temperature 0.2 \
+        --n_samples 20 \
+        --precision fp16 \
+        --batch_size ${bs[$model_name]} \
+        --allow_code_execution \
+        --save_generations_path ./outs/humaneval_ru/$model_name.json \
+        --metric_output_path ./results/humaneval_ru/$model_name.metrics
+done
+
+# EN
+
+mkdir -p ./outs/humaneval
+mkdir -p ./results/humaneval
+MODELS_PATH="bigcode"
+echo $MODELS_PATH
+declare -A bs=( ["starcoderbase-1b"]=16 ["starcoderbase-3b"]=8 ["starcoderbase-7b"]=4 ["starcoderbase"]=1 ["starcoder"]=1 )
+for model_name in starcoder starcoderbase-1b starcoderbase-3b starcoderbase-7b starcoderbase
+do
+    echo $MODELS_PATH/$model_name
+    conda run -p /home/jovyan/kokosinskiy/envs/codegen310 --no-capture-output accelerate launch --mixed_precision="fp16" main.py \
+        --model $MODELS_PATH/$model_name \
+        --max_length_generation 512 \
+        --tasks humaneval \
+        --use_auth_token \
+        --temperature 0.2 \
+        --n_samples 20 \
+        --precision fp16 \
+        --batch_size ${bs[$model_name]} \
+        --allow_code_execution \
+        --save_generations_path ./outs/humaneval/$model_name.json \
+        --metric_output_path ./results/humaneval/$model_name.metrics
+done
+
+MODELS_PATH="codellama"
+echo $MODELS_PATH
+declare -A bs=( ["CodeLlama-7b-Python-hf"]=8 ["CodeLlama-7b-hf"]=16 ["CodeLlama-13b-Python-hf"]=4 ["CodeLlama-13b-hf"]=4 )
+for model_name in CodeLlama-7b-hf CodeLlama-7b-Python-hf CodeLlama-13b-hf CodeLlama-13b-Python-hf
+do
+    echo $MODELS_PATH/$model_name
+    conda run -p /home/jovyan/kokosinskiy/envs/codegen310 --no-capture-output accelerate launch --mixed_precision="fp16" main.py \
+        --model $MODELS_PATH/$model_name \
+        --max_length_generation 512 \
+        --tasks humaneval \
+        --use_auth_token \
+        --temperature 0.2 \
+        --n_samples 20 \
+        --precision fp16 \
+        --batch_size ${bs[$model_name]} \
+        --allow_code_execution \
+        --save_generations_path ./outs/humaneval/$model_name.json \
+        --metric_output_path ./results/humaneval/$model_name.metrics
+done
+
+
+```
 </details>
````
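For a quick sanity check before launching the full sweep, the same command can be run for a single model without the conda wrapper. This is a minimal sketch distilled from the script above, assuming you are inside a checkout of NLP-Core-Team/bigcode-evaluation-harness and have run `huggingface-cli login` so that `--use_auth_token` works; the batch size 16 matches the `bs` map entry for starcoderbase-1b.

```console
# Minimal single-model run, distilled from the committed script.
# Assumes the NLP-Core-Team/bigcode-evaluation-harness checkout as CWD
# and a configured Hugging Face token for --use_auth_token.
mkdir -p ./outs/humaneval_ru ./results/humaneval_ru
accelerate launch --mixed_precision="fp16" main.py \
    --model bigcode/starcoderbase-1b \
    --max_length_generation 512 \
    --tasks humaneval_ru \
    --use_auth_token \
    --temperature 0.2 \
    --n_samples 20 \
    --precision fp16 \
    --batch_size 16 \
    --allow_code_execution \
    --save_generations_path ./outs/humaneval_ru/starcoderbase-1b.json \
    --metric_output_path ./results/humaneval_ru/starcoderbase-1b.metrics
```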
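To rebuild the table from the saved metrics, a loop like the one below may help. It is only a sketch under the assumption that each `--metric_output_path` file is JSON keyed by task name with `pass@1`/`pass@10` entries; inspect one `.metrics` file first and adjust the keys if the layout differs.

```console
# Hypothetical aggregation of pass@1 across models; assumes each .metrics
# file is JSON shaped like {"humaneval_ru": {"pass@1": ..., "pass@10": ...}, ...}.
for f in ./results/humaneval_ru/*.metrics; do
    model=$(basename "$f" .metrics)
    pass1=$(python3 -c "import json,sys; print(json.load(open(sys.argv[1]))['humaneval_ru']['pass@1'])" "$f")
    echo "$model $pass1"
done
```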