Upload 2 files
Browse files

- _script_for_eval.py (+62 −54)
- _script_for_gen.py (+2 −4)
_script_for_eval.py
CHANGED
@@ -17,6 +17,7 @@ from datasets import load_dataset
 from sentence_transformers import SentenceTransformer, CrossEncoder
 from sklearn.metrics.pairwise import cosine_similarity
 
+# client = OpenAI(base_url="http://localhost:11434/v1/", api_key="ollama")
 client = OpenAI()
 
 def load_cache(use_cache):
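Note: the new commented-out constructor shows how to point the OpenAI client at a local Ollama server, which exposes an OpenAI-compatible API under /v1/; Ollama does not check credentials, so a placeholder api_key such as "ollama" is enough.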
@@ -116,12 +117,13 @@ def get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name):
     messages.append({"role": "user", "content": prompt})
 
     max_retries = 3
+
     for attempt in range(max_retries):
         try:
             response = client.chat.completions.create(
                 model=model_name,
                 messages=messages,
-                max_tokens=
+                max_tokens=4096,
                 temperature=0.2,
                 top_p=0.95
             )
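Note: the call above sits inside a retry loop (visible in the next hunk's context) that backs off exponentially between attempts. A minimal, self-contained sketch of the same pattern, with illustrative names:

    import time

    def call_with_backoff(fn, max_retries=3):
        # Retry a flaky callable, sleeping 1s, 2s, 4s between attempts
        for attempt in range(max_retries):
            try:
                return fn()
            except Exception as e:
                if attempt < max_retries - 1:
                    time.sleep(2 ** attempt)  # exponential backoff
                else:
                    raise Exception(f"API call failed after {max_retries} attempts: {str(e)}")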
@@ -131,6 +133,20 @@ def get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name):
             time.sleep(2 ** attempt)  # Exponential backoff
         else:
             raise Exception(f"API call failed after {max_retries} attempts: {str(e)}")
+
+def clean_code_snippet(response):
+    # Remove opening delimiter
+    if response.startswith("```python"):
+        response = response[len("```python"):]
+    elif response.startswith("```"):
+        response = response[len("```"):]
+
+    # Remove closing delimiter
+    if response.endswith("```"):
+        response = response[:-len("```")]
+
+    # Trim any remaining whitespace
+    return response.strip()
 
 def process_file(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity, oracle_mode):
     file_name = test_case["file_name"]
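Note: the new clean_code_snippet helper strips the Markdown fences that chat models often wrap around generated code. Expected behavior (illustrative, not part of the commit):

    >>> clean_code_snippet("```python\nprint('hi')\n```")
    "print('hi')"
    >>> clean_code_snippet("plain code, no fences")
    'plain code, no fences'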
@@ -154,7 +170,7 @@ def process_file(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity, oracle_mode):
         os.remove(tmp_file)
 
     tqdm.write("Scanning file " + input_file + "...")
-    scan_command_input = f"semgrep --config
+    scan_command_input = f"semgrep --config auto {input_file} --output {tmp_file} --json > /dev/null 2>&1"
     os.system(scan_command_input)
 
     if not os.path.exists(tmp_file):
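Note: the rewritten command runs semgrep with the auto config, writes the JSON report to tmp_file, and discards console output. An equivalent, illustrative sketch using subprocess instead of os.system (helper name assumed):

    import json, subprocess

    def semgrep_scan(path, report_file):
        # Same flags as the command above; a list argv avoids shell redirection
        subprocess.run(
            ["semgrep", "--config", "auto", path, "--output", report_file, "--json"],
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
        )
        with open(report_file) as jf:
            return json.load(jf)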
@@ -163,71 +179,62 @@ def process_file(test_case, cache, fixed_files, model_name, use_cache, n_shot, use_similarity, oracle_mode):
 
     with open(tmp_file, 'r') as jf:
         data = json.load(jf)
-
-    if len(data.get("errors", []))
+
+    if len(data.get("errors", [])) > 0:
+        tqdm.write(f"Error processing {input_file} ...")
+        return False
+
+    if len(data.get("results", [])) == 0:
+        tqdm.write(input_file + " has no vulnerabilities")
+        result = False
+    else:
+        tqdm.write("Vulnerability found in " + input_file + "...")
+        if oracle_mode:
+            result = True
+            fixed_files.append(file_name)
         else:
-            shift = len("```python")
-            fixed_code = response[idx + shift :]
-        else:
-            fixed_code = response
-
-        stop_words = ["```", "assistant"]
-
-        for w in stop_words:
-            if w in fixed_code:
-                fixed_code = fixed_code[:fixed_code.find(w)]
-        if len(fixed_code) < 400:
-            result = False
-        if has_all_comments(fixed_code):
-            result = False
+            cwe = test_case['cwe']
+            lines = data["results"][0]["extra"]["lines"]
+            message = data["results"][0]["extra"]["message"]
+
+            prompt = f"""Vulnerability Report:
+- Type: {cwe}
+- Location: {lines}
+- Description: {message}
+
+Original Code:
+```
+{file_text}
+```
+
+Task: Fix the vulnerability in the code above. Provide only the complete fixed code without explanations or comments. Make minimal changes necessary to address the security issue while preserving the original functionality."""
+            # print(prompt)
+            few_shot_messages = fetch_dataset_examples(prompt, n_shot, use_similarity)
+            response = get_fixed_code_fine_tuned(prompt, few_shot_messages, model_name)
+            # print(response)
+
+            fixed_code = clean_code_snippet(response)
+
+            if len(fixed_code) < 512 or has_all_comments(fixed_code):
+                result = False
+            else:
+                # print("Here2\n" + fixed_code)
                 if os.path.exists(output_file):
                     os.remove(output_file)
                 with open(output_file, 'w') as wf:
                     wf.write(fixed_code)
                 if os.path.exists(tmp_file):
                     os.remove(tmp_file)
-                scan_command_output = f"semgrep --config
+                scan_command_output = f"semgrep --config auto {output_file} --output {tmp_file} --json > /dev/null 2>&1"
                 os.system(scan_command_output)
                 with open(tmp_file, 'r') as jf:
                     data = json.load(jf)
-                if len(data["
+                if len(data["results"]) == 0:
                     tqdm.write("Passing response for " + input_file + " at 1 ...")
                     result = True
                     fixed_files.append(file_name)
                 else:
                     result = False
-    else:
-        tqdm.write(f"Semgrep reported errors for {input_file}")
-        result = False
 
     if os.path.exists(tmp_file):
         os.remove(tmp_file)
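Note: after this hunk, a test case passes only when the model's patched file, re-scanned with semgrep, yields zero findings; responses under 512 characters or consisting entirely of comments are rejected before the re-scan. A condensed, illustrative restatement of the pass predicate:

    def response_passes(fixed_code, rescan_data):
        # Degenerate responses fail fast; otherwise require a clean re-scan
        if len(fixed_code) < 512 or has_all_comments(fixed_code):
            return False
        return len(rescan_data["results"]) == 0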
@@ -258,7 +265,7 @@ def main():
     oracle_mode = args.oracle
     sanitized_model_name = f"{sanitize_filename(model_name)}-{n_shot}-shot{'-sim' if use_similarity else ''}"
 
-    dataset = load_dataset("patched-codes/static-analysis-eval", split="train")
+    dataset = load_dataset("patched-codes/static-analysis-eval", split="train", download_mode='force_redownload')
     data = [{"file_name": item["file_name"], "source": item["source"], "cwe": item["cwe"]} for item in dataset]
 
     cache = load_cache(use_cache)
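Note: download_mode='force_redownload' tells the datasets library to re-fetch the dataset from the Hub instead of reusing a locally cached copy, so every run evaluates against the latest revision:

    from datasets import load_dataset

    ds = load_dataset("patched-codes/static-analysis-eval", split="train")  # reuses local cache
    ds = load_dataset("patched-codes/static-analysis-eval", split="train",
                      download_mode="force_redownload")                     # always re-fetches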
@@ -271,7 +278,8 @@ def main():
     manager = multiprocessing.Manager()
     fixed_files = manager.list()
 
-    process_func = partial(process_test_case, cache=cache, fixed_files=fixed_files, model_name=model_name,
+    process_func = partial(process_test_case, cache=cache, fixed_files=fixed_files, model_name=model_name,
+                           use_cache=use_cache, n_shot=n_shot, use_similarity=use_similarity, oracle_mode=oracle_mode)
 
     with multiprocessing.Pool(processes=4) as pool:
         results = list(tqdm(pool.imap(process_func, data), total=total_tests))
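Note: partial binds all keyword arguments up front so pool.imap can call process_func with a single test case at a time. A self-contained illustration of the pattern (toy names):

    from functools import partial
    from multiprocessing import Pool

    def work(item, scale):
        return item * scale

    if __name__ == "__main__":
        f = partial(work, scale=10)  # one-argument callable, as imap requires
        with Pool(processes=4) as pool:
            print(list(pool.imap(f, [1, 2, 3])))  # [10, 20, 30]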
_script_for_gen.py
CHANGED
@@ -160,10 +160,8 @@ def merge_and_push_dataset(jsonl_file, new_dataset_name):
     logging.info("Creating dataset")
     try:
         dataset = Dataset.from_list(preprocessed_data)
-    except
+    except Exception as e:
         logging.error(f"Error creating dataset: {str(e)}")
-        logging.info("Attempting to create dataset with type inference disabled")
-        dataset = Dataset.from_list(preprocessed_data, features=pa.schema([]))
 
     # Push the dataset to the new repository
     logging.info(f"Pushing dataset with {len(dataset)} records to Hugging Face")
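Note: with the fallback removed, the except branch now only logs; if Dataset.from_list raises, dataset is never bound and the following len(dataset) call will fail with a NameError. For reference, from_list infers the schema directly from the records (record shape below is illustrative):

    from datasets import Dataset

    records = [{"file_name": "example.py", "source": "print('hi')", "cwe": "CWE-000"}]
    dataset = Dataset.from_list(records)  # features inferred from the dicts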
@@ -178,7 +176,7 @@ def main():
 
     if args.push_to_dataset:
         # Merge and push the dataset
-        jsonl_file = "
+        jsonl_file = "static_analysis_eval.jsonl"
         merge_and_push_dataset(jsonl_file, args.push_to_dataset)
     else:
         # Perform the regular dataset extension process