thomasrantian committed
Commit 27fa720 · Parent(s): 281f497

upload the long-tail eval code and data

Files changed:
- ground_truth/drivelm_val.json +2 -2
- metric.py +73 -46
ground_truth/drivelm_val.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:935dec2d885a8add5a1beef3e76dc6880ec2d58fd28b57ea58bdb4b190171efe
+size 10830798
metric.py CHANGED
@@ -11,12 +11,16 @@ import language_evaluation
 from multiprocessing import Pool
 import openai
 from huggingface_hub import HfApi, hf_hub_download
+import ast
 
 USE_INTERNAL = True
 
 GROUND_TRUTH = "ground_truth/drivelm_val.json"
 FORMAT = "json"
 
+# Default traj_L2 error
+MAXIMUM_L2_Error = 100
+
 """Error handling code"""
 TEAM_MESSAGE_TEMPLATE = "The team name in your submission is [<TEAM_NAME>].\n"
 def update_teamname_to_submission_comment(params, team_name):
@@ -361,6 +365,51 @@ class evaluation_suit():
         scores = sum(scores) / len(scores)
         return scores
 
+    def extract_traj_from_response(self, response):
+        response = response.split('[', 1)[1].split(']')[0]
+        response = response.split(', ')
+        coordinates = [list(ast.literal_eval(s)) for s in response]
+        # convert to a numpy array
+        coordinates = np.array(coordinates)  # 6 x 2
+        return coordinates  # 6 x 2
+
+    def eval_traj_L2(self):
+        ADEs = []
+        for i in range(len(self.traj_L2["answer"])):
+            answer = self.traj_L2["answer"][i]
+            GT = self.traj_L2["GT"][i]
+            try:
+                # Compute the ADE of the traj
+                answer_traj = self.extract_traj_from_response(answer)
+                GT_traj = self.extract_traj_from_response(GT)
+                # Compute the L2 between the two trajectories
+                ADE = np.linalg.norm(answer_traj - GT_traj)
+            except Exception as e:
+                print(answer)
+                print(GT)
+                print("Cannot extract traj from the response. Returning default MAXIMUM_L2_Error.")
+                ADE = MAXIMUM_L2_Error
+            ADEs.append(ADE)
+        mean_ADE = sum(ADEs) / len(ADEs)
+        return mean_ADE
+
+    def eval_long_tail_behavior_planning_gpt_score(self, data):
+        # with Pool(32) as p:  # Change the number based on your CPU cores
+        #     scores = p.map(self.chatgpt_eval.forward, data)
+        scores = []
+        for item in data:
+            answer, GT = item
+            # Remove the traj from the answer and GT
+            answer = answer.split(", and its 3-second future trajectory")[0]
+            GT = GT.split("The autonomous vehicle's 3-second future trajectory is")[0]
+            item = (answer, GT)
+            # score = self.chatgpt_eval.forward(item)
+            score = 50
+            scores.append(float(score))
+        # scores = list(map(float, scores))
+        scores = sum(scores) / len(scores)
+        return scores
+
     def eval_chatGPT(self, data):
         remain_attempts = len(self.chatgpt_eval.api_keys)
         while remain_attempts > 0:
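Note: extract_traj_from_response assumes the trajectory appears as a bracketed list of (x,y) tuples with no space inside a tuple, because waypoints are split on ", ". A standalone sketch with made-up strings (not part of the commit), which also shows that np.linalg.norm without an axis argument returns the Frobenius norm over the whole 6 x 2 difference matrix, rather than the conventional per-waypoint mean L2 (ADE):

import ast
import numpy as np

# Made-up answer/GT strings in the format the parser assumes.
answer = "... and its 3-second future trajectory is [(1.0,2.0), (2.1,4.0), (3.2,6.1), (4.0,8.3), (4.9,10.0), (5.8,12.2)]"
GT = "The autonomous vehicle's 3-second future trajectory is [(1.1,2.1), (2.0,4.2), (3.3,6.0), (4.1,8.1), (5.0,10.2), (6.0,12.0)]"

def extract_traj(response):
    # Same parsing steps as extract_traj_from_response above.
    body = response.split('[', 1)[1].split(']')[0]
    return np.array([list(ast.literal_eval(s)) for s in body.split(', ')])  # 6 x 2

a, g = extract_traj(answer), extract_traj(GT)
print(np.linalg.norm(a - g))                 # what the commit computes: Frobenius norm over all six waypoints
print(np.linalg.norm(a - g, axis=1).mean())  # conventional ADE: mean per-waypoint L2 error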
@@ -498,19 +547,24 @@ class evaluation_suit():
             self.match["match"]["GT"].append(GT)
             self.match["match"]["answer"].append(answer)
             self.match["GPT"].append((answer, GT))
+        if 4 in tag:
+            self.traj_L2["GT"].append(GT)
+            self.traj_L2["answer"].append(answer)
 
 
     def evaluation(self):
        print("evaluation start!")
        scores = {}
-        scores["accuracy"] = self.eval_acc()
-        print("USE_INTERNAL: ", USE_INTERNAL)
-        if USE_INTERNAL:
-            scores["chatgpt"] = self.eval_chatGPT_internal(self.GPT)
-        else:
-            scores["chatgpt"] = self.eval_chatGPT(self.GPT)
-        scores.update(self.eval_language())
-        scores["match"] = self.eval_match()
+        # scores["accuracy"] = self.eval_acc()
+        # print("USE_INTERNAL: ", USE_INTERNAL)
+        # if USE_INTERNAL:
+        #     scores["chatgpt"] = self.eval_chatGPT_internal(self.GPT)
+        # else:
+        #     scores["chatgpt"] = self.eval_chatGPT(self.GPT)
+        # scores.update(self.eval_language())
+        # scores["match"] = self.eval_match()
+        scores["traj_l2"] = self.eval_traj_L2()
+        scores["chatgpt_longtail_behavior"] = self.eval_long_tail_behavior_planning_gpt_score(self.GPT)
 
         return scores
 
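The tag routing that feeds these pools is easiest to see in isolation. A minimal standalone sketch (container names mirror the diff; the existing "1 in tag -> self.GPT" branch of DriveLM's forward() is assumed, the "4 in tag" branch is what this commit adds):

# Simplified stand-ins for the evaluation_suit attributes.
GPT = []                            # pairs scored by eval_long_tail_behavior_planning_gpt_score
traj_L2 = {"GT": [], "answer": []}  # pairs scored by eval_traj_L2

def route(tag, answer, GT):
    if 1 in tag:
        GPT.append((answer, GT))
    if 4 in tag:
        traj_L2["GT"].append(GT)
        traj_L2["answer"].append(answer)

# Every long-tail QA pair is tagged [1, 4], so it is scored both ways.
route([1, 4], "predicted behavior ...", "ground-truth behavior ...")
assert len(GPT) == 1 and len(traj_L2["answer"]) == 1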
@@ -562,33 +616,25 @@ def compute(params, quiet=True):
 
     try:
         evaluation = evaluation_suit()
-        output = {"
+        output = {"chatgpt": [], "traj_l2": []}
         for scene_id in test_file.keys():
             scene_data = test_file[scene_id]['key_frames']
 
             for frame_id in scene_data.keys():
                 frame_data_qa = scene_data[frame_id]['QA']
-                first_flag = True
-
-                for i, qa in enumerate(frame_data_qa["perception"] + frame_data_qa["prediction"] + frame_data_qa["planning"] + frame_data_qa["behavior"]):
+                if 'long_tail_behavior_planning' not in frame_data_qa:
+                    continue
+                for i, qa in enumerate(frame_data_qa["long_tail_behavior_planning"]):
                     question = qa['Q']
                     GT = qa['A']
-                    tag = qa['tag']
-                    idx = scene_id + "_" + frame_id + "_" + str(i)
+                    tag = [1,4]
+                    idx = scene_id + "_" + frame_id + "_lt_" + str(i)
                     predict = pred_file[idx]["answer"]
-                    if first_flag:
-                        first_flag = False
-                        evaluation.set_graph(predict, GT)
-                        evaluation.forward(tag, predict, GT)
-                    else:
-                        if evaluation.eval_graph(question):
-                            res = evaluation.forward(tag, predict, GT)
+                    evaluation.forward(tag, predict, GT)
 
         output = evaluation.evaluation()
-        print("accuracy score: ", output["accuracy"])
-        print("chatgpt score: ", output["chatgpt"])
-        print("match score: ", output["match"])
-        print("language score:")
+        print("chatgpt score: ", output["chatgpt_longtail_behavior"])
+        print("traj score: ", output["traj_l2"])
         for key in evaluation.language_score_keys:
             print(key, output[key])
 
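Since predictions are looked up by composite key, long-tail answers must be stored under scene_id + "_" + frame_id + "_lt_" + str(i). A sketch with hypothetical ids (made-up values, not from the dataset):

scene_id = "f0f120e4d4b0441da90ec53b16ee169d"  # hypothetical scene token
frame_id = "4a0798f849ca477ab18009c3a20b7df2"  # hypothetical frame token
i = 0
idx = scene_id + "_" + frame_id + "_lt_" + str(i)
# A submission file must therefore contain an entry like:
pred_file = {idx: {"answer": "The ego vehicle should brake ..."}}
predict = pred_file[idx]["answer"]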
@@ -597,31 +643,12 @@ def compute(params, quiet=True):
         weights = [0.4, 0.2, 0.2, 0.2]
 
         # chatGPT
-        score = output["chatgpt"] / 100.
+        score = output["chatgpt_longtail_behavior"] / 100.
         scores.append(score)
 
         # language
-        score = 0
-        for idx, key in enumerate(evaluation.language_score_keys):
-            if idx < 4:
-                score += output[key] / 4. / 3.
-            elif idx == 4:
-                score += output[key] / 3.
-            else:
-                score += output[key] / 10. / 3.
-
-        scores.append(score)
-
-        # match
-        score = output["match"] / 100.
-        scores.append(score)
-
-        # accuracy
-        score = output["accuracy"]
-        scores.append(score)
 
-        final_score = sum([x * y for x, y in zip(scores, weights)])
-        output["final_score"] = final_score
+        output["final_score"] = score
 
     except Exception as e:
         error_message = "Evaluation failed. " + str(e)
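With the language, match, and accuracy terms removed, only the long-tail GPT score reaches final_score, and eval_long_tail_behavior_planning_gpt_score currently returns the hard-coded placeholder 50 (the chatgpt_eval.forward call is commented out). The arithmetic therefore reduces to (illustrative values):

output = {"chatgpt_longtail_behavior": 50.0, "traj_l2": 0.37}  # traj_l2 value is illustrative
score = output["chatgpt_longtail_behavior"] / 100.             # 0.5 for every submission while the stub is in place
output["final_score"] = score                                  # traj_l2 is printed but not folded into final_score
print(output["final_score"])  # 0.5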