Spaces:
Sleeping
Sleeping
bofenghuang
committed on
Commit
•
8219057
1
Parent(s):
6ca16de
up
Browse files
common.py
CHANGED
@@ -11,12 +11,10 @@ import re
|
|
11 |
import time
|
12 |
from typing import Optional
|
13 |
|
14 |
-
import openai
|
15 |
-
import anthropic
|
16 |
|
17 |
-
from
|
18 |
-
|
19 |
-
from fastchat.model.model_adapter import get_conversation_template, ANTHROPIC_MODEL_LIST, MISTRAL_MODEL_LIST
|
20 |
|
21 |
# API setting constants
|
22 |
API_MAX_RETRY = 16
|
@@ -167,10 +165,6 @@ def run_judge_single(question, answer, judge, ref_answer, multi_turn=False):
|
|
167 |
judgment = chat_compeletion_anthropic(
|
168 |
model, conv, temperature=0, max_tokens=1024
|
169 |
)
|
170 |
-
elif model in MISTRAL_MODEL_LIST:
|
171 |
-
judgment = chat_compeletion_mistral(
|
172 |
-
model, conv, temperature=0, max_tokens=1024
|
173 |
-
)
|
174 |
else:
|
175 |
raise ValueError(f"Invalid judge model name: {model}")
|
176 |
|
@@ -516,26 +510,6 @@ def chat_compeletion_palm(chat_state, model, conv, temperature, max_tokens):
|
|
516 |
return chat_state, output
|
517 |
|
518 |
|
519 |
-
def chat_compeletion_mistral(model, conv, temperature, max_tokens):
|
520 |
-
output = API_ERROR_OUTPUT
|
521 |
-
for _ in range(API_MAX_RETRY):
|
522 |
-
try:
|
523 |
-
c = MistralClient(api_key=os.environ["MISTRAL_API_KEY"])
|
524 |
-
messages = conv.to_mistralai_api_messages()
|
525 |
-
response = c.chat(
|
526 |
-
model=model,
|
527 |
-
messages=messages,
|
528 |
-
temperature=temperature,
|
529 |
-
max_tokens=max_tokens,
|
530 |
-
)
|
531 |
-
output = response.choices[0].message.content
|
532 |
-
break
|
533 |
-
except Exception as e:
|
534 |
-
print(type(e), e)
|
535 |
-
time.sleep(API_RETRY_SLEEP)
|
536 |
-
return output
|
537 |
-
|
538 |
-
|
539 |
def normalize_game_key_single(gamekey, result):
|
540 |
"""Make the model names sorted in a game key."""
|
541 |
qid, model_1, model_2 = gamekey
|
|
|
11 |
import time
|
12 |
from typing import Optional
|
13 |
|
14 |
+
# import openai
|
15 |
+
# import anthropic
|
16 |
|
17 |
+
# from fastchat.model.model_adapter import get_conversation_template, ANTHROPIC_MODEL_LIST, MISTRAL_MODEL_LIST
|
|
|
|
|
18 |
|
19 |
# API setting constants
|
20 |
API_MAX_RETRY = 16
|
|
|
165 |
judgment = chat_compeletion_anthropic(
|
166 |
model, conv, temperature=0, max_tokens=1024
|
167 |
)
|
|
|
|
|
|
|
|
|
168 |
else:
|
169 |
raise ValueError(f"Invalid judge model name: {model}")
|
170 |
|
|
|
510 |
return chat_state, output
|
511 |
|
512 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
513 |
def normalize_game_key_single(gamekey, result):
|
514 |
"""Make the model names sorted in a game key."""
|
515 |
qid, model_1, model_2 = gamekey
|