IliaLarchenko committed on
Commit
9f4cedf
1 Parent(s): b5acaa5

Candidate switch to stream and modes

Browse files
Files changed (1) hide show
  1. tests/candidate.py +38 -23
tests/candidate.py CHANGED
@@ -23,9 +23,11 @@ def complete_interview(
23
  topic: str = "",
24
  model: str = "gpt-3.5-turbo",
25
  pause: int = 0,
 
 
26
  ) -> Tuple[str, Dict]:
27
  """
28
- Complete an interview and record the results.
29
 
30
  :param interview_type: Type of interview to complete.
31
  :param exp_name: Experiment name for file saving.
@@ -35,6 +37,8 @@ def complete_interview(
35
  :param topic: Topic for the interview.
36
  :param model: Model to use for the candidate.
37
  :param pause: Pause duration between requests to prevent rate limits.
 
 
38
  :return: Tuple containing the file path and interview data.
39
  """
40
  client = OpenAI(base_url="https://api.openai.com/v1")
@@ -48,7 +52,8 @@ def complete_interview(
48
  topic = topic or random.choice(topic_lists[interview_type])
49
  difficulty = difficulty or random.choice(["easy", "medium", "hard"])
50
 
51
- problem_statement_text = llm.get_problem_full(requirements, difficulty, topic, interview_type)
 
52
 
53
  interview_data = defaultdict(
54
  lambda: None,
@@ -80,33 +85,41 @@ def complete_interview(
80
  response_times = []
81
  previous_code = ""
82
 
83
- for _ in range(30):
84
- response = client.chat.completions.create(
85
- model=model, messages=messages_candidate, temperature=1, response_format={"type": "json_object"}
86
- )
87
- response_json = json.loads(response.choices[0].message.content)
88
-
89
- code = response_json.get("code_and_notes", "")
90
- candidate_message = response_json.get("message", "")
91
-
92
- if not code and not candidate_message:
93
- print("No message or code in response")
 
 
 
 
 
 
 
 
 
 
94
  continue
95
 
96
- messages_candidate.append({"role": "assistant", "content": response.choices[0].message.content})
97
 
98
- if code:
99
- interview_data["transcript"].append(f"CANDIDATE CODE AND NOTES: {code}")
100
- elif candidate_message:
101
- interview_data["transcript"].append(f"CANDIDATE MESSAGE: {candidate_message}")
102
 
103
  chat_display.append([candidate_message, None])
104
 
105
- if response_json.get("finished") and not response_json.get("question"):
106
- break
107
-
108
  send_time = time.time()
109
- messages_interviewer, chat_display, previous_code = llm.send_request_full(code, previous_code, messages_interviewer, chat_display)
 
 
 
 
110
  response_times.append(time.time() - send_time)
111
 
112
  messages_candidate.append({"role": "user", "content": chat_display[-1][1]})
@@ -119,7 +132,9 @@ def complete_interview(
119
 
120
  time.sleep(pause) # to prevent exceeding rate limits
121
 
122
- interview_data["feedback"] = llm.end_interview_full(problem_statement_text, messages_interviewer, interview_type)
 
 
123
  interview_data["average_response_time_seconds"] = round(sum(response_times) / len(response_times), 2) if response_times else 0
124
 
125
  current_time = time.strftime("%Y%m%d-%H%M%S")
 
23
  topic: str = "",
24
  model: str = "gpt-3.5-turbo",
25
  pause: int = 0,
26
+ mode: str = "normal",
27
+ max_messages: Optional[int] = None,
28
  ) -> Tuple[str, Dict]:
29
  """
30
+ Complete an interview and record the results with additional strange use cases.
31
 
32
  :param interview_type: Type of interview to complete.
33
  :param exp_name: Experiment name for file saving.
 
37
  :param topic: Topic for the interview.
38
  :param model: Model to use for the candidate.
39
  :param pause: Pause duration between requests to prevent rate limits.
40
+ :param mode: Mode of operation ("normal", "empty", "gibberish", "repeat").
41
+ :param max_messages: Maximum number of messages in the conversation.
42
  :return: Tuple containing the file path and interview data.
43
  """
44
  client = OpenAI(base_url="https://api.openai.com/v1")
 
52
  topic = topic or random.choice(topic_lists[interview_type])
53
  difficulty = difficulty or random.choice(["easy", "medium", "hard"])
54
 
55
+ for problem_statement_text in llm.get_problem(requirements, difficulty, topic, interview_type):
56
+ pass
57
 
58
  interview_data = defaultdict(
59
  lambda: None,
 
85
  response_times = []
86
  previous_code = ""
87
 
88
+ if max_messages is None:
89
+ max_messages = 30 if mode == "normal" else 5
90
+
91
+ for _ in range(max_messages):
92
+ if mode == "empty":
93
+ response_content = ""
94
+ elif mode == "gibberish":
95
+ response_content = "".join(random.choices(string.ascii_letters + string.digits, k=50))
96
+ elif mode == "repeat":
97
+ response_content = chat_display[-1][1]
98
+ else: # normal mode
99
+ response = client.chat.completions.create(
100
+ model=model, messages=messages_candidate, temperature=1, response_format={"type": "json_object"}
101
+ )
102
+ response_json = json.loads(response.choices[0].message.content)
103
+ response_content = response_json.get("message", "")
104
+
105
+ candidate_message = response_content
106
+
107
+ if not candidate_message and mode != "empty":
108
+ print("No message in response")
109
  continue
110
 
111
+ messages_candidate.append({"role": "assistant", "content": candidate_message})
112
 
113
+ interview_data["transcript"].append(f"CANDIDATE MESSAGE: {candidate_message}")
 
 
 
114
 
115
  chat_display.append([candidate_message, None])
116
 
 
 
 
117
  send_time = time.time()
118
+ for messages_interviewer, chat_display, previous_code in llm.send_request(
119
+ candidate_message, previous_code, messages_interviewer, chat_display
120
+ ):
121
+ pass
122
+
123
  response_times.append(time.time() - send_time)
124
 
125
  messages_candidate.append({"role": "user", "content": chat_display[-1][1]})
 
132
 
133
  time.sleep(pause) # to prevent exceeding rate limits
134
 
135
+ for fb in llm.end_interview(problem_statement_text, messages_interviewer, interview_type):
136
+ interview_data["feedback"] = fb
137
+
138
  interview_data["average_response_time_seconds"] = round(sum(response_times) / len(response_times), 2) if response_times else 0
139
 
140
  current_time = time.strftime("%Y%m%d-%H%M%S")