-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathinterview_agent.py
More file actions
51 lines (43 loc) · 2.32 KB
/
interview_agent.py
File metadata and controls
51 lines (43 loc) · 2.32 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
"""Interview agent: runs screening questions and collects candidate answers."""
from task import Task, TaskStatus
from log_utils import log_step
# Mock answers used when candidate_data["answers"] is not provided.
# One canned response per screening theme (API design, debugging approach,
# data modeling, idempotency, tech-debt management). conduct_interview
# cycles through this list when there are more questions than entries.
_DEFAULT_MOCK_ANSWERS = [
    "We used a versioned REST API with backward compatibility; we chose consistency over brevity in payloads.",
    "I start by reproducing locally, add logging or metrics, then narrow with binary search or bisect on deploys.",
    "Junction table for many-to-many; I use it when both sides can have multiple associations and I need extra attributes on the link.",
    "Idempotency key in the request, stored with outcome; we reject duplicates and return the same result for the same key.",
    "We timebox refactors, track tech debt in the backlog, and align with the team on a sustainable pace.",
]
class InterviewAgent:
    """
    Conducts the interview phase: asks interview_questions and collects answers.

    Uses candidate_data["answers"] if present; otherwise mocks answers.
    Controlled by WorkflowManager. No UI logic.
    """

    def __init__(self) -> None:
        pass

    def conduct_interview(self, task: Task) -> Task:
        """
        Simulate asking task.interview_questions and record answers.

        Answers come from task.candidate_data["answers"] when provided.
        A partial answer list is kept and only the shortfall is mocked
        (the original implementation silently discarded a short list and
        mocked everything, contradicting this documented contract).
        With no provided answers, every question gets a mock answer,
        cycling through _DEFAULT_MOCK_ANSWERS.

        Returns the updated task (via model_copy, so the input task is
        not mutated) with candidate_answers set and status = interviewing.
        """
        log_step("Interview Agent", "Conducting technical screening (Q&A)")
        questions = task.interview_questions or []

        # candidate_data may be None/empty on a partially-populated task;
        # treat anything that is not a list as "no answers provided".
        # Single lookup instead of the original double .get("answers").
        provided = (task.candidate_data or {}).get("answers")
        if not isinstance(provided, list):
            provided = []

        # Keep the candidate's answers (truncated to the question count),
        # then pad any shortfall with mocks. Indexing the mocks by the
        # question position keeps the mock-for-question-i identical to the
        # original behavior when no answers were provided.
        answers = list(provided[: len(questions)])
        for i in range(len(answers), len(questions)):
            answers.append(_DEFAULT_MOCK_ANSWERS[i % len(_DEFAULT_MOCK_ANSWERS)])

        log_step(
            "Interview Agent",
            "Screening complete",
            {"answers_collected": len(answers), "questions_answered": len(questions)},
        )
        return task.model_copy(
            update={
                "candidate_answers": answers,
                "status": TaskStatus.interviewing,
            },
            deep=True,
        )