Skip to content

Commit

Permalink
Isolating the non-releasing call
Browse files Browse the repository at this point in the history
  • Loading branch information
johnjosephhorton committed Jan 18, 2025
1 parent 77f7ab2 commit c95d448
Show file tree
Hide file tree
Showing 7 changed files with 55 additions and 11 deletions.
7 changes: 6 additions & 1 deletion edsl/agents/Invigilator.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,10 @@ async def async_get_agent_response(self) -> AgentResponseDict:
if self.key_lookup:
self.model.set_key_lookup(self.key_lookup)

return await self.model.async_get_response(**params)
print("\t\t\tIn invigilator, waiting on the language model")
response = await self.model.async_get_response(**params)
print("\t\t\tGot response from language model")
return response

def store_response(self, agent_response_dict: AgentResponseDict) -> None:
"""Store the response in the invigilator, in case it is needed later because of validation failure."""
Expand All @@ -55,7 +58,9 @@ async def async_answer_question(self) -> EDSLResultObjectInput:
>>> i = InvigilatorAI.example()
"""
print("In the InvigitatorAI async_answer_question - waiting on agent response")
agent_response_dict: AgentResponseDict = await self.async_get_agent_response()
print("Got agent response")
self.store_response(agent_response_dict)
return self._extract_edsl_result_entry_and_validate(agent_response_dict)

Expand Down
8 changes: 6 additions & 2 deletions edsl/jobs/AnswerQuestionFunctionConstructor.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,8 +131,8 @@ def _handle_exception(
if stop_on_exception:
raise e

def __call__(self):
return self.answer_question_and_record_task
# def __call__(self):
# return self.answer_question_and_record_task

async def answer_question_and_record_task(
self,
Expand Down Expand Up @@ -167,9 +167,13 @@ async def attempt_answer():
)

try:
print(
"Awaiting on the invigilator (inside the AnswerQuestionFunctionConstructor.py file)"
)
response: EDSLResultObjectInput = (
await invigilator.async_answer_question()
)
print("Got a response back")
if response.validated:
self.interview.answers.add_answer(
response=response, question=question
Expand Down
7 changes: 4 additions & 3 deletions edsl/jobs/InterviewTaskManager.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ def _create_single_task(
)

for dependency in dependencies:
print("Adding a dependency")
task_creator.add_dependency(dependency)

self.task_creators[question.question_name] = task_creator
Expand All @@ -82,9 +83,9 @@ def task_status_logs(self) -> "InterviewStatusLog":
The keys are the question names; the values are the lists of status log changes for each task.
"""
for task_creator in self.task_creators.values():
self._task_status_log_dict[
task_creator.question.question_name
] = task_creator.status_log
self._task_status_log_dict[task_creator.question.question_name] = (
task_creator.status_log
)
return self._task_status_log_dict

@property
Expand Down
10 changes: 7 additions & 3 deletions edsl/jobs/interviews/Interview.py
Original file line number Diff line number Diff line change
Expand Up @@ -263,10 +263,11 @@ async def async_conduct_interview(
model_buckets = ModelBuckets.infinity_bucket()

# was "self.tasks" - is that necessary?
afc = AnswerQuestionFunctionConstructor(
self, key_lookup=run_config.environment.key_lookup
)
self.tasks = self.task_manager.build_question_tasks(
answer_func=AnswerQuestionFunctionConstructor(
self, key_lookup=run_config.environment.key_lookup
)(),
answer_func=afc.answer_question_and_record_task,
token_estimator=RequestTokenEstimator(self),
model_buckets=model_buckets,
)
Expand All @@ -281,9 +282,12 @@ async def async_conduct_interview(
key_lookup=run_config.environment.key_lookup,
)
self.invigilators = [fetcher(question) for question in self.survey.questions]
# breakpoint()
print("Ready to run tasks")
await asyncio.gather(
*self.tasks, return_exceptions=not run_config.parameters.stop_on_exception
)
print("Tasks are done")
self.answers.replace_missing_answers_with_none(self.survey)
valid_results = list(
self._extract_valid_results(self.tasks, self.invigilators, self.exceptions)
Expand Down
1 change: 1 addition & 0 deletions edsl/jobs/runners/JobsRunnerAsyncio.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ async def get_results(results) -> None:
"""Conducted the interviews and append to the results list."""
result_generator = AsyncInterviewRunner(self.jobs, run_config)
async for result, interview in result_generator.run():
# print("Line 100 in JobsRunnerAsyncio.py - iterating through results")
results.append(result)
results.task_history.add_interview(interview)

Expand Down
9 changes: 7 additions & 2 deletions edsl/language_models/LanguageModel.py
Original file line number Diff line number Diff line change
Expand Up @@ -394,11 +394,14 @@ async def _async_get_intended_model_call_outcome(
from edsl.config import CONFIG

TIMEOUT = float(CONFIG.get("EDSL_API_TIMEOUT"))
print("#######DEBUG############")
# print("#######DEBUG############")
spacer = "".join(["\t"] * 7)
print(
"Calling model to answer question prompt:\n" + user_prompt, flush=True
spacer + "Calling model to answer question prompt:" + user_prompt,
flush=True,
)
response = await asyncio.wait_for(f(**params), timeout=TIMEOUT)
print(spacer + "Model call completed", flush=True)
new_cache_key = cache.store(
**cache_call_params, response=response
) # store the response in the cache
Expand Down Expand Up @@ -459,9 +462,11 @@ async def async_get_response(
params.update({"invigilator": kwargs["invigilator"]})

model_inputs = ModelInputs(user_prompt=user_prompt, system_prompt=system_prompt)
print("\t\t\t\t In LanguageModel - waiting on model call")
model_outputs: ModelResponse = (
await self._async_get_intended_model_call_outcome(**params)
)
print("\t\t\t\t In LanguageModel - model call completed")
edsl_dict: EDSLOutput = self.parse_response(model_outputs.response)

agent_response_dict = AgentResponseDict(
Expand Down
24 changes: 24 additions & 0 deletions test_async_run.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
"""Smoke-test script: run a four-question survey against the built-in "test" model.

Exercises the async job runner end-to-end with remote inference disabled,
the cache off, and ``stop_on_exception=True`` so any failure surfaces
immediately instead of being swallowed by the runner.
"""

# All imports consolidated at the top of the file (the original scattered a
# second ``from edsl import Model`` mid-script).
from edsl import Model, QuestionFreeText, Survey

# Two pairs of near-duplicate questions so the runner has several independent
# tasks to schedule concurrently.
q1 = QuestionFreeText(
    question_name="q1", question_text="What is your favorite primary color 1?"
)
q2 = QuestionFreeText(
    question_name="q2",
    question_text="What is your favorite primary programming language?",
)

q3 = QuestionFreeText(
    question_name="q3", question_text="What is your favorite primary color 2?"
)
q4 = QuestionFreeText(
    question_name="q4",
    question_text="What is your favorite primary programming language 2?",
)

m = Model("test")  # the canned "test" model: no API key or network needed
s = Survey(questions=[q1, q2, q3, q4])
res = s.by(m).run(disable_remote_inference=True, cache=False, stop_on_exception=True)
# NOTE(review): the selection result is discarded here — presumably this line
# exists only to exercise the results API; wrap in print() if output is wanted.
res.select("answer.*")

0 comments on commit c95d448

Please sign in to comment.