Skip to content

Commit

Permalink
Add a print message before calling the LLM `async_execute_model_call` method
Browse files Browse the repository at this point in the history
  • Loading branch information
zer0dss committed Jan 17, 2025
1 parent 4013d70 commit 090e8a1
Show file tree
Hide file tree
Showing 2 changed files with 5 additions and 2 deletions.
1 change: 1 addition & 0 deletions edsl/jobs/Jobs.py
Original file line number Diff line number Diff line change
Expand Up @@ -515,6 +515,7 @@ async def _execute_with_remote_cache(self, run_job_async: bool) -> Results:
# remote_cache_description=self.run_config.parameters.remote_cache_description,
# ):
runner = JobsRunnerAsyncio(self, environment=self.run_config.environment)
run_job_async = True
if run_job_async:
results = await runner.run_async(self.run_config.parameters)
else:
Expand Down
6 changes: 4 additions & 2 deletions edsl/language_models/LanguageModel.py
Original file line number Diff line number Diff line change
Expand Up @@ -394,7 +394,10 @@ async def _async_get_intended_model_call_outcome(
from edsl.config import CONFIG

TIMEOUT = float(CONFIG.get("EDSL_API_TIMEOUT"))

print("#######DEBUG############")
print(
"Calling model to answer question prompt:\n" + user_prompt, flush=True
)
response = await asyncio.wait_for(f(**params), timeout=TIMEOUT)
new_cache_key = cache.store(
**cache_call_params, response=response
Expand Down Expand Up @@ -571,7 +574,6 @@ def example(
return Model(skip_api_key_check=True)

def from_cache(self, cache: "Cache") -> LanguageModel:

from copy import deepcopy
from types import MethodType
from edsl import Cache
Expand Down

0 comments on commit 090e8a1

Please sign in to comment.