You want a production-shaped Jido.AI.Agent with tools, request handles, and explicit runtime control.
After this guide, you will run a custom tool, submit async requests, and await specific request IDs.
defmodule MyApp.Actions.AddNumbers do
  # A minimal custom tool action. NOTE(review): presumably Jido validates
  # params against `schema` before invoking run/2 — confirm against Jido docs.
  use Jido.Action,
    name: "add_numbers",
    schema: Zoi.object(%{a: Zoi.integer(), b: Zoi.integer()}),
    description: "Add two numbers."

  # Returns {:ok, result_map}; the map is the tool's output payload.
  @impl true
  def run(%{a: a, b: b}, _context), do: {:ok, %{sum: a + b}}
end

defmodule MyApp.MathAgent do
use Jido.AI.Agent,
  name: "math_agent",
  # :fast is a model alias — presumably resolved by Jido.AI's model
  # configuration; confirm which concrete model it maps to.
  model: :fast,
  tools: [MyApp.Actions.AddNumbers],
  # NOTE(review): looks like this bounds the reason/act loop per request — confirm.
  max_iterations: 8,
  system_prompt: "Solve accurately. Use tools for arithmetic.",
  # Provider-level options: extended-thinking budget and reasoning effort.
  llm_opts: [thinking: %{type: :enabled, budget_tokens: 2048}, reasoning_effort: :high]
end

Use effect_policy to bound which tool-emitted effects are allowed at runtime.
defmodule MyApp.SafeMathAgent do
  use Jido.AI.Agent,
    name: "safe_math_agent",
    model: :fast,
    tools: [MyApp.Actions.AddNumbers],
    # Allow-list mode: only the listed effect types may be applied at runtime.
    effect_policy: %{
      mode: :allow_list,
      allow: [Jido.Agent.StateOp.SetState, Jido.Agent.Directive.Emit]
    },
    # Further constrain Emit effects to signals under the "app.math." prefix.
    strategy_effect_policy: %{
      constraints: %{
        emit: %{allowed_signal_prefixes: ["app.math."]}
      }
    }
end

strategy_effect_policy.constraints accepts atom keys, string keys, or keyword lists (including nested emit and schedule maps). Inputs are normalized before policy enforcement.
# Boot an AgentServer process hosting the agent, then submit a request.
{:ok, pid} = Jido.AgentServer.start(agent: MyApp.MathAgent)
{:ok, req} = MyApp.MathAgent.ask(pid, "What is 19 + 23?")

# Await this specific request handle for up to 15 seconds.
{:ok, result} = MyApp.MathAgent.await(req, timeout: 15_000)

# Per-request overrides
{:ok, req2} =
  MyApp.MathAgent.ask(pid, "Explain how you solved that",
    # Restrict this turn to the named tool only.
    allowed_tools: ["add_numbers"],
    # Presumably made available to tool executions for this request — verify.
    tool_context: %{tenant_id: "acme"},
    # Overrides the agent-level llm_opts for this request.
    llm_opts: [reasoning_effort: :medium]
)

Use request_transformer when the next LLM turn should depend on runtime state produced by earlier tools.
defmodule MyApp.ClassifierTransformer do
  # Computes per-turn request overrides from runtime state: until any codes
  # have been seen, tools pass through untouched; once codes exist, tools are
  # disabled and the response is constrained to an enum of those codes.
  def transform_request(request, _state, _config, runtime_context) do
    runtime_context
    |> get_in([:state, :seen_codes])
    |> Kernel.||([])
    |> overrides_for(request)
  end

  # No codes seen yet: keep the request's tool set unchanged.
  defp overrides_for([], request), do: {:ok, %{tools: request.tools}}

  # Codes available: drop all tools and force a structured response whose
  # `code` field must be one of the previously seen codes.
  defp overrides_for(codes, _request) do
    {:ok,
     %{
       tools: %{},
       llm_opts: [
         provider_options: [
           response_schema: %{
             type: "object",
             properties: %{code: %{enum: codes}}
           }
         ]
       ]
     }}
  end
end
defmodule MyApp.ClassifierAgent do
  use Jido.AI.Agent,
    name: "classifier_agent",
    model: :capable,
    tools: [MyApp.Actions.FindSimilarCodes],
    # Invoked before each LLM turn to compute per-turn request overrides
    # (see MyApp.ClassifierTransformer.transform_request/4).
    request_transformer: MyApp.ClassifierTransformer
end

Then let your retrieval tool write the allowlist into runtime state:
# Example tool return: the third element is a list of state operations.
# SetState persists seen_codes into runtime state, where the
# request_transformer reads it on the next turn ([:state, :seen_codes]).
{:ok, %{seen_codes: codes},
 [
   %Jido.Agent.StateOp.SetState{
     attrs: %{seen_codes: codes}
   }
]}

This pattern is the clean way to implement "show only the IDs the model has seen" flows. Prefer runtime state plus request_transformer over parsing thread history.
# Build a signal that updates the agent's tool context out-of-band.
signal =
  Jido.Signal.new!(
    "ai.react.set_tool_context",
    %{tool_context: %{tenant_id: "acme"}},
    source: "/docs/example"
  )
:ok = Jido.AgentServer.cast(pid, signal)

{:ok, _agent} = Jido.AI.set_system_prompt(pid, "You are a concise support specialist.")

If you persist the conversation history (e.g. from snapshot.details.conversation),
you can restore it on restart so the agent resumes where it left off.
saved_messages = snapshot.details.conversation

# Split out one leading system message (if present) so it does not become
# a duplicate context entry.
{saved_system_prompt, conversation_messages} =
  case saved_messages do
    # Accepts either atom or string role keys; only strips the system
    # message when its content is a plain binary.
    [%{role: role, content: content} | rest]
    when role in [:system, "system"] and is_binary(content) ->
      {content, rest}

    _ ->
      {nil, saved_messages}
  end

# At start time — pass the saved context via initial_state:
context =
  Jido.AI.Context.new(system_prompt: saved_system_prompt)
  |> Jido.AI.Context.append_messages(conversation_messages)
Jido.AgentServer.start_link(agent: MyAgent, initial_state: %{context: context})

When restoring with initial_state: %{context: context}, a nil
context.system_prompt is backfilled from the agent's configured prompt.
If you enable the retrieval plugin, auto-enrichment does not run on ai.react.query signals.
Recall memory explicitly and prepend it to your prompt. See Retrieval And Quota for details.
Symptom:
- Request completes with tool error {:error, :not_a_tool} when registering dynamically

Fix:
- Ensure the module exports name/0, schema/0, and run/2
- Validate the module with Jido.AI.register_tool(pid, ToolModule)

Defaults:
- request_policy default: :reject
- Tool timeout default: 15_000 ms
- Tool retry defaults: 1 retry, 200 ms backoff
Use this approach when:
- You need reasoning plus tool execution
- You need per-request correlation and awaiting
Do not use this approach when:
- You only need deterministic, single-pass text completion