-
Notifications
You must be signed in to change notification settings - Fork 0
Observability Hooks
Monitor what your agents do at runtime -- tool calls, knowledge fetches, and skill selections -- without modifying business logic.
Agents.KT provides three hooks on the Agent type. Each fires at a specific point in the agent's execution lifecycle:
| Hook | Fires When | Signature |
|---|---|---|
| `onToolUse` | After a tool executor completes | `(name: String, args: Map<String, Any?>, result: Any?) -> Unit` |
| `onKnowledgeUsed` | When the LLM fetches a knowledge entry | `(name: String, content: String) -> Unit` |
| `onSkillChosen` | After skill selection resolves | `(name: String) -> Unit` |
All hooks are optional. They do not affect execution -- the agent runs identically with or without them. They are purely observational.
Fires after every tool execution within the agentic loop. You receive the tool name, the arguments the LLM provided, and the result the executor returned.
val agent = agent<String, String>("file-ops") {
model { ollama("qwen2.5:7b") }
budget { maxTurns = 10 }
skills {
skill<String, String>("manage", "Manage files") {
tools("read_file", "write_file")
tool("read_file", "Read a file") { args ->
File(args["path"] as String).readText()
}
tool("write_file", "Write a file") { args ->
val path = args["path"] as String
val content = args["content"] as String
File(path).writeText(content)
"Written ${content.length} bytes"
}
}
}
onToolUse { name, args, result ->
println("TOOL [$name] args=$args result=$result")
}
}

Output when the agent reads and writes a file:
TOOL [read_file] args={path=/tmp/input.txt} result=Hello, World!
TOOL [write_file] args={path=/tmp/output.txt, content=HELLO, WORLD!} result=Written 13 bytes
Structured logging:
onToolUse { name, args, result ->
logger.info(
"tool_call",
mapOf(
"tool" to name,
"args" to args,
"result_type" to result?.javaClass?.simpleName,
"result_length" to result?.toString()?.length
)
)
}

Metrics collection:
onToolUse { name, args, result ->
metrics.counter("agent.tool.calls", "tool" to name).increment()
metrics.timer("agent.tool.duration", "tool" to name).record(duration)
}

Audit trail:
val auditLog = mutableListOf<AuditEntry>()
onToolUse { name, args, result ->
auditLog.add(AuditEntry(
timestamp = Instant.now(),
tool = name,
args = args,
result = result?.toString()
))
}

Fires when the LLM decides to fetch a knowledge entry during an agentic skill. You receive the knowledge entry's name and its content.
val agent = agent<String, String>("support-bot") {
model { ollama("qwen2.5:7b") }
budget { maxTurns = 5 }
skills {
skill<String, String>("answer", "Answer questions") {
tools("search_db")
knowledge("faq", "Frequently asked questions") {
loadText("/data/faq.txt")
}
knowledge("pricing", "Current pricing information") {
loadText("/data/pricing.txt")
}
tool("search_db", "Search the support database") { args ->
supportDb.search(args["query"] as String)
}
}
}
onKnowledgeUsed { name, content ->
println("KNOWLEDGE [$name] loaded ${content.length} chars")
}
}

Output when the LLM fetches the FAQ:
KNOWLEDGE [faq] loaded 4823 chars
Track knowledge relevance:
val knowledgeHits = mutableMapOf<String, Int>()
onKnowledgeUsed { name, content ->
knowledgeHits.merge(name, 1, Int::plus)
}
// After many runs:
// knowledgeHits = {faq=142, pricing=87, policies=23}
// "policies" is rarely used -- consider removing or restructuring itMonitor knowledge freshness:
onKnowledgeUsed { name, content ->
val lastUpdated = knowledgeMetadata[name]?.lastUpdated
if (lastUpdated != null && lastUpdated.isBefore(Instant.now().minus(30, ChronoUnit.DAYS))) {
logger.warn("Knowledge '$name' is over 30 days old -- consider refreshing")
}
}

Fires after skill selection resolves, regardless of which strategy was used (predicate, LLM routing, or first-match). You receive the name of the chosen skill.
val agent = agent<String, String>("router") {
model { ollama("qwen2.5:7b") }
skills {
skill<String, String>("billing", "Billing questions") { /* ... */ }
skill<String, String>("technical", "Technical support") { /* ... */ }
skill<String, String>("general", "General inquiries") { /* ... */ }
}
onSkillChosen { name ->
println("ROUTING -> $name")
}
}

Output:
ROUTING -> billing
Routing analytics:
onSkillChosen { name ->
metrics.counter("agent.skill.chosen", "skill" to name).increment()
}

Debugging routing decisions:
onSkillChosen { name ->
logger.debug("Skill '$name' chosen for input: ${currentInput.take(100)}...")
}

An agent can use all three hooks simultaneously for full observability:
val agent = agent<String, String>("observable-agent") {
model { ollama("qwen2.5:7b") }
budget { maxTurns = 10 }
skills {
skill<String, String>("research", "Research a topic") {
tools("search", "fetch_page")
knowledge("guidelines", "Research guidelines") {
loadText("/data/guidelines.txt")
}
tool("search", "Web search") { args ->
webSearch(args["query"] as String)
}
tool("fetch_page", "Fetch a web page") { args ->
httpClient.get(args["url"] as String)
}
}
skill<String, String>("summarize", "Summarize content") {
implementedBy { input -> summarize(input) }
}
}
onSkillChosen { name ->
logger.info("skill_selected: $name")
}
onKnowledgeUsed { name, content ->
logger.info("knowledge_fetched: $name (${content.length} chars)")
}
onToolUse { name, args, result ->
logger.info("tool_executed: $name args=$args")
}
}

A typical execution trace:
skill_selected: research
knowledge_fetched: guidelines (1205 chars)
tool_executed: search args={query=Kotlin coroutines best practices}
tool_executed: fetch_page args={url=https://example.com/article}
The hooks fire in execution order: skill selection first, then knowledge and tools as the agentic loop runs.
Hooks are powerful testing tools. They let you assert on agent behavior without needing a live LLM.
@Test
fun `agent calls search before summarize`() {
val toolCalls = mutableListOf<String>()
val mockClient = ModelClient { messages ->
when (toolCalls.size) {
0 -> LlmResponse.ToolCalls(listOf(ToolCall("search", mapOf("query" to "test"))))
1 -> LlmResponse.ToolCalls(listOf(ToolCall("summarize", mapOf("text" to "data"))))
else -> LlmResponse.Text("Summary: test data")
}
}
val agent = agent<String, String>("test-agent") {
model { ollama("unused"); client = mockClient }
budget { maxTurns = 5 }
skills {
skill<String, String>("work", "Do work") {
tools("search", "summarize")
tool("search", "Search") { "results" }
tool("summarize", "Summarize") { "summary" }
}
}
onToolUse { name, _, _ ->
toolCalls.add(name)
}
}
agent("analyze this")
assertEquals(listOf("search", "summarize"), toolCalls)
}

@Test
fun `billing questions route to billing skill`() {
var selectedSkill = ""
val agent = agent<String, String>("router") {
skillSelection { input ->
if (input.contains("charge")) "billing" else "general"
}
skills {
skill<String, String>("billing", "Billing") {
implementedBy { "Billing response" }
}
skill<String, String>("general", "General") {
implementedBy { "General response" }
}
}
onSkillChosen { name ->
selectedSkill = name
}
}
agent("Why was I charged twice?")
assertEquals("billing", selectedSkill)
}

@Test
fun `agent uses FAQ knowledge for common questions`() {
val knowledgeAccessed = mutableListOf<String>()
val mockClient = ModelClient { messages ->
// Simulate the LLM fetching knowledge, then answering
if (knowledgeAccessed.isEmpty()) {
LlmResponse.ToolCalls(listOf(ToolCall("knowledge_faq", emptyMap())))
} else {
LlmResponse.Text("Based on the FAQ: yes, we offer refunds.")
}
}
val agent = agent<String, String>("support") {
model { ollama("unused"); client = mockClient }
budget { maxTurns = 3 }
skills {
skill<String, String>("answer", "Answer questions") {
tools("knowledge_faq")
knowledge("faq", "FAQ content") {
"Q: Do you offer refunds? A: Yes, within 30 days."
}
}
}
onKnowledgeUsed { name, _ ->
knowledgeAccessed.add(name)
}
}
agent("Do you offer refunds?")
assertTrue(knowledgeAccessed.contains("faq"))
}

Combine all hooks for comprehensive behavior verification:
@Test
fun `full agent behavior test`() {
var skill = ""
val tools = mutableListOf<String>()
val knowledge = mutableListOf<String>()
val mockClient = ModelClient { messages ->
when (tools.size) {
0 -> LlmResponse.ToolCalls(listOf(ToolCall("search", mapOf("q" to "test"))))
else -> LlmResponse.Text("answer")
}
}
val agent = agent<String, String>("full-test") {
model { ollama("unused"); client = mockClient }
budget { maxTurns = 5 }
skills {
skill<String, String>("research", "Research") {
tools("search")
tool("search", "Search") { "results" }
knowledge("docs", "Documentation") { "doc content" }
}
}
onSkillChosen { skill = it }
onToolUse { name, _, _ -> tools.add(name) }
onKnowledgeUsed { name, _ -> knowledge.add(name) }
}
val result = agent("find info")
assertEquals("research", skill)
assertEquals(listOf("search"), tools)
assertEquals("answer", result)
}

This pattern gives you deterministic, fast, LLM-free tests that verify the agent's orchestration logic: which skill was chosen, which tools were called, in what order, and what the final result was.
- Model & Tool Calling -- the agentic loop where these hooks fire
- Skill Selection & Routing -- how `onSkillChosen` relates to routing strategies
- Tool Error Recovery -- monitor recovery attempts with `onToolUse`
- Budget Controls -- combine hooks with budgets for usage tracking
Getting Started
Core Concepts
Composition Operators
LLM Integration
- Model & Tool Calling
- Tool Error Recovery
- Skill Selection & Routing
- Budget Controls
- Observability Hooks
Guided Generation
Agent Memory
Reference