
Commit 9fd6d0c

remove multiagent folder (python)
1 parent d38eb3c commit 9fd6d0c


7 files changed: +566 -8 lines changed


Diff for: helpers/python.ts

-7

@@ -474,13 +474,6 @@ export const installPythonTemplate = async ({
     await copyRouterCode(root, tools ?? []);
   }
 
-  // Copy multiagents overrides
-  if (template === "multiagent") {
-    await copy("**", path.join(root), {
-      cwd: path.join(compPath, "multiagent", "python"),
-    });
-  }
-
   if (template === "multiagent" || template === "reflex") {
     if (useCase) {
       const sourcePath =

Diff for: templates/components/agents/python/deep_research/app/workflows/deep_research.py

+1 -1

@@ -18,13 +18,13 @@
 
 from app.engine.index import IndexConfig, get_index
 from app.workflows.agents import plan_research, research, write_report
-from app.workflows.events import SourceNodesEvent
 from app.workflows.models import (
     CollectAnswersEvent,
     DataEvent,
     PlanResearchEvent,
     ReportEvent,
     ResearchEvent,
+    SourceNodesEvent,
 )
 
 logger = logging.getLogger("uvicorn")

Diff for: templates/components/agents/python/deep_research/app/workflows/models.py

+15

@@ -41,3 +41,18 @@ class DataEvent(Event):
 
     def to_response(self):
         return self.model_dump()
+
+
+class SourceNodesEvent(Event):
+    nodes: List[NodeWithScore]
+
+    def to_response(self):
+        return {
+            "type": "sources",
+            "data": {
+                "nodes": [
+                    SourceNodes.from_source_node(node).model_dump()
+                    for node in self.nodes
+                ]
+            },
+        }
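For orientation, a minimal sketch (not part of the commit) of how the new SourceNodesEvent might be used: a workflow step would stream it with ctx.write_event_to_stream, and to_response() produces the "sources" payload the chat frontend consumes. The TextNode content and score below are made up; only NodeWithScore/TextNode come from llama_index and SourceNodesEvent from the diff above.

# Illustrative sketch only -- assumes the deep_research template's
# app.workflows.models module shown in the diff above is importable.
from llama_index.core.schema import NodeWithScore, TextNode
from app.workflows.models import SourceNodesEvent

# Inside a workflow step, one could stream retrieved sources to the client:
#   ctx.write_event_to_stream(SourceNodesEvent(nodes=retrieved_nodes))

event = SourceNodesEvent(
    nodes=[NodeWithScore(node=TextNode(text="example chunk"), score=0.42)]
)
print(event.to_response())
# {'type': 'sources', 'data': {'nodes': [ ...serialized SourceNodes fields... ]}}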
+45

@@ -0,0 +1,45 @@
+from enum import Enum
+from typing import List, Optional
+
+from llama_index.core.schema import NodeWithScore
+from llama_index.core.workflow import Event
+
+from app.api.routers.models import SourceNodes
+
+
+class AgentRunEventType(Enum):
+    TEXT = "text"
+    PROGRESS = "progress"
+
+
+class AgentRunEvent(Event):
+    name: str
+    msg: str
+    event_type: AgentRunEventType = AgentRunEventType.TEXT
+    data: Optional[dict] = None
+
+    def to_response(self) -> dict:
+        return {
+            "type": "agent",
+            "data": {
+                "agent": self.name,
+                "type": self.event_type.value,
+                "text": self.msg,
+                "data": self.data,
+            },
+        }
+
+
+class SourceNodesEvent(Event):
+    nodes: List[NodeWithScore]
+
+    def to_response(self):
+        return {
+            "type": "sources",
+            "data": {
+                "nodes": [
+                    SourceNodes.from_source_node(node).model_dump()
+                    for node in self.nodes
+                ]
+            },
+        }
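As a rough illustration (again, not part of the commit), this is the shape of the payload an AgentRunEvent progress update produces; the name, message, and data values are invented, but the output follows directly from the to_response() method above.

# Illustrative sketch only -- assumes the events module added above is
# importable as app.workflows.events (as the tools helpers below import it).
from app.workflows.events import AgentRunEvent, AgentRunEventType

event = AgentRunEvent(
    name="researcher",
    msg="Making 3 tool calls",
    event_type=AgentRunEventType.PROGRESS,
    data={"id": "abc123", "total": 3, "current": 1},
)
print(event.to_response())
# {'type': 'agent', 'data': {'agent': 'researcher', 'type': 'progress',
#  'text': 'Making 3 tool calls',
#  'data': {'id': 'abc123', 'total': 3, 'current': 1}}}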
+230

@@ -0,0 +1,230 @@
+import logging
+import uuid
+from abc import ABC, abstractmethod
+from typing import Any, AsyncGenerator, Callable, Optional
+
+from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole
+from llama_index.core.llms.function_calling import FunctionCallingLLM
+from llama_index.core.tools import (
+    BaseTool,
+    FunctionTool,
+    ToolOutput,
+    ToolSelection,
+)
+from llama_index.core.workflow import Context
+from pydantic import BaseModel, ConfigDict
+
+from app.workflows.events import AgentRunEvent, AgentRunEventType
+
+logger = logging.getLogger("uvicorn")
+
+
+class ContextAwareTool(FunctionTool, ABC):
+    @abstractmethod
+    async def acall(self, ctx: Context, input: Any) -> ToolOutput:  # type: ignore
+        pass
+
+
+class ChatWithToolsResponse(BaseModel):
+    """
+    A tool call response from chat_with_tools.
+    """
+
+    tool_calls: Optional[list[ToolSelection]]
+    tool_call_message: Optional[ChatMessage]
+    generator: Optional[AsyncGenerator[ChatResponse | None, None]]
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    def is_calling_different_tools(self) -> bool:
+        tool_names = {tool_call.tool_name for tool_call in self.tool_calls}
+        return len(tool_names) > 1
+
+    def has_tool_calls(self) -> bool:
+        return self.tool_calls is not None and len(self.tool_calls) > 0
+
+    def tool_name(self) -> str:
+        assert self.has_tool_calls()
+        assert not self.is_calling_different_tools()
+        return self.tool_calls[0].tool_name
+
+    async def full_response(self) -> str:
+        assert self.generator is not None
+        full_response = ""
+        async for chunk in self.generator:
+            content = chunk.message.content
+            if content:
+                full_response += content
+        return full_response
+
+
+async def chat_with_tools(  # type: ignore
+    llm: FunctionCallingLLM,
+    tools: list[BaseTool],
+    chat_history: list[ChatMessage],
+) -> ChatWithToolsResponse:
+    """
+    Request LLM to call tools or not.
+    This function doesn't change the memory.
+    """
+    generator = _tool_call_generator(llm, tools, chat_history)
+    is_tool_call = await generator.__anext__()
+    if is_tool_call:
+        # Last chunk is the full response
+        # Wait for the last chunk
+        full_response = None
+        async for chunk in generator:
+            full_response = chunk
+        assert isinstance(full_response, ChatResponse)
+        return ChatWithToolsResponse(
+            tool_calls=llm.get_tool_calls_from_response(full_response),
+            tool_call_message=full_response.message,
+            generator=None,
+        )
+    else:
+        return ChatWithToolsResponse(
+            tool_calls=None,
+            tool_call_message=None,
+            generator=generator,
+        )
+
+
+async def call_tools(
+    ctx: Context,
+    agent_name: str,
+    tools: list[BaseTool],
+    tool_calls: list[ToolSelection],
+    emit_agent_events: bool = True,
+) -> list[ChatMessage]:
+    if len(tool_calls) == 0:
+        return []
+
+    tools_by_name = {tool.metadata.get_name(): tool for tool in tools}
+    if len(tool_calls) == 1:
+        return [
+            await call_tool(
+                ctx,
+                tools_by_name[tool_calls[0].tool_name],
+                tool_calls[0],
+                lambda msg: ctx.write_event_to_stream(
+                    AgentRunEvent(
+                        name=agent_name,
+                        msg=msg,
+                    )
+                ),
+            )
+        ]
+    # Multiple tool calls, show progress
+    tool_msgs: list[ChatMessage] = []
+
+    progress_id = str(uuid.uuid4())
+    total_steps = len(tool_calls)
+    if emit_agent_events:
+        ctx.write_event_to_stream(
+            AgentRunEvent(
+                name=agent_name,
+                msg=f"Making {total_steps} tool calls",
+            )
+        )
+    for i, tool_call in enumerate(tool_calls):
+        tool = tools_by_name.get(tool_call.tool_name)
+        if not tool:
+            tool_msgs.append(
+                ChatMessage(
+                    role=MessageRole.ASSISTANT,
+                    content=f"Tool {tool_call.tool_name} does not exist",
+                )
+            )
+            continue
+        tool_msg = await call_tool(
+            ctx,
+            tool,
+            tool_call,
+            event_emitter=lambda msg: ctx.write_event_to_stream(
+                AgentRunEvent(
+                    name=agent_name,
+                    msg=msg,
+                    event_type=AgentRunEventType.PROGRESS,
+                    data={
+                        "id": progress_id,
+                        "total": total_steps,
+                        "current": i,
+                    },
+                )
+            ),
+        )
+        tool_msgs.append(tool_msg)
+    return tool_msgs
+
+
+async def call_tool(
+    ctx: Context,
+    tool: BaseTool,
+    tool_call: ToolSelection,
+    event_emitter: Optional[Callable[[str], None]],
+) -> ChatMessage:
+    if event_emitter:
+        event_emitter(
+            f"Calling tool {tool_call.tool_name}, {str(tool_call.tool_kwargs)}"
+        )
+    try:
+        if isinstance(tool, ContextAwareTool):
+            if ctx is None:
+                raise ValueError("Context is required for context aware tool")
+            # inject context for calling an context aware tool
+            response = await tool.acall(ctx=ctx, **tool_call.tool_kwargs)
+        else:
+            response = await tool.acall(**tool_call.tool_kwargs)  # type: ignore
+        return ChatMessage(
+            role=MessageRole.TOOL,
+            content=str(response.raw_output),
+            additional_kwargs={
+                "tool_call_id": tool_call.tool_id,
+                "name": tool.metadata.get_name(),
+            },
+        )
+    except Exception as e:
+        logger.error(f"Got error in tool {tool_call.tool_name}: {str(e)}")
+        if event_emitter:
+            event_emitter(f"Got error in tool {tool_call.tool_name}: {str(e)}")
+        return ChatMessage(
+            role=MessageRole.TOOL,
+            content=f"Error: {str(e)}",
+            additional_kwargs={
+                "tool_call_id": tool_call.tool_id,
+                "name": tool.metadata.get_name(),
+            },
+        )
+
+
+async def _tool_call_generator(
+    llm: FunctionCallingLLM,
+    tools: list[BaseTool],
+    chat_history: list[ChatMessage],
+) -> AsyncGenerator[ChatResponse | bool, None]:
+    response_stream = await llm.astream_chat_with_tools(
+        tools,
+        chat_history=chat_history,
+        allow_parallel_tool_calls=False,
+    )
+
+    full_response = None
+    yielded_indicator = False
+    async for chunk in response_stream:
+        if "tool_calls" not in chunk.message.additional_kwargs:
+            # Yield a boolean to indicate whether the response is a tool call
+            if not yielded_indicator:
+                yield False
+                yielded_indicator = True
+
+            # if not a tool call, yield the chunks!
+            yield chunk  # type: ignore
+        elif not yielded_indicator:
+            # Yield the indicator for a tool call
+            yield True
+            yielded_indicator = True
+
+        full_response = chunk
+
+    if full_response:
+        yield full_response  # type: ignore
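A hedged sketch of how these helpers might be combined inside a workflow step: call chat_with_tools first, then either stream the plain LLM answer or execute the selected tool calls with call_tools. The step name, agent name, and the app.workflows.tools import path are hypothetical (the file's path isn't shown above); only the helper signatures come from the diff.

# Hypothetical usage sketch -- not from the commit. Assumes the helpers above
# are importable (e.g. as app.workflows.tools) and that `ctx`, `llm`, `tools`,
# and `chat_history` are provided by the surrounding workflow.
from app.workflows.tools import call_tools, chat_with_tools

async def run_agent_step(ctx, llm, tools, chat_history):
    response = await chat_with_tools(llm, tools, chat_history)
    if not response.has_tool_calls():
        # No tool call: drain the generator to get the plain streamed answer.
        return await response.full_response()
    # Execute the selected tool calls; the returned tool messages would
    # normally be appended to the chat history for a follow-up LLM call.
    return await call_tools(
        ctx=ctx,
        agent_name="researcher",
        tools=tools,
        tool_calls=response.tool_calls,
    )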
+45

@@ -0,0 +1,45 @@
+from enum import Enum
+from typing import List, Optional
+
+from llama_index.core.schema import NodeWithScore
+from llama_index.core.workflow import Event
+
+from app.api.routers.models import SourceNodes
+
+
+class AgentRunEventType(Enum):
+    TEXT = "text"
+    PROGRESS = "progress"
+
+
+class AgentRunEvent(Event):
+    name: str
+    msg: str
+    event_type: AgentRunEventType = AgentRunEventType.TEXT
+    data: Optional[dict] = None
+
+    def to_response(self) -> dict:
+        return {
+            "type": "agent",
+            "data": {
+                "agent": self.name,
+                "type": self.event_type.value,
+                "text": self.msg,
+                "data": self.data,
+            },
+        }
+
+
+class SourceNodesEvent(Event):
+    nodes: List[NodeWithScore]
+
+    def to_response(self):
+        return {
+            "type": "sources",
+            "data": {
+                "nodes": [
+                    SourceNodes.from_source_node(node).model_dump()
+                    for node in self.nodes
+                ]
+            },
+        }
