Skip to content

Commit e4253b4

Browse files
Fix some compatibility bugs (#732)
1 parent 6acc6c0 commit e4253b4

File tree

8 files changed

+45
-15
lines changed

8 files changed

+45
-15
lines changed

ms_agent/agent/llm_agent.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import inspect
44
import os.path
55
import sys
6+
import uuid
67
from copy import deepcopy
78
from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple, Union
89

@@ -176,6 +177,10 @@ async def _parallel_tool_call(self,
176177
content=tool_call_result,
177178
tool_call_id=tool_call_query['id'],
178179
name=tool_call_query['tool_name'])
180+
if _new_message.tool_call_id is None:
181+
# sometimes the tool call id is None; add a random one
182+
_new_message.tool_call_id = str(uuid.uuid4())[:8]
183+
tool_call_query['id'] = _new_message.tool_call_id
179184
messages.append(_new_message)
180185
self._log_output(_new_message.content, self.tag)
181186
return messages

ms_agent/llm/openai_llm.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -167,9 +167,15 @@ def _merge_stream_message(self, pre_message_chunk: Optional[Message],
167167
message.tool_calls[0]['id'] = message_chunk.tool_calls[
168168
0]['id']
169169
if message_chunk.tool_calls[0]['arguments']:
170-
message.tool_calls[0][
171-
'arguments'] += message_chunk.tool_calls[0][
172-
'arguments']
170+
if message.tool_calls[0]['arguments']:
171+
message.tool_calls[0][
172+
'arguments'] += message_chunk.tool_calls[0][
173+
'arguments']
174+
else:
175+
# message.tool_calls[0]['arguments'] may be None
176+
message.tool_calls[0][
177+
'arguments'] = message_chunk.tool_calls[0][
178+
'arguments']
173179
if message_chunk.tool_calls[0]['tool_name']:
174180
message.tool_calls[0][
175181
'tool_name'] = message_chunk.tool_calls[0][

ms_agent/tools/tool_manager.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@ def __init__(self, config, mcp_config: Optional[Dict[str, Any]] = None):
3030
self.extra_tools.append(SplitTask(config))
3131
if hasattr(config, 'tools') and hasattr(config.tools, 'file_system'):
3232
self.extra_tools.append(FileSystemTool(config))
33+
self.tool_call_timeout = getattr(config, 'tool_call_timeout',
34+
TOOL_CALL_TIMEOUT)
3335
self._tool_index = {}
3436

3537
def register_tool(self, tool: ToolBase):
@@ -87,7 +89,7 @@ async def single_call_tool(self, tool_info: ToolCall):
8789
server_name,
8890
tool_name=tool_name.split(self.TOOL_SPLITER)[1],
8991
tool_args=tool_args),
90-
timeout=TOOL_CALL_TIMEOUT)
92+
timeout=self.tool_call_timeout)
9193
return response
9294
except asyncio.TimeoutError:
9395
# TODO: How to get the information printed by the tool before hanging to return to the model?

projects/code_scratch/architecture.yaml

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
llm:
22
service: openai
3-
model: claude-3-7-sonnet-20250219
3+
model: claude-sonnet-4-20250514
44
openai_api_key:
55
openai_base_url: https://dashscope.aliyuncs.com/compatible-mode/v1
66

@@ -9,9 +9,10 @@ generation_config:
99
temperature: 0.3
1010
top_k: 50
1111
stream: true
12+
max_tokens: 32000
1213
extra_body:
1314
dashscope_extend_params:
14-
provider: idealab
15+
provider: b
1516
enable_thinking: false
1617

1718

@@ -57,6 +58,8 @@ callbacks:
5758

5859
max_chat_round: 1
5960

61+
tool_call_timeout: 30000
62+
6063
output_dir: output
6164

6265
help: |

projects/code_scratch/callbacks/artifact_callback.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,13 @@ def __init__(self, config: DictConfig):
2323
async def on_task_begin(self, runtime: Runtime, messages: List[Message]):
2424
await self.file_system.connect()
2525

26+
async def on_generate_response(self, runtime: Runtime,
27+
messages: List[Message]):
28+
for message in messages:
29+
if message.role == 'assistant' and message.tool_calls and not message.content:
30+
# Claude does not seem to allow empty content
31+
message.content = 'I should do a tool calling to continue:\n'
32+
2633
async def after_generate_response(self, runtime: Runtime,
2734
messages: List[Message]):
2835
if messages[-1].tool_calls or messages[-1].role == 'tool':

projects/code_scratch/callbacks/coding_callback.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,9 @@ async def on_task_begin(self, runtime: Runtime, messages: List[Message]):
2424
await self.file_system.connect()
2525

2626
async def on_tool_call(self, runtime: Runtime, messages: List[Message]):
27-
if not messages[-1].tool_calls or messages[-1].tool_calls[0][
28-
'tool_name'] != 'split_to_sub_task':
27+
# tool name is not 'split_to_sub_task', it is 'SplitTask---split_to_sub_task'
28+
if not messages[-1].tool_calls or 'split_to_sub_task' not in messages[
29+
-1].tool_calls[0]['tool_name']:
2930
return
3031
assert messages[0].role == 'system'
3132
arguments = messages[-1].tool_calls[0]['arguments']
@@ -115,8 +116,8 @@ async def on_tool_call(self, runtime: Runtime, messages: List[Message]):
115116
messages[-1].tool_calls[0]['arguments'] = json.dumps({'tasks': tasks})
116117

117118
async def after_tool_call(self, runtime: Runtime, messages: List[Message]):
118-
if not messages[-2].tool_calls or messages[-2].tool_calls[0][
119-
'tool_name'] != 'split_to_sub_task':
119+
if not messages[-2].tool_calls or 'split_to_sub_task' not in messages[
120+
-2].tool_calls[0]['tool_name']:
120121
return
121122
assert messages[0].role == 'system'
122123
arguments = messages[-2].tool_calls[0]['arguments']

projects/code_scratch/coding.yaml

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
llm:
22
service: openai
3-
model: claude-3-7-sonnet-20250219
3+
model: claude-sonnet-4-20250514
44
openai_api_key:
55
openai_base_url: https://dashscope.aliyuncs.com/compatible-mode/v1
66

@@ -9,9 +9,10 @@ generation_config:
99
temperature: 0.2
1010
top_k: 20
1111
stream: true
12+
max_tokens: 32000
1213
extra_body:
1314
dashscope_extend_params:
14-
provider: idealab
15+
provider: b
1516
enable_thinking: false
1617

1718

@@ -22,7 +23,7 @@ prompt:
2223
* Do not miss any file in `files.json`
2324
* Do not modify the `files.json` file
2425
2. Group the code files by:
25-
* 8~10 files in one group.
26+
* 3~5 files in one group.
2627
* Group closely related modules especially modules ** on a complete calling stack ** or ** depends on each other **, minimize the dependencies between groups.
2728
* List groups from backend to frontend, from low level to high levels.
2829
* All files should start from the frontend or backend folder
@@ -53,6 +54,8 @@ tools:
5354

5455
max_chat_round: 100
5556

57+
tool_call_timeout: 30000
58+
5659
output_dir: output
5760

5861
help: |

projects/code_scratch/refine.yaml

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
llm:
22
service: openai
3-
model: claude-3-7-sonnet-20250219
3+
model: claude-sonnet-4-20250514
44
openai_api_key:
55
openai_base_url: https://dashscope.aliyuncs.com/compatible-mode/v1
66

@@ -9,9 +9,10 @@ generation_config:
99
temperature: 0.2
1010
top_k: 20
1111
stream: true
12+
max_tokens: 32000
1213
extra_body:
1314
dashscope_extend_params:
14-
provider: idealab
15+
provider: b
1516
enable_thinking: false
1617

1718

@@ -74,6 +75,8 @@ tools:
7475

7576
max_chat_round: 100
7677

78+
tool_call_timeout: 30000
79+
7780
output_dir: output
7881

7982
help: |

0 commit comments

Comments
 (0)