From 51e07a04b071c60286d8b9a1390d25be3bf1e525 Mon Sep 17 00:00:00 2001
From: z30057876
Date: Wed, 20 Aug 2025 15:38:07 +0800
Subject: [PATCH] =?UTF-8?q?=E5=9B=9E=E5=90=88=E5=9B=BD=E9=99=85=E5=8C=96?=
 =?UTF-8?q?=E5=86=85=E5=AE=B9=EF=BC=883=EF=BC=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 apps/llm/patterns/rewoo.py         | 327 +++++++++++++++++++++--------
 apps/routers/conversation.py       |   2 +-
 apps/scheduler/call/core.py        |   8 +-
 apps/scheduler/call/graph/style.py | 158 +++++++++-----
 apps/scheduler/executor/agent.py   |  37 ++--
 apps/scheduler/mcp_agent/base.py   |   8 +-
 apps/scheduler/mcp_agent/plan.py   |   4 +-
 apps/services/mcp_service.py       |   8 +-
 8 files changed, 374 insertions(+), 178 deletions(-)

diff --git a/apps/llm/patterns/rewoo.py b/apps/llm/patterns/rewoo.py
index 140d942a..561e7d09 100644
--- a/apps/llm/patterns/rewoo.py
+++ b/apps/llm/patterns/rewoo.py
@@ -2,6 +2,7 @@
 """规划生成命令行"""
 
 from apps.llm.reasoning import ReasoningLLM
+from apps.schemas.enum_var import LanguageType
 
 from .core import CorePattern
 
@@ -9,56 +10,115 @@ from .core import CorePattern
 class InitPlan(CorePattern):
     """规划生成命令行"""
 
-    system_prompt: str = r"""
-    你是一个计划生成器。对于给定的目标,**制定一个简单的计划**,该计划可以逐步生成合适的命令行参数和标志。
-
-    你会收到一个"命令前缀",这是已经确定和生成的命令部分。你需要基于这个前缀使用标志和参数来完成命令。
-
-    在每一步中,指明使用哪个外部工具以及工具输入来获取证据。
-
-    工具可以是以下之一:
-    (1) Option["指令"]:查询最相似的命令行标志。只接受一个输入参数,"指令"必须是搜索字符串。搜索字符串应该详细且包含必要的数据。
-    (2) Argument[名称]<值>:将任务中的数据放置到命令行的特定位置。接受两个输入参数。
-
-    所有步骤必须以"Plan: "开头,且少于150个单词。
-    不要添加任何多余的步骤。
-    确保每个步骤都包含所需的所有信息 - 不要跳过步骤。
-    不要在证据后面添加任何额外数据。
-
-    开始示例
-
-    任务:在后台运行一个新的alpine:latest容器,将主机/root文件夹挂载至/data,并执行top命令。
-    前缀:`docker run`
-    用法:`docker run ${OPTS} ${image} ${command}`。这是一个Python模板字符串。OPTS是所有标志的占位符。参数必须是 \
-    ["image", "command"] 其中之一。
-    前缀描述:二进制程序`docker`的描述为"Docker容器平台",`run`子命令的描述为"从镜像创建并运行一个新的容器"。
-
-    Plan: 我需要一个标志使容器在后台运行。 #E1 = Option[在后台运行单个容器]
-    Plan: 我需要一个标志,将主机/root目录挂载至容器内/data目录。 #E2 = Option[挂载主机/root目录至/data目录]
-    Plan: 我需要从任务中解析出镜像名称。 #E3 = Argument[image]
-    Plan: 我需要指定容器中运行的命令。 #E4 = Argument[command]
-    Final: 组装上述线索,生成最终命令。 #F
-
-    示例结束
-
-    让我们开始!
- """ - """系统提示词""" - - user_prompt: str = r""" - 任务:{instruction} - 前缀:`{binary_name} {subcmd_name}` - 用法:`{subcmd_usage}`。这是一个Python模板字符串。OPTS是所有标志的占位符。参数必须是 {argument_list} 其中之一。 - 前缀描述:二进制程序`{binary_name}`的描述为"{binary_description}",`{subcmd_name}`子命令的描述为\ - "{subcmd_description}"。 - - 请生成相应的计划。 - """ - """用户提示词""" + @staticmethod + def _default() -> tuple[dict[LanguageType, str], dict[LanguageType, str]]: + """默认的Prompt内容""" + return { + LanguageType.CHINESE: r"You are a helpful assistant.", + LanguageType.ENGLISH: r"You are a helpful assistant.", + }, { + LanguageType.CHINESE: r""" + + + 你是一个计划生成器。对于给定的目标,**制定一个简单的计划**,该计划可以逐步生成合适的命令行参数和标志。 + + 你会收到一个"命令前缀",这是已经确定和生成的命令部分。你需要基于这个前缀使用标志和参数来完成命令。 + + 在每一步中,指明使用哪个外部工具以及工具输入来获取证据。 + + 工具可以是以下之一: + (1) Option["指令"]:查询最相似的命令行标志。只接受一个输入参数,"指令"必须是搜索字符串。\ +搜索字符串应该详细且包含必要的数据。 + (2) Argument[名称]<值>:将任务中的数据放置到命令行的特定位置。接受两个输入参数。 + + 所有步骤必须以"Plan: "开头,且少于150个单词。 + 不要添加任何多余的步骤。 + 确保每个步骤都包含所需的所有信息 - 不要跳过步骤。 + 不要在证据后面添加任何额外数据。 + + + + + 开始示例 + + 任务:在后台运行一个新的alpine:latest容器,将主机/root文件夹挂载至/data,并执行top命令。 + 前缀:`docker run` + 用法:`docker run ${OPTS} ${image} ${command}`。这是一个Python模板字符串。OPTS是所有标志的\ +占位符。参数必须是 ["image", "command"] 其中之一。 + 前缀描述:二进制程序`docker`的描述为"Docker容器平台",`run`子命令的描述为"从镜像创建并运行一个新的容器"。 + + Plan: 我需要一个标志使容器在后台运行。 #E1 = Option[在后台运行单个容器] + Plan: 我需要一个标志,将主机/root目录挂载至容器内/data目录。 #E2 = \ +Option[挂载主机/root目录至/data目录] + Plan: 我需要从任务中解析出镜像名称。 #E3 = Argument[image] + Plan: 我需要指定容器中运行的命令。 #E4 = Argument[command] + Final: 组装上述线索,生成最终命令。 #F + + + + + 任务:{instruction} + 前缀:`{binary_name} {subcmd_name}` + 用法:`{subcmd_usage}`。这是一个Python模板字符串。OPTS是所有标志的占位符。参数必须是 {argument_list} \ +其中之一。 + 前缀描述:二进制程序`{binary_name}`的描述为"{binary_description}",`{subcmd_name}`子命令的描述为\ + "{subcmd_description}"。 + + 现在生成相应的计划: + """, + LanguageType.ENGLISH: r""" + + + You are a plan generator. For a given goal, **draft a simple plan** that can step-by-step \ +generate the appropriate command line arguments and flags. + + You will receive a "command prefix", which is the already determined and generated command \ +part. You need to use the flags and arguments based on this prefix to complete the command. + + In each step, specify which external tool to use and the tool input to get the evidence. + + The tool can be one of the following: + (1) Option["instruction"]: Query the most similar command line flag. Only accepts one input \ +parameter, "instruction" must be a search string. The search string should be detailed and contain necessary data. + (2) Argument["name"]: Place the data from the task into a specific position in the \ +command line. Accepts two input parameters. + + All steps must start with "Plan: " and be less than 150 words. + Do not add any extra steps. + Ensure each step contains all the required information - do not skip steps. + Do not add any extra data after the evidence. + + + + Task: Run a new alpine:latest container in the background, mount the host /root folder to \ +/data, and execute the top command. + Prefix: `docker run` + Usage: `docker run ${OPTS} ${image} ${command}`. This is a Python template string. OPTS is \ +a placeholder for all flags. The arguments must be one of ["image", "command"]. + Prefix description: The description of binary program `docker` is "Docker container platform"\ +, and the description of `run` subcommand is "Create and run a new container from an image". + + Plan: I need a flag to make the container run in the background. 
+container in the background]
+                Plan: I need a flag to mount the host /root directory to /data directory in the \
+container. #E2 = Option[Mount host /root directory to /data directory]
+                Plan: I need to parse the image name from the task. #E3 = Argument[image]
+                Plan: I need to specify the command to be run in the container. #E4 = Argument[command]
+                Final: Assemble the above clues to generate the final command. #F
+
+
+
+                Task: {instruction}
+                Prefix: `{binary_name} {subcmd_name}`
+                Usage: `{subcmd_usage}`. This is a Python template string. OPTS is a placeholder for all flags. \
+The arguments must be one of {argument_list}.
+                Prefix description: The description of binary program `{binary_name}` is "{binary_description}", \
+and the description of `{subcmd_name}` subcommand is "{subcmd_description}".
+
+                Generate the corresponding plan now:
+            """,
+        }
 
-    def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None:
-        """处理Prompt"""
-        super().__init__(system_prompt, user_prompt)
 
     async def generate(self, **kwargs) -> str:  # noqa: ANN003
         """生成命令行evidence"""
@@ -68,14 +128,15 @@ class InitPlan(CorePattern):
         binary_description = spec[binary_name][0]
         subcmd_usage = spec[binary_name][2][subcmd_name][1]
         subcmd_description = spec[binary_name][2][subcmd_name][0]
+        language = kwargs.get("language", LanguageType.CHINESE)
 
         argument_list = []
         for key in spec[binary_name][2][subcmd_name][3]:
             argument_list += [key]
 
         messages = [
-            {"role": "system", "content": self.system_prompt},
-            {"role": "user", "content": self.user_prompt.format(
+            {"role": "system", "content": self.system_prompt[language]},
+            {"role": "user", "content": self.user_prompt[language].format(
                 instruction=kwargs["instruction"],
                 binary_name=binary_name,
                 subcmd_name=subcmd_name,
@@ -99,43 +160,75 @@ class InitPlan(CorePattern):
 class PlanEvaluator(CorePattern):
     """计划评估器"""
 
-    system_prompt: str = r"""
-    你是一个计划评估器。你的任务是评估给定的计划是否合理和完整。
-
-    一个好的计划应该:
-    1. 涵盖原始任务的所有要求
-    2. 使用适当的工具收集必要的信息
-    3. 具有清晰和逻辑的步骤
-    4. 没有冗余或不必要的步骤
-
-    对于计划中的每个步骤,评估:
-    1. 工具选择是否适当
-    2. 输入参数是否清晰和充分
-    3. 该步骤是否有助于实现最终目标
-
-    请回复:
-    "VALID" - 如果计划良好且完整
-    "INVALID: <原因>" - 如果计划有问题,请解释原因
-    """
-    """系统提示词"""
+    @staticmethod
+    def _default() -> tuple[dict[LanguageType, str], dict[LanguageType, str]]:
+        """默认的Prompt内容"""
+        return {
+            LanguageType.CHINESE: r"You are a helpful assistant.",
+            LanguageType.ENGLISH: r"You are a helpful assistant.",
+        }, {
+            LanguageType.CHINESE: r"""
+
+
+                你是一个计划评估器。你的任务是评估给定的计划是否合理和完整。
+
+                一个好的计划应该:
+                1. 涵盖原始任务的所有要求
+                2. 使用适当的工具收集必要的信息
+                3. 具有清晰和逻辑的步骤
+                4. 没有冗余或不必要的步骤
+
+                对于计划中的每个步骤,评估:
+                1. 工具选择是否适当
+                2. 输入参数是否清晰和充分
+                3. 该步骤是否有助于实现最终目标
+
+                请回复:
+                "VALID" - 如果计划良好且完整
+                "INVALID: <原因>" - 如果计划有问题,请解释原因
+
+
+
+                任务:{instruction}
+                计划:{plan}
+
+                现在评估计划,并回复"VALID"或"INVALID: <原因>":
+            """,
+            LanguageType.ENGLISH: r"""
+
+
+                You are a plan evaluator. Your task is to evaluate whether the given plan is \
+reasonable and complete.
+
+                A good plan should:
+                1. Cover all requirements of the original task
+                2. Use appropriate tools to collect the necessary information
+                3. Have clear and logical steps
+                4. Contain no redundant or unnecessary steps
+
+                For each step in the plan, evaluate:
+                1. Whether the tool choice is appropriate
+                2. Whether the input parameters are clear and sufficient
+                3. Whether the step helps achieve the final goal
+
+                Please reply:
+                "VALID" - if the plan is good and complete
+                "INVALID: <reason>" - if the plan has problems, explain why
+
+
+
+                Task: {instruction}
+                Plan: {plan}
+
+                Now evaluate the plan and reply "VALID" or "INVALID: <reason>":
+            """,
+        }
 
-    user_prompt: str = r"""
-    任务:{instruction}
-    计划:{plan}
-
-    评估计划并回复"VALID"或"INVALID: <原因>"。
-    """
-    """用户提示词"""
-
-    def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None:
-        """初始化Prompt"""
-        super().__init__(system_prompt, user_prompt)
-
-    async def generate(self, **kwargs) -> str:
+    async def generate(self, **kwargs) -> str:  # noqa: ANN003
         """生成计划评估结果"""
+        language = kwargs.get("language", LanguageType.CHINESE)
+
         messages = [
-            {"role": "system", "content": self.system_prompt},
-            {"role": "user", "content": self.user_prompt.format(
+            {"role": "system", "content": self.system_prompt[language]},
+            {"role": "user", "content": self.user_prompt[language].format(
                 instruction=kwargs["instruction"],
                 plan=kwargs["plan"],
             )},
@@ -180,11 +273,67 @@
     """
     """用户提示词"""
 
-    def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None:
-        """初始化Prompt"""
-        super().__init__(system_prompt, user_prompt)
+    @staticmethod
+    def _default() -> tuple[dict[LanguageType, str], dict[LanguageType, str]]:
+        """默认的Prompt内容"""
+        return {
+            LanguageType.CHINESE: r"You are a helpful assistant.",
+            LanguageType.ENGLISH: r"You are a helpful assistant.",
+        }, {
+            LanguageType.CHINESE: r"""
+
+
+                你是一个计划重新规划器。当计划被评估为无效时,你需要生成一个新的、改进的计划。
+
+                新计划应该:
+                1. 解决评估中提到的所有问题
+                2. 保持与原始计划相同的格式
+                3. 更加精确和完整
+                4. 为每个步骤使用适当的工具
+
+                遵循与原始计划相同的格式:
+                - 每个步骤应以"Plan: "开头
+                - 包含带有适当参数的工具使用
+                - 保持步骤简洁和重点突出
+                - 以"Final"步骤结束
+
+
+
+                任务:{instruction}
+                原始计划:{plan}
+                评估:{evaluation}
+
+                生成一个新的、改进的计划,解决评估中提到的所有问题:
+            """,
+            LanguageType.ENGLISH: r"""
+
+
+                You are a plan replanner. When the plan is evaluated as invalid, you need to generate a new, \
+improved plan.
+
+                The new plan should:
+                1. Solve all problems mentioned in the evaluation
+                2. Keep the same format as the original plan
+                3. Be more precise and complete
+                4. Use appropriate tools for each step
+
+                Follow the same format as the original plan:
+                - Each step should start with "Plan: "
+                - Include tool usage with appropriate parameters
+                - Keep steps concise and focused
+                - End with the "Final" step
+
+
+
+                Task: {instruction}
+                Original Plan: {plan}
+                Evaluation: {evaluation}
+
+                Now, generate a new, improved plan that solves all problems mentioned in the evaluation:
+            """,
+        }
 
-    async def generate(self, **kwargs) -> str:
+    async def generate(self, **kwargs) -> str:  # noqa: ANN003
         """生成重新规划结果"""
+        language = kwargs.get("language", LanguageType.CHINESE)
         messages = [
-            {"role": "system", "content": self.system_prompt},
+            {"role": "system", "content": self.system_prompt[language]},
diff --git a/apps/routers/conversation.py b/apps/routers/conversation.py
index 65a22353..65799a8f 100644
--- a/apps/routers/conversation.py
+++ b/apps/routers/conversation.py
@@ -97,8 +97,8 @@ async def get_conversation_list(request: Request) -> JSONResponse:
 @router.post("", response_model=AddConversationRsp)
 async def add_conversation(
     request: Request,
-    title: str,
     appId: Annotated[uuid.UUID | None, Query()] = None,  # noqa: N803
+    title: str = "New Chat",
     *,
     debug: Annotated[bool, Query()] = False,
 ) -> JSONResponse:
diff --git a/apps/scheduler/call/core.py b/apps/scheduler/call/core.py
index ca812693..95c012f2 100644
--- a/apps/scheduler/call/core.py
+++ b/apps/scheduler/call/core.py
@@ -147,13 +147,7 @@ class CoreCall(BaseModel):
             if key not in data:
                 err = f"[CoreCall] 输出Key {key} 不存在"
                 logger.error(err)
-                raise CallError(
-                    message=err,
-                    data={
-                        "step_id": split_path[0],
-                        "key": key,
-                    },
-                )
+                return None
             data = data[key]
         return data
diff --git a/apps/scheduler/call/graph/style.py b/apps/scheduler/call/graph/style.py
index 631ea88a..20f6291c 100644
--- a/apps/scheduler/call/graph/style.py
+++ b/apps/scheduler/call/graph/style.py
@@ -9,6 +9,7 @@ from pydantic import BaseModel, Field
 from apps.llm.function import JsonGenerator
 from apps.llm.patterns.core import CorePattern
 from apps.llm.reasoning import ReasoningLLM
+from apps.schemas.enum_var import LanguageType
 
 logger = logging.getLogger(__name__)
 
@@ -24,66 +25,117 @@ class RenderStyleResult(BaseModel):
 class RenderStyle(CorePattern):
     """选择图表样式"""
 
-    system_prompt = r"""
-    You are a helpful assistant. Help the user make style choices when drawing a chart.
-    Chart title should be short and less than 3 words.
-
-    Available types:
-    - `bar`: Bar graph
-    - `pie`: Pie graph
-    - `line`: Line graph
-    - `scatter`: Scatter graph
-
-    Available bar additional styles:
-    - `normal`: Normal bar graph
-    - `stacked`: Stacked bar graph
-
-    Available pie additional styles:
-    - `normal`: Normal pie graph
-    - `ring`: Ring pie graph
-
-    Available scales:
-    - `linear`: Linear scale
-    - `log`: Logarithmic scale
-
-    EXAMPLE
-    ## Question
-    查询数据库中的数据,并绘制堆叠柱状图。
-
-    ## Thought
-    Let's think step by step. The user requires drawing a stacked bar chart, so the chart type should be `bar`, \
-    i.e. a bar chart; the chart style should be `stacked`, i.e. a stacked form.
-
-    ## Answer
-    The chart type should be: bar
-    The chart style should be: stacked
-    The scale should be: linear
-
-    END OF EXAMPLE
-
-    Let's begin.
-    """
-
-    user_prompt = r"""
-    ## Question
-    {question}
-
-    ## Thought
-    Let's think step by step.
- """ - - def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: - """初始化RenderStyle Prompt""" - super().__init__(system_prompt, user_prompt) + @staticmethod + def _default() -> tuple[dict[LanguageType, str], dict[LanguageType, str]]: + """默认的Prompt内容""" + return { + LanguageType.CHINESE: r"You are a helpful assistant.", + LanguageType.ENGLISH: r"You are a helpful assistant.", + }, { + LanguageType.CHINESE: r""" + + + 你的目标是:帮助用户在绘制图表时做出样式选择。 + 请以JSON格式输出你的选择。 + + 图标类型: + - `bar`: 柱状图 + - `pie`: 饼图 + - `line`: 折线图 + - `scatter`: 散点图 + 柱状图的附加样式: + - `normal`: 普通柱状图 + - `stacked`: 堆叠柱状图 + 饼图的附加样式: + - `normal`: 普通饼图 + - `ring`: 环形饼图 + 可用坐标比例: + - `linear`: 线性比例 + - `log`: 对数比例 + + + + ## 问题 + 查询数据库中的数据,并绘制堆叠柱状图。 + + ## 思考 + 让我们一步步思考。用户要求绘制堆叠柱状图,因此图表类型应为 `bar`,即柱状图;图表样式\ +应为 `stacked`,即堆叠形式。 + + ## 答案 + { + "chart_type": "bar", + "additional_style": "stacked", + "scale_type": "linear" + } + + + + ## 问题 + {question} + + ## 思考 + 让我们一步步思考。 + """, + LanguageType.ENGLISH: r""" + + + Your mission is: help the user make style choices when drawing a chart. + Please output your choices in JSON format. + + Chart types: + - `bar`: Bar chart + - `pie`: Pie chart + - `line`: Line chart + - `scatter`: Scatter chart + + Bar chart additional styles: + - `normal`: Normal bar chart + - `stacked`: Stacked bar chart + + Pie chart additional styles: + - `normal`: Normal pie chart + - `ring`: Ring pie chart + + Axis scaling: + - `linear`: Linear scaling + - `log`: Logarithmic scaling + + + + ## Question + Query the data from the database and draw a stacked bar chart. + + ## Thought + Let's think step by step. The user requires drawing a stacked bar chart, so the chart type \ +should be `bar`, i.e. a bar chart; the chart style should be `stacked`, i.e. a stacked form. + + ## Answer + { + "chart_type": "bar", + "additional_style": "stacked", + "scale_type": "linear" + } + + + + ## Question + {question} + + ## Thought + Let's think step by step. 
+ """, + } async def generate(self, **kwargs) -> dict[str, Any]: # noqa: ANN003 """使用LLM选择图表样式""" question = kwargs["question"] + language = kwargs.get("language", LanguageType.CHINESE) # 使用Reasoning模型进行推理 messages = [ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": self.user_prompt.format(question=question)}, + {"role": "system", "content": self.system_prompt[language]}, + {"role": "user", "content": self.user_prompt[language].format(question=question)}, ] result = "" llm = ReasoningLLM() diff --git a/apps/scheduler/executor/agent.py b/apps/scheduler/executor/agent.py index 5bce3740..285aa954 100644 --- a/apps/scheduler/executor/agent.py +++ b/apps/scheduler/executor/agent.py @@ -229,7 +229,7 @@ class MCPAgentExecutor(BaseExecutor): language=self.task.language, ) await self.push_message( - EventType.STEP_WAITING_FOR_PARAM, data={"message": error_message, "params": params_with_null} + EventType.STEP_WAITING_FOR_PARAM, data={"message": error_message, "params": params_with_null}, ) await self.push_message(EventType.FLOW_STOP, data={}) self.task.state.flow_status = FlowStatus.WAITING @@ -248,9 +248,9 @@ class MCPAgentExecutor(BaseExecutor): output_data={}, ex_data={ "message": error_message, - "params": params_with_null - } - ) + "params": params_with_null, + }, + ), ) async def get_next_step(self) -> None: @@ -311,7 +311,7 @@ class MCPAgentExecutor(BaseExecutor): flow_status=self.task.state.flow_status, input_data={}, output_data={}, - ) + ), ) async def work(self) -> None: @@ -319,7 +319,7 @@ class MCPAgentExecutor(BaseExecutor): if self.task.state.step_status == StepStatus.INIT: await self.push_message( EventType.STEP_INIT, - data={} + data={}, ) await self.get_tool_input_param(is_first=True) user_info = await UserManager.get_userinfo_by_user_sub(self.task.ids.user_sub) @@ -341,16 +341,15 @@ class MCPAgentExecutor(BaseExecutor): self.task.state.step_status = StepStatus.CANCELLED await self.push_message( EventType.STEP_CANCEL, - data={} + data={}, ) await self.push_message( EventType.FLOW_CANCEL, - data={} + data={}, ) if len(self.task.context) and self.task.context[-1].step_id == self.task.state.step_id: self.task.context[-1].step_status = StepStatus.CANCELLED - if self.task.state.step_status == StepStatus.PARAM: - await self.get_tool_input_param(is_first=False) + return max_retry = 5 for i in range(max_retry): if i != 0: @@ -435,7 +434,7 @@ class MCPAgentExecutor(BaseExecutor): output_data={ "message": self.task.state.error_message, }, - ) + ), ) await self.get_next_step() elif self.task.state.step_status == StepStatus.SUCCESS: @@ -451,7 +450,7 @@ class MCPAgentExecutor(BaseExecutor): ): await self.push_message( EventType.TEXT_ADD, - data=chunk + data=chunk, ) self.task.runtime.answer += chunk @@ -475,20 +474,20 @@ class MCPAgentExecutor(BaseExecutor): self.task.state.error_message = str(e) await self.push_message( EventType.FLOW_FAILED, - data={} + data={}, ) return self.task.state.flow_status = FlowStatus.RUNNING await self.push_message( EventType.FLOW_START, - data={} + data={}, ) if self.task.state.tool_id == FINAL_TOOL_ID: # 如果已经是最后一步,直接结束 self.task.state.flow_status = FlowStatus.SUCCESS await self.push_message( EventType.FLOW_SUCCESS, - data={} + data={}, ) await self.summarize() return @@ -505,7 +504,7 @@ class MCPAgentExecutor(BaseExecutor): self.task.state.step_status = StepStatus.SUCCESS await self.push_message( EventType.FLOW_SUCCESS, - data={} + data={}, ) await self.summarize() except Exception as e: @@ -515,11 +514,11 @@ class 
             self.task.state.step_status = StepStatus.ERROR
             await self.push_message(
                 EventType.STEP_ERROR,
-                data={}
+                data={},
             )
             await self.push_message(
                 EventType.FLOW_FAILED,
-                data={}
+                data={},
             )
             if len(self.task.context) and self.task.context[-1].step_id == self.task.state.step_id:
                 del self.task.context[-1]
@@ -535,7 +534,7 @@ class MCPAgentExecutor(BaseExecutor):
                     flow_status=self.task.state.flow_status,
                     input_data={},
                     output_data={},
-                )
+                ),
             )
         finally:
             for mcp_service in self.mcp_list:
diff --git a/apps/scheduler/mcp_agent/base.py b/apps/scheduler/mcp_agent/base.py
index b3f71c4a..0dca0eb3 100644
--- a/apps/scheduler/mcp_agent/base.py
+++ b/apps/scheduler/mcp_agent/base.py
@@ -22,8 +22,8 @@ class MCPBase:
         """获取推理结果"""
         # 调用推理大模型
         message = [
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": prompt},
+            {"role": "system", "content": prompt},
+            {"role": "user", "content": "Please provide a JSON response based on the above information and schema."},
         ]
         result = ""
         async for chunk in self.llm.call(
@@ -59,10 +59,10 @@ class MCPBase:
                 flag = False
         if not flag:
             json_generator = JsonGenerator(
-                "请提取下面内容中的json\n\n",
+                "Please provide a JSON response based on the above information and schema.\n\n",
                 [
                     {"role": "system", "content": "You are a helpful assistant."},
-                    {"role": "user", "content": "请提取下面内容中的json\n\n" + result},
+                    {"role": "user", "content": result},
                 ],
                 schema,
             )
diff --git a/apps/scheduler/mcp_agent/plan.py b/apps/scheduler/mcp_agent/plan.py
index a6ad1765..44d088f4 100644
--- a/apps/scheduler/mcp_agent/plan.py
+++ b/apps/scheduler/mcp_agent/plan.py
@@ -55,7 +55,9 @@ class MCPPlanner(MCPBase):
         """获取当前流程的名称"""
         template = _env.from_string(GENERATE_FLOW_NAME[self.language])
         prompt = template.render(goal=self.goal)
-        return await self.get_resoning_result(prompt)
+        result = await self.get_resoning_result(prompt)
+        result = await self._parse_result(result, FlowName.model_json_schema())
+        return FlowName.model_validate(result)
 
     async def create_next_step(self, history: str, tools: list[MCPTool]) -> Step:
         """创建下一步的执行步骤"""
diff --git a/apps/services/mcp_service.py b/apps/services/mcp_service.py
index 84b63830..59b22420 100644
--- a/apps/services/mcp_service.py
+++ b/apps/services/mcp_service.py
@@ -482,13 +482,13 @@ class MCPServiceManager:
         db_service = await service_collection.find_one({"_id": service_id, "author": user_sub})
         db_service = MCPCollection.model_validate(db_service)
         if install:
-            if db_service.status == MCPInstallStatus.INSTALLING or db_service.status == MCPInstallStatus.READY:
-                err = "[MCPServiceManager] MCP服务已处于安装中或已准备就绪"
-                raise Exception(err)
+            if db_service.status == MCPInstallStatus.INSTALLING:
+                err = "[MCPServiceManager] MCP服务已处于安装中"
+                raise RuntimeError(err)
             mcp_config = await MCPLoader.get_config(service_id)
             await MCPLoader.init_one_template(mcp_id=service_id, config=mcp_config)
         else:
             if db_service.status != MCPInstallStatus.INSTALLING:
                 err = "[MCPServiceManager] 只能卸载处于安装中的MCP服务"
-                raise Exception(err)
+                raise RuntimeError(err)
             await MCPLoader.cancel_installing_task([service_id])
-- 
Gitee
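
Reviewer note, not part of the patch: every prompt template introduced above is rendered with
str.format(), which treats each single brace pair as a replacement field. Any literal brace in
the template body, such as the ${OPTS}/${image}/${command} shell placeholders or the sample
JSON answers in the RenderStyle templates, therefore has to be doubled, or format() raises
KeyError. Below is a minimal, self-contained sketch of the language-keyed template pattern;
LanguageType here is an illustrative stand-in for apps.schemas.enum_var.LanguageType, and
render() is a hypothetical helper rather than the project's actual API.

# reviewer_sketch.py - illustrative only; names are stand-ins, not project API.
from enum import Enum


class LanguageType(str, Enum):
    """Stand-in for apps.schemas.enum_var.LanguageType."""

    CHINESE = "zh"
    ENGLISH = "en"


# One template per language, mirroring what _default() returns. The doubled
# braces keep str.format() from treating ${OPTS}/${image} as replacement fields.
USER_PROMPT: dict[LanguageType, str] = {
    LanguageType.CHINESE: "任务:{instruction}\n用法:`docker run ${{OPTS}} ${{image}}`",
    LanguageType.ENGLISH: "Task: {instruction}\nUsage: `docker run ${{OPTS}} ${{image}}`",
}


def render(instruction: str, language: LanguageType = LanguageType.CHINESE) -> str:
    """Pick the template by language and fill the placeholders, as generate() does."""
    return USER_PROMPT[language].format(instruction=instruction)


if __name__ == "__main__":
    print(render("Run an alpine container", LanguageType.ENGLISH))
    # Prints:
    # Task: Run an alpine container
    # Usage: `docker run ${OPTS} ${image}`

The same reasoning applies to the literal JSON answer blocks in the RenderStyle templates.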