diff --git a/apps/common/queue.py b/apps/common/queue.py index 5601c93aa3ef72011f2e2d4b126f782d83e85b78..6e82af915e2a70f743c629902c149b7ebc0f7a0e 100644 --- a/apps/common/queue.py +++ b/apps/common/queue.py @@ -56,9 +56,12 @@ class MessageQueue: flow = MessageFlow( appId=task.state.app_id, flowId=task.state.flow_id, + flowName=task.state.flow_name, + flowStatus=task.state.flow_status, stepId=task.state.step_id, stepName=task.state.step_name, - stepStatus=task.state.status, + stepDescription=task.state.step_description, + stepStatus=task.state.step_status ) else: flow = None diff --git a/apps/constants.py b/apps/constants.py index 58158b33c3aa4ea5aa3595f5642bf6444e7d76cb..20cb79b54ac8db5450fdbb296cb400b47a29adf0 100644 --- a/apps/constants.py +++ b/apps/constants.py @@ -11,7 +11,7 @@ from apps.common.config import Config # 新对话默认标题 NEW_CHAT = "新对话" # 滑动窗口限流 默认窗口期 -SLIDE_WINDOW_TIME = 60 +SLIDE_WINDOW_TIME = 15 # OIDC 访问Token 过期时间(分钟) OIDC_ACCESS_TOKEN_EXPIRE_TIME = 30 # OIDC 刷新Token 过期时间(分钟) diff --git a/apps/dependency/user.py b/apps/dependency/user.py index fce67e51e00dd76b35bec2f7787dfe8039111589..8f5848a3d2961ef21b242d2eee931c623c6481c7 100644 --- a/apps/dependency/user.py +++ b/apps/dependency/user.py @@ -1,14 +1,16 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
"""用户鉴权""" - +import os import logging from fastapi import Depends from fastapi.security import OAuth2PasswordBearer +import secrets from starlette import status from starlette.exceptions import HTTPException from starlette.requests import HTTPConnection +from apps.common.config import Config from apps.services.api_key import ApiKeyManager from apps.services.session import SessionManager @@ -48,6 +50,9 @@ async def get_session(request: HTTPConnection) -> str: :param request: HTTP请求 :return: Session ID """ + if Config().get_config().no_auth.enable: + # 如果启用了无认证访问,直接返回调试用户 + return secrets.token_hex(16) session_id = await _get_session_id_from_request(request) if not session_id: raise HTTPException( @@ -69,6 +74,12 @@ async def get_user(request: HTTPConnection) -> str: :param request: HTTP请求体 :return: 用户sub """ + if Config().get_config().no_auth.enable: + # 如果启用了无认证访问,直接返回当前操作系统用户的名称 + username = os.environ.get('USERNAME') # 适用于 Windows 系统 + if not username: + username = os.environ.get('USER') # 适用于 Linux 和 macOS 系统 + return username or "admin" session_id = await _get_session_id_from_request(request) if not session_id: raise HTTPException( diff --git a/apps/llm/embedding.py b/apps/llm/embedding.py index 28ab86b49a96d19cc6e2db83930d5aff48f82260..df13044a1c9ce0e11968d5dd7c3f0f8ab2345394 100644 --- a/apps/llm/embedding.py +++ b/apps/llm/embedding.py @@ -1,8 +1,9 @@ """Embedding模型""" import httpx - +import logging from apps.common.config import Config +logger = logging.getLogger(__name__) class Embedding: @@ -15,7 +16,6 @@ class Embedding: embedding = await cls.get_embedding(["测试文本"]) return len(embedding[0]) - @classmethod async def _get_openai_embedding(cls, text: list[str]) -> list[list[float]]: """访问OpenAI兼容的Embedding API,获得向量化数据""" @@ -75,10 +75,18 @@ class Embedding: :param text: 待向量化文本(多条文本组成List) :return: 文本对应的向量(顺序与text一致,也为List) """ - if Config().get_config().embedding.type == "openai": - return await cls._get_openai_embedding(text) - if 
Config().get_config().embedding.type == "mindie": - return await cls._get_tei_embedding(text) + try: + if Config().get_config().embedding.type == "openai": + return await cls._get_openai_embedding(text) + if Config().get_config().embedding.type == "mindie": + return await cls._get_tei_embedding(text) - err = f"不支持的Embedding API类型: {Config().get_config().embedding.type}" - raise ValueError(err) + err = f"不支持的Embedding API类型: {Config().get_config().embedding.type}" + raise ValueError(err) + except Exception as e: + err = f"获取Embedding失败: {e}" + logger.error(err) + rt = [] + for i in range(len(text)): + rt.append([0.0]*1024) + return rt diff --git a/apps/llm/function.py b/apps/llm/function.py index 1f995fe7ba187cead03aa6fc62a4cbce1ec05a65..0d1fbf9a79c325d13831d8823d88544df88dae9a 100644 --- a/apps/llm/function.py +++ b/apps/llm/function.py @@ -10,7 +10,7 @@ from typing import Any from jinja2 import BaseLoader from jinja2.sandbox import SandboxedEnvironment from jsonschema import Draft7Validator - +from jsonschema import validate from apps.common.config import Config from apps.constants import JSON_GEN_MAX_TRIAL, REASONING_END_TOKEN from apps.llm.prompt import JSON_GEN_BASIC @@ -42,6 +42,7 @@ class FunctionLLM: self._params = { "model": self._config.model, "messages": [], + "timeout": 300 } if self._config.backend == "ollama": @@ -68,7 +69,6 @@ class FunctionLLM: api_key=self._config.api_key, ) - async def _call_openai( self, messages: list[dict[str, str]], @@ -123,7 +123,7 @@ class FunctionLLM: }, ] - response = await self._client.chat.completions.create(**self._params) # type: ignore[arg-type] + response = await self._client.chat.completions.create(**self._params) # type: ignore[arg-type] try: logger.info("[FunctionCall] 大模型输出:%s", response.choices[0].message.tool_calls[0].function.arguments) return response.choices[0].message.tool_calls[0].function.arguments @@ -132,7 +132,6 @@ class FunctionLLM: logger.info("[FunctionCall] 大模型输出:%s", ans) return await 
FunctionLLM.process_response(ans) - @staticmethod async def process_response(response: str) -> str: """处理大模型的输出""" @@ -169,7 +168,6 @@ class FunctionLLM: return json_str - async def _call_ollama( self, messages: list[dict[str, str]], @@ -196,10 +194,9 @@ class FunctionLLM: "format": schema, }) - response = await self._client.chat(**self._params) # type: ignore[arg-type] + response = await self._client.chat(**self._params) # type: ignore[arg-type] return await self.process_response(response.message.content or "") - async def call( self, messages: list[dict[str, Any]], @@ -237,6 +234,58 @@ class FunctionLLM: class JsonGenerator: """JSON生成器""" + @staticmethod + async def _parse_result_by_stack(result: str, schema: dict[str, Any]) -> str: + """解析推理结果""" + left_index = result.find('{') + right_index = result.rfind('}') + if left_index != -1 and right_index != -1 and left_index < right_index: + try: + tmp_js = json.loads(result[left_index:right_index + 1]) + validate(instance=tmp_js, schema=schema) + return tmp_js + except Exception as e: + logger.error("[JsonGenerator] 解析结果失败: %s", e) + stack = [] + json_candidates = [] + # 定义括号匹配关系 + bracket_map = {')': '(', ']': '[', '}': '{'} + + for i, char in enumerate(result): + # 遇到左括号则入栈 + if char in bracket_map.values(): + stack.append((char, i)) + # 遇到右括号且栈不为空时检查匹配 + elif char in bracket_map.keys() and stack: + if not stack: + continue + top_char, top_index = stack[-1] + # 检查是否匹配当前右括号 + if top_char == bracket_map[char]: + stack.pop() + # 当栈为空且当前是右花括号时,认为找到一个完整JSON + if not stack and char == '}': + json_str = result[top_index:i+1] + json_candidates.append(json_str) + else: + # 如果不匹配,清空栈 + stack.clear() + # 移除重复项并保持顺序 + seen = set() + unique_jsons = [] + for json_str in json_candidates[::]: + if json_str not in seen: + seen.add(json_str) + unique_jsons.append(json_str) + + for json_str in unique_jsons: + try: + tmp_js = json.loads(json_str) + validate(instance=tmp_js, schema=schema) + return tmp_js + except Exception as e: + 
logger.error("[JsonGenerator] 解析结果失败: %s", e) + return None def __init__(self, query: str, conversation: list[dict[str, str]], schema: dict[str, Any]) -> None: """初始化JSON生成器""" @@ -254,7 +303,6 @@ class JsonGenerator: ) self._err_info = "" - async def _assemble_message(self) -> str: """组装消息""" # 检查类型 @@ -275,23 +323,20 @@ class JsonGenerator: """单次尝试""" prompt = await self._assemble_message() messages = [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": prompt}, + {"role": "system", "content": prompt}, + {"role": "user", "content": "please generate a JSON response based on the above information and schema."}, ] function = FunctionLLM() return await function.call(messages, self._schema, max_tokens, temperature) - async def generate(self) -> dict[str, Any]: """生成JSON""" Draft7Validator.check_schema(self._schema) validator = Draft7Validator(self._schema) - logger.info("[JSONGenerator] Schema:%s", self._schema) while self._count < JSON_GEN_MAX_TRIAL: self._count += 1 result = await self._single_trial() - logger.info("[JSONGenerator] 得到:%s", result) try: validator.validate(result) except Exception as err: # noqa: BLE001 diff --git a/apps/llm/patterns/core.py b/apps/llm/patterns/core.py index 4ef8133a9fed1b1e62f1ceb578c6bdb5a93b12a5..c4a58364ec7c21a132f401d5ebfb98e3d7c58b82 100644 --- a/apps/llm/patterns/core.py +++ b/apps/llm/patterns/core.py @@ -3,40 +3,52 @@ from abc import ABC, abstractmethod from textwrap import dedent +from pydantic import BaseModel, Field +from apps.schemas.enum_var import LanguageType class CorePattern(ABC): """基础大模型范式抽象类""" - system_prompt: str = "" - """系统提示词""" - user_prompt: str = "" """用户提示词""" input_tokens: int = 0 """输入Token数量""" output_tokens: int = 0 """输出Token数量""" + def get_default_prompt(self) -> dict[LanguageType, str]: + """ + 获取默认的用户提示词 + + :return: 默认的用户提示词 + :rtype: dict[LanguageType, str] + """ + return {}, {} - def __init__(self, system_prompt: str | None = None, user_prompt: str | 
None = None) -> None: + def __init__( + self, + system_prompt: dict[LanguageType, str] | None = None, + user_prompt: dict[LanguageType, str] | None = None, + ) -> None: """ 检查是否已经自定义了Prompt;有的话就用自定义的;同时对Prompt进行空格清除 :param system_prompt: 系统提示词,f-string格式 :param user_prompt: 用户提示词,f-string格式 """ + default_system_prompt, default_user_prompt = self.get_default_prompt() if system_prompt is not None: self.system_prompt = system_prompt - + else: + self.system_prompt = default_system_prompt if user_prompt is not None: self.user_prompt = user_prompt + else: + self.user_prompt = default_user_prompt - if not self.user_prompt: - err = "必须设置用户提示词!" - raise ValueError(err) + self.system_prompt = {lang: dedent(prompt).strip("\n") for lang, prompt in self.system_prompt.items()} - self.system_prompt = dedent(self.system_prompt).strip("\n") - self.user_prompt = dedent(self.user_prompt).strip("\n") + self.user_prompt = {lang: dedent(prompt).strip("\n") for lang, prompt in self.user_prompt.items()} @abstractmethod async def generate(self, **kwargs): # noqa: ANN003, ANN201 diff --git a/apps/llm/patterns/executor.py b/apps/llm/patterns/executor.py index f872fd2ac8d691b4079756a56a6107d6b6556585..e2153487a568eaa1289677b14111bbcbcc7b68ea 100644 --- a/apps/llm/patterns/executor.py +++ b/apps/llm/patterns/executor.py @@ -6,7 +6,7 @@ from typing import TYPE_CHECKING, Any from apps.llm.patterns.core import CorePattern from apps.llm.reasoning import ReasoningLLM from apps.llm.snippet import convert_context_to_prompt, facts_to_prompt - +from apps.schemas.enum_var import LanguageType if TYPE_CHECKING: from apps.schemas.scheduler import ExecutorBackground @@ -14,40 +14,79 @@ if TYPE_CHECKING: class ExecutorThought(CorePattern): """通过大模型生成Executor的思考内容""" - user_prompt: str = r""" - - - 你是一个可以使用工具的智能助手。 - 在回答用户的问题时,你为了获取更多的信息,使用了一个工具。 - 请简明扼要地总结工具的使用过程,提供你的见解,并给出下一步的行动。 - - 注意: - 工具的相关信息在标签中给出。 - 为了使你更好的理解发生了什么,你之前的思考过程在标签中给出。 - 输出时请不要包含XML标签,输出时请保持简明和清晰。 - - - - - {tool_name} - {tool_description} 
- {tool_output} - - - - {last_thought} - - - - 你当前需要解决的问题是: - {user_question} - - - 请综合以上信息,再次一步一步地进行思考,并给出见解和行动: - """ - """用户提示词""" - - def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: + def get_default_prompt(self) -> dict[LanguageType, str]: + user_prompt = { + LanguageType.CHINESE: r""" + + + 你是一个可以使用工具的智能助手。 + 在回答用户的问题时,你为了获取更多的信息,使用了一个工具。 + 请简明扼要地总结工具的使用过程,提供你的见解,并给出下一步的行动。 + + 注意: + 工具的相关信息在标签中给出。 + 为了使你更好的理解发生了什么,你之前的思考过程在标签中给出。 + 输出时请不要包含XML标签,输出时请保持简明和清晰。 + + + + + {tool_name} + {tool_description} + {tool_output} + + + + {last_thought} + + + + 你当前需要解决的问题是: + {user_question} + + + 请综合以上信息,再次一步一步地进行思考,并给出见解和行动: + """, + LanguageType.ENGLISH: r""" + + + You are an intelligent assistant who can use tools. + When answering user questions, you use a tool to get more information. + Please summarize the process of using the tool briefly, provide your insights, and give the next action. + + Note: + The information about the tool is given in the tag. + To help you better understand what happened, your previous thought process is given in the tag. + Do not include XML tags in the output, and keep the output brief and clear. + + + + + {tool_name} + {tool_description} + {tool_output} + + + + {last_thought} + + + + The question you need to solve is: + {user_question} + + + Please integrate the above information, think step by step again, provide insights, and give actions: + """, + } + """用户提示词""" + return {}, user_prompt + + def __init__( + self, + system_prompt: dict[LanguageType, str] | None = None, + user_prompt: dict[LanguageType, str] | None = None, + ) -> None: """处理Prompt""" super().__init__(system_prompt, user_prompt) @@ -57,19 +96,23 @@ class ExecutorThought(CorePattern): last_thought: str = kwargs["last_thought"] user_question: str = kwargs["user_question"] tool_info: dict[str, Any] = kwargs["tool_info"] + language: LanguageType = kwargs.get("language", LanguageType.CHINESE) except Exception as e: err = "参数不正确!" 
raise ValueError(err) from e messages = [ {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": self.user_prompt.format( - last_thought=last_thought, - user_question=user_question, - tool_name=tool_info["name"], - tool_description=tool_info["description"], - tool_output=tool_info["output"], - )}, + { + "role": "user", + "content": self.user_prompt[language].format( + last_thought=last_thought, + user_question=user_question, + tool_name=tool_info["name"], + tool_description=tool_info["description"], + tool_output=tool_info["output"], + ), + }, ] llm = ReasoningLLM() @@ -85,30 +128,59 @@ class ExecutorThought(CorePattern): class ExecutorSummary(CorePattern): """使用大模型进行生成Executor初始背景""" - user_prompt: str = r""" - - 根据给定的对话记录和关键事实,生成一个三句话背景总结。这个总结将用于后续对话的上下文理解。 - - 生成总结的要求如下: - 1. 突出重要信息点,例如时间、地点、人物、事件等。 - 2. “关键事实”中的内容可在生成总结时作为已知信息。 - 3. 输出时请不要包含XML标签,确保信息准确性,不得编造信息。 - 4. 总结应少于3句话,应少于300个字。 - - 对话记录将在标签中给出,关键事实将在标签中给出。 - - - {conversation} - - - {facts} - - - 现在,请开始生成背景总结: - """ - """用户提示词""" - - def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: + def get_default_prompt(self) -> dict[LanguageType, str]: + user_prompt = { + LanguageType.CHINESE: r""" + + 根据给定的对话记录和关键事实,生成一个三句话背景总结。这个总结将用于后续对话的上下文理解。 + + 生成总结的要求如下: + 1. 突出重要信息点,例如时间、地点、人物、事件等。 + 2. “关键事实”中的内容可在生成总结时作为已知信息。 + 3. 输出时请不要包含XML标签,确保信息准确性,不得编造信息。 + 4. 总结应少于3句话,应少于300个字。 + + 对话记录将在标签中给出,关键事实将在标签中给出。 + + + {conversation} + + + {facts} + + + 现在,请开始生成背景总结: + """, + LanguageType.ENGLISH: r""" + + Based on the given conversation records and key facts, generate a three-sentence background summary. This summary will be used for context understanding in subsequent conversations. + + The requirements for generating the summary are as follows: + 1. Highlight important information points, such as time, location, people, events, etc. + 2. The content in the "key facts" can be used as known information when generating the summary. + 3. 
Do not include XML tags in the output, ensure the accuracy of the information, and do not make up information. + 4. The summary should be less than 3 sentences and less than 300 words. + + The conversation records will be given in the tag, and the key facts will be given in the tag. + + + {conversation} + + + {facts} + + + Now, please start generating the background summary: + """, + } + """用户提示词""" + return {}, user_prompt + + def __init__( + self, + system_prompt: dict[LanguageType, str] | None = None, + user_prompt: dict[LanguageType, str] | None = None, + ) -> None: """初始化Background模式""" super().__init__(system_prompt, user_prompt) @@ -117,13 +189,17 @@ class ExecutorSummary(CorePattern): background: ExecutorBackground = kwargs["background"] conversation_str = convert_context_to_prompt(background.conversation) facts_str = facts_to_prompt(background.facts) + language = kwargs.get("language", LanguageType.CHINESE) messages = [ {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": self.user_prompt.format( - facts=facts_str, - conversation=conversation_str, - )}, + { + "role": "user", + "content": self.user_prompt[language].format( + facts=facts_str, + conversation=conversation_str, + ), + }, ] result = "" diff --git a/apps/llm/patterns/facts.py b/apps/llm/patterns/facts.py index 0b0381ff40a0e6632fd204c9efcf834a13c4711f..46707d1158f886e5e4fc12ddacef508183eb9b70 100644 --- a/apps/llm/patterns/facts.py +++ b/apps/llm/patterns/facts.py @@ -9,6 +9,7 @@ from apps.llm.function import JsonGenerator from apps.llm.patterns.core import CorePattern from apps.llm.reasoning import ReasoningLLM from apps.llm.snippet import convert_context_to_prompt +from apps.schemas.enum_var import LanguageType logger = logging.getLogger(__name__) @@ -22,62 +23,110 @@ class FactsResult(BaseModel): class Facts(CorePattern): """事实提取""" - system_prompt: str = "You are a helpful assistant." 
- """系统提示词(暂不使用)""" - - user_prompt: str = r""" - - - 从对话中提取关键信息,并将它们组织成独一无二的、易于理解的事实,包含用户偏好、关系、实体等有用信息。 - 以下是需要关注的信息类型以及有关如何处理输入数据的详细说明。 - - **你需要关注的信息类型** - 1. 实体:对话中涉及到的实体。例如:姓名、地点、组织、事件等。 - 2. 偏好:对待实体的态度。例如喜欢、讨厌等。 - 3. 关系:用户与实体之间,或两个实体之间的关系。例如包含、并列、互斥等。 - 4. 动作:对实体产生影响的具体动作。例如查询、搜索、浏览、点击等。 - - **要求** - 1. 事实必须准确,只能从对话中提取。不要将样例中的信息体现在输出中。 - 2. 事实必须清晰、简洁、易于理解。必须少于30个字。 - 3. 必须按照以下JSON格式输出: - - {{ - "facts": ["事实1", "事实2", "事实3"] - }} - - - - - 杭州西湖有哪些景点? - 杭州西湖是中国浙江省杭州市的一个著名景点,以其美丽的自然风光和丰富的文化遗产而闻名。西湖周围有许多著名的景点,包括著名的苏堤、白堤、断桥、三潭印月等。西湖以其清澈的湖水和周围的山脉而著名,是中国最著名的湖泊之一。 - - - + def get_default_prompt(self) -> dict[LanguageType, str]: + system_prompt = { + LanguageType.CHINESE: "你是一个有用的助手。", + LanguageType.ENGLISH: "You are a helpful assistant." + } + user_prompt = { + LanguageType.CHINESE: r""" + + + 从对话中提取关键信息,并将它们组织成独一无二的、易于理解的事实,包含用户偏好、关系、实体等有用信息。 + 以下是需要关注的信息类型以及有关如何处理输入数据的详细说明。 + + **你需要关注的信息类型** + 1. 实体:对话中涉及到的实体。例如:姓名、地点、组织、事件等。 + 2. 偏好:对待实体的态度。例如喜欢、讨厌等。 + 3. 关系:用户与实体之间,或两个实体之间的关系。例如包含、并列、互斥等。 + 4. 动作:对实体产生影响的具体动作。例如查询、搜索、浏览、点击等。 + + **要求** + 1. 事实必须准确,只能从对话中提取。不要将样例中的信息体现在输出中。 + 2. 事实必须清晰、简洁、易于理解。必须少于30个字。 + 3. 必须按照以下JSON格式输出: + {{ - "facts": ["杭州西湖有苏堤、白堤、断桥、三潭印月等景点"] + "facts": ["事实1", "事实2", "事实3"] }} - - - - - {conversation} - - """ - """用户提示词""" + + + + + 杭州西湖有哪些景点? + 杭州西湖是中国浙江省杭州市的一个著名景点,以其美丽的自然风光和丰富的文化遗产而闻名。西湖周围有许多著名的景点,包括著名的苏堤、白堤、断桥、三潭印月等。西湖以其清澈的湖水和周围的山脉而著名,是中国最著名的湖泊之一。 + + + + {{ + "facts": ["杭州西湖有苏堤、白堤、断桥、三潭印月等景点"] + }} + + + + + {conversation} + + """, + LanguageType.ENGLISH: r""" + + + Extract key information from the conversation and organize it into unique, easily understandable facts that include user preferences, relationships, entities, etc. + The following are the types of information to be paid attention to and detailed instructions on how to handle the input data. + + **Types of information to be paid attention to** + 1. Entities: Entities involved in the conversation. For example: names, locations, organizations, events, etc. + 2. 
Preferences: Attitudes towards entities. For example: like, dislike, etc. + 3. Relationships: Relationships between the user and entities, or between two entities. For example: include, parallel, exclusive, etc. + 4. Actions: Specific actions that affect entities. For example: query, search, browse, click, etc. + + **Requirements** + 1. Facts must be accurate and can only be extracted from the conversation. Do not include information from the sample in the output. + 2. Facts must be clear, concise, and easy to understand. Must be less than 30 words. + 3. Output in the following JSON format: - - def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: + {{ + "facts": ["fact1", "fact2", "fact3"] + }} + + + + + What are the attractions in West Lake, Hangzhou? + West Lake in Hangzhou is a famous scenic spot in Hangzhou, Zhejiang Province, China, famous for its beautiful natural scenery and rich cultural heritage. There are many famous attractions around West Lake, including the famous Su Causeway, Bai Causeway, Broken Bridge, Three Pools Mirroring the Moon, etc. West Lake is famous for its clear water and surrounding mountains, and is one of the most famous lakes in China. 
+ + + + {{ + "facts": ["West Lake has the famous attractions of Suzhou Embankment, Bai Embankment, Qiantang Bridge, San Tang Yin Yue, etc."] + }} + + + + + {conversation} + + """, + } + """用户提示词""" + return system_prompt, user_prompt + + def __init__( + self, + system_prompt: dict[LanguageType, str] | None = None, + user_prompt: dict[LanguageType, str] | None = None, + ) -> None: """初始化Prompt""" super().__init__(system_prompt, user_prompt) - async def generate(self, **kwargs) -> list[str]: # noqa: ANN003 """事实提取""" conversation = convert_context_to_prompt(kwargs["conversation"]) + language = kwargs.get("language", LanguageType.CHINESE) + messages = [ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": self.user_prompt.format(conversation=conversation)}, + {"role": "system", "content": self.system_prompt[language]}, + {"role": "user", "content": self.user_prompt[language].format(conversation=conversation)}, ] result = "" llm = ReasoningLLM() @@ -88,7 +137,7 @@ class Facts(CorePattern): messages += [{"role": "assistant", "content": result}] json_gen = JsonGenerator( - query="根据给定的背景信息,提取事实条目", + query="Extract fact entries based on the given background information", conversation=messages, schema=FactsResult.model_json_schema(), ) diff --git a/apps/llm/patterns/rewoo.py b/apps/llm/patterns/rewoo.py index ef78d92667d30fbd3b26d55ba4d87961181f3d48..66c3c65114a08b7824352ec629b10c748bcf7cda 100644 --- a/apps/llm/patterns/rewoo.py +++ b/apps/llm/patterns/rewoo.py @@ -3,59 +3,122 @@ from apps.llm.patterns.core import CorePattern from apps.llm.reasoning import ReasoningLLM +from apps.schemas.enum_var import LanguageType class InitPlan(CorePattern): """规划生成命令行""" - system_prompt: str = r""" - 你是一个计划生成器。对于给定的目标,**制定一个简单的计划**,该计划可以逐步生成合适的命令行参数和标志。 - - 你会收到一个"命令前缀",这是已经确定和生成的命令部分。你需要基于这个前缀使用标志和参数来完成命令。 - - 在每一步中,指明使用哪个外部工具以及工具输入来获取证据。 - - 工具可以是以下之一: - (1) Option["指令"]:查询最相似的命令行标志。只接受一个输入参数,"指令"必须是搜索字符串。搜索字符串应该详细且包含必要的数据。 - (2) 
Argument[名称]<值>:将任务中的数据放置到命令行的特定位置。接受两个输入参数。 - - 所有步骤必须以"Plan: "开头,且少于150个单词。 - 不要添加任何多余的步骤。 - 确保每个步骤都包含所需的所有信息 - 不要跳过步骤。 - 不要在证据后面添加任何额外数据。 - - 开始示例 - - 任务:在后台运行一个新的alpine:latest容器,将主机/root文件夹挂载至/data,并执行top命令。 - 前缀:`docker run` - 用法:`docker run ${OPTS} ${image} ${command}`。这是一个Python模板字符串。OPTS是所有标志的占位符。参数必须是 \ - ["image", "command"] 其中之一。 - 前缀描述:二进制程序`docker`的描述为"Docker容器平台",`run`子命令的描述为"从镜像创建并运行一个新的容器"。 - - Plan: 我需要一个标志使容器在后台运行。 #E1 = Option[在后台运行单个容器] - Plan: 我需要一个标志,将主机/root目录挂载至容器内/data目录。 #E2 = Option[挂载主机/root目录至/data目录] - Plan: 我需要从任务中解析出镜像名称。 #E3 = Argument[image] - Plan: 我需要指定容器中运行的命令。 #E4 = Argument[command] - Final: 组装上述线索,生成最终命令。 #F - - 示例结束 - - 让我们开始! - """ - """系统提示词""" - - user_prompt: str = r""" - 任务:{instruction} - 前缀:`{binary_name} {subcmd_name}` - 用法:`{subcmd_usage}`。这是一个Python模板字符串。OPTS是所有标志的占位符。参数必须是 {argument_list} 其中之一。 - 前缀描述:二进制程序`{binary_name}`的描述为"{binary_description}",`{subcmd_name}`子命令的描述为\ - "{subcmd_description}"。 - - 请生成相应的计划。 - """ - """用户提示词""" - - def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: + def get_default_prompt(self) -> dict[LanguageType, str]: + system_prompt = { + LanguageType.CHINESE: r""" + 你是一个计划生成器。对于给定的目标,**制定一个简单的计划**,该计划可以逐步生成合适的命令行参数和标志。 + + 你会收到一个"命令前缀",这是已经确定和生成的命令部分。你需要基于这个前缀使用标志和参数来完成命令。 + + 在每一步中,指明使用哪个外部工具以及工具输入来获取证据。 + + 工具可以是以下之一: + (1) Option["指令"]:查询最相似的命令行标志。只接受一个输入参数,"指令"必须是搜索字符串。搜索字符串应该详细且包含必要的数据。 + (2) Argument[名称]<值>:将任务中的数据放置到命令行的特定位置。接受两个输入参数。 + + 所有步骤必须以"Plan: "开头,且少于150个单词。 + 不要添加任何多余的步骤。 + 确保每个步骤都包含所需的所有信息 - 不要跳过步骤。 + 不要在证据后面添加任何额外数据。 + + 开始示例 + + 任务:在后台运行一个新的alpine:latest容器,将主机/root文件夹挂载至/data,并执行top命令。 + 前缀:`docker run` + 用法:`docker run ${OPTS} ${image} ${command}`。这是一个Python模板字符串。OPTS是所有标志的占位符。参数必须是 \ + ["image", "command"] 其中之一。 + 前缀描述:二进制程序`docker`的描述为"Docker容器平台",`run`子命令的描述为"从镜像创建并运行一个新的容器"。 + + Plan: 我需要一个标志使容器在后台运行。 #E1 = Option[在后台运行单个容器] + Plan: 我需要一个标志,将主机/root目录挂载至容器内/data目录。 #E2 = Option[挂载主机/root目录至/data目录] + Plan: 我需要从任务中解析出镜像名称。 
#E3 = Argument[image] + Plan: 我需要指定容器中运行的命令。 #E4 = Argument[command] + Final: 组装上述线索,生成最终命令。 #F + + 示例结束 + + 让我们开始! + """, + LanguageType.ENGLISH: r""" + You are a plan generator. For a given goal, **draft a simple plan** that can step-by-step generate the \ + appropriate command line arguments and flags. + + You will receive a "command prefix", which is the already determined and generated command part. You need to \ + use the flags and arguments based on this prefix to complete the command. + + In each step, specify which external tool to use and the tool input to get the evidence. + + The tool can be one of the following: + (1) Option["instruction"]: Query the most similar command line flag. Only accepts one input parameter, \ + "instruction" must be a search string. The search string should be detailed and contain necessary data. + (2) Argument["name"]: Place the data from the task into a specific position in the command line. \ + Accepts two input parameters. + + All steps must start with "Plan: " and be less than 150 words. + Do not add any extra steps. + Ensure each step contains all the required information - do not skip steps. + Do not add any extra data after the evidence. + + Start example + + Task: Run a new alpine:latest container in the background, mount the host /root folder to /data, and execute \ + the top command. + Prefix: `docker run` + Usage: `docker run ${OPTS} ${image} ${command}`. This is a Python template string. OPTS is a placeholder for all \ + flags. The arguments must be one of ["image", "command"]. + Prefix description: The description of binary program `docker` is "Docker container platform", and the \ + description of `run` subcommand is "Create and run a new container from an image". + + Plan: I need a flag to make the container run in the background. #E1 = Option[Run a single container in the \ + background] + Plan: I need a flag to mount the host /root directory to /data directory in the container. 
#E2 = Option[Mount \ + host /root directory to /data directory] + Plan: I need to parse the image name from the task. #E3 = Argument[image] + Plan: I need to specify the command to be run in the container. #E4 = Argument[command] + Final: Assemble the above clues to generate the final command. #F + + End example + + Let's get started! + """, + } + """系统提示词""" + + user_prompt = { + LanguageType.CHINESE: r""" + 任务:{instruction} + 前缀:`{binary_name} {subcmd_name}` + 用法:`{subcmd_usage}`。这是一个Python模板字符串。OPTS是所有标志的占位符。参数必须是 {argument_list} 其中之一。 + 前缀描述:二进制程序`{binary_name}`的描述为"{binary_description}",`{subcmd_name}`子命令的描述为\ + "{subcmd_description}"。 + + 请生成相应的计划。 + """, + LanguageType.ENGLISH: r""" + Task: {instruction} + Prefix: `{binary_name} {subcmd_name}` + Usage: `{subcmd_usage}`. This is a Python template string. OPTS is a placeholder for all flags. The arguments \ + must be one of {argument_list}. + Prefix description: The description of binary program `{binary_name}` is "{binary_description}", and the \ + description of `{subcmd_name}` subcommand is "{subcmd_description}". + + Please generate the corresponding plan. 
+ """, + } + """用户提示词""" + return system_prompt, user_prompt + + def __init__( + self, + system_prompt: dict[LanguageType, str] | None = None, + user_prompt: dict[LanguageType, str] | None = None, + ) -> None: """处理Prompt""" super().__init__(system_prompt, user_prompt) @@ -64,6 +127,7 @@ class InitPlan(CorePattern): spec = kwargs["spec"] binary_name = kwargs["binary_name"] subcmd_name = kwargs["subcmd_name"] + language = kwargs.get("language", LanguageType.CHINESE) binary_description = spec[binary_name][0] subcmd_usage = spec[binary_name][2][subcmd_name][1] subcmd_description = spec[binary_name][2][subcmd_name][0] @@ -73,16 +137,19 @@ class InitPlan(CorePattern): argument_list += [key] messages = [ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": self.user_prompt.format( - instruction=kwargs["instruction"], - binary_name=binary_name, - subcmd_name=subcmd_name, - binary_description=binary_description, - subcmd_description=subcmd_description, - subcmd_usage=subcmd_usage, - argument_list=argument_list, - )}, + {"role": "system", "content": self.system_prompt[language]}, + { + "role": "user", + "content": self.user_prompt[language].format( + instruction=kwargs["instruction"], + binary_name=binary_name, + subcmd_name=subcmd_name, + binary_description=binary_description, + subcmd_description=subcmd_description, + subcmd_usage=subcmd_usage, + argument_list=argument_list, + ), + }, ] result = "" @@ -98,46 +165,84 @@ class InitPlan(CorePattern): class PlanEvaluator(CorePattern): """计划评估器""" - system_prompt: str = r""" - 你是一个计划评估器。你的任务是评估给定的计划是否合理和完整。 - - 一个好的计划应该: - 1. 涵盖原始任务的所有要求 - 2. 使用适当的工具收集必要的信息 - 3. 具有清晰和逻辑的步骤 - 4. 没有冗余或不必要的步骤 - - 对于计划中的每个步骤,评估: - 1. 工具选择是否适当 - 2. 输入参数是否清晰和充分 - 3. 
该步骤是否有助于实现最终目标 - - 请回复: - "VALID" - 如果计划良好且完整 - "INVALID: <原因>" - 如果计划有问题,请解释原因 - """ - """系统提示词""" - - user_prompt: str = r""" - 任务:{instruction} - 计划:{plan} - - 评估计划并回复"VALID"或"INVALID: <原因>"。 - """ - """用户提示词""" - - def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: + def get_default_prompt(self) -> dict[LanguageType, str]: + system_prompt = { + LanguageType.CHINESE: r""" + 你是一个计划评估器。你的任务是评估给定的计划是否合理和完整。 + + 一个好的计划应该: + 1. 涵盖原始任务的所有要求 + 2. 使用适当的工具收集必要的信息 + 3. 具有清晰和逻辑的步骤 + 4. 没有冗余或不必要的步骤 + + 对于计划中的每个步骤,评估: + 1. 工具选择是否适当 + 2. 输入参数是否清晰和充分 + 3. 该步骤是否有助于实现最终目标 + + 请回复: + "VALID" - 如果计划良好且完整 + "INVALID: <原因>" - 如果计划有问题,请解释原因 + """, + LanguageType.ENGLISH: r""" + You are a plan evaluator. Your task is to evaluate whether the given plan is reasonable and complete. + + A good plan should: + 1. Cover all requirements of the original task + 2. Use appropriate tools to collect necessary information + 3. Have clear and logical steps + 4. Have no redundant or unnecessary steps + + For each step in the plan, evaluate: + 1. Whether the tool selection is appropriate + 2. Whether the input parameters are clear and sufficient + 3. Whether this step helps achieve the final goal + + Please reply: + "VALID" - If the plan is good and complete + "INVALID: <原因>" - If the plan has problems, please explain the reason + """, + } + """系统提示词""" + + user_prompt = { + LanguageType.CHINESE: r""" + 任务:{instruction} + 计划:{plan} + + 评估计划并回复"VALID"或"INVALID: <原因>"。 + """, + LanguageType.ENGLISH: r""" + Task: {instruction} + Plan: {plan} + + Evaluate the plan and reply with "VALID" or "INVALID: <原因>". 
+ """, + } + """用户提示词""" + return system_prompt, user_prompt + + def __init__( + self, + system_prompt: dict[LanguageType, str] | None = None, + user_prompt: dict[LanguageType, str] | None = None, + ) -> None: """初始化Prompt""" super().__init__(system_prompt, user_prompt) async def generate(self, **kwargs) -> str: """生成计划评估结果""" + language = kwargs.get("language", LanguageType.CHINESE) messages = [ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": self.user_prompt.format( - instruction=kwargs["instruction"], - plan=kwargs["plan"], - )}, + {"role": "system", "content": self.system_prompt[language]}, + { + "role": "user", + "content": self.user_prompt[language].format( + instruction=kwargs["instruction"], + plan=kwargs["plan"], + ), + }, ] result = "" @@ -153,45 +258,81 @@ class PlanEvaluator(CorePattern): class RePlanner(CorePattern): """重新规划器""" - system_prompt: str = r""" - 你是一个计划重新规划器。当计划被评估为无效时,你需要生成一个新的、改进的计划。 - - 新计划应该: - 1. 解决评估中提到的所有问题 - 2. 保持与原始计划相同的格式 - 3. 更加精确和完整 - 4. 为每个步骤使用适当的工具 - - 遵循与原始计划相同的格式: - - 每个步骤应以"Plan: "开头 - - 包含带有适当参数的工具使用 - - 保持步骤简洁和重点突出 - - 以"Final"步骤结束 - """ - """系统提示词""" - - user_prompt: str = r""" - 任务:{instruction} - 原始计划:{plan} - 评估:{evaluation} - - 生成一个新的、改进的计划,解决评估中提到的所有问题。 - """ - """用户提示词""" - - def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: + def get_default_prompt(self) -> dict[LanguageType, str]: + system_prompt = { + LanguageType.CHINESE: r""" + 你是一个计划重新规划器。当计划被评估为无效时,你需要生成一个新的、改进的计划。 + + 新计划应该: + 1. 解决评估中提到的所有问题 + 2. 保持与原始计划相同的格式 + 3. 更加精确和完整 + 4. 为每个步骤使用适当的工具 + + 遵循与原始计划相同的格式: + - 每个步骤应以"Plan: "开头 + - 包含带有适当参数的工具使用 + - 保持步骤简洁和重点突出 + - 以"Final"步骤结束 + """, + LanguageType.ENGLISH: r""" + You are a plan replanner. When the plan is evaluated as invalid, you need to generate a new, improved plan. + + The new plan should: + 1. Solve all problems mentioned in the evaluation + 2. Keep the same format as the original plan + 3. 
Be more precise and complete + 4. Use appropriate tools for each step + + Follow the same format as the original plan: + - Each step should start with "Plan: " + - Include tool usage with appropriate parameters + - Keep steps concise and focused + - End with the "Final" step + """, + } + """系统提示词""" + + user_prompt = { + LanguageType.CHINESE: r""" + 任务:{instruction} + 原始计划:{plan} + 评估:{evaluation} + + 生成一个新的、改进的计划,解决评估中提到的所有问题。 + """, + LanguageType.ENGLISH: r""" + Task: {instruction} + Original Plan: {plan} + Evaluation: {evaluation} + + Generate a new, improved plan that solves all problems mentioned in the evaluation. + """, + } + """用户提示词""" + return system_prompt, user_prompt + + def __init__( + self, + system_prompt: dict[LanguageType, str] | None = None, + user_prompt: dict[LanguageType, str] | None = None, + ) -> None: """初始化Prompt""" super().__init__(system_prompt, user_prompt) async def generate(self, **kwargs) -> str: """生成重新规划结果""" + language = kwargs.get("language", LanguageType.CHINESE) messages = [ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": self.user_prompt.format( - instruction=kwargs["instruction"], - plan=kwargs["plan"], - evaluation=kwargs["evaluation"], - )}, + {"role": "system", "content": self.system_prompt[language]}, + { + "role": "user", + "content": self.user_prompt[language].format( + instruction=kwargs["instruction"], + plan=kwargs["plan"], + evaluation=kwargs["evaluation"], + ), + }, ] result = "" diff --git a/apps/llm/patterns/rewrite.py b/apps/llm/patterns/rewrite.py index 15d52ab288b4c964e81ee290894bd29a590bbab2..e7004a30c0602c073f793c269f172c0d8a8863f2 100644 --- a/apps/llm/patterns/rewrite.py +++ b/apps/llm/patterns/rewrite.py @@ -4,11 +4,16 @@ import logging from pydantic import BaseModel, Field +from textwrap import dedent + +from jinja2 import BaseLoader +from jinja2.sandbox import SandboxedEnvironment from apps.llm.function import JsonGenerator from apps.llm.patterns.core import 
CorePattern from apps.llm.reasoning import ReasoningLLM from apps.llm.token import TokenCalculator +from apps.schemas.enum_var import LanguageType logger = logging.getLogger(__name__) @@ -18,82 +23,159 @@ class QuestionRewriteResult(BaseModel): question: str = Field(description="补全后的问题") +_env = SandboxedEnvironment( + loader=BaseLoader, + autoescape=False, + trim_blocks=True, + lstrip_blocks=True, +) + + class QuestionRewrite(CorePattern): """问题补全与重写""" - system_prompt: str = r""" + def get_default_prompt(self) -> dict[LanguageType, str]: + system_prompt = { + LanguageType.CHINESE: dedent(r""" + + + 根据历史对话,推断用户的实际意图并补全用户的提问内容,历史对话被包含在标签中,用户意图被包含在标签中。 + 要求: + 1. 请使用JSON格式输出,参考下面给出的样例;不要包含任何XML标签,不要包含任何解释说明; + 2. 若用户当前提问内容与对话上文不相关,或你认为用户的提问内容已足够完整,请直接输出用户的提问内容。 + 3. 补全内容必须精准、恰当,不要编造任何内容。 + 4. 请输出补全后的问题,不要输出其他内容。 + 输出格式样例: + ```json + { + "question": "补全后的问题" + } + ``` + + + + + + + openEuler的特点是什么? + + + openEuler相较于其他操作系统,其特点是支持多种硬件架构,并且提供稳定、安全、高效的操作系统平台。 + + + + + openEuler的优势有哪些? + + + openEuler的优势包括开源、社区支持、以及对云计算和边缘计算的优化。 + + + + + + 详细点? + + + ```json + { + "question": "openEuler的特点是什么?请详细说明其优势和应用场景。" + } + ``` + + + + + {{history}} + + + {{question}} + + """), + LanguageType.ENGLISH: dedent(r""" + + + Based on the historical dialogue, infer the user's actual intent and complete the user's question. The historical dialogue is contained within the tags, and the user's intent is contained within the tags. + Requirements: + 1. Please output in JSON format, referring to the example provided below; do not include any XML tags or any explanatory notes; + 2. If the user's current question is unrelated to the previous dialogue or you believe the user's question is already complete enough, directly output the user's question. + 3. The completed content must be precise and appropriate; do not fabricate any content. + 4. Output only the completed question; do not include any other content. 
+ Example output format: + ```json + { + "question": "The completed question" + } + ``` + + + + + + + What are the features of openEuler? + + + Compared to other operating systems, openEuler's features include support for multiple hardware architectures and providing a stable, secure, and efficient operating system platform. + + + + + What are the advantages of openEuler? + + + The advantages of openEuler include being open-source, having community support, and optimizations for cloud and edge computing. + + + + + + More details? + + + ```json + { + "question": "What are the features of openEuler? Please elaborate on its advantages and application scenarios." + } + + + + + {{history}} + + + {{question}} + + """) + } + + """用户提示词""" + user_prompt = { + LanguageType.CHINESE: r""" + + 请输出补全后的问题 + + """, + LanguageType.ENGLISH: r""" - - 根据历史对话,推断用户的实际意图并补全用户的提问内容,历史对话被包含在标签中,用户意图被包含在标签中。 - 要求: - 1. 请使用JSON格式输出,参考下面给出的样例;不要包含任何XML标签,不要包含任何解释说明; - 2. 若用户当前提问内容与对话上文不相关,或你认为用户的提问内容已足够完整,请直接输出用户的提问内容。 - 3. 补全内容必须精准、恰当,不要编造任何内容。 - 4. 请输出补全后的问题,不要输出其他内容。 - 输出格式样例: - {{ - "question": "补全后的问题" - }} - - - - - - - openEuler的特点是什么? - - - openEuler相较于其他操作系统,其特点是支持多种硬件架构,并且提供稳定、安全、高效的操作系统平台。 - - - - - openEuler的优势有哪些? - - - openEuler的优势包括开源、社区支持、以及对云计算和边缘计算的优化。 - - - - - - 详细点? 
- - - {{ - "question": "openEuler的特点是什么?请详细说明其优势和应用场景。" - }} - - + Please output the completed question - - {history} - - - {question} - - """ - """用户提示词""" - user_prompt: str = """ - - 请输出补全后的问题 - - """ + """} + return system_prompt, user_prompt async def generate(self, **kwargs) -> str: # noqa: ANN003 """问题补全与重写""" history = kwargs.get("history", []) question = kwargs["question"] llm = kwargs.get("llm", None) + language = kwargs.get("language", LanguageType.CHINESE) if not llm: llm = ReasoningLLM() leave_tokens = llm._config.max_tokens leave_tokens -= TokenCalculator().calculate_token_length( - messages=[ - {"role": "system", "content": self.system_prompt.format(history="", question=question)}, - {"role": "user", "content": self.user_prompt} - ] - ) + messages=[{"role": "system", "content": _env.from_string(self.system_prompt[language]).render( + history="", question=question)}, + {"role": "user", "content": _env.from_string(self.user_prompt[language]).render()}]) if leave_tokens <= 0: logger.error("[QuestionRewrite] 大模型上下文窗口不足,无法进行问题补全与重写") return question @@ -112,16 +194,16 @@ class QuestionRewrite(CorePattern): if leave_tokens >= 0: qa = sub_qa + qa index += 2 - messages = [ - {"role": "system", "content": self.system_prompt.format(history=qa, question=question)}, - {"role": "user", "content": self.user_prompt} - ] + messages = [{"role": "system", "content": _env.from_string(self.system_prompt[language]).render( + history=qa, question=question)}, {"role": "user", "content": _env.from_string(self.user_prompt[language]).render()}] result = "" async for chunk in llm.call(messages, streaming=False): result += chunk self.input_tokens = llm.input_tokens self.output_tokens = llm.output_tokens - + tmp_js = await JsonGenerator._parse_result_by_stack(result, QuestionRewriteResult.model_json_schema()) + if tmp_js is not None: + return tmp_js['question'] messages += [{"role": "assistant", "content": result}] json_gen = JsonGenerator( query="根据给定的背景信息,生成预测问题", diff --git 
a/apps/llm/patterns/select.py b/apps/llm/patterns/select.py index a6c496bdd0ef79631bbdc41935e717ca3e3668ea..4d9ab3a08832b120e87a9e7f9ac0dde7980fee76 100644 --- a/apps/llm/patterns/select.py +++ b/apps/llm/patterns/select.py @@ -11,6 +11,7 @@ from apps.llm.function import JsonGenerator from apps.llm.patterns.core import CorePattern from apps.llm.reasoning import ReasoningLLM from apps.llm.snippet import choices_to_prompt +from apps.schemas.enum_var import LanguageType logger = logging.getLogger(__name__) @@ -18,61 +19,123 @@ logger = logging.getLogger(__name__) class Select(CorePattern): """通过投票选择最佳答案""" - system_prompt: str = "You are a helpful assistant." - """系统提示词""" - - user_prompt: str = r""" - - - 根据历史对话(包括工具调用结果)和用户问题,从给出的选项列表中,选出最符合要求的那一项。 - 在输出之前,请先思考,并使用“”标签给出思考过程。 - 结果需要使用JSON格式输出,输出格式为:{{ "choice": "选项名称" }} - - - - - 使用天气API,查询明天杭州的天气信息 + def get_default_prompt(self) -> dict[LanguageType, str]: + system_prompt = { + LanguageType.CHINESE: "你是一个有用的助手。", + LanguageType.ENGLISH: "You are a helpful assistant.", + } + """系统提示词""" + + user_prompt = { + LanguageType.CHINESE: r""" + + + 根据历史对话(包括工具调用结果)和用户问题,从给出的选项列表中,选出最符合要求的那一项。 + 在输出之前,请先思考,并使用“”标签给出思考过程。 + 结果需要使用JSON格式输出,输出格式为:{{ "choice": "选项名称" }} + + + + + 使用天气API,查询明天杭州的天气信息 + + + + API + HTTP请求,获得返回的JSON数据 + + + SQL + 查询数据库,获得数据库表中的数据 + + + + + + API 工具可以通过 API 来获取外部数据,而天气信息可能就存储在外部数据中,由于用户说明中明确提到了 \ + 天气 API 的使用,因此应该优先使用 API 工具。\ + SQL 工具用于从数据库中获取信息,考虑到天气数据的可变性和动态性,不太可能存储在数据库中,因此 \ + SQL 工具的优先级相对较低,\ + 最佳选择似乎是“API:请求特定 API,获取返回的 JSON 数据”。 + + + + {{ "choice": "API" }} + + + + + + + {question} + + + + {choice_list} + + + + + 让我们一步一步思考。 + """, + LanguageType.ENGLISH: r""" + + + Based on the historical dialogue (including tool call results) and user question, select the most \ + suitable option from the given option list. + Before outputting, please think carefully and use the "" tag to give the thinking process. 
+ The output needs to be in JSON format, the output format is: {{ "choice": "option name" }} + + + + + Use the weather API to query the weather information of Hangzhou tomorrow + + + + API + HTTP request, get the returned JSON data + + + SQL + Query the database, get the data in the database table + + + + + + The API tool can get external data through API, and the weather information may be stored in \ + external data. Since the user clearly mentioned the use of weather API, it should be given \ + priority to the API tool.\ + The SQL tool is used to get information from the database, considering the variability and \ + dynamism of weather data, it is unlikely to be stored in the database, so the priority of \ + the SQL tool is relatively low, \ + The best choice seems to be "API: request a specific API, get the returned JSON data". + + + + {{ "choice": "API" }} + + + + + + {question} + - - API - HTTP请求,获得返回的JSON数据 - - - SQL - 查询数据库,获得数据库表中的数据 - + {choice_list} - API 工具可以通过 API 来获取外部数据,而天气信息可能就存储在外部数据中,由于用户说明中明确提到了 \ - 天气 API 的使用,因此应该优先使用 API 工具。\ - SQL 工具用于从数据库中获取信息,考虑到天气数据的可变性和动态性,不太可能存储在数据库中,因此 \ - SQL 工具的优先级相对较低,\ - 最佳选择似乎是“API:请求特定 API,获取返回的 JSON 数据”。 + Let's think step by step. 
- - - {{ "choice": "API" }} - - - - - - - {question} - - - - {choice_list} - - - - - 让我们一步一步思考。 - """ - """用户提示词""" + + """, + } + """用户提示词""" + return system_prompt, user_prompt slot_schema: ClassVar[dict[str, Any]] = { "type": "object", @@ -86,17 +149,19 @@ class Select(CorePattern): } """最终输出的JSON Schema""" - - def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: - """初始化Prompt""" + def __init__( + self, + system_prompt: dict[LanguageType, str] | None = None, + user_prompt: dict[str, str] | None = None, + ) -> None: + """处理Prompt""" super().__init__(system_prompt, user_prompt) - async def _generate_single_attempt(self, user_input: str, choice_list: list[str]) -> str: """使用ReasoningLLM进行单次尝试""" logger.info("[Select] 单次选择尝试: %s", user_input) messages = [ - {"role": "system", "content": self.system_prompt}, + {"role": "system", "content": self.system_prompt[self.language]}, {"role": "user", "content": user_input}, ] result = "" @@ -120,7 +185,6 @@ class Select(CorePattern): function_result = await json_gen.generate() return function_result["choice"] - async def generate(self, **kwargs) -> str: # noqa: ANN003 """使用大模型做出选择""" logger.info("[Select] 使用LLM选择") @@ -128,6 +192,7 @@ class Select(CorePattern): result_list = [] background = kwargs.get("background", "无背景信息。") + language = kwargs.get("language", LanguageType.CHINESE) data_str = json.dumps(kwargs.get("data", {}), ensure_ascii=False) choice_prompt, choices_list = choices_to_prompt(kwargs["choices"]) @@ -141,7 +206,7 @@ class Select(CorePattern): return choices_list[0] logger.info("[Select] 选项列表: %s", choice_prompt) - user_input = self.user_prompt.format( + user_input = self.user_prompt[language].format( question=kwargs["question"], background=background, data=data_str, diff --git a/apps/llm/prompt.py b/apps/llm/prompt.py index 7cf555be390bc72301323cd103c1a7ec0ab7a085..40fc015881b908cdf24c4fc8d2ac9c84b0efec73 100644 --- a/apps/llm/prompt.py +++ b/apps/llm/prompt.py @@ 
-19,7 +19,7 @@ JSON_GEN_BASIC = dedent(r""" Background information is given in XML tags. - Here are the conversations between you and the user: + Here are the background information between you and the user: {% if conversation|length > 0 %} {% for message in conversation %} @@ -48,7 +48,6 @@ JSON_GEN_BASIC = dedent(r""" {% endif %} - {% if not function_call %} # Tools You must call one function to assist with the user query. @@ -67,5 +66,4 @@ JSON_GEN_BASIC = dedent(r""" # Output - {% endif %} """) diff --git a/apps/llm/reasoning.py b/apps/llm/reasoning.py index fdb36fc05adf38920bcce0d962b6aafc21e44b71..fddc84db89d98bb8c29d96b4dee71aeed49804ea 100644 --- a/apps/llm/reasoning.py +++ b/apps/llm/reasoning.py @@ -61,15 +61,16 @@ class ReasoningContent: return reason, text if self.reasoning_type == "args": - if hasattr(chunk.choices[0].delta, "reasoning_content"): + if hasattr( + chunk.choices[0].delta, "reasoning_content") and chunk.choices[0].delta.reasoning_content is not None: # type: ignore[attr-defined] + # 仍在推理中,继续添加推理内容 reason = chunk.choices[0].delta.reasoning_content or "" # type: ignore[attr-defined] else: # 推理结束,设置标志并添加结束标签 self.is_reasoning = False reason = "" # 如果当前内容不是推理内容标签,将其作为文本返回 - if content and not content.startswith(""): - text = content + text = content.lstrip("") elif self.reasoning_type == "tokens": for token in REASONING_END_TOKEN: if token == content: @@ -141,10 +142,11 @@ class ReasoningLLM: return await self._client.chat.completions.create( model=model, messages=messages, # type: ignore[] - max_tokens=max_tokens or self._config.max_tokens, + max_completion_tokens=max_tokens or self._config.max_tokens, temperature=temperature or self._config.temperature, stream=True, stream_options={"include_usage": True}, + timeout=300 ) # type: ignore[] async def call( # noqa: C901, PLR0912, PLR0913 diff --git a/apps/main.py b/apps/main.py index c4ca2bfb116db11624f4e4fbbe4e876a86e5f1eb..4beeda1532a6b75cd6064b18d3c350bec33a5f00 100644 --- a/apps/main.py +++ 
b/apps/main.py @@ -36,9 +36,10 @@ from apps.routers import ( record, service, user, + parameter ) from apps.scheduler.pool.pool import Pool - +logger = logging.getLogger(__name__) # 定义FastAPI app app = FastAPI(redoc_url=None) # 定义FastAPI全局中间件 @@ -66,6 +67,7 @@ app.include_router(llm.router) app.include_router(mcp_service.router) app.include_router(flow.router) app.include_router(user.router) +app.include_router(parameter.router) # logger配置 LOGGER_FORMAT = "%(funcName)s() - %(message)s" @@ -81,13 +83,48 @@ logging.basicConfig( ) +async def add_defaut_admin_user(): + """ + 添加无认证用户 + """ + from apps.common.mongo import MongoDB + from apps.schemas.collection import User + import os + mongo = MongoDB() + user_collection = mongo.get_collection("user") + username = os.environ.get('USERNAME') # 适用于 Windows 系统 + if not username: + username = os.environ.get('USER') # 适用于 Linux 和 macOS 系统 + if not username: + username = "admin" + try: + await user_collection.insert_one(User( + _id=username, + is_admin=True, + auto_execute=False + ).model_dump(by_alias=True)) + except Exception as e: + logger.error(f"[add_no_auth_user] 默认用户 {username} 已存在") + + +async def clear_user_activity() -> None: + """清除所有用户的活跃状态""" + from apps.services.activity import Activity + from apps.common.mongo import MongoDB + mongo = MongoDB() + activity_collection = mongo.get_collection("activity") + await activity_collection.delete_many({}) + logging.info("清除所有用户活跃状态完成") + + async def init_resources() -> None: """初始化必要资源""" WordsCheck() await LanceDB().init() await Pool.init() TokenCalculator() - + await add_defaut_admin_user() + await clear_user_activity() # 运行 if __name__ == "__main__": # 初始化必要资源 diff --git a/apps/routers/api_key.py b/apps/routers/api_key.py index 158cfc13a5be17f16e2d73ec5fa44c4a18e4f392..51366a2192eaa429a3f1e1d40672b9e915bec424 100644 --- a/apps/routers/api_key.py +++ b/apps/routers/api_key.py @@ -6,6 +6,7 @@ from typing import Annotated from fastapi import APIRouter, Depends, status from 
fastapi.responses import JSONResponse + from apps.dependency.user import get_user, verify_user from apps.schemas.api_key import GetAuthKeyRsp, PostAuthKeyMsg, PostAuthKeyRsp from apps.schemas.response_data import ResponseData diff --git a/apps/routers/appcenter.py b/apps/routers/appcenter.py index 0ec4db9155a06fe16cbeddff223ccd9a764d8b3d..ca2d93d5d0e11f69713a4130262de08d8191fd20 100644 --- a/apps/routers/appcenter.py +++ b/apps/routers/appcenter.py @@ -13,6 +13,8 @@ from apps.schemas.appcenter import AppFlowInfo, AppPermissionData from apps.schemas.enum_var import AppFilterType, AppType from apps.schemas.request_data import CreateAppRequest, ModFavAppRequest from apps.schemas.response_data import ( + AppMcpServiceInfo, + LLMIteam, BaseAppOperationMsg, BaseAppOperationRsp, GetAppListMsg, @@ -25,7 +27,8 @@ from apps.schemas.response_data import ( ResponseData, ) from apps.services.appcenter import AppCenterManager - +from apps.services.llm import LLMManager +from apps.services.mcp_service import MCPServiceManager logger = logging.getLogger(__name__) router = APIRouter( prefix="/api/app", @@ -180,6 +183,7 @@ async def get_recently_used_applications( @router.get("/{appId}", response_model=GetAppPropertyRsp | ResponseData) async def get_application( + user_sub: Annotated[str, Depends(get_user)], app_id: Annotated[str, Path(..., alias="appId", description="应用ID")], ) -> JSONResponse: """获取应用详情""" @@ -214,6 +218,24 @@ async def get_application( ) for flow in app_data.flows ] + mcp_service = [] + if app_data.mcp_service: + for service in app_data.mcp_service: + mcp_collection = await MCPServiceManager.get_mcp_service(service) + mcp_service.append(AppMcpServiceInfo( + id=mcp_collection.id, + name=mcp_collection.name, + description=mcp_collection.description, + )) + if app_data.llm_id == "empty": + llm_item = LLMIteam() + else: + llm_collection = await LLMManager.get_llm_by_id(user_sub, app_data.llm_id) + llm_item = LLMIteam( + llmId=llm_collection.id, + 
modelName=llm_collection.model_name, + icon=llm_collection.icon + ) return JSONResponse( status_code=status.HTTP_200_OK, content=GetAppPropertyRsp( @@ -234,7 +256,8 @@ async def get_application( authorizedUsers=app_data.permission.users, ), workflows=workflows, - mcpService=app_data.mcp_service, + mcpService=mcp_service, + llm=llm_item, ), ).model_dump(exclude_none=True, by_alias=True), ) diff --git a/apps/routers/auth.py b/apps/routers/auth.py index 1cba5ed629d90776b59ae3bf652381318dcabf0e..72416fa35b1c23d2975497427da35a4912695fa4 100644 --- a/apps/routers/auth.py +++ b/apps/routers/auth.py @@ -9,6 +9,7 @@ from fastapi import APIRouter, Depends, Request, status from fastapi.responses import HTMLResponse, JSONResponse from fastapi.templating import Jinja2Templates +from apps.common.config import Config from apps.common.oidc import oidc_provider from apps.dependency import get_session, get_user, verify_user from apps.schemas.collection import Audit @@ -47,8 +48,6 @@ async def oidc_login(request: Request, code: str) -> HTMLResponse: user_info = await oidc_provider.get_oidc_user(token["access_token"]) user_sub: str | None = user_info.get("user_sub", None) - if user_sub: - await oidc_provider.set_token(user_sub, token["access_token"], token["refresh_token"]) except Exception as e: logger.exception("User login failed") status_code = status.HTTP_400_BAD_REQUEST if "auth error" in str(e) else status.HTTP_403_FORBIDDEN @@ -75,7 +74,7 @@ async def oidc_login(request: Request, code: str) -> HTMLResponse: status_code=status.HTTP_403_FORBIDDEN, ) - await UserManager.update_userinfo_by_user_sub(user_sub) + await UserManager.update_refresh_revision_by_user_sub(user_sub) current_session = await SessionManager.create_session(user_host, user_sub) @@ -178,6 +177,7 @@ async def userinfo( user_sub=user_sub, revision=user.is_active, is_admin=user.is_admin, + auto_execute=user.auto_execute, ), ).model_dump(exclude_none=True, by_alias=True), ) @@ -193,7 +193,7 @@ async def userinfo( ) 
async def update_revision_number(request: Request, user_sub: Annotated[str, Depends(get_user)]) -> JSONResponse: # noqa: ARG001 """更新用户协议信息""" - ret: bool = await UserManager.update_userinfo_by_user_sub(user_sub, refresh_revision=True) + ret: bool = await UserManager.update_refresh_revision_by_user_sub(user_sub, refresh_revision=True) if not ret: return JSONResponse( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, diff --git a/apps/routers/chat.py b/apps/routers/chat.py index 7fe5162c0f7db804c6d723207ffa327d9394e3eb..87cf1be672205c4b116a51aba5c8fbba743a8ab0 100644 --- a/apps/routers/chat.py +++ b/apps/routers/chat.py @@ -7,20 +7,24 @@ import uuid from collections.abc import AsyncGenerator from typing import Annotated -from fastapi import APIRouter, Depends, HTTPException, status +from fastapi import APIRouter, Depends, HTTPException, status, Query from fastapi.responses import JSONResponse, StreamingResponse from apps.common.queue import MessageQueue from apps.common.wordscheck import WordsCheck from apps.dependency import get_session, get_user +from apps.schemas.enum_var import FlowStatus from apps.scheduler.scheduler import Scheduler from apps.scheduler.scheduler.context import save_data -from apps.schemas.request_data import RequestData +from apps.schemas.request_data import RequestData, RequestDataApp from apps.schemas.response_data import ResponseData +from apps.schemas.enum_var import LanguageType from apps.schemas.task import Task from apps.services.activity import Activity from apps.services.blacklist import QuestionBlacklistManager, UserBlacklistManager from apps.services.flow import FlowManager +from apps.services.conversation import ConversationManager +from apps.services.record import RecordManager from apps.services.task import TaskManager RECOMMEND_TRES = 5 @@ -36,28 +40,50 @@ async def init_task(post_body: RequestData, user_sub: str, session_id: str) -> T # 生成group_id if not post_body.group_id: post_body.group_id = str(uuid.uuid4()) - # 创建或还原Task 
- task = await TaskManager.get_task(session_id=session_id, post_body=post_body, user_sub=user_sub) + # 更改信息并刷新数据库 - task.runtime.question = post_body.question - task.ids.group_id = post_body.group_id + if post_body.task_id is None: + conversation = await ConversationManager.get_conversation_by_conversation_id( + user_sub=user_sub, + conversation_id=post_body.conversation_id, + ) + if not conversation: + err = "[Chat] 用户没有权限访问该对话!" + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=err) + task_ids = await TaskManager.delete_tasks_by_conversation_id(post_body.conversation_id) + await RecordManager.update_record_flow_status_to_cancelled_by_task_ids(task_ids) + task = await TaskManager.init_new_task(user_sub=user_sub, session_id=session_id, post_body=post_body) + task.runtime.question = post_body.question + task.ids.group_id = post_body.group_id + task.state.app_id = post_body.app.app_id if post_body.app else "" + else: + if not post_body.task_id: + err = "[Chat] task_id 不可为空!" 
+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="task_id cannot be empty") + task = await TaskManager.get_task_by_task_id(post_body.task_id) + post_body.app = RequestDataApp(appId=task.state.app_id) + post_body.group_id = task.ids.group_id + post_body.conversation_id = task.ids.conversation_id + post_body.language = task.language + post_body.question = task.runtime.question + task.language = post_body.language return task async def chat_generator(post_body: RequestData, user_sub: str, session_id: str) -> AsyncGenerator[str, None]: """进行实际问答,并从MQ中获取消息""" try: - await Activity.set_active(user_sub) + active_id = await Activity.set_active(user_sub) # 敏感词检查 if await WordsCheck().check(post_body.question) != 1: yield "data: [SENSITIVE]\n\n" logger.info("[Chat] 问题包含敏感词!") - await Activity.remove_active(user_sub) + await Activity.remove_active(active_id) return task = await init_task(post_body, user_sub, session_id) - + task.ids.active_id = active_id # 创建queue;由Scheduler进行关闭 queue = MessageQueue() await queue.init() @@ -77,19 +103,18 @@ async def chat_generator(post_body: RequestData, user_sub: str, session_id: str) # 获取最终答案 task = scheduler.task - if not task.runtime.answer: - logger.error("[Chat] 答案为空") + if task.state.flow_status == FlowStatus.ERROR: + logger.error("[Chat] 生成答案失败") yield "data: [ERROR]\n\n" - await Activity.remove_active(user_sub) + await Activity.remove_active(active_id) return # 对结果进行敏感词检查 if await WordsCheck().check(task.runtime.answer) != 1: yield "data: [SENSITIVE]\n\n" logger.info("[Chat] 答案包含敏感词!") - await Activity.remove_active(user_sub) + await Activity.remove_active(active_id) return - # 创建新Record,存入数据库 await save_data(task, user_sub, post_body) @@ -107,7 +132,7 @@ async def chat_generator(post_body: RequestData, user_sub: str, session_id: str) yield "data: [ERROR]\n\n" finally: - await Activity.remove_active(user_sub) + await Activity.remove_active(active_id) @router.post("/chat") @@ -117,16 +142,13 @@ async def chat( 
session_id: Annotated[str, Depends(get_session)], ) -> StreamingResponse: """LLM流式对话接口""" + post_body.language = LanguageType.CHINESE if post_body.language in {"zh", LanguageType.CHINESE} else LanguageType.ENGLISH # 前端 Flow-Debug 传输为“zh" # 问题黑名单检测 - if not await QuestionBlacklistManager.check_blacklisted_questions(input_question=post_body.question): + if post_body.question is not None and not await QuestionBlacklistManager.check_blacklisted_questions(input_question=post_body.question): # 用户扣分 await UserBlacklistManager.change_blacklisted_users(user_sub, -10) raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="question is blacklisted") - # 限流检查 - if await Activity.is_active(user_sub): - raise HTTPException(status_code=status.HTTP_429_TOO_MANY_REQUESTS, detail="Too many requests") - res = chat_generator(post_body, user_sub, session_id) return StreamingResponse( content=res, @@ -138,9 +160,12 @@ async def chat( @router.post("/stop", response_model=ResponseData) -async def stop_generation(user_sub: Annotated[str, Depends(get_user)]): # noqa: ANN201 +async def stop_generation(user_sub: Annotated[str, Depends(get_user)], + task_id: Annotated[str, Query(..., alias="taskId")] = "") -> JSONResponse: """停止生成""" - await Activity.remove_active(user_sub) + task = await TaskManager.get_task_by_task_id(task_id) + if task: + await Activity.remove_active(task.ids.active_id) return JSONResponse( status_code=status.HTTP_200_OK, content=ResponseData( diff --git a/apps/routers/conversation.py b/apps/routers/conversation.py index 1620ab5421a4018fd316a6d55939d859befd5282..4e867a7093fd5917491c71deea86dc21f6a60671 100644 --- a/apps/routers/conversation.py +++ b/apps/routers/conversation.py @@ -44,6 +44,7 @@ logger = logging.getLogger(__name__) async def create_new_conversation( + title: str, user_sub: str, app_id: str = "", llm_id: str = "empty", @@ -57,7 +58,8 @@ async def create_new_conversation( err = "Invalid app_id." 
raise RuntimeError(err) new_conv = await ConversationManager.add_conversation_by_user_sub( - user_sub, + title=title, + user_sub=user_sub, app_id=app_id, llm_id=llm_id, kb_ids=kb_ids or [], @@ -68,6 +70,7 @@ async def create_new_conversation( raise RuntimeError(err) return new_conv + @router.get( "", response_model=ConversationListRsp, @@ -127,6 +130,7 @@ async def get_conversation_list(user_sub: Annotated[str, Depends(get_user)]) -> async def add_conversation( user_sub: Annotated[str, Depends(get_user)], app_id: Annotated[str, Query(..., alias="appId")] = "", + title: Annotated[str, Body(...)] = "New Chat", llm_id: Annotated[str, Body(..., alias="llmId")] = "empty", kb_ids: Annotated[list[str] | None, Body(..., alias="kbIds")] = None, *, @@ -138,7 +142,8 @@ async def add_conversation( app_id = app_id if app_id else "" debug = debug if debug is not None else False new_conv = await create_new_conversation( - user_sub, + title=title, + user_sub=user_sub, app_id=app_id, llm_id=llm_id, kb_ids=kb_ids or [], @@ -185,15 +190,15 @@ async def update_conversation( ) # 更新Conversation数据 - change_status = await ConversationManager.update_conversation_by_conversation_id( - user_sub, - conversation_id, - { - "title": post_body.title, - }, - ) - - if not change_status: + try: + await ConversationManager.update_conversation_by_conversation_id( + user_sub, + conversation_id, + { + "title": post_body.title, + }, + ) + except Exception as e: return JSONResponse( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=ResponseData( diff --git a/apps/routers/flow.py b/apps/routers/flow.py index fc38c1bfb9879c1d846d700e91b812c83866c2c4..1de75aa84cc66fff175c3dee33e6f3c1edd013a6 100644 --- a/apps/routers/flow.py +++ b/apps/routers/flow.py @@ -2,6 +2,7 @@ """FastAPI Flow拓扑结构展示API""" from typing import Annotated +import logging from fastapi import APIRouter, Body, Depends, Query, status from fastapi.responses import JSONResponse @@ -20,11 +21,14 @@ from apps.schemas.response_data import ( 
NodeServiceListRsp, ResponseData, ) +from apps.schemas.enum_var import LanguageType from apps.services.appcenter import AppCenterManager from apps.services.application import AppManager from apps.services.flow import FlowManager from apps.services.flow_validate import FlowService +logger = logging.getLogger(__name__) + router = APIRouter( prefix="/api/flow", tags=["flow"], @@ -43,9 +47,10 @@ router = APIRouter( ) async def get_services( user_sub: Annotated[str, Depends(get_user)], + language: LanguageType = Query(LanguageType.CHINESE, description="语言参数,默认为中文") ) -> NodeServiceListRsp: """获取用户可访问的节点元数据所在服务的信息""" - services = await FlowManager.get_service_by_user_id(user_sub) + services = await FlowManager.get_service_by_user_id(user_sub, language) if services is None: return NodeServiceListRsp( code=status.HTTP_404_NOT_FOUND, @@ -130,8 +135,11 @@ async def put_flow( ).model_dump(exclude_none=True, by_alias=True), ) put_body.flow = await FlowService.remove_excess_structure_from_flow(put_body.flow) + logger.error(f'{put_body.flow}') await FlowService.validate_flow_illegal(put_body.flow) + logger.error(f'{put_body.flow}') put_body.flow.connectivity = await FlowService.validate_flow_connectivity(put_body.flow) + logger.error(f'{put_body.flow}') result = await FlowManager.put_flow_by_app_and_flow_id(app_id, flow_id, put_body.flow) if result is None: return JSONResponse( diff --git a/apps/routers/mcp_service.py b/apps/routers/mcp_service.py index a845a3761ea2ab179a1b71d4674d0d5fd3b5a760..bb8f3eb6182113f840dab7b9e288271bf2acd23b 100644 --- a/apps/routers/mcp_service.py +++ b/apps/routers/mcp_service.py @@ -36,6 +36,7 @@ router = APIRouter( dependencies=[Depends(verify_user)], ) + async def _check_user_admin(user_sub: str) -> None: user = await UserManager.get_userinfo_by_user_sub(user_sub) if not user: @@ -52,6 +53,8 @@ async def get_mcpservice_list( ] = SearchType.ALL, keyword: Annotated[str | None, Query(..., alias="keyword", description="搜索关键字")] = None, page: 
Annotated[int, Query(..., alias="page", ge=1, description="页码")] = 1, + is_install: Annotated[bool | None, Query(..., alias="isInstall", description="是否已安装")] = None, + is_active: Annotated[bool | None, Query(..., alias="isActive", description="是否激活")] = None, ) -> JSONResponse: """获取服务列表""" try: @@ -60,6 +63,8 @@ async def get_mcpservice_list( user_sub, keyword, page, + is_install, + is_active ) except Exception as e: err = f"[MCPServiceCenter] 获取MCP服务列表失败: {e}" @@ -89,7 +94,7 @@ async def get_mcpservice_list( @router.post("", response_model=UpdateMCPServiceRsp) async def create_or_update_mcpservice( user_sub: Annotated[str, Depends(get_user)], # TODO: get_user直接获取所有用户信息 - data: UpdateMCPServiceRequest, + data: UpdateMCPServiceRequest ) -> JSONResponse: """新建或更新MCP服务""" await _check_user_admin(user_sub) @@ -130,6 +135,36 @@ async def create_or_update_mcpservice( ).model_dump(exclude_none=True, by_alias=True)) +@router.post("/{serviceId}/install") +async def install_mcp_service( + user_sub: Annotated[str, Depends(get_user)], + service_id: Annotated[str, Path(..., alias="serviceId", description="服务ID")], + install: Annotated[bool, Query(..., description="是否安装")] = True, +) -> JSONResponse: + await _check_user_admin(user_sub) + try: + await MCPServiceManager.install_mcpservice(user_sub, service_id, install) + except Exception as e: + err = f"[MCPService] 安装mcp服务失败: {e!s}" if install else f"[MCPService] 卸载mcp服务失败: {e!s}" + logger.exception(err) + return JSONResponse( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + content=ResponseData( + code=status.HTTP_500_INTERNAL_SERVER_ERROR, + message=err, + result={}, + ).model_dump(exclude_none=True, by_alias=True), + ) + return JSONResponse( + status_code=status.HTTP_200_OK, + content=ResponseData( + code=status.HTTP_200_OK, + message="OK", + result={}, + ).model_dump(exclude_none=True, by_alias=True), + ) + + @router.get("/{serviceId}", response_model=GetMCPServiceDetailRsp) async def get_service_detail( user_sub: 
Annotated[str, Depends(get_user)], @@ -138,10 +173,6 @@ async def get_service_detail( edit: Annotated[bool, Query(..., description="是否为编辑模式")] = False, ) -> JSONResponse: """获取MCP服务详情""" - # 检查用户权限 - if edit: - await _check_user_admin(user_sub) - # 获取MCP服务详情 try: data = await MCPServiceManager.get_mcp_service(service_id) @@ -166,11 +197,8 @@ async def get_service_detail( name=data.name, description=data.description, overview=config.overview, - data=json.dumps( - config.config.model_dump(by_alias=True, exclude_none=True), - indent=4, - ensure_ascii=False, - ), + data=config.config.model_dump( + exclude_none=True, by_alias=True), mcpType=config.type, ) else: @@ -181,6 +209,7 @@ async def get_service_detail( name=data.name, description=data.description, overview=config.overview, + status=data.status, tools=data.tools, ) @@ -225,7 +254,7 @@ async def delete_service( ) -@router.post("/icon", response_model=UpdateMCPServiceRsp) +@router.post("/icon/{serviceId}", response_model=UpdateMCPServiceRsp) async def update_mcp_icon( user_sub: Annotated[str, Depends(get_user)], service_id: Annotated[str, Path(..., alias="serviceId", description="服务ID")], @@ -282,7 +311,7 @@ async def active_or_deactivate_mcp_service( """激活/取消激活mcp""" try: if data.active: - await MCPServiceManager.active_mcpservice(user_sub, service_id) + await MCPServiceManager.active_mcpservice(user_sub, service_id, data.mcp_env) else: await MCPServiceManager.deactive_mcpservice(user_sub, service_id) except Exception as e: diff --git a/apps/routers/parameter.py b/apps/routers/parameter.py new file mode 100644 index 0000000000000000000000000000000000000000..6edbe2e142cb6589be8947c28ae2eb4a7287baa1 --- /dev/null +++ b/apps/routers/parameter.py @@ -0,0 +1,77 @@ +from typing import Annotated + +from fastapi import APIRouter, Depends, Query, status +from fastapi.responses import JSONResponse + +from apps.dependency import get_user +from apps.dependency.user import verify_user +from apps.services.parameter import 
ParameterManager +from apps.schemas.response_data import ( + GetOperaRsp, + GetParamsRsp +) +from apps.services.application import AppManager +from apps.services.flow import FlowManager + +router = APIRouter( + prefix="/api/parameter", + tags=["parameter"], + dependencies=[ + Depends(verify_user), + ], +) + + +@router.get("", response_model=GetParamsRsp) +async def get_parameters( + user_sub: Annotated[str, Depends(get_user)], + app_id: Annotated[str, Query(alias="appId")], + flow_id: Annotated[str, Query(alias="flowId")], + step_id: Annotated[str, Query(alias="stepId")], +) -> JSONResponse: + """Get parameters for node choice.""" + if not await AppManager.validate_user_app_access(user_sub, app_id): + return JSONResponse( + status_code=status.HTTP_403_FORBIDDEN, + content=GetParamsRsp( + code=status.HTTP_403_FORBIDDEN, + message="用户没有权限访问该流", + result=[], + ).model_dump(exclude_none=True, by_alias=True), + ) + flow = await FlowManager.get_flow_by_app_and_flow_id(app_id, flow_id) + if not flow: + return JSONResponse( + status_code=status.HTTP_404_NOT_FOUND, + content=GetParamsRsp( + code=status.HTTP_404_NOT_FOUND, + message="未找到该流", + result=[], + ).model_dump(exclude_none=True, by_alias=True), + ) + result = await ParameterManager.get_pre_params_by_flow_and_step_id(flow, step_id) + return JSONResponse( + status_code=status.HTTP_200_OK, + content=GetParamsRsp( + code=status.HTTP_200_OK, + message="获取参数成功", + result=result + ).model_dump(exclude_none=True, by_alias=True), + ) + + +@router.get("/operate", response_model=GetOperaRsp) +async def get_operate_parameters( + user_sub: Annotated[str, Depends(get_user)], + param_type: Annotated[str, Query(alias="ParamType")], +) -> JSONResponse: + """Get parameters for node choice.""" + result = await ParameterManager.get_operate_and_bind_type(param_type) + return JSONResponse( + status_code=status.HTTP_200_OK, + content=GetOperaRsp( + code=status.HTTP_200_OK, + message="获取操作成功", + result=result + 
).model_dump(exclude_none=True, by_alias=True), + ) diff --git a/apps/routers/record.py b/apps/routers/record.py index 7384793b4b2abea5117462cb630c5b13c8f76071..663708b86b2dc57a0973c8b73a3d4cb02298b08b 100644 --- a/apps/routers/record.py +++ b/apps/routers/record.py @@ -65,7 +65,7 @@ async def get_record(conversation_id: str, user_sub: Annotated[str, Depends(get_ tmp_record = RecordData( id=record.id, groupId=record_group.id, - taskId=record_group.task_id, + taskId=record.task_id, conversationId=conversation_id, content=record_data, metadata=record.metadata @@ -81,26 +81,27 @@ async def get_record(conversation_id: str, user_sub: Annotated[str, Depends(get_ # 获得Record关联的文档 tmp_record.document = await DocumentManager.get_used_docs_by_record_group(user_sub, record_group.id) - # 获得Record关联的flow数据 - flow_list = await TaskManager.get_context_by_record_id(record_group.id, record.id) - if flow_list: - first_flow = FlowStepHistory.model_validate(flow_list[0]) + flow_step_list = await TaskManager.get_context_by_record_id(record_group.id, record.id) + if flow_step_list: tmp_record.flow = RecordFlow( - id=first_flow.flow_name, #TODO: 此处前端应该用name + id=record.flow.flow_id, # TODO: 此处前端应该用name recordId=record.id, - flowId=first_flow.id, - stepNum=len(flow_list), + flowId=record.flow.flow_id, + flowName=record.flow.flow_name, + flowStatus=record.flow.flow_status, + stepNum=len(flow_step_list), steps=[], ) - for flow in flow_list: - flow_step = FlowStepHistory.model_validate(flow) + for flow_step in flow_step_list: tmp_record.flow.steps.append( RecordFlowStep( - stepId=flow_step.step_name, #TODO: 此处前端应该用name - stepStatus=flow_step.status, + stepId=flow_step.step_id, + stepName=flow_step.step_name, + stepStatus=flow_step.step_status, input=flow_step.input_data, output=flow_step.output_data, + exData=flow_step.ex_data ), ) diff --git a/apps/routers/user.py b/apps/routers/user.py index 54e12f444181b56e408f7face5c4ed37be76008b..537f1bf3adb95e8b48ef1b3d384376bae86ac802 100644 --- 
a/apps/routers/user.py +++ b/apps/routers/user.py @@ -3,10 +3,11 @@ from typing import Annotated -from fastapi import APIRouter, Depends, status +from fastapi import APIRouter, Body, Depends, status, Query from fastapi.responses import JSONResponse from apps.dependency import get_user +from apps.schemas.request_data import UserUpdateRequest from apps.schemas.response_data import UserGetMsp, UserGetRsp from apps.schemas.user import UserInfo from apps.services.user import UserManager @@ -17,12 +18,14 @@ router = APIRouter( ) -@router.get("") -async def chat( +@router.get("", response_model=UserGetRsp) +async def get_user_sub( user_sub: Annotated[str, Depends(get_user)], + page_size: Annotated[int, Query(description="每页用户数量")] = 20, + page_cnt: Annotated[int, Query(description="当前页码")] = 1, ) -> JSONResponse: """查询所有用户接口""" - user_list = await UserManager.get_all_user_sub() + user_list, total = await UserManager.get_all_user_sub(page_cnt=page_cnt, page_size=page_size, filter_user_subs=[user_sub]) user_info_list = [] for user in user_list: # user_info = await UserManager.get_userinfo_by_user_sub(user) 暂时不需要查询user_name @@ -39,6 +42,22 @@ async def chat( content=UserGetRsp( code=status.HTTP_200_OK, message="用户数据详细信息获取成功", - result=UserGetMsp(userInfoList=user_info_list), + result=UserGetMsp(userInfoList=user_info_list, total=total), ).model_dump(exclude_none=True, by_alias=True), ) + + +@router.post("") +async def update_user_info( + user_sub: Annotated[str, Depends(get_user)], + *, + data: Annotated[UserUpdateRequest, Body(..., description="用户更新信息")], +) -> JSONResponse: + """更新用户信息接口""" + # 更新用户信息 + + await UserManager.update_userinfo_by_user_sub(user_sub, data) + return JSONResponse( + status_code=status.HTTP_200_OK, + content={"code": status.HTTP_200_OK, "message": "用户信息更新成功"}, + ) diff --git a/apps/scheduler/call/__init__.py b/apps/scheduler/call/__init__.py index 2ee8b862885b0d88e519bf00a1678a49863df2bd..c5a6f0549c6891bcdf74052ddb8152fb550653b2 100644 --- 
a/apps/scheduler/call/__init__.py +++ b/apps/scheduler/call/__init__.py @@ -8,7 +8,7 @@ from apps.scheduler.call.mcp.mcp import MCP from apps.scheduler.call.rag.rag import RAG from apps.scheduler.call.sql.sql import SQL from apps.scheduler.call.suggest.suggest import Suggestion - +from apps.scheduler.call.choice.choice import Choice # 只包含需要在编排界面展示的工具 __all__ = [ "API", @@ -18,4 +18,5 @@ __all__ = [ "SQL", "Graph", "Suggestion", + "Choice" ] diff --git a/apps/scheduler/call/api/api.py b/apps/scheduler/call/api/api.py index e1891f7259b72a2fa03228f6289c54da6297b958..0656fa4fa5d5b5c297b7c1257350d2e983c74beb 100644 --- a/apps/scheduler/call/api/api.py +++ b/apps/scheduler/call/api/api.py @@ -5,7 +5,7 @@ import json import logging from collections.abc import AsyncGenerator from functools import partial -from typing import Any +from typing import Any, ClassVar import httpx from fastapi import status @@ -15,7 +15,7 @@ from pydantic.json_schema import SkipJsonSchema from apps.common.oidc import oidc_provider from apps.scheduler.call.api.schema import APIInput, APIOutput from apps.scheduler.call.core import CoreCall -from apps.schemas.enum_var import CallOutputType, ContentType, HTTPMethod +from apps.schemas.enum_var import CallOutputType, ContentType, HTTPMethod, LanguageType from apps.schemas.scheduler import ( CallError, CallInfo, @@ -59,10 +59,16 @@ class API(CoreCall, input_model=APIInput, output_model=APIOutput): body: dict[str, Any] = Field(description="已知的部分请求体", default={}) query: dict[str, Any] = Field(description="已知的部分请求参数", default={}) - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="API调用", description="向某一个API接口发送HTTP请求,获取数据。") + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "API调用", + "description": "向某一个API接口发送HTTP请求,获取数据", + }, + LanguageType.ENGLISH: { + "name": "API Call", + "description": "Send an HTTP request to an API to obtain data", + }, + } async def _init(self, call_vars: 
CallVars) -> APIInput: """初始化API调用工具""" @@ -99,8 +105,10 @@ class API(CoreCall, input_model=APIInput, output_model=APIOutput): body=self.body, ) - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: - """调用API,然后返回LLM解析后的数据""" + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: + """调用API,然后返回LLM解析后的数据""" self._client = httpx.AsyncClient(timeout=self.timeout) input_obj = APIInput.model_validate(input_data) try: @@ -112,7 +120,9 @@ class API(CoreCall, input_model=APIInput, output_model=APIOutput): finally: await self._client.aclose() - async def _make_api_call(self, data: APIInput, files: dict[str, tuple[str, bytes, str]]) -> httpx.Response: + async def _make_api_call( + self, data: APIInput, files: dict[str, tuple[str, bytes, str]] + ) -> httpx.Response: """组装API请求""" # 获取必要参数 if self._auth: diff --git a/apps/scheduler/call/choice/choice.py b/apps/scheduler/call/choice/choice.py index a5edf21afb2eeb308dd909a40696500751c9a086..df886d932c4a6d6485e0ab471448d1951c89555c 100644 --- a/apps/scheduler/call/choice/choice.py +++ b/apps/scheduler/call/choice/choice.py @@ -1,19 +1,163 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
"""使用大模型或使用程序做出判断""" -from enum import Enum - -from apps.scheduler.call.choice.schema import ChoiceInput, ChoiceOutput -from apps.scheduler.call.core import CoreCall +import ast +import copy +import logging +from collections.abc import AsyncGenerator +from typing import Any, ClassVar +from pydantic import Field -class Operator(str, Enum): - """Choice工具支持的运算符""" +from apps.scheduler.call.choice.condition_handler import ConditionHandler +from apps.scheduler.call.choice.schema import ( + Condition, + ChoiceBranch, + ChoiceInput, + ChoiceOutput, + Condition, + Logic, +) +from apps.scheduler.call.core import CoreCall +from apps.schemas.enum_var import CallOutputType, LanguageType +from apps.schemas.parameters import Type +from apps.schemas.scheduler import ( + CallError, + CallInfo, + CallOutputChunk, + CallVars, +) - pass +logger = logging.getLogger(__name__) class Choice(CoreCall, input_model=ChoiceInput, output_model=ChoiceOutput): """Choice工具""" - pass + to_user: bool = Field(default=False) + choices: list[ChoiceBranch] = Field( + description="分支", default=[ChoiceBranch(), ChoiceBranch(conditions=[Condition()], is_default=False)] + ) + + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "判断", + "description": "使用大模型或使用程序做出判断", + }, + LanguageType.ENGLISH: { + "name": "Choice", + "description": "Use a large model or a program to make a decision", + }, + } + + def _raise_value_error(self, msg: str) -> None: + """统一处理 ValueError 异常抛出""" + logger.warning(msg) + raise ValueError(msg) + + async def _prepare_message(self, call_vars: CallVars) -> list[dict[str, Any]]: + """替换choices中的系统变量""" + valid_choices = [] + + for choice in self.choices: + try: + # 验证逻辑运算符 + if choice.logic not in [Logic.AND, Logic.OR]: + msg = f"无效的逻辑运算符: {choice.logic}" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + + valid_conditions = [] + for i in range(len(choice.conditions)): + condition = copy.deepcopy(choice.conditions[i]) + # 处理左值 + 
if condition.left.step_id is not None: + condition.left.value = self._extract_history_variables( + condition.left.value, call_vars.history) + # 检查历史变量是否成功提取 + if condition.left.value is None: + msg = f"步骤 {condition.left.step_id} 的历史变量不存在" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + if not ConditionHandler.check_value_type( + condition.left, condition.left.type): + msg = f"左值类型不匹配: {condition.left.value} 应为 {condition.left.type.value}" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + else: + msg = "左侧变量缺少step_id" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + # 处理右值 + if condition.right.step_id is not None: + condition.right.value = self._extract_history_variables( + condition.right.value, call_vars.history) + # 检查历史变量是否成功提取 + if condition.right.value is None: + msg = f"步骤 {condition.right.step_id} 的历史变量不存在" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + if not ConditionHandler.check_value_type( + condition.right, condition.right.type): + msg = f"右值类型不匹配: {condition.right.value} 应为 {condition.right.type.value}" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + else: + # 如果右值没有step_id,尝试从call_vars中获取 + right_value_type = await ConditionHandler.get_value_type_from_operate( + condition.operate) + if right_value_type is None: + msg = f"不支持的运算符: {condition.operate}" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + if condition.right.type != right_value_type: + msg = f"右值类型不匹配: {condition.right.value} 应为 {right_value_type.value}" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + if right_value_type == Type.STRING: + condition.right.value = str(condition.right.value) + else: + condition.right.value = ast.literal_eval(condition.right.value) + if not ConditionHandler.check_value_type( + condition.right, condition.right.type): + msg = f"右值类型不匹配: 
{condition.right.value} 应为 {condition.right.type.value}" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + valid_conditions.append(condition) + + # 如果所有条件都无效,抛出异常 + if not valid_conditions and not choice.is_default: + msg = "分支没有有效条件" + logger.warning(f"[Choice] 分支 {choice.branch_id} 条件处理失败: {msg}") + continue + + # 更新有效条件 + choice.conditions = valid_conditions + valid_choices.append(choice) + + except ValueError as e: + logger.warning("分支 %s 处理失败: %s,已跳过", choice.branch_id, str(e)) + continue + + return valid_choices + + async def _init(self, call_vars: CallVars) -> ChoiceInput: + """初始化Choice工具""" + return ChoiceInput( + choices=await self._prepare_message(call_vars), + ) + + async def _exec( + self, input_data: dict[str, Any], language: LanguageType + ) -> AsyncGenerator[CallOutputChunk, None]: + """执行Choice工具""" + # 解析输入数据 + data = ChoiceInput(**input_data) + try: + branch_id = ConditionHandler.handler(data.choices) + yield CallOutputChunk( + type=CallOutputType.DATA, + content=ChoiceOutput(branch_id=branch_id).model_dump(exclude_none=True, by_alias=True), + ) + except Exception as e: + raise CallError(message=f"选择工具调用失败:{e!s}", data={}) from e diff --git a/apps/scheduler/call/choice/condition_handler.py b/apps/scheduler/call/choice/condition_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..f63620625323d5f77e0de54b03196cb9277eba48 --- /dev/null +++ b/apps/scheduler/call/choice/condition_handler.py @@ -0,0 +1,309 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
+"""处理条件分支的工具""" + + +import logging + +from pydantic import BaseModel + +from apps.schemas.parameters import ( + Type, + NumberOperate, + StringOperate, + ListOperate, + BoolOperate, + DictOperate, +) + +from apps.scheduler.call.choice.schema import ( + ChoiceBranch, + Condition, + Logic, + Value +) + +logger = logging.getLogger(__name__) + + +class ConditionHandler(BaseModel): + """条件分支处理器""" + @staticmethod + async def get_value_type_from_operate(operate: NumberOperate | StringOperate | ListOperate | + BoolOperate | DictOperate) -> Type | None: + """获取右值的类型""" + if isinstance(operate, NumberOperate): + return Type.NUMBER + if operate in [ + StringOperate.EQUAL, StringOperate.NOT_EQUAL, StringOperate.CONTAINS, StringOperate.NOT_CONTAINS, + StringOperate.STARTS_WITH, StringOperate.ENDS_WITH, StringOperate.REGEX_MATCH]: + return Type.STRING + if operate in [StringOperate.LENGTH_EQUAL, StringOperate.LENGTH_GREATER_THAN, + StringOperate.LENGTH_GREATER_THAN_OR_EQUAL, StringOperate.LENGTH_LESS_THAN, + StringOperate.LENGTH_LESS_THAN_OR_EQUAL]: + return Type.NUMBER + if operate in [ListOperate.EQUAL, ListOperate.NOT_EQUAL]: + return Type.LIST + if operate in [ListOperate.CONTAINS, ListOperate.NOT_CONTAINS]: + return Type.STRING + if operate in [ListOperate.LENGTH_EQUAL, ListOperate.LENGTH_GREATER_THAN, + ListOperate.LENGTH_GREATER_THAN_OR_EQUAL, ListOperate.LENGTH_LESS_THAN, + ListOperate.LENGTH_LESS_THAN_OR_EQUAL]: + return Type.NUMBER + if operate in [BoolOperate.EQUAL, BoolOperate.NOT_EQUAL]: + return Type.BOOL + if operate in [DictOperate.EQUAL, DictOperate.NOT_EQUAL]: + return Type.DICT + if operate in [DictOperate.CONTAINS_KEY, DictOperate.NOT_CONTAINS_KEY]: + return Type.STRING + return None + + @staticmethod + def check_value_type(value: Value, expected_type: Type) -> bool: + """检查值的类型是否符合预期""" + if expected_type == Type.STRING and isinstance(value.value, str): + return True + if expected_type == Type.NUMBER and isinstance(value.value, (int, float)): + return True + if 
expected_type == Type.LIST and isinstance(value.value, list): + return True + if expected_type == Type.DICT and isinstance(value.value, dict): + return True + if expected_type == Type.BOOL and isinstance(value.value, bool): + return True + return False + + @staticmethod + def handler(choices: list[ChoiceBranch]) -> str: + """处理条件""" + + for block_judgement in choices[::-1]: + results = [] + if block_judgement.is_default: + return block_judgement.branch_id + for condition in block_judgement.conditions: + result = ConditionHandler._judge_condition(condition) + if result is not None: + results.append(result) + if not results: + logger.warning(f"[Choice] 分支 {block_judgement.branch_id} 条件处理失败: 没有有效的条件") + continue + if block_judgement.logic == Logic.AND: + final_result = all(results) + elif block_judgement.logic == Logic.OR: + final_result = any(results) + + if final_result: + return block_judgement.branch_id + + return "" + + @staticmethod + def _judge_condition(condition: Condition) -> bool: + """ + 判断条件是否成立。 + + Args: + condition (Condition): 'left', 'operate', 'right', 'type' + + Returns: + bool + + """ + left = condition.left + operate = condition.operate + right = condition.right + value_type = left.type + + result = None + if value_type == Type.STRING: + result = ConditionHandler._judge_string_condition(left, operate, right) + elif value_type == Type.NUMBER: + result = ConditionHandler._judge_number_condition(left, operate, right) + elif value_type == Type.BOOL: + result = ConditionHandler._judge_bool_condition(left, operate, right) + elif value_type == Type.LIST: + result = ConditionHandler._judge_list_condition(left, operate, right) + elif value_type == Type.DICT: + result = ConditionHandler._judge_dict_condition(left, operate, right) + else: + msg = f"不支持的数据类型: {value_type}" + logger.error(f"[Choice] 条件处理失败: {msg}") + return None + return result + + @staticmethod + def _judge_string_condition(left: Value, operate: StringOperate, right: Value) -> bool: + """ + 
判断字符串类型的条件。 + + Args: + left (Value): 左值,包含 'value' 键。 + operate (Operate): 操作符 + right (Value): 右值,包含 'value' 键。 + + Returns: + bool + + """ + left_value = left.value + if not isinstance(left_value, str): + msg = f"左值必须是字符串类型 ({left_value})" + logger.warning(msg) + return None + right_value = right.value + if operate == StringOperate.EQUAL: + return left_value == right_value + elif operate == StringOperate.NOT_EQUAL: + return left_value != right_value + elif operate == StringOperate.CONTAINS: + return right_value in left_value + elif operate == StringOperate.NOT_CONTAINS: + return right_value not in left_value + elif operate == StringOperate.STARTS_WITH: + return left_value.startswith(right_value) + elif operate == StringOperate.ENDS_WITH: + return left_value.endswith(right_value) + elif operate == StringOperate.REGEX_MATCH: + import re + return bool(re.match(right_value, left_value)) + elif operate == StringOperate.LENGTH_EQUAL: + return len(left_value) == right_value + elif operate == StringOperate.LENGTH_GREATER_THAN: + return len(left_value) > right_value + elif operate == StringOperate.LENGTH_GREATER_THAN_OR_EQUAL: + return len(left_value) >= right_value + elif operate == StringOperate.LENGTH_LESS_THAN: + return len(left_value) < right_value + elif operate == StringOperate.LENGTH_LESS_THAN_OR_EQUAL: + return len(left_value) <= right_value + return False + + @staticmethod + def _judge_number_condition(left: Value, operate: NumberOperate, right: Value) -> bool: # noqa: PLR0911 + """ + 判断数字类型的条件。 + + Args: + left (Value): 左值,包含 'value' 键。 + operate (Operate): 操作符 + right (Value): 右值,包含 'value' 键。 + + Returns: + bool + + """ + left_value = left.value + if not isinstance(left_value, (int, float)): + msg = f"左值必须是数字类型 ({left_value})" + logger.warning(msg) + return None + right_value = right.value + if operate == NumberOperate.EQUAL: + return left_value == right_value + elif operate == NumberOperate.NOT_EQUAL: + return left_value != right_value + elif operate == 
NumberOperate.GREATER_THAN: + return left_value > right_value + elif operate == NumberOperate.LESS_THAN: # noqa: PLR2004 + return left_value < right_value + elif operate == NumberOperate.GREATER_THAN_OR_EQUAL: + return left_value >= right_value + elif operate == NumberOperate.LESS_THAN_OR_EQUAL: + return left_value <= right_value + return False + + @staticmethod + def _judge_bool_condition(left: Value, operate: BoolOperate, right: Value) -> bool: + """ + 判断布尔类型的条件。 + + Args: + left (Value): 左值,包含 'value' 键。 + operate (Operate): 操作符 + right (Value): 右值,包含 'value' 键。 + + Returns: + bool + + """ + left_value = left.value + if not isinstance(left_value, bool): + msg = "左值必须是布尔类型" + logger.warning(msg) + return None + right_value = right.value + if operate == BoolOperate.EQUAL: + return left_value == right_value + elif operate == BoolOperate.NOT_EQUAL: + return left_value != right_value + return False + + @staticmethod + def _judge_list_condition(left: Value, operate: ListOperate, right: Value): + """ + 判断列表类型的条件。 + + Args: + left (Value): 左值,包含 'value' 键。 + operate (Operate): 操作符 + right (Value): 右值,包含 'value' 键。 + + Returns: + bool + + """ + left_value = left.value + if not isinstance(left_value, list): + msg = f"左值必须是列表类型 ({left_value})" + logger.warning(msg) + return None + right_value = right.value + if operate == ListOperate.EQUAL: + return left_value == right_value + elif operate == ListOperate.NOT_EQUAL: + return left_value != right_value + elif operate == ListOperate.CONTAINS: + return right_value in left_value + elif operate == ListOperate.NOT_CONTAINS: + return right_value not in left_value + elif operate == ListOperate.LENGTH_EQUAL: + return len(left_value) == right_value + elif operate == ListOperate.LENGTH_GREATER_THAN: + return len(left_value) > right_value + elif operate == ListOperate.LENGTH_GREATER_THAN_OR_EQUAL: + return len(left_value) >= right_value + elif operate == ListOperate.LENGTH_LESS_THAN: + return len(left_value) < right_value + elif operate 
== ListOperate.LENGTH_LESS_THAN_OR_EQUAL: + return len(left_value) <= right_value + return False + + @staticmethod + def _judge_dict_condition(left: Value, operate: DictOperate, right: Value): + """ + 判断字典类型的条件。 + + Args: + left (Value): 左值,包含 'value' 键。 + operate (Operate): 操作符 + right (Value): 右值,包含 'value' 键。 + + Returns: + bool + + """ + left_value = left.value + if not isinstance(left_value, dict): + msg = f"左值必须是字典类型 ({left_value})" + logger.warning(msg) + return None + right_value = right.value + if operate == DictOperate.EQUAL: + return left_value == right_value + elif operate == DictOperate.NOT_EQUAL: + return left_value != right_value + elif operate == DictOperate.CONTAINS_KEY: + return right_value in left_value + elif operate == DictOperate.NOT_CONTAINS_KEY: + return right_value not in left_value + return False diff --git a/apps/scheduler/call/choice/schema.py b/apps/scheduler/call/choice/schema.py index 60b62d09fd66adbf32295f44ec86398a537f38d5..955322705031604531cc14f6fe7a4a30935d5989 100644 --- a/apps/scheduler/call/choice/schema.py +++ b/apps/scheduler/call/choice/schema.py @@ -1,12 +1,63 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
"""Choice Call的输入和输出""" +import uuid +from enum import Enum + +from pydantic import Field from apps.scheduler.call.core import DataBase +from apps.schemas.parameters import ( + BoolOperate, + DictOperate, + ListOperate, + NumberOperate, + StringOperate, + Type, +) + + +class Logic(str, Enum): + """Choice 工具支持的逻辑运算符""" + + AND = "and" + OR = "or" + + +class Value(DataBase): + """值的结构""" + + step_id: str | None = Field(description="步骤id", default=None) + type: Type | None = Field(description="值的类型", default=None) + name: str | None = Field(description="值的名称", default=None) + value: str | float | int | bool | list | dict | None = Field(description="值", default=None) + + +class Condition(DataBase): + """单个条件""" + + left: Value = Field(description="左值", default=Value()) + right: Value = Field(description="右值", default=Value()) + operate: NumberOperate | StringOperate | ListOperate | BoolOperate | DictOperate | None = Field( + description="运算符", default=None) + id: str = Field(description="条件ID", default_factory=lambda: str(uuid.uuid4())) + + +class ChoiceBranch(DataBase): + """子分支""" + + branch_id: str = Field(description="分支ID", default_factory=lambda: str(uuid.uuid4())) + logic: Logic = Field(description="逻辑运算符", default=Logic.AND) + conditions: list[Condition] = Field(description="条件列表", default=[]) + is_default: bool = Field(description="是否为默认分支", default=True) class ChoiceInput(DataBase): """Choice Call的输入""" + choices: list[ChoiceBranch] = Field(description="分支", default=[]) + class ChoiceOutput(DataBase): """Choice Call的输出""" + + branch_id: str = Field(description="分支ID", default="") diff --git a/apps/scheduler/call/cmd/cmd.py b/apps/scheduler/call/cmd/cmd.py index 7f9d9f8c4f47e1c0cf3e4bcceea1b619014598fa..9dfc3f6169e989957cfcd6639962e7876b942619 100644 --- a/apps/scheduler/call/cmd/cmd.py +++ b/apps/scheduler/call/cmd/cmd.py @@ -1,11 +1,13 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
"""自然语言生成命令""" -from typing import Any +from typing import Any, ClassVar + from pydantic import BaseModel, Field from apps.scheduler.call.core import CoreCall +from apps.schemas.enum_var import LanguageType class _CmdParams(BaseModel): @@ -19,12 +21,19 @@ class _CmdOutput(BaseModel): """Cmd工具的输出""" - class Cmd(CoreCall): """Cmd工具。用于根据BTDL描述文件,生成命令。""" - name: str = "cmd" - description: str = "根据BTDL描述文件,生成命令。" + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "命令生成", + "description": "根据BTDL描述文件,生成命令", + }, + LanguageType.ENGLISH: { + "name": "Command Generation", + "description": "Generate commands based on BTDL description files", + }, + } async def _exec(self, _slot_data: dict[str, Any]) -> _CmdOutput: """调用Cmd工具""" diff --git a/apps/scheduler/call/convert/convert.py b/apps/scheduler/call/convert/convert.py index 27980bd8ad46aaaa7741d6a6919661aac580de64..37a75f55192d8f9b6d7dd5de6c1c160feb51b79b 100644 --- a/apps/scheduler/call/convert/convert.py +++ b/apps/scheduler/call/convert/convert.py @@ -3,7 +3,7 @@ from collections.abc import AsyncGenerator from datetime import datetime -from typing import Any +from typing import Any, ClassVar import pytz from jinja2 import BaseLoader @@ -12,7 +12,7 @@ from pydantic import Field from apps.scheduler.call.convert.schema import ConvertInput, ConvertOutput from apps.scheduler.call.core import CallOutputChunk, CoreCall -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.scheduler import ( CallInfo, CallOutputChunk, @@ -26,11 +26,16 @@ class Convert(CoreCall, input_model=ConvertInput, output_model=ConvertOutput): text_template: str | None = Field(description="自然语言信息的格式化模板,jinja2语法", default=None) data_template: str | None = Field(description="原始数据的格式化模板,jinja2语法", default=None) - - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="模板转换", 
description="使用jinja2语法和jsonnet语法,将自然语言信息和原始数据进行格式化。") + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "转换工具", + "description": "提取或格式化Step输出", + }, + LanguageType.ENGLISH: { + "name": "Convert Tool", + "description": "Extract or format Step output", + }, + } async def _init(self, call_vars: CallVars) -> ConvertInput: """初始化工具""" @@ -46,7 +51,6 @@ class Convert(CoreCall, input_model=ConvertInput, output_model=ConvertOutput): ) return ConvertInput() - async def _exec(self) -> AsyncGenerator[CallOutputChunk, None]: """ 调用Convert工具 diff --git a/apps/scheduler/call/core.py b/apps/scheduler/call/core.py index 2b1cbba83b9345e8df84e8f23f893137cb46532b..e9e39a180d9ad93c4fc9063d5b5f39eb4d212126 100644 --- a/apps/scheduler/call/core.py +++ b/apps/scheduler/call/core.py @@ -14,7 +14,7 @@ from pydantic.json_schema import SkipJsonSchema from apps.llm.function import FunctionLLM from apps.llm.reasoning import ReasoningLLM -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.pool import NodePool from apps.schemas.scheduler import ( CallError, @@ -25,6 +25,7 @@ from apps.schemas.scheduler import ( CallVars, ) from apps.schemas.task import FlowStepHistory +from apps.schemas.enum_var import LanguageType if TYPE_CHECKING: from apps.scheduler.executor.step import StepExecutor @@ -52,7 +53,9 @@ class CoreCall(BaseModel): name: SkipJsonSchema[str] = Field(description="Step的名称", exclude=True) description: SkipJsonSchema[str] = Field(description="Step的描述", exclude=True) node: SkipJsonSchema[NodePool | None] = Field(description="节点信息", exclude=True) - enable_filling: SkipJsonSchema[bool] = Field(description="是否需要进行自动参数填充", default=False, exclude=True) + enable_filling: SkipJsonSchema[bool] = Field( + description="是否需要进行自动参数填充", default=False, exclude=True + ) tokens: SkipJsonSchema[CallTokens] = Field( description="Call的输入输出Tokens信息", default=CallTokens(), @@ -68,6 +71,7 @@ class 
CoreCall(BaseModel): exclude=True, frozen=True, ) + i18n_info: ClassVar[SkipJsonSchema[dict[str, dict]]] = {} to_user: bool = Field(description="是否需要将输出返回给用户", default=False) @@ -76,21 +80,25 @@ class CoreCall(BaseModel): extra="allow", ) + @classmethod + def info(cls, language: LanguageType = LanguageType.CHINESE) -> CallInfo: + """ + 返回Call的名称和描述 + + :return: Call的名称和描述 + :rtype: CallInfo + """ + lang_info = cls.i18n_info.get(language, cls.i18n_info[LanguageType.CHINESE]) + return CallInfo(name=lang_info["name"], description=lang_info["description"]) - def __init_subclass__(cls, input_model: type[DataBase], output_model: type[DataBase], **kwargs: Any) -> None: + def __init_subclass__( + cls, input_model: type[DataBase], output_model: type[DataBase], **kwargs: Any + ) -> None: """初始化子类""" super().__init_subclass__(**kwargs) cls.input_model = input_model cls.output_model = output_model - - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - err = "[CoreCall] 必须手动实现info方法" - raise NotImplementedError(err) - - @staticmethod def _assemble_call_vars(executor: "StepExecutor") -> CallVars: """组装CallVars""" @@ -120,7 +128,6 @@ class CoreCall(BaseModel): summary=executor.task.runtime.summary, ) - @staticmethod def _extract_history_variables(path: str, history: dict[str, FlowStepHistory]) -> Any: """ @@ -131,32 +138,23 @@ class CoreCall(BaseModel): :return: 变量 """ split_path = path.split("/") + if len(split_path) < 1: + err = f"[CoreCall] 路径格式错误: {path}" + logger.error(err) + return None if split_path[0] not in history: err = f"[CoreCall] 步骤{split_path[0]}不存在" logger.error(err) - raise CallError( - message=err, - data={ - "step_id": split_path[0], - }, - ) - + return None data = history[split_path[0]].output_data for key in split_path[1:]: if key not in data: err = f"[CoreCall] 输出Key {key} 不存在" logger.error(err) - raise CallError( - message=err, - data={ - "step_id": split_path[0], - "key": key, - }, - ) + return None data = data[key] return data - @classmethod 
async def instance(cls, executor: "StepExecutor", node: NodePool | None, **kwargs: Any) -> Self: """实例化Call类""" @@ -170,36 +168,35 @@ class CoreCall(BaseModel): await obj._set_input(executor) return obj - async def _set_input(self, executor: "StepExecutor") -> None: """获取Call的输入""" self._sys_vars = self._assemble_call_vars(executor) input_data = await self._init(self._sys_vars) self.input = input_data.model_dump(by_alias=True, exclude_none=True) - async def _init(self, call_vars: CallVars) -> DataBase: """初始化Call类,并返回Call的输入""" err = "[CoreCall] 初始化方法必须手动实现" raise NotImplementedError(err) - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: """Call类实例的流式输出方法""" yield CallOutputChunk(type=CallOutputType.TEXT, content="") - async def _after_exec(self, input_data: dict[str, Any]) -> None: """Call类实例的执行后方法""" - - async def exec(self, executor: "StepExecutor", input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def exec( + self, + executor: "StepExecutor", + input_data: dict[str, Any], + language: LanguageType = LanguageType.CHINESE, + ) -> AsyncGenerator[CallOutputChunk, None]: """Call类实例的执行方法""" - async for chunk in self._exec(input_data): + async for chunk in self._exec(input_data, language): yield chunk await self._after_exec(input_data) - async def _llm(self, messages: list[dict[str, Any]]) -> str: """Call可直接使用的LLM非流式调用""" result = "" @@ -210,7 +207,6 @@ class CoreCall(BaseModel): self.output_tokens = llm.output_tokens return result - async def _json(self, messages: list[dict[str, Any]], schema: type[BaseModel]) -> BaseModel: """Call可直接使用的JSON生成""" json = FunctionLLM() diff --git a/apps/scheduler/call/empty.py b/apps/scheduler/call/empty.py index 5865bc7e804491a7d6aa41fa251dc8c5d9c77dc6..2677fe175af42aa178e6b46cdab0ba010e98374f 100644 --- a/apps/scheduler/call/empty.py +++ b/apps/scheduler/call/empty.py @@ -2,26 +2,25 @@ """空白Call""" from collections.abc import AsyncGenerator -from typing import Any 
+from typing import Any, ClassVar from apps.scheduler.call.core import CoreCall, DataBase -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.scheduler import CallInfo, CallOutputChunk, CallVars class Empty(CoreCall, input_model=DataBase, output_model=DataBase): """空Call""" - - @classmethod - def info(cls) -> CallInfo: - """ - 返回Call的名称和描述 - - :return: Call的名称和描述 - :rtype: CallInfo - """ - return CallInfo(name="空白", description="空白节点,用于占位") - + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "空白节点", + "description": "空白节点,用于占位", + }, + LanguageType.ENGLISH: { + "name": "Empty Node", + "description": "Empty node for placeholder", + }, + } async def _init(self, call_vars: CallVars) -> DataBase: """ @@ -33,8 +32,8 @@ class Empty(CoreCall, input_model=DataBase, output_model=DataBase): """ return DataBase() - - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec(self, input_data: dict[str, Any], + language: LanguageType = LanguageType.CHINESE) -> AsyncGenerator[CallOutputChunk, None]: """ 执行Call diff --git a/apps/scheduler/call/facts/facts.py b/apps/scheduler/call/facts/facts.py index f8aebcd748d92de4109553d0f68fecac43363f58..718c9d0d75219c52f0802bea9f5b970a0c527605 100644 --- a/apps/scheduler/call/facts/facts.py +++ b/apps/scheduler/call/facts/facts.py @@ -2,7 +2,7 @@ """提取事实工具""" from collections.abc import AsyncGenerator -from typing import TYPE_CHECKING, Any, Self +from typing import TYPE_CHECKING, Any, Self, ClassVar from jinja2 import BaseLoader from jinja2.sandbox import SandboxedEnvironment @@ -16,7 +16,7 @@ from apps.scheduler.call.facts.schema import ( FactsInput, FactsOutput, ) -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.pool import NodePool from apps.schemas.scheduler import CallInfo, CallOutputChunk, 
CallVars from apps.services.user_domain import UserDomainManager @@ -30,12 +30,16 @@ class FactsCall(CoreCall, input_model=FactsInput, output_model=FactsOutput): answer: str = Field(description="用户输入") - - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="提取事实", description="从对话上下文和文档片段中提取事实。") - + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "提取事实", + "description": "从对话上下文和文档片段中提取事实。", + }, + LanguageType.ENGLISH: { + "name": "Fact Extraction", + "description": "Extract facts from the conversation context and document snippets.", + }, + } @classmethod async def instance(cls, executor: "StepExecutor", node: NodePool | None, **kwargs: Any) -> Self: @@ -51,7 +55,6 @@ class FactsCall(CoreCall, input_model=FactsInput, output_model=FactsOutput): await obj._set_input(executor) return obj - async def _init(self, call_vars: CallVars) -> FactsInput: """初始化工具""" # 组装必要变量 @@ -65,8 +68,9 @@ class FactsCall(CoreCall, input_model=FactsInput, output_model=FactsOutput): message=message, ) - - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """执行工具""" data = FactsInput(**input_data) # jinja2 环境 @@ -78,20 +82,20 @@ class FactsCall(CoreCall, input_model=FactsInput, output_model=FactsOutput): ) # 提取事实信息 - facts_tpl = env.from_string(FACTS_PROMPT) + facts_tpl = env.from_string(FACTS_PROMPT[language]) facts_prompt = facts_tpl.render(conversation=data.message) facts_obj: FactsGen = await self._json([ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": facts_prompt}, - ], FactsGen) # type: ignore[arg-type] + ], FactsGen) # type: ignore[arg-type] # 更新用户画像 - domain_tpl = env.from_string(DOMAIN_PROMPT) + domain_tpl = env.from_string(DOMAIN_PROMPT[language]) domain_prompt = 
domain_tpl.render(conversation=data.message) domain_list: DomainGen = await self._json([ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": domain_prompt}, - ], DomainGen) # type: ignore[arg-type] + ], DomainGen) # type: ignore[arg-type] for domain in domain_list.keywords: await UserDomainManager.update_user_domain_by_user_sub_and_domain_name(data.user_sub, domain) @@ -104,10 +108,14 @@ class FactsCall(CoreCall, input_model=FactsInput, output_model=FactsOutput): ).model_dump(by_alias=True, exclude_none=True), ) - - async def exec(self, executor: "StepExecutor", input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def exec( + self, + executor: "StepExecutor", + input_data: dict[str, Any], + language: LanguageType = LanguageType.CHINESE, + ) -> AsyncGenerator[CallOutputChunk, None]: """执行工具""" - async for chunk in self._exec(input_data): + async for chunk in self._exec(input_data, language): content = chunk.content if not isinstance(content, dict): err = "[FactsCall] 工具输出格式错误" diff --git a/apps/scheduler/call/facts/prompt.py b/apps/scheduler/call/facts/prompt.py index b2b2513f2c28feb4d19f5fb4ee3eba1bd616a4dc..02e134391fa33d3dbe3b3e71cc63947acc739777 100644 --- a/apps/scheduler/call/facts/prompt.py +++ b/apps/scheduler/call/facts/prompt.py @@ -2,8 +2,10 @@ """记忆提取工具的提示词""" from textwrap import dedent - -DOMAIN_PROMPT: str = dedent(r""" +from apps.schemas.enum_var import LanguageType +DOMAIN_PROMPT: dict[str, str] = { + LanguageType.CHINESE: dedent( + r""" 根据对话上文,提取推荐系统所需的关键词标签,要求: @@ -35,8 +37,48 @@ DOMAIN_PROMPT: str = dedent(r""" {% endfor %} -""") -FACTS_PROMPT: str = dedent(r""" +""" + ), + LanguageType.ENGLISH: dedent( + r""" + + + Extract keywords for recommendation system based on the previous conversation, requirements: + 1. Entity nouns, technical terms, time range, location, product, etc. can be keyword tags + 2. At least one keyword is related to the topic of the conversation + 3. 
Tags should be concise and not repeated, not exceeding 10 characters + 4. Output in JSON format, do not include XML tags, do not include any explanatory notes + + + + + What's the weather like in Beijing? + Beijing is sunny today. + + + + { + "keywords": ["Beijing", "weather"] + } + + + + + + {% for item in conversation %} + <{{item['role']}}> + {{item['content']}} + + {% endfor %} + + +""" + ), +} + +FACTS_PROMPT: dict[str, str] = { + LanguageType.CHINESE: dedent( + r""" 从对话中提取关键信息,并将它们组织成独一无二的、易于理解的事实,包含用户偏好、关系、实体等有用信息。 @@ -80,4 +122,53 @@ FACTS_PROMPT: str = dedent(r""" {% endfor %} -""") +""" + ), + LanguageType.ENGLISH: dedent( + r""" + + + Extract key information from the conversation and organize it into unique, easily understandable facts, including user preferences, relationships, entities, etc. + The following are the types of information you need to pay attention to and detailed instructions on how to handle input data. + + **Types of information you need to pay attention to** + 1. Entities: Entities involved in the conversation. For example: names, locations, organizations, events, etc. + 2. Preferences: Attitudes towards entities. For example: like, dislike, etc. + 3. Relationships: Relationships between users and entities, or between two entities. For example: include, parallel, mutually exclusive, etc. + 4. Actions: Specific actions that affect entities. For example: query, search, browse, click, etc. + + **Requirements** + 1. Facts must be accurate and can only be extracted from the conversation. Do not include the information in the example in the output. + 2. Facts must be clear, concise, and easy to understand. Must be less than 30 words. + 3. Output in the following JSON format: + + { + "facts": ["Fact 1", "Fact 2", "Fact 3"] + } + + + + + What are the attractions in Hangzhou West Lake? + West Lake in Hangzhou, Zhejiang Province, China, is a famous scenic spot known for its beautiful natural scenery and rich cultural heritage. 
Many notable attractions surround West Lake, including the renowned Su Causeway, Bai Causeway, Broken Bridge, and the Three Pools Mirroring the Moon. Famous for its crystal-clear waters and the surrounding mountains, West Lake is one of China's most famous lakes. + + + + { + "facts": ["Hangzhou West Lake has famous attractions such as Suzhou Embankment, Bai Budi, Qiantang Bridge, San Tang Yue, etc."] + } + + + + + + {% for item in conversation %} + <{{item['role']}}> + {{item['content']}} + + {% endfor %} + + +""" + ), +} diff --git a/apps/scheduler/call/graph/graph.py b/apps/scheduler/call/graph/graph.py index c2728f17913fcd0e8343168f2b508dbe6006fd6e..2a9b7b0672d17d44c9989e523d946803135c4a4b 100644 --- a/apps/scheduler/call/graph/graph.py +++ b/apps/scheduler/call/graph/graph.py @@ -3,7 +3,7 @@ import json from collections.abc import AsyncGenerator -from typing import Any +from typing import Any, ClassVar from anyio import Path from pydantic import Field @@ -11,7 +11,7 @@ from pydantic import Field from apps.scheduler.call.core import CoreCall from apps.scheduler.call.graph.schema import RenderFormat, RenderInput, RenderOutput from apps.scheduler.call.graph.style import RenderStyle -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.scheduler import ( CallError, CallInfo, @@ -25,12 +25,16 @@ class Graph(CoreCall, input_model=RenderInput, output_model=RenderOutput): dataset_key: str = Field(description="图表的数据来源(字段名)", default="") - - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="图表", description="将SQL查询出的数据转换为图表") - + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "图表", + "description": "将SQL查询出的数据转换为图表。", + }, + LanguageType.ENGLISH: { + "name": "Chart", + "description": "Convert the data queried by SQL into a chart.", + }, + } async def _init(self, call_vars: CallVars) -> RenderInput: """初始化Render 
Call,校验参数,读取option模板""" @@ -54,8 +58,9 @@ class Graph(CoreCall, input_model=RenderInput, output_model=RenderOutput): data=data, ) - - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """运行Render Call""" data = RenderInput(**input_data) @@ -84,7 +89,7 @@ class Graph(CoreCall, input_model=RenderInput, output_model=RenderOutput): try: style_obj = RenderStyle() - llm_output = await style_obj.generate(question=data.question) + llm_output = await style_obj.generate(question=data.question, language=language) self.tokens.input_tokens += style_obj.input_tokens self.tokens.output_tokens += style_obj.output_tokens @@ -100,7 +105,6 @@ class Graph(CoreCall, input_model=RenderInput, output_model=RenderOutput): ).model_dump(exclude_none=True, by_alias=True), ) - @staticmethod def _separate_key_value(data: list[dict[str, Any]]) -> list[dict[str, Any]]: """ @@ -117,8 +121,9 @@ class Graph(CoreCall, input_model=RenderInput, output_model=RenderOutput): result.append({"type": key, "value": val}) return result - - def _parse_options(self, column_num: int, chart_style: str, additional_style: str, scale_style: str) -> None: + def _parse_options( + self, column_num: int, chart_style: str, additional_style: str, scale_style: str + ) -> None: """解析LLM做出的图表样式选择""" series_template = {} diff --git a/apps/scheduler/call/graph/style.py b/apps/scheduler/call/graph/style.py index 631ea88acb9c0ef6b851e35cb3fffdecc567a901..e9fef038635ef7569147335d3289b46341675927 100644 --- a/apps/scheduler/call/graph/style.py +++ b/apps/scheduler/call/graph/style.py @@ -9,6 +9,7 @@ from pydantic import BaseModel, Field from apps.llm.function import JsonGenerator from apps.llm.patterns.core import CorePattern from apps.llm.reasoning import ReasoningLLM +from apps.schemas.enum_var import LanguageType logger = 
logging.getLogger(__name__) @@ -24,53 +25,95 @@ class RenderStyleResult(BaseModel): class RenderStyle(CorePattern): """选择图表样式""" - system_prompt = r""" - You are a helpful assistant. Help the user make style choices when drawing a chart. - Chart title should be short and less than 3 words. - - Available types: - - `bar`: Bar graph - - `pie`: Pie graph - - `line`: Line graph - - `scatter`: Scatter graph - - Available bar additional styles: - - `normal`: Normal bar graph - - `stacked`: Stacked bar graph - - Available pie additional styles: - - `normal`: Normal pie graph - - `ring`: Ring pie graph - - Available scales: - - `linear`: Linear scale - - `log`: Logarithmic scale - - EXAMPLE - ## Question - 查询数据库中的数据,并绘制堆叠柱状图。 - - ## Thought - Let's think step by step. The user requires drawing a stacked bar chart, so the chart type should be `bar`, \ - i.e. a bar chart; the chart style should be `stacked`, i.e. a stacked form. - - ## Answer - The chart type should be: bar - The chart style should be: stacked - The scale should be: linear - - END OF EXAMPLE - - Let's begin. - """ - - user_prompt = r""" - ## Question - {question} - - ## Thought - Let's think step by step. - """ + def get_default_prompt(self) -> dict[LanguageType, str]: + system_prompt = { + LanguageType.CHINESE: r""" + 你是一个有用的助手。帮助用户在绘制图表时做出样式选择。 + 图表标题应简短且少于3个字。 + 可用类型: + - `bar`: 柱状图 + - `pie`: 饼图 + - `line`: 折线图 + - `scatter`: 散点图 + 可用柱状图附加样式: + - `normal`: 普通柱状图 + - `stacked`: 堆叠柱状图 + 可用饼图附加样式: + - `normal`: 普通饼图 + - `ring`: 环形饼图 + 可用比例: + - `linear`: 线性比例 + - `log`: 对数比例 + EXAMPLE + ## 问题 + 查询数据库中的数据,并绘制堆叠柱状图。 + ## 思考 + 让我们一步步思考。用户要求绘制堆叠柱状图,因此图表类型应为 `bar`,即柱状图;图表样式 + 应为 `stacked`,即堆叠形式。 + ## 答案 + 图表类型应为:bar + 图表样式应为:stacked + 比例应为:linear + END OF EXAMPLE + + 让我们开始吧。 + """, + LanguageType.ENGLISH: r""" + You are a helpful assistant. Help the user make style choices when drawing a chart. + Chart title should be short and less than 3 words. 
+ + Available types: + - `bar`: Bar graph + - `pie`: Pie graph + - `line`: Line graph + - `scatter`: Scatter graph + + Available bar additional styles: + - `normal`: Normal bar graph + - `stacked`: Stacked bar graph + + Available pie additional styles: + - `normal`: Normal pie graph + - `ring`: Ring pie graph + + Available scales: + - `linear`: Linear scale + - `log`: Logarithmic scale + + EXAMPLE + ## Question + 查询数据库中的数据,并绘制堆叠柱状图。 + + ## Thought + Let's think step by step. The user requires drawing a stacked bar chart, so the chart type should be `bar`, \ + i.e. a bar chart; the chart style should be `stacked`, i.e. a stacked form. + + ## Answer + The chart type should be: bar + The chart style should be: stacked + The scale should be: linear + + END OF EXAMPLE + + Let's begin. + """ + } + user_prompt = { + LanguageType.CHINESE: r""" + ## 问题 + {question} + ## 思考 + 让我们一步步思考。根据用户问题,选择合适的图表类型、样式和比例。 + """, + LanguageType.ENGLISH: r""" + ## Question + {question} + + ## Thought + Let's think step by step. 
+ """ + } + return system_prompt, user_prompt def __init__(self, system_prompt: str | None = None, user_prompt: str | None = None) -> None: """初始化RenderStyle Prompt""" @@ -79,11 +122,11 @@ class RenderStyle(CorePattern): async def generate(self, **kwargs) -> dict[str, Any]: # noqa: ANN003 """使用LLM选择图表样式""" question = kwargs["question"] - + language = kwargs.get("language", LanguageType.CHINESE) # 使用Reasoning模型进行推理 messages = [ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": self.user_prompt.format(question=question)}, + {"role": "system", "content": self.system_prompt[language]}, + {"role": "user", "content": self.user_prompt[language].format(question=question)}, ] result = "" llm = ReasoningLLM() diff --git a/apps/scheduler/call/llm/llm.py b/apps/scheduler/call/llm/llm.py index 6a679dce98af6164211edd16b2fa38714d899f31..1e9bcb94804a6c446db71695a3c68752a804f0b6 100644 --- a/apps/scheduler/call/llm/llm.py +++ b/apps/scheduler/call/llm/llm.py @@ -4,7 +4,7 @@ import logging from collections.abc import AsyncGenerator from datetime import datetime -from typing import Any +from typing import Any, ClassVar import pytz from jinja2 import BaseLoader @@ -15,7 +15,7 @@ from apps.llm.reasoning import ReasoningLLM from apps.scheduler.call.core import CoreCall from apps.scheduler.call.llm.prompt import LLM_CONTEXT_PROMPT, LLM_DEFAULT_PROMPT from apps.scheduler.call.llm.schema import LLMInput, LLMOutput -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.scheduler import ( CallError, CallInfo, @@ -38,12 +38,16 @@ class LLM(CoreCall, input_model=LLMInput, output_model=LLMOutput): system_prompt: str = Field(description="大模型系统提示词", default="You are a helpful assistant.") user_prompt: str = Field(description="大模型用户提示词", default=LLM_DEFAULT_PROMPT) - - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="大模型", 
description="以指定的提示词和上下文信息调用大模型,并获得输出。") - + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "大模型", + "description": "以指定的提示词和上下文信息调用大模型,并获得输出。", + }, + LanguageType.ENGLISH: { + "name": "Foundation Model", + "description": "Call the foundation model with specified prompt and context, and obtain the output.", + }, + } async def _prepare_message(self, call_vars: CallVars) -> list[dict[str, Any]]: """准备消息""" @@ -93,15 +97,15 @@ class LLM(CoreCall, input_model=LLMInput, output_model=LLMOutput): {"role": "user", "content": user_input}, ] - async def _init(self, call_vars: CallVars) -> LLMInput: """初始化LLM工具""" return LLMInput( message=await self._prepare_message(call_vars), ) - - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """运行LLM Call""" data = LLMInput(**input_data) try: diff --git a/apps/scheduler/call/llm/prompt.py b/apps/scheduler/call/llm/prompt.py index 0f227dcaa618b11a2c888f55a61fa51f349d7d8b..20536ff27b28b17979bbf7eed3b94f5e15f2139a 100644 --- a/apps/scheduler/call/llm/prompt.py +++ b/apps/scheduler/call/llm/prompt.py @@ -2,16 +2,34 @@ """大模型工具的提示词""" from textwrap import dedent +from apps.schemas.enum_var import LanguageType LLM_CONTEXT_PROMPT = dedent( + # r""" + # 以下是对用户和AI间对话的简短总结,在中给出: + # + # {{ summary }} + # + # 你作为AI,在回答用户的问题前,需要获取必要的信息。为此,你调用了一些工具,并获得了它们的输出: + # 工具的输出数据将在中给出, 其中为工具的名称,为工具的输出数据。 + # + # {% for tool in history_data %} + # + # {{ tool.step_name }} + # {{ tool.step_description }} + # {{ tool.output_data }} + # + # {% endfor %} + # + # """, r""" - 以下是对用户和AI间对话的简短总结,在中给出: + The following is a brief summary of the user and AI conversation, given in : {{ summary }} - 你作为AI,在回答用户的问题前,需要获取必要的信息。为此,你调用了一些工具,并获得了它们的输出: - 工具的输出数据将在中给出, 其中为工具的名称,为工具的输出数据。 + As an AI, before answering the user's question, you need to obtain 
necessary information. For this purpose, you have called some tools and obtained their outputs: + The output data of the tools will be given in , where is the name of the tool and is the output data of the tool. {% for tool in history_data %} @@ -24,12 +42,29 @@ LLM_CONTEXT_PROMPT = dedent( """, ).strip("\n") LLM_DEFAULT_PROMPT = dedent( + # r""" + # + # 你是一个乐于助人的智能助手。请结合给出的背景信息, 回答用户的提问。 + # 当前时间:{{ time }},可以作为时间参照。 + # 用户的问题将在中给出,上下文背景信息将在中给出。 + # 注意:输出不要包含任何XML标签,不要编造任何信息。若你认为用户提问与背景信息无关,请忽略背景信息直接作答。 + # + # + # {{ question }} + # + # + # {{ context }} + # + # 现在,输出你的回答: + # """, r""" - 你是一个乐于助人的智能助手。请结合给出的背景信息, 回答用户的提问。 - 当前时间:{{ time }},可以作为时间参照。 - 用户的问题将在中给出,上下文背景信息将在中给出。 - 注意:输出不要包含任何XML标签,不要编造任何信息。若你认为用户提问与背景信息无关,请忽略背景信息直接作答。 + You are a helpful AI assistant. Please answer the user's question based on the given background information. + Current time: {{ time }}, which can be used as a reference. + The user's question will be given in , and the context background information will be given in . + + Respond using the same language as the user's question, unless the user explicitly requests a specific language—then follow that request. + Note: Do not include any XML tags in the output. Do not make up any information. If you think the user's question is unrelated to the background information, please ignore the background information and answer directly. @@ -39,12 +74,13 @@ LLM_DEFAULT_PROMPT = dedent( {{ context }} - - 现在,输出你的回答: - """, + Now, please output your answer: + """ ).strip("\n") -LLM_ERROR_PROMPT = dedent( - r""" + +LLM_ERROR_PROMPT = { + LanguageType.CHINESE: dedent( + r""" 你是一位智能助手,能够根据用户的问题,使用Python工具获取信息,并作出回答。你在使用工具解决回答用户的问题时,发生了错误。 你的任务是:分析工具(Python程序)的异常信息,分析造成该异常可能的原因,并以通俗易懂的方式,将原因告知用户。 @@ -67,8 +103,36 @@ LLM_ERROR_PROMPT = dedent( 现在,输出你的回答: - """, -).strip("\n") + """ + ).strip("\n"), + LanguageType.ENGLISH: dedent( + r""" + + You are an intelligent assistant. When using Python tools to answer user questions, an error occurred. 
+ Your task is: Analyze the exception information of the tool (Python program), analyze the possible causes of the error, and inform the user in an easy-to-understand way. + + Current time: {{ time }}, which can be used as a reference. + The program exception information that occurred will be given in , the user's question will be given in , and the context background information will be given in . + Note: Do not include any XML tags in the output. Do not make up any information. If you think the user's question is unrelated to the background information, please ignore the background information. + + + + {{ error_info }} + + + + {{ question }} + + + + {{ context }} + + + Now, please output your answer: + """ + ).strip("\n"), +} + RAG_ANSWER_PROMPT = dedent( r""" diff --git a/apps/scheduler/call/mcp/mcp.py b/apps/scheduler/call/mcp/mcp.py index 661e9ada74da76e6d78a0f6cba42cef08c562335..c0930ae0e104ae2e175ed7743aff908d73118d3f 100644 --- a/apps/scheduler/call/mcp/mcp.py +++ b/apps/scheduler/call/mcp/mcp.py @@ -4,7 +4,7 @@ import logging from collections.abc import AsyncGenerator from copy import deepcopy -from typing import Any +from typing import Any, ClassVar from pydantic import Field @@ -16,7 +16,7 @@ from apps.scheduler.call.mcp.schema import ( MCPOutput, ) from apps.scheduler.mcp import MCPHost, MCPPlanner, MCPSelector -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.mcp import MCPPlanItem from apps.schemas.scheduler import ( CallInfo, @@ -26,26 +26,47 @@ from apps.schemas.scheduler import ( logger = logging.getLogger(__name__) +MCP_GENERATE: dict[str, dict[LanguageType, str]] = { + "START": { + LanguageType.CHINESE: "[MCP] 开始生成计划...\n\n\n\n", + LanguageType.ENGLISH: "[MCP] Start generating plan...\n\n\n\n", + }, + "END": { + LanguageType.CHINESE: "[MCP] 计划生成完成:\n\n{plan_str}\n\n\n\n", + LanguageType.ENGLISH: "[MCP] Plan generation completed: \n\n{plan_str}\n\n\n\n", + }, +} + 
+MCP_SUMMARY: dict[str, dict[LanguageType, str]] = { + "START": { + LanguageType.CHINESE: "[MCP] 正在总结任务结果...\n\n", + LanguageType.ENGLISH: "[MCP] Start summarizing task results...\n\n", + }, + "END": { + LanguageType.CHINESE: "[MCP] 任务完成\n\n---\n\n{answer}\n\n", + LanguageType.ENGLISH: "[MCP] Task summary completed\n\n{answer}\n\n", + }, +} + class MCP(CoreCall, input_model=MCPInput, output_model=MCPOutput): """MCP工具""" mcp_list: list[str] = Field(description="MCP Server ID列表", max_length=5, min_length=1) - max_steps: int = Field(description="最大步骤数", default=6) + max_steps: int = Field(description="最大步骤数", default=20) text_output: bool = Field(description="是否将结果以文本形式返回", default=True) to_user: bool = Field(description="是否将结果返回给用户", default=True) - - @classmethod - def info(cls) -> CallInfo: - """ - 返回Call的名称和描述 - - :return: Call的名称和描述 - :rtype: CallInfo - """ - return CallInfo(name="MCP", description="调用MCP Server,执行工具") - + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "MCP", + "description": "调用MCP Server,执行工具", + }, + LanguageType.ENGLISH: { + "name": "MCP", + "description": "Call the MCP Server to execute tools", + }, + } async def _init(self, call_vars: CallVars) -> MCPInput: """初始化MCP""" @@ -63,33 +84,33 @@ class MCP(CoreCall, input_model=MCPInput, output_model=MCPOutput): return MCPInput(avaliable_tools=avaliable_tools, max_steps=self.max_steps) - - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """执行MCP""" # 生成计划 - async for chunk in self._generate_plan(): + async for chunk in self._generate_plan(language): yield chunk # 执行计划 plan_list = deepcopy(self._plan.plans) while len(plan_list) > 0: - async for chunk in self._execute_plan_item(plan_list.pop(0)): + async for chunk in self._execute_plan_item(plan_list.pop(0), language): yield chunk # 
生成总结 - async for chunk in self._generate_answer(): + async for chunk in self._generate_answer(language): yield chunk - - async def _generate_plan(self) -> AsyncGenerator[CallOutputChunk, None]: + async def _generate_plan(self, language) -> AsyncGenerator[CallOutputChunk, None]: """生成执行计划""" # 开始提示 - yield self._create_output("[MCP] 开始生成计划...\n\n\n\n", MCPMessageType.PLAN_BEGIN) + yield self._create_output(MCP_GENERATE["START"][language], MCPMessageType.PLAN_BEGIN) # 选择工具并生成计划 selector = MCPSelector() top_tool = await selector.select_top_tool(self._call_vars.question, self.mcp_list) - planner = MCPPlanner(self._call_vars.question) + planner = MCPPlanner(self._call_vars.question, language) self._plan = await planner.create_plan(top_tool, self.max_steps) # 输出计划 @@ -98,13 +119,14 @@ class MCP(CoreCall, input_model=MCPInput, output_model=MCPOutput): plan_str += f"[+] {plan_item.content}; {plan_item.tool}[{plan_item.instruction}]\n\n" yield self._create_output( - f"[MCP] 计划生成完成:\n\n{plan_str}\n\n\n\n", + MCP_GENERATE["END"][language].format(plan_str=plan_str), MCPMessageType.PLAN_END, data=self._plan.model_dump(), ) - - async def _execute_plan_item(self, plan_item: MCPPlanItem) -> AsyncGenerator[CallOutputChunk, None]: + async def _execute_plan_item( + self, plan_item: MCPPlanItem, language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """执行单个计划项""" # 判断是否为Final if plan_item.tool == "Final": @@ -125,7 +147,7 @@ class MCP(CoreCall, input_model=MCPInput, output_model=MCPOutput): # 调用工具 try: - result = await self._host.call_tool(tool, plan_item) + result = await self._host.call_tool(tool, plan_item, language) except Exception as e: err = f"[MCP] 工具 {tool.name} 调用失败: {e!s}" logger.exception(err) @@ -141,29 +163,27 @@ class MCP(CoreCall, input_model=MCPInput, output_model=MCPOutput): }, ) - - async def _generate_answer(self) -> AsyncGenerator[CallOutputChunk, None]: + async def _generate_answer(self, language) -> 
AsyncGenerator[CallOutputChunk, None]: """生成总结""" # 提示开始总结 yield self._create_output( - "[MCP] 正在总结任务结果...\n\n", + MCP_SUMMARY["START"][language], MCPMessageType.FINISH_BEGIN, ) # 生成答案 - planner = MCPPlanner(self._call_vars.question) + planner = MCPPlanner(self._call_vars.question, language) answer = await planner.generate_answer(self._plan, await self._host.assemble_memory()) # 输出结果 yield self._create_output( - f"[MCP] 任务完成\n\n---\n\n{answer}\n\n", + MCP_SUMMARY["END"][language].format(answer=answer), MCPMessageType.FINISH_END, data=MCPOutput( message=answer, ).model_dump(), ) - def _create_output( self, text: str, @@ -173,8 +193,11 @@ class MCP(CoreCall, input_model=MCPInput, output_model=MCPOutput): """创建输出""" if self.text_output: return CallOutputChunk(type=CallOutputType.TEXT, content=text) - return CallOutputChunk(type=CallOutputType.DATA, content=MCPMessage( - msg_type=msg_type, - message=text.strip(), - data=data or {}, - ).model_dump_json()) + return CallOutputChunk( + type=CallOutputType.DATA, + content=MCPMessage( + msg_type=msg_type, + message=text.strip(), + data=data or {}, + ).model_dump_json(), + ) diff --git a/apps/scheduler/call/rag/rag.py b/apps/scheduler/call/rag/rag.py index e27327d8ad4d01387eeeb4f1644c056d32bc0dbe..789a030a9477866bba7761472bf0539721f031f0 100644 --- a/apps/scheduler/call/rag/rag.py +++ b/apps/scheduler/call/rag/rag.py @@ -3,7 +3,7 @@ import logging from collections.abc import AsyncGenerator -from typing import Any +from typing import Any, ClassVar import httpx from fastapi import status @@ -13,7 +13,7 @@ from apps.common.config import Config from apps.llm.patterns.rewrite import QuestionRewrite from apps.scheduler.call.core import CoreCall from apps.scheduler.call.rag.schema import RAGInput, RAGOutput, SearchMethod -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.scheduler import ( CallError, CallInfo, @@ -37,10 +37,16 @@ class RAG(CoreCall, 
input_model=RAGInput, output_model=RAGOutput): is_compress: bool = Field(description="是否压缩", default=False) tokens_limit: int = Field(description="token限制", default=8192) - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="知识库", description="查询知识库,从文档中获取必要信息") + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "知识库", + "description": "查询知识库,从文档中获取必要信息", + }, + LanguageType.ENGLISH: { + "name": "Knowledge Base", + "description": "Query the knowledge base and obtain necessary information from documents", + }, + } async def _init(self, call_vars: CallVars) -> RAGInput: """初始化RAG工具""" @@ -58,7 +64,9 @@ class RAG(CoreCall, input_model=RAGInput, output_model=RAGOutput): tokensLimit=self.tokens_limit, ) - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """调用RAG工具""" data = RAGInput(**input_data) question_obj = QuestionRewrite() diff --git a/apps/scheduler/call/search/search.py b/apps/scheduler/call/search/search.py index 73d21d7b9956a19b6eaaf23467b4286d7fbb3d79..473e7d73e58e9aa606a79b1ba83a0f415e42c756 100644 --- a/apps/scheduler/call/search/search.py +++ b/apps/scheduler/call/search/search.py @@ -1,10 +1,11 @@ """搜索工具""" from collections.abc import AsyncGenerator -from typing import Any +from typing import Any, ClassVar from apps.scheduler.call.core import CoreCall from apps.scheduler.call.search.schema import SearchInput, SearchOutput +from apps.schemas.enum_var import LanguageType from apps.schemas.scheduler import ( CallError, CallInfo, @@ -16,11 +17,16 @@ from apps.schemas.scheduler import ( class Search(CoreCall, input_model=SearchInput, output_model=SearchOutput): """搜索工具""" - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="搜索", description="获取搜索引擎的结果") - + i18n_info: 
ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "搜索", + "description": "获取搜索引擎的结果。", + }, + LanguageType.ENGLISH: { + "name": "Search", + "description": "Get the results of the search engine.", + }, + } async def _init(self, call_vars: CallVars) -> SearchInput: """初始化工具""" diff --git a/apps/scheduler/call/slot/prompt.py b/apps/scheduler/call/slot/prompt.py index e5650a4c5764a4ab739c6b66ad00838597c134a3..8a3f7ae8535f3420f05ca66b02d02429f143ca67 100644 --- a/apps/scheduler/call/slot/prompt.py +++ b/apps/scheduler/call/slot/prompt.py @@ -1,7 +1,9 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. """自动参数填充工具的提示词""" +from apps.schemas.enum_var import LanguageType -SLOT_GEN_PROMPT = r""" +SLOT_GEN_PROMPT:dict[LanguageType, str] = { + LanguageType.CHINESE: r""" 你是一个可以使用工具的AI助手,正尝试使用工具来完成任务。 目前,你正在生成一个JSON参数对象,以作为调用工具的输入。 @@ -85,4 +87,91 @@ SLOT_GEN_PROMPT = r""" {{schema}} - """ + """, + LanguageType.ENGLISH: r""" + + You are an AI assistant capable of using tools to complete tasks. + Currently, you are generating a JSON parameter object as input for calling a tool. + Please generate a compliant JSON object based on user input, background information, tool information, and JSON Schema content. + + Background information will be provided in , tool information in , JSON Schema in , \ + and the user's question in . + Output the generated JSON object in . + + Requirements: + 1. Strictly follow the JSON format described in the JSON Schema. Do not fabricate non-existent fields. + 2. Prioritize using values from user input for JSON fields. If not available, use content from background information. + 3. Only output the JSON object. Do not include any explanations or additional content. + 4. Optional fields in the JSON Schema may be omitted. + 5. Examples are for illustration only. Do not copy content from examples or use them as output. + 6. 
Respond in the same language as the user's question by default, unless explicitly requested otherwise. + + + + + User asked about today's weather in Hangzhou. AI replied it's sunny, 20℃. User then asks about tomorrow's weather in Hangzhou. + + + What's the weather like in Hangzhou tomorrow? + + + Tool name: check_weather + Tool description: Query weather information for specified cities + + + { + "type": "object", + "properties": { + "city": { + "type": "string", + "description": "City name" + }, + "date": { + "type": "string", + "description": "Query date" + }, + "required": ["city", "date"] + } + } + + + { + "city": "Hangzhou", + "date": "tomorrow" + } + + + + + Historical summary of tasks given by user, provided in : + + {{summary}} + + Additional itemized information: + {{ facts }} + + + During this task, you have called some tools and obtained their outputs, provided in : + + {% for tool in history_data %} + + {{ tool.step_name }} + {{ tool.step_description }} + {{ tool.output_data }} + + {% endfor %} + + + + {{question}} + + + Tool name: {{current_tool["name"]}} + Tool description: {{current_tool["description"]}} + + + {{schema}} + + + """, +} diff --git a/apps/scheduler/call/slot/slot.py b/apps/scheduler/call/slot/slot.py index 4f8e1010cc0bd88f22e050778bce236d7d3515e0..8dd0e7162aa366a7989799c172b539bdcce072be 100644 --- a/apps/scheduler/call/slot/slot.py +++ b/apps/scheduler/call/slot/slot.py @@ -3,7 +3,7 @@ import json from collections.abc import AsyncGenerator -from typing import TYPE_CHECKING, Any, Self +from typing import TYPE_CHECKING, Any, Self, ClassVar from jinja2 import BaseLoader from jinja2.sandbox import SandboxedEnvironment @@ -15,7 +15,7 @@ from apps.scheduler.call.core import CoreCall from apps.scheduler.call.slot.prompt import SLOT_GEN_PROMPT from apps.scheduler.call.slot.schema import SlotInput, SlotOutput from apps.scheduler.slot.slot import Slot as SlotProcessor -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var 
import CallOutputType, LanguageType from apps.schemas.pool import NodePool from apps.schemas.scheduler import CallInfo, CallOutputChunk, CallVars @@ -32,14 +32,20 @@ class Slot(CoreCall, input_model=SlotInput, output_model=SlotOutput): facts: list[str] = Field(description="事实信息", default=[]) step_num: int = Field(description="历史步骤数", default=1) - - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="参数自动填充", description="根据步骤历史,自动填充参数") - - - async def _llm_slot_fill(self, remaining_schema: dict[str, Any]) -> tuple[str, dict[str, Any]]: + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "参数自动填充", + "description": "根据步骤历史,自动填充参数", + }, + LanguageType.ENGLISH: { + "name": "Parameter Auto-Fill", + "description": "Auto-fill parameters based on step history.", + }, + } + + async def _llm_slot_fill( + self, remaining_schema: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> tuple[str, dict[str, Any]]: """使用大模型填充参数;若大模型解析度足够,则直接返回结果""" env = SandboxedEnvironment( loader=BaseLoader(), @@ -47,21 +53,24 @@ class Slot(CoreCall, input_model=SlotInput, output_model=SlotOutput): trim_blocks=True, lstrip_blocks=True, ) - template = env.from_string(SLOT_GEN_PROMPT) + template = env.from_string(SLOT_GEN_PROMPT[language]) conversation = [ {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": template.render( - current_tool={ - "name": self.name, - "description": self.description, - }, - schema=remaining_schema, - history_data=self._flow_history, - summary=self.summary, - question=self._question, - facts=self.facts, - )}, + { + "role": "user", + "content": template.render( + current_tool={ + "name": self.name, + "description": self.description, + }, + schema=remaining_schema, + history_data=self._flow_history, + summary=self.summary, + question=self._question, + facts=self.facts, + ), + }, ] # 使用大模型进行尝试 @@ -106,7 +115,6 @@ class Slot(CoreCall, 
input_model=SlotInput, output_model=SlotOutput): await obj._set_input(executor) return obj - async def _init(self, call_vars: CallVars) -> SlotInput: """初始化""" self._flow_history = [] @@ -126,8 +134,9 @@ class Slot(CoreCall, input_model=SlotInput, output_model=SlotOutput): remaining_schema=remaining_schema, ) - - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """执行参数填充""" data = SlotInput(**input_data) @@ -141,7 +150,7 @@ class Slot(CoreCall, input_model=SlotInput, output_model=SlotOutput): ).model_dump(by_alias=True, exclude_none=True), ) return - answer, slot_data = await self._llm_slot_fill(data.remaining_schema) + answer, slot_data = await self._llm_slot_fill(data.remaining_schema, language) slot_data = self._processor.convert_json(slot_data) remaining_schema = self._processor.check_json(slot_data) diff --git a/apps/scheduler/call/sql/schema.py b/apps/scheduler/call/sql/schema.py index 06ffb4f0611bb12a3d6080a166c2bead52d24e10..c3b57c5245dd1ad2cbb1fd24fb80f2f96959fea8 100644 --- a/apps/scheduler/call/sql/schema.py +++ b/apps/scheduler/call/sql/schema.py @@ -1,7 +1,7 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
"""SQL工具的输入输出""" -from typing import Any +from typing import Any, Optional from pydantic import Field @@ -17,5 +17,5 @@ class SQLInput(DataBase): class SQLOutput(DataBase): """SQL工具的输出""" - dataset: list[dict[str, Any]] = Field(description="SQL工具的执行结果") + result: list[dict[str, Any]] = Field(description="SQL工具的执行结果") sql: str = Field(description="SQL语句") diff --git a/apps/scheduler/call/sql/sql.py b/apps/scheduler/call/sql/sql.py index 3e24301de508e06adf5cfdbf24b3d8ca37c0cc27..cf0c6f678654cf0bd59dda34c6860f81277ed940 100644 --- a/apps/scheduler/call/sql/sql.py +++ b/apps/scheduler/call/sql/sql.py @@ -3,8 +3,9 @@ import logging from collections.abc import AsyncGenerator -from typing import Any +from typing import Any, ClassVar +from urllib.parse import urlparse import httpx from fastapi import status from pydantic import Field @@ -12,7 +13,7 @@ from pydantic import Field from apps.common.config import Config from apps.scheduler.call.core import CoreCall from apps.scheduler.call.sql.schema import SQLInput, SQLOutput -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.scheduler import ( CallError, CallInfo, @@ -22,21 +23,42 @@ from apps.schemas.scheduler import ( logger = logging.getLogger(__name__) +MESSAGE = { + "invaild": { + LanguageType.CHINESE: "SQL查询错误:无法生成有效的SQL语句!", + LanguageType.ENGLISH: "SQL query error: Unable to generate valid SQL statements!", + }, + "fail": { + LanguageType.CHINESE: "SQL查询错误:SQL语句执行失败!", + LanguageType.ENGLISH: "SQL query error: SQL statement execution failed!", + }, +} + class SQL(CoreCall, input_model=SQLInput, output_model=SQLOutput): """SQL工具。用于调用外置的Chat2DB工具的API,获得SQL语句;再在PostgreSQL中执行SQL语句,获得数据。""" - database_url: str = Field(description="数据库连接地址") + database_type: str = Field(description="数据库类型",default="postgres") # mysql mongodb opengauss postgres + host: str = Field(description="数据库地址",default="localhost") + port: int = 
Field(description="数据库端口",default=5432) + username: str = Field(description="数据库用户名",default="root") + password: str = Field(description="数据库密码",default="root") + database: str = Field(description="数据库名称",default="postgres") + table_name_list: list[str] = Field(description="表名列表",default=[]) - top_k: int = Field(description="生成SQL语句数量",default=5) - use_llm_enhancements: bool = Field(description="是否使用大模型增强", default=False) - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="SQL查询", description="使用大模型生成SQL语句,用于查询数据库中的结构化数据") + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "SQL查询", + "description": "使用大模型生成SQL语句,用于查询数据库中的结构化数据", + }, + LanguageType.ENGLISH: { + "name": "SQL Query", + "description": "Use the foundation model to generate SQL statements to query structured data in the databases", + }, + } async def _init(self, call_vars: CallVars) -> SQLInput: """初始化SQL工具。""" @@ -44,99 +66,55 @@ class SQL(CoreCall, input_model=SQLInput, output_model=SQLOutput): question=call_vars.question, ) + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: + """运行SQL工具, 支持MySQL, MongoDB, PostgreSQL, OpenGauss""" + + data = SQLInput(**input_data) + + headers = {"Content-Type": "application/json"} - async def _generate_sql(self, data: SQLInput) -> list[dict[str, Any]]: - """生成SQL语句列表""" post_data = { - "database_url": self.database_url, - "table_name_list": self.table_name_list, - "question": data.question, - "topk": self.top_k, - "use_llm_enhancements": self.use_llm_enhancements, + "type": self.database_type, + "host": self.host, + "port": self.port, + "username": self.username, + "password": self.password, + "database": self.database, + "goal": data.question, + "table_list": self.table_name_list, } - headers = {"Content-Type": "application/json"} - sql_list = [] - request_num = 0 - max_request = 5 - - while request_num < 
max_request and len(sql_list) < self.top_k: - try: - async with httpx.AsyncClient() as client: - response = await client.post( - Config().get_config().extra.sql_url + "/database/sql", - headers=headers, - json=post_data, - timeout=60.0, - ) - request_num += 1 - if response.status_code == status.HTTP_200_OK: - result = response.json() - if result["code"] == status.HTTP_200_OK: - sql_list.extend(result["result"]["sql_list"]) - else: - logger.error("[SQL] 生成失败:%s", response.text) - except Exception: - logger.exception("[SQL] 生成失败") - request_num += 1 - - return sql_list - - - async def _execute_sql( - self, - sql_list: list[dict[str, Any]], - ) -> tuple[list[dict[str, Any]] | None, str | None]: - """执行SQL语句并返回结果""" - headers = {"Content-Type": "application/json"} + try: + async with httpx.AsyncClient() as client: + response = await client.post( + Config().get_config().extra.sql_url + "/sql/handler", + headers=headers, + json=post_data, + timeout=60.0, + ) + + result = response.json() + if response.status_code == status.HTTP_200_OK: + if result["code"] == status.HTTP_200_OK: + result_data = result["result"] + sql_exec_results = result_data.get("execute_result") + sql_exec = result_data.get("sql") + sql_exec_risk = result_data.get("risk") + logger.info("[SQL] 调用成功\n[SQL 语句]: %s\n[SQL 结果]: %s\n[SQL 风险]: %s", sql_exec, sql_exec_results, sql_exec_risk) + + else: + logger.error("[SQL] 调用失败:%s", response.text) + logger.error("[SQL] 错误信息:%s", response["result"]) + except Exception: + logger.exception("[SQL] 调用失败") - for sql_dict in sql_list: - try: - async with httpx.AsyncClient() as client: - response = await client.post( - Config().get_config().extra.sql_url + "/sql/execute", - headers=headers, - json={ - "database_id": sql_dict["database_id"], - "sql": sql_dict["sql"], - }, - timeout=60.0, - ) - if response.status_code == status.HTTP_200_OK: - result = response.json() - if result["code"] == status.HTTP_200_OK: - return result["result"], sql_dict["sql"] - else: - 
logger.error("[SQL] 调用失败:%s", response.text) - except Exception: - logger.exception("[SQL] 调用失败") - - return None, None - - - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: - """运行SQL工具""" - data = SQLInput(**input_data) - # 生成SQL语句 - sql_list = await self._generate_sql(data) - if not sql_list: - raise CallError( - message="SQL查询错误:无法生成有效的SQL语句!", - data={}, - ) - - # 执行SQL语句 - sql_exec_results, sql_exec = await self._execute_sql(sql_list) - if sql_exec_results is None or sql_exec is None: - raise CallError( - message="SQL查询错误:SQL语句执行失败!", - data={}, - ) # 返回结果 data = SQLOutput( - dataset=sql_exec_results, + result=sql_exec_results, sql=sql_exec, ).model_dump(exclude_none=True, by_alias=True) diff --git a/apps/scheduler/call/suggest/prompt.py b/apps/scheduler/call/suggest/prompt.py index abc5e7d186500f917b4610b9c0e893dec7ef3c12..a9f61d7c1c5df6bc7fcee488d6ab05781b685d72 100644 --- a/apps/scheduler/call/suggest/prompt.py +++ b/apps/scheduler/call/suggest/prompt.py @@ -2,8 +2,11 @@ """问题推荐工具的提示词""" from textwrap import dedent +from apps.schemas.enum_var import LanguageType -SUGGEST_PROMPT = dedent(r""" +SUGGEST_PROMPT: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" 根据提供的对话和附加信息(用户倾向、历史问题列表、工具信息等),生成三个预测问题。 @@ -89,4 +92,98 @@ SUGGEST_PROMPT = dedent(r""" 现在,进行问题生成: -""") +""" + ), + LanguageType.ENGLISH: dedent( + r""" + + + Generate three predicted questions based on the provided conversation and additional information (user preferences, historical question list, tool information, etc.). + The historical question list displays questions asked by the user before the historical conversation and is for background reference only. + The conversation will be given in the tag, the user preferences will be given in the tag, + the historical question list will be given in the tag, and the tool information will be given in the tag. + + Requirements for generating predicted questions: + + 1. 
Generate three predicted questions in the user's voice. They must be interrogative or imperative sentences and must be less than 30 words. + + 2. Predicted questions must be concise, without repetition, unnecessary information, or text other than the question. + + 3. Output must be in the following format: + + ```json + { + "predicted_questions": [ + "Predicted question 1", + "Predicted question 2", + "Predicted question 3" + ] + } + ``` + + + + What are the famous attractions in Hangzhou? + Hangzhou West Lake is a famous scenic spot in Hangzhou, Zhejiang Province, China, known for its beautiful natural scenery and rich cultural heritage. There are many famous attractions around West Lake, including the renowned Su Causeway, Bai Causeway, Broken Bridge, and the Three Pools Mirroring the Moon. West Lake is renowned for its clear waters and surrounding mountains, making it one of China's most famous lakes. + + + Briefly introduce Hangzhou + What are the famous attractions in Hangzhou? + + + Scenic Spot Search + Scenic Spot Information Search + + ["Hangzhou", "Tourism"] + + Now, generate questions: + + { + "predicted_questions": [ + "What is the ticket price for the West Lake Scenic Area in Hangzhou?", + "What are the famous attractions in Hangzhou?", + "What's the weather like in Hangzhou?" 
+ ] + } + + + + Here's the actual data: + + + {% for message in conversation %} + <{{ message.role }}>{{ message.content }} + {% endfor %} + + + + {% if history %} + {% for question in history %} + {{ question }} + {% endfor %} + {% else %} + (No history question) + {% endif %} + + + + {% if tool %} + {{ tool.name }} + {{ tool.description }} + {% else %} + (No tool information) + {% endif %} + + + + {% if preference %} + {{ preference }} + {% else %} + (no user preference) + {% endif %} + + + Now, generate the question: + """ + ), +} diff --git a/apps/scheduler/call/suggest/suggest.py b/apps/scheduler/call/suggest/suggest.py index 1788fa0f4a8ede3af38264c9bb4a82628018086b..1e67b88bd5987c813a39aaa7f98f457d7ee01132 100644 --- a/apps/scheduler/call/suggest/suggest.py +++ b/apps/scheduler/call/suggest/suggest.py @@ -3,7 +3,7 @@ import random from collections.abc import AsyncGenerator -from typing import TYPE_CHECKING, Any, Self +from typing import TYPE_CHECKING, Any, Self, ClassVar from jinja2 import BaseLoader from jinja2.sandbox import SandboxedEnvironment @@ -20,7 +20,7 @@ from apps.scheduler.call.suggest.schema import ( SuggestionInput, SuggestionOutput, ) -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.pool import NodePool from apps.schemas.record import RecordContent from apps.schemas.scheduler import ( @@ -47,11 +47,16 @@ class Suggestion(CoreCall, input_model=SuggestionInput, output_model=SuggestionO context: SkipJsonSchema[list[dict[str, str]]] = Field(description="Executor的上下文", exclude=True) conversation_id: SkipJsonSchema[str] = Field(description="对话ID", exclude=True) - @classmethod - def info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="问题推荐", description="在答案下方显示推荐的下一个问题") - + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "问题推荐", + "description": "在答案下方显示推荐的下一个问题", + }, + LanguageType.ENGLISH: { + "name": "Question 
Suggestion", + "description": "Display the suggested next question under the answer", + }, + } @classmethod async def instance(cls, executor: "StepExecutor", node: NodePool | None, **kwargs: Any) -> Self: @@ -124,8 +129,9 @@ class Suggestion(CoreCall, input_model=SuggestionInput, output_model=SuggestionO history_questions.append(record_data.question) return history_questions - - async def _exec(self, input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec( + self, input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """运行问题推荐""" data = SuggestionInput(**input_data) @@ -141,7 +147,7 @@ class Suggestion(CoreCall, input_model=SuggestionInput, output_model=SuggestionO # 已推送问题数量 pushed_questions = 0 # 初始化Prompt - prompt_tpl = self._env.from_string(SUGGEST_PROMPT) + prompt_tpl = self._env.from_string(SUGGEST_PROMPT[language]) # 先处理configs for config in self.configs: diff --git a/apps/scheduler/call/summary/summary.py b/apps/scheduler/call/summary/summary.py index b605204e179246f915d561c0884fd277712faf33..da3139a9844bb7d354958175bfa9af34d09df74b 100644 --- a/apps/scheduler/call/summary/summary.py +++ b/apps/scheduler/call/summary/summary.py @@ -2,14 +2,14 @@ """总结上下文工具""" from collections.abc import AsyncGenerator -from typing import TYPE_CHECKING, Any, Self +from typing import TYPE_CHECKING, Any, Self, ClassVar from pydantic import Field from apps.llm.patterns.executor import ExecutorSummary from apps.scheduler.call.core import CoreCall, DataBase from apps.scheduler.call.summary.schema import SummaryOutput -from apps.schemas.enum_var import CallOutputType +from apps.schemas.enum_var import CallOutputType, LanguageType from apps.schemas.pool import NodePool from apps.schemas.scheduler import ( CallInfo, @@ -28,10 +28,16 @@ class Summary(CoreCall, input_model=DataBase, output_model=SummaryOutput): context: ExecutorBackground = Field(description="对话上下文") - @classmethod - def 
info(cls) -> CallInfo: - """返回Call的名称和描述""" - return CallInfo(name="理解上下文", description="使用大模型,理解对话上下文") + i18n_info: ClassVar[dict[str, dict]] = { + LanguageType.CHINESE: { + "name": "理解上下文", + "description": "使用大模型,理解对话上下文", + }, + LanguageType.ENGLISH: { + "name": "Context Understanding", + "description": "Use the foundation model to understand the conversation context", + }, + } @classmethod async def instance(cls, executor: "StepExecutor", node: NodePool | None, **kwargs: Any) -> Self: @@ -51,20 +57,25 @@ class Summary(CoreCall, input_model=DataBase, output_model=SummaryOutput): """初始化工具,返回输入""" return DataBase() - - async def _exec(self, _input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def _exec( + self, _input_data: dict[str, Any], language: LanguageType = LanguageType.CHINESE + ) -> AsyncGenerator[CallOutputChunk, None]: """执行工具""" summary_obj = ExecutorSummary() - summary = await summary_obj.generate(background=self.context) + summary = await summary_obj.generate(background=self.context, language=language) self.tokens.input_tokens += summary_obj.input_tokens self.tokens.output_tokens += summary_obj.output_tokens yield CallOutputChunk(type=CallOutputType.TEXT, content=summary) - - async def exec(self, executor: "StepExecutor", input_data: dict[str, Any]) -> AsyncGenerator[CallOutputChunk, None]: + async def exec( + self, + executor: "StepExecutor", + input_data: dict[str, Any], + language: LanguageType = LanguageType.CHINESE, + ) -> AsyncGenerator[CallOutputChunk, None]: """执行工具""" - async for chunk in self._exec(input_data): + async for chunk in self._exec(input_data, language): content = chunk.content if not isinstance(content, str): err = "[SummaryCall] 工具输出格式错误" diff --git a/apps/scheduler/executor/agent.py b/apps/scheduler/executor/agent.py index f6814dd364636d304382dc258f87fdd9f06eb9a8..372417bc188a27c03301f99d4a2387c1433cd0e3 100644 --- a/apps/scheduler/executor/agent.py +++ b/apps/scheduler/executor/agent.py @@ -1,12 
+1,32 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. """MCP Agent执行器""" - +from datetime import datetime,UTC import logging +import uuid +import anyio +from mcp.types import TextContent from pydantic import Field +from apps.llm.reasoning import ReasoningLLM from apps.scheduler.executor.base import BaseExecutor -from apps.scheduler.mcp_agent.agent.mcp import MCPAgent +from apps.schemas.enum_var import LanguageType +from apps.scheduler.mcp_agent.host import MCPHost +from apps.scheduler.mcp_agent.plan import MCPPlanner +from apps.scheduler.mcp_agent.select import FINAL_TOOL_ID, SELF_DESC_TOOL_ID +from apps.scheduler.pool.mcp.pool import MCPPool +from apps.schemas.enum_var import EventType, FlowStatus, StepStatus +from apps.schemas.mcp import ( + MCPCollection, + MCPTool, + Step, +) +from apps.schemas.message import FlowParams +from apps.schemas.task import FlowStepHistory +from apps.services.appcenter import AppCenterManager +from apps.services.mcp_service import MCPServiceManager +from apps.services.task import TaskManager +from apps.services.user import UserManager logger = logging.getLogger(__name__) @@ -14,27 +34,581 @@ logger = logging.getLogger(__name__) class MCPAgentExecutor(BaseExecutor): """MCP Agent执行器""" - question: str = Field(description="用户输入") - max_steps: int = Field(default=10, description="最大步数") + max_steps: int = Field(default=40, description="最大步数") servers_id: list[str] = Field(description="MCP server id") agent_id: str = Field(default="", description="Agent ID") agent_description: str = Field(default="", description="Agent描述") + mcp_list: list[MCPCollection] = Field(description="MCP服务器列表", default=[]) + mcp_pool: MCPPool = Field(description="MCP池", default=MCPPool()) + tools: dict[str, MCPTool] = Field( + description="MCP工具列表,key为tool_id", + default={}, + ) + tool_list: list[MCPTool] = Field( + description="MCP工具列表,包含所有MCP工具", + default=[], + ) + params: FlowParams | bool | None = Field( + default=None, + 
description="流执行过程中的参数补充", + alias="params", + ) + resoning_llm: ReasoningLLM = Field( + default=ReasoningLLM(), + description="推理大模型", + ) - async def run(self) -> None: - """运行MCP Agent""" - agent = await MCPAgent.create( - servers_id=self.servers_id, - max_steps=self.max_steps, - task=self.task, - msg_queue=self.msg_queue, - question=self.question, - agent_id=self.agent_id, - description=self.agent_description, + async def update_tokens(self) -> None: + """更新令牌数""" + self.task.tokens.input_tokens = self.resoning_llm.input_tokens + self.task.tokens.output_tokens = self.resoning_llm.output_tokens + await TaskManager.save_task(self.task.id, self.task) + + async def load_state(self) -> None: + """从数据库中加载FlowExecutor的状态""" + logger.info("[FlowExecutor] 加载Executor状态") + # 尝试恢复State + if self.task.state and self.task.state.flow_status != FlowStatus.INIT: + self.task.context = await TaskManager.get_context_by_task_id(self.task.id) + + async def load_mcp(self) -> None: + """加载MCP服务器列表""" + logger.info("[MCPAgentExecutor] 加载MCP服务器列表") + # 获取MCP服务器列表 + app = await AppCenterManager.fetch_app_data_by_id(self.agent_id) + mcp_ids = app.mcp_service + for mcp_id in mcp_ids: + mcp_service = await MCPServiceManager.get_mcp_service(mcp_id) + if self.task.ids.user_sub not in mcp_service.activated: + logger.warning( + "[MCPAgentExecutor] 用户 %s 未启用MCP %s", + self.task.ids.user_sub, + mcp_id, + ) + continue + + self.mcp_list.append(mcp_service) + await self.mcp_pool._init_mcp(mcp_id, self.task.ids.user_sub) + for tool in mcp_service.tools: + self.tools[tool.id] = tool + self.tool_list.extend(mcp_service.tools) + if self.task.language == LanguageType.CHINESE: + self.tools[FINAL_TOOL_ID] = MCPTool( + id=FINAL_TOOL_ID, name="Final Tool", description="结束流程的工具", mcp_id="", input_schema={}, + ) + self.tool_list.append( + MCPTool(id=FINAL_TOOL_ID, name="Final Tool", description="结束流程的工具", mcp_id="", input_schema={}), + ) + self.tools[SELF_DESC_TOOL_ID] = MCPTool( + id=SELF_DESC_TOOL_ID, + 
name="Self Description", + description="用于描述自身能力和背景信息的工具", + mcp_id="", + input_schema={}, + ) + self.tool_list.append( + MCPTool( + id=SELF_DESC_TOOL_ID, + name="Self Description", + description="用于描述自身能力和背景信息的工具", + mcp_id="", + input_schema={}, + ) + ) + else: + self.tools[FINAL_TOOL_ID] = MCPTool(id=FINAL_TOOL_ID, name="Final Tool", + description="The tool to end the process", mcp_id="", input_schema={},) + self.tool_list.append( + MCPTool( + id=FINAL_TOOL_ID, name="Final Tool", description="The tool to end the process", + mcp_id="", input_schema={}),) + self.tools[SELF_DESC_TOOL_ID] = MCPTool( + id=SELF_DESC_TOOL_ID, + name="Self Description", + description="A tool used to describe one's own abilities and background information", + mcp_id="", + input_schema={}, + ) + self.tool_list.append( + MCPTool( + id=SELF_DESC_TOOL_ID, + name="Self Description", + description="A tool used to describe one's own abilities and background information", + mcp_id="", + input_schema={}, + ) + ) + + async def get_tool_input_param(self, is_first: bool) -> None: + # 工具的入参是 {} ,不需要填充 + if self.task.state.tool_id in [FINAL_TOOL_ID, SELF_DESC_TOOL_ID]: + self.task.state.current_input = {} + return + if is_first: + # 获取第一个输入参数 + mcp_tool = self.tools[self.task.state.tool_id] + self.task.state.current_input = await MCPHost._get_first_input_params( + mcp_tool, self.task.runtime.question, self.task.state.step_description, self.task + ) + else: + # 获取后续输入参数 + if isinstance(self.params, FlowParams): + params = self.params.content + params_description = self.params.description + else: + params = {} + params_description = "" + mcp_tool = self.tools[self.task.state.tool_id] + self.task.state.current_input = await MCPHost._fill_params( + mcp_tool, + self.task.runtime.question, + self.task.state.step_description, + self.task.state.current_input, + self.task.state.error_message, + params, + params_description, + self.task.language, + ) + + async def confirm_before_step(self) -> None: + 
"""确认前步骤""" + # 发送确认消息 + mcp_tool = self.tools[self.task.state.tool_id] + confirm_message = await MCPPlanner.get_tool_risk( + mcp_tool, self.task.state.current_input, "", self.resoning_llm, self.task.language + ) + await self.update_tokens() + await self.push_message( + EventType.STEP_WAITING_FOR_START, confirm_message.model_dump(exclude_none=True, by_alias=True), + ) + await self.push_message(EventType.FLOW_STOP, {}) + self.task.state.flow_status = FlowStatus.WAITING + self.task.state.step_status = StepStatus.WAITING + self.task.context.append( + FlowStepHistory( + task_id=self.task.id, + step_id=self.task.state.step_id, + step_name=self.task.state.step_name, + step_description=self.task.state.step_description, + step_status=self.task.state.step_status, + flow_id=self.task.state.flow_id, + flow_name=self.task.state.flow_name, + flow_status=self.task.state.flow_status, + input_data={}, + output_data={}, + ex_data=confirm_message.model_dump(exclude_none=True, by_alias=True), + ) + ) + + async def run_step(self) -> None: + """执行步骤""" + self.task.state.flow_status = FlowStatus.RUNNING + self.task.state.step_status = StepStatus.RUNNING + mcp_tool = self.tools[self.task.state.tool_id] + result_exchange = True + try: + if self.task.state.tool_id == SELF_DESC_TOOL_ID: + tools = [] + for tool in self.tool_list: + if tool.id not in [SELF_DESC_TOOL_ID, FINAL_TOOL_ID]: + tools.append(f"{tool.name}: {tool.description}") + output_params = { + "message": tools + } + result_exchange = False + else: + mcp_client = (await self.mcp_pool.get(mcp_tool.mcp_id, self.task.ids.user_sub)) + output_params = await mcp_client.call_tool(mcp_tool.name, self.task.state.current_input) + except anyio.ClosedResourceError: + logger.exception("[MCPAgentExecutor] MCP客户端连接已关闭: %s", mcp_tool.mcp_id) + await self.mcp_pool.stop(mcp_tool.mcp_id, self.task.ids.user_sub) + await self.mcp_pool._init_mcp(mcp_tool.mcp_id, self.task.ids.user_sub) + self.task.state.step_status = StepStatus.ERROR + return + except 
Exception as e: + import traceback + logger.exception("[MCPAgentExecutor] 执行步骤 %s 时发生错误: %s", mcp_tool.name, traceback.format_exc()) + self.task.state.step_status = StepStatus.ERROR + self.task.state.error_message = str(e) + return + if result_exchange: + if output_params.isError: + err = "" + for output in output_params.content: + if isinstance(output, TextContent): + err += output.text + self.task.state.step_status = StepStatus.ERROR + self.task.state.error_message = err + return + message = "" + for output in output_params.content: + if isinstance(output, TextContent): + message += output.text + output_params = { + "message": message, + } + + await self.update_tokens() + await self.push_message(EventType.STEP_INPUT, self.task.state.current_input) + await self.push_message(EventType.STEP_OUTPUT, output_params) + self.task.context.append( + FlowStepHistory( + task_id=self.task.id, + step_id=self.task.state.step_id, + step_name=self.task.state.step_name, + step_description=self.task.state.step_description, + step_status=StepStatus.SUCCESS, + flow_id=self.task.state.flow_id, + flow_name=self.task.state.flow_name, + flow_status=self.task.state.flow_status, + input_data=self.task.state.current_input, + output_data=output_params, + ) ) + self.task.state.step_status = StepStatus.SUCCESS + async def generate_params_with_null(self) -> None: + """生成参数补充""" + mcp_tool = self.tools[self.task.state.tool_id] + params_with_null = await MCPPlanner.get_missing_param( + mcp_tool, + self.task.state.current_input, + self.task.state.error_message, + self.resoning_llm, + self.task.language, + ) + await self.update_tokens() + error_message = await MCPPlanner.change_err_message_to_description( + error_message=self.task.state.error_message, + tool=mcp_tool, + input_params=self.task.state.current_input, + reasoning_llm=self.resoning_llm, + language=self.task.language, + ) + await self.push_message( + EventType.STEP_WAITING_FOR_PARAM, data={"message": error_message, "params": 
params_with_null} + ) + await self.push_message(EventType.FLOW_STOP, data={}) + self.task.state.flow_status = FlowStatus.WAITING + self.task.state.step_status = StepStatus.PARAM + self.task.context.append( + FlowStepHistory( + task_id=self.task.id, + step_id=self.task.state.step_id, + step_name=self.task.state.step_name, + step_description=self.task.state.step_description, + step_status=self.task.state.step_status, + flow_id=self.task.state.flow_id, + flow_name=self.task.state.flow_name, + flow_status=self.task.state.flow_status, + input_data={}, + output_data={}, + ex_data={ + "message": error_message, + "params": params_with_null + } + ) + ) + + async def get_next_step(self) -> None: + """获取下一步""" + self.task.tokens.time=datetime.now(UTC).timestamp() + self.task.state.retry_times = 0 + if self.task.state.step_cnt < self.max_steps: + self.task.state.step_cnt += 1 + history = await MCPHost.assemble_memory(self.task) + max_retry = 3 + step = None + for i in range(max_retry): + try: + step = await MCPPlanner.create_next_step(self.task.runtime.question, history, self.tool_list, language=self.task.language) + if step.tool_id in self.tools.keys(): + break + except Exception as e: + logger.warning("[MCPAgentExecutor] 获取下一步失败,重试中: %s", str(e)) + if step is None or step.tool_id not in self.tools.keys(): + step = Step( + tool_id=FINAL_TOOL_ID, + description=FINAL_TOOL_ID + ) + tool_id = step.tool_id + if tool_id == FINAL_TOOL_ID: + step_name = FINAL_TOOL_ID + else: + step_name = self.tools[tool_id].name + step_description = step.description + self.task.state.step_id = str(uuid.uuid4()) + self.task.state.tool_id = tool_id + self.task.state.step_name = step_name + self.task.state.step_description = step_description + self.task.state.step_status = StepStatus.INIT + self.task.state.current_input = {} + else: + # 没有下一步了,结束流程 + self.task.state.tool_id = FINAL_TOOL_ID + return + + async def error_handle_after_step(self) -> None: + """步骤执行失败后的错误处理""" + self.task.state.step_status = 
StepStatus.ERROR + self.task.state.flow_status = FlowStatus.ERROR + await self.push_message( + EventType.FLOW_FAILED, + data={} + ) + if len(self.task.context) and self.task.context[-1].step_id == self.task.state.step_id: + del self.task.context[-1] + self.task.context.append( + FlowStepHistory( + task_id=self.task.id, + step_id=self.task.state.step_id, + step_name=self.task.state.step_name, + step_description=self.task.state.step_description, + step_status=self.task.state.step_status, + flow_id=self.task.state.flow_id, + flow_name=self.task.state.flow_name, + flow_status=self.task.state.flow_status, + input_data={}, + output_data={}, + ) + ) + self.task.state.tool_id = FINAL_TOOL_ID + + async def work(self) -> None: + """执行当前步骤""" + if self.task.state.step_status == StepStatus.INIT: + await self.push_message( + EventType.STEP_INIT, + data={} + ) + await self.get_tool_input_param(is_first=True) + user_info = await UserManager.get_userinfo_by_user_sub(self.task.ids.user_sub) + if not user_info.auto_execute: + # 等待用户确认 + await self.confirm_before_step() + return + self.task.state.step_status = StepStatus.RUNNING + elif self.task.state.step_status in [StepStatus.PARAM, StepStatus.WAITING, StepStatus.RUNNING]: + if self.task.state.step_status == StepStatus.PARAM: + if len(self.task.context) and self.task.context[-1].step_id == self.task.state.step_id: + del self.task.context[-1] + await self.get_tool_input_param(is_first=False) + elif self.task.state.step_status == StepStatus.WAITING: + if self.params: + if len(self.task.context) and self.task.context[-1].step_id == self.task.state.step_id: + del self.task.context[-1] + else: + self.task.state.flow_status = FlowStatus.CANCELLED + self.task.state.step_status = StepStatus.CANCELLED + await self.push_message( + EventType.STEP_CANCEL, + data={} + ) + await self.push_message( + EventType.FLOW_CANCEL, + data={} + ) + if len(self.task.context) and self.task.context[-1].step_id == self.task.state.step_id: + 
self.task.context[-1].step_status = StepStatus.CANCELLED + return + max_retry = 5 + for i in range(max_retry): + if i != 0: + await self.get_tool_input_param(is_first=True) + await self.run_step() + if self.task.state.step_status == StepStatus.SUCCESS: + break + elif self.task.state.step_status == StepStatus.ERROR: + # 错误处理 + self.task.state.retry_times += 1 + if self.task.state.retry_times >= 3: + await self.error_handle_after_step() + else: + user_info = await UserManager.get_userinfo_by_user_sub(self.task.ids.user_sub) + if user_info.auto_execute: + await self.push_message( + EventType.STEP_ERROR, + data={ + "message": self.task.state.error_message, + } + ) + if len(self.task.context) and self.task.context[-1].step_id == self.task.state.step_id: + self.task.context[-1].step_status = StepStatus.ERROR + self.task.context[-1].output_data = { + "message": self.task.state.error_message, + } + else: + self.task.context.append( + FlowStepHistory( + task_id=self.task.id, + step_id=self.task.state.step_id, + step_name=self.task.state.step_name, + step_description=self.task.state.step_description, + step_status=StepStatus.ERROR, + flow_id=self.task.state.flow_id, + flow_name=self.task.state.flow_name, + flow_status=self.task.state.flow_status, + input_data=self.task.state.current_input, + output_data={ + "message": self.task.state.error_message, + }, + ) + ) + await self.get_next_step() + else: + mcp_tool = self.tools[self.task.state.tool_id] + is_param_error = await MCPPlanner.is_param_error( + self.task.runtime.question, + await MCPHost.assemble_memory(self.task), + self.task.state.error_message, + mcp_tool, + self.task.state.step_description, + self.task.state.current_input, + language=self.task.language, + ) + if is_param_error.is_param_error: + # 如果是参数错误,生成参数补充 + await self.generate_params_with_null() + else: + await self.push_message( + EventType.STEP_ERROR, + data={ + "message": self.task.state.error_message, + } + ) + if len(self.task.context) and 
self.task.context[-1].step_id == self.task.state.step_id: + self.task.context[-1].step_status = StepStatus.ERROR + self.task.context[-1].output_data = { + "message": self.task.state.error_message, + } + else: + self.task.context.append( + FlowStepHistory( + task_id=self.task.id, + step_id=self.task.state.step_id, + step_name=self.task.state.step_name, + step_description=self.task.state.step_description, + step_status=StepStatus.ERROR, + flow_id=self.task.state.flow_id, + flow_name=self.task.state.flow_name, + flow_status=self.task.state.flow_status, + input_data=self.task.state.current_input, + output_data={ + "message": self.task.state.error_message, + }, + ) + ) + await self.get_next_step() + elif self.task.state.step_status == StepStatus.SUCCESS: + await self.get_next_step() + + async def summarize(self) -> None: + """总结""" + async for chunk in MCPPlanner.generate_answer( + self.task.runtime.question, + (await MCPHost.assemble_memory(self.task)), + self.resoning_llm, + self.task.language, + ): + await self.push_message( + EventType.TEXT_ADD, + data=chunk + ) + self.task.runtime.answer += chunk + + async def run(self) -> None: + """执行MCP Agent的主逻辑""" + # 初始化MCP服务 + await self.load_state() + await self.load_mcp() + data = {} + if self.task.state.flow_status == FlowStatus.INIT: + # 初始化状态 + try: + self.task.state.flow_id = str(uuid.uuid4()) + self.task.state.flow_name = (await MCPPlanner.get_flow_name( + self.task.runtime.question, self.resoning_llm, self.task.language + )).flow_name + flow_risk = await MCPPlanner.get_flow_excute_risk( + self.task.runtime.question, self.tool_list, self.resoning_llm, self.task.language + ) + user_info = await UserManager.get_userinfo_by_user_sub(self.task.ids.user_sub) + if user_info.auto_execute: + data = flow_risk.model_dump(exclude_none=True, by_alias=True) + await TaskManager.save_task(self.task.id, self.task) + await self.get_next_step() + except Exception as e: + logger.exception("[MCPAgentExecutor] 初始化失败") + 
self.task.state.flow_status = FlowStatus.ERROR + self.task.state.error_message = str(e) + await self.push_message( + EventType.FLOW_FAILED, + data={} + ) + return + self.task.state.flow_status = FlowStatus.RUNNING + await self.push_message( + EventType.FLOW_START, + data=data + ) + if self.task.state.tool_id == FINAL_TOOL_ID: + # 如果已经是最后一步,直接结束 + self.task.state.flow_status = FlowStatus.SUCCESS + await self.push_message( + EventType.FLOW_SUCCESS, + data={} + ) + await self.summarize() + return try: - answer = await agent.run(self.question) - self.task = agent.task - self.task.runtime.answer = answer + while self.task.state.flow_status == FlowStatus.RUNNING: + if self.task.state.tool_id == FINAL_TOOL_ID: + break + await self.work() + await TaskManager.save_task(self.task.id, self.task) + tool_id = self.task.state.tool_id + if tool_id == FINAL_TOOL_ID: + # 如果已经是最后一步,直接结束 + self.task.state.flow_status = FlowStatus.SUCCESS + self.task.state.step_status = StepStatus.SUCCESS + await self.push_message( + EventType.FLOW_SUCCESS, + data={} + ) + await self.summarize() except Exception as e: - logger.error(f"Error: {str(e)}") + logger.exception("[MCPAgentExecutor] 执行过程中发生错误") + self.task.state.flow_status = FlowStatus.ERROR + self.task.state.error_message = str(e) + self.task.state.step_status = StepStatus.ERROR + await self.push_message( + EventType.STEP_ERROR, + data={} + ) + await self.push_message( + EventType.FLOW_FAILED, + data={} + ) + if len(self.task.context) and self.task.context[-1].step_id == self.task.state.step_id: + del self.task.context[-1] + self.task.context.append( + FlowStepHistory( + task_id=self.task.id, + step_id=self.task.state.step_id, + step_name=self.task.state.step_name, + step_description=self.task.state.step_description, + step_status=self.task.state.step_status, + flow_id=self.task.state.flow_id, + flow_name=self.task.state.flow_name, + flow_status=self.task.state.flow_status, + input_data={}, + output_data={}, + ) + ) + finally: + for 
mcp_service in self.mcp_list: + try: + await self.mcp_pool.stop(mcp_service.id, self.task.ids.user_sub) + except Exception as e: + import traceback + logger.error("[MCPAgentExecutor] 停止MCP客户端时发生错误: %s", traceback.format_exc()) diff --git a/apps/scheduler/executor/base.py b/apps/scheduler/executor/base.py index cf2f4e6838f5d8d5b6578db2750543ddfd22ee75..877341f08e776c168b618ff5cbea8e17c9cfc956 100644 --- a/apps/scheduler/executor/base.py +++ b/apps/scheduler/executor/base.py @@ -44,15 +44,8 @@ class BaseExecutor(BaseModel, ABC): :param event_type: 事件类型 :param data: 消息数据,如果是FLOW_START事件且data为None,则自动构建FlowStartContent """ - if event_type == EventType.FLOW_START.value and isinstance(data, dict): - data = FlowStartContent( - question=self.question, - params=self.task.runtime.filled, - ).model_dump(exclude_none=True, by_alias=True) - elif event_type == EventType.FLOW_STOP.value: - data = {} - elif event_type == EventType.TEXT_ADD.value and isinstance(data, str): - data=TextAddContent(text=data).model_dump(exclude_none=True, by_alias=True) + if event_type == EventType.TEXT_ADD.value and isinstance(data, str): + data = TextAddContent(text=data).model_dump(exclude_none=True, by_alias=True) if data is None: data = {} @@ -62,7 +55,7 @@ class BaseExecutor(BaseModel, ABC): await self.msg_queue.push_output( self.task, event_type=event_type, - data=data, # type: ignore[arg-type] + data=data, # type: ignore[arg-type] ) @abstractmethod diff --git a/apps/scheduler/executor/flow.py b/apps/scheduler/executor/flow.py index a70d0d7073c50ec2290c3fd4c797bbb3efcd4501..485cef3451f8e0c464b9892c2cc38d482436841d 100644 --- a/apps/scheduler/executor/flow.py +++ b/apps/scheduler/executor/flow.py @@ -11,7 +11,7 @@ from pydantic import Field from apps.scheduler.call.llm.prompt import LLM_ERROR_PROMPT from apps.scheduler.executor.base import BaseExecutor from apps.scheduler.executor.step import StepExecutor -from apps.schemas.enum_var import EventType, SpecialCallType, StepStatus +from 
apps.schemas.enum_var import EventType, SpecialCallType, FlowStatus, StepStatus, LanguageType from apps.schemas.flow import Flow, Step from apps.schemas.request_data import RequestDataApp from apps.schemas.task import ExecutorState, StepQueueItem @@ -20,21 +20,37 @@ from apps.services.task import TaskManager logger = logging.getLogger(__name__) # 开始前的固定步骤 FIXED_STEPS_BEFORE_START = [ - Step( - name="理解上下文", - description="使用大模型,理解对话上下文", - node=SpecialCallType.SUMMARY.value, - type=SpecialCallType.SUMMARY.value, - ), + { + LanguageType.CHINESE: Step( + name="理解上下文", + description="使用大模型,理解对话上下文", + node=SpecialCallType.SUMMARY.value, + type=SpecialCallType.SUMMARY.value, + ), + LanguageType.ENGLISH: Step( + name="Understand context", + description="Use large model to understand the context of the dialogue", + node=SpecialCallType.SUMMARY.value, + type=SpecialCallType.SUMMARY.value, + ), + } ] # 结束后的固定步骤 FIXED_STEPS_AFTER_END = [ - Step( - name="记忆存储", - description="理解对话答案,并存储到记忆中", - node=SpecialCallType.FACTS.value, - type=SpecialCallType.FACTS.value, - ), + { + LanguageType.CHINESE: Step( + name="记忆存储", + description="理解对话答案,并存储到记忆中", + node=SpecialCallType.FACTS.value, + type=SpecialCallType.FACTS.value, + ), + LanguageType.ENGLISH: Step( + name="Memory storage", + description="Understand the answer of the dialogue and store it in the memory", + node=SpecialCallType.FACTS.value, + type=SpecialCallType.FACTS.value, + ), + } ] @@ -46,38 +62,45 @@ class FlowExecutor(BaseExecutor): flow_id: str = Field(description="Flow ID") question: str = Field(description="用户输入") post_body_app: RequestDataApp = Field(description="请求体中的app信息") - + current_step: StepQueueItem | None = Field( + description="当前执行的步骤", + default=None + ) async def load_state(self) -> None: """从数据库中加载FlowExecutor的状态""" logger.info("[FlowExecutor] 加载Executor状态") # 尝试恢复State - if self.task.state: + if ( + self.task.state + and self.task.state.flow_status != FlowStatus.INIT + and 
self.task.state.flow_status != FlowStatus.UNKNOWN + ): self.task.context = await TaskManager.get_context_by_task_id(self.task.id) else: # 创建ExecutorState self.task.state = ExecutorState( flow_id=str(self.flow_id), flow_name=self.flow.name, + flow_status=FlowStatus.RUNNING, description=str(self.flow.description), - status=StepStatus.RUNNING, + step_status=StepStatus.RUNNING, app_id=str(self.post_body_app.app_id), step_id="start", - step_name="开始", + step_name="开始" if self.task.language == LanguageType.CHINESE else "Start", ) self.validate_flow_state(self.task) # 是否到达Flow结束终点(变量) self._reached_end: bool = False self.step_queue: deque[StepQueueItem] = deque() - - async def _invoke_runner(self, queue_item: StepQueueItem) -> None: + async def _invoke_runner(self) -> None: """单一Step执行""" # 创建步骤Runner step_runner = StepExecutor( msg_queue=self.msg_queue, task=self.task, - step=queue_item, + step=self.current_step, background=self.background, question=self.question, ) @@ -85,23 +108,21 @@ class FlowExecutor(BaseExecutor): # 初始化步骤 await step_runner.init() # 运行Step - await step_runner.run() + await step_runner.run() # 更新Task(已存过库) self.task = step_runner.task - async def _step_process(self) -> None: """执行当前queue里面的所有步骤(在用户看来是单一Step)""" while True: try: - queue_item = self.step_queue.pop() + self.current_step = self.step_queue.pop() except IndexError: break # 执行Step - await self._invoke_runner(queue_item) - + await self._invoke_runner() async def _find_next_id(self, step_id: str) -> list[str]: """查找下一个节点""" @@ -111,14 +132,22 @@ class FlowExecutor(BaseExecutor): next_ids += [edge.edge_to] return next_ids - async def _find_flow_next(self) -> list[StepQueueItem]: """在当前步骤执行前,尝试获取下一步""" # 如果当前步骤为结束,则直接返回 - if self.task.state.step_id == "end" or not self.task.state.step_id: # type: ignore[arg-type] + if self.task.state.step_id == "end" or not self.task.state.step_id: # type: ignore[arg-type] return [] - - next_steps = await self._find_next_id(self.task.state.step_id) # type: 
ignore[arg-type] + if self.current_step.step.type == SpecialCallType.CHOICE.value: + # 如果是choice节点,获取分支ID + branch_id = self.task.context[-1].output_data["branch_id"] + if branch_id: + next_steps = await self._find_next_id(self.task.state.step_id + "." + branch_id) + logger.info("[FlowExecutor] 分支ID:%s", branch_id) + else: + logger.warning("[FlowExecutor] 没有找到分支ID,返回空列表") + return [] + else: + next_steps = await self._find_next_id(self.task.state.step_id) # type: ignore[arg-type] # 如果step没有任何出边,直接跳到end if not next_steps: return [ @@ -137,7 +166,6 @@ class FlowExecutor(BaseExecutor): for next_step in next_steps ] - async def run(self) -> None: """ 运行流,返回各步骤结果,直到无法继续执行 @@ -150,46 +178,56 @@ class FlowExecutor(BaseExecutor): # 获取首个步骤 first_step = StepQueueItem( - step_id=self.task.state.step_id, # type: ignore[arg-type] - step=self.flow.steps[self.task.state.step_id], # type: ignore[arg-type] + step_id=self.task.state.step_id, # type: ignore[arg-type] + step=self.flow.steps[self.task.state.step_id], # type: ignore[arg-type] ) # 头插开始前的系统步骤,并执行 for step in FIXED_STEPS_BEFORE_START: - self.step_queue.append(StepQueueItem( - step_id=str(uuid.uuid4()), - step=step, - enable_filling=False, - to_user=False, - )) + self.step_queue.append( + StepQueueItem( + step_id=str(uuid.uuid4()), + step=step.get(self.task.language, step[LanguageType.CHINESE]), + enable_filling=False, + to_user=False, + ) + ) await self._step_process() # 插入首个步骤 self.step_queue.append(first_step) - + self.task.state.flow_status = FlowStatus.RUNNING # type: ignore[arg-type] # 运行Flow(未达终点) + is_error = False while not self._reached_end: # 如果当前步骤出错,执行错误处理步骤 - if self.task.state.status == StepStatus.ERROR: # type: ignore[arg-type] + if self.task.state.step_status == StepStatus.ERROR: # type: ignore[arg-type] logger.warning("[FlowExecutor] Executor出错,执行错误处理步骤") self.step_queue.clear() - self.step_queue.appendleft(StepQueueItem( - step_id=str(uuid.uuid4()), - step=Step( - name="错误处理", - description="错误处理", - 
node=SpecialCallType.LLM.value, - type=SpecialCallType.LLM.value, - params={ - "user_prompt": LLM_ERROR_PROMPT.replace( - "{{ error_info }}", - self.task.state.error_info["err_msg"], # type: ignore[arg-type] + self.step_queue.appendleft( + StepQueueItem( + step_id=str(uuid.uuid4()), + step=Step( + name=( + "错误处理" if self.task.language == LanguageType.CHINESE else "Error Handling" ), - }, - ), - enable_filling=False, - to_user=False, - )) + description=( + "错误处理" if self.task.language == LanguageType.CHINESE else "Error Handling" + ), + node=SpecialCallType.LLM.value, + type=SpecialCallType.LLM.value, + params={ + "user_prompt": LLM_ERROR_PROMPT[self.task.language].replace( + "{{ error_info }}", + self.task.state.error_info["err_msg"], # type: ignore[arg-type] + ), + }, + ), + enable_filling=False, + to_user=False, + ) + ) + is_error = True # 错误处理后结束 self._reached_end = True @@ -204,15 +242,26 @@ class FlowExecutor(BaseExecutor): for step in next_step: self.step_queue.append(step) + # 更新Task状态 + if is_error: + self.task.state.flow_status = FlowStatus.ERROR # type: ignore[arg-type] + else: + self.task.state.flow_status = FlowStatus.SUCCESS # type: ignore[arg-type] + # 尾插运行结束后的系统步骤 for step in FIXED_STEPS_AFTER_END: - self.step_queue.append(StepQueueItem( - step_id=str(uuid.uuid4()), - step=step, - )) + self.step_queue.append( + StepQueueItem( + step_id=str(uuid.uuid4()), + step=step.get(self.task.language, step[LanguageType.CHINESE]), + ) + ) await self._step_process() # FlowStop需要返回总时间,需要倒推最初的开始时间(当前时间减去当前已用总时间) self.task.tokens.time = round(datetime.now(UTC).timestamp(), 2) - self.task.tokens.full_time # 推送Flow停止消息 - await self.push_message(EventType.FLOW_STOP.value) + if is_error: + await self.push_message(EventType.FLOW_FAILED.value) + else: + await self.push_message(EventType.FLOW_SUCCESS.value) diff --git a/apps/scheduler/executor/step.py b/apps/scheduler/executor/step.py index 6b3451fa9ccd92b8f9b2d497f3983a64ff3a6981..7c3808c2d6d051289f257d742f184956fad96d4c 
100644 --- a/apps/scheduler/executor/step.py +++ b/apps/scheduler/executor/step.py @@ -86,8 +86,8 @@ class StepExecutor(BaseExecutor): logger.info("[StepExecutor] 初始化步骤 %s", self.step.step.name) # State写入ID和运行状态 - self.task.state.step_id = self.step.step_id # type: ignore[arg-type] - self.task.state.step_name = self.step.step.name # type: ignore[arg-type] + self.task.state.step_id = self.step.step_id # type: ignore[arg-type] + self.task.state.step_name = self.step.step.name # type: ignore[arg-type] # 获取并验证Call类 node_id = self.step.step.node @@ -119,7 +119,6 @@ class StepExecutor(BaseExecutor): logger.exception("[StepExecutor] 初始化Call失败") raise - async def _run_slot_filling(self) -> None: """运行自动参数填充;相当于特殊Step,但是不存库""" # 判断是否需要进行自动参数填充 @@ -127,13 +126,13 @@ class StepExecutor(BaseExecutor): return # 暂存旧数据 - current_step_id = self.task.state.step_id # type: ignore[arg-type] - current_step_name = self.task.state.step_name # type: ignore[arg-type] + current_step_id = self.task.state.step_id # type: ignore[arg-type] + current_step_name = self.task.state.step_name # type: ignore[arg-type] # 更新State - self.task.state.step_id = str(uuid.uuid4()) # type: ignore[arg-type] - self.task.state.step_name = "自动参数填充" # type: ignore[arg-type] - self.task.state.status = StepStatus.RUNNING # type: ignore[arg-type] + self.task.state.step_id = str(uuid.uuid4()) # type: ignore[arg-type] + self.task.state.step_name = "自动参数填充" # type: ignore[arg-type] + self.task.state.step_status = StepStatus.RUNNING # type: ignore[arg-type] self.task.tokens.time = round(datetime.now(UTC).timestamp(), 2) # 初始化填参 @@ -156,21 +155,20 @@ class StepExecutor(BaseExecutor): # 如果没有填全,则状态设置为待填参 if result.remaining_schema: - self.task.state.status = StepStatus.PARAM # type: ignore[arg-type] + self.task.state.step_status = StepStatus.PARAM # type: ignore[arg-type] else: - self.task.state.status = StepStatus.SUCCESS # type: ignore[arg-type] + self.task.state.step_status = StepStatus.SUCCESS # type: ignore[arg-type] 
await self.push_message(EventType.STEP_OUTPUT.value, result.model_dump(by_alias=True, exclude_none=True)) # 更新输入 self.obj.input.update(result.slot_data) # 恢复State - self.task.state.step_id = current_step_id # type: ignore[arg-type] - self.task.state.step_name = current_step_name # type: ignore[arg-type] + self.task.state.step_id = current_step_id # type: ignore[arg-type] + self.task.state.step_name = current_step_name # type: ignore[arg-type] self.task.tokens.input_tokens += self.obj.tokens.input_tokens self.task.tokens.output_tokens += self.obj.tokens.output_tokens - async def _process_chunk( self, iterator: AsyncGenerator[CallOutputChunk, None], @@ -202,7 +200,6 @@ class StepExecutor(BaseExecutor): return content - async def run(self) -> None: """运行单个步骤""" self.validate_flow_state(self.task) @@ -212,34 +209,34 @@ class StepExecutor(BaseExecutor): await self._run_slot_filling() # 更新状态 - self.task.state.status = StepStatus.RUNNING # type: ignore[arg-type] + self.task.state.step_status = StepStatus.RUNNING # type: ignore[arg-type] self.task.tokens.time = round(datetime.now(UTC).timestamp(), 2) # 推送输入 await self.push_message(EventType.STEP_INPUT.value, self.obj.input) # 执行步骤 - iterator = self.obj.exec(self, self.obj.input) + iterator = self.obj.exec(self, self.obj.input, language=self.task.language) try: content = await self._process_chunk(iterator, to_user=self.obj.to_user) except Exception as e: logger.exception("[StepExecutor] 运行步骤失败,进行异常处理步骤") - self.task.state.status = StepStatus.ERROR # type: ignore[arg-type] + self.task.state.step_status = StepStatus.ERROR # type: ignore[arg-type] await self.push_message(EventType.STEP_OUTPUT.value, {}) if isinstance(e, CallError): - self.task.state.error_info = { # type: ignore[arg-type] + self.task.state.error_info = { # type: ignore[arg-type] "err_msg": e.message, "data": e.data, } else: - self.task.state.error_info = { # type: ignore[arg-type] + self.task.state.error_info = { # type: ignore[arg-type] "err_msg": str(e), 
"data": {}, } return # 更新执行状态 - self.task.state.status = StepStatus.SUCCESS # type: ignore[arg-type] + self.task.state.step_status = StepStatus.SUCCESS # type: ignore[arg-type] self.task.tokens.input_tokens += self.obj.tokens.input_tokens self.task.tokens.output_tokens += self.obj.tokens.output_tokens self.task.tokens.full_time += round(datetime.now(UTC).timestamp(), 2) - self.task.tokens.time @@ -253,16 +250,17 @@ class StepExecutor(BaseExecutor): # 更新context history = FlowStepHistory( task_id=self.task.id, - flow_id=self.task.state.flow_id, # type: ignore[arg-type] - flow_name=self.task.state.flow_name, # type: ignore[arg-type] + flow_id=self.task.state.flow_id, # type: ignore[arg-type] + flow_name=self.task.state.flow_name, # type: ignore[arg-type] + flow_status=self.task.state.flow_status, # type: ignore[arg-type] step_id=self.step.step_id, step_name=self.step.step.name, step_description=self.step.step.description, - status=self.task.state.status, # type: ignore[arg-type] + step_status=self.task.state.step_status, input_data=self.obj.input, output_data=output_data, ) - self.task.context.append(history.model_dump(exclude_none=True, by_alias=True)) + self.task.context.append(history) # 推送输出 await self.push_message(EventType.STEP_OUTPUT.value, output_data) diff --git a/apps/scheduler/mcp/host.py b/apps/scheduler/mcp/host.py index 78aa7bc3ee869e8710e1fb02a2d9fb438d04be34..8e7b26e38e2eba24b9b9c735b93e6a5a12407970 100644 --- a/apps/scheduler/mcp/host.py +++ b/apps/scheduler/mcp/host.py @@ -14,7 +14,7 @@ from apps.llm.function import JsonGenerator from apps.scheduler.mcp.prompt import MEMORY_TEMPLATE from apps.scheduler.pool.mcp.client import MCPClient from apps.scheduler.pool.mcp.pool import MCPPool -from apps.schemas.enum_var import StepStatus +from apps.schemas.enum_var import StepStatus, LanguageType from apps.schemas.mcp import MCPPlanItem, MCPTool from apps.schemas.task import FlowStepHistory from apps.services.task import TaskManager @@ -25,10 +25,18 @@ logger 
= logging.getLogger(__name__) class MCPHost: """MCP宿主服务""" - def __init__(self, user_sub: str, task_id: str, runtime_id: str, runtime_name: str) -> None: + def __init__( + self, + user_sub: str, + task_id: str, + runtime_id: str, + runtime_name: str, + language: LanguageType = LanguageType.CHINESE, + ) -> None: """初始化MCP宿主""" self._user_sub = user_sub self._task_id = task_id + self.language = language # 注意:runtime在工作流中是flow_id和step_description,在Agent中可为标识Agent的id和description self._runtime_id = runtime_id self._runtime_name = runtime_name @@ -40,7 +48,6 @@ class MCPHost: lstrip_blocks=True, ) - async def get_client(self, mcp_id: str) -> MCPClient | None: """获取MCP客户端""" mongo = MongoDB() @@ -59,7 +66,6 @@ class MCPHost: logger.warning("用户 %s 的MCP %s 没有运行中的实例,请检查环境", self._user_sub, mcp_id) return None - async def assemble_memory(self) -> str: """组装记忆""" task = await TaskManager.get_task_by_task_id(self._task_id) @@ -69,16 +75,15 @@ class MCPHost: context_list = [] for ctx_id in self._context_list: - context = next((ctx for ctx in task.context if ctx["_id"] == ctx_id), None) + context = next((ctx for ctx in task.context if ctx.id == ctx_id), None) if not context: continue context_list.append(context) - return self._env.from_string(MEMORY_TEMPLATE).render( + return self._env.from_string(MEMORY_TEMPLATE[self.language]).render( context_list=context_list, ) - async def _save_memory( self, tool: MCPTool, @@ -105,11 +110,12 @@ class MCPHost: task_id=self._task_id, flow_id=self._runtime_id, flow_name=self._runtime_name, + flow_status=StepStatus.RUNNING, step_id=tool.name, step_name=tool.name, # description是规划的实际内容 step_description=plan_item.content, - status=StepStatus.SUCCESS, + step_status=StepStatus.SUCCESS, input_data=input_data, output_data=output_data, ) @@ -120,12 +126,11 @@ class MCPHost: logger.error("任务 %s 不存在", self._task_id) return {} self._context_list.append(context.id) - task.context.append(context.model_dump(by_alias=True, exclude_none=True)) + 
task.context.append(context.model_dump(exclude_none=True, by_alias=True)) await TaskManager.save_task(self._task_id, task) return output_data - async def _fill_params(self, tool: MCPTool, query: str) -> dict[str, Any]: """填充工具参数""" # 更清晰的输入·指令,这样可以调用generate @@ -146,7 +151,6 @@ class MCPHost: ) return await json_generator.generate() - async def call_tool(self, tool: MCPTool, plan_item: MCPPlanItem) -> list[dict[str, Any]]: """调用工具""" # 拿到Client @@ -170,7 +174,6 @@ class MCPHost: return processed_result - async def get_tool_list(self, mcp_id_list: list[str]) -> list[MCPTool]: """获取工具列表""" mongo = MongoDB() diff --git a/apps/scheduler/mcp/plan.py b/apps/scheduler/mcp/plan.py index cd4f5975eea3f023a92626966081c2d1eb33bdb7..78d695f2cc9fc47c995245f0e8cf059b60e76a3d 100644 --- a/apps/scheduler/mcp/plan.py +++ b/apps/scheduler/mcp/plan.py @@ -8,14 +8,16 @@ from apps.llm.function import JsonGenerator from apps.llm.reasoning import ReasoningLLM from apps.scheduler.mcp.prompt import CREATE_PLAN, FINAL_ANSWER from apps.schemas.mcp import MCPPlan, MCPTool +from apps.schemas.enum_var import LanguageType class MCPPlanner: """MCP 用户目标拆解与规划""" - def __init__(self, user_goal: str) -> None: + def __init__(self, user_goal: str, language: LanguageType = LanguageType.CHINESE) -> None: """初始化MCP规划器""" self.user_goal = user_goal + self.language = language self._env = SandboxedEnvironment( loader=BaseLoader, autoescape=True, @@ -25,7 +27,6 @@ class MCPPlanner: self.input_tokens = 0 self.output_tokens = 0 - async def create_plan(self, tool_list: list[MCPTool], max_steps: int = 6) -> MCPPlan: """规划下一步的执行流程,并输出""" # 获取推理结果 @@ -38,7 +39,7 @@ class MCPPlanner: async def _get_reasoning_plan(self, tool_list: list[MCPTool], max_steps: int) -> str: """获取推理大模型的结果""" # 格式化Prompt - template = self._env.from_string(CREATE_PLAN) + template = self._env.from_string(CREATE_PLAN[self.language]) prompt = template.render( goal=self.user_goal, tools=tool_list, @@ -88,7 +89,7 @@ class MCPPlanner: async def 
generate_answer(self, plan: MCPPlan, memory: str) -> str: """生成最终回答""" - template = self._env.from_string(FINAL_ANSWER) + template = self._env.from_string(FINAL_ANSWER[self.language]) prompt = template.render( plan=plan, memory=memory, diff --git a/apps/scheduler/mcp/prompt.py b/apps/scheduler/mcp/prompt.py index b322fb0883e8ed935243389cb86066845a549631..29721b31c92a84d875052f3097af6e3e5c6db82d 100644 --- a/apps/scheduler/mcp/prompt.py +++ b/apps/scheduler/mcp/prompt.py @@ -2,8 +2,11 @@ """MCP相关的大模型Prompt""" from textwrap import dedent +from apps.schemas.enum_var import LanguageType -MCP_SELECT = dedent(r""" +MCP_SELECT: dict[str, str] = { + LanguageType.CHINESE: dedent( + r""" 你是一个乐于助人的智能助手。 你的任务是:根据当前目标,选择最合适的MCP Server。 @@ -61,8 +64,73 @@ MCP_SELECT = dedent(r""" ### 请一步一步思考: -""") -CREATE_PLAN = dedent(r""" +""" + ), + LanguageType.ENGLISH: dedent( + r""" + You are a helpful intelligent assistant. + Your task is to select the most appropriate MCP server based on your current goals. + + ## Things to note when selecting an MCP server: + + 1. Ensure you fully understand your current goals and select the most appropriate MCP server. + 2. Please select from the provided list of MCP servers; do not generate your own. + 3. Please provide the rationale for your choice before making your selection. + 4. Your current goals will be listed below, along with the list of MCP servers. + Please include your thought process in the "Thought Process" section and your selection in the "Selection Results" section. + 5. Your selection must be in JSON format, strictly following the template below. Do not output any additional content: + + ```json + { + "mcp": "The name of your selected MCP server" + } + ``` + + 6. The following example is for reference only. Do not use it as a basis for selecting an MCP server. + + ## Example + + ### Goal + + I need an MCP server to complete a task. 
+ + ### MCP Server List + + - **mcp_1**: "MCP Server 1"; Description of MCP Server 1 + - **mcp_2**: "MCP Server 2"; Description of MCP Server 2 + + ### Think step by step: + + Because the current goal requires an MCP server to complete a task, select mcp_1. + + ### Select Result + + ```json + { + "mcp": "mcp_1" + } + ``` + + ## Let's get started! + + ### Goal + + {{goal}} + + ### MCP Server List + + {% for mcp in mcp_list %} + - **{{mcp.id}}**: "{{mcp.name}}"; {{mcp.description}} + {% endfor %} + + ### Think step by step: + +""" + ), +} +CREATE_PLAN: dict[str, str] = { + LanguageType.CHINESE: dedent( + r""" 你是一个计划生成器。 请分析用户的目标,并生成一个计划。你后续将根据这个计划,一步一步地完成用户的目标。 @@ -72,7 +140,8 @@ CREATE_PLAN = dedent(r""" 2. 计划中的每一个步骤必须且只能使用一个工具。 3. 计划中的步骤必须具有清晰和逻辑的步骤,没有冗余或不必要的步骤。 4. 计划中的最后一步必须是Final工具,以确保计划执行结束。 - + 5.生成的计划必须要覆盖用户的目标,不能遗漏任何用户目标中的内容。 + # 生成计划时的注意事项: - 每一条计划包含3个部分: @@ -93,8 +162,7 @@ CREATE_PLAN = dedent(r""" } ``` - - 在生成计划之前,请一步一步思考,解析用户的目标,并指导你接下来的生成。\ -思考过程应放置在 XML标签中。 + - 在生成计划之前,请一步一步思考,解析用户的目标,并指导你接下来的生成。思考过程应按步骤顺序放置在 XML标签中。 - 计划内容中,可以使用"Result[]"来引用之前计划步骤的结果。例如:"Result[3]"表示引用第三条计划执行后的结果。 - 计划不得多于{{ max_num }}条,且每条计划内容应少于150字。 @@ -106,8 +174,7 @@ CREATE_PLAN = dedent(r""" {% for tool in tools %} - {{ tool.id }}{{tool.name}};{{ tool.description }} {% endfor %} - - Final结束步骤,当执行到这一步时,\ -表示计划执行结束,所得到的结果将作为最终结果。 + - Final结束步骤,当执行到这一步时,表示计划执行结束,所得到的结果将作为最终结果。 # 样例 @@ -162,8 +229,114 @@ CREATE_PLAN = dedent(r""" {{goal}} # 计划 -""") -EVALUATE_PLAN = dedent(r""" +""" + ), + LanguageType.ENGLISH: dedent( + r""" + You are a plan generator. + Please analyze the user's goal and generate a plan. You will then follow this plan to achieve the user's goal step by step. + + # A good plan should: + + 1. Be able to successfully achieve the user's goal. + 2. Each step in the plan must use only one tool. + 3. The steps in the plan must have clear and logical steps, without redundant or unnecessary steps. + 4. 
The last step in the plan must be a Final tool to ensure that the plan is executed. + + # Things to note when generating plans: + + - Each plan contains three parts: + - Plan content: Describes the general content of a single plan step + - Tool ID: Must be selected from the tool list below + - Tool instructions: Rewrite the user's goal to make it more consistent with the tool's input requirements + - Plans must be generated in the following format. Do not output any additional data: + + ```json + { + "plans": [ + { + "content":"Plan content", + "tool":"Tool ID", + "instruction":"Tool instructions" + } + ] + } + ``` + + - Before generating a plan, please think step by step, analyze the user's goal, and guide your next steps. The thought process should be placed in sequential steps within XML tags. + - In the plan content, you can use "Result[]" to reference the results of the previous plan steps. For example: "Result[3]" refers to the result after the third plan is executed. + - The plan should not have more than {{ max_num }} items, and each plan content should be less than 150 words. + + # Tools + + You can access and use some tools, which will be given in the XML tags. + + + {% for tool in tools %} + - {{ tool.id }}{{tool.name}}; {{ tool.description }} + {% endfor %} + - FinalEnd step. When this step is executed, \ + Indicates that the plan execution is completed and the result obtained will be used as the final result. + + + # Example + + ## Target + + Run a new alpine:latest container in the background, mount the host/root folder to /data, and execute the top command. + + ## Plan + + + 1. This goal needs to be completed using Docker. First, you need to select a suitable MCP Server. + 2. The goal can be broken down into the following parts: + - Run the alpine:latest container + - Mount the host directory + - Run in the background + - Execute the top command + 3. 
You need to select an MCP Server first, then generate the Docker command, and finally execute the command. + + + ```json + { + "plans": [ + { + "content": "Select an MCP Server that supports Docker", + "tool": "mcp_selector", + "instruction": "You need an MCP Server that supports running Docker containers" + }, + { + "content": "Use the MCP Server selected in Result[0] to generate Docker commands", + "tool": "command_generator", + "instruction": "Generate Docker command: Run the alpine:latest container in the background, mount /root to /data, and execute the top command" + }, + { + "content": "Execute the command generated by Result[1] on the MCP Server of Result[0]", + "tool": "command_executor", + "instruction": "Execute Docker command" + }, + { + "content": "Task execution completed, the container is running in the background, the result is Result[2]", + "tool": "Final", + "instruction": "" + } + ] + } + ``` + + # Now start generating the plan: + + ## Goal + + {{goal}} + + # Plan +""" + ), +} +EVALUATE_PLAN: dict[str, str] = { + LanguageType.CHINESE: dedent( + r""" 你是一个计划评估器。 请根据给定的计划,和当前计划执行的实际情况,分析当前计划是否合理和完整,并生成改进后的计划。 @@ -209,8 +382,61 @@ EVALUATE_PLAN = dedent(r""" # 现在开始评估计划: -""") -FINAL_ANSWER = dedent(r""" +""" + ), + LanguageType.ENGLISH: dedent( + r""" + You are a plan evaluator. + Based on the given plan and the actual execution of the current plan, analyze whether the current plan is reasonable and complete, and generate an improved plan. + + # A good plan should: + + 1. Be able to successfully achieve the user's goal. + 2. Each step in the plan must use only one tool. + 3. The steps in the plan must have clear and logical steps, without redundant or unnecessary steps. + 4. The last step in the plan must be a Final tool to ensure the completion of the plan execution. + + # Your previous plan was: + + {{ plan }} + + # The execution status of this plan is: + + The execution status of the plan will be placed in the XML tags. 
+ + + {{ memory }} + + + # Notes when conducting the evaluation: + + - Please think step by step, analyze the user's goal, and guide your subsequent generation. The thinking process should be placed in the XML tags. + - The evaluation results are divided into two parts: + - Conclusion of the plan evaluation + - Improved plan + - Please output the evaluation results in the following JSON format: + + ```json + { + "evaluation": "Evaluation results", + "plans": [ + { + "content": "Improved plan content", + "tool": "Tool ID", + "instruction": "Tool instructions" + } + ] + } + ``` + + # Start evaluating the plan now: + +""" + ), +} +FINAL_ANSWER: dict[str, str] = { + LanguageType.CHINESE: dedent( + r""" 综合理解计划执行结果和背景信息,向用户报告目标的完成情况。 # 用户目标 @@ -229,12 +455,50 @@ FINAL_ANSWER = dedent(r""" # 现在,请根据以上信息,向用户报告目标的完成情况: -""") -MEMORY_TEMPLATE = dedent(r""" +""" + ), + LanguageType.ENGLISH: dedent( + r""" + Based on the understanding of the plan execution results and background information, report to the user the completion status of the goal. 
+ + # User goal + + {{ goal }} + + # Plan execution status + + To achieve the above goal, you implemented the following plan: + + {{ memory }} + + # Other background information: + + {{ status }} + + # Now, based on the above information, please report to the user the completion status of the goal: + +""" + ), +} +MEMORY_TEMPLATE: dict[str, str] = { + LanguageType.CHINESE: dedent( + r""" {% for ctx in context_list %} - 第{{ loop.index }}步:{{ ctx.step_description }} - - 调用工具 `{{ ctx.step_id }}`,并提供参数 `{{ ctx.input_data }}` - - 执行状态:{{ ctx.status }} - - 得到数据:`{{ ctx.output_data }}` + - 调用工具 `{{ ctx.step_name }}`,并提供参数 `{{ ctx.input_data|tojson }}`。 + - 执行状态:{{ ctx.step_status }} + - 得到数据:`{{ ctx.output_data|tojson }}` + {% endfor %} +""" + ), + LanguageType.ENGLISH: dedent( + r""" + {% for ctx in context_list %} + - Step {{ loop.index }}: {{ ctx.step_description }} + - Called tool `{{ ctx.step_id }}` and provided parameters `{{ ctx.input_data }}` + - Execution status: {{ ctx.status }} + - Got data: `{{ ctx.output_data }}` {% endfor %} -""") +""" + ), +} diff --git a/apps/scheduler/mcp/select.py b/apps/scheduler/mcp/select.py index 2ff5034471c5e9c38f166c6187b76dfb4596f734..e8b0e88c09ac1d1ac63f995b779da8483a2fce22 100644 --- a/apps/scheduler/mcp/select.py +++ b/apps/scheduler/mcp/select.py @@ -14,6 +14,7 @@ from apps.llm.reasoning import ReasoningLLM from apps.scheduler.mcp.prompt import ( MCP_SELECT, ) +from apps.schemas.enum_var import LanguageType from apps.schemas.mcp import ( MCPCollection, MCPSelectResult, @@ -39,7 +40,6 @@ class MCPSelector: sql += f"'{mcp_id}', " return sql.rstrip(", ") + ")" - async def _get_top_mcp_by_embedding( self, query: str, @@ -49,10 +49,17 @@ class MCPSelector: logger.info("[MCPHelper] 查询MCP Server向量: %s, %s", query, mcp_list) mcp_table = await LanceDB().get_table("mcp") query_embedding = await Embedding.get_embedding([query]) - mcp_vecs = await (await mcp_table.search( - query=query_embedding, - vector_column_name="embedding", - 
)).where(f"id IN {MCPSelector._assemble_sql(mcp_list)}").limit(5).to_list() + mcp_vecs = ( + await ( + await mcp_table.search( + query=query_embedding, + vector_column_name="embedding", + ) + ) + .where(f"id IN {MCPSelector._assemble_sql(mcp_list)}") + .limit(5) + .to_list() + ) # 拿到名称和description logger.info("[MCPHelper] 查询MCP Server名称和描述: %s", mcp_vecs) @@ -72,12 +79,8 @@ class MCPSelector: }]) return llm_mcp_list - async def _get_mcp_by_llm( - self, - query: str, - mcp_list: list[dict[str, str]], - mcp_ids: list[str], + self, query: str, mcp_list: list[dict[str, str]], mcp_ids: list[str], language ) -> MCPSelectResult: """通过LLM选择最合适的MCP Server""" # 初始化jinja2环境 @@ -87,7 +90,7 @@ class MCPSelector: trim_blocks=True, lstrip_blocks=True, ) - template = env.from_string(MCP_SELECT) + template = env.from_string(MCP_SELECT[language]) # 渲染模板 mcp_prompt = template.render( mcp_list=mcp_list, @@ -100,7 +103,6 @@ class MCPSelector: # 使用小模型提取JSON return await self._call_function_mcp(result, mcp_ids) - async def _call_reasoning(self, prompt: str) -> str: """调用大模型进行推理""" logger.info("[MCPHelper] 调用推理大模型") @@ -116,7 +118,6 @@ class MCPSelector: self.output_tokens += llm.output_tokens return result - async def _call_function_mcp(self, reasoning_result: str, mcp_ids: list[str]) -> MCPSelectResult: """调用结构化输出小模型提取JSON""" logger.info("[MCPHelper] 调用结构化输出小模型") @@ -136,11 +137,8 @@ class MCPSelector: raise return result - async def select_top_mcp( - self, - query: str, - mcp_list: list[str], + self, query: str, mcp_list: list[str], language: LanguageType = LanguageType.CHINESE ) -> MCPSelectResult: """ 选择最合适的MCP Server @@ -151,11 +149,12 @@ class MCPSelector: llm_mcp_list = await self._get_top_mcp_by_embedding(query, mcp_list) # 通过LLM选择最合适的 - return await self._get_mcp_by_llm(query, llm_mcp_list, mcp_list) - + return await self._get_mcp_by_llm(query, llm_mcp_list, mcp_list, language) @staticmethod - async def select_top_tool(query: str, mcp_list: list[str], top_n: int = 10) -> 
list[MCPTool]: + async def select_top_tool( + query: str, mcp_list: list[str], top_n: int = 10 + ) -> list[MCPTool]: """选择最合适的工具""" tool_vector = await LanceDB().get_table("mcp_tool") query_embedding = await Embedding.get_embedding([query]) diff --git a/apps/scheduler/mcp_agent/__init__.py b/apps/scheduler/mcp_agent/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..12f5cb68c12e4d19d830a8155eaeb0851fce897d --- /dev/null +++ b/apps/scheduler/mcp_agent/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. +"""Scheduler MCP 模块""" + +from apps.scheduler.mcp.host import MCPHost +from apps.scheduler.mcp.plan import MCPPlanner +from apps.scheduler.mcp.select import MCPSelector + +__all__ = ["MCPHost", "MCPPlanner", "MCPSelector"] diff --git a/apps/scheduler/mcp_agent/agent/base.py b/apps/scheduler/mcp_agent/agent/base.py deleted file mode 100644 index eccb58a9ce8466f67ac76040a53469edac455109..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/agent/base.py +++ /dev/null @@ -1,196 +0,0 @@ -"""MCP Agent基类""" -import logging -from abc import ABC, abstractmethod -from contextlib import asynccontextmanager - -from pydantic import BaseModel, Field, model_validator - -from apps.common.queue import MessageQueue -from apps.schemas.enum_var import AgentState -from apps.schemas.task import Task -from apps.llm.reasoning import ReasoningLLM -from apps.scheduler.mcp_agent.schema import Memory, Message, Role -from apps.services.activity import Activity - -logger = logging.getLogger(__name__) - - -class BaseAgent(BaseModel, ABC): - """ - 用于管理代理状态和执行的抽象基类。 - - 为状态转换、内存管理、 - 以及分步执行循环。子类必须实现`step`方法。 - """ - - msg_queue: MessageQueue - task: Task - name: str = Field(..., description="Agent名称") - agent_id: str = Field(default="", description="Agent ID") - description: str = Field(default="", description="Agent描述") - question: str - # Prompts - next_step_prompt: str | None = Field( - None, 
description="判断下一步动作的提示" - ) - - # Dependencies - llm: ReasoningLLM = Field(default_factory=ReasoningLLM, description="大模型实例") - memory: Memory = Field(default_factory=Memory, description="Agent记忆库") - state: AgentState = Field( - default=AgentState.IDLE, description="Agent状态" - ) - servers_id: list[str] = Field(default_factory=list, description="MCP server id") - - # Execution control - max_steps: int = Field(default=10, description="终止前的最大步长") - current_step: int = Field(default=0, description="执行中的当前步骤") - - duplicate_threshold: int = 2 - - user_prompt: str = r""" - 当前步骤:{step} 工具输出结果:{result} - 请总结当前正在执行的步骤和对应的工具输出结果,内容包括当前步骤是多少,执行的工具是什么,输出是什么。 - 最终以报告的形式展示。 - 如果工具输出结果中执行的工具为terminate,请按照状态输出本次交互过程最终结果并完成对整个报告的总结,不需要输出你的分析过程。 - """ - """用户提示词""" - - class Config: - arbitrary_types_allowed = True - extra = "allow" # Allow extra fields for flexibility in subclasses - - @model_validator(mode="after") - def initialize_agent(self) -> "BaseAgent": - """初始化Agent""" - if self.llm is None or not isinstance(self.llm, ReasoningLLM): - self.llm = ReasoningLLM() - if not isinstance(self.memory, Memory): - self.memory = Memory() - return self - - @asynccontextmanager - async def state_context(self, new_state: AgentState): - """ - Agent状态转换上下文管理器 - - Args: - new_state: 要转变的状态 - - :return: None - :raise ValueError: 如果new_state无效 - """ - if not isinstance(new_state, AgentState): - raise ValueError(f"无效状态: {new_state}") - - previous_state = self.state - self.state = new_state - try: - yield - except Exception as e: - self.state = AgentState.ERROR # Transition to ERROR on failure - raise e - finally: - self.state = previous_state # Revert to previous state - - def update_memory( - self, - role: Role, - content: str, - **kwargs, - ) -> None: - """添加信息到Agent的memory中""" - message_map = { - "user": Message.user_message, - "system": Message.system_message, - "assistant": Message.assistant_message, - "tool": lambda content, **kw: Message.tool_message(content, **kw), - } - - if role not 
in message_map: - raise ValueError(f"不支持的消息角色: {role}") - - # Create message with appropriate parameters based on role - kwargs = {**(kwargs if role == "tool" else {})} - self.memory.add_message(message_map[role](content, **kwargs)) - - async def run(self, request: str | None = None) -> str: - """异步执行Agent的主循环""" - self.task.runtime.question = request - if self.state != AgentState.IDLE: - raise RuntimeError(f"无法从以下状态运行智能体: {self.state}") - - if request: - self.update_memory("user", request) - - results: list[str] = [] - async with self.state_context(AgentState.RUNNING): - while ( - self.current_step < self.max_steps and self.state != AgentState.FINISHED - ): - if not await Activity.is_active(self.task.ids.user_sub): - logger.info("用户终止会话,任务停止!") - return "" - self.current_step += 1 - logger.info(f"执行步骤{self.current_step}/{self.max_steps}") - step_result = await self.step() - - # Check for stuck state - if self.is_stuck(): - self.handle_stuck_state() - result = f"Step {self.current_step}: {step_result}" - results.append(result) - - if self.current_step >= self.max_steps: - self.current_step = 0 - self.state = AgentState.IDLE - result = f"任务终止: 已达到最大步数 ({self.max_steps})" - await self.msg_queue.push_output( - self.task, - event_type="text.add", - data={"text": result}, # type: ignore[arg-type] - ) - results.append(result) - return "\n".join(results) if results else "未执行任何步骤" - - @abstractmethod - async def step(self) -> str: - """ - 执行代理工作流程中的单个步骤。 - - 必须由子类实现,以定义具体的行为。 - """ - - def handle_stuck_state(self): - """通过添加更改策略的提示来处理卡住状态""" - stuck_prompt = "\ - 观察到重复响应。考虑新策略,避免重复已经尝试过的无效路径" - self.next_step_prompt = f"{stuck_prompt}\n{self.next_step_prompt}" - logger.warning(f"检测到智能体处于卡住状态。新增提示:{stuck_prompt}") - - def is_stuck(self) -> bool: - """通过检测重复内容来检查代理是否卡在循环中""" - if len(self.memory.messages) < 2: - return False - - last_message = self.memory.messages[-1] - if not last_message.content: - return False - - duplicate_count = sum( - 1 - for msg in 
reversed(self.memory.messages[:-1]) - if msg.role == "assistant" and msg.content == last_message.content - ) - - return duplicate_count >= self.duplicate_threshold - - @property - def messages(self) -> list[Message]: - """从Agent memory中检索消息列表""" - return self.memory.messages - - @messages.setter - def messages(self, value: list[Message]) -> None: - """设置Agent memory的消息列表""" - self.memory.messages = value diff --git a/apps/scheduler/mcp_agent/agent/mcp.py b/apps/scheduler/mcp_agent/agent/mcp.py deleted file mode 100644 index 378da368aca02d0d628352fcc4816b98b2921d01..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/agent/mcp.py +++ /dev/null @@ -1,81 +0,0 @@ -"""MCP Agent""" -import logging - -from pydantic import Field - -from apps.scheduler.mcp.host import MCPHost -from apps.scheduler.mcp_agent.agent.toolcall import ToolCallAgent -from apps.scheduler.mcp_agent.tool import Terminate, ToolCollection - -logger = logging.getLogger(__name__) - - -class MCPAgent(ToolCallAgent): - """ - 用于与MCP(模型上下文协议)服务器交互。 - - 使用SSE或stdio传输连接到MCP服务器 - 并使服务器的工具 - """ - - name: str = "MCPAgent" - description: str = "一个多功能的智能体,能够使用多种工具(包括基于MCP的工具)解决各种任务" - - # Add general-purpose tools to the tool collection - available_tools: ToolCollection = Field( - default_factory=lambda: ToolCollection( - Terminate(), - ), - ) - - special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name]) - - _initialized: bool = False - - @classmethod - async def create(cls, **kwargs) -> "MCPAgent": # noqa: ANN003 - """创建并初始化MCP Agent实例""" - instance = cls(**kwargs) - await instance.initialize_mcp_servers() - instance._initialized = True - return instance - - async def initialize_mcp_servers(self) -> None: - """初始化与已配置的MCP服务器的连接""" - mcp_host = MCPHost( - self.task.ids.user_sub, - self.task.id, - self.agent_id, - self.description, - ) - mcps = {} - for mcp_id in self.servers_id: - client = await mcp_host.get_client(mcp_id) - if client: - mcps[mcp_id] = client - - for 
mcp_id, mcp_client in mcps.items(): - new_tools = [] - for tool in mcp_client.tools: - original_name = tool.name - # Always prefix with server_id to ensure uniqueness - tool_name = f"mcp_{mcp_id}_{original_name}" - - server_tool = MCPClientTool( - name=tool_name, - description=tool.description, - parameters=tool.inputSchema, - session=mcp_client.session, - server_id=mcp_id, - original_name=original_name, - ) - new_tools.append(server_tool) - self.available_tools.add_tools(*new_tools) - - async def think(self) -> bool: - """使用适当的上下文处理当前状态并决定下一步操作""" - if not self._initialized: - await self.initialize_mcp_servers() - self._initialized = True - - return await super().think() diff --git a/apps/scheduler/mcp_agent/agent/react.py b/apps/scheduler/mcp_agent/agent/react.py deleted file mode 100644 index b56efd8b195eb36c4d5718711cc5f07b5a49812f..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/agent/react.py +++ /dev/null @@ -1,35 +0,0 @@ -from abc import ABC, abstractmethod - -from pydantic import Field - -from apps.schemas.enum_var import AgentState -from apps.llm.reasoning import ReasoningLLM -from apps.scheduler.mcp_agent.agent.base import BaseAgent -from apps.scheduler.mcp_agent.schema import Memory - - -class ReActAgent(BaseAgent, ABC): - name: str - description: str | None = None - - system_prompt: str | None = None - next_step_prompt: str | None = None - - llm: ReasoningLLM | None = Field(default_factory=ReasoningLLM) - memory: Memory = Field(default_factory=Memory) - state: AgentState = AgentState.IDLE - - @abstractmethod - async def think(self) -> bool: - """处理当前状态并决定下一步操作""" - - @abstractmethod - async def act(self) -> str: - """执行已决定的行动""" - - async def step(self) -> str: - """执行一个步骤:思考和行动""" - should_act = await self.think() - if not should_act: - return "思考完成-无需采取任何行动" - return await self.act() diff --git a/apps/scheduler/mcp_agent/agent/toolcall.py b/apps/scheduler/mcp_agent/agent/toolcall.py deleted file mode 100644 index 
1e22099ce1d2e2f2f54a3bc018511acf887a91a1..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/agent/toolcall.py +++ /dev/null @@ -1,238 +0,0 @@ -import asyncio -import json -import logging -from typing import Any, Optional - -from pydantic import Field - -from apps.schemas.enum_var import AgentState -from apps.llm.function import JsonGenerator -from apps.llm.patterns import Select -from apps.scheduler.mcp_agent.agent.react import ReActAgent -from apps.scheduler.mcp_agent.schema import Function, Message, ToolCall -from apps.scheduler.mcp_agent.tool import Terminate, ToolCollection - -logger = logging.getLogger(__name__) - - -class ToolCallAgent(ReActAgent): - """用于处理工具/函数调用的基本Agent类""" - - name: str = "toolcall" - description: str = "可以执行工具调用的智能体" - - available_tools: ToolCollection = ToolCollection( - Terminate(), - ) - tool_choices: str = "auto" - special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name]) - - tool_calls: list[ToolCall] = Field(default_factory=list) - _current_base64_image: str | None = None - - max_observe: int | bool | None = None - - async def think(self) -> bool: - """使用工具处理当前状态并决定下一步行动""" - messages = [] - for message in self.messages: - if isinstance(message, Message): - message = message.to_dict() - messages.append(message) - try: - # 通过工具获得响应 - select_obj = Select() - choices = [] - for available_tool in self.available_tools.to_params(): - choices.append(available_tool.get("function")) - - tool = await select_obj.generate(question=self.question, choices=choices) - if tool in self.available_tools.tool_map: - schema = self.available_tools.tool_map[tool].parameters - json_generator = JsonGenerator( - query="根据跟定的信息,获取工具参数", - conversation=messages, - schema=schema, - ) # JsonGenerator - parameters = await json_generator.generate() - - else: - raise ValueError(f"尝试调用不存在的工具: {tool}") - except Exception as e: - raise - self.tool_calls = tool_calls = [ToolCall(id=tool, function=Function(name=tool, 
arguments=parameters))] - content = f"选择的执行工具为:{tool}, 参数为{parameters}" - - logger.info( - f"{self.name} 选择 {len(tool_calls) if tool_calls else 0}个工具执行" - ) - if tool_calls: - logger.info( - f"准备使用的工具: {[call.function.name for call in tool_calls]}" - ) - logger.info(f"工具参数: {tool_calls[0].function.arguments}") - - try: - - assistant_msg = ( - Message.from_tool_calls(content=content, tool_calls=self.tool_calls) - if self.tool_calls - else Message.assistant_message(content) - ) - self.memory.add_message(assistant_msg) - - if not self.tool_calls: - return bool(content) - - return bool(self.tool_calls) - except Exception as e: - logger.error(f"{self.name}的思考过程遇到了问题:: {e}") - self.memory.add_message( - Message.assistant_message( - f"处理时遇到错误: {str(e)}" - ) - ) - return False - - async def act(self) -> str: - """执行工具调用并处理其结果""" - if not self.tool_calls: - # 如果没有工具调用,则返回最后的消息内容 - return self.messages[-1].content or "没有要执行的内容或命令" - - results = [] - for command in self.tool_calls: - await self.msg_queue.push_output( - self.task, - event_type="text.add", - data={"text": f"正在执行工具{command.function.name}"} - ) - - self._current_base64_image = None - - result = await self.execute_tool(command) - - if self.max_observe: - result = result[: self.max_observe] - - push_result = "" - async for chunk in self.llm.call( - messages=[{"role": "system", "content": "You are a helpful asistant."}, - {"role": "user", "content": self.user_prompt.format( - step=self.current_step, - result=result, - )}, ], streaming=False - ): - push_result += chunk - self.task.tokens.input_tokens += self.llm.input_tokens - self.task.tokens.output_tokens += self.llm.output_tokens - await self.msg_queue.push_output( - self.task, - event_type="text.add", - data={"text": push_result}, # type: ignore[arg-type] - ) - - await self.msg_queue.push_output( - self.task, - event_type="text.add", - data={"text": f"工具{command.function.name}执行完成"}, # type: ignore[arg-type] - ) - - logger.info( - 
f"工具'{command.function.name}'执行完成! 执行结果为: {result}" - ) - - # 将工具响应添加到内存 - tool_msg = Message.tool_message( - content=result, - tool_call_id=command.id, - name=command.function.name, - ) - self.memory.add_message(tool_msg) - results.append(result) - self.question += ( - f"\n已执行工具{command.function.name}, " - f"作用是{self.available_tools.tool_map[command.function.name].description},结果为{result}" - ) - - return "\n\n".join(results) - - async def execute_tool(self, command: ToolCall) -> str: - """执行单个工具调用""" - if not command or not command.function or not command.function.name: - return "错误:无效的命令格式" - - name = command.function.name - if name not in self.available_tools.tool_map: - return f"错误:未知工具 '{name}'" - - try: - # 解析参数 - args = command.function.arguments - # 执行工具 - logger.info(f"激活工具:'{name}'...") - result = await self.available_tools.execute(name=name, tool_input=args) - - # 执行特殊工具 - await self._handle_special_tool(name=name, result=result) - - # 格式化结果 - observation = ( - f"观察到执行的工具 `{name}`的输出:\n{str(result)}" - if result - else f"工具 `{name}` 已完成,无输出" - ) - - return observation - except json.JSONDecodeError: - error_msg = f"解析{name}的参数时出错:JSON格式无效" - logger.error( - f"{name}”的参数没有意义-无效的JSON,参数:{command.function.arguments}" - ) - return f"错误: {error_msg}" - except Exception as e: - error_msg = f"工具 '{name}' 遇到问题: {str(e)}" - logger.exception(error_msg) - return f"错误: {error_msg}" - - async def _handle_special_tool(self, name: str, result: Any, **kwargs): - """处理特殊工具的执行和状态变化""" - if not self._is_special_tool(name): - return - - if self._should_finish_execution(name=name, result=result, **kwargs): - # 将智能体状态设为finished - logger.info(f"特殊工具'{name}'已完成任务!") - self.state = AgentState.FINISHED - - @staticmethod - def _should_finish_execution(**kwargs) -> bool: - """确定工具执行是否应完成""" - return True - - def _is_special_tool(self, name: str) -> bool: - """检查工具名称是否在特殊工具列表中""" - return name.lower() in [n.lower() for n in self.special_tool_names] - - async def cleanup(self): - 
"""清理Agent工具使用的资源。""" - logger.info(f"正在清理智能体的资源'{self.name}'...") - for tool_name, tool_instance in self.available_tools.tool_map.items(): - if hasattr(tool_instance, "cleanup") and asyncio.iscoroutinefunction( - tool_instance.cleanup - ): - try: - logger.debug(f"清理工具: {tool_name}") - await tool_instance.cleanup() - except Exception as e: - logger.error( - f"清理工具时发生错误'{tool_name}': {e}", exc_info=True - ) - logger.info(f"智能体清理完成'{self.name}'.") - - async def run(self, request: Optional[str] = None) -> str: - """运行Agent""" - try: - return await super().run(request) - finally: - await self.cleanup() diff --git a/apps/scheduler/mcp_agent/base.py b/apps/scheduler/mcp_agent/base.py new file mode 100644 index 0000000000000000000000000000000000000000..103ec60daaccbdaa401e746360354b73341d8d08 --- /dev/null +++ b/apps/scheduler/mcp_agent/base.py @@ -0,0 +1,48 @@ +from typing import Any +import json +from jsonschema import validate +import logging +from apps.llm.function import JsonGenerator +from apps.llm.reasoning import ReasoningLLM + +logger = logging.getLogger(__name__) + + +class MCPBase: + """MCP基类""" + + @staticmethod + async def get_resoning_result(prompt: str, resoning_llm: ReasoningLLM = ReasoningLLM()) -> str: + """获取推理结果""" + # 调用推理大模型 + message = [ + {"role": "system", "content": prompt}, + {"role": "user", "content": "Please provide a JSON response based on the above information and schema."}, + ] + result = "" + async for chunk in resoning_llm.call( + message, + streaming=False, + temperature=0.07, + result_only=False, + ): + result += chunk + + return result + + @staticmethod + async def _parse_result(result: str, schema: dict[str, Any]) -> str: + """解析推理结果""" + json_result = await JsonGenerator._parse_result_by_stack(result, schema) + if json_result is not None: + return json_result + json_generator = JsonGenerator( + "Please provide a JSON response based on the above information and schema.\n\n", + [ + {"role": "system", "content": "You are a helpful 
assistant."}, + {"role": "user", "content": result}, + ], + schema, + ) + json_result = await json_generator.generate() + return json_result diff --git a/apps/scheduler/mcp_agent/host.py b/apps/scheduler/mcp_agent/host.py new file mode 100644 index 0000000000000000000000000000000000000000..2f8048c6eba1bda055732935ebe3ae72c0379f49 --- /dev/null +++ b/apps/scheduler/mcp_agent/host.py @@ -0,0 +1,110 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. +"""MCP宿主""" + +import json +import logging +from typing import Any + +from jinja2 import BaseLoader +from jinja2.sandbox import SandboxedEnvironment + +from apps.llm.function import JsonGenerator +from apps.llm.reasoning import ReasoningLLM +from apps.scheduler.mcp.prompt import MEMORY_TEMPLATE +from apps.scheduler.mcp_agent.base import MCPBase +from apps.scheduler.mcp_agent.prompt import GEN_PARAMS, REPAIR_PARAMS +from apps.schemas.mcp import MCPTool +from apps.schemas.task import Task +from apps.schemas.enum_var import LanguageType + +logger = logging.getLogger(__name__) + +_env = SandboxedEnvironment( + loader=BaseLoader, + autoescape=False, + trim_blocks=True, + lstrip_blocks=True, +) + + +def tojson_filter(value): + return json.dumps(value, ensure_ascii=False, separators=(',', ':')) + + +_env.filters["tojson"] = tojson_filter + +LLM_QUERY_FIX = { + LanguageType.CHINESE: "请生成修复之后的工具参数", + LanguageType.ENGLISH: "Please generate the tool parameters after repair", +} + + +class MCPHost(MCPBase): + """MCP宿主服务""" + + @staticmethod + async def assemble_memory(task: Task) -> str: + """组装记忆""" + + return _env.from_string(MEMORY_TEMPLATE[task.language]).render( + context_list=task.context, + ) + + @staticmethod + async def _get_first_input_params( + mcp_tool: MCPTool, + goal: str, + current_goal: str, + task: Task, + resoning_llm: ReasoningLLM = ReasoningLLM(), + ) -> dict[str, Any]: + """填充工具参数""" + # 更清晰的输入·指令,这样可以调用generate + prompt = _env.from_string(GEN_PARAMS[task.language]).render( + 
tool_name=mcp_tool.name, + tool_description=mcp_tool.description, + goal=goal, + current_goal=current_goal, + input_schema=mcp_tool.input_schema, + background_info=await MCPHost.assemble_memory(task), + ) + result = await MCPHost.get_resoning_result(prompt, resoning_llm) + # 使用JsonGenerator解析结果 + result = await MCPHost._parse_result( + result, + mcp_tool.input_schema, + ) + return result + + @staticmethod + async def _fill_params( + mcp_tool: MCPTool, + goal: str, + current_goal: str, + current_input: dict[str, Any], + error_message: str = "", + params: dict[str, Any] = {}, + params_description: str = "", + language: LanguageType = LanguageType.CHINESE, + ) -> dict[str, Any]: + llm_query = LLM_QUERY_FIX[language] + prompt = _env.from_string(REPAIR_PARAMS[language]).render( + tool_name=mcp_tool.name, + goal=goal, + current_goal=current_goal, + tool_description=mcp_tool.description, + input_schema=mcp_tool.input_schema, + input_params=current_input, + error_message=error_message, + params=params, + params_description=params_description, + ) + json_generator = JsonGenerator( + llm_query, + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": prompt}, + ], + mcp_tool.input_schema, + ) + return await json_generator.generate() diff --git a/apps/scheduler/mcp_agent/plan.py b/apps/scheduler/mcp_agent/plan.py new file mode 100644 index 0000000000000000000000000000000000000000..d280f1a4fdcd66b31f1a9a9fc204aa69973be513 --- /dev/null +++ b/apps/scheduler/mcp_agent/plan.py @@ -0,0 +1,472 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
+"""MCP 用户目标拆解与规划""" + +import logging +from collections.abc import AsyncGenerator +from typing import Any + +from jinja2 import BaseLoader +from jinja2.sandbox import SandboxedEnvironment + +from apps.llm.reasoning import ReasoningLLM +from apps.scheduler.mcp_agent.base import MCPBase +from apps.scheduler.mcp_agent.prompt import ( + CHANGE_ERROR_MESSAGE_TO_DESCRIPTION, + CREATE_PLAN, + EVALUATE_GOAL, + FINAL_ANSWER, + GEN_STEP, + GENERATE_FLOW_NAME, + GENERATE_FLOW_EXCUTE_RISK, + GET_MISSING_PARAMS, + GET_REPLAN_START_STEP_INDEX, + IS_PARAM_ERROR, + RECREATE_PLAN, + RISK_EVALUATE, + TOOL_EXECUTE_ERROR_TYPE_ANALYSIS, + TOOL_SKIP, +) +from apps.schemas.enum_var import LanguageType +from apps.scheduler.slot.slot import Slot +from apps.schemas.mcp import ( + GoalEvaluationResult, + FlowName, + FlowRisk, + IsParamError, + MCPPlan, + MCPTool, + RestartStepIndex, + Step, + ToolExcutionErrorType, + ToolRisk, + ToolSkip, +) +from apps.schemas.task import Task + +_env = SandboxedEnvironment( + loader=BaseLoader, + autoescape=False, + trim_blocks=True, + lstrip_blocks=True, +) +logger = logging.getLogger(__name__) + + +class MCPPlanner(MCPBase): + """MCP 用户目标拆解与规划""" + + @staticmethod + async def evaluate_goal( + goal: str, tool_list: list[MCPTool], + resoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE,) -> GoalEvaluationResult: + """评估用户目标的可行性""" + # 获取推理结果 + result = await MCPPlanner._get_reasoning_evaluation(goal, tool_list, resoning_llm, language) + + # 返回评估结果 + return await MCPPlanner._parse_evaluation_result(result) + + @staticmethod + async def _get_reasoning_evaluation( + goal, tool_list: list[MCPTool], + resoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE,) -> str: + """获取推理大模型的评估结果""" + template = _env.from_string(EVALUATE_GOAL[language]) + prompt = template.render( + goal=goal, + tools=tool_list, + ) + return await MCPPlanner.get_resoning_result(prompt, resoning_llm) + + 
@staticmethod + async def _parse_evaluation_result(result: str) -> GoalEvaluationResult: + """将推理结果解析为结构化数据""" + schema = GoalEvaluationResult.model_json_schema() + evaluation = await MCPPlanner._parse_result(result, schema) + # 使用GoalEvaluationResult模型解析结果 + return GoalEvaluationResult.model_validate(evaluation) + + @staticmethod + async def get_flow_name( + user_goal: str, + resoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> FlowName: + """获取当前流程的名称""" + + result = await MCPPlanner._get_reasoning_flow_name(user_goal, resoning_llm, language) + result = await MCPPlanner._parse_result(result, FlowName.model_json_schema()) + # 使用FlowName模型解析结果 + return FlowName.model_validate(result) + + @staticmethod + async def _get_reasoning_flow_name( + user_goal: str, + resoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> str: + """获取推理大模型的流程名称""" + template = _env.from_string(GENERATE_FLOW_NAME[language]) + prompt = template.render(goal=user_goal) + return await MCPPlanner.get_resoning_result(prompt, resoning_llm) + + @staticmethod + async def get_flow_excute_risk( + user_goal: str, + tools: list[MCPTool], + resoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> FlowRisk: + """获取当前流程的风险评估结果""" + result = await MCPPlanner._get_reasoning_flow_risk(user_goal, tools, resoning_llm, language) + result = await MCPPlanner._parse_result(result, FlowRisk.model_json_schema()) + # 使用FlowRisk模型解析结果 + return FlowRisk.model_validate(result) + + @staticmethod + async def _get_reasoning_flow_risk( + user_goal: str, + tools: list[MCPTool], + resoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> str: + """获取推理大模型的流程风险""" + template = _env.from_string(GENERATE_FLOW_EXCUTE_RISK[language]) + prompt = template.render( + goal=user_goal, + tools=tools, + ) + return await MCPPlanner.get_resoning_result(prompt, 
resoning_llm) + + @staticmethod + async def get_replan_start_step_index( + user_goal: str, + error_message: str, + current_plan: MCPPlan | None = None, + history: str = "", + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> RestartStepIndex: + """获取重新规划的步骤索引""" + # 获取推理结果 + template = _env.from_string(GET_REPLAN_START_STEP_INDEX[language]) + prompt = template.render( + goal=user_goal, + error_message=error_message, + current_plan=current_plan.model_dump(exclude_none=True, by_alias=True), + history=history, + ) + result = await MCPPlanner.get_resoning_result(prompt, reasoning_llm) + # 解析为结构化数据 + schema = RestartStepIndex.model_json_schema() + schema["properties"]["start_index"]["maximum"] = len(current_plan.plans) - 1 + schema["properties"]["start_index"]["minimum"] = 0 + restart_index = await MCPPlanner._parse_result(result, schema) + # 使用RestartStepIndex模型解析结果 + return RestartStepIndex.model_validate(restart_index) + + @staticmethod + async def create_plan( + user_goal: str, + is_replan: bool = False, + error_message: str = "", + current_plan: MCPPlan | None = None, + tool_list: list[MCPTool] = [], + max_steps: int = 6, + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> MCPPlan: + """规划下一步的执行流程,并输出""" + # 获取推理结果 + result = await MCPPlanner._get_reasoning_plan( + user_goal, is_replan, error_message, current_plan, tool_list, max_steps, reasoning_llm, language + ) + + # 解析为结构化数据 + return await MCPPlanner._parse_plan_result(result, max_steps) + + @staticmethod + async def _get_reasoning_plan( + user_goal: str, + is_replan: bool = False, + error_message: str = "", + current_plan: MCPPlan | None = None, + tool_list: list[MCPTool] = [], + max_steps: int = 10, + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> str: + """获取推理大模型的结果""" + # 格式化Prompt + tool_ids = [tool.id for tool in tool_list] + if is_replan: + 
template = _env.from_string(RECREATE_PLAN[language]) + prompt = template.render( + current_plan=current_plan.model_dump(exclude_none=True, by_alias=True), + error_message=error_message, + goal=user_goal, + tools=tool_list, + max_num=max_steps, + ) + else: + template = _env.from_string(CREATE_PLAN[language]) + prompt = template.render( + goal=user_goal, + tools=tool_list, + max_num=max_steps, + ) + return await MCPPlanner.get_resoning_result(prompt, reasoning_llm) + + @staticmethod + async def _parse_plan_result(result: str, max_steps: int) -> MCPPlan: + """将推理结果解析为结构化数据""" + # 格式化Prompt + schema = MCPPlan.model_json_schema() + schema["properties"]["plans"]["maxItems"] = max_steps + plan = await MCPPlanner._parse_result(result, schema) + # 使用Function模型解析结果 + return MCPPlan.model_validate(plan) + + @staticmethod + async def create_next_step( + goal: str, + history: str, + tools: list[MCPTool], + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> Step: + """创建下一步的执行步骤""" + # 获取推理结果 + template = _env.from_string(GEN_STEP[language]) + prompt = template.render(goal=goal, history=history, tools=tools) + result = await MCPPlanner.get_resoning_result(prompt, reasoning_llm) + + # 解析为结构化数据 + schema = Step.model_json_schema() + if "enum" not in schema["properties"]["tool_id"]: + schema["properties"]["tool_id"]["enum"] = [] + for tool in tools: + schema["properties"]["tool_id"]["enum"].append(tool.id) + step = await MCPPlanner._parse_result(result, schema) + logger.info("[MCPPlanner] 创建下一步的执行步骤: %s", step) + # 使用Step模型解析结果 + + step = Step.model_validate(step) + return step + + @staticmethod + async def tool_skip( + task: Task, + step_id: str, + step_name: str, + step_instruction: str, + step_content: str, + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> ToolSkip: + """判断当前步骤是否需要跳过""" + # 获取推理结果 + template = _env.from_string(TOOL_SKIP[language]) + from 
apps.scheduler.mcp_agent.host import MCPHost + history = await MCPHost.assemble_memory(task) + prompt = template.render( + step_id=step_id, + step_name=step_name, + step_instruction=step_instruction, + step_content=step_content, + history=history, + goal=task.runtime.question + ) + result = await MCPPlanner.get_resoning_result(prompt, reasoning_llm) + + # 解析为结构化数据 + schema = ToolSkip.model_json_schema() + skip_result = await MCPPlanner._parse_result(result, schema) + # 使用ToolSkip模型解析结果 + return ToolSkip.model_validate(skip_result) + + @staticmethod + async def get_tool_risk( + tool: MCPTool, + input_parm: dict[str, Any], + additional_info: str = "", + resoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> ToolRisk: + """获取MCP工具的风险评估结果""" + # 获取推理结果 + result = await MCPPlanner._get_reasoning_risk( + tool, input_parm, additional_info, resoning_llm, language + ) + + # 返回风险评估结果 + return await MCPPlanner._parse_risk_result(result) + + @staticmethod + async def _get_reasoning_risk( + tool: MCPTool, + input_param: dict[str, Any], + additional_info: str, + resoning_llm: ReasoningLLM, + language: LanguageType = LanguageType.CHINESE, + ) -> str: + """获取推理大模型的风险评估结果""" + template = _env.from_string(RISK_EVALUATE[language]) + prompt = template.render( + tool_name=tool.name, + tool_description=tool.description, + input_param=input_param, + additional_info=additional_info, + ) + return await MCPPlanner.get_resoning_result(prompt, resoning_llm) + + @staticmethod + async def _parse_risk_result(result: str) -> ToolRisk: + """将推理结果解析为结构化数据""" + schema = ToolRisk.model_json_schema() + risk = await MCPPlanner._parse_result(result, schema) + # 使用ToolRisk模型解析结果 + return ToolRisk.model_validate(risk) + + @staticmethod + async def _get_reasoning_tool_execute_error_type( + user_goal: str, + current_plan: MCPPlan, + tool: MCPTool, + input_param: dict[str, Any], + error_message: str, + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: 
LanguageType = LanguageType.CHINESE, + ) -> str: + """获取推理大模型的工具执行错误类型""" + template = _env.from_string(TOOL_EXECUTE_ERROR_TYPE_ANALYSIS[language]) + prompt = template.render( + goal=user_goal, + current_plan=current_plan.model_dump(exclude_none=True, by_alias=True), + tool_name=tool.name, + tool_description=tool.description, + input_param=input_param, + error_message=error_message, + ) + return await MCPPlanner.get_resoning_result(prompt, reasoning_llm) + + @staticmethod + async def _parse_tool_execute_error_type_result(result: str) -> ToolExcutionErrorType: + """将推理结果解析为工具执行错误类型""" + schema = ToolExcutionErrorType.model_json_schema() + error_type = await MCPPlanner._parse_result(result, schema) + # 使用ToolExcutionErrorType模型解析结果 + return ToolExcutionErrorType.model_validate(error_type) + + @staticmethod + async def get_tool_execute_error_type( + user_goal: str, + current_plan: MCPPlan, + tool: MCPTool, + input_param: dict[str, Any], + error_message: str, + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> ToolExcutionErrorType: + """获取MCP工具执行错误类型""" + # 获取推理结果 + result = await MCPPlanner._get_reasoning_tool_execute_error_type( + user_goal, current_plan, tool, input_param, error_message, reasoning_llm, language + ) + # 返回工具执行错误类型 + return await MCPPlanner._parse_tool_execute_error_type_result(result) + + @staticmethod + async def is_param_error( + goal: str, + history: str, + error_message: str, + tool: MCPTool, + step_description: str, + input_params: dict[str, Any], + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> IsParamError: + """判断错误信息是否是参数错误""" + tmplate = _env.from_string(IS_PARAM_ERROR[language]) + prompt = tmplate.render( + goal=goal, + history=history, + step_id=tool.id, + step_name=tool.name, + step_description=step_description, + input_params=input_params, + error_message=error_message, + ) + result = await MCPPlanner.get_resoning_result(prompt, 
reasoning_llm) + # 解析为结构化数据 + schema = IsParamError.model_json_schema() + is_param_error = await MCPPlanner._parse_result(result, schema) + # 使用IsParamError模型解析结果 + return IsParamError.model_validate(is_param_error) + + @staticmethod + async def change_err_message_to_description( + error_message: str, + tool: MCPTool, + input_params: dict[str, Any], + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> str: + """将错误信息转换为工具描述""" + template = _env.from_string(CHANGE_ERROR_MESSAGE_TO_DESCRIPTION[language]) + prompt = template.render( + error_message=error_message, + tool_name=tool.name, + tool_description=tool.description, + input_schema=tool.input_schema, + input_params=input_params, + ) + result = await MCPPlanner.get_resoning_result(prompt, reasoning_llm) + return result + + @staticmethod + async def get_missing_param( + tool: MCPTool, + input_param: dict[str, Any], + error_message: str, + reasoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> list[str]: + """获取缺失的参数""" + slot = Slot(schema=tool.input_schema) + template = _env.from_string(GET_MISSING_PARAMS[language]) + schema_with_null = slot.add_null_to_basic_types() + prompt = template.render( + tool_name=tool.name, + tool_description=tool.description, + input_param=input_param, + schema=schema_with_null, + error_message=error_message, + ) + result = await MCPPlanner.get_resoning_result(prompt, reasoning_llm) + # 解析为结构化数据 + input_param_with_null = await MCPPlanner._parse_result(result, schema_with_null) + return input_param_with_null + + @staticmethod + async def generate_answer( + user_goal: str, + memory: str, + resoning_llm: ReasoningLLM = ReasoningLLM(), + language: LanguageType = LanguageType.CHINESE, + ) -> AsyncGenerator[str, None]: + """生成最终回答""" + template = _env.from_string(FINAL_ANSWER[language]) + prompt = template.render( + memory=memory, + goal=user_goal, + ) + async for chunk in resoning_llm.call( + 
[{"role": "user", "content": prompt}], + streaming=True, + temperature=0.07, + ): + yield chunk diff --git a/apps/scheduler/mcp_agent/prompt.py b/apps/scheduler/mcp_agent/prompt.py new file mode 100644 index 0000000000000000000000000000000000000000..d030c6f5aa9d06a6ea877d257f5b334c3f9c392c --- /dev/null +++ b/apps/scheduler/mcp_agent/prompt.py @@ -0,0 +1,2471 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. +"""MCP相关的大模型Prompt""" +from apps.schemas.enum_var import LanguageType +from textwrap import dedent + +MCP_SELECT: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个乐于助人的智能助手。 + 你的任务是:根据当前目标,选择最合适的MCP Server。 + + ## 选择MCP Server时的注意事项: + + 1. 确保充分理解当前目标,选择最合适的MCP Server。 + 2. 请在给定的MCP Server列表中选择,不要自己生成MCP Server。 + 3. 请先给出你选择的理由,再给出你的选择。 + 4. 当前目标将在下面给出,MCP Server列表也会在下面给出。 + 请将你的思考过程放在"思考过程"部分,将你的选择放在"选择结果"部分。 + 5. 选择必须是JSON格式,严格按照下面的模板,不要输出任何其他内容: + + ```json + { + "mcp": "你选择的MCP Server的名称" + } + ``` + + 6. 下面的示例仅供参考,不要将示例中的内容作为选择MCP Server的依据。 + + ## 示例 + + ### 目标 + + 我需要一个MCP Server来完成一个任务。 + + ### MCP Server列表 + + - **mcp_1**: "MCP Server 1";MCP Server 1的描述 + - **mcp_2**: "MCP Server 2";MCP Server 2的描述 + + ### 请一步一步思考: + + 因为当前目标需要一个MCP Server来完成一个任务,所以选择mcp_1。 + + ### 选择结果 + + ```json + { + "mcp": "mcp_1" + } + ``` + + ## 现在开始! + + ### 目标 + + {{goal}} + + ### MCP Server列表 + + {% for mcp in mcp_list %} + - **{{mcp.id}}**: "{{mcp.name}}";{{mcp.description}} + {% endfor %} + + ### 请一步一步思考: + + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are an intelligent assistant who is willing to help. + Your task is: according to the current goal, select the most suitable MCP Server. + + ## Notes when selecting MCP Server: + + 1. Make sure to fully understand the current goal and select the most suitable MCP Server. + 2. Please select from the given MCP Server list, do not generate MCP Server by yourself. + 3. Please first give your reason for selection, then give your selection. + 4. 
The current goal will be given below, and the MCP Server list will also be given below. + Please put your thinking process in the "Thinking Process" part, and put your selection in the "Selection Result" part. + 5. The selection must be in JSON format, strictly follow the template below, do not output any other content: + + ```json + { + "mcp": "The name of the MCP Server you selected" + } + ``` + 6. The example below is for reference only, do not use the content in the example as the basis for selecting MCP Server. + + ## Example + + ### Goal + + I need an MCP Server to complete a task. + + ### MCP Server List + + - **mcp_1**: "MCP Server 1";Description of MCP Server 1 + - **mcp_2**: "MCP Server 2";Description of MCP Server 2 + + ### Please think step by step: + + Because the current goal needs an MCP Server to complete a task, so select mcp_1. + + ### Selection Result + + ```json + { + "mcp": "mcp_1" + } + ``` + + ## Now start! + ### Goal + + {{goal}} + + ### MCP Server List + + {% for mcp in mcp_list %} + - **{{mcp.id}}**: "{{mcp.name}}";{{mcp.description}} + {% endfor %} + + ### Please think step by step: + """ + ), +} +TOOL_SELECT: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个乐于助人的智能助手。 + 你的任务是:根据当前目标,附加信息,选择最合适的MCP工具。 + ## 选择MCP工具时的注意事项: + 1. 确保充分理解当前目标,选择实现目标所需的MCP工具。 + 2. 请在给定的MCP工具列表中选择,不要自己生成MCP工具。 + 3. 可以选择一些辅助工具,但必须确保这些工具与当前目标相关。 + 4. 注意,返回的工具ID必须是MCP工具的ID,而不是名称。 + 5. 不要选择不存在的工具。 + 必须按照以下格式生成选择结果,不要输出任何其他内容: + ```json + { + "tool_ids": ["工具ID1", "工具ID2", ...] + } + ``` + + # 示例 + ## 目标 + 调优mysql性能 + ## MCP工具列表 + + - mcp_tool_1 MySQL链接池工具;用于优化MySQL链接池 + - mcp_tool_2 MySQL性能调优工具;用于分析MySQL性能瓶颈 + - mcp_tool_3 MySQL查询优化工具;用于优化MySQL查询语句 + - mcp_tool_4 MySQL索引优化工具;用于优化MySQL索引 + - mcp_tool_5 文件存储工具;用于存储文件 + - mcp_tool_6 mongoDB工具;用于操作MongoDB数据库 + + ## 附加信息 + 1. 当前MySQL数据库的版本是8.0.26 + 2. 
当前MySQL数据库的配置文件路径是/etc/my.cnf,并含有以下配置项 + ```json + { + "max_connections": 1000, + "innodb_buffer_pool_size": "1G", + "query_cache_size": "64M" + } + ##输出 + ```json + { + "tool_ids": ["mcp_tool_1", "mcp_tool_2", "mcp_tool_3", "mcp_tool_4"] + } + ``` + # 现在开始! + ## 目标 + {{goal}} + ## MCP工具列表 + + {% for tool in tools %} + - {{tool.id}} {{tool.name}};{{tool.description}} + {% endfor %} + + ## 附加信息 + {{additional_info}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are an intelligent assistant who is willing to help. + Your task is: according to the current goal, additional information, select the most suitable MCP tool. + ## Notes when selecting MCP tool: + 1. Make sure to fully understand the current goal and select the MCP tool that can achieve the goal. + 2. Please select from the given MCP tool list, do not generate MCP tool by yourself. + 3. You can select some auxiliary tools, but you must ensure that these tools are related to the current goal. + 4. Note that the returned tool ID must be the ID of the MCP tool, not the name. + 5. Do not select non-existent tools. + Must generate the selection result in the following format, do not output any other content: + ```json + { + "tool_ids": ["tool_id1", "tool_id2", ...] + } + ``` + + # Example + ## Goal + Optimize MySQL performance + ## MCP Tool List + + - mcp_tool_1 MySQL connection pool tool;used to optimize MySQL connection pool + - mcp_tool_2 MySQL performance tuning tool;used to analyze MySQL performance bottlenecks + - mcp_tool_3 MySQL query optimization tool;used to optimize MySQL query statements + - mcp_tool_4 MySQL index optimization tool;used to optimize MySQL index + - mcp_tool_5 File storage tool;used to store files + - mcp_tool_6 MongoDB tool;used to operate MongoDB database + + ## Additional Information + 1. The current MySQL database version is 8.0.26 + 2. 
The current MySQL database configuration file path is /etc/my.cnf, and contains the following configuration items + ```json + { + "max_connections": 1000, + "innodb_buffer_pool_size": "1G", + "query_cache_size": "64M" + } + ## Output + ```json + { + "tool_ids": ["mcp_tool_1", "mcp_tool_2", "mcp_tool_3", "mcp_tool_4"] + } + ``` + # Now start! + ## Goal + {{goal}} + ## MCP Tool List + + {% for tool in tools %} + - {{tool.id}} {{tool.name}};{{tool.description}} + {% endfor %} + + ## Additional Information + {{additional_info}} + # Output + """ + ), +} +EVALUATE_GOAL: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个计划评估器。 + 请根据用户的目标和当前的工具集合以及一些附加信息,判断基于当前的工具集合,是否能够完成用户的目标。 + 如果能够完成,请返回`true`,否则返回`false`。 + 推理过程必须清晰明了,能够让人理解你的判断依据。 + 必须按照以下格式回答: + ```json + { + "can_complete": true/false, + "resoning": "你的推理过程" + } + ``` + + # 样例 + # 目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优 + + # 工具集合 + 你可以访问并使用一些工具,这些工具将在 XML标签中给出。 + + - mysql_analyzer 分析MySQL数据库性能 + - performance_tuner 调优数据库性能 + - Final 结束步骤,当执行到这一步时,表示计划执行结束,所得到的结果将作为最终结果。 + + + # 附加信息 + 1. 当前MySQL数据库的版本是8.0.26 + 2. 当前MySQL数据库的配置文件路径是/etc/my.cnf + + ## + ```json + { + "can_complete": true, + "resoning": "当前的工具集合中包含mysql_analyzer和performance_tuner,能够完成对MySQL数据库的性能分析和调优,因此可以完成用户的目标。" + } + ``` + + # 目标 + {{goal}} + + # 工具集合 + + {% for tool in tools %} + - {{tool.id}} {{tool.name}};{{tool.description}} + {% endfor %} + + + # 附加信息 + {{additional_info}} + + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are a plan evaluator. + Please judge whether the current tool set can complete the user's goal based on the user's goal and the current tool set and some additional information. + If it can be completed, return `true`, otherwise return `false`. + The reasoning process must be clear and understandable, so that people can understand your judgment basis. 
+ Must answer in the following format: + ```json + { + "can_complete": true/false, + "resoning": "Your reasoning process" + } + ``` + + # Example + # Goal + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. + + # Tool Set + You can access and use some tools, which will be given in the XML tag. + + - mysql_analyzer Analyze MySQL database performance + - performance_tuner Tune database performance + - Final End step, when executing this step, it means that the plan execution is over, and the result obtained will be the final result. + + + # Additional Information + 1. The current MySQL database version is 8.0.26 + 2. The current MySQL database configuration file path is /etc/my.cnf + + ## + ```json + { + "can_complete": true, + "resoning": "The current tool set contains mysql_analyzer and performance_tuner, which can complete the performance analysis and optimization of MySQL database, so the user's goal can be completed." + } + ``` + + # Goal + {{goal}} + + # Tool Set + + {% for tool in tools %} + - {{tool.id}} {{tool.name}};{{tool.description}} + {% endfor %} + + + # Additional Information + {{additional_info}} + + """ + ), +} +GENERATE_FLOW_NAME: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个智能助手,你的任务是根据用户的目标,生成一个合适的流程名称。 + + # 生成流程名称时的注意事项: + 1. 流程名称应该简洁明了,能够准确表达达成用户目标的过程。 + 2. 流程名称应该包含关键的操作或步骤,例如“扫描”、“分析”、“调优”等。 + 3. 流程名称应该避免使用过于复杂或专业的术语,以便用户能够理解。 + 4. 流程名称应该尽量简短,小于20个字或者单词。 + 5. 只输出流程名称,不要输出其他内容。 + # 样例 + # 目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优 + # 输出 + { + "flow_name": "扫描MySQL数据库并分析性能瓶颈,进行调优" + } + # 现在开始生成流程名称: + # 目标 + {{goal}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are an intelligent assistant, your task is to generate a suitable flow name based on the user's goal. + + # Notes when generating flow names: + 1. The flow name should be concise and clear, accurately expressing the process of achieving the user's goal. + 2. 
The flow name should include key operations or steps, such as "scan", "analyze", "tune", etc. + 3. The flow name should avoid using overly complex or professional terms, so that users can understand. + 4. The flow name should be as short as possible, less than 20 characters or words. + 5. Only output the flow name, do not output other content. + # Example + # Goal + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. + # Output + { + "flow_name": "Scan MySQL database and analyze performance bottlenecks, and optimize it." + } + # Now start generating the flow name: + # Goal + {{goal}} + # Output + """ + ), +} +GENERATE_FLOW_EXCUTE_RISK: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个智能助手,你的任务是根据用户的目标和当前的工具集合,评估当前流程的风险。 + + # 样例 + # 目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优 + # 工具集合 + 你可以访问并使用一些工具,这些工具将在 XML标签中给出。 + + - mysql_analyzer 分析MySQL数据库性能 + - performance_tuner 调优数据库性能 + + # 输出 + { + "risk": "high", + "reason": "当前目标实现带来的风险较高,因为需要通过performance_tuner工具对MySQL数据库进行调优,而该工具可能会对数据库的性能和稳定性产生较大的影响,因此风险评估为高。" + } + # 现在开始评估当前流程的风险: + # 目标 + {{goal}} + # 工具集合 + + {% for tool in tools %} + - {{tool.id}} {{tool.name}};{{tool.description}} + {% endfor %} + + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are an intelligent assistant, your task is to evaluate the risk of the current process based on the user's goal and the current tool set. + # Example + # Goal + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. + # Tool Set + You can access and use some tools, which will be given in the XML tag. 
+ + - mysql_analyzer Analyze MySQL database performance + - performance_tuner Tune database performance + + # Output + { + "risk": "high", + "reason": "The risk brought by the realization of the current goal is relatively high, because it is necessary to tune the MySQL database through the performance_tuner tool, which may have a greater impact on the performance and stability of the database. Therefore, the risk assessment is high." + } + # Now start evaluating the risk of the current process: + # Goal + {{goal}} + # Tool Set + + {% for tool in tools %} + - {{tool.id}} {{tool.name}};{{tool.description}} + {% endfor %} + + # Output + """ + ) +} +GET_REPLAN_START_STEP_INDEX: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个智能助手,你的任务是根据用户的目标、报错信息和当前计划和历史,获取重新规划的步骤起始索引。 + + # 样例 + # 目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优 + # 报错信息 + 执行端口扫描命令时,出现了错误:`- bash: curl: command not found`。 + # 当前计划 + ```json + { + "plans": [ + { + "step_id": "step_1", + "content": "生成端口扫描命令", + "tool": "command_generator", + "instruction": "生成端口扫描命令:扫描" + }, + { + "step_id": "step_2", + "content": "在执行Result[0]生成的命令", + "tool": "command_executor", + "instruction": "执行端口扫描命令" + } + ] + } + # 历史 + [ + { + id: "0", + task_id: "task_1", + flow_id: "flow_1", + flow_name: "MYSQL性能调优", + flow_status: "RUNNING", + step_id: "step_1", + step_name: "生成端口扫描命令", + step_description: "生成端口扫描命令:扫描当前MySQL数据库的端口", + step_status: "FAILED", + input_data: { + "command": "nmap -p 3306 + "target": "localhost" + }, + output_data: { + "error": "- bash: curl: command not found" + } + } + ] + # 输出 + { + "start_index": 0, + "reasoning": "当前计划的第一步就失败了,报错信息显示curl命令未找到,可能是因为没有安装curl工具,因此需要从第一步重新规划。" + } + # 现在开始获取重新规划的步骤起始索引: + # 目标 + {{goal}} + # 报错信息 + {{error_message}} + # 当前计划 + {{current_plan}} + # 历史 + {{history}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are an intelligent assistant, your task is to get the starting index of the step to be replanned based on the user's goal, 
error message, and current plan and history. + + # Example + # Goal + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. + # Error message + An error occurred while executing the port scan command: `- bash: curl: command not found`. + # Current plan + ```json + { + "plans": [ + { + "step_id": "step_1", + "content": "Generate port scan command", + "tool": "command_generator", + "instruction": "Generate port scan command: scan" + }, + { + "step_id": "step_2", + "content": "Execute the command generated by Result[0]", + "tool": "command_executor", + "instruction": "Execute port scan command" + } + ] + } + # History + [ + { + id: "0", + task_id: "task_1", + flow_id: "flow_1", + flow_name: "MYSQL Performance Tuning", + flow_status: "RUNNING", + step_id: "step_1", + step_name: "Generate port scan command", + step_description: "Generate port scan command: scan the port of the current MySQL database", + step_status: "FAILED", + input_data: { + "command": "nmap -p 3306 + "target": "localhost" + }, + output_data: { + "error": "- bash: curl: command not found" + } + } + ] + # Output + { + "start_index": 0, + "reasoning": "The first step of the current plan failed, the error message shows that the curl command was not found, which may be because the curl tool was not installed. Therefore, it is necessary to replan from the first step." + } + # Now start getting the starting index of the step to be replanned: + # Goal + {{goal}} + # Error message + {{error_message}} + # Current plan + {{current_plan}} + # History + {{history}} + # Output + """ + ), +} +CREATE_PLAN: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个计划生成器。 + 请分析用户的目标,并生成一个计划。你后续将根据这个计划,一步一步地完成用户的目标。 + + # 一个好的计划应该: + + 1. 能够成功完成用户的目标 + 2. 计划中的每一个步骤必须且只能使用一个工具。 + 3. 计划中的步骤必须具有清晰和逻辑的步骤,没有冗余或不必要的步骤。 + 4. 
计划中的最后一步必须是Final工具,以确保计划执行结束。 + + # 生成计划时的注意事项: + + - 每一条计划包含3个部分: + - 计划内容:描述单个计划步骤的大致内容 + - 工具ID:必须从下文的工具列表中选择 + - 工具指令:改写用户的目标,使其更符合工具的输入要求 + - 必须按照如下格式生成计划,不要输出任何额外数据: + + ```json + { + "plans": [ + { + "content": "计划内容", + "tool": "工具ID", + "instruction": "工具指令" + } + ] + } + ``` + + - 在生成计划之前,请一步一步思考,解析用户的目标,并指导你接下来的生成。 +思考过程应放置在 XML标签中。 + - 计划内容中,可以使用"Result[]"来引用之前计划步骤的结果。例如:"Result[3]"表示引用第三条计划执行后的结果。 + - 计划不得多于{{max_num}}条,且每条计划内容应少于150字。 + + # 工具 + + 你可以访问并使用一些工具,这些工具将在 XML标签中给出。 + + + {% for tool in tools %} + - {{tool.id}} {{tool.name}};{{tool.description}} + {% endfor %} + + + # 样例 + + # 目标 + + 在后台运行一个新的alpine: latest容器,将主机/root文件夹挂载至/data,并执行top命令。 + + # 计划 + + + 1. 这个目标需要使用Docker来完成, 首先需要选择合适的MCP Server + 2. 目标可以拆解为以下几个部分: + - 运行alpine: latest容器 + - 挂载主机目录 + - 在后台运行 + - 执行top命令 + 3. 需要先选择MCP Server, 然后生成Docker命令, 最后执行命令 + + ```json + { + "plans": [ + { + "content": "选择一个支持Docker的MCP Server", + "tool": "mcp_selector", + "instruction": "需要一个支持Docker容器运行的MCP Server" + }, + { + "content": "使用Result[0]中选择的MCP Server,生成Docker命令", + "tool": "command_generator", + "instruction": "生成Docker命令:在后台运行alpine:latest容器,挂载/root到/data,执行top命令" + }, + { + "content": "在Result[0]的MCP Server上执行Result[1]生成的命令", + "tool": "command_executor", + "instruction": "执行Docker命令" + }, + { + "content": "任务执行完成,容器已在后台运行,结果为Result[2]", + "tool": "Final", + "instruction": "" + } + ] + } + ``` + + # 现在开始生成计划: + + # 目标 + + {{goal}} + + # 计划 +""" + ), + LanguageType.ENGLISH: dedent( + r""" + You are a plan builder. + Analyze the user's goals and generate a plan. You will then follow this plan, step by step, to achieve the user's goals. + + # A good plan should: + + 1. Be able to successfully achieve the user's goals. + 2. Each step in the plan must use only one tool. + 3. The steps in the plan must have clear and logical progression, without redundant or unnecessary steps. + 4. The last step in the plan must be the Final tool to ensure the plan execution is complete. 
+ + # Things to note when generating a plan: + + - Each plan contains 3 parts: + - Plan content: describes the general content of a single plan step + - Tool ID: must be selected from the tool list below + - Tool instructions: rewrite the user's goal to make it more consistent with the tool's input requirements + - The plan must be generated in the following format, and no additional data should be output: + + ```json + { + "plans": [ + { + "content": "Plan content", + "tool": "Tool ID", + "instruction": "Tool instruction" + } + ] + } + ``` + + - Before generating a plan, please think step by step, analyze the user's goals, and guide your subsequent generation. + The thinking process should be placed in the XML tags. + - In the plan content, you can use "Result[]" to reference the results of the previous plan step. For example: "Result[3]" refers to the result after the third plan is executed. + - There should be no more than {{max_num}} plans, and each plan content should be less than 150 words. + + # Tools + + You can access and use a number of tools, listed within the XML tags. + + + {% for tool in tools %} + - {{tool.id}} {{tool.name}}; {{tool.description}} + {% endfor %} + + + # Example + + # Goal + + Run a new alpine:latest container in the background, mount the host's /root folder to /data, and execute the top command. + + # Plan + + + 1. This goal needs to be completed using Docker. First, you need to select a suitable MCP Server. + 2. The goal can be broken down into the following parts: + - Run the alpine:latest container + - Mount the host directory + - Run in the background + - Execute the top command + 3. You need to select the MCP Server first, then generate the Docker command, and finally execute the command. 
+ + ```json + { + "plans": [ + { + "content": "Select an MCP Server that supports Docker", + "tool": "mcp_selector", + "instruction": "You need an MCP Server that supports running Docker containers" + }, + { + "content": "Use the MCP Server selected in Result[0] to generate Docker commands", + "tool": "command_generator", + "instruction": "Generate Docker commands: run the alpine:latest container in the background, mount /root to /data, and execute the top command" + }, + { + "content": "In the MCP of Result[0] Execute the command generated by Result[1] on the server", + "tool": "command_executor", + "instruction": "Execute Docker command" + }, + { + "content": "Task execution completed, the container is running in the background, the result is Result[2]", + "tool": "Final", + "instruction": "" + } + ] + } + ``` + + # Now start generating the plan: + + # Goal + + {{goal}} + + # Plan + """ + ), +} +RECREATE_PLAN: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个计划重建器。 + 请根据用户的目标、当前计划和运行报错,重新生成一个计划。 + + # 一个好的计划应该: + + 1. 能够成功完成用户的目标 + 2. 计划中的每一个步骤必须且只能使用一个工具。 + 3. 计划中的步骤必须具有清晰和逻辑的步骤,没有冗余或不必要的步骤。 + 4. 你的计划必须避免之前的错误,并且能够成功执行。 + 5. 
计划中的最后一步必须是Final工具,以确保计划执行结束。 + + # 生成计划时的注意事项: + + - 每一条计划包含3个部分: + - 计划内容:描述单个计划步骤的大致内容 + - 工具ID:必须从下文的工具列表中选择 + - 工具指令:改写用户的目标,使其更符合工具的输入要求 + - 必须按照如下格式生成计划,不要输出任何额外数据: + + ```json + { + "plans": [ + { + "content": "计划内容", + "tool": "工具ID", + "instruction": "工具指令" + } + ] + } + ``` + + - 在生成计划之前,请一步一步思考,解析用户的目标,并指导你接下来的生成。 +思考过程应放置在 XML标签中。 + - 计划内容中,可以使用"Result[]"来引用之前计划步骤的结果。例如:"Result[3]"表示引用第三条计划执行后的结果。 + - 计划不得多于{{max_num}}条,且每条计划内容应少于150字。 + + # 样例 + + # 目标 + + 请帮我扫描一下192.168.1.1的这台机器的端口,看看有哪些端口开放。 + # 工具 + 你可以访问并使用一些工具,这些工具将在 XML标签中给出。 + + - command_generator 生成命令行指令 + - tool_selector 选择合适的工具 + - command_executor 执行命令行指令 + - Final 结束步骤,当执行到这一步时,表示计划执行结束,所得到的结果将作为最终结果。 + # 当前计划 + ```json + { + "plans": [ + { + "content": "生成端口扫描命令", + "tool": "command_generator", + "instruction": "生成端口扫描命令:扫描192.168.1.1的开放端口" + }, + { + "content": "在执行第一步生成的命令", + "tool": "command_executor", + "instruction": "执行端口扫描命令" + }, + { + "content": "任务执行完成", + "tool": "Final", + "instruction": "" + } + ] + } + ``` + # 运行报错 + 执行端口扫描命令时,出现了错误:`- bash: curl: command not found`。 + # 重新生成的计划 + + + 1. 这个目标需要使用网络扫描工具来完成, 首先需要选择合适的网络扫描工具 + 2. 
目标可以拆解为以下几个部分: + - 生成端口扫描命令 + - 执行端口扫描命令 + 3.但是在执行端口扫描命令时,出现了错误:`- bash: curl: command not found`。 + 4.我将计划调整为: + - 需要先生成一个命令,查看当前机器支持哪些网络扫描工具 + - 执行这个命令,查看当前机器支持哪些网络扫描工具 + - 然后从中选择一个网络扫描工具 + - 基于选择的网络扫描工具,生成端口扫描命令 + - 执行端口扫描命令 + ```json + { + "plans": [ + { + "content": "需要生成一条命令查看当前机器支持哪些网络扫描工具", + "tool": "command_generator", + "instruction": "选择一个前机器支持哪些网络扫描工具" + }, + { + "content": "执行第一步中生成的命令,查看当前机器支持哪些网络扫描工具", + "tool": "command_executor", + "instruction": "执行第一步中生成的命令" + }, + { + "content": "从第二步执行结果中选择一个网络扫描工具,生成端口扫描命令", + "tool": "tool_selector", + "instruction": "选择一个网络扫描工具,生成端口扫描命令" + }, + { + "content": "基于第三步中选择的网络扫描工具,生成端口扫描命令", + "tool": "command_generator", + "instruction": "生成端口扫描命令:扫描192.168.1.1的开放端口" + }, + { + "content": "执行第四步中生成的端口扫描命令", + "tool": "command_executor", + "instruction": "执行端口扫描命令" + }, + { + "content": "任务执行完成", + "tool": "Final", + "instruction": "" + } + ] + } + ``` + + # 现在开始重新生成计划: + + # 目标 + + {{goal}} + + # 工具 + + 你可以访问并使用一些工具,这些工具将在 XML标签中给出。 + + + {% for tool in tools %} + - {{tool.id}} {{tool.name}};{{tool.description}} + {% endfor %} + + + # 当前计划 + {{current_plan}} + + # 运行报错 + {{error_message}} + + # 重新生成的计划 +""" + ), + LanguageType.ENGLISH: dedent( + r""" + You are a plan rebuilder. + Please regenerate a plan based on the user's goals, current plan, and runtime errors. + + # A good plan should: + + 1. Successfully achieve the user's goals. + 2. Each step in the plan must use only one tool. + 3. The steps in the plan must have clear and logical progression, without redundant or unnecessary steps. + 4. Your plan must avoid previous errors and be able to be successfully executed. + 5. The last step in the plan must be the Final tool to ensure that the plan is complete. 
+ + # Things to note when generating a plan: + + - Each plan contains 3 parts: + - Plan content: describes the general content of a single plan step + - Tool ID: must be selected from the tool list below + - Tool instructions: rewrite the user's goal to make it more consistent with the tool's input requirements + - The plan must be generated in the following format, and no additional data should be output: + + ```json + { + "plans": [ + { + "content": "Plan content", + "tool": "Tool ID", + "instruction": "Tool instruction" + } + ] + } + ``` + + - Before generating a plan, please think step by step, analyze the user's goals, and guide your subsequent generation. + The thinking process should be placed in the XML tags. + - In the plan content, you can use "Result[]" to reference the results of the previous plan step. For example: "Result[3]" refers to the result after the third plan is executed. + - There should be no more than {{max_num}} plans, and each plan content should be less than 150 words. + + # Objective + + Please scan the ports of the machine at 192.168.1.1 to see which ports are open. + # Tools + You can access and use a number of tools, which are listed within the XML tags. + + - command_generator Generates command line instructions + - tool_selector Selects the appropriate tool + - command_executor Executes command line instructions + - Final This is the final step. When this step is reached, the plan execution ends, and the result is used as the final result. 
+ # Current plan + ```json + { + "plans": [ + { + "content": "Generate port scan command", + "tool": "command_generator", + "instruction": "Generate port scan command: Scan open ports on 192.168.1.1" + }, + { + "content": "Execute the command generated in the first step", + "tool": "command_executor", + "instruction": "Execute the port scan command" + }, + { + "content": "Task execution completed", + "tool": "Final", + "instruction": "" + } + ] + } + ``` + # Run error + When executing the port scan command, an error occurred: `- bash: curl: command not found`. + # Regenerate the plan + + + 1. This goal requires a network scanning tool. First, select the appropriate network scanning tool. + 2. The goal can be broken down into the following parts: + - Generate the port scanning command + - Execute the port scanning command + 3. However, when executing the port scanning command, an error occurred: `- bash: curl: command not found`. + 4. I adjusted the plan to: + - Generate a command to check which network scanning tools the current machine supports + - Execute this command to check which network scanning tools the current machine supports + - Then select a network scanning tool + - Generate a port scanning command based on the selected network scanning tool + - Execute the port scanning command + ```json + { + "plans": [ + { + "content": "You need to generate a command to check which network scanning tools the current machine supports", + "tool": "command_generator", + "instruction": "Select which network scanning tools the current machine supports" + + }, + { + "content": "Execute the command generated in the first step to check which network scanning tools the current machine supports", + "tool": "command_executor", + "instruction": "Execute the command generated in the first step" + + }, + { + "content": "Select a network scanning tool from the results of the second step and generate a port scanning command", + "tool": "tool_selector", + "instruction": "Select a 
network scanning tool and generate a port scanning command" + + }, + { + "content": "Generate a port scan command based on the network scanning tool selected in step 3", + "tool": "command_generator", + "instruction": "Generate a port scan command: Scan the open ports on 192.168.1.1" + }, + { + "content": "Execute the port scan command generated in step 4", + "tool": "command_executor", + "instruction": "Execute the port scan command" + }, + { + "content": "Task execution completed", + "tool": "Final", + "instruction": "" + } + ] + } + ``` + + # Now start regenerating the plan: + + # Goal + + {{goal}} + + # Tools + + You can access and use a number of tools, which are listed within the XML tags. + + + {% for tool in tools %} + - {{tool.id}} {{tool.name}}; {{tool.description}} + {% endfor %} + + + # Current plan + {{current_plan}} + + # Run error + {{error_message}} + + # Regenerated plan + """ + ), +} +GEN_STEP: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个计划生成器。 + 请根据用户的目标、当前计划和历史,生成一个新的步骤。 + + # 一个好的计划步骤应该: + 1.使用最适合的工具来完成当前步骤。 + 2.能够基于当前的计划和历史,完成阶段性的任务。 + 3.不要选择不存在的工具。 + 4.如果你认为当前已经达成了用户的目标,可以直接返回Final工具,表示计划执行结束。 + 5.tool_id中的工具ID必须是当前工具集合中存在的工具ID,而不是工具的名称。 + 6.工具在 XML标签中给出,工具的id在 下的 XML标签中给出。 + + # 样例 1 + # 目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优,我的ip是192.168.1.1,数据库端口是3306,用户名是root,密码是password + # 历史记录 + 第1步:生成端口扫描命令 + - 调用工具 `command_generator`,并提供参数 `帮我生成一个mysql端口扫描命令` + - 执行状态:成功 + - 得到数据:`{"command": "nmap -sS -p--open 192.168.1.1"}` + 第2步:执行端口扫描命令 + - 调用工具 `command_executor`,并提供参数 `{"command": "nmap -sS -p--open 192.168.1.1"}` + - 执行状态:成功 + - 得到数据:`{"result": "success"}` + # 工具 + + - DuDlgP mysql分析工具,用于分析数据库性能/description> + - ADsxSX 文件存储工具,用于存储文件 + - ySASDZ mongoDB工具,用于操作MongoDB数据库 + - Final 结束步骤,当执行到这一步时,表示计划执行结束,所得到的结果将作为最终结果。 + + # 输出 + ```json + { + "tool_id": "DuDlgP", + "description": "扫描ip为192.168.1.1的MySQL数据库,端口为3306,用户名为root,密码为password的数据库性能", + } + ``` + # 样例二 + # 目标 + 计划从杭州到北京的旅游计划 + # 历史记录 + 第1步:将杭州转换为经纬度坐标 + - 调用工具 
`经纬度工具`,并提供参数 `{"city_from": "杭州", "address": "西湖"}` + - 执行状态:成功 + - 得到数据:`{"location": "123.456, 78.901"}` + 第2步:查询杭州的天气 + - 调用工具 `天气查询工具`,并提供参数 `{"location": "123.456, 78.901"}` + - 执行状态:成功 + - 得到数据:`{"weather": "晴", "temperature": "25°C"}` + 第3步:将北京转换为经纬度坐标 + - 调用工具 `经纬度工具`,并提供参数 `{"city_from": "北京", "address": "天安门"}` + - 执行状态:成功 + - 得到数据:`{"location": "123.456, 78.901"}` + 第4步:查询北京的天气 + - 调用工具 `天气查询工具`,并提供参数 `{"location": "123.456, 78.901"}` + - 执行状态:成功 + - 得到数据:`{"weather": "晴", "temperature": "25°C"}` + # 工具 + + - cSAads 经纬度工具,将详细的结构化地址转换为经纬度坐标。支持对地标性名胜景区、建筑物名称解析为经纬度坐标 + - sScseS 天气查询工具,用于查询天气信息 + - pcSEsx 路径规划工具,根据用户起终点经纬度坐标规划综合各类公共(火车、公交、地铁)交通方式的通勤方案,并且返回通勤方案的数据,跨城场景下必须传起点城市与终点城市 + - Final Final;结束步骤,当执行到这一步时,表示计划执行结束,所得到的结果将作为最终结果。 + + # 输出 + ```json + { + "tool_id": "pcSEsx", + "description": "规划从杭州到北京的综合公共交通方式的通勤方案" + } + ``` + # 现在开始生成步骤: + # 目标 + {{goal}} + # 历史记录 + {{history}} + # 工具 + + {% for tool in tools %} + - {{tool.id}} {{tool.description}} + {% endfor %} + +""" + ), + LanguageType.ENGLISH: dedent( + r""" + You are a plan generator. + Please generate a new step based on the user's goal, current plan, and history. + + # A good plan step should: + 1. Use the most appropriate tool for the current step. + 2. Complete the tasks at each stage based on the current plan and history. + 3. Do not select a tool that does not exist. + 4. If you believe the user's goal has been achieved, return to the Final tool to complete the plan execution. + + # Example 1 + # Objective + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. My IP address is 192.168.1.1, the database port is 3306, my username is root, and my password is password. + # History + Step 1: Generate a port scan command + - Call the `command_generator` tool and provide the `help me generate a MySQL port scan command` parameter. + - Execution status: Success. 
+ - Result: `{"command": "nmap -sS -p --open 192.168.1.1"}` + Step 2: Execute the port scan command + - Call the `command_executor` tool and provide the `{"command": "nmap -sS -p --open 192.168.1.1"}` parameter. + - Execution status: Success. + - Result: `{"result": "success"}` + # Tools + + - mcp_tool_1 mysql_analyzer; used for analyzing database performance. + - mcp_tool_2 File storage tool; used for storing files. + - mcp_tool_3 MongoDB tool; used for operating MongoDB databases. + - Final This step completes the plan execution and the result is used as the final result. + + # Output + ```json + { + "tool_id": "mcp_tool_1", + "description": "Scan the database performance of the MySQL database with IP address 192.168.1.1, port 3306, username root, and password password", + } + ``` + # Example 2 + # Objective + Plan a trip from Hangzhou to Beijing + # History + Step 1: Convert Hangzhou to latitude and longitude coordinates + - Call the `maps_geo_planner` tool and provide `{"city_from": "Hangzhou", "address": "West Lake"}` + - Execution status: Success + - Result: `{"location": "123.456, 78.901"}` + Step 2: Query the weather in Hangzhou + - Call the `weather_query` tool and provide `{"location": "123.456, 78.901"}` + - Execution Status: Success + - Result: `{"weather": "Sunny", "temperature": "25°C"}` + Step 3: Convert Beijing to latitude and longitude coordinates + - Call the `maps_geo_planner` tool and provide `{"city_from": "Beijing", "address": "Tiananmen"}` + - Execution Status: Success + - Result: `{"location": "123.456, 78.901"}` + Step 4: Query the weather in Beijing + - Call the `weather_query` tool and provide `{"location": "123.456, 78.901"}` + - Execution Status: Success + - Result: `{"weather": "Sunny", "temperature": "25°C"}` + # Tools + + - mcp_tool_4 maps_geo_planner; Converts a detailed structured address into longitude and latitude coordinates. Supports parsing landmarks, scenic spots, and building names into longitude and latitude coordinates. 
+ - mcp_tool_5 weather_query; Weather query, used to query weather information. + - mcp_tool_6 maps_direction_transit_integrated; Plans a commuting plan based on the user's starting and ending longitude and latitude coordinates, integrating various public transportation modes (train, bus, subway), and returns the commuting plan data. For cross-city scenarios, both the starting and ending cities must be provided. + - Final Final; Final step. When this step is reached, plan execution is complete, and the resulting result is used as the final result. + + # Output + ```json + { + "tool_id": "mcp_tool_6", + "description": "Plan a comprehensive public transportation commute from Hangzhou to Beijing" + } + ``` + # Now start generating steps: + # Goal + {{goal}} + # History + {{history}} + # Tools + + {% for tool in tools %} + - {{tool.id}} {{tool.description}} + {% endfor %} + + """ + ), +} + +TOOL_SKIP: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个计划执行器。 + 你的任务是根据当前的计划和用户目标,判断当前步骤是否需要跳过。 + 如果需要跳过,请返回`true`,否则返回`false`。 + 必须按照以下格式回答: + ```json + { + "skip": true/false, + } + ``` + 注意: + 1.你的判断要谨慎,在历史消息中有足够的上下文信息时,才可以判断是否跳过当前步骤。 + # 样例 + # 用户目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优 + # 历史 + 第1步:生成端口扫描命令 + - 调用工具 `command_generator`,并提供参数 `{"command": "nmap -sS -p--open 192.168.1.1"}` + - 执行状态:成功 + - 得到数据:`{"command": "nmap -sS -p--open 192.168.1.1"}` + 第2步:执行端口扫描命令 + - 调用工具 `command_executor`,并提供参数 `{"command": "nmap -sS -p--open 192.168.1.1"}` + - 执行状态:成功 + - 得到数据:`{"result": "success"}` + 第3步:分析端口扫描结果 + - 调用工具 `mysql_analyzer`,并提供参数 `{"host": "192.168.1.1", "port": 3306, "username": "root", "password": "password"}` + - 执行状态:成功 + - 得到数据:`{"performance": "good", "bottleneck": "none"}` + # 当前步骤 + + step_4 + command_generator + 生成MySQL性能调优命令 + 生成MySQL性能调优命令:调优MySQL数据库性能 + + # 输出 + ```json + { + "skip": true + } + ``` + # 用户目标 + {{goal}} + # 历史 + {{history}} + # 当前步骤 + + {{step_id}} + {{step_name}} + {{step_instruction}} + {{step_content}} + + # 输出 + """ + 
), + LanguageType.ENGLISH: dedent( + r""" + You are a plan executor. + Your task is to determine whether the current step should be skipped based on the current plan and the user's goal. + If skipping is required, return `true`; otherwise, return `false`. + The answer must follow the following format: + ```json + { + "skip": true/false, + } + ``` + Note: + 1. Be cautious in your judgment and only decide whether to skip the current step when there is sufficient context in the historical messages. + # Example + # User Goal + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. + # History + Step 1: Generate a port scan command + - Call the `command_generator` tool with `{"command": "nmap -sS -p--open 192.168.1.1"}` + - Execution Status: Success + - Result: `{"command": "nmap -sS -p--open 192.168.1.1"}` + Step 2: Execute the port scan command + - Call the `command_executor` tool with `{"command": "nmap -sS -p--open 192.168.1.1"}` + - Execution Status: Success + - Result: `{"result": "success"}` + Step 3: Analyze the port scan results + - Call the `mysql_analyzer` tool with `{"host": "192.168.1.1", "port": 3306, "username": "root", "password": "password"}` + - Execution status: Success + - Result: `{"performance": "good", "bottleneck": "none"}` + # Current step + + step_4 + command_generator + Generate MySQL performance tuning commands + Generate MySQL performance tuning commands: Tune MySQL database performance + + # Output + ```json + { + "skip": true + } + ``` + # User goal + {{goal}} + # History + {{history}} + # Current step + + {{step_id}} + {{step_name}} + {{step_instruction}} + {{step_content}} + + # output + """ + ), +} +RISK_EVALUATE: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个工具执行计划评估器。 + 你的任务是根据当前工具的名称、描述和入参以及附加信息,判断当前工具执行的风险并输出提示。 + ```json + { + "risk": "low/medium/high", + "reason": "提示信息" + } + ``` + # 样例 + # 工具名称 + mysql_analyzer + # 工具描述 + 分析MySQL数据库性能 + # 工具入参 + { + "host": 
"192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # 附加信息 + 1. 当前MySQL数据库的版本是8.0.26 + 2. 当前MySQL数据库的配置文件路径是/etc/my.cnf,并含有以下配置项 + ```ini + [mysqld] + innodb_buffer_pool_size=1G + innodb_log_file_size=256M + ``` + # 输出 + ```json + { + "risk": "medium", + "reason": "当前工具将连接到MySQL数据库并分析性能,可能会对数据库性能产生一定影响。请确保在非生产环境中执行此操作。" + } + ``` + # 工具 + + {{tool_name}} + {{tool_description}} + + # 工具入参 + {{input_param}} + # 附加信息 + {{additional_info}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are a tool execution plan evaluator. + Your task is to determine the risk of executing the current tool based on its name, description, input parameters, and additional information, and output a warning. + ```json + { + "risk": "low/medium/high", + "reason": "prompt message" + } + ``` + # Example + # Tool name + mysql_analyzer + # Tool description + Analyzes MySQL database performance + # Tool input + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # Additional information + 1. The current MySQL database version is 8.0.26 + 2. The current MySQL database configuration file path is /etc/my.cnf and contains the following configuration items + ```ini + [mysqld] + innodb_buffer_pool_size=1G + innodb_log_file_size=256M + ``` + # Output + ```json + { + "risk": "medium", + "reason": "This tool will connect to a MySQL database and analyze performance, which may impact database performance. This operation should only be performed in a non-production environment." + } + ``` + # Tool + + {{tool_name}} + {{tool_description}} + + # Tool Input Parameters + {{input_param}} + # Additional Information + {{additional_info}} + # Output + + """ + ), +} +# 根据当前计划和报错信息决定下一步执行,具体计划有需要用户补充工具入参、重计划当前步骤、重计划接下来的所有计划 +TOOL_EXECUTE_ERROR_TYPE_ANALYSIS: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个计划决策器。 + + 你的任务是根据用户目标、当前计划、当前使用的工具、工具入参和工具运行报错,决定下一步执行的操作。 + 请根据以下规则进行判断: + 1. 
仅通过补充工具入参来解决问题的,返回 missing_param; + 2. 需要重计划当前步骤的,返回 decorrect_plan + 3.推理过程必须清晰明了,能够让人理解你的判断依据,并且不超过100字。 + 你的输出要以json格式返回,格式如下: + + ```json + { + "error_type": "missing_param/decorrect_plan", + "reason": "你的推理过程" + } + ``` + + # 样例 + # 用户目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优 + # 当前计划 + { + "plans": [ + { + "content": "生成端口扫描命令", + "tool": "command_generator", + "instruction": "生成端口扫描命令:扫描192.168.1.1的开放端口" + }, + { + "content": "在执行Result[0]生成的命令", + "tool": "command_executor", + "instruction": "执行端口扫描命令" + }, + { + "content": "任务执行完成,端口扫描结果为Result[2]", + "tool": "Final", + "instruction": "" + } + ] + } + # 当前使用的工具 + + command_executor + 执行命令行指令 + + # 工具入参 + { + "command": "nmap -sS -p--open 192.168.1.1" + } + # 工具运行报错 + 执行端口扫描命令时,出现了错误:`- bash: nmap: command not found`。 + # 输出 + ```json + { + "error_type": "decorrect_plan", + "reason": "当前计划的第二步执行失败,报错信息显示nmap命令未找到,可能是因为没有安装nmap工具,因此需要重计划当前步骤。" + } + ``` + # 用户目标 + {{goal}} + # 当前计划 + {{current_plan}} + # 当前使用的工具 + + {{tool_name}} + {{tool_description}} + + # 工具入参 + {{input_param}} + # 工具运行报错 + {{error_message}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are a plan decider. + + Your task is to decide the next action based on the user's goal, the current plan, the tool being used, tool inputs, and tool errors. + Please make your decision based on the following rules: + 1. If the problem can be solved by simply adding tool inputs, return missing_param; + 2. If the current step needs to be replanned, return decorrect_plan. + 3. Your reasoning must be clear and concise, allowing the user to understand your decision. It should not exceed 100 words. + Your output should be returned in JSON format, as follows: + + ```json + { + "error_type": "missing_param/decorrect_plan", + "reason": "Your reasoning" + } + ``` + + # Example + # User Goal + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. 
+ # Current Plan + { + "plans": [ + { + "content": "Generate port scan command", + "tool": "command_generator", + "instruction": "Generate port scan command: Scan the open ports of 192.168.1.1" + }, + { + "content": "Execute the command generated by Result[0]", + "tool": "command_executor", + "instruction": "Execute the port scan command" + }, + { + "content": "Task execution completed, the port scan result is Result[2]", + "tool": "Final", + "instruction": "" + } + ] + } + # Currently used tool + + command_executor + Execute command line instructions + + # Tool input parameters + { + "command": "nmap -sS -p--open 192.168.1.1" + } + # Tool running error + When executing the port scan command, an error occurred: `- bash: nmap: command not found`. + # Output + ```json + { + "error_type": "decorrect_plan", + "reason": "The second step of the current plan failed. The error message shows that the nmap command was not found. This may be because the nmap tool is not installed. Therefore, the current step needs to be replanned." 
+ } + ``` + # User goal + {{goal}} + # Current plan + {{current_plan}} + # Currently used tool + + {{tool_name}} + {{tool_description}} + + # Tool input parameters + {{input_param}} + # Tool execution error + {{error_message}} + # Output + """ + ), +} + +IS_PARAM_ERROR: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个计划执行专家,你的任务是判断当前的步骤执行失败是否是因为参数错误导致的, + 如果是,请返回`true`,否则返回`false`。 + 必须按照以下格式回答: + ```json + { + "is_param_error": true/false, + } + ``` + # 样例 + # 用户目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优 + # 历史 + 第1步:生成端口扫描命令 + - 调用工具 `command_generator`,并提供参数 `{"command": "nmap -sS -p--open 192.168.1.1"}` + - 执行状态:成功 + - 得到数据:`{"command": "nmap -sS -p--open 192.168.1.1"}` + 第2步:执行端口扫描命令 + - 调用工具 `command_executor`,并提供参数 `{"command": "nmap -sS -p--open 192.168.1.1"}` + - 执行状态:成功 + - 得到数据:`{"result": "success"}` + # 当前步骤 + + step_3 + mysql_analyzer + 分析MySQL数据库性能 + + # 工具入参 + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # 工具运行报错 + 执行MySQL性能分析命令时,出现了错误:`host is not correct`。 + + # 输出 + ```json + { + "is_param_error": true + } + ``` + # 用户目标 + {{goal}} + # 历史 + {{history}} + # 当前步骤 + + {{step_id}} + {{step_name}} + {{step_instruction}} + + # 工具入参 + {{input_param}} + # 工具运行报错 + {{error_message}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are a plan execution expert. Your task is to determine whether the current step execution failure is due to parameter errors. + If so, return `true`; otherwise, return `false`. + The answer must be in the following format: + ```json + { + "is_param_error": true/false, + } + ``` + # Example + # User Goal + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. 
+ # History + Step 1: Generate a port scan command + - Call the `command_generator` tool and provide `{"command": "nmap -sS -p--open 192.168.1.1"}` + - Execution Status: Success + - Result: `{"command": "nmap -sS -p--open 192.168.1.1"}` + Step 2: Execute the port scan command + - Call the `command_executor` tool and provide `{"command": "nmap -sS -p--open 192.168.1.1"}` + - Execution Status: Success + - Result: `{"result": "success"}` + # Current step + + step_3 + mysql_analyzer + Analyze MySQL database performance + + # Tool input parameters + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # Tool execution error + When executing the MySQL performance analysis command, an error occurred: `host is not correct`. + + # Output + ```json + { + "is_param_error": true + } + ``` + # User goal + {{goal}} + # History + {{history}} + # Current step + + {{step_id}} + {{step_name}} + {{step_instruction}} + + # Tool input parameters + {{input_param}} + # Tool error + {{error_message}} + # Output + """ + ), +} + +# 将当前程序运行的报错转换为自然语言 +CHANGE_ERROR_MESSAGE_TO_DESCRIPTION: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个智能助手,你的任务是将当前程序运行的报错转换为自然语言描述。 + 请根据以下规则进行转换: + 1. 将报错信息转换为自然语言描述,描述应该简洁明了,能够让人理解报错的原因和影响。 + 2. 描述应该包含报错的具体内容和可能的解决方案。 + 3. 描述应该避免使用过于专业的术语,以便用户能够理解。 + 4. 描述应该尽量简短,控制在50字以内。 + 5. 
只输出自然语言描述,不要输出其他内容。 + # 样例 + # 工具信息 + + port_scanner + 扫描主机端口 + + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "主机地址" + }, + "port": { + "type": "integer", + "description": "端口号" + }, + "username": { + "type": "string", + "description": "用户名" + }, + "password": { + "type": "string", + "description": "密码" + } + }, + "required": ["host", "port", "username", "password"] + } + + + # 工具入参 + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # 报错信息 + 执行端口扫描命令时,出现了错误:`password is not correct`。 + # 输出 + 扫描端口时发生错误:密码不正确。请检查输入的密码是否正确,并重试。 + # 现在开始转换报错信息: + # 工具信息 + + {{tool_name}} + {{tool_description}} + + {{input_schema}} + + + # 工具入参 + {{input_params}} + # 报错信息 + {{error_message}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are an intelligent assistant. Your task is to convert the error message generated by the current program into a natural language description. + Please follow the following rules for conversion: + 1. Convert the error message into a natural language description. The description should be concise and clear, allowing users to understand the cause and impact of the error. + 2. The description should include the specific content of the error and possible solutions. + 3. The description should avoid using overly technical terms so that users can understand it. + 4. The description should be as brief as possible, within 50 words. + 5. Only output the natural language description, do not output other content. 
+ # Example + # Tool Information + + port_scanner + Scan host ports + + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "Host address" + }, + "port": { + "type": "integer", + "description": "Port number" + }, + "username": { + "type": "string", + "description": "Username" + }, + "password": { + "type": "string", + "description": "Password" + } + }, + "required": ["host", "port", "username", "password"] + } + + + # Tool input + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # Error message + An error occurred while executing the port scan command: `password is not correct`. + # Output + An error occurred while scanning the port: The password is incorrect. Please check that the password you entered is correct and try again. + # Now start converting the error message: + # Tool information + + {{tool_name}} + {{tool_description}} + + {{input_schema}} + + + # Tool input parameters + {{input_params}} + # Error message + {{error_message}} + # Output + """ + ), +} +# 获取缺失的参数的json结构体 +GET_MISSING_PARAMS: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个工具参数获取器。 + 你的任务是根据当前工具的名称、描述和入参和入参的schema以及运行报错,将当前缺失的参数设置为null,并输出一个JSON格式的字符串。 + ```json + { + "host": "请补充主机地址", + "port": "请补充端口号", + "username": "请补充用户名", + "password": "请补充密码" + } + ``` + # 样例 + # 工具名称 + mysql_analyzer + # 工具描述 + 分析MySQL数据库性能 + # 工具入参 + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # 工具入参schema + { + "type": "object", + "properties": { + "host": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ], + "description": "MySQL数据库的主机地址(可以为字符串或null)" + }, + "port": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ], + "description": "MySQL数据库的端口号(可以是数字、字符串或null)" + }, + "username": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ], + "description": "MySQL数据库的用户名(可以为字符串或null)" + }, + "password": { + "anyOf": [ + {"type": 
"string"}, + {"type": "null"} + ], + "description": "MySQL数据库的密码(可以为字符串或null)" + } + }, + "required": ["host", "port", "username", "password"] + } + # 运行报错 + 执行端口扫描命令时,出现了错误:`password is not correct`。 + # 输出 + ```json + { + "host": "192.0.0.1", + "port": 3306, + "username": null, + "password": null + } + ``` + # 工具 + + {{tool_name}} + {{tool_description}} + + # 工具入参 + {{input_param}} + # 工具入参schema(部分字段允许为null) + {{input_schema}} + # 运行报错 + {{error_message}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are a tool parameter getter. + Your task is to set missing parameters to null based on the current tool's name, description, input parameters, input parameter schema, and runtime errors, and output a JSON-formatted string. + ```json + { + "host": "Please provide the host address", + "port": "Please provide the port number", + "username": "Please provide the username", + "password": "Please provide the password" + } + ``` + # Example + # Tool Name + mysql_analyzer + # Tool Description + Analyze MySQL database performance + # Tool Input Parameters + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # Tool Input Parameter Schema + { + "type": "object", + "properties": { + "host": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ], + "description": "MySQL database host address (can be a string or null)" + }, + "port": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ], + "description": "MySQL database port number (can be a number, a string, or null)" + }, + "username": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ], + "description": "MySQL database username (can be a string or null)" + }, + "password": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ], + "description": "MySQL database password (can be a string or null)" + } + }, + "required": ["host", "port", "username", "password"] + } + # Run error + When executing the port scan command, an error occurred: `password 
is not correct`. + # Output + ```json + { + "host": "192.0.0.1", + "port": 3306, + "username": null, + "password": null + } + ``` + # Tool + + {{tool_name}} + {{tool_description}} + + # Tool input parameters + {{input_param}} + # Tool input parameter schema (some fields can be null) + {{input_schema}} + # Run error + {{error_message}} + # Output + """ + ), +} + +GEN_PARAMS: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个工具参数生成器。 + 你的任务是根据总的目标、阶段性的目标、工具信息、工具入参的schema和背景信息生成工具的入参。 + 注意: + 1.生成的参数在格式上必须符合工具入参的schema。 + 2.总的目标、阶段性的目标和背景信息必须被充分理解,利用其中的信息来生成工具入参。 + 3.生成的参数必须符合阶段性目标。 + + # 样例 + # 工具信息 + < tool > + < name > mysql_analyzer < /name > + < description > 分析MySQL数据库性能 < /description > + < / tool > + # 总目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优,ip地址是192.168.1.1,端口是3306,用户名是root,密码是password。 + # 当前阶段目标 + 我要连接MySQL数据库,分析性能瓶颈,并调优。 + # 工具入参的schema + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "MySQL数据库的主机地址" + }, + "port": { + "type": "integer", + "description": "MySQL数据库的端口号" + }, + "username": { + "type": "string", + "description": "MySQL数据库的用户名" + }, + "password": { + "type": "string", + "description": "MySQL数据库的密码" + } + }, + "required": ["host", "port", "username", "password"] + } + # 背景信息 + 第1步:生成端口扫描命令 + - 调用工具 `command_generator`,并提供参数 `帮我生成一个mysql端口扫描命令` + - 执行状态:成功 + - 得到数据:`{"command": "nmap -sS -p--open 192.168.1.1"}` + 第2步:执行端口扫描命令 + - 调用工具 `command_executor`,并提供参数 `{"command": "nmap -sS -p--open 192.168.1.1"}` + - 执行状态:成功 + - 得到数据:`{"result": "success"}` + # 输出 + ```json + { + "host": "192.168.1.1", + "port": 3306, + "username": "root", + "password": "password" + } + ``` + # 工具 + < tool > + < name > {{tool_name}} < /name > + < description > {{tool_description}} < /description > + < / tool > + # 总目标 + {{goal}} + # 当前阶段目标 + {{current_goal}} + # 工具入参scheme + {{input_schema}} + # 背景信息 + {{background_info}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are a tool parameter 
generator. + Your task is to generate tool input parameters based on the overall goal, phased goals, tool information, tool input parameter schema, and background information. + Note: + 1. The generated parameters must conform to the tool input parameter schema. + 2. The overall goal, phased goals, and background information must be fully understood and used to generate tool input parameters. + 3. The generated parameters must conform to the phased goals. + + # Example + # Tool Information + < tool > + < name >mysql_analyzer < /name > + < description > Analyze MySQL Database Performance < /description > + < / tool > + # Overall Goal + I need to scan the current MySQL database, analyze performance bottlenecks, and optimize it. The IP address is 192.168.1.1, the port is 3306, the username is root, and the password is password. + # Current Phase Goal + I need to connect to the MySQL database, analyze performance bottlenecks, and optimize it. # Tool input schema + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "MySQL database host address" + }, + "port": { + "type": "integer", + "description": "MySQL database port number" + }, + "username": { + "type": "string", + "description": "MySQL database username" + }, + "password": { + "type": "string", + "description": "MySQL database password" + } + }, + "required": ["host", "port", "username", "password"] + } + # Background information + Step 1: Generate a port scan command + - Call the `command_generator` tool and provide the `Help me generate a MySQL port scan command` parameter + - Execution status: Success + - Received data: `{"command": "nmap -sS -p --open 192.168.1.1"}` + + Step 2: Execute the port scan command + - Call the `command_executor` tool and provide the parameters `{"command": "nmap -sS -p --open 192.168.1.1"}` + - Execution status: Success + - Received data: `{"result": "success"}` + # Output + ```json + { + "host": "192.168.1.1", + "port": 3306, + "username": 
"root", + "password": "password" + } + ``` + # Tool + < tool > + < name > {{tool_name}} < /name > + < description > {{tool_description}} < /description > + < / tool > + # Overall goal + {{goal}} + # Current stage goal + {{current_goal}} + # Tool input scheme + {{input_schema}} + # Background information + {{background_info}} + # Output + """ + ), +} + +REPAIR_PARAMS: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 你是一个工具参数修复器。 + 你的任务是根据当前的工具信息、目标、工具入参的schema、工具当前的入参、工具的报错、补充的参数和补充的参数描述,修复当前工具的入参。 + + 注意: + 1.最终修复的参数要符合目标和工具入参的schema。 + + # 样例 + # 工具信息 + + mysql_analyzer + 分析MySQL数据库性能 + + # 总目标 + 我需要扫描当前mysql数据库,分析性能瓶颈, 并调优 + # 当前阶段目标 + 我要连接MySQL数据库,分析性能瓶颈,并调优。 + # 工具入参的schema + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "MySQL数据库的主机地址" + }, + "port": { + "type": "integer", + "description": "MySQL数据库的端口号" + }, + "username": { + "type": "string", + "description": "MySQL数据库的用户名" + }, + "password": { + "type": "string", + "description": "MySQL数据库的密码" + } + }, + "required": ["host", "port", "username", "password"] + } + # 工具当前的入参 + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # 工具的报错 + 执行端口扫描命令时,出现了错误:`password is not correct`。 + # 补充的参数 + { + "username": "admin", + "password": "admin123" + } + # 补充的参数描述 + 用户希望使用admin用户和admin123密码来连接MySQL数据库。 + # 输出 + ```json + { + "host": "192.0.0.1", + "port": 3306, + "username": "admin", + "password": "admin123" + } + ``` + # 工具 + + {{tool_name}} + {{tool_description}} + + # 总目标 + {{goal}} + # 当前阶段目标 + {{current_goal}} + # 工具入参scheme + {{input_schema}} + # 工具当前的入参 + {{input_params}} + # 运行报错 + {{error_message}} + # 补充的参数 + {{params}} + # 补充的参数描述 + {{params_description}} + # 输出 + """ + ), + LanguageType.ENGLISH: dedent( + r""" + You are a tool parameter fixer. 
+ Your task is to fix the current tool input parameters based on the current tool information, tool input parameter schema, tool current input parameters, tool error, supplemented parameters, and supplemented parameter descriptions. + + # Example + # Tool information + + mysql_analyzer + Analyze MySQL database performance + + # Tool input parameter schema + { + "type": "object", + "properties": { + "host": { + "type": "string", + "description": "MySQL database host address" + }, + "port": { + "type": "integer", + "description": "MySQL database port number" + }, + "username": { + "type": "string", + "description": "MySQL database username" + }, + "password": { + "type": "string", + "description": "MySQL database password" + } + }, + "required": ["host", "port", "username", "password"] + } + # Current tool input parameters + { + "host": "192.0.0.1", + "port": 3306, + "username": "root", + "password": "password" + } + # Tool error + When executing the port scan command, an error occurred: `password is not correct`. + # Supplementary parameters + { + "username": "admin", + "password": "admin123" + } + # Supplementary parameter description + The user wants to use the admin user and the admin123 password to connect to the MySQL database. 
+ # Output + ```json + { + "host": "192.0.0.1", + "port": 3306, + "username": "admin", + "password": "admin123" + } + ``` + # Tool + + {{tool_name}} + {{tool_description}} + + # Tool input schema + {{input_schema}} + # Tool current input parameters + {{input_params}} + # Runtime error + {{error_message}} + # Supplementary parameters + {{params}} + # Supplementary parameter descriptions + {{params_description}} + # Output + """ + ), +} +FINAL_ANSWER: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + 综合理解计划执行结果和背景信息,向用户报告目标的完成情况。 + + # 注意 + 1.输出的图片链接需要设置高为400px,且使用html的img标签进行展示,不能直接输出链接。 + 1.1 例如:图片描述(可选) + 2.不要输出模型相关的信息,例如“作为一个AI模型,我无法...”等。 + # 用户目标 + + {{goal}} + + # 计划执行情况 + + 为了完成上述目标,你实施了以下计划: + + {{memory}} + + # 其他背景信息: + + {{status}} + + # 现在,请根据以上信息,向用户报告目标的完成情况: + + """ + ), + LanguageType.ENGLISH: dedent( + r""" + Comprehensively understand the plan execution results and background information, and report the goal completion status to the user. + + # Note + 1. The output image link needs to be set to a height of 400px and displayed using the HTML img tag, not directly outputting the link. + 1.1 For example: Image description (optional) + 2. Do not output model-related information, such as "As an AI model, I cannot..." etc. 
+ # User Goal + + {{goal}} + + # Plan Execution Status + + To achieve the above goal, you implemented the following plan: + + {{memory}} + + # Additional Background Information: + + {{status}} + + # Now, based on the above information, report the goal completion status to the user: + + """ + ), +} + +MEMORY_TEMPLATE: dict[LanguageType, str] = { + LanguageType.CHINESE: dedent( + r""" + {% for ctx in context_list %} + - 第{{loop.index}}步:{{ctx.step_description}} + - 调用工具 `{{ctx.step_id}}`,并提供参数 `{{ctx.input_data}}` + - 执行状态:{{ctx.status}} + - 得到数据:`{{ctx.output_data}}` + {% endfor %} + """ + ), + LanguageType.ENGLISH: dedent( + r""" + {% for ctx in context_list %} + - Step {{loop.index}}: {{ctx.step_description}} + - Call the tool `{{ctx.step_id}}` and provide the parameter `{{ctx.input_data}}` + - Execution status: {{ctx.status}} + - Receive data: `{{ctx.output_data}}` + {% endfor %} + """ + ), +} diff --git a/apps/scheduler/mcp_agent/schema.py b/apps/scheduler/mcp_agent/schema.py deleted file mode 100644 index 614139074382daf128a19a320c949e5e46803c4d..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/schema.py +++ /dev/null @@ -1,148 +0,0 @@ -"""MCP Agent执行数据结构""" -from typing import Any, Self - -from pydantic import BaseModel, Field - -from apps.schemas.enum_var import Role - - -class Function(BaseModel): - """工具函数""" - - name: str - arguments: dict[str, Any] - - -class ToolCall(BaseModel): - """Represents a tool/function call in a message""" - - id: str - type: str = "function" - function: Function - - -class Message(BaseModel): - """Represents a chat message in the conversation""" - - role: Role = Field(...) 
- content: str | None = Field(default=None) - tool_calls: list[ToolCall] | None = Field(default=None) - name: str | None = Field(default=None) - tool_call_id: str | None = Field(default=None) - - def __add__(self, other) -> list["Message"]: - """支持 Message + list 或 Message + Message 的操作""" - if isinstance(other, list): - return [self] + other - elif isinstance(other, Message): - return [self, other] - else: - raise TypeError( - f"unsupported operand type(s) for +: '{type(self).__name__}' and '{type(other).__name__}'" - ) - - def __radd__(self, other) -> list["Message"]: - """支持 list + Message 的操作""" - if isinstance(other, list): - return other + [self] - else: - raise TypeError( - f"unsupported operand type(s) for +: '{type(other).__name__}' and '{type(self).__name__}'" - ) - - def to_dict(self) -> dict: - """Convert message to dictionary format""" - message = {"role": self.role} - if self.content is not None: - message["content"] = self.content - if self.tool_calls is not None: - message["tool_calls"] = [tool_call.dict() for tool_call in self.tool_calls] - if self.name is not None: - message["name"] = self.name - if self.tool_call_id is not None: - message["tool_call_id"] = self.tool_call_id - return message - - @classmethod - def user_message(cls, content: str) -> Self: - """Create a user message""" - return cls(role=Role.USER, content=content) - - @classmethod - def system_message(cls, content: str) -> Self: - """Create a system message""" - return cls(role=Role.SYSTEM, content=content) - - @classmethod - def assistant_message( - cls, content: str | None = None, - ) -> Self: - """Create an assistant message""" - return cls(role=Role.ASSISTANT, content=content) - - @classmethod - def tool_message( - cls, content: str, name: str, tool_call_id: str, - ) -> Self: - """Create a tool message""" - return cls( - role=Role.TOOL, - content=content, - name=name, - tool_call_id=tool_call_id, - ) - - @classmethod - def from_tool_calls( - cls, - tool_calls: list[Any], - 
content: str | list[str] = "", - **kwargs, # noqa: ANN003 - ) -> Self: - """Create ToolCallsMessage from raw tool calls. - - Args: - tool_calls: Raw tool calls from LLM - content: Optional message content - """ - formatted_calls = [ - {"id": call.id, "function": call.function.model_dump(), "type": "function"} - for call in tool_calls - ] - return cls( - role=Role.ASSISTANT, - content=content, - tool_calls=formatted_calls, - **kwargs, - ) - - -class Memory(BaseModel): - messages: list[Message] = Field(default_factory=list) - max_messages: int = Field(default=100) - - def add_message(self, message: Message) -> None: - """Add a message to memory""" - self.messages.append(message) - # Optional: Implement message limit - if len(self.messages) > self.max_messages: - self.messages = self.messages[-self.max_messages:] - - def add_messages(self, messages: list[Message]) -> None: - """Add multiple messages to memory""" - self.messages.extend(messages) - # Optional: Implement message limit - if len(self.messages) > self.max_messages: - self.messages = self.messages[-self.max_messages:] - - def clear(self) -> None: - """Clear all messages""" - self.messages.clear() - - def get_recent_messages(self, n: int) -> list[Message]: - """Get n most recent messages""" - return self.messages[-n:] - - def to_dict_list(self) -> list[dict]: - """Convert messages to list of dicts""" - return [msg.to_dict() for msg in self.messages] diff --git a/apps/scheduler/mcp_agent/select.py b/apps/scheduler/mcp_agent/select.py new file mode 100644 index 0000000000000000000000000000000000000000..a62af7ce8a28c285403408c00bcb2b4aa57bbfa4 --- /dev/null +++ b/apps/scheduler/mcp_agent/select.py @@ -0,0 +1,127 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
+"""选择MCP Server及其工具""" + +import logging +import random + +from jinja2 import BaseLoader +from jinja2.sandbox import SandboxedEnvironment + +from apps.llm.reasoning import ReasoningLLM +from apps.llm.token import TokenCalculator +from apps.scheduler.mcp_agent.base import MCPBase +from apps.scheduler.mcp_agent.prompt import TOOL_SELECT +from apps.schemas.mcp import MCPTool, MCPToolIdsSelectResult +from apps.schemas.enum_var import LanguageType + +logger = logging.getLogger(__name__) + +_env = SandboxedEnvironment( + loader=BaseLoader, + autoescape=True, + trim_blocks=True, + lstrip_blocks=True, +) + +FINAL_TOOL_ID = "FIANL" +SUMMARIZE_TOOL_ID = "SUMMARIZE" +SELF_DESC_TOOL_ID = "SELF_DESC" + + +class MCPSelector(MCPBase): + """MCP选择器""" + + @staticmethod + async def select_top_tool( + goal: str, + tool_list: list[MCPTool], + additional_info: str | None = None, + top_n: int | None = None, + reasoning_llm: ReasoningLLM | None = None, + language: LanguageType = LanguageType.CHINESE, + ) -> list[MCPTool]: + """选择最合适的工具""" + random.shuffle(tool_list) + max_tokens = reasoning_llm._config.max_tokens + template = _env.from_string(TOOL_SELECT[language]) + token_calculator = TokenCalculator() + if ( + token_calculator.calculate_token_length( + messages=[ + { + "role": "user", + "content": template.render(goal=goal, tools=[], additional_info=additional_info), + } + ], + pure_text=True, + ) + > max_tokens + ): + logger.warning("[MCPSelector] 工具选择模板长度超过最大令牌数,无法进行选择") + return [] + current_index = 0 + tool_ids = [] + while current_index < len(tool_list): + index = current_index + sub_tools = [] + while index < len(tool_list): + tool = tool_list[index] + tokens = token_calculator.calculate_token_length( + messages=[ + { + "role": "user", + "content": template.render( + goal=goal, tools=[tool], additional_info=additional_info + ), + } + ], + pure_text=True, + ) + if tokens > max_tokens: + continue + sub_tools.append(tool) + + tokens = token_calculator.calculate_token_length( + 
messages=[ + { + "role": "user", + "content": template.render( + goal=goal, tools=sub_tools, additional_info=additional_info + ), + }, + ], + pure_text=True, + ) + if tokens > max_tokens: + del sub_tools[-1] + break + else: + index += 1 + current_index = index + if sub_tools: + schema = MCPToolIdsSelectResult.model_json_schema() + if "items" not in schema["properties"]["tool_ids"]: + schema["properties"]["tool_ids"]["items"] = {} + # 将enum添加到items中,限制数组元素的可选值 + schema["properties"]["tool_ids"]["items"]["enum"] = [tool.id for tool in sub_tools] + result = await MCPSelector.get_resoning_result( + template.render(goal=goal, tools=sub_tools, additional_info="请根据目标选择对应的工具"), + reasoning_llm, + ) + result = await MCPSelector._parse_result(result, schema) + try: + result = MCPToolIdsSelectResult.model_validate(result) + tool_ids.extend(result.tool_ids) + except Exception: + logger.exception("[MCPSelector] 解析MCP工具ID选择结果失败") + continue + mcp_tools = [tool for tool in tool_list if tool.id in tool_ids] + + if top_n is not None: + mcp_tools = mcp_tools[:top_n] + mcp_tools.append( + MCPTool(id=FINAL_TOOL_ID, name="Final", description="终止", mcp_id=FINAL_TOOL_ID, input_schema={}) + ) + # mcp_tools.append(MCPTool(id=SUMMARIZE_TOOL_ID, name="Summarize", + # description="总结工具", mcp_id=SUMMARIZE_TOOL_ID, input_schema={})) + return mcp_tools diff --git a/apps/scheduler/mcp_agent/tool/__init__.py b/apps/scheduler/mcp_agent/tool/__init__.py deleted file mode 100644 index 4593f31742fee21b2e3ec1c7c18ff8e3cfea2110..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/tool/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from apps.scheduler.mcp_agent.tool.base import BaseTool -from apps.scheduler.mcp_agent.tool.terminate import Terminate -from apps.scheduler.mcp_agent.tool.tool_collection import ToolCollection - -__all__ = [ - "BaseTool", - "Terminate", - "ToolCollection", -] diff --git a/apps/scheduler/mcp_agent/tool/base.py b/apps/scheduler/mcp_agent/tool/base.py deleted file 
mode 100644 index 04ad45c47a3eecb25efdf5b2ce52beb6965b2fbd..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/tool/base.py +++ /dev/null @@ -1,73 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, Dict, Optional - -from pydantic import BaseModel, Field - - -class BaseTool(ABC, BaseModel): - name: str - description: str - parameters: Optional[dict] = None - - class Config: - arbitrary_types_allowed = True - - async def __call__(self, **kwargs) -> Any: - return await self.execute(**kwargs) - - @abstractmethod - async def execute(self, **kwargs) -> Any: - """使用给定的参数执行工具""" - - def to_param(self) -> Dict: - """将工具转换为函数调用格式""" - return { - "type": "function", - "function": { - "name": self.name, - "description": self.description, - "parameters": self.parameters, - }, - } - - -class ToolResult(BaseModel): - """表示工具执行的结果""" - - output: Any = Field(default=None) - error: Optional[str] = Field(default=None) - system: Optional[str] = Field(default=None) - - class Config: - arbitrary_types_allowed = True - - def __bool__(self): - return any(getattr(self, field) for field in self.__fields__) - - def __add__(self, other: "ToolResult"): - def combine_fields( - field: Optional[str], other_field: Optional[str], concatenate: bool = True - ): - if field and other_field: - if concatenate: - return field + other_field - raise ValueError("Cannot combine tool results") - return field or other_field - - return ToolResult( - output=combine_fields(self.output, other.output), - error=combine_fields(self.error, other.error), - system=combine_fields(self.system, other.system), - ) - - def __str__(self): - return f"Error: {self.error}" if self.error else self.output - - def replace(self, **kwargs): - """返回一个新的ToolResult,其中替换了给定的字段""" - # return self.copy(update=kwargs) - return type(self)(**{**self.dict(), **kwargs}) - - -class ToolFailure(ToolResult): - """表示失败的ToolResult""" diff --git a/apps/scheduler/mcp_agent/tool/terminate.py 
b/apps/scheduler/mcp_agent/tool/terminate.py deleted file mode 100644 index 84aa120316de1123f985eebfd82c471e8ceec990..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/tool/terminate.py +++ /dev/null @@ -1,25 +0,0 @@ -from apps.scheduler.mcp_agent.tool.base import BaseTool - - -_TERMINATE_DESCRIPTION = """当请求得到满足或助理无法继续处理任务时,终止交互。 -当您完成所有任务后,调用此工具结束工作。""" - - -class Terminate(BaseTool): - name: str = "terminate" - description: str = _TERMINATE_DESCRIPTION - parameters: dict = { - "type": "object", - "properties": { - "status": { - "type": "string", - "description": "交互的完成状态", - "enum": ["success", "failure"], - } - }, - "required": ["status"], - } - - async def execute(self, status: str) -> str: - """Finish the current execution""" - return f"交互已完成,状态为: {status}" diff --git a/apps/scheduler/mcp_agent/tool/tool_collection.py b/apps/scheduler/mcp_agent/tool/tool_collection.py deleted file mode 100644 index 95bda317805abdecc256af0737091b46adf77b1a..0000000000000000000000000000000000000000 --- a/apps/scheduler/mcp_agent/tool/tool_collection.py +++ /dev/null @@ -1,55 +0,0 @@ -"""用于管理多个工具的集合类""" -import logging -from typing import Any - -from apps.scheduler.mcp_agent.tool.base import BaseTool, ToolFailure, ToolResult - -logger = logging.getLogger(__name__) - - -class ToolCollection: - """定义工具的集合""" - - class Config: - arbitrary_types_allowed = True - - def __init__(self, *tools: BaseTool): - self.tools = tools - self.tool_map = {tool.name: tool for tool in tools} - - def __iter__(self): - return iter(self.tools) - - def to_params(self) -> list[dict[str, Any]]: - return [tool.to_param() for tool in self.tools] - - async def execute( - self, *, name: str, tool_input: dict[str, Any] = None - ) -> ToolResult: - tool = self.tool_map.get(name) - if not tool: - return ToolFailure(error=f"Tool {name} is invalid") - try: - result = await tool(**tool_input) - return result - except Exception as e: - return ToolFailure(error=f"Failed to execute tool {name}: 
{e}") - - def add_tool(self, tool: BaseTool): - """ - 将单个工具添加到集合中。 - - 如果已存在同名工具,则将跳过该工具并记录警告。 - """ - if tool.name in self.tool_map: - logger.warning(f"Tool {tool.name} already exists in collection, skipping") - return self - - self.tools += (tool,) - self.tool_map[tool.name] = tool - return self - - def add_tools(self, *tools: BaseTool): - for tool in tools: - self.add_tool(tool) - return self diff --git a/apps/scheduler/pool/loader/app.py b/apps/scheduler/pool/loader/app.py index a85395bfc7723af9d009a1945fe4617cf390b938..aaca569d20fc0985233d2db5063a68869f788d85 100644 --- a/apps/scheduler/pool/loader/app.py +++ b/apps/scheduler/pool/loader/app.py @@ -102,7 +102,6 @@ class AppLoader: await file_checker.diff_one(app_path) await self.load(app_id, file_checker.hashes[f"app/{app_id}"]) - @staticmethod async def delete(app_id: str, *, is_reload: bool = False) -> None: """ @@ -157,5 +156,7 @@ class AppLoader: }, upsert=True, ) + app_pool = await app_collection.find_one({"_id": metadata.id}) + logger.error(f"[AppLoader] 更新 MongoDB 成功: {app_pool}") except Exception: logger.exception("[AppLoader] 更新 MongoDB 失败") diff --git a/apps/scheduler/pool/loader/flow.py b/apps/scheduler/pool/loader/flow.py index 57344d40a5c1aba0454a7f00fffcfbf5a93ee221..75e894ec4a2158e16eb5a215886de87391043e86 100644 --- a/apps/scheduler/pool/loader/flow.py +++ b/apps/scheduler/pool/loader/flow.py @@ -11,7 +11,7 @@ import yaml from anyio import Path from apps.common.config import Config -from apps.schemas.enum_var import EdgeType +from apps.schemas.enum_var import NodeType,EdgeType from apps.schemas.flow import AppFlow, Flow from apps.schemas.pool import AppPool from apps.models.vector import FlowPoolVector @@ -77,25 +77,18 @@ class FlowLoader: err = f"[FlowLoader] 步骤名称不能以下划线开头:{key}" logger.error(err) raise ValueError(err) - if key == "start": - step["name"] = "开始" - step["description"] = "开始节点" - step["type"] = "start" - elif key == "end": - step["name"] = "结束" - step["description"] = "结束节点" - 
step["type"] = "end" - else: - try: - step["type"] = await NodeManager.get_node_call_id(step["node"]) - except ValueError as e: - logger.warning("[FlowLoader] 获取节点call_id失败:%s,错误信息:%s", step["node"], e) - step["type"] = "Empty" - step["name"] = ( - (await NodeManager.get_node_name(step["node"])) - if "name" not in step or step["name"] == "" - else step["name"] - ) + if step["type"]==NodeType.START.value or step["type"]==NodeType.END.value: + continue + try: + step["type"] = await NodeManager.get_node_call_id(step["node"]) + except ValueError as e: + logger.warning("[FlowLoader] 获取节点call_id失败:%s,错误信息:%s", step["node"], e) + step["type"] = "Empty" + step["name"] = ( + (await NodeManager.get_node_name(step["node"])) + if "name" not in step or step["name"] == "" + else step["name"] + ) return flow_yaml async def load(self, app_id: str, flow_id: str) -> Flow | None: diff --git a/apps/scheduler/pool/loader/mcp.py b/apps/scheduler/pool/loader/mcp.py index 1463d0a1a9141bca271e29d7387759748aa8bd43..bb4cb8a10408118d7614145259e1ef0fd9de8d46 100644 --- a/apps/scheduler/pool/loader/mcp.py +++ b/apps/scheduler/pool/loader/mcp.py @@ -11,6 +11,7 @@ import shutil import asyncer from anyio import Path from sqids.sqids import Sqids +from typing import Any from apps.common.lance import LanceDB from apps.common.mongo import MongoDB @@ -91,48 +92,47 @@ class MCPLoader(metaclass=SingletonMeta): :param MCPServerConfig config: MCP配置 :return: 无 """ - if not config.config.auto_install: - print(f"[Installer] MCP模板无需安装: {mcp_id}") # noqa: T201 - - elif isinstance(config.config, MCPServerStdioConfig): - print(f"[Installer] Stdio方式的MCP模板,开始自动安装: {mcp_id}") # noqa: T201 - if "uv" in config.config.command: - new_config = await install_uvx(mcp_id, config.config) - elif "npx" in config.config.command: - new_config = await install_npx(mcp_id, config.config) - - if new_config is None: - logger.error("[MCPLoader] MCP模板安装失败: %s", mcp_id) - await MCPLoader.update_template_status(mcp_id, 
MCPInstallStatus.FAILED) - return - - config.config = new_config - - # 重新保存config - template_config = MCP_PATH / "template" / mcp_id / "config.json" - f = await template_config.open("w+", encoding="utf-8") - config_data = config.model_dump(by_alias=True, exclude_none=True) - await f.write(json.dumps(config_data, indent=4, ensure_ascii=False)) - await f.aclose() - - else: - print(f"[Installer] SSE/StreamableHTTP方式的MCP模板,无需安装: {mcp_id}") # noqa: T201 - config.config.auto_install = False - - print(f"[Installer] MCP模板安装成功: {mcp_id}") # noqa: T201 - await MCPLoader.update_template_status(mcp_id, MCPInstallStatus.READY) - await MCPLoader._insert_template_tool(mcp_id, config) + try: + if not config.config.auto_install: + print(f"[Installer] MCP模板无需安装: {mcp_id}") # noqa: T201 + elif isinstance(config.config, MCPServerStdioConfig): + print(f"[Installer] Stdio方式的MCP模板,开始自动安装: {mcp_id}") # noqa: T201 + if "uv" in config.config.command: + new_config = await install_uvx(mcp_id, config.config) + elif "npx" in config.config.command: + new_config = await install_npx(mcp_id, config.config) + + if new_config is None: + logger.error("[MCPLoader] MCP模板安装失败: %s", mcp_id) + await MCPLoader.update_template_status(mcp_id, MCPInstallStatus.FAILED) + return + + config.config = new_config + + # 重新保存config + template_config = MCP_PATH / "template" / mcp_id / "config.json" + f = await template_config.open("w+", encoding="utf-8") + config_data = config.model_dump(by_alias=True, exclude_none=True) + await f.write(json.dumps(config_data, indent=4, ensure_ascii=False)) + await f.aclose() + + else: + logger.info(f"[Installer] SSE/StreamableHTTP方式的MCP模板,无需安装: {mcp_id}") # noqa: T201 + config.config.auto_install = False + + await MCPLoader._insert_template_tool(mcp_id, config) + await MCPLoader.update_template_status(mcp_id, MCPInstallStatus.READY) + logger.info(f"[Installer] MCP模板安装成功: {mcp_id}") # noqa: T201 + except Exception as e: + logger.error("[MCPLoader] MCP模板安装失败: %s, 错误: %s", mcp_id, e) + 
await MCPLoader.update_template_status(mcp_id, MCPInstallStatus.FAILED) + raise @staticmethod - async def init_one_template(mcp_id: str, config: MCPServerConfig) -> None: + async def clear_ready_or_failed_mcp_installation() -> None: """ - 初始化单个MCP模板 - - :param str mcp_id: MCP模板ID - :param MCPServerConfig config: MCP配置 - :return: 无 + 清除状态为ready或failed的MCP安装任务 """ - # 删除完成或者失败的MCP安装任务 mcp_collection = MongoDB().get_collection("mcp") mcp_ids = ProcessHandler.get_all_task_ids() # 检索_id在mcp_ids且状态为ready或者failed的MCP的内容 @@ -147,48 +147,52 @@ class MCPLoader(metaclass=SingletonMeta): continue ProcessHandler.remove_task(item.id) logger.info("[MCPLoader] 删除已完成或失败的MCP安装进程: %s", item.id) - # 插入数据库;这里用旧的config就可以 - await MCPLoader._insert_template_db(mcp_id, config) + + @staticmethod + async def init_one_template(mcp_id: str, config: MCPServerConfig) -> None: + """ + 初始化单个MCP模板 + + :param str mcp_id: MCP模板ID + :param MCPServerConfig config: MCP配置 + :return: 无 + """ + await MCPLoader.clear_ready_or_failed_mcp_installation() # 检查目录 template_path = MCP_PATH / "template" / mcp_id await Path.mkdir(template_path, parents=True, exist_ok=True) # 安装MCP模板 + ProcessHandler.remove_task(mcp_id) if not ProcessHandler.add_task(mcp_id, MCPLoader._install_template_task, mcp_id, config): err = f"安装任务无法执行,请稍后重试: {mcp_id}" logger.error(err) raise RuntimeError(err) + # 将installing状态的安装任务的状态变为cancelled @staticmethod - async def _init_all_template() -> None: + async def cancel_all_installing_task() -> None: """ - 初始化所有MCP模板 - - 遍历 ``template`` 目录下的所有MCP模板,并初始化。在Framework启动时进行此流程,确保所有MCP均可正常使用。 - 这一过程会与数据库内的条目进行对比,若发生修改,则重新创建数据库条目。 + 取消正在安装的MCP模板任务 """ template_path = MCP_PATH / "template" logger.info("[MCPLoader] 初始化所有MCP模板: %s", template_path) - + mongo = MongoDB() + mcp_collection = mongo.get_collection("mcp") # 遍历所有模板 + mcp_ids = [] async for mcp_dir in template_path.iterdir(): # 不是目录 if not await mcp_dir.is_dir(): logger.warning("[MCPLoader] 跳过非目录: %s", mcp_dir.as_posix()) continue - # 检查配置文件是否存在 
- config_path = mcp_dir / "config.json" - if not await config_path.exists(): - logger.warning("[MCPLoader] 跳过没有配置文件的MCP模板: %s", mcp_dir.as_posix()) - continue - - # 读取配置并加载 - config = await MCPLoader._load_config(config_path) - - # 初始化第一个MCP Server - logger.info("[MCPLoader] 初始化MCP模板: %s", mcp_dir.as_posix()) - await MCPLoader.init_one_template(mcp_dir.name, config) + mcp_ids.append(mcp_dir.name) + # 更新数据库状态 + await mcp_collection.update_many( + {"_id": {"$in": mcp_ids}, "status": MCPInstallStatus.INSTALLING}, + {"$set": {"status": MCPInstallStatus.CANCELLED}}, + ) @staticmethod async def _get_template_tool( @@ -263,6 +267,12 @@ class MCPLoader(metaclass=SingletonMeta): # 基本信息插入数据库 mcp_collection = MongoDB().get_collection("mcp") + # 清空当前工具列表 + await mcp_collection.update_one( + {"_id": mcp_id}, + {"$set": {"tools": []}}, + upsert=True, + ) await mcp_collection.update_one( {"_id": mcp_id}, { @@ -345,14 +355,15 @@ class MCPLoader(metaclass=SingletonMeta): :return: 图标 :rtype: str """ - icon_path = MCP_PATH / "template" / mcp_id / "icon.png" + icon_path = MCP_PATH / "template" / mcp_id / "icon" / f"{mcp_id}.png" if not await icon_path.exists(): logger.warning("[MCPLoader] MCP模板图标不存在: %s", mcp_id) return "" f = await icon_path.open("rb") icon = await f.read() await f.aclose() - return base64.b64encode(icon).decode("utf-8") + header = "data:image/png;base64," + return header + base64.b64encode(icon).decode("utf-8") @staticmethod async def get_config(mcp_id: str) -> MCPServerConfig: @@ -384,6 +395,7 @@ class MCPLoader(metaclass=SingletonMeta): # 更新数据库 mongo = MongoDB() mcp_collection = mongo.get_collection("mcp") + logger.info("[MCPLoader] 更新MCP模板状态: %s -> %s", mcp_id, status) await mcp_collection.update_one( {"_id": mcp_id}, {"$set": {"status": status}}, @@ -391,7 +403,7 @@ class MCPLoader(metaclass=SingletonMeta): ) @staticmethod - async def user_active_template(user_sub: str, mcp_id: str) -> None: + async def user_active_template(user_sub: str, mcp_id: str, mcp_env: 
dict[str, Any] | None = None) -> None: """ 用户激活MCP模板 @@ -409,7 +421,7 @@ class MCPLoader(metaclass=SingletonMeta): if await user_path.exists(): err = f"MCP模板“{mcp_id}”已存在或有同名文件,无法激活" raise FileExistsError(err) - + mcp_config = await MCPLoader.get_config(mcp_id) # 拷贝文件 await asyncer.asyncify(shutil.copytree)( template_path.as_posix(), @@ -417,7 +429,35 @@ class MCPLoader(metaclass=SingletonMeta): dirs_exist_ok=True, symlinks=True, ) - + if mcp_env is not None: + if mcp_config.type == MCPType.STDIO: + mcp_config.config.env.update(mcp_env) + else: + mcp_config.config.headers.update(mcp_env) + if mcp_config.type == MCPType.STDIO: + index = None + for i in range(len(mcp_config.config.args)): + if mcp_config.config.args[i] == "--directory": + index = i + 1 + break + if index is not None: + if index < len(mcp_config.config.args): + mcp_config.config.args[index] = str(user_path)+'/project' + else: + mcp_config.config.args.append(str(user_path)+'/project') + else: + mcp_config.config.args = ["--directory", str(user_path)+'/project'] + mcp_config.config.args + user_config_path = user_path / "config.json" + # 更新用户配置 + f = await user_config_path.open("w", encoding="utf-8", errors="ignore") + await f.write( + json.dumps( + mcp_config.model_dump(by_alias=True, exclude_none=True), + indent=4, + ensure_ascii=False, + ) + ) + await f.aclose() # 更新数据库 mongo = MongoDB() mcp_collection = mongo.get_collection("mcp") @@ -468,6 +508,26 @@ class MCPLoader(metaclass=SingletonMeta): logger.info("[MCPLoader] 这些MCP在文件系统中被删除: %s", deleted_mcp_list) return deleted_mcp_list + @staticmethod + async def cancel_installing_task(cancel_mcp_list: list[str]) -> None: + """ + 取消正在安装的MCP模板任务 + + :param list[str] cancel_mcp_list: 需要取消的MCP列表 + :return: 无 + """ + mongo = MongoDB() + mcp_collection = mongo.get_collection("mcp") + # 更新数据库状态 + cancel_mcp_list = await mcp_collection.distinct("_id", {"_id": {"$in": cancel_mcp_list}, "status": MCPInstallStatus.INSTALLING}) + await mcp_collection.update_many( + 
{"_id": {"$in": cancel_mcp_list}, "status": MCPInstallStatus.INSTALLING}, + {"$set": {"status": MCPInstallStatus.CANCELLED}}, + ) + for mcp_id in cancel_mcp_list: + ProcessHandler.remove_task(mcp_id) + logger.info("[MCPLoader] 取消这些正在安装的MCP模板任务: %s", cancel_mcp_list) + @staticmethod async def remove_deleted_mcp(deleted_mcp_list: list[str]) -> None: """ @@ -573,8 +633,8 @@ class MCPLoader(metaclass=SingletonMeta): # 检查目录 await MCPLoader._check_dir() - # 初始化所有模板 - await MCPLoader._init_all_template() + # 暂停所有安装任务 + await MCPLoader.cancel_all_installing_task() # 加载用户MCP await MCPLoader._load_user_mcp() diff --git a/apps/scheduler/pool/loader/service.py b/apps/scheduler/pool/loader/service.py index 2b9060461fc0f3baaece19e88c71776449e9752a..2d84069c434de99e45156452a1e5156ba03b2b3e 100644 --- a/apps/scheduler/pool/loader/service.py +++ b/apps/scheduler/pool/loader/service.py @@ -3,6 +3,7 @@ import asyncio import logging +import os import shutil from anyio import Path @@ -30,6 +31,9 @@ class ServiceLoader: """加载单个Service""" service_path = BASE_PATH / service_id # 载入元数据 + if not os.path.exists(service_path / "metadata.yaml"): + logger.error("[ServiceLoader] Service %s 的元数据不存在", service_id) + return metadata = await MetadataLoader().load_one(service_path / "metadata.yaml") if not isinstance(metadata, ServiceMetadata): err = f"[ServiceLoader] 元数据类型错误: {service_path}/metadata.yaml" @@ -48,7 +52,6 @@ class ServiceLoader: # 更新数据库 await self._update_db(nodes, metadata) - async def save(self, service_id: str, metadata: ServiceMetadata, data: dict) -> None: """在文件系统上保存Service,并更新数据库""" service_path = BASE_PATH / service_id @@ -67,7 +70,6 @@ class ServiceLoader: await file_checker.diff_one(service_path) await self.load(service_id, file_checker.hashes[f"service/{service_id}"]) - async def delete(self, service_id: str, *, is_reload: bool = False) -> None: """删除Service,并更新数据库""" mongo = MongoDB() @@ -95,7 +97,6 @@ class ServiceLoader: if await path.exists(): shutil.rmtree(path) - async 
def _update_db(self, nodes: list[NodePool], metadata: ServiceMetadata) -> None: # noqa: C901, PLR0912, PLR0915 """更新数据库""" if not metadata.hashes: @@ -197,4 +198,3 @@ class ServiceLoader: await asyncio.sleep(0.01) else: raise - diff --git a/apps/scheduler/pool/mcp/client.py b/apps/scheduler/pool/mcp/client.py index 092bac8909635a5e0c846dddef3456d8ad3be43c..b672690536bc1f03923de51bef6b1ed88c785a04 100644 --- a/apps/scheduler/pool/mcp/client.py +++ b/apps/scheduler/pool/mcp/client.py @@ -29,6 +29,7 @@ class MCPClient: mcp_id: str task: asyncio.Task ready_sign: asyncio.Event + error_sign: asyncio.Event stop_sign: asyncio.Event client: ClientSession status: MCPStatus @@ -54,9 +55,10 @@ class MCPClient: """ # 创建Client if isinstance(config, MCPServerSSEConfig): + headers = config.headers or {} client = sse_client( url=config.url, - headers=config.env, + headers=headers ) elif isinstance(config, MCPServerStdioConfig): if user_sub: @@ -64,7 +66,6 @@ class MCPClient: else: cwd = MCP_PATH / "template" / mcp_id / "project" await cwd.mkdir(parents=True, exist_ok=True) - client = stdio_client(server=StdioServerParameters( command=config.command, args=config.args, @@ -72,6 +73,7 @@ class MCPClient: cwd=cwd.as_posix(), )) else: + self.error_sign.set() err = f"[MCPClient] MCP {mcp_id}:未知的MCP服务类型“{config.type}”" logger.error(err) raise TypeError(err) @@ -85,23 +87,24 @@ class MCPClient: # 初始化Client await session.initialize() except Exception: + self.error_sign.set() + self.status = MCPStatus.STOPPED logger.exception("[MCPClient] MCP %s:初始化失败", mcp_id) raise self.ready_sign.set() self.status = MCPStatus.RUNNING - # 等待关闭信号 await self.stop_sign.wait() + logger.info("[MCPClient] MCP %s:收到停止信号,正在关闭", mcp_id) # 关闭Client try: - await exit_stack.aclose() # type: ignore[attr-defined] + await exit_stack.aclose() # type: ignore[attr-defined] self.status = MCPStatus.STOPPED except Exception: logger.exception("[MCPClient] MCP %s:关闭失败", mcp_id) - async def init(self, user_sub: str | None, 
mcp_id: str, config: MCPServerSSEConfig | MCPServerStdioConfig) -> None: """ 初始化 MCP Client类 @@ -116,27 +119,34 @@ class MCPClient: # 初始化变量 self.mcp_id = mcp_id self.ready_sign = asyncio.Event() + self.error_sign = asyncio.Event() self.stop_sign = asyncio.Event() # 创建协程 self.task = asyncio.create_task(self._main_loop(user_sub, mcp_id, config)) # 等待初始化完成 - await self.ready_sign.wait() - - # 获取工具列表 + done, pending = await asyncio.wait( + [asyncio.create_task(self.ready_sign.wait()), + asyncio.create_task(self.error_sign.wait())], + return_when=asyncio.FIRST_COMPLETED + ) + if self.error_sign.is_set(): + self.status = MCPStatus.ERROR + logger.error("[MCPClient] MCP %s:初始化失败", mcp_id) + raise Exception(f"MCP {mcp_id} 初始化失败") + + # 获取工具列表 self.tools = (await self.client.list_tools()).tools - async def call_tool(self, tool_name: str, params: dict) -> "CallToolResult": """调用MCP Server的工具""" return await self.client.call_tool(tool_name, params) - async def stop(self) -> None: """停止MCP Client""" self.stop_sign.set() try: await self.task - except Exception: - logger.exception("[MCPClient] MCP %s:停止失败", self.mcp_id) + except Exception as e: + logger.warning("[MCPClient] MCP %s:停止时发生异常:%s", self.mcp_id, e) diff --git a/apps/scheduler/pool/mcp/install.py b/apps/scheduler/pool/mcp/install.py index 1b6c3edeb3a042b7716d0d7748e9c3e50b01af74..02392b366e17b59110ce6a467e63daa081e8a427 100644 --- a/apps/scheduler/pool/mcp/install.py +++ b/apps/scheduler/pool/mcp/install.py @@ -3,12 +3,16 @@ from asyncio import subprocess from typing import TYPE_CHECKING - +import logging +import os +import shutil from apps.constants import MCP_PATH if TYPE_CHECKING: from apps.schemas.mcp import MCPServerStdioConfig +logger = logging.getLogger(__name__) + async def install_uvx(mcp_id: str, config: "MCPServerStdioConfig") -> "MCPServerStdioConfig | None": """ @@ -23,27 +27,35 @@ async def install_uvx(mcp_id: str, config: "MCPServerStdioConfig") -> "MCPServer :rtype: MCPServerStdioConfig :raises 
ValueError: 未找到MCP Server对应的Python包 """ - # 创建文件夹 - mcp_path = MCP_PATH / "template" / mcp_id / "project" - await mcp_path.mkdir(parents=True, exist_ok=True) - + uv_path = shutil.which('uv') + if uv_path is None: + error = "[Installer] 未找到uv命令,请先安装uv包管理器: pip install uv" + logging.error(error) + raise Exception(error) # 找到包名 - package = "" + package = None for arg in config.args: - if not arg.startswith("-"): + if not arg.startswith("-") and arg != "run": package = arg break - + logger.error(f"[Installer] MCP包名: {package}") if not package: print("[Installer] 未找到包名") # noqa: T201 return None - + # 创建文件夹 + mcp_path = MCP_PATH / "template" / mcp_id / "project" + logger.error(f"[Installer] MCP安装路径: {mcp_path}") + await mcp_path.mkdir(parents=True, exist_ok=True) # 如果有pyproject.toml文件,则使用sync + flag = await (mcp_path / "pyproject.toml").exists() + logger.error(f"[Installer] MCP安装标志: {flag}") if await (mcp_path / "pyproject.toml").exists(): + shell_command = f"{uv_path} venv; {uv_path} sync --index-url https://pypi.tuna.tsinghua.edu.cn/simple --active --no-install-project --no-cache" + logger.error(f"[Installer] MCP安装命令: {shell_command}") pipe = await subprocess.create_subprocess_shell( ( - "uv venv; " - "uv sync --index-url https://pypi.tuna.tsinghua.edu.cn/simple --active " + f"{uv_path} venv; " + f"{uv_path} sync --index-url https://pypi.tuna.tsinghua.edu.cn/simple --active " "--no-install-project --no-cache" ), stdout=subprocess.PIPE, @@ -57,19 +69,20 @@ async def install_uvx(mcp_id: str, config: "MCPServerStdioConfig") -> "MCPServer return None print(f"[Installer] 检查依赖成功: {mcp_path}; {stdout.decode() if stdout else '(无输出信息)'}") # noqa: T201 - config.command = "uv" - config.args = ["run", *config.args] + config.command = uv_path + if "run" not in config.args: + config.args = ["run", *config.args] config.auto_install = False - + logger.error(f"[Installer] MCP安装配置更新成功: {config}") return config # 否则,初始化uv项目 pipe = await subprocess.create_subprocess_shell( ( - f"uv init; 
" - f"uv venv; " - f"uv add --index-url https://pypi.tuna.tsinghua.edu.cn/simple {package}; " - f"uv sync --index-url https://pypi.tuna.tsinghua.edu.cn/simple --active " + f"{uv_path} init; " + f"{uv_path} venv; " + f"{uv_path} add --index-url https://pypi.tuna.tsinghua.edu.cn/simple {package}; " + f"{uv_path} sync --index-url https://pypi.tuna.tsinghua.edu.cn/simple --active " f"--no-install-project --no-cache" ), stdout=subprocess.PIPE, @@ -84,8 +97,9 @@ async def install_uvx(mcp_id: str, config: "MCPServerStdioConfig") -> "MCPServer print(f"[Installer] 安装 {package} 成功: {mcp_path}; {stdout.decode() if stdout else '(无输出信息)'}") # noqa: T201 # 更新配置 - config.command = "uv" - config.args = ["run", *config.args] + config.command = uv_path + if "run" not in config.args: + config.args = ["run", *config.args] config.auto_install = False return config @@ -103,17 +117,13 @@ async def install_npx(mcp_id: str, config: "MCPServerStdioConfig") -> "MCPServer :rtype: MCPServerStdioConfig :raises ValueError: 未找到MCP Server对应的npm包 """ - mcp_path = MCP_PATH / "template" / mcp_id / "project" - await mcp_path.mkdir(parents=True, exist_ok=True) - - # 如果有node_modules文件夹,则认为已安装 - if await (mcp_path / "node_modules").exists(): - config.command = "npm" - config.args = ["exec", *config.args] - return config - + npm_path = shutil.which('npm') + if npm_path is None: + error = "[Installer] 未找到npm命令,请先安装Node.js和npm" + logging.error(error) + raise Exception(error) # 查找package name - package = "" + package = None for arg in config.args: if not arg.startswith("-"): package = arg @@ -122,10 +132,18 @@ async def install_npx(mcp_id: str, config: "MCPServerStdioConfig") -> "MCPServer if not package: print("[Installer] 未找到包名") # noqa: T201 return None + mcp_path = MCP_PATH / "template" / mcp_id / "project" + await mcp_path.mkdir(parents=True, exist_ok=True) + # 如果有node_modules文件夹,则认为已安装 + if await (mcp_path / "node_modules").exists(): + config.command = npm_path + if "exec" not in config.args: + 
config.args = ["exec", *config.args] + return config # 安装NPM包 pipe = await subprocess.create_subprocess_shell( - f"npm install {package}", + f"{npm_path} install {package}", stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mcp_path, @@ -137,8 +155,9 @@ async def install_npx(mcp_id: str, config: "MCPServerStdioConfig") -> "MCPServer print(f"[Installer] 安装 {package} 成功: {mcp_path}; {stdout.decode() if stdout else '(无输出信息)'}") # noqa: T201 # 更新配置 - config.command = "npm" - config.args = ["exec", *config.args] + config.command = npm_path + if "exec" not in config.args: + config.args = ["exec", *config.args] config.auto_install = False return config diff --git a/apps/scheduler/pool/mcp/pool.py b/apps/scheduler/pool/mcp/pool.py index 91cde4d9d6c83fd5088328e12cfbb6bf06d3c7cb..bf0320f429a9ef45864ba6548b1bb28e3d874b59 100644 --- a/apps/scheduler/pool/mcp/pool.py +++ b/apps/scheduler/pool/mcp/pool.py @@ -21,16 +21,13 @@ class MCPPool(metaclass=SingletonMeta): """初始化MCP池""" self.pool = {} - async def _init_mcp(self, mcp_id: str, user_sub: str) -> MCPClient | None: """初始化MCP池""" - mcp_math = MCP_USER_PATH / user_sub / mcp_id / "project" config_path = MCP_USER_PATH / user_sub / mcp_id / "config.json" - - if not await mcp_math.exists() or not await mcp_math.is_dir(): - logger.warning("[MCPPool] 用户 %s 的MCP %s 未激活", user_sub, mcp_id) + flag = (await config_path.exists()) + if not flag: + logger.warning("[MCPPool] 用户 %s 的MCP %s 配置文件不存在", user_sub, mcp_id) return None - config = MCPServerConfig.model_validate_json(await config_path.read_text()) if config.type in (MCPType.SSE, MCPType.STDIO): @@ -40,9 +37,11 @@ class MCPPool(metaclass=SingletonMeta): return None await client.init(user_sub, mcp_id, config.config) + if user_sub not in self.pool: + self.pool[user_sub] = {} + self.pool[user_sub][mcp_id] = client return client - async def _get_from_dict(self, mcp_id: str, user_sub: str) -> MCPClient | None: """从字典中获取MCP客户端""" if user_sub not in self.pool: @@ -53,7 +52,6 @@ class 
MCPPool(metaclass=SingletonMeta): return self.pool[user_sub][mcp_id] - async def _validate_user(self, mcp_id: str, user_sub: str) -> bool: """验证用户是否已激活""" mongo = MongoDB() @@ -61,7 +59,6 @@ class MCPPool(metaclass=SingletonMeta): mcp_db_result = await mcp_collection.find_one({"_id": mcp_id, "activated": user_sub}) return mcp_db_result is not None - async def get(self, mcp_id: str, user_sub: str) -> MCPClient | None: """获取MCP客户端""" item = await self._get_from_dict(mcp_id, user_sub) @@ -83,7 +80,6 @@ class MCPPool(metaclass=SingletonMeta): return item - async def stop(self, mcp_id: str, user_sub: str) -> None: """停止MCP客户端""" await self.pool[user_sub][mcp_id].stop() diff --git a/apps/scheduler/pool/pool.py b/apps/scheduler/pool/pool.py index 7710d24dc102fe02c06d8cbc14f126bd088db2d4..ead552fc2994150137121b103ff013d8ea4d66a5 100644 --- a/apps/scheduler/pool/pool.py +++ b/apps/scheduler/pool/pool.py @@ -60,7 +60,6 @@ class Pool: await Path(root_dir + "mcp").unlink(missing_ok=True) await Path(root_dir + "mcp").mkdir(parents=True, exist_ok=True) - @staticmethod async def init() -> None: """ @@ -121,13 +120,15 @@ class Pool: for app in changed_app: hash_key = Path("app/" + app).as_posix() if hash_key in checker.hashes: - await app_loader.load(app, checker.hashes[hash_key]) - + try: + await app_loader.load(app, checker.hashes[hash_key]) + except Exception as e: + await app_loader.delete(app, is_reload=True) + logger.warning("[Pool] 加载App %s 失败: %s", app, e) # 载入MCP logger.info("[Pool] 载入MCP") await MCPLoader.init() - async def get_flow_metadata(self, app_id: str) -> list[AppFlow]: """从数据库中获取特定App的全部Flow的元数据""" mongo = MongoDB() @@ -145,14 +146,12 @@ class Pool: else: return flow_metadata_list - async def get_flow(self, app_id: str, flow_id: str) -> Flow | None: """从文件系统中获取单个Flow的全部数据""" logger.info("[Pool] 获取工作流 %s", flow_id) flow_loader = FlowLoader() return await flow_loader.load(app_id, flow_id) - async def get_call(self, call_id: str) -> Any: """[Exception] 拿到Call的信息""" 
# 从MongoDB里拿到数据 diff --git a/apps/scheduler/scheduler/context.py b/apps/scheduler/scheduler/context.py index b7088d8da45ad01e94e0cf7208db25c5f56c3b22..6e737314822c7d9c5a9671c8670bd5a38db40e3d 100644 --- a/apps/scheduler/scheduler/context.py +++ b/apps/scheduler/scheduler/context.py @@ -10,6 +10,7 @@ from apps.llm.patterns.facts import Facts from apps.schemas.collection import Document from apps.schemas.enum_var import StepStatus from apps.schemas.record import ( + FlowHistory, Record, RecordContent, RecordDocument, @@ -114,11 +115,14 @@ async def save_data(task: Task, user_sub: str, post_body: RequestData) -> None: used_docs.append( RecordGroupDocument( _id=docs["id"], + author=docs.get("author", ""), + order=docs.get("order", 0), name=docs["name"], abstract=docs.get("abstract", ""), extension=docs.get("extension", ""), size=docs.get("size", 0), associated="answer", + created_at=docs.get("created_at", round(datetime.now(UTC).timestamp(), 3)), ) ) if docs.get("order") is not None: @@ -154,7 +158,6 @@ async def save_data(task: Task, user_sub: str, post_body: RequestData) -> None: facts=task.runtime.facts, data={}, ) - try: # 加密Record数据 encrypt_data, encrypt_config = Security.encrypt(record_content.model_dump_json(by_alias=True)) @@ -185,34 +188,37 @@ async def save_data(task: Task, user_sub: str, post_body: RequestData) -> None: feature={}, ), createdAt=current_time, - flow=[i["_id"] for i in task.context], + flow=FlowHistory( + flow_id=task.state.flow_id, + flow_name=task.state.flow_name, + flow_status=task.state.flow_status, + history_ids=[context.id for context in task.context], + ) ) # 检查是否存在group_id if not await RecordManager.check_group_id(task.ids.group_id, user_sub): - record_group = await RecordManager.create_record_group( - task.ids.group_id, user_sub, post_body.conversation_id, task.id, + record_group_id = await RecordManager.create_record_group( + task.ids.group_id, user_sub, post_body.conversation_id ) - if not record_group: + if not record_group_id: 
logger.error("[Scheduler] 创建问答组失败") return else: - record_group = task.ids.group_id + record_group_id = task.ids.group_id # 修改文件状态 - await DocumentManager.change_doc_status(user_sub, post_body.conversation_id, record_group) + await DocumentManager.change_doc_status(user_sub, post_body.conversation_id, record_group_id) # 保存Record - await RecordManager.insert_record_data_into_record_group(user_sub, record_group, record) + await RecordManager.insert_record_data_into_record_group(user_sub, record_group_id, record) # 保存与答案关联的文件 - await DocumentManager.save_answer_doc(user_sub, record_group, used_docs) + await DocumentManager.save_answer_doc(user_sub, record_group_id, used_docs) if post_body.app and post_body.app.app_id: # 更新最近使用的应用 await AppCenterManager.update_recent_app(user_sub, post_body.app.app_id) - # 若状态为成功,删除Task - if not task.state or task.state.status == StepStatus.SUCCESS: + if not task.state or task.state.flow_status == StepStatus.SUCCESS or task.state.flow_status == StepStatus.ERROR or task.state.flow_status == StepStatus.CANCELLED: await TaskManager.delete_task_by_task_id(task.id) else: - # 更新Task await TaskManager.save_task(task.id, task) diff --git a/apps/scheduler/scheduler/message.py b/apps/scheduler/scheduler/message.py index c89fdd1014ba4714d0777742412e98c0802ca68c..2bd18861789ae2c5f8a909a4831a2fe967045c12 100644 --- a/apps/scheduler/scheduler/message.py +++ b/apps/scheduler/scheduler/message.py @@ -15,6 +15,7 @@ from apps.schemas.message import ( InitContentFeature, TextAddContent, ) +from apps.schemas.enum_var import FlowStatus from apps.schemas.rag_data import RAGEventData, RAGQueryReq from apps.schemas.record import RecordDocument from apps.schemas.task import Task @@ -59,30 +60,34 @@ async def push_init_message( async def push_rag_message( - task: Task, queue: MessageQueue, user_sub: str, llm: LLM, history: list[dict[str, str]], - doc_ids: list[str], - rag_data: RAGQueryReq,) -> Task: + task: Task, + queue: MessageQueue, + user_sub: str, + llm: 
LLM, + history: list[dict[str, str]], + doc_ids: list[str], + rag_data: RAGQueryReq, +) -> None: """推送RAG消息""" full_answer = "" - - async for chunk in RAG.chat_with_llm_base_on_rag(user_sub, llm, history, doc_ids, rag_data): - task, content_obj = await _push_rag_chunk(task, queue, chunk) - if content_obj.event_type == EventType.TEXT_ADD.value: - # 如果是文本消息,直接拼接到答案中 - full_answer += content_obj.content - elif content_obj.event_type == EventType.DOCUMENT_ADD.value: - task.runtime.documents.append({ - "id": content_obj.content.get("id", ""), - "order": content_obj.content.get("order", 0), - "name": content_obj.content.get("name", ""), - "abstract": content_obj.content.get("abstract", ""), - "extension": content_obj.content.get("extension", ""), - "size": content_obj.content.get("size", 0), - }) + try: + async for chunk in RAG.chat_with_llm_base_on_rag( + user_sub, llm, history, doc_ids, rag_data, task.language + ): + task, content_obj = await _push_rag_chunk(task, queue, chunk) + if content_obj.event_type == EventType.TEXT_ADD.value: + # 如果是文本消息,直接拼接到答案中 + full_answer += content_obj.content + elif content_obj.event_type == EventType.DOCUMENT_ADD.value: + task.runtime.documents.append(content_obj.content) + task.state.flow_status = FlowStatus.SUCCESS + except Exception as e: + logger.error(f"[Scheduler] RAG服务发生错误: {e}") + task.state.flow_status = FlowStatus.ERROR # 保存答案 task.runtime.answer = full_answer + task.tokens.full_time = round(datetime.now(UTC).timestamp(), 2) - task.tokens.time await TaskManager.save_task(task.id, task) - return task async def _push_rag_chunk(task: Task, queue: MessageQueue, content: str) -> tuple[Task, RAGEventData]: @@ -115,10 +120,12 @@ async def _push_rag_chunk(task: Task, queue: MessageQueue, content: str) -> tupl data=DocumentAddContent( documentId=content_obj.content.get("id", ""), documentOrder=content_obj.content.get("order", 0), + documentAuthor=content_obj.content.get("author", ""), documentName=content_obj.content.get("name", ""), 
documentAbstract=content_obj.content.get("abstract", ""), documentType=content_obj.content.get("extension", ""), documentSize=content_obj.content.get("size", 0), + createdAt=round(content_obj.content.get("created_at", datetime.now(tz=UTC).timestamp()), 3), ).model_dump(exclude_none=True, by_alias=True), ) except Exception: diff --git a/apps/scheduler/scheduler/scheduler.py b/apps/scheduler/scheduler/scheduler.py index ed73638ced241909f55989597f8bb747b50f1945..2bd51e745ea7e2a8b51f04f6f214b08b1a2ed28b 100644 --- a/apps/scheduler/scheduler/scheduler.py +++ b/apps/scheduler/scheduler/scheduler.py @@ -1,9 +1,12 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. """Scheduler模块""" +import asyncio import logging from datetime import UTC, datetime - +from apps.llm.reasoning import ReasoningLLM +from apps.schemas.config import LLMConfig +from apps.llm.patterns.rewrite import QuestionRewrite from apps.common.config import Config from apps.common.mongo import MongoDB from apps.common.queue import MessageQueue @@ -17,11 +20,12 @@ from apps.scheduler.scheduler.message import ( push_rag_message, ) from apps.schemas.collection import LLM -from apps.schemas.enum_var import AppType, EventType +from apps.schemas.enum_var import FlowStatus, AppType, EventType from apps.schemas.pool import AppPool from apps.schemas.rag_data import RAGQueryReq from apps.schemas.request_data import RequestData from apps.schemas.scheduler import ExecutorBackground +from apps.services.activity import Activity from apps.schemas.task import Task from apps.services.appcenter import AppCenterManager from apps.services.knowledge import KnowledgeBaseManager @@ -41,12 +45,31 @@ class Scheduler: """初始化""" self.used_docs = [] self.task = task - self.queue = queue self.post_body = post_body - async def run(self) -> None: # noqa: PLR0911 - """运行调度器""" + async def _monitor_activity(self, kill_event): + """监控用户活动状态,不活跃时终止工作流""" + try: + check_interval = 0.5 # 每0.5秒检查一次 + + while not 
kill_event.is_set(): + # 检查用户活动状态 + is_active = await Activity.is_active(self.task.ids.active_id) + if not is_active: + logger.warning("[Scheduler] 用户 %s 不活跃,终止工作流", self.task.ids.user_sub) + kill_event.set() + break + + # 控制检查频率 + await asyncio.sleep(check_interval) + except asyncio.CancelledError: + logger.info("[Scheduler] 活动监控任务已取消") + except Exception as e: + logger.error(f"[Scheduler] 活动监控过程中发生错误: {e}") + + async def get_llm_use_in_chat_with_rag(self) -> LLM: + """获取RAG大模型""" try: # 获取当前会话使用的大模型 llm_id = await LLMManager.get_llm_id_by_conversation_id( @@ -54,8 +77,7 @@ class Scheduler: ) if not llm_id: logger.error("[Scheduler] 获取大模型ID失败") - await self.queue.close() - return + return None if llm_id == "empty": llm = LLM( _id="empty", @@ -65,24 +87,31 @@ class Scheduler: model_name=Config().get_config().llm.model, max_tokens=Config().get_config().llm.max_tokens, ) + return llm else: llm = await LLMManager.get_llm_by_id(self.task.ids.user_sub, llm_id) if not llm: logger.error("[Scheduler] 获取大模型失败") - await self.queue.close() - return + return None + return llm except Exception: logger.exception("[Scheduler] 获取大模型失败") - await self.queue.close() - return + return None + + async def get_kb_ids_use_in_chat_with_rag(self) -> list[str]: + """获取知识库ID列表""" try: - # 获取当前会话使用的知识库 kb_ids = await KnowledgeBaseManager.get_kb_ids_by_conversation_id( - self.task.ids.user_sub, self.task.ids.conversation_id) + self.task.ids.user_sub, self.task.ids.conversation_id, + ) + return kb_ids except Exception: logger.exception("[Scheduler] 获取知识库ID失败") await self.queue.close() - return + return [] + + async def run(self) -> None: # noqa: PLR0911 + """运行调度器""" try: # 获取当前问答可供关联的文档 docs, doc_ids = await get_docs(self.task.ids.user_sub, self.post_body) @@ -92,18 +121,30 @@ class Scheduler: return history, _ = await get_context(self.task.ids.user_sub, self.post_body, 3) # 已使用文档 - # 如果是智能问答,直接执行 logger.info("[Scheduler] 开始执行") - if not self.post_body.app or self.post_body.app.app_id == "": + 
# 创建用于通信的事件 + kill_event = asyncio.Event() + monitor = asyncio.create_task(self._monitor_activity(kill_event)) + rag_method = True + if self.post_body.app and self.post_body.app.app_id: + rag_method = False + if self.task.state.app_id: + rag_method = False + if rag_method: + llm = await self.get_llm_use_in_chat_with_rag() + kb_ids = await self.get_kb_ids_use_in_chat_with_rag() self.task = await push_init_message(self.task, self.queue, 3, is_flow=False) rag_data = RAGQueryReq( kbIds=kb_ids, query=self.post_body.question, tokensLimit=llm.max_tokens, ) - self.task = await push_rag_message(self.task, self.queue, self.task.ids.user_sub, llm, history, doc_ids, rag_data) - self.task.tokens.full_time = round(datetime.now(UTC).timestamp(), 2) - self.task.tokens.time + + # 启动监控任务和主任务 + main_task = asyncio.create_task(push_rag_message( + self.task, self.queue, self.task.ids.user_sub, llm, history, doc_ids, rag_data)) + else: # 查找对应的App元数据 app_data = await AppCenterManager.fetch_app_data_by_id(self.post_body.app.app_id) @@ -127,8 +168,27 @@ class Scheduler: conversation=context, facts=facts, ) - await self.run_executor(self.queue, self.post_body, executor_background) + # 启动监控任务和主任务 + main_task = asyncio.create_task(self.run_executor(self.queue, self.post_body, executor_background)) + # 等待任一任务完成 + done, pending = await asyncio.wait( + [main_task, monitor], + return_when=asyncio.FIRST_COMPLETED + ) + + # 如果是监控任务触发,终止主任务 + if kill_event.is_set(): + logger.warning("[Scheduler] 用户活动状态检测不活跃,正在终止工作流执行...") + main_task.cancel() + need_change_cancel_flow_state = [FlowStatus.RUNNING, FlowStatus.WAITING] + if self.task.state.flow_status in need_change_cancel_flow_state: + self.task.state.flow_status = FlowStatus.CANCELLED + try: + await main_task + logger.info("[Scheduler] 工作流执行已被终止") + except Exception as e: + logger.error(f"[Scheduler] 终止工作流时发生错误: {e}") # 更新Task,发送结束消息 logger.info("[Scheduler] 发送结束消息") await self.queue.push_output(self.task, event_type=EventType.DONE.value, data={}) @@ 
-152,6 +212,42 @@ class Scheduler: if not app_metadata: logger.error("[Scheduler] 未找到Agent应用") return + if app_metadata.llm_id == "empty": + llm = LLM( + _id="empty", + user_sub=self.task.ids.user_sub, + openai_base_url=Config().get_config().llm.endpoint, + openai_api_key=Config().get_config().llm.key, + model_name=Config().get_config().llm.model, + max_tokens=Config().get_config().llm.max_tokens, + ) + else: + llm = await LLMManager.get_llm_by_id( + self.task.ids.user_sub, app_metadata.llm_id, + ) + if not llm: + logger.error("[Scheduler] 获取大模型失败") + await self.queue.close() + return + reasion_llm = ReasoningLLM( + LLMConfig( + endpoint=llm.openai_base_url, + key=llm.openai_api_key, + model=llm.model_name, + max_tokens=llm.max_tokens, + ) + ) + if background.conversation and self.task.state.flow_status == FlowStatus.INIT: + try: + question_obj = QuestionRewrite() + post_body.question = await question_obj.generate( + history=background.conversation, + question=post_body.question, + llm=reasion_llm, + language=post_body.language, + ) + except Exception: + logger.exception("[Scheduler] 问题重写失败") if app_metadata.app_type == AppType.FLOW.value: logger.info("[Scheduler] 获取工作流元数据") flow_info = await Pool().get_flow_metadata(app_info.app_id) @@ -182,7 +278,6 @@ class Scheduler: # 初始化Executor logger.info("[Scheduler] 初始化Executor") - flow_exec = FlowExecutor( flow_id=flow_id, flow=flow_data, @@ -206,10 +301,11 @@ class Scheduler: task=self.task, msg_queue=queue, question=post_body.question, - max_steps=app_metadata.history_len, + history_len=app_metadata.history_len, servers_id=servers_id, background=background, agent_id=app_info.app_id, + params=post_body.params ) # 开始运行 logger.info("[Scheduler] 运行Executor") diff --git a/apps/scheduler/slot/slot.py b/apps/scheduler/slot/slot.py index 4caab4d2dc945ec5ac14b7769770f94c39980a9f..7a6313a1c14c3122fab87905c07ce1b4bc6a0d84 100644 --- a/apps/scheduler/slot/slot.py +++ b/apps/scheduler/slot/slot.py @@ -1,6 +1,7 @@ # Copyright (c) 
Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. """参数槽位管理""" +import copy import json import logging import traceback @@ -12,6 +13,8 @@ from jsonschema.exceptions import ValidationError from jsonschema.protocols import Validator from jsonschema.validators import extend +from apps.schemas.response_data import ParamsNode +from apps.scheduler.call.choice.schema import Type from apps.scheduler.slot.parser import ( SlotConstParser, SlotDateParser, @@ -126,7 +129,7 @@ class Slot: # Schema标准 return [_process_json_value(item, spec_data["items"]) for item in json_value] if spec_data["type"] == "object" and isinstance(json_value, dict): - # 若Schema不标准,则不进行处理 + # 若Schema不标准,则不进行处理F if "properties" not in spec_data: return json_value # Schema标准 @@ -154,35 +157,60 @@ class Slot: @staticmethod def _generate_example(schema_node: dict) -> Any: # noqa: PLR0911 """根据schema生成示例值""" + if "anyOf" in schema_node or "oneOf" in schema_node: + # 如果有anyOf,随机返回一个示例 + for item in schema_node["anyOf"] if "anyOf" in schema_node else schema_node["oneOf"]: + example = Slot._generate_example(item) + if example is not None: + return example + + if "allOf" in schema_node: + # 如果有allOf,返回所有示例的合并 + example = None + for item in schema_node["allOf"]: + if example is None: + example = Slot._generate_example(item) + else: + other_example = Slot._generate_example(item) + if isinstance(example, dict) and isinstance(other_example, dict): + example.update(other_example) + else: + example = None + break + return example + if "default" in schema_node: return schema_node["default"] if "type" not in schema_node: return None - + type_value = schema_node["type"] + if isinstance(type_value, list): + # 如果是多类型,随机返回一个示例 + if len(type_value) > 1: + type_value = type_value[0] # 处理类型为 object 的节点 - if schema_node["type"] == "object": + if type_value == "object": data = {} properties = schema_node.get("properties", {}) for name, schema in properties.items(): data[name] = Slot._generate_example(schema) return 
data - # 处理类型为 array 的节点 - if schema_node["type"] == "array": + elif type_value == "array": items_schema = schema_node.get("items", {}) return [Slot._generate_example(items_schema)] # 处理类型为 string 的节点 - if schema_node["type"] == "string": + elif type_value == "string": return "" # 处理类型为 number 或 integer 的节点 - if schema_node["type"] in ["number", "integer"]: + elif type_value in ["number", "integer"]: return 0 # 处理类型为 boolean 的节点 - if schema_node["type"] == "boolean": + elif type_value == "boolean": return False # 处理其他类型或未定义类型 @@ -196,31 +224,114 @@ class Slot: """从JSON Schema中提取类型描述""" def _extract_type_desc(schema_node: dict[str, Any]) -> dict[str, Any]: - if "type" not in schema_node and "anyOf" not in schema_node: - return {} - data = {"type": schema_node.get("type", ""), "description": schema_node.get("description", "")} - if "anyOf" in schema_node: - data["type"] = "anyOf" - # 处理类型为 object 的节点 - if "anyOf" in schema_node: - data["items"] = {} - type_index = 0 - for type_index, sub_schema in enumerate(schema_node["anyOf"]): - sub_result = _extract_type_desc(sub_schema) - if sub_result: - data["items"]["type_"+str(type_index)] = sub_result - if schema_node.get("type", "") == "object": - data["items"] = {} + # 处理组合关键字 + special_keys = ["anyOf", "allOf", "oneOf"] + for key in special_keys: + if key in schema_node: + data = { + "type": key, + "description": schema_node.get("description", ""), + "items": {}, + } + type_index = 0 + for item in schema_node[key]: + if isinstance(item, dict): + data["items"][f"item_{type_index}"] = _extract_type_desc(item) + else: + data["items"][f"item_{type_index}"] = {"type": item, "description": ""} + type_index += 1 + return data + # 处理基本类型 + type_val = schema_node.get("type", "") + description = schema_node.get("description", "") + + # 处理多类型数组 + if isinstance(type_val, list): + if len(type_val) > 1: + data = {"type": "union", "description": description, "items": {}} + type_index = 0 + for t in type_val: + if t == "object": + 
tmp_dict = {} + for key, val in schema_node.get("properties", {}).items(): + tmp_dict[key] = _extract_type_desc(val) + data["items"][f"item_{type_index}"] = tmp_dict + elif t == "array": + items_schema = schema_node.get("items", {}) + data["items"][f"item_{type_index}"] = _extract_type_desc(items_schema) + else: + data["items"][f"item_{type_index}"] = {"type": t, "description": description} + type_index += 1 + return data + elif len(type_val) == 1: + type_val = type_val[0] + else: + type_val = "" + + data = {"type": type_val, "description": description, "items": {}} + + # 递归处理对象和数组 + if type_val == "object": for key, val in schema_node.get("properties", {}).items(): data["items"][key] = _extract_type_desc(val) - - # 处理类型为 array 的节点 - if schema_node.get("type", "") == "array": + elif type_val == "array": items_schema = schema_node.get("items", {}) - data["items"] = _extract_type_desc(items_schema) + if isinstance(items_schema, list): + item_index = 0 + for item in items_schema: + data["items"][f"item_{item_index}"] = _extract_type_desc(item) + item_index += 1 + else: + data["items"]["item"] = _extract_type_desc(items_schema) + if data["items"] == {}: + del data["items"] return data + return _extract_type_desc(self._schema) + def get_params_node_from_schema(self, root: str = "") -> ParamsNode: + """从JSON Schema中提取ParamsNode""" + def _extract_params_node(schema_node: dict[str, Any], name: str = "", path: str = "") -> ParamsNode: + """递归提取ParamsNode""" + if "type" not in schema_node: + return None + + param_type = schema_node["type"] + if isinstance(param_type, list): + return None # 不支持多类型 + if param_type == "object": + param_type = Type.DICT + elif param_type == "array": + param_type = Type.LIST + elif param_type == "string": + param_type = Type.STRING + elif param_type in ["number", "integer"]: + param_type = Type.NUMBER + elif param_type == "boolean": + param_type = Type.BOOL + else: + logger.warning(f"[Slot] 不支持的参数类型: {param_type}") + return None + sub_params = [] 
+ + if param_type == Type.DICT and "properties" in schema_node: + for key, value in schema_node["properties"].items(): + sub_param = _extract_params_node(value, name=key, path=f"{path}/{key}") + if sub_param: + sub_params.append(sub_param) + else: + # 对于非对象类型,直接返回空子参数 + sub_params = None + return ParamsNode(paramName=name, + paramPath=path, + paramType=param_type, + subParams=sub_params) + try: + return _extract_params_node(self._schema, name=root, path=root) + except Exception as e: + logger.error(f"[Slot] 提取ParamsNode失败: {e!s}\n{traceback.format_exc()}") + return None + def _flatten_schema(self, schema: dict[str, Any]) -> tuple[dict[str, Any], list[str]]: """将JSON Schema扁平化""" result = {} @@ -276,7 +387,6 @@ class Slot: logger.exception("[Slot] 错误schema不合法: %s", error.schema) return {}, [] - def _assemble_patch( self, key: str, @@ -329,7 +439,6 @@ class Slot: logger.info("[Slot] 组装patch: %s", patch_list) return patch_list - def convert_json(self, json_data: str | dict[str, Any]) -> dict[str, Any]: """将用户手动填充的参数专为真实JSON""" json_dict = json.loads(json_data) if isinstance(json_data, str) else json_data @@ -373,3 +482,54 @@ class Slot: return schema_template return {} + + def add_null_to_basic_types(self) -> dict[str, Any]: + """ + 递归地为 JSON Schema 中的基础类型(bool、number等)添加 null 选项 + """ + def add_null_to_basic_types(schema: dict[str, Any]) -> dict[str, Any]: + """ + 递归地为 JSON Schema 中的基础类型(bool、number等)添加 null 选项 + + 参数: + schema (dict): 原始 JSON Schema + + 返回: + dict: 修改后的 JSON Schema + """ + # 如果不是字典类型(schema),直接返回 + if not isinstance(schema, dict): + return schema + + # 处理当前节点的 type 字段 + if 'type' in schema: + # 处理单一类型字符串 + if isinstance(schema['type'], str): + if schema['type'] in ['boolean', 'number', 'string', 'integer']: + schema['type'] = [schema['type'], 'null'] + + # 处理类型数组 + elif isinstance(schema['type'], list): + for i, t in enumerate(schema['type']): + if isinstance(t, str) and t in ['boolean', 'number', 'string', 'integer']: + if 'null' not in 
schema['type']: + schema['type'].append('null') + break + + # 递归处理 properties 字段(对象类型) + if 'properties' in schema: + for prop, prop_schema in schema['properties'].items(): + schema['properties'][prop] = add_null_to_basic_types(prop_schema) + + # 递归处理 items 字段(数组类型) + if 'items' in schema: + schema['items'] = add_null_to_basic_types(schema['items']) + + # 递归处理 anyOf, oneOf, allOf 字段 + for keyword in ['anyOf', 'oneOf', 'allOf']: + if keyword in schema: + schema[keyword] = [add_null_to_basic_types(sub_schema) for sub_schema in schema[keyword]] + + return schema + schema_copy = copy.deepcopy(self._schema) + return add_null_to_basic_types(schema_copy) diff --git a/apps/schemas/agent.py b/apps/schemas/agent.py index b52f5e1c3315fb873acddb2bcc4e27937498d561..16e818e4d588d4f25ea4854a7979e2dc5fe90ecb 100644 --- a/apps/schemas/agent.py +++ b/apps/schemas/agent.py @@ -17,6 +17,7 @@ class AgentAppMetadata(MetadataBase): app_type: AppType = Field(default=AppType.AGENT, description="应用类型", frozen=True) published: bool = Field(description="是否发布", default=False) history_len: int = Field(description="对话轮次", default=3, le=10) - mcp_service: list[str] = Field(default=[], alias="mcpService", description="MCP服务id列表") + mcp_service: list[str] = Field(default=[], description="MCP服务id列表") + llm_id: str = Field(default="empty", description="大模型ID") permission: Permission | None = Field(description="应用权限配置", default=None) version: str = Field(description="元数据版本") diff --git a/apps/schemas/appcenter.py b/apps/schemas/appcenter.py index a89f39df18083d90b988cc764c0e88f90500d1f3..e3bb896eba2361e0c28ee914b84f387d7da76b87 100644 --- a/apps/schemas/appcenter.py +++ b/apps/schemas/appcenter.py @@ -45,9 +45,9 @@ class AppFlowInfo(BaseModel): """应用工作流数据结构""" id: str = Field(..., description="工作流ID") - name: str = Field(..., description="工作流名称") - description: str = Field(..., description="工作流简介") - debug: bool = Field(..., description="是否经过调试") + name: str = Field(default="", description="工作流名称") + 
description: str = Field(default="", description="工作流简介") + debug: bool = Field(default=False, description="是否经过调试") class AppData(BaseModel): @@ -61,6 +61,7 @@ class AppData(BaseModel): first_questions: list[str] = Field( default=[], alias="recommendedQuestions", description="推荐问题", max_length=3) history_len: int = Field(3, alias="dialogRounds", ge=1, le=10, description="对话轮次(1~10)") + llm: str = Field(default="empty", description="大模型ID") permission: AppPermissionData = Field( default_factory=lambda: AppPermissionData(authorizedUsers=None), description="权限配置") workflows: list[AppFlowInfo] = Field(default=[], description="工作流信息列表") diff --git a/apps/schemas/collection.py b/apps/schemas/collection.py index 0ff66c72bbe30b7cbb55517952c8b14744d69cf2..22563e14eb12f19317fcf48ac0865025ecae883a 100644 --- a/apps/schemas/collection.py +++ b/apps/schemas/collection.py @@ -61,6 +61,7 @@ class User(BaseModel): fav_apps: list[str] = [] fav_services: list[str] = [] is_admin: bool = Field(default=False, description="是否为管理员") + auto_execute: bool = Field(default=True, description="是否自动执行任务") class LLM(BaseModel): @@ -105,7 +106,7 @@ class Conversation(BaseModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), alias="_id") user_sub: str - title: str = NEW_CHAT + title: str = Field(default=NEW_CHAT) created_at: float = Field(default_factory=lambda: round(datetime.now(tz=UTC).timestamp(), 3)) app_id: str | None = Field(default="") tasks: list[str] = [] diff --git a/apps/schemas/config.py b/apps/schemas/config.py index b88a81f1afb018663713a3bf4c0b9b62436e59d4..675a9ba7f26531206a5ea5f8a7598d4344ae2124 100644 --- a/apps/schemas/config.py +++ b/apps/schemas/config.py @@ -6,6 +6,12 @@ from typing import Literal from pydantic import BaseModel, Field +class NoauthConfig(BaseModel): + """无认证配置""" + + enable: bool = Field(description="是否启用无认证访问", default=False) + + class DeployConfig(BaseModel): """部署配置""" @@ -122,7 +128,7 @@ class ExtraConfig(BaseModel): class 
ConfigModel(BaseModel): """配置文件的校验Class""" - + no_auth: NoauthConfig = Field(description="无认证配置", default=NoauthConfig()) deploy: DeployConfig login: LoginConfig embedding: EmbeddingConfig diff --git a/apps/schemas/enum_var.py b/apps/schemas/enum_var.py index 9a20ba84d4805bcd502d9d4ca0f9ba3c49a7bb4c..e7ecf87c3ed043ff85818050cc7f9e7fe4ed04ba 100644 --- a/apps/schemas/enum_var.py +++ b/apps/schemas/enum_var.py @@ -14,11 +14,26 @@ class SlotType(str, Enum): class StepStatus(str, Enum): """步骤状态""" - + UNKNOWN = "unknown" + INIT = "init" + WAITING = "waiting" RUNNING = "running" SUCCESS = "success" ERROR = "error" PARAM = "param" + CANCELLED = "cancelled" + + +class FlowStatus(str, Enum): + """Flow状态""" + + UNKNOWN = "unknown" + INIT = "init" + WAITING = "waiting" + RUNNING = "running" + SUCCESS = "success" + ERROR = "error" + CANCELLED = "cancelled" class DocumentStatus(str, Enum): @@ -34,14 +49,22 @@ class EventType(str, Enum): """事件类型""" HEARTBEAT = "heartbeat" - INIT = "init", + INIT = "init" TEXT_ADD = "text.add" GRAPH = "graph" DOCUMENT_ADD = "document.add" + STEP_WAITING_FOR_START = "step.waiting_for_start" + STEP_WAITING_FOR_PARAM = "step.waiting_for_param" FLOW_START = "flow.start" + STEP_INIT = "step.init" STEP_INPUT = "step.input" STEP_OUTPUT = "step.output" + STEP_CANCEL = "step.cancel" + STEP_ERROR = "step.error" FLOW_STOP = "flow.stop" + FLOW_FAILED = "flow.failed" + FLOW_SUCCESS = "flow.success" + FLOW_CANCEL = "flow.cancel" DONE = "done" @@ -81,7 +104,7 @@ class NodeType(str, Enum): START = "start" END = "end" NORMAL = "normal" - CHOICE = "choice" + CHOICE = "Choice" class SaveType(str, Enum): @@ -144,7 +167,7 @@ class SpecialCallType(str, Enum): LLM = "LLM" START = "start" END = "end" - CHOICE = "choice" + CHOICE = "Choice" class CommentType(str, Enum): @@ -186,3 +209,10 @@ class AgentState(str, Enum): RUNNING = "RUNNING" FINISHED = "FINISHED" ERROR = "ERROR" + +class LanguageType(str, Enum): + """语言类型""" + + CHINESE = "zh" + ENGLISH = "en" + diff --git 
a/apps/schemas/flow.py b/apps/schemas/flow.py index 2646d04390099fd92988d78c00d1b1471780d6a9..dfffd1f14c779952d9bb761d2e2bccd85528bc0f 100644 --- a/apps/schemas/flow.py +++ b/apps/schemas/flow.py @@ -136,6 +136,7 @@ class AppMetadata(MetadataBase): published: bool = Field(description="是否发布", default=False) links: list[AppLink] = Field(description="相关链接", default=[]) first_questions: list[str] = Field(description="首次提问", default=[]) + llm_id: str = Field(description="大模型ID", default="empty") history_len: int = Field(description="对话轮次", default=3, le=10) permission: Permission | None = Field(description="应用权限配置", default=None) flows: list[AppFlow] = Field(description="Flow列表", default=[]) diff --git a/apps/schemas/flow_topology.py b/apps/schemas/flow_topology.py index d0ab666a9902ec7e0d81bb922f973d655f29eaf2..fecf21606634982ad6480069a7adc6714a612db2 100644 --- a/apps/schemas/flow_topology.py +++ b/apps/schemas/flow_topology.py @@ -5,6 +5,7 @@ from typing import Any from pydantic import BaseModel, Field +from apps.schemas.enum_var import SpecialCallType from apps.schemas.enum_var import EdgeType @@ -51,7 +52,7 @@ class NodeItem(BaseModel): service_id: str = Field(alias="serviceId", default="") node_id: str = Field(alias="nodeId", default="") name: str = Field(default="") - call_id: str = Field(alias="callId", default="Empty") + call_id: str = Field(alias="callId", default=SpecialCallType.EMPTY.value) description: str = Field(default="") enable: bool = Field(default=True) parameters: dict[str, Any] = Field(default={}) @@ -81,6 +82,6 @@ class FlowItem(BaseModel): nodes: list[NodeItem] = Field(default=[]) edges: list[EdgeItem] = Field(default=[]) created_at: float | None = Field(alias="createdAt", default=0) - connectivity: bool = Field(default=False,description="图的开始节点和结束节点是否联通,并且除结束节点都有出边") + connectivity: bool = Field(default=False, description="图的开始节点和结束节点是否联通,并且除结束节点都有出边") focus_point: PositionItem = Field(alias="focusPoint", default=PositionItem()) debug: bool = 
Field(default=False) diff --git a/apps/schemas/mcp.py b/apps/schemas/mcp.py index 44021b0ed5f5f6c372ac0a472bb3ac28ff5bbddb..718726e140681bc24b5c1b3cd80dd820a40f4fd1 100644 --- a/apps/schemas/mcp.py +++ b/apps/schemas/mcp.py @@ -1,6 +1,7 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. """MCP 相关数据结构""" +import uuid from enum import Enum from typing import Any @@ -10,8 +11,9 @@ from pydantic import BaseModel, Field class MCPInstallStatus(str, Enum): """MCP 服务状态""" - + INIT = "init" INSTALLING = "installing" + CANCELLED = "cancelled" READY = "ready" FAILED = "failed" @@ -22,6 +24,7 @@ class MCPStatus(str, Enum): UNINITIALIZED = "uninitialized" RUNNING = "running" STOPPED = "stopped" + ERROR = "error" class MCPType(str, Enum): @@ -35,23 +38,25 @@ class MCPType(str, Enum): class MCPBasicConfig(BaseModel): """MCP 基本配置""" - env: dict[str, str] = Field(description="MCP 服务器环境变量", default={}) auto_approve: list[str] = Field(description="自动批准的MCP工具ID列表", default=[], alias="autoApprove") disabled: bool = Field(description="MCP 服务器是否禁用", default=False) - auto_install: bool = Field(description="是否自动安装MCP服务器", default=True, alias="autoInstall") + auto_install: bool = Field(description="是否自动安装MCP服务器", default=True) + timeout: int = Field(description="MCP 服务器超时时间(秒)", default=60, alias="timeout") + description: str = Field(description="MCP 服务器自然语言描述", default="") class MCPServerStdioConfig(MCPBasicConfig): """MCP 服务器配置""" + env: dict[str, Any] = Field(description="MCP 服务器环境变量", default={}) command: str = Field(description="MCP 服务器命令") args: list[str] = Field(description="MCP 服务器命令参数") class MCPServerSSEConfig(MCPBasicConfig): """MCP 服务器配置""" - - url: str = Field(description="MCP 服务器地址", default="") + headers: dict[str, str] = Field(description="MCP 服务器请求头", default={}) + url: str = Field(description="MCP 服务器地址", default="http://example.com/sse", pattern=r"^https?://.*$") class MCPServerConfig(BaseModel): @@ -84,7 +89,7 @@ class 
MCPCollection(BaseModel): type: MCPType = Field(description="MCP 类型", default=MCPType.SSE) activated: list[str] = Field(description="激活该MCP的用户ID列表", default=[]) tools: list[MCPTool] = Field(description="MCP工具列表", default=[]) - status: MCPInstallStatus = Field(description="MCP服务状态", default=MCPInstallStatus.INSTALLING) + status: MCPInstallStatus = Field(description="MCP服务状态", default=MCPInstallStatus.INIT) author: str = Field(description="MCP作者", default="") @@ -103,6 +108,74 @@ class MCPToolVector(LanceModel): embedding: Vector(dim=1024) = Field(description="MCP工具描述的向量信息") # type: ignore[call-arg] +class GoalEvaluationResult(BaseModel): + """MCP 目标评估结果""" + + can_complete: bool = Field(description="是否可以完成目标") + reason: str = Field(description="评估原因") + + +class Risk(str, Enum): + """MCP工具风险类型""" + + LOW = "low" + MEDIUM = "medium" + HIGH = "high" + + +class FlowName(BaseModel): + """MCP 流程名称""" + + flow_name: str = Field(description="MCP 流程名称", default="") + + +class FlowRisk(BaseModel): + """MCP 流程风险评估结果""" + + risk: Risk = Field(description="风险类型", default=Risk.LOW) + reason: str = Field(description="风险原因", default="") + + +class RestartStepIndex(BaseModel): + """MCP重新规划的步骤索引""" + + start_index: int = Field(description="重新规划的起始步骤索引") + reasoning: str = Field(description="重新规划的原因") + + +class ToolSkip(BaseModel): + """MCP工具跳过执行结果""" + + skip: bool = Field(description="是否跳过当前步骤", default=False) + + +class ToolRisk(BaseModel): + """MCP工具风险评估结果""" + + risk: Risk = Field(description="风险类型", default=Risk.LOW) + reason: str = Field(description="风险原因", default="") + + +class ErrorType(str, Enum): + """MCP工具错误类型""" + + MISSING_PARAM = "missing_param" + DECORRECT_PLAN = "decorrect_plan" + + +class ToolExcutionErrorType(BaseModel): + """MCP工具执行错误""" + + type: ErrorType = Field(description="错误类型", default=ErrorType.MISSING_PARAM) + reason: str = Field(description="错误原因", default="") + + +class IsParamError(BaseModel): + """MCP工具参数错误""" + + is_param_error: bool = 
Field(description="是否是参数错误", default=False) + + class MCPSelectResult(BaseModel): """MCP选择结果""" @@ -115,9 +188,15 @@ class MCPToolSelectResult(BaseModel): name: str = Field(description="工具名称") +class MCPToolIdsSelectResult(BaseModel): + """MCP工具ID选择结果""" + + tool_ids: list[str] = Field(description="工具ID列表") + + class MCPPlanItem(BaseModel): """MCP 计划""" - + step_id: str = Field(description="步骤的ID", default="") content: str = Field(description="计划内容") tool: str = Field(description="工具名称") instruction: str = Field(description="工具指令") @@ -126,4 +205,11 @@ class MCPPlanItem(BaseModel): class MCPPlan(BaseModel): """MCP 计划""" - plans: list[MCPPlanItem] = Field(description="计划列表") + plans: list[MCPPlanItem] = Field(description="计划列表", default=[]) + + +class Step(BaseModel): + """MCP步骤""" + + tool_id: str = Field(description="工具ID") + description: str = Field(description="步骤描述,15个字以下") diff --git a/apps/schemas/message.py b/apps/schemas/message.py index d0661224e0817ff6f25e341d65c88903020d18e8..17a569ca2cf710b206ddbc1afb655bf4675af7fd 100644 --- a/apps/schemas/message.py +++ b/apps/schemas/message.py @@ -1,14 +1,22 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
"""队列中的消息结构""" +from datetime import UTC, datetime from typing import Any from pydantic import BaseModel, Field -from apps.schemas.enum_var import EventType, StepStatus +from apps.schemas.enum_var import EventType, FlowStatus, StepStatus from apps.schemas.record import RecordMetadata +class FlowParams(BaseModel): + """流执行过程中的参数补充""" + + content: dict[str, Any] = Field(default={}, description="流执行过程中的参数补充内容") + description: str = Field(default="", description="流执行过程中的参数补充描述") + + class HeartbeatData(BaseModel): """心跳事件的数据结构""" @@ -22,8 +30,17 @@ class MessageFlow(BaseModel): app_id: str = Field(description="插件ID", alias="appId") flow_id: str = Field(description="Flow ID", alias="flowId") + flow_name: str = Field(description="Flow名称", alias="flowName") + flow_status: FlowStatus = Field(description="Flow状态", alias="flowStatus", default=FlowStatus.UNKNOWN) step_id: str = Field(description="当前步骤ID", alias="stepId") step_name: str = Field(description="当前步骤名称", alias="stepName") + sub_step_id: str | None = Field(description="当前子步骤ID", alias="subStepId", default=None) + sub_step_name: str | None = Field(description="当前子步骤名称", alias="subStepName", default=None) + step_description: str | None = Field( + description="当前步骤描述", + alias="stepDescription", + default=None, + ) step_status: StepStatus = Field(description="当前步骤状态", alias="stepStatus") @@ -60,17 +77,21 @@ class DocumentAddContent(BaseModel): document_id: str = Field(description="文档UUID", alias="documentId") document_order: int = Field(description="文档在对话中的顺序,从1开始", alias="documentOrder") + document_author: str = Field(description="文档作者", alias="documentAuthor", default="") document_name: str = Field(description="文档名称", alias="documentName") document_abstract: str = Field(description="文档摘要", alias="documentAbstract", default="") document_type: str = Field(description="文档MIME类型", alias="documentType", default="") document_size: float = Field(ge=0, description="文档大小,单位是KB,保留两位小数", alias="documentSize", default=0) + 
created_at: float = Field( + description="文档创建时间,单位是秒", alias="createdAt", default_factory=lambda: round(datetime.now(tz=UTC).timestamp(), 3) + ) class FlowStartContent(BaseModel): """flow.start消息的content""" question: str = Field(description="用户问题") - params: dict[str, Any] = Field(description="预先提供的参数") + params: dict[str, Any] | None = Field(description="预先提供的参数", default=None) class MessageBase(HeartbeatData): @@ -81,5 +102,5 @@ class MessageBase(HeartbeatData): conversation_id: str = Field(min_length=36, max_length=36, alias="conversationId") task_id: str = Field(min_length=36, max_length=36, alias="taskId") flow: MessageFlow | None = None - content: dict[str, Any] = {} + content: Any | None = Field(default=None, description="消息内容") metadata: MessageMetadata diff --git a/apps/schemas/parameters.py b/apps/schemas/parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..bd908d2375415c798f4804c4eabde797f6a4f7a0 --- /dev/null +++ b/apps/schemas/parameters.py @@ -0,0 +1,69 @@ +from enum import Enum + + +class NumberOperate(str, Enum): + """Choice 工具支持的数字运算符""" + + EQUAL = "number_equal" + NOT_EQUAL = "number_not_equal" + GREATER_THAN = "number_greater_than" + LESS_THAN = "number_less_than" + GREATER_THAN_OR_EQUAL = "number_greater_than_or_equal" + LESS_THAN_OR_EQUAL = "number_less_than_or_equal" + + +class StringOperate(str, Enum): + """Choice 工具支持的字符串运算符""" + + EQUAL = "string_equal" + NOT_EQUAL = "string_not_equal" + CONTAINS = "string_contains" + NOT_CONTAINS = "string_not_contains" + STARTS_WITH = "string_starts_with" + ENDS_WITH = "string_ends_with" + LENGTH_EQUAL = "string_length_equal" + LENGTH_GREATER_THAN = "string_length_greater_than" + LENGTH_GREATER_THAN_OR_EQUAL = "string_length_greater_than_or_equal" + LENGTH_LESS_THAN = "string_length_less_than" + LENGTH_LESS_THAN_OR_EQUAL = "string_length_less_than_or_equal" + REGEX_MATCH = "string_regex_match" + + +class ListOperate(str, Enum): + """Choice 工具支持的列表运算符""" + + EQUAL = 
"list_equal" + NOT_EQUAL = "list_not_equal" + CONTAINS = "list_contains" + NOT_CONTAINS = "list_not_contains" + LENGTH_EQUAL = "list_length_equal" + LENGTH_GREATER_THAN = "list_length_greater_than" + LENGTH_GREATER_THAN_OR_EQUAL = "list_length_greater_than_or_equal" + LENGTH_LESS_THAN = "list_length_less_than" + LENGTH_LESS_THAN_OR_EQUAL = "list_length_less_than_or_equal" + + +class BoolOperate(str, Enum): + """Choice 工具支持的布尔运算符""" + + EQUAL = "bool_equal" + NOT_EQUAL = "bool_not_equal" + + +class DictOperate(str, Enum): + """Choice 工具支持的字典运算符""" + + EQUAL = "dict_equal" + NOT_EQUAL = "dict_not_equal" + CONTAINS_KEY = "dict_contains_key" + NOT_CONTAINS_KEY = "dict_not_contains_key" + + +class Type(str, Enum): + """Choice 工具支持的类型""" + + STRING = "string" + NUMBER = "number" + LIST = "list" + DICT = "dict" + BOOL = "bool" diff --git a/apps/schemas/pool.py b/apps/schemas/pool.py index 27e16b370ec83acc11e1f435ac1da296fe2a9560..3532b6e2ffd31739e25e53414245b5d54fd395d0 100644 --- a/apps/schemas/pool.py +++ b/apps/schemas/pool.py @@ -109,4 +109,7 @@ class AppPool(BaseData): permission: Permission = Field(description="应用权限配置", default=Permission()) flows: list[AppFlow] = Field(description="Flow列表", default=[]) hashes: dict[str, str] = Field(description="关联文件的hash值", default={}) - mcp_service: list[str] = Field(default=[], alias="mcpService", description="MCP服务id列表") + mcp_service: list[str] = Field(default=[], description="MCP服务id列表") + llm_id: str = Field( + default="empty", description="应用使用的大模型ID(如果有的话)" + ) diff --git a/apps/schemas/record.py b/apps/schemas/record.py index d7acd368205d0aa5a2b368edf4af3a31ed4d1ff6..7f0c79a9ef7a2d2457d8c789f60081a2b150a0dd 100644 --- a/apps/schemas/record.py +++ b/apps/schemas/record.py @@ -10,15 +10,17 @@ from pydantic import BaseModel, Field from apps.schemas.collection import ( Document, ) -from apps.schemas.enum_var import CommentType, StepStatus +from apps.schemas.enum_var import CommentType, FlowStatus, StepStatus class 
RecordDocument(Document): """GET /api/record/{conversation_id} Result中的document数据结构""" id: str = Field(alias="_id", default="") + order: int = Field(default=0, description="文档顺序") abstract: str = Field(default="", description="文档摘要") user_sub: None = None + author: str = Field(default="", description="文档作者") associated: Literal["question", "answer"] class Config: @@ -31,9 +33,11 @@ class RecordFlowStep(BaseModel): """Record表子项:flow的单步数据结构""" step_id: str = Field(alias="stepId") + step_name: str = Field(alias="stepName", default="") step_status: StepStatus = Field(alias="stepStatus") input: dict[str, Any] output: dict[str, Any] + ex_data: dict[str, Any] | None = Field(default=None, alias="exData") class RecordFlow(BaseModel): @@ -42,6 +46,8 @@ class RecordFlow(BaseModel): id: str record_id: str = Field(alias="recordId") flow_id: str = Field(alias="flowId") + flow_name: str = Field(alias="flowName", default="") + flow_status: StepStatus = Field(alias="flowStatus", default=StepStatus.SUCCESS) step_num: int = Field(alias="stepNum") steps: list[RecordFlowStep] @@ -90,7 +96,7 @@ class RecordData(BaseModel): id: str group_id: str = Field(alias="groupId") conversation_id: str = Field(alias="conversationId") - task_id: str = Field(alias="taskId") + task_id: str | None = Field(default=None, alias="taskId") document: list[RecordDocument] = [] flow: RecordFlow | None = None content: RecordContent @@ -103,11 +109,22 @@ class RecordGroupDocument(BaseModel): """RecordGroup关联的文件""" id: str = Field(default_factory=lambda: str(uuid.uuid4()), alias="_id") + order: int = Field(default=0, description="文档顺序") + author: str = Field(default="", description="文档作者") name: str = Field(default="", description="文档名称") abstract: str = Field(default="", description="文档摘要") extension: str = Field(default="", description="文档扩展名") size: int = Field(default=0, description="文档大小,单位是KB") associated: Literal["question", "answer"] + created_at: float = Field(default=0.0, description="文档创建时间") + + +class 
FlowHistory(BaseModel): + """Flow执行历史""" + flow_id: str = Field(default_factory=lambda: str(uuid.uuid4()), alias="_id") + flow_name: str = Field(default="", description="Flow名称") + flow_staus: FlowStatus = Field(default=FlowStatus.SUCCESS, description="Flow执行状态") + history_ids: list[str] = Field(default=[], description="Flow执行历史ID列表") class Record(RecordData): @@ -115,9 +132,11 @@ class Record(RecordData): user_sub: str key: dict[str, Any] = {} - content: str + task_id: str | None = Field(default=None, description="任务ID") + content: str = Field(default="", description="Record内容,已加密") comment: RecordComment = Field(default=RecordComment()) - flow: list[str] = Field(default=[]) + flow: FlowHistory = Field( + default=FlowHistory(), description="Flow执行历史信息") class RecordGroup(BaseModel): @@ -134,5 +153,4 @@ class RecordGroup(BaseModel): records: list[Record] = [] docs: list[RecordGroupDocument] = [] # 问题不变,所用到的文档不变 conversation_id: str - task_id: str created_at: float = Field(default_factory=lambda: round(datetime.now(tz=UTC).timestamp(), 3)) diff --git a/apps/schemas/request_data.py b/apps/schemas/request_data.py index 2305dd93dfe33969691dcc42928e578e7f1c4207..7a4d6bb69be8ef8e3feb07dce750843bfef6b1ed 100644 --- a/apps/schemas/request_data.py +++ b/apps/schemas/request_data.py @@ -7,17 +7,18 @@ from pydantic import BaseModel, Field from apps.common.config import Config from apps.schemas.appcenter import AppData -from apps.schemas.enum_var import CommentType +from apps.schemas.enum_var import CommentType, LanguageType from apps.schemas.flow_topology import FlowItem from apps.schemas.mcp import MCPType +from apps.schemas.message import FlowParams + class RequestDataApp(BaseModel): """模型对话中包含的app信息""" app_id: str = Field(description="应用ID", alias="appId") - flow_id: str = Field(description="Flow ID", alias="flowId") - params: dict[str, Any] = Field(description="插件参数") + flow_id: str | None = Field(default=None, description="Flow ID", alias="flowId") class 
MockRequestData(BaseModel): @@ -39,13 +40,15 @@ class RequestDataFeatures(BaseModel): class RequestData(BaseModel): """POST /api/chat 请求的总的数据结构""" - question: str = Field(max_length=2000, description="用户输入") - conversation_id: str = Field(default="", alias="conversationId", description="聊天ID") + question: str | None = Field(default=None, max_length=2000, description="用户输入") + conversation_id: str | None = Field(default=None, alias="conversationId", description="聊天ID") group_id: str | None = Field(default=None, alias="groupId", description="问答组ID") - language: str = Field(default="zh", description="语言") + language: LanguageType = Field(default=LanguageType.CHINESE, description="语言") files: list[str] = Field(default=[], description="文件列表") app: RequestDataApp | None = Field(default=None, description="应用") debug: bool = Field(default=False, description="是否调试") + task_id: str | None = Field(default=None, alias="taskId", description="任务ID") + params: FlowParams | bool | None = Field(default=None, description="流执行过程中的参数补充", alias="params") class QuestionBlacklistRequest(BaseModel): @@ -98,7 +101,7 @@ class UpdateMCPServiceRequest(BaseModel): name: str = Field(..., description="MCP服务名称") description: str = Field(..., description="MCP服务描述") overview: str = Field(..., description="MCP服务概述") - config: str = Field(..., description="MCP服务配置") + config: dict[str, Any] = Field(..., description="MCP服务配置") mcp_type: MCPType = Field(description="MCP传输协议(Stdio/SSE/Streamable)", default=MCPType.STDIO, alias="mcpType") @@ -106,6 +109,7 @@ class ActiveMCPServiceRequest(BaseModel): """POST /api/mcp/{serviceId} 请求数据结构""" active: bool = Field(description="是否激活mcp服务") + mcp_env: dict[str, Any] | None = Field(default=None, description="MCP服务环境变量", alias="mcpEnv") class UpdateServiceRequest(BaseModel): @@ -183,3 +187,9 @@ class UpdateKbReq(BaseModel): """更新知识库请求体""" kb_ids: list[str] = Field(description="知识库ID列表", alias="kbIds", default=[]) + + +class UserUpdateRequest(BaseModel): + 
"""更新用户信息请求体""" + + auto_execute: bool = Field(default=False, description="是否自动执行", alias="autoExecute") diff --git a/apps/schemas/response_data.py b/apps/schemas/response_data.py index b2a1872918638b153e1e94ff4db85ef00cfc0502..b8299fe8357520757a19d7ede4d5a48ab330d124 100644 --- a/apps/schemas/response_data.py +++ b/apps/schemas/response_data.py @@ -14,10 +14,19 @@ from apps.schemas.flow_topology import ( NodeServiceItem, PositionItem, ) +from apps.schemas.parameters import ( + Type, + NumberOperate, + StringOperate, + ListOperate, + BoolOperate, + DictOperate, +) from apps.schemas.mcp import MCPInstallStatus, MCPTool, MCPType from apps.schemas.record import RecordData from apps.schemas.user import UserInfo from apps.templates.generate_llm_operator_config import llm_provider_dict +from apps.common.config import Config class ResponseData(BaseModel): @@ -47,6 +56,7 @@ class AuthUserMsg(BaseModel): user_sub: str revision: bool is_admin: bool + auto_execute: bool class AuthUserRsp(ResponseData): @@ -90,7 +100,7 @@ class LLMIteam(BaseModel): icon: str = Field(default=llm_provider_dict["ollama"]["icon"]) llm_id: str = Field(alias="llmId", default="empty") - model_name: str = Field(alias="modelName", default="Ollama LLM") + model_name: str = Field(alias="modelName", default=Config().get_config().llm.model) class KbIteam(BaseModel): @@ -272,11 +282,21 @@ class BaseAppOperationRsp(ResponseData): result: BaseAppOperationMsg +class AppMcpServiceInfo(BaseModel): + """应用关联的MCP服务信息""" + + id: str = Field(..., description="MCP服务ID") + name: str = Field(default="", description="MCP服务名称") + description: str = Field(default="", description="MCP服务简介") + + class GetAppPropertyMsg(AppData): """GET /api/app/{appId} Result数据结构""" app_id: str = Field(..., alias="appId", description="应用ID") published: bool = Field(..., description="是否已发布") + mcp_service: list[AppMcpServiceInfo] = Field(default=[], alias="mcpService", description="MCP服务信息列表") + llm: LLMIteam | None = Field(alias="llm", 
default=None) class GetAppPropertyRsp(ResponseData): @@ -495,6 +515,10 @@ class GetMCPServiceDetailMsg(BaseModel): name: str = Field(..., description="MCP服务名称") description: str = Field(description="MCP服务描述") overview: str = Field(description="MCP服务概述") + status: MCPInstallStatus = Field( + description="MCP服务状态", + default=MCPInstallStatus.INIT, + ) tools: list[MCPTool] = Field(description="MCP服务Tools列表", default=[]) @@ -506,7 +530,7 @@ class EditMCPServiceMsg(BaseModel): name: str = Field(..., description="MCP服务名称") description: str = Field(description="MCP服务描述") overview: str = Field(description="MCP服务概述") - data: str = Field(description="MCP服务配置") + data: dict[str, Any] = Field(description="MCP服务配置") mcp_type: MCPType = Field(alias="mcpType", description="MCP 类型") @@ -573,7 +597,7 @@ class FlowStructureDeleteRsp(ResponseData): class UserGetMsp(BaseModel): """GET /api/user result""" - + total: int = Field(default=0) user_info_list: list[UserInfo] = Field(alias="userInfoList", default=[]) @@ -628,3 +652,42 @@ class ListLLMRsp(ResponseData): """GET /api/llm 返回数据结构""" result: list[LLMProviderInfo] = Field(default=[], title="Result") + + +class ParamsNode(BaseModel): + """参数数据结构""" + param_name: str = Field(..., description="参数名称", alias="paramName") + param_path: str = Field(..., description="参数路径", alias="paramPath") + param_type: Type = Field(..., description="参数类型", alias="paramType") + sub_params: list["ParamsNode"] | None = Field( + default=None, description="子参数列表", alias="subParams" + ) + + +class StepParams(BaseModel): + """参数数据结构""" + step_id: str = Field(..., description="步骤ID", alias="stepId") + name: str = Field(..., description="Step名称") + params_node: ParamsNode | None = Field( + default=None, description="参数节点", alias="paramsNode") + + +class GetParamsRsp(ResponseData): + """GET /api/params 返回数据结构""" + + result: list[StepParams] = Field( + default=[], description="参数列表", alias="result" + ) + + +class OperateAndBindType(BaseModel): + """操作和绑定类型数据结构""" 
+ + operate: NumberOperate | StringOperate | ListOperate | BoolOperate | DictOperate = Field(description="操作类型") + bind_type: Type = Field(description="绑定类型") + + +class GetOperaRsp(ResponseData): + """GET /api/operate 返回数据结构""" + + result: list[OperateAndBindType] = Field(..., title="Result") diff --git a/apps/schemas/task.py b/apps/schemas/task.py index 8efcb59914d568478c65d611670b251d019b38ed..2bd292b67322aa586d557546f28703d2b3e11469 100644 --- a/apps/schemas/task.py +++ b/apps/schemas/task.py @@ -7,8 +7,9 @@ from typing import Any from pydantic import BaseModel, Field -from apps.schemas.enum_var import StepStatus +from apps.schemas.enum_var import FlowStatus, StepStatus, LanguageType from apps.schemas.flow import Step +from apps.schemas.mcp import MCPPlan class FlowStepHistory(BaseModel): @@ -22,12 +23,14 @@ class FlowStepHistory(BaseModel): task_id: str = Field(description="任务ID") flow_id: str = Field(description="FlowID") flow_name: str = Field(description="Flow名称") + flow_status: FlowStatus = Field(description="Flow状态") step_id: str = Field(description="当前步骤名称") step_name: str = Field(description="当前步骤名称") - step_description: str = Field(description="当前步骤描述") - status: StepStatus = Field(description="当前步骤状态") + step_description: str = Field(description="当前步骤描述", default="") + step_status: StepStatus = Field(description="当前步骤状态") input_data: dict[str, Any] = Field(description="当前Step执行的输入", default={}) output_data: dict[str, Any] = Field(description="当前Step执行后的结果", default={}) + ex_data: dict[str, Any] | None = Field(description="额外数据", default=None) created_at: float = Field(default_factory=lambda: round(datetime.now(tz=UTC).timestamp(), 3)) @@ -35,16 +38,21 @@ class ExecutorState(BaseModel): """FlowExecutor状态""" # 执行器级数据 - flow_id: str = Field(description="Flow ID") - flow_name: str = Field(description="Flow名称") - description: str = Field(description="Flow描述") - status: StepStatus = Field(description="Flow执行状态") - # 附加信息 - step_id: str = 
Field(description="当前步骤ID") - step_name: str = Field(description="当前步骤名称") - app_id: str = Field(description="应用ID") - slot: dict[str, Any] = Field(description="待填充参数的JSON Schema", default={}) - error_info: dict[str, Any] = Field(description="错误信息", default={}) + flow_id: str = Field(description="Flow ID", default="") + flow_name: str = Field(description="Flow名称", default="") + description: str = Field(description="Flow描述", default="") + flow_status: FlowStatus = Field(description="Flow状态", default=FlowStatus.INIT) + # 任务级数据 + step_cnt: int = Field(description="当前步骤数量", default=0) + step_id: str = Field(description="当前步骤ID", default="") + tool_id: str = Field(description="当前工具ID", default="") + step_name: str = Field(description="当前步骤名称", default="") + step_status: StepStatus = Field(description="当前步骤状态", default=StepStatus.UNKNOWN) + step_description: str = Field(description="当前步骤描述", default="") + app_id: str = Field(description="应用ID", default="") + current_input: dict[str, Any] = Field(description="当前输入数据", default={}) + error_message: str = Field(description="错误信息", default="") + retry_times: int = Field(description="当前步骤重试次数", default=0) class TaskIds(BaseModel): @@ -55,6 +63,7 @@ class TaskIds(BaseModel): conversation_id: str = Field(description="对话ID") record_id: str = Field(description="记录ID", default_factory=lambda: str(uuid.uuid4())) user_sub: str = Field(description="用户ID") + active_id: str = Field(description="活动ID", default_factory=lambda: str(uuid.uuid4())) class TaskTokens(BaseModel): @@ -86,11 +95,12 @@ class Task(BaseModel): id: str = Field(default_factory=lambda: str(uuid.uuid4()), alias="_id") ids: TaskIds = Field(description="任务涉及的各种ID") - context: list[dict[str, Any]] = Field(description="Flow的步骤执行信息", default=[]) - state: ExecutorState | None = Field(description="Flow的状态", default=None) + context: list[FlowStepHistory] = Field(description="Flow的步骤执行信息", default=[]) + state: ExecutorState = Field(description="Flow的状态", default=ExecutorState()) 
logger = logging.getLogger(__name__)


class Activity:
    """Sliding-window rate limiting of user activity.

    A user may hold at most SLIDE_WINDOW_QUESTION_COUNT active requests
    within the last SLIDE_WINDOW_TIME seconds.
    """

    @staticmethod
    async def is_active(active_id: str) -> bool:
        """
        Check whether the activity record identified by *active_id* still exists.

        :param active_id: activity record ID returned by ``set_active``
        :return: True if the record exists (i.e. the request is still active)
        """
        active = await MongoDB().get_collection("activity").find_one(
            {"_id": active_id},
        )
        return bool(active)

    @staticmethod
    async def set_active(user_sub: str) -> str:
        """
        Register a new activity record for the user.

        :param user_sub: user entity ID
        :return: ID of the newly inserted activity record
        :raises ActivityError: the user already has SLIDE_WINDOW_QUESTION_COUNT
            records inside the sliding window
        """
        time = round(datetime.now(UTC).timestamp(), 3)
        collection = MongoDB().get_collection("activity")
        # Count this user's records still inside the sliding window.
        window_count = await collection.count_documents(
            {"user_sub": user_sub, "timestamp": {"$gt": time - SLIDE_WINDOW_TIME}},
        )
        if window_count >= SLIDE_WINDOW_QUESTION_COUNT:
            err = "[Activity] 用户在滑动窗口内提问次数超过限制,请稍后再试。"
            raise ActivityError(err)
        # Purge this user's records that have fallen out of the window.
        await collection.delete_many(
            {"user_sub": user_sub, "timestamp": {"$lte": time - SLIDE_WINDOW_TIME}},
        )
        # Insert the new activity record and hand its ID back to the caller,
        # which later releases the slot via remove_active().
        tmp_record = {
            "_id": str(uuid.uuid4()),
            "user_sub": user_sub,
            "timestamp": time,
        }
        await collection.insert_one(tmp_record)
        return tmp_record["_id"]

    @staticmethod
    async def remove_active(active_id: str) -> None:
        """
        Delete an activity record, releasing the user's request slot.

        :param active_id: activity record ID returned by ``set_active``
        """
        await MongoDB().get_collection("activity").delete_one(
            {"_id": active_id},
        )
not None: filters["app_type"] = app_type.value - # 获取应用列表 apps, total_apps = await AppCenterManager._search_apps_by_filter(filters, page, SERVICE_PAGE_SIZE) @@ -420,7 +417,7 @@ class AppCenterManager: ) @staticmethod - def _create_flow_metadata( + async def _create_flow_metadata( common_params: dict, data: AppData | None = None, app_data: AppPool | None = None, @@ -461,7 +458,7 @@ class AppCenterManager: return metadata @staticmethod - def _create_agent_metadata( + async def _create_agent_metadata( common_params: dict, user_sub: str, data: AppData | None = None, @@ -474,7 +471,12 @@ class AppCenterManager: # mcp_service 逻辑 if data is not None and hasattr(data, "mcp_service") and data.mcp_service: # 创建应用场景,验证传入的 mcp_service 状态,确保只使用已经激活的 (create_app) - metadata.mcp_service = [svc for svc in data.mcp_service if MCPServiceManager.is_active(user_sub, svc)] + activated_mcp_ids = [] + for svc in data.mcp_service: + is_activated = await MCPServiceManager.is_active(user_sub, svc) + if is_activated: + activated_mcp_ids.append(svc) + metadata.mcp_service = activated_mcp_ids elif data is not None and hasattr(data, "mcp_service"): # 更新应用场景,使用 data 中的 mcp_service (update_app) metadata.mcp_service = data.mcp_service if data.mcp_service is not None else [] @@ -484,7 +486,16 @@ class AppCenterManager: else: # 在预期的条件下,如果在 data 或 app_data 中找不到 mcp_service,则默认回退为空列表。 metadata.mcp_service = [] - + # 处理llm_id字段 + if data is not None and hasattr(data, "llm"): + # 创建应用场景,验证传入的 llm_id 状态 (create_app) + metadata.llm_id = data.llm if data.llm else "empty" + elif app_data is not None and hasattr(app_data, "llm_id"): + # 更新应用发布状态场景,使用 app_data 中的 llm_id (update_app_publish_status) + metadata.llm_id = app_data.llm_id if app_data.llm_id else "empty" + else: + # 在预期的条件下,如果在 data 或 app_data 中找不到 llm_id,则默认回退为 "empty"。 + metadata.llm_id = "empty" # Agent 应用的发布状态逻辑 if published is not None: # 从 update_app_publish_status 调用,'published' 参数已提供 metadata.published = published @@ -548,10 +559,10 @@ class 
AppCenterManager: # 根据应用类型创建不同的元数据 if app_type == AppType.FLOW: - return AppCenterManager._create_flow_metadata(common_params, data, app_data, published) + return (await AppCenterManager._create_flow_metadata(common_params, data, app_data, published)) if app_type == AppType.AGENT: - return AppCenterManager._create_agent_metadata(common_params, user_sub, data, app_data, published) + return (await AppCenterManager._create_agent_metadata(common_params, user_sub, data, app_data, published)) msg = "无效的应用类型" raise ValueError(msg) diff --git a/apps/services/conversation.py b/apps/services/conversation.py index 4bcade45c757ea2b3dca6c58863b2852af98c15b..3ced25872631ed9164cd607e319f147853889a2d 100644 --- a/apps/services/conversation.py +++ b/apps/services/conversation.py @@ -40,6 +40,7 @@ class ConversationManager: @staticmethod async def add_conversation_by_user_sub( + title: str, user_sub: str, app_id: str, llm_id: str, kb_ids: list[str], *, debug: bool) -> Conversation | None: """通过用户ID新建对话""" @@ -59,7 +60,11 @@ class ConversationManager: model_name=llm.model_name, ) kb_item_list = [] - team_kb_list = await KnowledgeBaseManager.get_team_kb_list_from_rag(user_sub, None, None) + try: + team_kb_list = await KnowledgeBaseManager.get_team_kb_list_from_rag(user_sub, None, None) + except: + logger.error("[ConversationManager] 获取团队知识库列表失败") + team_kb_list = [] for team_kb in team_kb_list: for kb in team_kb["kbList"]: if str(kb["kbId"]) in kb_ids: @@ -71,6 +76,7 @@ class ConversationManager: conversation_id = str(uuid.uuid4()) conv = Conversation( _id=conversation_id, + title=title, user_sub=user_sub, app_id=app_id, llm=llm_item, @@ -109,11 +115,10 @@ class ConversationManager: """通过ConversationID更新对话信息""" mongo = MongoDB() conv_collection = mongo.get_collection("conversation") - result = await conv_collection.update_one( + await conv_collection.update_one( {"_id": conversation_id, "user_sub": user_sub}, {"$set": data}, ) - return result.modified_count > 0 @staticmethod async def 
delete_conversation_by_conversation_id(user_sub: str, conversation_id: str) -> None: @@ -136,4 +141,4 @@ class ConversationManager: await record_group_collection.delete_many({"conversation_id": conversation_id}, session=session) await session.commit_transaction() - await TaskManager.delete_tasks_by_conversation_id(conversation_id) + await TaskManager.delete_tasks_and_flow_context_by_conversation_id(conversation_id) diff --git a/apps/services/document.py b/apps/services/document.py index 203162da1137cdd69d900ceea65a3029d2f4fd8e..451423a9d8f752c2abbf97f5b2a1df38e6d7fefe 100644 --- a/apps/services/document.py +++ b/apps/services/document.py @@ -2,6 +2,7 @@ """文件Manager""" import base64 +from datetime import UTC, datetime import logging import uuid @@ -131,12 +132,15 @@ class DocumentManager: return [ RecordDocument( _id=doc.id, + order=doc.order, + author=doc.author, abstract=doc.abstract, name=doc.name, type=doc.extension, size=doc.size, conversation_id=record_group.get("conversation_id", ""), associated=doc.associated, + created_at=doc.created_at or round(datetime.now(tz=UTC).timestamp(), 3) ) for doc in docs if type is None or doc.associated == type ] diff --git a/apps/services/flow.py b/apps/services/flow.py index 9275fc60c75199e8e17ebcba8f164083190b7211..5d5c9a634c21b88f99137ea432eb2acb294901a5 100644 --- a/apps/services/flow.py +++ b/apps/services/flow.py @@ -2,14 +2,14 @@ """flow Manager""" import logging - +from pydantic import BaseModel, Field from pymongo import ASCENDING from apps.common.mongo import MongoDB from apps.scheduler.pool.loader.flow import FlowLoader from apps.scheduler.slot.slot import Slot from apps.schemas.collection import User -from apps.schemas.enum_var import EdgeType, PermissionType +from apps.schemas.enum_var import EdgeType, PermissionType, LanguageType from apps.schemas.flow import Edge, Flow, Step from apps.schemas.flow_topology import ( EdgeItem, @@ -19,8 +19,8 @@ from apps.schemas.flow_topology import ( NodeServiceItem, 
PositionItem, ) +from apps.scheduler.pool.pool import Pool from apps.services.node import NodeManager - logger = logging.getLogger(__name__) @@ -69,7 +69,9 @@ class FlowManager: return (result > 0) @staticmethod - async def get_node_id_by_service_id(service_id: str) -> list[NodeMetaDataItem] | None: + async def get_node_id_by_service_id( + service_id: str, language: LanguageType = LanguageType.CHINESE + ) -> list[NodeMetaDataItem] | None: """ serviceId获取service的接口数据,并将接口转换为节点元数据 @@ -92,11 +94,20 @@ class FlowManager: except Exception: logger.exception("[FlowManager] generate_from_schema 失败") continue + + if service_id == "": + call_class: type[BaseModel] = await Pool().get_call(node_pool_record["_id"]) + node_name = call_class.info(language).name + node_description = call_class.info().description + else: + node_name = node_pool_record["name"] + node_description = node_pool_record["description"] + node_meta_data_item = NodeMetaDataItem( nodeId=node_pool_record["_id"], callId=node_pool_record["call_id"], - name=node_pool_record["name"], - description=node_pool_record["description"], + name=node_name, + description=node_description, editable=True, createdAt=node_pool_record["created_at"], parameters=parameters, # 添加 parametersTemplate 参数 @@ -109,7 +120,9 @@ class FlowManager: return nodes_meta_data_items @staticmethod - async def get_service_by_user_id(user_sub: str) -> list[NodeServiceItem] | None: + async def get_service_by_user_id( + user_sub: str, language: LanguageType = LanguageType.CHINESE + ) -> list[NodeServiceItem] | None: """ 通过user_id获取用户自己上传的、其他人公开的且收藏的、受保护且有权限访问并收藏的service @@ -149,7 +162,14 @@ class FlowManager: sort=[("created_at", ASCENDING)], ) service_records = await service_records_cursor.to_list(length=None) - service_items = [NodeServiceItem(serviceId="", name="系统", type="system", nodeMetaDatas=[])] + service_items = [ + NodeServiceItem( + serviceId="", + name="系统" if language == LanguageType.CHINESE else "System", + type="system", + 
nodeMetaDatas=[], + ) + ] service_items += [ NodeServiceItem( serviceId=record["_id"], @@ -161,7 +181,9 @@ class FlowManager: for record in service_records ] for service_item in service_items: - node_meta_datas = await FlowManager.get_node_id_by_service_id(service_item.service_id) + node_meta_datas = await FlowManager.get_node_id_by_service_id( + service_item.service_id, language + ) if node_meta_datas is None: node_meta_datas = [] service_item.node_meta_datas = node_meta_datas @@ -258,10 +280,7 @@ class FlowManager: ) for node_id, node_config in flow_config.steps.items(): input_parameters = node_config.params - if node_config.node not in ("Empty"): - _, output_parameters = await NodeManager.get_node_params(node_config.node) - else: - output_parameters = {} + _, output_parameters = await NodeManager.get_node_params(node_config.node) parameters = { "input_parameters": input_parameters, "output_parameters": Slot(output_parameters).extract_type_desc_from_schema(), diff --git a/apps/services/flow_validate.py b/apps/services/flow_validate.py index 78e8d340e955115a51e24ac8fc9081fd8ff00e7e..16c23053705c158c4d4f7e0faa438fe3927c0d63 100644 --- a/apps/services/flow_validate.py +++ b/apps/services/flow_validate.py @@ -4,6 +4,7 @@ import collections import logging +from apps.schemas.enum_var import SpecialCallType from apps.exceptions import FlowBranchValidationError, FlowEdgeValidationError, FlowNodeValidationError from apps.schemas.enum_var import NodeType from apps.schemas.flow_topology import EdgeItem, FlowItem, NodeItem @@ -38,32 +39,40 @@ class FlowService: for node in flow_item.nodes: from apps.scheduler.pool.pool import Pool from pydantic import BaseModel - if node.node_id != 'start' and node.node_id != 'end' and node.node_id != 'Empty': + if node.node_id != 'start' and node.node_id != 'end' and node.node_id != SpecialCallType.EMPTY.value: try: call_class: type[BaseModel] = await Pool().get_call(node.call_id) if not call_class: - node.node_id = 'Empty' + node.node_id = 
SpecialCallType.EMPTY.value node.description = '【对应的api工具被删除!节点不可用!请联系相关人员!】\n\n'+node.description except Exception as e: - node.node_id = 'Empty' + node.node_id = SpecialCallType.EMPTY.value node.description = '【对应的api工具被删除!节点不可用!请联系相关人员!】\n\n'+node.description logger.error(f"[FlowService] 获取步骤的call_id失败{node.call_id}由于:{e}") node_branch_map[node.step_id] = set() if node.call_id == NodeType.CHOICE.value: - node.parameters = node.parameters["input_parameters"] - if "choices" not in node.parameters: - node.parameters["choices"] = [] - for choice in node.parameters["choices"]: - if choice["branchId"] in node_branch_map[node.step_id]: - err = f"[FlowService] 节点{node.name}的分支{choice['branchId']}重复" + input_parameters = node.parameters["input_parameters"] + if "choices" not in input_parameters: + logger.error(f"[FlowService] 节点{node.name}的分支字段缺失") + raise FlowBranchValidationError(f"[FlowService] 节点{node.name}的分支字段缺失") + if not input_parameters["choices"]: + logger.error(f"[FlowService] 节点{node.name}的分支字段为空") + raise FlowBranchValidationError(f"[FlowService] 节点{node.name}的分支字段为空") + for choice in input_parameters["choices"]: + if "branch_id" not in choice: + err = f"[FlowService] 节点{node.name}的分支choice缺少branch_id字段" + logger.error(err) + raise FlowBranchValidationError(err) + if choice["branch_id"] in node_branch_map[node.step_id]: + err = f"[FlowService] 节点{node.name}的分支{choice['branch_id']}重复" logger.error(err) raise Exception(err) for illegal_char in branch_illegal_chars: - if illegal_char in choice["branchId"]: - err = f"[FlowService] 节点{node.name}的分支{choice['branchId']}名称中含有非法字符" + if illegal_char in choice["branch_id"]: + err = f"[FlowService] 节点{node.name}的分支{choice['branch_id']}名称中含有非法字符" logger.error(err) raise Exception(err) - node_branch_map[node.step_id].add(choice["branchId"]) + node_branch_map[node.step_id].add(choice["branch_id"]) else: node_branch_map[node.step_id].add("") valid_edges = [] @@ -133,7 +142,6 @@ class FlowService: branches = {} in_deg = {} 
out_deg = {} - for e in edges: if e.edge_id in ids: err = f"[FlowService] 边{e.edge_id}的id重复" diff --git a/apps/services/knowledge.py b/apps/services/knowledge.py index 9b4077f92cf44bca42d321cc20975d7ea2e5cadd..bd8dfc9e808f642a8eecf493a96f762dad8ae7b9 100644 --- a/apps/services/knowledge.py +++ b/apps/services/knowledge.py @@ -138,7 +138,11 @@ class KnowledgeBaseManager: return [] kb_ids_update_success = [] kb_item_dict_list = [] - team_kb_list = await KnowledgeBaseManager.get_team_kb_list_from_rag(user_sub, None, None) + try: + team_kb_list = await KnowledgeBaseManager.get_team_kb_list_from_rag(user_sub, None, None) + except Exception as e: + logger.error(f"[KnowledgeBaseManager] 获取团队知识库列表失败: {e}") + team_kb_list = [] for team_kb in team_kb_list: for kb in team_kb["kbList"]: if str(kb["kbId"]) in kb_ids: diff --git a/apps/services/mcp_service.py b/apps/services/mcp_service.py index 7cb880c0f08b1cc7727bde528be39ac748f07ae2..f6fec0ebd2f7053d3f8bc72a9d97ba077019528b 100644 --- a/apps/services/mcp_service.py +++ b/apps/services/mcp_service.py @@ -2,6 +2,7 @@ """MCP服务管理器""" import logging +from logging import config import random import re from typing import Any @@ -28,8 +29,10 @@ from apps.schemas.mcp import ( MCPTool, MCPType, ) +from apps.services.user import UserManager from apps.schemas.request_data import UpdateMCPServiceRequest from apps.schemas.response_data import MCPServiceCardItem +from apps.constants import MCP_PATH logger = logging.getLogger(__name__) sqids = Sqids(min_length=6) @@ -66,10 +69,8 @@ class MCPServiceManager: mcp_list = await mcp_collection.find({"_id": mcp_id}, {"status": True}).to_list(None) for db_item in mcp_list: status = db_item.get("status") - if MCPInstallStatus.READY.value == status: - return MCPInstallStatus.READY - if MCPInstallStatus.INSTALLING.value == status: - return MCPInstallStatus.INSTALLING + if status in MCPInstallStatus.__members__.values(): + return status return MCPInstallStatus.FAILED @staticmethod @@ -78,6 +79,8 @@ 
class MCPServiceManager: user_sub: str, keyword: str | None, page: int, + is_install: bool | None = None, + is_active: bool | None = None, ) -> list[MCPServiceCardItem]: """ 获取所有MCP服务列表 @@ -89,6 +92,20 @@ class MCPServiceManager: :return: MCP服务列表 """ filters = MCPServiceManager._build_filters(search_type, keyword) + if is_active is not None: + if is_active: + filters["activated"] = {"$in": [user_sub]} + else: + filters["activated"] = {"$nin": [user_sub]} + user_info = await UserManager.get_userinfo_by_user_sub(user_sub) + if not user_info.is_admin: + filters["status"] = MCPInstallStatus.READY.value + else: + if is_install is not None: + if is_install: + filters["status"] = MCPInstallStatus.READY.value + else: + filters["status"] = {"$ne": MCPInstallStatus.READY.value} mcpservice_pools = await MCPServiceManager._search_mcpservice(filters, page) return [ MCPServiceCardItem( @@ -198,7 +215,6 @@ class MCPServiceManager: base_filters = {"author": {"$regex": keyword, "$options": "i"}} return base_filters - @staticmethod async def create_mcpservice(data: UpdateMCPServiceRequest, user_sub: str) -> str: """ @@ -209,9 +225,9 @@ class MCPServiceManager: """ # 检查config if data.mcp_type == MCPType.SSE: - config = MCPServerSSEConfig.model_validate_json(data.config) + config = MCPServerSSEConfig.model_validate(data.config) else: - config = MCPServerStdioConfig.model_validate_json(data.config) + config = MCPServerStdioConfig.model_validate(data.config) # 构造Server mcp_server = MCPServerConfig( @@ -233,8 +249,24 @@ class MCPServiceManager: # 保存并载入配置 logger.info("[MCPServiceManager] 创建mcp:%s", mcp_server.name) + mcp_path = MCP_PATH / "template" / mcp_id / "project" + if isinstance(config, MCPServerStdioConfig): + index = None + for i in range(len(config.args)): + if not config.args[i] == "--directory": + continue + index = i + 1 + break + if index is not None: + if index >= len(config.args): + config.args.append(str(mcp_path)) + else: + config.args[index] = str(mcp_path) + else: + 
config.args += ["--directory", str(mcp_path)] + await MCPLoader._insert_template_db(mcp_id=mcp_id, config=mcp_server) await MCPLoader.save_one(mcp_id, mcp_server) - await MCPLoader.init_one_template(mcp_id=mcp_id, config=mcp_server) + await MCPLoader.update_template_status(mcp_id, MCPInstallStatus.INIT) return mcp_id @staticmethod @@ -256,21 +288,25 @@ class MCPServiceManager: raise ValueError(msg) db_service = MCPCollection.model_validate(db_service) - for user_id in db_service.activated: - await MCPServiceManager.deactive_mcpservice(user_sub=user_id, service_id=data.service_id) - - await MCPLoader.init_one_template(mcp_id=data.service_id, config=MCPServerConfig( + mcp_config = MCPServerConfig( name=data.name, overview=data.overview, description=data.description, - config=MCPServerStdioConfig.model_validate_json( + config=MCPServerStdioConfig.model_validate( data.config, - ) if data.mcp_type == MCPType.STDIO else MCPServerSSEConfig.model_validate_json( + ) if data.mcp_type == MCPType.STDIO else MCPServerSSEConfig.model_validate( data.config, ), type=data.mcp_type, author=user_sub, - )) + ) + old_mcp_config = await MCPLoader.get_config(data.service_id) + await MCPLoader._insert_template_db(mcp_id=data.service_id, config=mcp_config) + await MCPLoader.save_one(mcp_id=data.service_id, config=mcp_config) + if old_mcp_config.type != mcp_config.type or old_mcp_config.config != mcp_config.config: + for user_id in db_service.activated: + await MCPServiceManager.deactive_mcpservice(user_sub=user_id, service_id=data.service_id) + await MCPLoader.update_template_status(data.service_id, MCPInstallStatus.INIT) # 返回服务ID return data.service_id @@ -297,6 +333,7 @@ class MCPServiceManager: async def active_mcpservice( user_sub: str, service_id: str, + mcp_env: dict[str, Any] | None = None, ) -> None: """ 激活MCP服务 @@ -310,7 +347,7 @@ class MCPServiceManager: for item in status: mcp_status = item.get("status", MCPInstallStatus.INSTALLING) if mcp_status == MCPInstallStatus.READY: - 
await MCPLoader.user_active_template(user_sub, service_id) + await MCPLoader.user_active_template(user_sub, service_id, mcp_env) else: err = "[MCPServiceManager] MCP服务未准备就绪" raise RuntimeError(err) @@ -365,9 +402,36 @@ class MCPServiceManager: image = image.convert("RGB") image = image.resize((64, 64), resample=Image.Resampling.LANCZOS) # 检查文件夹 - if not await MCP_ICON_PATH.exists(): - await MCP_ICON_PATH.mkdir(parents=True, exist_ok=True) + image_path = MCP_PATH / "template" / service_id / "icon" + if not await image_path.exists(): + await image_path.mkdir(parents=True, exist_ok=True) # 保存 - image.save(MCP_ICON_PATH / f"{service_id}.png", format="PNG", optimize=True, compress_level=9) + image.save(image_path / f"{service_id}.png", format="PNG", optimize=True, compress_level=9) + + return f"{image_path / f'{service_id}.png'}" + + @staticmethod + async def install_mcpservice(user_sub: str, service_id: str, install: bool) -> None: + """ + 安装或卸载MCP服务 - return f"/static/mcp/{service_id}.png" + :param user_sub: str: 用户ID + :param service_id: str: MCP服务ID + :param install: bool: 是否安装 + :return: 无 + """ + service_collection = MongoDB().get_collection("mcp") + db_service = await service_collection.find_one({"_id": service_id, "author": user_sub}) + db_service = MCPCollection.model_validate(db_service) + if install: + if db_service.status == MCPInstallStatus.INSTALLING: + err = "[MCPServiceManager] MCP服务已处于安装中" + raise Exception(err) + await service_collection.update_one( + {"_id": service_id}, + {"$set": {"status": MCPInstallStatus.INSTALLING}}, + ) + mcp_config = await MCPLoader.get_config(service_id) + await MCPLoader.init_one_template(mcp_id=service_id, config=mcp_config) + else: + await MCPLoader.cancel_installing_task([service_id]) diff --git a/apps/services/node.py b/apps/services/node.py index 6f0d492edacdd7611324a38953417e0fa769249b..3fb311bf67b14de543567bf559540ced7aa8b11e 100644 --- a/apps/services/node.py +++ b/apps/services/node.py @@ -4,6 +4,7 @@ import logging 
from typing import TYPE_CHECKING, Any +from apps.schemas.enum_var import SpecialCallType from apps.common.mongo import MongoDB from apps.schemas.node import APINode from apps.schemas.pool import NodePool @@ -16,6 +17,7 @@ NODE_TYPE_MAP = { "API": APINode, } + class NodeManager: """Node管理器""" @@ -29,7 +31,6 @@ class NodeManager: raise ValueError(err) return node["call_id"] - @staticmethod async def get_node(node_id: str) -> NodePool: """获取Node的类型""" @@ -40,7 +41,6 @@ class NodeManager: raise ValueError(err) return NodePool.model_validate(node) - @staticmethod async def get_node_name(node_id: str) -> str: """获取node的名称""" @@ -52,7 +52,6 @@ class NodeManager: return "" return node_doc["name"] - @staticmethod def merge_params_schema(params_schema: dict[str, Any], known_params: dict[str, Any]) -> dict[str, Any]: """递归合并参数Schema,将known_params中的值填充到params_schema的对应位置""" @@ -75,12 +74,13 @@ class NodeManager: return params_schema - @staticmethod async def get_node_params(node_id: str) -> tuple[dict[str, Any], dict[str, Any]]: """获取Node数据""" from apps.scheduler.pool.pool import Pool - + if node_id == SpecialCallType.EMPTY.value: + # 如果是空节点,返回空Schema + return {}, {} # 查找Node信息 logger.info("[NodeManager] 获取节点 %s", node_id) node_collection = MongoDB().get_collection("node") @@ -100,7 +100,6 @@ class NodeManager: err = f"[NodeManager] Call {call_id} 不存在" logger.error(err) raise ValueError(err) - # 返回参数Schema return ( NodeManager.merge_params_schema(call_class.model_json_schema(), node_data.known_params or {}), diff --git a/apps/services/parameter.py b/apps/services/parameter.py new file mode 100644 index 0000000000000000000000000000000000000000..89f1ca85fa5f84c74e20b4f139a381052425d882 --- /dev/null +++ b/apps/services/parameter.py @@ -0,0 +1,91 @@ +# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
+"""flow Manager""" + +import logging + +from pymongo import ASCENDING + +from apps.services.node import NodeManager +from apps.schemas.flow_topology import FlowItem +from apps.scheduler.slot.slot import Slot +from apps.scheduler.call.choice.condition_handler import ConditionHandler +from apps.scheduler.call.choice.schema import ( + NumberOperate, + StringOperate, + ListOperate, + BoolOperate, + DictOperate, + Type +) +from apps.schemas.response_data import ( + OperateAndBindType, + ParamsNode, + StepParams, +) +from apps.services.node import NodeManager +logger = logging.getLogger(__name__) + + +class ParameterManager: + """Parameter Manager""" + @staticmethod + async def get_operate_and_bind_type(param_type: Type) -> list[OperateAndBindType]: + """Get operate and bind type""" + result = [] + operate = None + if param_type == Type.NUMBER: + operate = NumberOperate + elif param_type == Type.STRING: + operate = StringOperate + elif param_type == Type.LIST: + operate = ListOperate + elif param_type == Type.BOOL: + operate = BoolOperate + elif param_type == Type.DICT: + operate = DictOperate + if operate: + for item in operate: + result.append(OperateAndBindType( + operate=item, + bind_type=(await ConditionHandler.get_value_type_from_operate(item)))) + return result + + @staticmethod + async def get_pre_params_by_flow_and_step_id(flow: FlowItem, step_id: str) -> list[StepParams]: + """Get pre params by flow and step id""" + index = 0 + q = [step_id] + in_edges = {} + step_id_to_node_id = {} + step_id_to_node_name = {} + for step in flow.nodes: + step_id_to_node_id[step.step_id] = step.node_id + step_id_to_node_name[step.step_id] = step.name + for edge in flow.edges: + if edge.target_node not in in_edges: + in_edges[edge.target_node] = [] + in_edges[edge.target_node].append(edge.source_node) + while index < len(q): + tmp_step_id = q[index] + index += 1 + for i in range(len(in_edges.get(tmp_step_id, []))): + pre_node_id = in_edges[tmp_step_id][i] + if pre_node_id not in 
q: + q.append(pre_node_id) + pre_step_params = [] + for i in range(1, len(q)): + step_id = q[i] + if step_id == 'start' or step_id == 'end': + continue + node_id = step_id_to_node_id.get(step_id) + _, output_schema = await NodeManager.get_node_params(node_id) + slot = Slot(output_schema) + params_node = slot.get_params_node_from_schema() + pre_step_params.append( + StepParams( + stepId=step_id, + name=step_id_to_node_name.get(step_id), + paramsNode=params_node + ) + ) + return pre_step_params diff --git a/apps/services/rag.py b/apps/services/rag.py index 6b6c843dc6fc09a14809ef0a11bc44b2da434f29..918962498bf7727dba5e62769c813c4bdb2bf456 100644 --- a/apps/services/rag.py +++ b/apps/services/rag.py @@ -1,10 +1,11 @@ # Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. """对接Euler Copilot RAG""" +from datetime import UTC, datetime import json import logging from collections.abc import AsyncGenerator - +import re import httpx from typing import Any from fastapi import status @@ -15,9 +16,8 @@ from apps.llm.reasoning import ReasoningLLM from apps.llm.token import TokenCalculator from apps.schemas.collection import LLM from apps.schemas.config import LLMConfig -from apps.schemas.enum_var import EventType +from apps.schemas.enum_var import EventType, LanguageType from apps.schemas.rag_data import RAGQueryReq -from apps.services.activity import Activity from apps.services.session import SessionManager logger = logging.getLogger(__name__) @@ -28,59 +28,106 @@ class RAG: system_prompt: str = "You are a helpful assistant." 
"""系统提示词""" - user_prompt = """' - - 你是openEuler社区的智能助手。请结合给出的背景信息, 回答用户的提问,并且基于给出的背景信息在相关句子后进行脚注。 - 一个例子将在中给出。 - 上下文背景信息将在中给出。 - 用户的提问将在中给出。 - 注意: - 1.输出不要包含任何XML标签,不要编造任何信息。若你认为用户提问与背景信息无关,请忽略背景信息直接作答。 - 2.脚注的格式为[[1]],[[2]],[[3]]等,脚注的内容为提供的文档的id。 - 3.脚注只出现在回答的句子的末尾,例如句号、问号等标点符号后面。 - 4.不要对脚注本身进行解释或说明。 - 5.请不要使用中的文档的id作为脚注。 - - + user_prompt: dict[LanguageType, str] = { + LanguageType.CHINESE: r""" + + 你是openEuler社区的智能助手。请结合给出的背景信息, 回答用户的提问,并且基于给出的背景信息在相关句子后进行脚注。 + 一个例子将在中给出。 + 上下文背景信息将在中给出。 + 用户的提问将在中给出。 + 注意: + 1.输出不要包含任何XML标签,不要编造任何信息。若你认为用户提问与背景信息无关,请忽略背景信息直接作答。 + 2.脚注的格式为[[1]],[[2]],[[3]]等,脚注的内容为提供的文档的id。 + 3.脚注只出现在回答的句子的末尾,例如句号、问号等标点符号后面。 + 4.不要对脚注本身进行解释或说明。 + 5.请不要使用中的文档的id作为脚注。 + + + + + + openEuler社区是一个开源操作系统社区,致力于推动Linux操作系统的发展。 + + + openEuler社区的目标是为用户提供一个稳定、安全、高效的操作系统平台,并且支持多种硬件架构。 + + + + + openEuler社区的成员来自世界各地,包括开发者、用户和企业。 + + + openEuler社区的成员共同努力,推动开源操作系统的发展,并且为用户提供支持和帮助。 + + + + + openEuler社区的目标是什么? + + + openEuler社区是一个开源操作系统社区,致力于推动Linux操作系统的发展。[[1]] + openEuler社区的目标是为用户提供一个稳定、安全、高效的操作系统平台,并且支持多种硬件架构。[[1]] + + + - - - openEuler社区是一个开源操作系统社区,致力于推动Linux操作系统的发展。 - - - openEuler社区的目标是为用户提供一个稳定、安全、高效的操作系统平台,并且支持多种硬件架构。 - - - - - openEuler社区的成员来自世界各地,包括开发者、用户和企业。 - - - openEuler社区的成员共同努力,推动开源操作系统的发展,并且为用户提供支持和帮助。 - - + {bac_info} - openEuler社区的目标是什么? + {user_question} - - openEuler社区是一个开源操作系统社区,致力于推动Linux操作系统的发展。[[1]] - openEuler社区的目标是为用户提供一个稳定、安全、高效的操作系统平台,并且支持多种硬件架构。[[1]] - - - - - {bac_info} - - - {user_question} - - """ + """, + LanguageType.ENGLISH: r""" + + You are a helpful assistant of openEuler community. Please answer the user's question based on the given background information and add footnotes after the related sentences. + An example will be given in . + The background information will be given in . + The user's question will be given in . + Note: + 1. Do not include any XML tags in the output, and do not make up any information. 
If you think the user's question is unrelated to the background information, please ignore the background information and directly answer. + 2. Your response should not exceed 250 words. + + + + + + openEuler community is an open source operating system community, committed to promoting the development of the Linux operating system. + + + openEuler community aims to provide users with a stable, secure, and efficient operating system platform, and support multiple hardware architectures. + + + + + Members of the openEuler community come from all over the world, including developers, users, and enterprises. + + + Members of the openEuler community work together to promote the development of open source operating systems, and provide support and assistance to users. + + + + + What is the goal of openEuler community? + + + openEuler community is an open source operating system community, committed to promoting the development of the Linux operating system. [[1]] + openEuler community aims to provide users with a stable, secure, and efficient operating system platform, and support multiple hardware architectures. 
[[1]] + + + + + {bac_info} + + + {user_question} + + """, + } @staticmethod - async def get_doc_info_from_rag(user_sub: str, max_tokens: int, - doc_ids: list[str], - data: RAGQueryReq) -> list[dict[str, Any]]: + async def get_doc_info_from_rag( + user_sub: str, max_tokens: int, doc_ids: list[str], data: RAGQueryReq + ) -> list[dict[str, Any]]: """获取RAG服务的文档信息""" session_id = await SessionManager.get_session_by_user_sub(user_sub) url = Config().get_config().rag.rag_service.rstrip("/") + "/chunk/search" @@ -138,27 +185,43 @@ class RAG: doc_cnt += 1 doc_id_map[doc_chunk["docId"]] = doc_cnt doc_index = doc_id_map[doc_chunk["docId"]] - leave_tokens -= token_calculator.calculate_token_length(messages=[ - {"role": "user", "content": f''''''}, - {"role": "user", "content": ""} + leave_tokens -= token_calculator.calculate_token_length( + messages=[ + { + "role": "user", + "content": f"""""", + }, + {"role": "user", "content": ""}, + ], + pure_text=True, + ) + tokens_of_chunk_element = token_calculator.calculate_token_length( + messages=[ + {"role": "user", "content": ""}, + {"role": "user", "content": ""}, ], - pure_text=True) - tokens_of_chunk_element = token_calculator.calculate_token_length(messages=[ - {"role": "user", "content": ""}, - {"role": "user", "content": ""}, - ], pure_text=True) + pure_text=True, + ) doc_cnt = 0 doc_id_map = {} for doc_chunk in doc_chunk_list: if doc_chunk["docId"] not in doc_id_map: doc_cnt += 1 + t = doc_chunk.get("docCreatedAt", None) + if isinstance(t, str): + t = datetime.strptime(t, '%Y-%m-%d %H:%M') + t = round(t.replace(tzinfo=UTC).timestamp(), 3) + else: + t = round(datetime.now(UTC).timestamp(), 3) doc_info_list.append({ "id": doc_chunk["docId"], "order": doc_cnt, "name": doc_chunk.get("docName", ""), + "author": doc_chunk.get("docAuthor", ""), "extension": doc_chunk.get("docExtension", ""), "abstract": doc_chunk.get("docAbstract", ""), "size": doc_chunk.get("docSize", 0), + "created_at": t, }) doc_id_map[doc_chunk["docId"]] = 
doc_cnt doc_index = doc_id_map[doc_chunk["docId"]] @@ -188,7 +251,12 @@ class RAG: @staticmethod async def chat_with_llm_base_on_rag( - user_sub: str, llm: LLM, history: list[dict[str, str]], doc_ids: list[str], data: RAGQueryReq + user_sub: str, + llm: LLM, + history: list[dict[str, str]], + doc_ids: list[str], + data: RAGQueryReq, + language: LanguageType = LanguageType.CHINESE, ) -> AsyncGenerator[str, None]: """获取RAG服务的结果""" reasion_llm = ReasoningLLM( @@ -202,7 +270,9 @@ class RAG: if history: try: question_obj = QuestionRewrite() - data.query = await question_obj.generate(history=history, question=data.query, llm=reasion_llm) + data.query = await question_obj.generate( + history=history, question=data.query, llm=reasion_llm, language=language + ) except Exception: logger.exception("[RAG] 问题重写失败") doc_chunk_list = await RAG.get_doc_info_from_rag( @@ -217,7 +287,7 @@ class RAG: }, { "role": "user", - "content": RAG.user_prompt.format( + "content": RAG.user_prompt[language].format( bac_info=bac_info, user_question=data.query, ), @@ -242,8 +312,9 @@ class RAG: + "\n\n" ) max_footnote_length = 4 - while doc_cnt > 0: - doc_cnt //= 10 + tmp_doc_cnt = doc_cnt + while tmp_doc_cnt > 0: + tmp_doc_cnt //= 10 max_footnote_length += 1 buffer = "" async for chunk in reasion_llm.call( @@ -254,8 +325,6 @@ class RAG: result_only=False, model=llm.model_name, ): - if not await Activity.is_active(user_sub): - return chunk = buffer + chunk # 防止脚注被截断 if len(chunk) >= 2 and chunk[-2:] != "]]": @@ -267,6 +336,14 @@ class RAG: chunk = chunk[:index + 1] else: buffer = "" + # 匹配脚注 + footnotes = re.findall(r"\[\[\d+\]\]", chunk) + # 去除编号大于doc_cnt的脚注 + footnotes = [fn for fn in footnotes if int(fn[2:-2]) > doc_cnt] + footnotes = list(set(footnotes)) # 去重 + if footnotes: + for fn in footnotes: + chunk = chunk.replace(fn, "") output_tokens += TokenCalculator().calculate_token_length( messages=[ {"role": "assistant", "content": chunk}, diff --git a/apps/services/record.py 
b/apps/services/record.py index 5c8a89dfd224166ac822e2eeb47fb79f1fefe7e4..cf8373b042ab409662dcf1d2e08ebc8ac3666d1b 100644 --- a/apps/services/record.py +++ b/apps/services/record.py @@ -9,7 +9,7 @@ from apps.schemas.record import ( Record, RecordGroup, ) - +from apps.schemas.enum_var import FlowStatus logger = logging.getLogger(__name__) @@ -17,7 +17,7 @@ class RecordManager: """问答对相关操作""" @staticmethod - async def create_record_group(group_id: str, user_sub: str, conversation_id: str, task_id: str) -> str | None: + async def create_record_group(group_id: str, user_sub: str, conversation_id: str) -> str | None: """创建问答组""" mongo = MongoDB() record_group_collection = mongo.get_collection("record_group") @@ -26,7 +26,6 @@ class RecordManager: _id=group_id, user_sub=user_sub, conversation_id=conversation_id, - task_id=task_id, ) try: @@ -49,6 +48,10 @@ class RecordManager: mongo = MongoDB() group_collection = mongo.get_collection("record_group") try: + await group_collection.update_one( + {"_id": group_id, "user_sub": user_sub}, + {"$pull": {"records": {"id": record.id}}} + ) await group_collection.update_one( {"_id": group_id, "user_sub": user_sub}, {"$push": {"records": record.model_dump(by_alias=True)}}, @@ -133,6 +136,19 @@ class RecordManager: logger.exception("[RecordManager] 查询问答组失败") return [] + @staticmethod + async def update_record_flow_status_to_cancelled_by_task_ids(task_ids: list[str]) -> None: + """更新Record关联的Flow状态""" + record_group_collection = MongoDB().get_collection("record_group") + try: + await record_group_collection.update_many( + {"records.task_id": {"$in": task_ids}, "records.flow.flow_status": {"$nin": [FlowStatus.ERROR.value, FlowStatus.SUCCESS.value]}}, + {"$set": {"records.$[elem].flow.flow_status": FlowStatus.CANCELLED}}, + array_filters=[{"elem.flow.flow_id": {"$in": task_ids}}], + ) + except Exception: + logger.exception("[RecordManager] 更新Record关联的Flow状态失败") + @staticmethod async def verify_record_in_group(group_id: str, record_id: 
str, user_sub: str) -> bool: """ @@ -151,7 +167,6 @@ class RecordManager: logger.exception("[RecordManager] 验证记录是否在组中失败") return False - @staticmethod async def check_group_id(group_id: str, user_sub: str) -> bool: """检查group_id是否存在""" diff --git a/apps/services/task.py b/apps/services/task.py index 1e672be690a6f17896ab01bc7149aa353987ecf7..49c9f4b037cb95408588ce304d0f483af88b4ed8 100644 --- a/apps/services/task.py +++ b/apps/services/task.py @@ -13,6 +13,7 @@ from apps.schemas.task import ( TaskIds, TaskRuntime, TaskTokens, + FlowStepHistory ) from apps.services.record import RecordManager @@ -45,7 +46,6 @@ class TaskManager: return Task.model_validate(task) - @staticmethod async def get_task_by_group_id(group_id: str, conversation_id: str) -> Task | None: """获取组ID的最后一条问答组关联的任务""" @@ -58,7 +58,6 @@ class TaskManager: task = await task_collection.find_one({"_id": record_group_obj.task_id}) return Task.model_validate(task) - @staticmethod async def get_task_by_task_id(task_id: str) -> Task | None: """根据task_id获取任务""" @@ -68,9 +67,8 @@ class TaskManager: return None return Task.model_validate(task) - @staticmethod - async def get_context_by_record_id(record_group_id: str, record_id: str) -> list[dict[str, Any]]: + async def get_context_by_record_id(record_group_id: str, record_id: str) -> list[FlowStepHistory]: """根据record_group_id获取flow信息""" record_group_collection = MongoDB().get_collection("record_group") flow_context_collection = MongoDB().get_collection("flow_context") @@ -85,59 +83,72 @@ class TaskManager: return [] flow_context_list = [] - for flow_context_id in records[0]["records"]["flow"]: + for flow_context_id in records[0]["records"]["flow"]["history_ids"]: flow_context = await flow_context_collection.find_one({"_id": flow_context_id}) if flow_context: - flow_context_list.append(flow_context) + flow_context_list.append(FlowStepHistory.model_validate(flow_context)) except Exception: logger.exception("[TaskManager] 获取record_id的flow信息失败") return [] else: 
return flow_context_list - @staticmethod - async def get_context_by_task_id(task_id: str, length: int = 0) -> list[dict[str, Any]]: + async def get_context_by_task_id(task_id: str, length: int | None = None) -> list[FlowStepHistory]: """根据task_id获取flow信息""" flow_context_collection = MongoDB().get_collection("flow_context") flow_context = [] try: - async for history in flow_context_collection.find( - {"task_id": task_id}, - ).sort( - "created_at", -1, - ).limit(length): - flow_context += [history] + if length is None: + async for context in flow_context_collection.find({"task_id": task_id}): + flow_context.append(FlowStepHistory.model_validate(context)) + else: + async for context in flow_context_collection.find({"task_id": task_id}).limit(length): + flow_context.append(FlowStepHistory.model_validate(context)) except Exception: logger.exception("[TaskManager] 获取task_id的flow信息失败") return [] else: return flow_context + @staticmethod + async def init_new_task( + user_sub: str, + session_id: str | None = None, + post_body: RequestData | None = None, + ) -> Task: + """获取任务块""" + return Task( + _id=str(uuid.uuid4()), + ids=TaskIds( + user_sub=user_sub if user_sub else "", + session_id=session_id if session_id else "", + conversation_id=post_body.conversation_id, + group_id=post_body.group_id if post_body.group_id else "", + ), + question=post_body.question if post_body else "", + group_id=post_body.group_id if post_body else "", + tokens=TaskTokens(), + runtime=TaskRuntime(), + ) @staticmethod - async def save_flow_context(task_id: str, flow_context: list[dict[str, Any]]) -> None: + async def save_flow_context(task_id: str, flow_context: list[FlowStepHistory]) -> None: """保存flow信息到flow_context""" flow_context_collection = MongoDB().get_collection("flow_context") try: - for history in flow_context: - # 查找是否存在 - current_context = await flow_context_collection.find_one({ - "task_id": task_id, - "_id": history["_id"], - }) - if current_context: - await 
flow_context_collection.update_one( - {"_id": current_context["_id"]}, - {"$set": history}, - ) - else: - await flow_context_collection.insert_one(history) + # 删除旧的flow_context + await flow_context_collection.delete_many({"task_id": task_id}) + if not flow_context: + return + await flow_context_collection.insert_many( + [history.model_dump(exclude_none=True, by_alias=True) for history in flow_context], + ordered=False, + ) except Exception: logger.exception("[TaskManager] 保存flow执行记录失败") - @staticmethod async def delete_task_by_task_id(task_id: str) -> None: """通过task_id删除Task信息""" @@ -148,9 +159,27 @@ class TaskManager: if task: await task_collection.delete_one({"_id": task_id}) + @staticmethod + async def delete_tasks_by_conversation_id(conversation_id: str) -> list[str]: + """通过ConversationID删除Task信息""" + mongo = MongoDB() + task_collection = mongo.get_collection("task") + task_ids = [] + try: + async for task in task_collection.find( + {"conversation_id": conversation_id}, + {"_id": 1}, + ): + task_ids.append(task["_id"]) + if task_ids: + await task_collection.delete_many({"conversation_id": conversation_id}) + return task_ids + except Exception: + logger.exception("[TaskManager] 删除ConversationID的Task信息失败") + return [] @staticmethod - async def delete_tasks_by_conversation_id(conversation_id: str) -> None: + async def delete_tasks_and_flow_context_by_conversation_id(conversation_id: str) -> None: """通过ConversationID删除Task信息""" mongo = MongoDB() task_collection = mongo.get_collection("task") @@ -167,52 +196,6 @@ class TaskManager: await task_collection.delete_many({"conversation_id": conversation_id}, session=session) await flow_context_collection.delete_many({"task_id": {"$in": task_ids}}, session=session) - - @classmethod - async def get_task( - cls, - task_id: str | None = None, - session_id: str | None = None, - post_body: RequestData | None = None, - user_sub: str | None = None, - ) -> Task: - """获取任务块""" - if task_id: - try: - task = await 
cls.get_task_by_task_id(task_id) - if task: - return task - except Exception: - logger.exception("[TaskManager] 通过task_id获取任务失败") - - logger.info("[TaskManager] 未提供task_id,通过session_id获取任务") - if not session_id or not post_body: - err = ( - "session_id 和 conversation_id 或 group_id 和 conversation_id 是恢复/创建任务的必要条件。" - ) - raise ValueError(err) - - if post_body.group_id: - task = await cls.get_task_by_group_id(post_body.group_id, post_body.conversation_id) - else: - task = await cls.get_task_by_conversation_id(post_body.conversation_id) - - if task: - return task - return Task( - _id=str(uuid.uuid4()), - ids=TaskIds( - user_sub=user_sub if user_sub else "", - session_id=session_id if session_id else "", - conversation_id=post_body.conversation_id, - group_id=post_body.group_id if post_body.group_id else "", - ), - state=None, - tokens=TaskTokens(), - runtime=TaskRuntime(), - ) - - @classmethod async def save_task(cls, task_id: str, task: Task) -> None: """保存任务块""" diff --git a/apps/services/user.py b/apps/services/user.py index 2721d3773bd43e2a8fda5b371d6336fcf0b9b7f3..1b96df18143f5a87b102f68e2bb4e2df62753f05 100644 --- a/apps/services/user.py +++ b/apps/services/user.py @@ -4,6 +4,7 @@ import logging from datetime import UTC, datetime +from apps.schemas.request_data import UserUpdateRequest from apps.common.mongo import MongoDB from apps.schemas.collection import User from apps.services.conversation import ConversationManager @@ -28,7 +29,7 @@ class UserManager: ).model_dump(by_alias=True)) @staticmethod - async def get_all_user_sub() -> list[str]: + async def get_all_user_sub(page_size: int = 20, page_cnt: int = 1, filter_user_subs: list[str] = []) -> tuple[list[str], int]: """ 获取所有用户的sub @@ -36,7 +37,13 @@ class UserManager: """ mongo = MongoDB() user_collection = mongo.get_collection("user") - return [user["_id"] async for user in user_collection.find({}, {"_id": 1})] + total = await user_collection.count_documents({}) - len(filter_user_subs) + + users = await 
user_collection.find( + {"_id": {"$nin": filter_user_subs}}, + {"_id": 1}, + ).skip((page_cnt - 1) * page_size).limit(page_size).to_list(length=page_size) + return [user["_id"] for user in users], total @staticmethod async def get_userinfo_by_user_sub(user_sub: str) -> User | None: @@ -52,7 +59,25 @@ class UserManager: return User(**user_data) if user_data else None @staticmethod - async def update_userinfo_by_user_sub(user_sub: str, *, refresh_revision: bool = False) -> bool: + async def update_userinfo_by_user_sub(user_sub: str, data: UserUpdateRequest) -> None: + """ + 根据用户sub更新用户信息 + + :param user_sub: 用户sub + :param data: 用户更新信息 + :return: 是否更新成功 + """ + mongo = MongoDB() + user_collection = mongo.get_collection("user") + update_dict = { + "$set": { + "auto_execute": data.auto_execute, + } + } + await user_collection.update_one({"_id": user_sub}, update_dict) + + @staticmethod + async def update_refresh_revision_by_user_sub(user_sub: str, *, refresh_revision: bool = False) -> bool: """ 根据用户sub更新用户信息 diff --git a/deploy/chart/euler_copilot/configs/rag/.env b/deploy/chart/euler_copilot/configs/rag/.env index 5e83f3f415091b156644b33bef9291a2d99d1931..0708fd0762d1f8c789ee5a4a873fb06880ce1463 100644 --- a/deploy/chart/euler_copilot/configs/rag/.env +++ b/deploy/chart/euler_copilot/configs/rag/.env @@ -52,7 +52,7 @@ HALF_KEY3=${halfKey3} #LLM config MODEL_NAME={{ .Values.models.answer.name }} -OPENAI_API_BASE={{ .Values.models.answer.endpoint }}/v1 +OPENAI_API_BASE={{ .Values.models.answer.endpoint }} OPENAI_API_KEY={{ default "" .Values.models.answer.key }} MAX_TOKENS={{ default 2048 .Values.models.answer.maxTokens }} diff --git a/deploy/chart/euler_copilot/values.yaml b/deploy/chart/euler_copilot/values.yaml index e3722b771c5bb34f4f894de69e0418dd2498de57..25a674cf5f9dbd88e95aade23cd07b1ce926e7f4 100644 --- a/deploy/chart/euler_copilot/values.yaml +++ b/deploy/chart/euler_copilot/values.yaml @@ -153,3 +153,38 @@ euler_copilot: type: # 当类型为NodePort时,填写主机的端口号 
nodePort: + + deepinsight_web: + # [必填] 是否部署RAG Web前端用户界面 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/web:0.9.6-x86 + # 镜像标签:["0.9.5-x86", "0.9.5-arm"] + image: + # 容器根目录只读 + readOnly: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePort + type: + # 当类型为NodePort时,填写主机的端口号 + nodePort: + + deepinsight: + # [必填] 是否部署RAG后端服务 + enabled: true + # 镜像设置;默认为hub.oepkgs.net/neocopilot/backend:0.9.6-x86 + # 镜像标签:["0.9.5-x86", "0.9.5-arm"] + image: + # 容器根目录只读 + readOnly: + # 性能限制设置 + resourceLimits: {} + # Service设置 + service: + # Service类型,例如NodePort + type: + # 当类型为NodePort时,填写主机的端口号 + nodePort: + diff --git a/docs/en/ai_full_stack/ai_container_image_userguide/_toc.yaml b/docs/en/ai_full_stack/ai_container_image_userguide/_toc.yaml index 8782a2915974178dadfa7b97efcb19df99069a72..77d171c0a48790d75b2e7e709c27fb91f8ef8e47 100644 --- a/docs/en/ai_full_stack/ai_container_image_userguide/_toc.yaml +++ b/docs/en/ai_full_stack/ai_container_image_userguide/_toc.yaml @@ -1,6 +1,6 @@ -label: AI容器镜像用户指南 +label: AI Container Image User Guide isManual: true -description: openEuler AI 容器镜像封装了 AI 框架等软件,提高 AI 应用开发或使用效率 +description: The openEuler AI container image packages AI frameworks and other software to improve the efficiency of AI application development and usage. 
sections: - - label: AI容器镜像用户指南 - href: ./ai-container-image-user-guide.md + - label: AI Container Image User Guide + href: ./ai_container_image_user_guide.md diff --git a/docs/en/ai_full_stack/ai_container_image_userguide/ai-container-image-user-guide.md b/docs/en/ai_full_stack/ai_container_image_userguide/ai_container_image_user_guide.md similarity index 33% rename from docs/en/ai_full_stack/ai_container_image_userguide/ai-container-image-user-guide.md rename to docs/en/ai_full_stack/ai_container_image_userguide/ai_container_image_user_guide.md index b433f2f4dcb8d609d014e8ecf3f77d37d5fe5773..b6b21e1878e509df93f20ac70a9ef0f3f1cd4baa 100644 --- a/docs/en/ai_full_stack/ai_container_image_userguide/ai-container-image-user-guide.md +++ b/docs/en/ai_full_stack/ai_container_image_userguide/ai_container_image_user_guide.md @@ -1,54 +1,54 @@ -# openEuler AI 容器镜像用户指南 +# openEuler AI Container Image User Guide -## 简介 +## Introduction -openEuler AI 容器镜像封装了不同硬件算力的 SDK 以及 AI 框架、大模型应用等软件,用户只需要在目标环境中加载镜像并启动容器,即可进行 AI 应用开发或使用,大大减少了应用部署和环境配置的时间,提升效率。 +The openEuler AI container images package SDKs for different hardware accelerators, along with AI frameworks and large-model applications. You only need to load the image and start a container in the target environment to develop or use AI applications, significantly reducing deployment and environment configuration time and improving efficiency. -## 获取镜像 +## Obtain Images -目前,openEuler 已发布支持 Ascend 和 NVIDIA 平台的容器镜像,获取路径如下: +Currently, openEuler provides container images for both Ascend and NVIDIA platforms. 
You can find them here: -- [openeuler/cann](https://hub.docker.com/r/openeuler/cann) 存放 SDK 类镜像,在 openEuler 基础镜像之上安装 CANN 系列软件,适用于 Ascend 环境。 -- [openeuler/cuda](https://hub.docker.com/r/openeuler/cuda) 存放 SDK 类镜像,在 openEuler 基础镜像之上安装 CUDA 系列软件,适用于 NVIDIA 环境。 -- [openeuler/pytorch](https://hub.docker.com/r/openeuler/pytorch) 存放 AI 框架类镜像,在 SDK 镜像基础之上安装 PyTorch,根据安装的 SDK 软件内容区分适用平台。 -- [openeuler/tensorflow](https://hub.docker.com/r/openeuler/tensorflow) 存放 AI 框架类镜像,在 SDK 镜像基础之上安装 TensorFlow,根据安装的 SDK 软件内容区分适用平台。 -- [openeuler/llm](https://hub.docker.com/r/openeuler/tensorrt) 存放模型应用类镜像,在 AI 框架镜像之上包含特定大模型及工具链,根据安装的 SDK 软件内容区分适用平台。 +- `docker.io/openeuler/cann`: SDK-type images that install the CANN software stack on top of the openEuler base image, for Ascend environments. +- `docker.io/openeuler/cuda`: SDK-type images that install the CUDA software stack on top of the openEuler base image, for NVIDIA environments. +- `docker.io/openeuler/pytorch`: AI framework images that install PyTorch on top of an SDK image; the applicable platform depends on the installed SDK. +- `docker.io/openeuler/tensorflow`: AI framework images that install TensorFlow on top of an SDK image; the applicable platform depends on the installed SDK. +- `docker.io/openeuler/llm`: Model application images that include specific large models and toolchains on top of an AI framework image; the applicable platform depends on the installed SDK. -详细的 AI 容器镜像分类和镜像 tag 的规范说明见[oEEP-0014](https://gitee.com/openeuler/TC/blob/master/oEEP/oEEP-0014%20openEuler%20AI容器镜像软件栈规范.md)。 +For detailed classifications and tag conventions of AI container images, see [oEEP-0014](https://gitee.com/openeuler/TC/blob/master/oEEP/oEEP-0014%20openEuler%20AI容器镜像软件栈规范.md). 
-由于 AI 容器镜像的体积一般较大,推荐用户在启动容器前先通过如下命令将镜像拉取到开发环境中。 +Because AI container images are typically large, it is recommended to pull the image to your development environment before starting a container: ```sh docker pull image:tag ``` -其中,`image`为仓库名,如`openeuler/cann`,`tag`为目标镜像的 TAG,待镜像拉取完成后即可启动容器。注意,使用`docker pull`命令需按照下文方法安装`docker`软件。 +Here, `image` is the repository name, such as `openeuler/cann`, and `tag` is the target image tag. After the image is pulled, you can start the container. Note that to use the `docker pull` command, Docker must be installed as described below. -## 启动容器 +## Start a Container -1. 在环境中安装`docker`,官方安装方法见[Install Docker Engine](https://docs.docker.com/engine/install/),也可直接通过如下命令进行安装。 +1. Install `docker` in your environment. Refer to the official guide `https://docs.docker.com/engine/install/`, or install directly with the following commands: ```sh yum install -y docker ``` - 或 + or ```sh apt-get install -y docker ``` -2. NVIDIA环境安装`nvidia-container` +2. For NVIDIA environments, install `nvidia-container` components. 
- 1)配置yum或apt repo - - 使用yum安装时,执行: + (1) Configure yum or apt repositories + - For yum-based systems, run: ```sh curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | \ sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo ``` - - 使用apt安装时,执行: + - For apt-based systems, run: ```sh curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg @@ -60,33 +60,33 @@ docker pull image:tag sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list ``` - 2)安装`nvidia-container-toolkit`,`nvidia-container-runtime`,执行: + (2) Install `nvidia-container-toolkit` and `nvidia-container-runtime`: ```sh - # yum安装 + # yum installation yum install -y nvidia-container-toolkit nvidia-container-runtime ``` ```sh - # apt安装 + # apt installation apt-get install -y nvidia-container-toolkit nvidia-container-runtime ``` - 3)配置docker + (3) Configure Docker ```sh nvidia-ctk runtime configure --runtime=docker systemctl restart docker ``` - 非NVIDIA环境不执行此步骤。 + Skip this step if you are not on an NVIDIA platform. -3. 确保环境中安装`driver`及`firmware`,用户可从[NVIDIA](https://www.nvidia.com/)或[Ascend](https://www.hiascend.com/)官网获取正确版本进行安装。安装完成后 Ascend 平台使用`npu-smi`命令、NVIDIA 平台使用`nvidia-smi`进行测试,正确显示硬件信息则说明安装正常。 +3. Ensure the appropriate `driver` and `firmware` are installed. You can obtain the correct versions from the official websites of [NVIDIA](https://www.nvidia.com/) or [Ascend](https://www.hiascend.com/). After installation, test with `npu-smi` for Ascend platforms or `nvidia-smi` for NVIDIA platforms. If hardware information is displayed correctly, the installation is successful. -4. 完成上述操作后,即可使用`docker run`命令启动容器。 +4. After completing the above steps, use the `docker run` command to start a container. 
```sh -# Ascend环境启动容器 +# Start a container on an Ascend environment docker run --rm --network host \ --device /dev/davinci0:/dev/davinci0 \ --device /dev/davinci_manager --device /dev/devmm_svm --device /dev/hisi_hdc \ @@ -96,6 +96,6 @@ docker run --rm --network host \ ``` ```sh -# NVIDIA环境启动容器 +# Start a container on an NVIDIA environment docker run --gpus all -d -ti image:tag ``` diff --git a/docs/en/ai_full_stack/ai_large_model_service_images_userguide/_toc.yaml b/docs/en/ai_full_stack/ai_large_model_service_images_userguide/_toc.yaml index bed3b23bcec452195f1b6a830c6c7e3a017413b0..660ee8766f50c8295e09f1fdd32947a3894f9598 100644 --- a/docs/en/ai_full_stack/ai_large_model_service_images_userguide/_toc.yaml +++ b/docs/en/ai_full_stack/ai_large_model_service_images_userguide/_toc.yaml @@ -1,6 +1,6 @@ -label: AI大模型服务镜像使用指南 +label: AI Large Model Service Image User Guide isManual: true -description: 支持百川、chatglm、星火等AI大模型的容器化封装 +description: Containerized packaging for AI large models such as Baichuan, ChatGLM, and Spark. 
sections: - - label: AI大模型服务镜像使用指南 - href: ./llm-service-image-user-guide.md + - label: AI Large Model Service Image User Guide + href: ./llm_service_image_user_guide.md diff --git a/docs/en/ai_full_stack/ai_large_model_service_images_userguide/llm-service-image-user-guide.md b/docs/en/ai_full_stack/ai_large_model_service_images_userguide/llm_service_image_user_guide.md similarity index 38% rename from docs/en/ai_full_stack/ai_large_model_service_images_userguide/llm-service-image-user-guide.md rename to docs/en/ai_full_stack/ai_large_model_service_images_userguide/llm_service_image_user_guide.md index c7c492b104b74e25ac87980c2d8580885a43df0e..b27604c65cc581cf8143300d1c19a7cd2849ba03 100644 --- a/docs/en/ai_full_stack/ai_large_model_service_images_userguide/llm-service-image-user-guide.md +++ b/docs/en/ai_full_stack/ai_large_model_service_images_userguide/llm_service_image_user_guide.md @@ -1,41 +1,41 @@ -# 支持百川、chatglm、星火等AI大模型的容器化封装 +# Containerized Packaging for AI Large Models Supporting Baichuan, ChatGLM, Spark -已配好相关依赖,分为CPU和GPU版本,降低使用门槛,开箱即用。 +Pre-configured with relevant dependencies, available in both CPU and GPU versions to lower the barrier to use and enable out-of-the-box deployment. 
-## 拉取镜像(CPU版本) +## Pull Image (CPU Version) ```bash docker pull openeuler/llm-server:1.0.0-oe2203sp3 ``` -## 拉取镜像(GPU版本) +## Pull Image (GPU Version) ```bash docker pull icewangds/llm-server:1.0.0 ``` -## 下载模型, 并转换为gguf格式 +## Download Model and Convert to GGUF Format ```bash -# 安装huggingface +# Install huggingface pip install huggingface-hub -# 下载你想要部署的模型 +# Download the model you want to deploy export HF_ENDPOINT=https://hf-mirror.com huggingface-cli download --resume-download baichuan-inc/Baichuan2-13B-Chat --local-dir /root/models/Baichuan2-13B-Chat --local-dir-use-symlinks False -# gguf格式转换 +# GGUF format conversion cd /root/models/ git clone https://github.com/ggerganov/llama.cpp.git python llama.cpp/convert-hf-to-gguf.py ./Baichuan2-13B-Chat -# 生成的gguf格式的模型路径 /root/models/Baichuan2-13B-Chat/ggml-model-f16.gguf +# Generated GGUF format model path: /root/models/Baichuan2-13B-Chat/ggml-model-f16.gguf ``` -## 启动方式 +## Startup Methods -需要Docker v25.0.0及以上版本。 +Requires Docker v25.0.0 or higher. -若使用GPU镜像,需要OS上安装nvidia-container-toolkit,安装方式见。 +If using GPU images, nvidia-container-toolkit must be installed on the OS. Installation instructions can be found at . 
docker-compose.yaml: @@ -43,19 +43,19 @@ docker-compose.yaml: version: '3' services: model: - image: : #镜像名称与tag + image: : # Image name and tag restart: on-failure:5 ports: - - 8001:8000 #监听端口号,修改“8001”以更换端口 + - 8001:8000 # Listening port, modify "8001" to change the port volumes: - - /root/models:/models # 大模型挂载目录 + - /root/models:/models # Large model mount directory environment: - - MODEL=/models/Baichuan2-13B-Chat/ggml-model-f16.gguf # 容器内的模型文件路径 - - MODEL_NAME=baichuan13b # 自定义模型名称 - - KEY=sk-12345678 # 自定义API Key - - CONTEXT=8192 # 上下文大小 - - THREADS=8 # CPU线程数,仅CPU部署时需要 - deploy: # 指定GPU资源, 仅GPU部署时需要 + - MODEL=/models/Baichuan2-13B-Chat/ggml-model-f16.gguf # Model file path inside container + - MODEL_NAME=baichuan13b # Custom model name + - KEY=sk-12345678 # Custom API Key + - CONTEXT=8192 # Context size + - THREADS=8 # CPU thread count, only needed for CPU deployment + deploy: # Specify GPU resources, only needed for GPU deployment resources: reservations: devices: @@ -71,12 +71,12 @@ docker-compose -f docker-compose.yaml up docker run: ```text -cpu部署: docker run -d --restart on-failure:5 -p 8001:8000 -v /root/models:/models -e MODEL=/models/Baichuan2-13B-Chat/ggml-model-f16.gguf -e MODEL_NAME=baichuan13b -e KEY=sk-12345678 openeuler/llm-server:1.0.0-oe2203sp3 +CPU deployment: docker run -d --restart on-failure:5 -p 8001:8000 -v /root/models:/models -e MODEL=/models/Baichuan2-13B-Chat/ggml-model-f16.gguf -e MODEL_NAME=baichuan13b -e KEY=sk-12345678 openeuler/llm-server:1.0.0-oe2203sp3 -gpu部署: docker run -d --gpus all --restart on-failure:5 -p 8001:8000 -v /root/models:/models -e MODEL=/models/Baichuan2-13B-Chat/ggml-model-f16.gguf -e MODEL_NAME=baichuan13b -e KEY=sk-12345678 icewangds/llm-server:1.0.0 +GPU deployment: docker run -d --gpus all --restart on-failure:5 -p 8001:8000 -v /root/models:/models -e MODEL=/models/Baichuan2-13B-Chat/ggml-model-f16.gguf -e MODEL_NAME=baichuan13b -e KEY=sk-12345678 icewangds/llm-server:1.0.0 ``` -## 
调用大模型接口测试,成功返回则表示大模型服务已部署成功 +## Test Large Model API Call - Successful response indicates successful deployment ```bash curl -X POST http://127.0.0.1:8001/v1/chat/completions \ @@ -85,8 +85,8 @@ curl -X POST http://127.0.0.1:8001/v1/chat/completions \ -d '{ "model": "baichuan13b", "messages": [ - {"role": "system", "content": "你是一个社区助手,请回答以下问题。"}, - {"role": "user", "content": "你是谁?"} + {"role": "system", "content": "You are a community assistant, please answer the following question."}, + {"role": "user", "content": "Who are you?"} ], "stream": false, "max_tokens": 1024 diff --git a/docs/en/intelligent_foundation/sysHAX/_toc.yaml b/docs/en/intelligent_foundation/sysHAX/_toc.yaml deleted file mode 100644 index 4336146e16b5e642686555ba704c654cac564b5a..0000000000000000000000000000000000000000 --- a/docs/en/intelligent_foundation/sysHAX/_toc.yaml +++ /dev/null @@ -1,6 +0,0 @@ -label: sysHAX用户指南 -isManual: true -description: 异构协同加速运行 -sections: - - label: sysHAX部署指南 - href: ./deploy_guide/sysHax-deployment-guide.md diff --git a/docs/en/intelligent_foundation/sysHAX/deploy_guide/sysHax-deployment-guide.md b/docs/en/intelligent_foundation/sysHAX/deploy_guide/sysHax-deployment-guide.md deleted file mode 100644 index 06025bedca3484c6e8176c13f985ce9743f6bcb6..0000000000000000000000000000000000000000 --- a/docs/en/intelligent_foundation/sysHAX/deploy_guide/sysHax-deployment-guide.md +++ /dev/null @@ -1,151 +0,0 @@ -# sysHAX部署指南 - -sysHAX当前处于快速迭代阶段,基于vllm v0.6.6+npu进行验证。vllm上游发布的正式支持npu的版本为v0.7.1rc1,而当前用的vllm版本处于验证阶段,未合入主线。因此,在当前创新版本中暂不以源码形式发布,而是以容器化的形式为大家提供技术尝鲜。也欢迎开发者在使用过程中有任何问题和建议,可以在sig-Intelligence组中进行充分交流。 - -vllm是一款**高吞吐、低内存占用**的**大语言模型(LLM)推理与服务引擎**,支持**CPU 计算加速**,提供高效的算子下发机制,包括: - -- **Schedule(调度)**:优化任务分发,提高并行计算效率 -- **Prepare Input(准备数据)**:高效的数据预处理,加速输入构建 -- **Ray 框架**:利用分布式计算提升推理吞吐 -- **Sample(模型后处理)**:优化采样策略,提升生成质量 -- **框架后处理**:融合多种优化策略,提升整体推理性能 - -该引擎结合**高效计算调度与优化策略**,为 LLM 推理提供**更快、更稳定、更可扩展**的解决方案。 - -## 环境准备 - -| 服务器型号 | Atlas 800T/I A2 训练/推理服务器 | -| 
--------------- | --------------------------------------------------------- | -| 操作系统 | openEuler 22.03 LTS及以上 | -| NPU驱动版本 | Ascend-hdk-910b-npu-driver_24.1.rc3_linux-aarch64.run | -| 固件版本 | Ascend-hdk-910b-firmware_7.5.0.1.129.run | - -### **安装驱动固件** - -- 创建驱动运行用户HwHiAiUser(运行驱动进程的用户),安装驱动时无需指定运行用户,默认即为HwHiAiUser。 - -```shell -groupadd -g 1000 HwHiAiUser -useradd -g HwHiAiUser -u 1000 -d /home/HwHiAiUser -m HwHiAiUser -s /bin/bash -``` - -- 将驱动包和固件包上传到服务器任意目录如“/home”。 -- 执行如下命令,增加驱动和固件包的可执行权限。 - -```shell -chmod +x Ascend-hdk-910b-npu-driver_24.1.rc3_linux-aarch64.run -chmod +x Ascend-hdk-910b-firmware_7.5.0.1.129.run -``` - -- 安装驱动 - -```shell -./Ascend-hdk-910b-npu-driver_24.1.rc3_linux-aarch64.run --full --install-for-all - -# 若执行上述安装命令出现类似如下回显信息 -# [ERROR]The list of missing tools: lspci,ifconfig, -# 请执行yum install -y net-tools pciutils - -# 若系统出现如下关键回显信息,则表示驱动安装成功。 -# Driver package installed successfully! -``` - -- 安装固件 - -```shell -./Ascend-hdk-910b-firmware_7.5.0.1.129.run --full - -# 若系统出现如下关键回显信息,表示固件安装成功。 -# Firmware package installed successfully! 
Reboot now or after driver installation for the installation/upgrade to take effect -``` - -- 执行reboot命令重启系统。 -- 执行npu-smi info查看驱动加载是否成功。 - -## 容器部署场景 - -### 部署Ascend-Docker(容器引擎插件) - -- 参考版本:"Ascend-docker-runtime_6.0.RC3.1_linux-aarch64.run" - -```shell -# 将软件包”Ascend-docker-runtime_6.0.RC3.1_linux-aarch64.run”上传到服务器任意目录(如“/home”)。 -chmod +x Ascend-docker-runtime_6.0.RC3.1_linux-aarch64.run -``` - -```shell -./Ascend-docker-runtime_6.0.RC3.1_linux-aarch64.run --install -# 安装完成后,若显示类似如下信息,则说明软件安装成功: -xxx install success -``` - -- 执行systemctl restart docker命令重启docker,使容器引擎插件在docker配置文件中添加的内容生效。 - -### 容器场景vllm搭建 - -```shell -docker pull hub.oepkgs.net/neocopilot/vllm@sha256:c72a0533b8f34ebd4d352ddac3a969d57638c3d0c9c4af9b78c88400c6edff7a - -# /home路径不要全部映射,防止覆盖/home/HwHiAiUser -docker run -itd \ - -p 1111:22 \ - --name vllm_oe \ - --shm-size 16G \ - --device /dev/davinci0 \ - --device /dev/davinci1 \ - --device /dev/davinci2 \ - --device /dev/davinci3 \ - --device /dev/davinci4 \ - --device /dev/davinci5 \ - --device /dev/davinci6 \ - --device /dev/davinci7 \ - --device /dev/davinci_manager \ - --device /dev/devmm_svm \ - --device /dev/hisi_hdc \ - -v /usr/local/dcmi:/usr/local/dcmi \ - -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \ - -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \ - -w /home \ - hub.oepkgs.net/neocopilot/vllm:0.6.6-aarch64-910B-oe2203-sp3 bash - -# 启动vllm,模型自行下载 -vllm serve /home/models/DeepSeek-R1-Distill-Llama-70B --distributed-executor-backend ray --tensor-parallel-size 8 --block-size 32 --preemption_mode swap -``` - -## 纯CPU推理环境部署 - -- 模型文件准备 - -1. 准备模型文件,放在`/home/model/`路径下 - - - **注意**:当前镜像版本支持DeepSeek 7B、32B以及Qwen系列模型 - -2. 拉取镜像,镜像地址:docker pull hub.oepkgs.net/neocopilot/syshax/vllm-cpu@sha256:3983071e1928b9fddc037a51f2fc6b044d41a35d5c1e75ff62c8c5e6b1c157a3 - -3. 
启动容器: - -```bash -docker run --name vllm_server_sysHAX \ - -p 7001:7001 \ - -v /home/model:/home/model/ \ - -itd hub.oepkgs.net/neocopilot/syshax/vllm-cpu:0.1.2.4 bash -``` - -- 在容器中启动服务 - -```bash -cd /home/vllm_syshax -python3 vllm/entrypoints/openai/api_server.py \ - --model /home/model/DeepSeek-R1-Distill-Qwen-7B \ - --served-model-name=ds7b \ - --host 0.0.0.0 \ - --port 7001 \ - --dtype=half \ - --swap_space=16 \ - --block_size=16 \ - --preemption_mode=swap \ - --max_model_len=8192 & -``` - -**注意**:`--model`使用实际模型路径,`--served-model-name`可自己指定模型名字,端口两个需要对应,可不用7001 -部署完成,然后向7001端口发送请求即可,请求需满足OpenAPI格式。 diff --git a/docs/en/intelligent_foundation/syshax/deploy_guide/_toc.yaml b/docs/en/intelligent_foundation/syshax/deploy_guide/_toc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..be322a13a3e1446113febb826c3d8b305f50c724 --- /dev/null +++ b/docs/en/intelligent_foundation/syshax/deploy_guide/_toc.yaml @@ -0,0 +1,6 @@ +label: sysHAX User Guide +isManual: true +description: Heterogeneous Collaborative Acceleration +sections: + - label: sysHAX Deployment Guide + href: ./syshax_deployment_guide.md diff --git a/docs/zh/intelligent_foundation/sysHAX/deploy_guide/pictures/syshax-deploy.png b/docs/en/intelligent_foundation/syshax/deploy_guide/pictures/syshax-deploy.png similarity index 100% rename from docs/zh/intelligent_foundation/sysHAX/deploy_guide/pictures/syshax-deploy.png rename to docs/en/intelligent_foundation/syshax/deploy_guide/pictures/syshax-deploy.png diff --git a/docs/en/intelligent_foundation/syshax/deploy_guide/syshax_deployment_guide.md b/docs/en/intelligent_foundation/syshax/deploy_guide/syshax_deployment_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..e6f6e3f4c44cb1ac82bc04d17de0bf92326030a5 --- /dev/null +++ b/docs/en/intelligent_foundation/syshax/deploy_guide/syshax_deployment_guide.md @@ -0,0 +1,212 @@ +# sysHAX Deployment Guide + +## Overview + +sysHAX is positioned as K+X heterogeneous fusion 
inference acceleration, mainly containing two functional components:
+
+- Dynamic inference scheduling
+- CPU inference acceleration
+
+**Dynamic inference scheduling**: For inference tasks, the prefill phase is compute-intensive, while the decode phase is memory-intensive. Therefore, from the perspective of computational resources, the prefill phase is suitable for execution on GPU/NPU and similar hardware, while the decode phase can be executed on the CPU and similar hardware.
+
+**CPU inference acceleration**: Accelerates CPU inference performance through NUMA affinity, parallel optimization, operator optimization, and other techniques.
+
+sysHAX consists of two delivery components:
+
+![syshax-deploy](pictures/syshax-deploy.png)
+
+The delivery components include:
+
+- sysHAX: responsible for request processing and for scheduling prefill and decode requests
+- vllm: a large model inference service; a deployment includes both a GPU/NPU instance and a CPU instance, which process prefill and decode requests respectively. For ease of use by developers, vllm is released as a container image.
+
+vllm is a **high-throughput, low-memory-usage large language model (LLM) inference and serving engine** that supports **CPU computation acceleration** and provides efficient operator dispatch mechanisms, including:
+
+- Schedule: Optimizes task distribution to improve parallel computation efficiency
+- Prepare Input: Efficient data preprocessing to accelerate input construction
+- Ray framework: Utilizes distributed computing to improve inference throughput
+- Sample (model post-processing): Optimizes sampling strategies to improve generation quality
+- Framework post-processing: Integrates multiple optimization strategies to improve overall inference performance
+
+This engine combines **efficient computation scheduling and optimization strategies** to provide **faster, more stable, and more scalable** solutions for LLM inference.
+
+## Environment Preparation
+
+| Server Model | Kunpeng 920 Series CPU |
+| ------------ | ---------------------- |
+| GPU | NVIDIA A100 |
+| OS | openEuler 22.03 LTS and above |
+| Python | 3.9 and above |
+| Docker | 25.0.3 and above |
+
+- Docker 25.0.3 can be installed via `dnf install moby`.
+
+## Deployment Process
+
+First, check whether NVIDIA drivers and CUDA drivers are already installed using `nvidia-smi` and `nvcc -V`. If not, you need to install NVIDIA drivers and CUDA drivers first.
+
+### Install NVIDIA Container Toolkit (Container Engine Plugin)
+
+If NVIDIA Container Toolkit is already installed, you can skip this step. Otherwise, install it by following the official NVIDIA Container Toolkit installation guide: <https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html>
+
+- Execute the `systemctl restart docker` command to restart docker, making the container engine plugin configuration in the docker config file effective.
+
+### Container-based vllm Setup
+
+The following process deploys vllm in a GPU container.
+ +```shell +docker pull hub.oepkgs.net/neocopilot/syshax/syshax-vllm-gpu:0.2.0 + +docker run --name vllm_gpu \ + --ipc="shareable" \ + --shm-size=64g \ + --gpus=all \ + -p 8001:8001 \ + -v /home/models:/home/models \ + -w /home/ \ + -itd hub.oepkgs.net/neocopilot/syshax/syshax-vllm-gpu:0.2.0 bash +``` + +In the above script: + +- `--ipc="shareable"`: Allows the container to share IPC namespace for inter-process communication. +- `--shm-size=64g`: Sets the container shared memory to 64G. +- `--gpus=all`: Allows the container to use all GPU devices on the host +- `-p 8001:8001`: Port mapping, mapping the host's port 8001 to the container's port 8001. Developers can modify this as needed. +- `-v /home/models:/home/models`: Directory mounting, mapping the host's `/home/models` to `/home/models` inside the container for model sharing. Developers can modify the mapping directory as needed. + +```shell +vllm serve /home/models/DeepSeek-R1-Distill-Qwen-32B \ + --served-model-name=ds-32b \ + --host 0.0.0.0 \ + --port 8001 \ + --dtype=half \ + --swap_space=16 \ + --block_size=16 \ + --preemption_mode=swap \ + --max_model_len=8192 \ + --tensor-parallel-size 2 \ + --gpu_memory_utilization=0.8 +``` + +In the above script: + +- `--tensor-parallel-size 2`: Enables tensor parallelism, splitting the model across 2 GPUs. Requires at least 2 GPUs. Developers can modify this as needed. +- `--gpu_memory_utilization=0.8`: Limits GPU memory usage to 80% to avoid service crashes due to memory exhaustion. Developers can modify this as needed. + +The following process deploys vllm in a CPU container. 
+ +```shell +docker pull hub.oepkgs.net/neocopilot/syshax/syshax-vllm-cpu:0.2.0 + +docker run --name vllm_cpu \ + --ipc container:vllm_gpu \ + --shm-size=64g \ + --privileged \ + -p 8002:8002 \ + -v /home/models:/home/models \ + -w /home/ \ + -itd hub.oepkgs.net/neocopilot/syshax/syshax-vllm-cpu:0.2.0 bash +``` + +In the above script: + +- `--ipc container:vllm_gpu`: Shares the IPC (inter-process communication) namespace with the container named vllm_gpu. Allows this container to exchange data directly through shared memory, avoiding cross-container copying. + +```shell +INFERENCE_OP_MODE=fused OMP_NUM_THREADS=160 CUSTOM_CPU_AFFINITY=0-159 SYSHAX_QUANTIZE=q8_0 \ +vllm serve /home/models/DeepSeek-R1-Distill-Qwen-32B \ + --served-model-name=ds-32b \ + --host 0.0.0.0 \ + --port 8002 \ + --dtype=half \ + --block_size=16 \ + --preemption_mode=swap \ + --max_model_len=8192 +``` + +In the above script: + +- `INFERENCE_OP_MODE=fused`: Enables CPU inference acceleration +- `OMP_NUM_THREADS=160`: Specifies the number of CPU inference threads to start as 160. This environment variable only takes effect after specifying INFERENCE_OP_MODE=fused. +- `CUSTOM_CPU_AFFINITY=0-159`: Specifies the CPU binding scheme, which will be explained in detail later. +- `SYSHAX_QUANTIZE=q8_0`: Specifies the quantization scheme as q8_0. The current version supports 2 quantization schemes: `q8_0`, `q4_0`. + +Note: The GPU container must be started first before starting the CPU container. 
+ +Use lscpu to check the current machine's hardware configuration, focusing on: + +```shell +Architecture: aarch64 + CPU op-mode(s): 64-bit + Byte Order: Little Endian +CPU(s): 160 + On-line CPU(s) list: 0-159 +Vendor ID: HiSilicon + BIOS Vendor ID: HiSilicon + Model name: - + Model: 0 + Thread(s) per core: 1 + Core(s) per socket: 80 + Socket(s): 2 +NUMA: + NUMA node(s): 4 + NUMA node0 CPU(s): 0-39 + NUMA node1 CPU(s): 40-79 + NUMA node2 CPU(s): 80-119 + NUMA node3 CPU(s): 120-159 +``` + +This machine has 160 physical cores, no SMT enabled, 4 NUMA nodes, with 40 cores on each NUMA. + +Use these two environment variables to set the CPU binding scheme: `OMP_NUM_THREADS=160 CUSTOM_CPU_AFFINITY=0-159`. In these two environment variables, the first one is the number of CPU inference threads to start, and the second one is the IDs of the CPUs to bind. In CPU inference acceleration, to achieve NUMA affinity, CPU binding operations are required, following these rules: + +- The number of started threads must match the number of bound CPUs; +- The number of CPUs used on each NUMA must be the same to maintain load balancing. + +For example, in the above script, CPUs 0-159 are bound. Among them, 0-39 belong to NUMA node 0, 40-79 belong to NUMA node 1, 80-119 belong to NUMA node 2, and 120-159 belong to NUMA node 3. Each NUMA uses 40 CPUs, ensuring load balancing across all NUMAs. + +### sysHAX Installation + +sysHAX installation: + +```shell +dnf install sysHAX +``` + +Before starting sysHAX, some basic configuration is needed: + +```shell +syshax init +syshax config services.gpu.port 8001 +syshax config services.cpu.port 8002 +syshax config services.conductor.port 8010 +syshax config models.default ds-32b +``` + +Additionally, you can use `syshax config --help` to view all configuration commands. 
+ +After configuration is complete, start the sysHAX service with the following command: + +```shell +syshax run +``` + +When starting the sysHAX service, service connectivity testing will be performed. sysHAX complies with openAPI standards. Once the service is started, you can use APIs to call the large model service. You can test it with the following script: + +```shell +curl http://0.0.0.0:8010/v1/chat/completions -H "Content-Type: application/json" -d '{ + "model": "ds-32b", + "messages": [ + { + "role": "user", + "content": "Introduce openEuler." + } + ], + "stream": true, + "max_tokens": 1024 +}' +``` diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" deleted file mode 100644 index 74ae942b5a5217b8a5e34a2b2cd8d32a49be7a00..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" deleted file mode 100644 index 
8f1de7892e04be698310691d2cfdeb07cbfa579d..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" deleted file mode 100644 index e59e8b669c3039341655eadd75ce1fda5cda1776..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" deleted file mode 100644 index 68ae1c7cb11e663cabbf1225b188fdfd628bf549..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" deleted file mode 100644 index d90f6182fb6ec63f868a5c2598de73db093775f2..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" deleted file mode 100644 index 374c3a2cc0be67a012ef8bf0ddc7688f97702d79..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" deleted file mode 100644 index 297ad86cac9226084483816f0c88c9116071b675..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" deleted file mode 100644 index 341828b1b6f728888d1dd52eec755033680155da..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" deleted file mode 100644 index 
22b02fff81fe1db3232b80607da6f10f710c8c64..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" deleted file mode 100644 index a871907f348317e43633cf05f5241cb978476fb4..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" deleted file mode 100644 index d82c736a94b106a30fd8d1f7b781f9e335bb441f..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" deleted file mode 100644 index 473a0006c9710c92375e226a760c3a79989312f9..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" deleted file mode 100644 index cfee6d88da56bc939886caece540f7de8cf77bbc..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" deleted file mode 100644 index b1d93f9c9d7587aa88a27d7e0bf185586583d438..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" deleted file mode 100644 index fec3cdaa2b260e50f5523477da3e58a9e14e2130..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 624459821de4542b635eeffa115eeba780929a4e..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" deleted file mode 100644 index 3104717bfa8f6615ad6726577a24938bc29884b2..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" deleted file mode 100644 index 454b9fdfa4b7f209dc370f78677a2f4e71ea49be..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" deleted file mode 100644 index d52d25d4778f6db2d2ec076d65018c40cd1da4d3..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 82ed79c0154bd8e406621440c4e4a7caaab7e06e..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" deleted file mode 100644 index 7dd2dea945f39ada1d7dd053d150a995b160f203..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 
84737b4185ce781d7b32ab42d39b8d2452138dad..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" deleted file mode 100644 index be89bdfde2518bba3941eee5d475f52ad9124343..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" deleted file mode 100644 index 27530840aaa5382a226e1ed8baea883895d9d75e..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" deleted file mode 100644 index aa04e6f7f0648adfca1240c750ca5b79b88da5f9..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" deleted file mode 100644 index 74905172c0c0a0acc4c4d0e35efd2493dc421c4e..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" deleted file mode 100644 index 432fbfcd02f6d2220e7d2a8512aee893d67be24d..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" deleted file mode 100644 index a4f4ea8a3999a9ab659ccd9ea39b80b21ff46e84..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" deleted file mode 100644 index 675b40297363664007f96948fb21b1cb90d6beea..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" deleted file mode 100644 index 8fc0c988e8b3830c550c6be6e42b88ac13448d1a..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" deleted file mode 100644 index 5c897e9883e868bf5160d92cb106ea4e4e9bc356..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" deleted file mode 100644 index 407e49b929b7ff4cf14703046a4ba0bfe1bb441e..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" deleted file mode 100644 index a4f4ea8a3999a9ab659ccd9ea39b80b21ff46e84..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 45ab521ec5f5afbd81ad54f023aae3b7a867dbf2..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" deleted file mode 100644 index 90ed5624ae93ff9784a750514c53293df4e961f0..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" deleted file mode 100644 index 7b2cc38a931c9c236517c14c86fa93e3eb2b6dcd..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" deleted file mode 100644 index 1365a8d69467dec250d3451ac63e2615a2194c18..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" deleted file mode 100644 index 1bd944264baa9369e6f8fbfd04cabcd12730c0e9..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" deleted file mode 100644 index 58bcd320e145dd29d9e5d49cb6d86964ebb83b51..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" deleted file mode 100644 index 809b785b999b6663d9e9bd41fed953925093d6bd..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" deleted file mode 100644 index 62ba5f6615f18deb3d5a71fd68ee8c929638d814..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" deleted file mode 100644 index d32c672fafcb0ef665bda0bcfdce19d2df44db01..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" deleted file mode 100644 index a5ecd6b65abc97320e7467f00d82ff1fd9bf0e44..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" and /dev/null differ diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witChainD_deployment.md b/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witChainD_deployment.md deleted file mode 100644 index 7aa0d750ae4421365b42ed38e45d3e06d88767ff..0000000000000000000000000000000000000000 --- a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witChainD_deployment.md +++ /dev/null @@ -1,122 +0,0 @@ -# 本地资产库构建指南 - -本平台设计的目的为了为企业(用户)提供一个可视化入口实现对本地文档资产的管理,功能方面分为以下几个部分: - -- 用户管理: - - 您可以通过账号登入witchainD平台并在平台配置大模型,为后续某些文档解析模式提供能力。 -- 资产管理 - - 通过指定资产名称、资产简介、资产默认解析方法、资产默认分块大小和embedding模型等条目创建、删除、导入资产、编辑资产或资产信息 -- 文档管理 - - 支持用户上传限定大小和限定数量的文件,也允许用户下载自己上传的文件,用户可以通过点击资产卡片的形式进入资产,此时文档以条目的形式展示。 - - 支持txt、md、xlsx、docx和doc以及pdf文档的文档解析 - - 文档解析方式有三种general、ocr和enhanced模式,general模式下只提取文字和表格,ocr模式下不仅提取文字和表格嗨提取部分文档的图片内容,enhanced在ocr的基础上对图片中提取的内容进行总结。 - -- 文档解析结果管理: - - 通过指定chunk的类别和关键字过滤目标chunk或者可以指定chunk是否启用,来评判或消除chunk对检索造成的影响。 -- 任务管理 - - 查看当前导入导出资产和文档解析任务的状态、取消或移除导入导出资产库和文档解析任务 -- 检索增强: - - 本平台最终解析的结果通过向量化、关键字的形式对外呈现检索结果,也提供了token压缩和问题补全等技术增强结果命中的概率,也使用了上下文随机关联的形式补全检索结果。 - -## 登录管理平台 - -请在浏览器中输入 `https://$(wichaind域名)`访问 EulerCopilot 的 WitChainD 网页, -登入界面如下,输入账号(admin)、密码(123456)点击登录按钮登录系统。 - -![witchaind登录界面](./pictures/witChainD/witchaind登录界面.png) - -## 新建资产 - -### 1. 查看资产库 - -进入资产卡片显示页面,卡片展示了资产的名称、简介、文档篇数、创建时间和资产ID。 - -![查看资产库界面](./pictures/witChainD/查看资产库界面.png) - -可通过红框中的按钮将卡片展示的资产转换为条目显示。 - -### 2. 
新建资产库 - -点击新建资产,会跳出资产信息配置的框图 - -![新建资产库界面](./pictures/witChainD/新建资产库界面.png) - -填写资产库名称、资产库描述(可选)、语言、嵌入模型、默认解析方法和默认文件分块大小后,点击确定。 -![新建资产库填写展示界面](./pictures/witChainD/新建资产库填写展示界面.png) - -资产库建立完毕之后会显示是否添加文档,点击确定 - -![资产库创建完成界面](./pictures/witChainD/资产库创建完成界面.png) - -点击确定完成后进入资产库 - -![资产库创建完成界面](./pictures/witChainD/进入资产库界面.png) - -## 导入文档 - -### 单次导入 - -点击导入文档按钮跳出文档上传框,选择本地文件并勾选进行上传 - -![导入文档界面](./pictures/witChainD/导入文档界面.png) - -![选择文件](./pictures/witChainD/选择文件.png) - -### 批量导入 - -点击1批量导入资产,2选择本地资产,3勾选本地资产,最后点击确定进行资产导入。 - -![批量导入界面](./pictures/witChainD/批量导入界面.png) - -资产导入中 - -![批量正在导入界面](./pictures/witChainD/批量正在导入界面.png) - -资产导入成功 - -![批量导入成功界面](./pictures/witChainD/批量导入成功界面.png) - -## 解析文档 - -等待解析中,点击取消可以停止文档解析。 - -![解析中界面](./pictures/witChainD/解析中界面.png) - -解析完成后,解析状态会显示解析成功。 - -![解析成功界面](./pictures/witChainD/解析成功界面.png) - -文档解析方式有三种general、ocr和enhanced模式,请根据需要选择合适的文档解析方法 - -![解析方法选择界面](./pictures/witChainD/解析方法选择界面.png) - -解析完毕可以通过点击文件名进入文档解析结果展示详情,可以查看文档解析结果,如下图所示: - -![解析详情界面](./pictures/witChainD/解析详情界面.png) - -可以通过1过滤文档解析的片段、表格和图片等内容,通过2可以通过关键字检索模糊检索对应的片段,通过3可以设定是否在检索中是否启用片段,如下图所示: - -![解析结果过滤界面](./pictures/witChainD/解析结果过滤界面.png) - -## 导出资产 - -点击回到首页 - -![回到首页](./pictures/witChainD/回到首页.png) - -再点击导出资产 - -![导出资产界面](./pictures/witChainD/导出资产界面.png) - -显示资产正在导出中,如下图所示: - -![正在导出界面](./pictures/witChainD/正在导出界面.png) - -导出成功点击下载,显示下载成功 - -![导出成功](./pictures/witChainD/导出成功.png) diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md b/docs/en/openEuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md b/docs/en/openEuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md deleted file mode 100644 
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md deleted file mode 100644 index 3e946173658dffbc296f616d94bfbd92a3d70890..0000000000000000000000000000000000000000 --- a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md +++ /dev/null @@ -1,596 +0,0 @@ -# **EulerCopilot 智能助手部署指南** - -版本信息 -当前版本:v0.9.5 -更新日期:2025年4月1日 - -## 产品概述 - -EulerCopilot 是一款智能问答工具,使用 EulerCopilot 可以解决操作系统知识获取的便捷性,并且为OS领域模型赋能开发者及运维人员。作为获取操作系统知识,使能操作系统生产力工具 (如 A-Ops / A-Tune / x2openEuler / EulerMaker / EulerDevOps / StratoVirt / iSulad 等),颠覆传统命令交付方式,由传统命令交付方式向自然语义进化,并结合智能体任务规划能力,降低开发、使用操作系统特性的门槛。 - -本指南提供基于自动化脚本的EulerCopilot智能助手系统部署说明,支持一键自动部署和手动分步部署两种方式。 - -### 组件介绍 - -| 组件 | 端口 | 说明 | -| ----------------------------- | --------------- | -------------------- | -| euler-copilot-framework | 8002 (内部端口) | 智能体框架服务 | -| euler-copilot-web | 8080 | 智能体前端界面 | -| euler-copilot-rag | 9988 (内部端口) | 检索增强服务 | -| authhub-backend-service | 11120 (内部端口) | 鉴权服务后端 | -| authhub-web-service | 8000 | 鉴权服务前端 | -| mysql | 3306 (内部端口) | MySQL数据库 | -| redis | 6379 (内部端口) | Redis数据库 | -| minio | 9000 (内部端口) 9001(外部部端口) | minio数据库 | -| mongo | 27017 (内部端口) | mongo数据库 | -| postgres | 5432 (内部端口) | 向量数据库 | -| secret_inject | 无 | 配置文件安全复制工具 | - -### 软件要求 - -| 类型 | 版本要求 | 说明 | -|----------------| -------------------------------------|--------------------------------------| -| 操作系统 | openEuler 22.03 LTS 及以上版本 | 无 | -| K3s | >= v1.30.2,带有 Traefik Ingress 工具 | K3s 提供轻量级的 Kubernetes 集群,易于部署和管理 | -| Helm | >= v3.15.3 | Helm 是一个 Kubernetes 的包管理工具,其目的是快速安装、升级、卸载 EulerCopilot 服务 | -| python | >=3.9.9 | python3.9.9 以上版本为模型的下载和安装提供运行环境 | - ---- - -### 硬件规格 - -| 硬件资源 | 最小配置 | 推荐配置 | 
-|--------------|----------------------------|------------------------------| -| CPU | 4 核心 | 16 核心及以上 | -| RAM | 4 GB | 64 GB | -| 存储 | 32 GB | 64G | -| 大模型名称 | deepseek-llm-7b-chat | DeepSeek-R1-Llama-8B | -| 显存 (GPU) | NVIDIA RTX A4000 8GB | NVIDIA A100 80GB * 2 | - -**关键说明**: - -- 纯CPU环境,建议通过调用 OpenAI 接口或使用自带的模型部署方式来实现功能。 -- 如果k8s集群环境,则不需要单独安装k3s,要求version >= 1.28 - ---- - -### 部署视图 - -![部署图](./pictures/部署视图.png) - ---- - -### 域名配置 - -需准备以下两个服务域名: - -- authhub认证服务:authhub.eulercopilot.local -- EulerCopilot web服务:www.eulercopilot.local - -```bash -# 本地Windows主机中进行配置 -# 打开 C:\Windows\System32\drivers\etc\hosts 添加记录 -# 替换127.0.0.1为目标服务器的IP -127.0.0.1 authhub.eulercopilot.local -127.0.0.1 www.eulercopilot.local -``` - -## 快速开始 - -### 1. 获取部署脚本 - -- 从 EulerCopilot 的官方Git仓库 [euler-copilot-framework](https://gitee.com/openeuler/euler-copilot-framework) 下载最新的部署仓库 -- 如果您正在使用 Kubernetes,则不需要安装 k3s 工具。 - -```bash -# 下载目录以 home 为例 -cd /home -``` - -```bash -git clone https://gitee.com/openeuler/euler-copilot-framework.git -b dev -``` - -```bash -cd euler-copilot-framework/deploy/scripts -``` - -```bash -# 为脚本文件添加可执行权限 -chmod -R +x ./* -``` - -### 2. 
部署EulerCopilot - -#### **一键部署** - -```bash -cd /home/euler-copilot-framework/deploy/scripts -``` - -```bash -bash deploy.sh -``` - -```bash -# 输入0进行一键自动部署 -============================== - 主部署菜单 -============================== -0) 一键自动部署 -1) 手动分步部署 -2) 重启服务 -3) 卸载所有组件并清除数据 -4) 退出程序 -============================== -请输入选项编号(0-3): 0 -``` - ---- - -#### **分步部署** - -```bash -# 选择1 -> 1 进入手动分步部署 -============================== - 主部署菜单 -============================== -0) 一键自动部署 -1) 手动分步部署 -2) 重启服务 -3) 卸载所有组件并清除数据 -4) 退出程序 -============================== -请输入选项编号(0-3): 1 -``` - -```bash -# 输入选项编号(0-9),逐步部署 -============================== - 手动分步部署菜单 -============================== -1) 执行环境检查脚本 -2) 安装k3s和helm -3) 安装Ollama -4) 部署Deepseek模型 -5) 部署Embedding模型 -6) 安装数据库 -7) 安装AuthHub -8) 安装EulerCopilot -9) 返回主菜单 -============================== -请输入选项编号(0-9): -``` - ---- - -#### **重启服务** - -```bash -# 输入选项重启服务 -============================== - 服务重启菜单 -============================== -可重启的服务列表: -1) authhub-backend -2) authhub -3) framework -4) minio -5) mongo -6) mysql -7) pgsql -8) rag -9) rag-web -10) redis -11) web -12) 返回主菜单 -============================== -请输入要重启的服务编号(1-12): -``` - -#### **卸载所有组件** - -```bash -sudo ./deploy.sh -# 选择2进行完全卸载 -============================== - 主部署菜单 -============================== -0) 一键自动部署 -1) 手动分步部署 -2) 卸载所有组件并清除数据 -3) 退出程序 -============================== -请输入选项编号(0-3): 2 -``` - ---- - -**关键说明**: - -- 在部署过程中,您需要输入 Authhub 域名和 EulerCopilot 域名, 不输入则使用默认域名`authhub.eulercopilot.local`, `www.eulercopilot.local`。 -- 资源不足时可参考 FAQ 中的评估资源可用性解决 -- 查看组件日志 - -```bash -kubectl logs -n euler-copilot -``` - -- 查看服务状态 - -```bash -kubectl get pod -n euler-copilot -``` - -- 大模型配置修改并更新EulerCopilot - -```bash -cd /home/euler-copilot-framework/deploy/chart/euler-copilot -``` - -```bash -vim values.yaml -``` - -```bash -helm upgrade euler-copilot -n euler-copilot . 
-``` - -## 验证安装 - -恭喜您,**EulerCopilot** 已成功部署!为了开始您的体验,请在浏览器中输入 `https://您的EulerCopilot域名` 链接访问 EulerCopilot 的网页界面: - -首次访问时,您需要点击页面上的 **立即注册** 按钮来创建一个新的账号,并完成登录过程。 - -![Web登录界面](./pictures/WEB登录界面.png) -![Web 界面](./pictures/WEB界面.png) - -## 构建专有领域智能问答 - -点击知识库,可登录本地知识库管理页面,详细信息请参考文档 [本地资产库构建指南](../../../advance/knowledge_base/deploy_guide/witChainD_deployment.md) -**知识库登录默认账号 `admin`, 密码 `123456`** - ---- - -## 附录 - -### 大模型准备 - -#### GPU 环境 - -可直接使用部署的deepseek大模型参考以下方式进行部署 - -1. 下载模型文件: - - ```bash - huggingface-cli download --resume-download Qwen/Qwen1.5-14B-Chat --local-dir Qwen1.5-14B-Chat - ``` - -2. 创建终端 control - - ```bash - screen -S control - ``` - - ```bash - python3 -m fastchat.serve.controller - ``` - - 按 Ctrl A+D 置于后台 - -3. 创建新终端 api - - ```bash - screen -S api - ``` - - ```bash - python3 -m fastchat.serve.openai_api_server --host 0.0.0.0 --port 30000 --api-keys sk-123456 - ``` - - 按 Ctrl A+D 置于后台 - 如果当前环境的 Python 版本是 3.12 或者 3.9 可以创建 python3.10 的 conda 虚拟环境 - - ```bash - mkdir -p /root/py310 - ``` - - ```bash - conda create --prefix=/root/py310 python==3.10.14 - ``` - - ```bash - conda activate /root/py310 - ``` - -4. 创建新终端 worker - - ```bash - screen -S worker - ``` - - ```bash - screen -r worker - ``` - - 安装 fastchat 和 vllm - - ```bash - pip install fschat vllm - ``` - - 安装依赖: - - ```bash - pip install fschat[model_worker] - ``` - - ```bash - python3 -m fastchat.serve.vllm_worker --model-path /root/models/Qwen1.5-14B-Chat/ --model-name qwen1.5 --num-gpus 8 --gpu-memory-utilization=0.7 --dtype=half - ``` - - 按 Ctrl A+D 置于后台 - -5. 
按照如下方式修改配置的大模型参数,并更新服务。 - - ```bash - vim /home/euler-copilot-framework/deploy/chart/euler_copilot/values.yaml - ``` - - 修改如下部分 - - ```yaml - # 模型设置 - models: - # 用于问答的大模型;需要为OpenAI兼容接口 - answer: - # [必填] 接口URL(无需带上“v1”后缀) - url: http://172.168.178.107:11434 - # [必填] 接口API Key;默认置空 - key: sk-123456 - # [必填] 模型名称 - name: deepseek-llm-7b-chat:latest - # [必填] 模型最大上下文数;建议>=8192 - ctx_length: 8192 - # 模型最大输出长度,建议>=2048 - max_tokens: 2048 - # 用于Function Call的模型;建议使用特定推理框架 - functioncall: - # 推理框架类型,默认为ollama - # 可用的框架类型:["vllm", "sglang", "ollama", "openai"] - backend: - # 模型地址;不填则与问答模型一致 - url: ollama - # API Key;不填则与问答模型一致 - key: - # 模型名称;不填则与问答模型一致 - name: - # 模型最大上下文数;不填则与问答模型一致 - ctx_length: - # 模型最大输出长度;不填则与问答模型一致 - max_tokens: - # 用于数据向量化(Embedding)的模型 - embedding: - # 推理框架类型,默认为openai - # [必填] Embedding接口类型:["openai", "mindie"] - type: openai - # [必填] Embedding URL(需要带上“v1”后缀) - url: http://172.168.178.107:11434 - # [必填] Embedding 模型API Key - key: sk-123456 - # [必填] Embedding 模型名称 - name: bge-m3:latest - ``` - - ```bash - # 更新服务 - helm upgrade -n euler-copilot euler-copilot . - # 重启framework服务 - kubectl get pod -n euler-copilot - kubectl delete pod framework-deploy-65b669fc58-q9bw7 -n euler-copilot - ``` - -#### NPU 环境 - -NPU 环境部署可参考链接 [MindIE安装指南](https://www.hiascend.com/document/detail/zh/mindie/10RC2/whatismindie/mindie_what_0001.html) - -### FAQ - -#### 1. 解决 Hugging Face 连接错误 - -如果遇到如下连接错误: - -```text -urllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 101] Network is unreachable -``` - -尝试以下解决方案: - -- 更新 `huggingface_hub` 包到最新版本。 - - ```bash - pip3 install -U huggingface_hub - ``` - -- 如果网络问题依旧存在,可以尝试使用镜像站点作为端点。 - - ```bash - export HF_ENDPOINT=https://hf-mirror.com - ``` - -#### 2. 
在 RAG 容器中调用问答接口 - -进入对应的 RAG Pod 后,可以通过 `curl` 命令发送 POST 请求来获取问答结果。请确保在请求体中提供具体的问题文本。 - -```bash -curl -k -X POST "http://localhost:9988/kb/get_answer" \ - -H "Content-Type: application/json" \ - -d '{ - "question": "您的问题", - "kb_sn": "default_test", - "fetch_source": true - }' -``` - -#### 3. 解决 `helm upgrade` 错误 - -当 Kubernetes 集群不可达时,您可能会遇到类似下面的错误信息: - -```text -Error: UPGRADE FAILED: Kubernetes cluster unreachable -``` - -确保设置了正确的 KUBECONFIG 环境变量指向有效的配置文件。 - -```bash -echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /root/.bashrc -source /root/.bashrc -``` - -#### 4. 查看 Pod 日志失败 - -如果您遇到查看 Pod 日志时权限被拒绝的问题,检查是否正确配置了代理设置,并将本机 IP 地址添加到 `no_proxy` 环境变量中。 - -```bash -cat /etc/systemd/system/k3s.service.env -``` - -编辑文件并确保包含: - -```bash -no_proxy=XXX.XXX.XXX.XXX -``` - -#### 5. GPU环境中大模型流式回复问题 - -对于某些服务执行 curl 大模型时无法进行流式回复的情况,尝试修改请求中的 `"stream"` 参数为 `false`。此外,确认已安装兼容版本的 Pydantic 库。 - -```bash -pip install pydantic==1.10.13 -``` - -#### 6. sglang 模型部署指南 - -按照以下步骤部署基于 sglang 的模型: - -```bash -# 1. 激活名为 `myenv` 的 Conda 环境,该环境基于 Python 3.10 创建: -conda activate myenv - -# 2. 安装 sglang 及其所有依赖项,指定版本为 0.3.0 -pip install "sglang[all]==0.3.0" - -# 3. 从特定索引安装 flashinfer,确保与您的 CUDA 和 PyTorch 版本兼容 -pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ - -# 4. 使用 sglang 启动服务器,配置如下: -python -m sglang.launch_server \ - --served-model-name Qwen2.5-32B \ - --model-path Qwen2.5-32B-Instruct-AWQ \ - --host 0.0.0.0 \ - --port 8001 \ - --api-key "sk-12345" \ - --mem-fraction-static 0.5 \ - --tp 8 -``` - -- 验证安装 - - ```bash - pip show sglang - pip show flashinfer - ``` - -**注意事项:** - -- API Key:请确保 `--api-key` 参数中的 API 密钥是正确的 -- 模型路径: 确保 `--model-path` 参数中的路径是正确的,并且模型文件存在于该路径下。 -- CUDA 版本:确保你的系统上安装了 CUDA 12.1 和 PyTorch 2.4,因为 `flashinfer` 包依赖于这些特定版本。 -- 线程池大小:根据你的GPU资源和预期负载调整线程池大小。如果你有 8 个 GPU,那么可以选择 --tp 8 来充分利用这些资源。 - -#### 7. 
获取 Embedding - -使用 curl 发送 POST 请求以获取 embedding 结果: - -```bash -curl -k -X POST http://localhost:11434/v1/embeddings \ - -H "Content-Type: application/json" \ - -d {"input": "The food was delicious and the waiter...", "model": "bge-m3", "encoding_format": "float"} -``` - -#### 8. 生成证书 - -为了生成自签名证书,首先下载 [mkcert](https://github.com/FiloSottile/mkcert/releases)工具,然后运行以下命令: - -```bash -mkcert -install -mkcert example.com -``` - -最后,将生成的证书和私钥拷贝到 values.yaml 中, 并应用至 Kubernetes Secret。 - -```bash -vim /home/euler-copilot-framework_openeuler/deploy/chart_ssl/traefik-secret.yaml -``` - -```bash -kubectl apply -f traefik-secret.yaml -``` - -#### 9. 问题排查方法 - -1. **获取集群事件信息** - - 为了更好地定位 Pod 失败的原因,请首先检查 Kubernetes 集群中的事件 (Events)。这可以提供有关 Pod 状态变化的上下文信息。 - - ```bash - kubectl get events -n euler-copilot - ``` - -2. **验证镜像拉取状态** - - 确认容器镜像是否成功拉取。如果镜像未能正确加载,可能是由于网络问题或镜像仓库配置错误。 - - ```bash - k3s crictl images - ``` - -3. **审查 Pod 日志** - - 检查相关 Pod 的日志,以寻找可能的错误信息或异常行为。这对于诊断应用程序级别的问题特别有用。 - - ```bash - kubectl logs rag-deploy-service-5b7887644c-sm58z -n euler-copilot - ``` - -4. **评估资源可用性** - - 确保 Kubernetes 集群有足够的资源(如 CPU、内存和存储)来支持 Pod 的运行。资源不足可能导致镜像拉取失败或其他性能问题,或使得 Pod 状态从 Running 变为 Pending 或 Completed。可查看磁盘空间并保证至少有 30% 的可用空间。这有助于维持 Pod 的稳定运行状态。参考该链接挂载空间较大的磁盘[How to move k3s data to another location](https://mrkandreev.name/snippets/how_to_move_k3s_data_to_another_location/) - - ```bash - kubectl top nodes - ``` - -5. **确认 k3s 版本兼容性** - - 如果遇到镜像拉取失败且镜像大小为 0 的问题,请检查您的 k3s 版本是否符合最低要求(v1.30.2 或更高)。较低版本可能存在不兼容的问题。 - - ```bash - k3s -v - ``` - -6. **检查配置** - - 检查 `values.yaml` 文件中关于 OIDC 配置和域名配置是否填写正确,确保配置无误后更新服务。 - - ```bash - cat /home/euler-copilot-framework/deploy/chart/euler_copilot - ``` - - ```bash - vim values.yaml | grep oidc - ``` - - ```bash - helm upgrade euler-copilot -n euler-copilot . 
- ``` diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" deleted file mode 100644 index 74ae942b5a5217b8a5e34a2b2cd8d32a49be7a00..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" deleted file mode 100644 index 8f1de7892e04be698310691d2cfdeb07cbfa579d..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" deleted file mode 100644 index e59e8b669c3039341655eadd75ce1fda5cda1776..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" deleted file mode 100644 index 68ae1c7cb11e663cabbf1225b188fdfd628bf549..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" deleted file mode 100644 index 
d90f6182fb6ec63f868a5c2598de73db093775f2..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" deleted file mode 100644 index 374c3a2cc0be67a012ef8bf0ddc7688f97702d79..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" deleted file mode 100644 index 297ad86cac9226084483816f0c88c9116071b675..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" deleted file mode 100644 index 341828b1b6f728888d1dd52eec755033680155da..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" deleted file mode 100644 index 22b02fff81fe1db3232b80607da6f10f710c8c64..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" deleted file mode 100644 index a871907f348317e43633cf05f5241cb978476fb4..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" deleted file mode 100644 index d82c736a94b106a30fd8d1f7b781f9e335bb441f..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" deleted file mode 100644 index 473a0006c9710c92375e226a760c3a79989312f9..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" deleted file mode 100644 index cfee6d88da56bc939886caece540f7de8cf77bbc..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" deleted file mode 100644 index b1d93f9c9d7587aa88a27d7e0bf185586583d438..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" deleted file mode 100644 index fec3cdaa2b260e50f5523477da3e58a9e14e2130..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 624459821de4542b635eeffa115eeba780929a4e..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" deleted file mode 100644 index 3104717bfa8f6615ad6726577a24938bc29884b2..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" deleted file mode 100644 index 454b9fdfa4b7f209dc370f78677a2f4e71ea49be..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" deleted file mode 100644 index d52d25d4778f6db2d2ec076d65018c40cd1da4d3..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 82ed79c0154bd8e406621440c4e4a7caaab7e06e..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" deleted file mode 100644 index 
7dd2dea945f39ada1d7dd053d150a995b160f203..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 84737b4185ce781d7b32ab42d39b8d2452138dad..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" deleted file mode 100644 index be89bdfde2518bba3941eee5d475f52ad9124343..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" deleted file mode 100644 index 27530840aaa5382a226e1ed8baea883895d9d75e..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" deleted file mode 100644 index aa04e6f7f0648adfca1240c750ca5b79b88da5f9..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" deleted file mode 100644 index 74905172c0c0a0acc4c4d0e35efd2493dc421c4e..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" deleted file mode 100644 index 432fbfcd02f6d2220e7d2a8512aee893d67be24d..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" deleted file mode 100644 index a4f4ea8a3999a9ab659ccd9ea39b80b21ff46e84..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" deleted file mode 100644 index 675b40297363664007f96948fb21b1cb90d6beea..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" deleted file mode 100644 index 8fc0c988e8b3830c550c6be6e42b88ac13448d1a..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" deleted file mode 100644 index 5c897e9883e868bf5160d92cb106ea4e4e9bc356..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" deleted file mode 100644 index 407e49b929b7ff4cf14703046a4ba0bfe1bb441e..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" deleted file mode 100644 index a4f4ea8a3999a9ab659ccd9ea39b80b21ff46e84..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 45ab521ec5f5afbd81ad54f023aae3b7a867dbf2..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" deleted file mode 100644 index 90ed5624ae93ff9784a750514c53293df4e961f0..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" deleted file mode 100644 index 7b2cc38a931c9c236517c14c86fa93e3eb2b6dcd..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" deleted file mode 100644 index 1365a8d69467dec250d3451ac63e2615a2194c18..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" deleted file mode 100644 index 1bd944264baa9369e6f8fbfd04cabcd12730c0e9..0000000000000000000000000000000000000000 Binary files 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" deleted file mode 100644 index 58bcd320e145dd29d9e5d49cb6d86964ebb83b51..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" deleted file mode 100644 index 809b785b999b6663d9e9bd41fed953925093d6bd..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" deleted file mode 100644 index 62ba5f6615f18deb3d5a71fd68ee8c929638d814..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" 
"b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" deleted file mode 100644 index d32c672fafcb0ef665bda0bcfdce19d2df44db01..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" and /dev/null differ diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" "b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" deleted file mode 100644 index a5ecd6b65abc97320e7467f00d82ff1fd9bf0e44..0000000000000000000000000000000000000000 Binary files "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" and /dev/null differ diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md deleted file mode 100644 index 
b3f6abde54923c2b2dfb767a8cb03eb9281cd22c..0000000000000000000000000000000000000000 --- a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md +++ /dev/null @@ -1,68 +0,0 @@ -# 前言 - -## 概述 - -本文档介绍了 EulerCopilot 的使用方法,对 EulerCopilot 线上服务的 Web 界面的各项功能做了详细介绍,同时提供了常见的问题解答,详细请参考对应手册。 - -## 读者对象 - -本文档主要适用于 EulerCopilot 的使用人员。使用人员必须具备以下经验和技能: - -- 熟悉 openEuler 操作系统相关情况。 -- 有 AI 对话使用经验。 - -## 修改记录 - -| 文档版本 | 发布日期 | 修改说明 | -|--------|------------|----------------| -| 03 | 2024-09-19 | 更新新版界面。 | -| 02 | 2024-05-13 | 优化智能对话操作指引。 | -| 01 | 2024-01-28 | 第一次正式发布。 | - -## 介绍 - -### 免责声明 - -- 使用过程中涉及的非工具本身验证功能所用的用户名和密码,不作他用,且不会被保存在系统环境中。 -- 在您进行对话或操作前应当确认您为应用程序的所有者或已获得所有者的充足授权同意。 -- 对话结果中可能包含您所分析应用的内部信息和相关数据,请妥善管理。 -- 除非法律法规或双方合同另有规定,openEuler 社区对分析结果不做任何明示或暗示的声明和保证,不对分析结果的适销性、满意度、非侵权性或特定用途适用性等作出任何保证或者承诺。 -- 您根据分析记录所采取的任何行为均应符合法律法规的要求,并由您自行承担风险。 -- 未经所有者授权,任何个人或组织均不得使用应用程序及相关分析记录从事任何活动。openEuler 社区不对由此造成的一切后果负责,亦不承担任何法律责任。必要时,将追究其法律责任。 - -### EulerCopilot 简介 - -EulerCopilot 是一个基于 openEuler 操作系统的人工智能助手,可以帮助用户解决各种技术问题,提供技术支持和咨询服务。它使用了最先进的自然语言处理技术和机器学习算法,能够理解用户的问题并提供相应的解决方案。 - -### 场景内容 - -1. OS 领域通用知识:EulerCopilot 可以咨询 Linux 常规知识、上游信息和工具链介绍和指导。 -2. openEuler 专业知识:EulerCopilot 可以咨询 openEuler 社区信息、技术原理和使用指导。 -3. openEuler 扩展知识:EulerCopilot 可以咨询 openEuler 周边硬件特性知识和ISV、OSV相关信息。 -4. openEuler 应用案例:EulerCopilot 可以提供 openEuler 技术案例、行业应用案例。 -5. 
shell 命令生成:EulerCopilot 可以帮助用户生成单条 shell 命令或者复杂命令。 - -总之,EulerCopilot 可以应用于各种场景,帮助用户提高工作效率和了解 Linux、openEuler 等的相关知识。 - -### 访问和使用 - -EulerCopilot 通过网址访问 Web 网页进行使用。账号注册与登录请参考[注册与登录](./registration_and_login.md)。使用方法请参考[智能问答使用指南](./qa_guide.md)。 - -### 界面说明 - -#### 界面分区 - -EulerCopilot 界面主要由如图 1 所示的区域组成,各个区域的作用如表 1 所示。 - -- 图 1 EulerCopilot 界面 - -![Copilot 界面](./pictures/main-page-sections.png) - -- 表 1 EulerCopilot 首页界面分区说明 - -| 区域 | 名称 | 说明 | -|-----|------------|----------------------------------------------------------------| -| 1 | 设置管理区 | 提供账号登录和退出操作入口和明亮/黑暗模式切换功能 | -| 2 | 对话管理区 | 用于用户新建对话、对话历史记录管理和对话历史记录批量删除操作 | -| 3 | 对话区 | 用于用户和 EulerCopilot 的对话聊天 | -| 4 | 服务协议和隐私政策区 | 提供查看服务协议和隐私政策入口 | diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md deleted file mode 100644 index 73f25bfce21a08d3eef048aa74e29ed0cb1f65d4..0000000000000000000000000000000000000000 --- a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md +++ /dev/null @@ -1,179 +0,0 @@ -# 智能问答使用指南 - -## 开始对话 - -在对话区下侧输入框即可输入对话想要提问的内容,输入 `Shift + Enter` 可进行换行,输入 `Enter` 即可发送对话提问内容,或者单击“发送”也可发送对话提问内容。 - -> **说明** -> -> 对话区位于页面的主体部分,如图 1 所示。 - -- 图 1 对话区 - ![对话区](./pictures/chat-area.png) - -### 多轮连续对话 - -EulerCopilot 智能问答支持多轮连续对话。只需要在同一个对话中继续追问即可使用,如图 2 所示。 - -- 图 2 多轮对话 - ![多轮对话](./pictures/context-support.png) - -### 重新生成 - -如遇到 AI 生成的内容有误或不完整的特殊情况,可以要求 AI 重新回答问题。单击对话回答左下侧的“重新生成”文字,可让 EulerCopilot 重新回答用户问题,重新回答后,在对话回答右下侧,会出现回答翻页的图标![向前翻页](./pictures/icon-arrow-prev.png)和![向后翻页](./pictures/icon-arrow-next.png),单击![向前翻页](./pictures/icon-arrow-prev.png)或![向后翻页](./pictures/icon-arrow-next.png)可查看不同的回答,如图 3 所示。 - -- 图 3 重新生成 - ![重新生成](./pictures/regenerate.png) - -### 推荐问题 - -在 AI 回答的下方,会展示一些推荐的问题,单击即可进行提问,如图 4 所示。 - -- 图 4 推荐问题 - ![推荐问题](./pictures/recommend-questions.png) - -## 自定义背景知识 - -EulerCopilot 
支持上传文件功能。上传文件后,AI 会将上传的文件内容作为背景知识,在回答问题时,会结合背景知识进行回答。上传的背景知识只会作用于当前对话,不会影响其他对话。 - -### 上传文件 - -**步骤1** 单击对话区左下角的“上传文件”按钮,如图 5 所示。 - -- 图 5 上传文件按钮 - ![上传文件](./pictures/file-upload-btn.png) - -> **说明** -> -> 鼠标悬停到“上传文件”按钮上,会显示提示允许上传文件的规格和格式,如图 6 所示。 - -- 图 6 鼠标悬停显示上传文件规格提示 - ![上传文件提示](./pictures/file-upload-btn-prompt.png) - -**步骤2** 在弹出的文件选择框中,选择需要上传的文件,单击“打开”,即可上传文件。最多上传10个文件,总大小限制为64MB。接受 PDF、docx、doc、txt、md、xlsx。 - -开始上传后,对话区下方会显示上传进度,如图 7 所示。 - -- 图 7 同时上传的所有文件排列在问答输入框下方 - ![上传文件](./pictures/file-upload-uploading.png) - -文件上传完成后会自动解析,如图 8 所示,解析完成后,对话区下方会显示每个文件的大小信息。 - -- 图 8 文件上传至服务器后将显示“正在解析” - ![文件解析](./pictures/file-upload-parsing.png) - -文件上传成功后,左侧历史记录区会显示上传的文件数量,如图 9 所示。 - -- 图 9 对话历史记录标签上会展示上传文件数量 - ![历史记录标记](./pictures/file-upload-history-tag.png) - -### 针对文件提问 - -文件上传完成后,即可针对文件进行提问,提问方式同普通对话模式,如图 10 所示。 -回答结果如图 11 所示。 - -- 图 10 询问与上传的文件相关的问题 - ![针对文件提问](./pictures/file-upload-ask-against-file.png) - -- 图 11 AI 以上传的为背景知识进行回答 - ![根据自定义背景知识回答](./pictures/file-upload-showcase.png) - -## 管理对话 - -> **说明** -> -> 对话管理区在页面左侧。 - -### 新建对话 - -单击“新建对话”按钮即可新建对话,如图 12 所示。 - -- 图 12 “新建对话”按钮在页面左上方 - ![新建对话](./pictures/new-chat.png) - -### 对话历史记录搜索 - -在页面左侧历史记录搜索输入框输入关键词,然后单击![搜索](./pictures/icon-search.png)即可进行对话历史记录搜索如图 13 所示。 - -- 图 13 对话历史记录搜索框 - ![对话历史记录搜索](./pictures/search-history.png) - -### 对话历史记录单条管理 - -历史记录的列表位于历史记录搜索栏的下方,在每条对话历史记录的右侧,单击![编辑](./pictures/icon-edit.png)即可编辑对话历史记录的名字,如图 14 所示。 - -- 图 14 点击“编辑”图标重命名历史记录 - ![重命名历史记录](./pictures/rename-session.png) - -在对话历史记录名字重新书写完成后,单击右侧![确认](./pictures/icon-confirm.png)即可完成重命名,或者单击右侧![取消](./pictures/icon-cancel.png)放弃本次重命名,如图 15 所示。 - -- 图 15 完成/取消重命名历史记录 - ![完成/取消重命名历史记录](./pictures/rename-session-confirmation.png) - -另外,单击对话历史记录右侧的删除图标,如图 16 所示,即可对删除单条对话历史记录进行二次确认,在二次确认弹出框,如图 17 所示,单击“确认”,可确认删除单条对话历史记录,或者单击“取消”,取消本次删除。 - -- 图 16 点击“垃圾箱”图标删除单条历史记录 - ![删除单条历史记录](./pictures/delete-session.png) - -- 图 17 二次确认后删除历史记录 - ![删除单条历史记录二次确认](./pictures/delete-session-confirmation.png) - -### 对话历史记录批量删除 - -首先单击“批量删除”,如图 18 
所示。 - -- 图 18 批量删除功能在历史记录搜索框右上方 - ![批量删除](./pictures/bulk-delete.png) - -然后可对历史记录进行选择删除,如图 19 所示。单击“全选”,即对所有历史记录选中,单击单条历史记录或历史记录左侧的选择框,可对单条历史记录进行选中。 - -- 图 19 在左侧勾选要批量删除历史记录 - ![批量删除历史记录选择](./pictures/bulk-delete-multi-select.png) - -最后需要对批量删除历史记录进行二次确认,如图 20 所示,单击“确认”,即删除,单击“取消”,即取消本次删除。 - -- 图 20 二次确认后删除所选的历史记录 - ![批量删除二次确认](./pictures/bulk-delete-confirmation.png) - -## 反馈与举报 - -在对话记录区,对话回答的右下侧,可进行对话回答反馈,如图 21 所示,单击![满意](./pictures/icon-thumb-up.png),可给对话回答点赞;单击![不满意](./pictures/icon-thumb-down.png),可以给对话回答反馈不满意的原因。 - -- 图 21 点赞和不满意反馈 - ![点赞和不满意反馈](./pictures/feedback.png) - -对于反馈不满意原因,如图 22 所示,在单击![不满意](./pictures/icon-thumb-down.png)之后,对话机器人会展示反馈内容填写的对话框,可选择相关的不满意原因的选项。 - -- 图 22 回答不满意反馈 - ![回答不满意反馈](./pictures/feedback-illegal.png) - -其中单击选择“存在错误信息”,需要填写参考答案链接和描述,如图 23 所示。 - -- 图 23 回答不满意反馈——存在错误信息 - ![回答不满意反馈——存在错误信息](./pictures/feedback-misinfo.png) - -### 举报 - -如果发现 AI 返回的内容中有违规信息,可以点击右下角按钮举报,如图 24 所示。点击举报后选择举报类型并提交,若没有合适的选项,请选择“其他”并输入原因,如图 25 所示。 - -- 图 24 举报按钮在对话块的右下角 - ![举报1](./pictures/report.png) - -- 图 25 点击后可选择举报类型 - ![举报2](./pictures/report-options.png) - -## 查看服务协议和隐私政策 - -单击文字“服务协议”,即可查看服务协议,单击文字“隐私政策”,即可查看隐私政策,如图 26、图 27 所示。 - -- 图 26 服务协议和隐私政策入口在页面底部信息栏 - ![服务协议和隐私政策入口](./pictures/privacy-policy-entry.png) - -- 图 27 点击后显示服务协议或隐私政策弹窗 - ![服务协议和隐私政策](./pictures/privacy-policy.png) - -## 附录 - -### 用户信息导出说明 - -EulerCopilot 后台存在用户信息导出功能,如用户需要,需主动通过 邮箱联系我们,运维会将导出的用户信息通过邮箱回送给用户。 diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md b/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md deleted file mode 100644 index 8f5cd63781e1f320af33ffe4baf276f65f4696fa..0000000000000000000000000000000000000000 --- a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md +++ /dev/null @@ -1,60 +0,0 @@ -# 登录 EulerCopilot - -本章节介绍如何登录已部署的 EulerCopilot 网页端的具体步骤。 - -## 浏览器要求 - -浏览器要求如表 
1 所示。 - -- 表 1 浏览器要求 - -| 浏览器类型 | 最低版本 | 推荐版本 | -| ----- | ----- | ----- | -| Google Chrome | 72 | 121 或更高版本 | -| Mozilla Firefox | 89 | 122 或更高版本 | -| Apple Safari | 11.0 | 16.3 或更高版本 | - -## 操作步骤 - -**步骤1** 打开本地 PC 机的浏览器,在地址栏输入部署指南中配置好的域名,按下 `Enter`。在未登录状态,进入 EulerCopilot,会出现登录提示弹出框,如图 1 所示。 - -- 图 1 未登录 - -![未登录](./pictures/login-popup.png) - -**步骤2** 登录 EulerCopilot(已注册账号)。 - -打开登录界面,如图 2 所示。 - -- 图 2 登录 EulerCopilot - -![登录 EulerCopilot](./pictures/authhub-login.png) - -## 注册 EulerCopilot 账号 - -**步骤1** 在登录信息输入框右下角单击“立即注册”,如图 3 所示。 - -- 图 3 立即注册 - -![立即注册](./pictures/authhub-login-click2signup.png) - -**步骤2** 进入账号注册页,根据页面提示填写相关内容,如图 4 所示。 - -- 图 4 账号注册 - -![账号注册](./pictures/authhub-signup.png) - -**步骤3** 按页面要求填写账号信息后,单击“注册”,即可注册成功。注册后即可返回登录。 - -## 退出登录 - -**步骤1** 单击![退出登录](./pictures/icon-user.png)后,会出现“退出登录”下拉框,如图 5 所示。 - -> **说明** -> 账号管理区位于页面的右上角部分,如图 5 所示。 - -- 图 5 账号管理区 - -![账号管理区](./pictures/logout.png) - -**步骤2** 单击“退出登录”即可退出登录,如图 5 所示。 diff --git a/docs/en/openEuler_intelligence/intelligent_vulnerability_patching/intelligent-vulnerability-patching-user-guide.md b/docs/en/openEuler_intelligence/intelligent_vulnerability_patching/intelligent-vulnerability-patching-user-guide.md deleted file mode 100644 index b19e2c0025cf962506a663c72c6c484b67944a69..0000000000000000000000000000000000000000 --- a/docs/en/openEuler_intelligence/intelligent_vulnerability_patching/intelligent-vulnerability-patching-user-guide.md +++ /dev/null @@ -1,58 +0,0 @@ -# openEuler 智能化漏洞修补用户指南 - -## 简介 - -智能化漏洞修补提供了对openEuler的kernel仓库([https://gitee.com/openeuler/kernel](https://gitee.com/openeuler/kernel))进行智能化修补的能力,当前提供了CVE影响范围分析和补丁PR创建的功能。在代表CVE的issue下面评论/analyze和/create_pr命令来执行功能。 - -## 功能入口 - -在src-openEuler的kernel仓库([https://gitee.com/src-openeuler/kernel.git](https://gitee.com/src-openeuler/kernel.git))中,对代表CVE的issue下面进行评论。 - -![CVE截图](pictures/代表CVE的issue.png) - -## /analyze命令 - 
-`/analyze`命令提供了对CVE影响范围进行分析的能力。通过在issue下面评论`/analyze`,即可自动对当前维护范围内的openEuler版本进行分析,判断每一个openEuler版本是否引入该CVE,是否修复该CVE。 - -![/analyze命令](pictures/analyze命令.png) - -> [!NOTE]说明 -> /analyze命令无参数 - -CVE是否引入存在如下几种情况: - -* 无影响 -* 受影响 - -CVE是否修复存在如下几种情况: - -* 未修复 -* 已修复 - -在评论的最后,会贴上引入补丁链接与修复补丁链接。 - -## /create_pr命令 - -`/create_pr`命令提供了对CVE的补丁进行智能化修复的能力。通过在issue下面评论`/create_pr `,即可自动获得漏洞补丁,并通过创建PR来合入openEuler下的linux仓库([https://gitee.com/openeuler/kernel.git](https://gitee.com/openeuler/kernel.git))中。 -![/create_pr命令](pictures/create_pr命令.png) - -`/create_pr`命令存在参数,包括如下几种情况: - -```shell -# 对OLK-5.10分支创建补丁PR -/create_pr OLK-5.10 - -# 对OLK-5.10、OLK-6.6分支创建PR -/create_pr OLK-5.10 OLK-6.6 - -# 对当前所有的上游分支,包括openEuler-1.0-LTS、OLK-5.10、OLK-6.6三个分支 -/create_pr -``` - -返回结果如下: - -* pr创建成功 -* 没有修复补丁 -* 无法修复,存在冲突 - -如果补丁代码与修复分支存在冲突的话,会提示`无法修复,存在冲突`。该能力将会在后续的版本中进行迭代强化。 diff --git a/docs/en/openEuler_intelligence/mcp_agent/_toc.yaml b/docs/en/openEuler_intelligence/mcp_agent/_toc.yaml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/_toc.yaml b/docs/en/openeuler_intelligence/intelligent_assistant/_toc.yaml similarity index 47% rename from docs/en/openEuler_intelligence/intelligent_assistant/_toc.yaml rename to docs/en/openeuler_intelligence/intelligent_assistant/_toc.yaml index 4eeff60477fc964bb62b1ebbe711c71035b1e13a..cf1d20f531dabc437a294f7e38fb8cb64ff70b45 100644 --- a/docs/en/openEuler_intelligence/intelligent_assistant/_toc.yaml +++ b/docs/en/openeuler_intelligence/intelligent_assistant/_toc.yaml @@ -1,56 +1,56 @@ -label: openEuler智能助手 +label: openEuler Intelligent Assistant isManual: true -description: 部署和使用openEuler智能助手 +description: Deploy and use openEuler Intelligent Assistant sections: - - label: 快速上手 + - label: Quick Start sections: - - label: web问答 + - label: Web Q&A sections: - - label: 部署指南 + - label: Deployment Guide sections: - - label: 
无网络环境下部署指南 + - label: Offline Deployment Guide href: ./quick_start/smart_web/deploy_guide/offline.md - - label: 网络环境下部署指南 + - label: Online Deployment Guide href: ./quick_start/smart_web/deploy_guide/online.md - - label: 使用指导 + - label: User Guide sections: - - label: 前言 + - label: Introduction href: ./quick_start/smart_web/user_guide/introduction.md - - label: 注册与登录 + - label: Registration and Login href: ./quick_start/smart_web/user_guide/registration_and_login.md - - label: 智能问答使用 + - label: Intelligent Q&A Usage href: ./quick_start/smart_web/user_guide/qa_guide.md - - label: shell问答 + - label: Shell Q&A sections: - - label: 使用指导 + - label: User Guide sections: - - label: API Key 获取 - href: ./quick_start/smart_shell/user_guide/API_key.md - - label: 命令行助手使用 + - label: API Key Acquisition + href: ./quick_start/smart_shell/user_guide/api_key.md + - label: Command Line Assistant Usage href: ./quick_start/smart_shell/user_guide/shell.md - - label: 进阶使用 + - label: Advanced Usage sections: - - label: 智能诊断 + - label: Intelligent Diagnosis sections: - - label: 部署指南 + - label: Deployment Guide href: ./advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md - - label: 使用指导 + - label: User Guide href: ./advance/smart_diagnosis/user_guide/diagnosis_guidance.md - - label: 智能调优 + - label: Intelligent Tuning sections: - - label: 部署指南 + - label: Deployment Guide href: ./advance/smart_tuning/deploy_guide/tune_deployment.md - - label: 使用指导 + - label: User Guide href: ./advance/smart_tuning/user_guide/tune_guidance.md - - label: 知识库管理 + - label: Knowledge Base Management sections: - - label: 部署指南 - href: ./advance/knowledge_base/deploy_guide/witChainD_deployment.md - - label: 使用指导 - href: ./advance/knowledge_base/user_guide/witChainD_guidance.md - - label: 工作流编排 + - label: Deployment Guide + href: ./advance/knowledge_base/deploy_guide/witchaind_deployment.md + - label: User Guide + href: ./advance/knowledge_base/user_guide/witchaind_guidance.md + - label: Workflow 
Orchestration sections: - - label: 部署指南 + - label: Deployment Guide href: ./advance/work_flow/deploy_guide/workflow_deployment.md - - label: 使用指导 + - label: User Guide href: ./advance/work_flow/user_guide/workflow_guidance.md diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/asset_library_creation_completed_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/asset_library_creation_completed_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_import_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_import_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" 
b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_import_successful_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_import_successful_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_importing_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_importing_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/create_new_asset_library_fill_display_interface.png similarity index 100% rename from 
"docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/create_new_asset_library_fill_display_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/create_new_asset_library_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/create_new_asset_library_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/enter_asset_library_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/enter_asset_library_interface.png diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/export_assets_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/export_assets_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/export_successful.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/export_successful.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/exporting_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" rename to 
docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/exporting_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/import_document_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/import_document_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_details_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_details_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_result_filter_interface.png similarity index 100% rename from 
"docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_result_filter_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_successful_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_successful_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parsing_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parsing_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" 
b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parsing_method_selection_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parsing_method_selection_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/return_to_homepage.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/return_to_homepage.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/select_file.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/select_file.png diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/view_asset_library_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/view_asset_library_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/witchaind_login_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/witchaind_login_interface.png diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witchaind_deployment.md b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witchaind_deployment.md new file mode 100644 index 0000000000000000000000000000000000000000..2a374d5737b925d7e4c5ad12e09ac0074ae925cc --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witchaind_deployment.md @@ -0,0 +1,122 @@ +# Local Asset Library Construction Guide + +This platform is designed to provide 
enterprises (users) with a visual entry point to manage local document assets. The functionality is divided into the following sections: + +- User Management: + + You can log into the witchainD platform with your account and configure large models on the platform to provide capabilities for certain document parsing modes. +- Asset Management + + Create, delete, import assets, edit assets or asset information by specifying asset name, asset description, asset default parsing method, asset default chunk size, and embedding model. +- Document Management + - Support users to upload files with limited size and quantity, and also allow users to download their uploaded files. Users can enter assets by clicking on asset cards, where documents are displayed as entries. + - Support document parsing for txt, md, xlsx, docx, doc, and pdf documents + - There are three document parsing methods: general, ocr, and enhanced modes. In general mode, only text and tables are extracted. In ocr mode, not only text and tables are extracted, but also image content from some documents. Enhanced mode summarizes the extracted content from images based on ocr. + +- Document Parsing Result Management: + + Evaluate or eliminate the impact of chunks on retrieval by specifying chunk categories and keywords to filter target chunks, or by specifying whether chunks are enabled. +- Task Management + + View the status of current import/export asset and document parsing tasks, cancel or remove import/export asset libraries and document parsing tasks +- Retrieval Enhancement: + + The final parsing results of this platform are presented externally in the form of vectorization and keywords. It also provides technologies such as token compression and question completion to enhance the probability of result hits, and uses random contextual association to complete retrieval results. 
+ +## Login Management Platform + +Please enter `https://$(witchaind domain)` in your browser to access EulerCopilot's WitChainD web page. +The login interface is as follows. Enter your account (admin) and password (123456), then click the login button to access the system. + +![witchaind login interface](./pictures/witchaind/witchaind_login_interface.png) + +## Create New Asset + +### 1. View Asset Library + +Enter the asset card display page. The cards show the asset name, description, number of documents, creation time, and asset ID. + +![View asset library interface](./pictures/witchaind/view_asset_library_interface.png) + +You can convert the assets displayed as cards to entry display through the button in the red box. + +### 2. Create New Asset Library + +Click "Create New Asset" to bring up the asset information configuration dialog. + +![Create new asset library interface](./pictures/witchaind/create_new_asset_library_interface.png) + +Fill in the asset library name, asset library description (optional), language, embedding model, default parsing method, and default file chunk size, then click OK. +![Create new asset library fill display interface](./pictures/witchaind/create_new_asset_library_fill_display_interface.png) + +After the asset library is created, it will ask whether to add documents. Click OK. + +![Asset library creation completed interface](./pictures/witchaind/asset_library_creation_completed_interface.png) + +Click OK to complete and enter the asset library. + +![Enter asset library interface](./pictures/witchaind/enter_asset_library_interface.png) + +## Import Documents + +### Single Import + +Click the "Import Document" button to bring up the document upload dialog. Select local files and check them for upload. 
+ +![Import document interface](./pictures/witchaind/import_document_interface.png) + +![Select file](./pictures/witchaind/select_file.png) + +### Batch Import + +Click 1 "Batch Import Assets", 2 "Select Local Assets", 3 "Check Local Assets", and finally click OK to import assets. + +![Batch import interface](./pictures/witchaind/batch_import_interface.png) + +Assets importing. + +![Batch importing interface](./pictures/witchaind/batch_importing_interface.png) + +Asset import successful. + +![Batch import successful interface](./pictures/witchaind/batch_import_successful_interface.png) + +## Parse Documents + +Waiting for parsing. Click cancel to stop document parsing. + +![Parsing interface](./pictures/witchaind/parsing_interface.png) + +After parsing is completed, the parsing status will show "Parse Successful". + +![Parse successful interface](./pictures/witchaind/parse_successful_interface.png) + +There are three document parsing methods: general, ocr, and enhanced modes. Please choose the appropriate document parsing method according to your needs. + +![Parsing method selection interface](./pictures/witchaind/parsing_method_selection_interface.png) + +After parsing is complete, you can click on the filename to enter the document parsing result display details. You can view the document parsing results as shown in the figure below: + +![Parse details interface](./pictures/witchaind/parse_details_interface.png) + +You can filter document parsing fragments, tables, and images through 1, search for corresponding fragments through keywords with fuzzy search through 2, and set whether to enable fragments in retrieval through 3, as shown in the figure below: + +![Parse result filter interface](./pictures/witchaind/parse_result_filter_interface.png) + +## Export Assets + +Click "Return to Homepage". + +![Return to homepage](./pictures/witchaind/return_to_homepage.png) + +Then click "Export Assets". 
+ +![Export assets interface](./pictures/witchaind/export_assets_interface.png) + +Shows that assets are being exported, as shown in the figure below: + +![Exporting interface](./pictures/witchaind/exporting_interface.png) + +Export successful. Click download to show download success. + +![Export successful](./pictures/witchaind/export_successful.png) diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\350\265\204\344\272\247\345\272\223\347\256\241\347\220\206\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/asset_library_management_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\350\265\204\344\272\247\345\272\223\347\256\241\347\220\206\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/asset_library_management_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\211\271\351\207\217\345\220\257\347\224\250.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/batch_enable.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\211\271\351\207\217\345\220\257\347\224\250.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/batch_enable.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\211\271\351\207\217\345\257\274\345\205\245\350\265\204\344\272\247\345\272\223.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/batch_import_asset_libraries.png similarity index 100% 
rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\211\271\351\207\217\345\257\274\345\205\245\350\265\204\344\272\247\345\272\223.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/batch_import_asset_libraries.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\260\345\242\236\350\265\204\344\272\247\345\272\223.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/create_new_asset_library.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\260\345\242\236\350\265\204\344\272\247\345\272\223.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/create_new_asset_library.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\347\256\241\347\220\206\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_management_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\347\256\241\347\220\206\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_management_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\350\247\243\346\236\220.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_parsing.png similarity index 100% rename from 
"docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\350\247\243\346\236\220.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_parsing.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\350\247\243\346\236\2202.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_parsing_2.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\350\247\243\346\236\2202.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_parsing_2.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\223.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\223.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\2230.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library_0.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\2230.png" 
rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library_0.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\223\351\205\215\347\275\256.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library_configuration.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\223\351\205\215\347\275\256.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library_configuration.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\346\226\207\346\241\243\351\205\215\347\275\256.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_document_configuration.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\346\226\207\346\241\243\351\205\215\347\275\256.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_document_configuration.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\345\257\274\345\207\272\350\265\204\344\272\247\345\272\223.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/export_asset_library.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\345\257\274\345\207\272\350\265\204\344\272\247\345\272\223.png" rename 
to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/export_asset_library.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\345\257\274\345\205\245\346\226\207\346\241\243.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/import_documents.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\345\257\274\345\205\245\346\226\207\346\241\243.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/import_documents.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\250\241\345\236\213\351\205\215\347\275\256.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/model_configuration.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\250\241\345\236\213\351\205\215\347\275\256.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/model_configuration.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\350\247\243\346\236\220\345\256\214\346\210\220.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/parsing_complete.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\350\247\243\346\236\220\345\256\214\346\210\220.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/parsing_complete.png diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\220\234\347\264\242\346\226\207\346\241\243.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/search_documents.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\220\234\347\264\242\346\226\207\346\241\243.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/search_documents.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\234\254\345\235\227\347\273\223\346\236\234\351\242\204\350\247\210.png" b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/text_block_result_preview.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\234\254\345\235\227\347\273\223\346\236\234\351\242\204\350\247\210.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/text_block_result_preview.png diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witchaind_guidance.md b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witchaind_guidance.md new file mode 100644 index 0000000000000000000000000000000000000000..d7294f578aac43a72699c1bda711a5f543d57291 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witchaind_guidance.md @@ -0,0 +1,87 @@ +# witChainD User Guide - Knowledge Base Management + +After completing the witChainD deployment, you can use witChainD for knowledge base management. 
+ +The following sections will introduce witChainD's functionality from a page perspective. + +## 1. Asset Library Management Interface + +This page is the asset library management interface, which users will enter after logging in. + +![Asset Library Management Interface](./pictures/asset_library_management_interface.png) + +**Supported Operations:** + +- Configure Models: Click the settings button in the upper right corner to modify model-related configurations. + + ![Model Configuration](./pictures/model_configuration.png) + +- Create New Asset Library: Click the "New Asset Library" button to create a new one, supporting custom name, description, language, embedding model, parsing method, file chunk size, and document category. Note: Duplicate names will automatically be modified to the asset library ID. + + ![Create New Asset Library](./pictures/create_new_asset_library.png) + +- Edit Asset Library: Click the edit button of the asset library to edit, supporting modification of name, description, language, parsing method, file chunk size, and document category. Note: Cannot be modified to duplicate names. + + ![Edit Asset Library](./pictures/edit_asset_library_0.png) + + ![Edit Asset Library](./pictures/edit_asset_library.png) + +- Export Asset Library: Click the export button of the asset library to export. After export completion, you need to download the corresponding asset library to local according to the download task in the task list. + + ![Export Asset Library](./pictures/export_asset_library.png) + +- Batch Import Asset Libraries: Click "Batch Import", upload local files and select them to import. + + ![Batch Import Asset Libraries](./pictures/batch_import_asset_libraries.png) + +- Search Asset Libraries: Type text in the search bar to search for asset libraries whose names contain the corresponding text. + +## 2. 
Document Management Interface + +Click on the corresponding asset library in the asset management interface to enter the document management interface. + +![Document Management Interface](./pictures/document_management_interface.png) + +**Supported Operations:** + +- Import Documents: Click "Import Documents" to upload files from local for import. After import, parsing will automatically start with the default configuration of that asset library. + + ![Import Documents](./pictures/import_documents.png) + +- Parse Documents: Click "Parse" in the operations to parse the document. You can also select multiple documents for batch parsing. + + ![Document Parsing](./pictures/document_parsing.png) + + ![Document Parsing 2](./pictures/document_parsing_2.png) + + ![Parsing Complete](./pictures/parsing_complete.png) + +- Edit Document Configuration: Click "Edit" to edit document configuration, supporting editing of document name, parsing method, category, and file chunk size. + + ![Edit Document Configuration](./pictures/edit_document_configuration.png) + +- Download Documents: Click download to download documents to local, or select multiple documents for batch download. + +- Delete Documents: Click delete to remove documents from the asset library, or select multiple documents for batch deletion. + +- Search Documents: Click the search key next to the document name, type the search text in the popup search box to search for documents whose names contain that text. + + ![Search Documents](./pictures/search_documents.png) + +- Edit Asset Library Configuration: Supports editing asset library name, description, language, default parsing method, file chunk size, and document information category. + + ![Edit Asset Library Configuration](./pictures/edit_asset_library_configuration.png) + +## 3. Parsing Result Management Interface + +Click on a parsed document to enter the document's parsing result management interface. 
The interface displays text block content previews after document parsing in order, with each text block accompanied by a tag indicating whether the information in that text block comes from paragraphs, lists, or images in the document. The switch on the right indicates whether that text block is enabled. + +![Text Block Result Preview](./pictures/text_block_result_preview.png) + +**Supported Operations:** + +- Disable/Enable Text Blocks: Click the switch on the right side of the text block to disable/enable the corresponding text block, or select multiple text blocks for batch disable/enable. + + ![Batch Enable](./pictures/batch_enable.png) + +- Search Text Blocks: Type content in the search box to find text blocks containing the corresponding content. diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md similarity index 31% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md index 7581b1bd17e804f72923d56b5e4f7a036ca2bd93..db1319efb33e460083c695244a738a6721f06a29 100644 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md +++ b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md @@ -1,93 +1,93 @@ -# 智能诊断部署指南 +# Smart Diagnosis Deployment Guide -## 准备工作 +## Preparation -+ 提前安装 [EulerCopilot 命令行(智能 Shell)客户端](../../../quick_start/smart_shell/user_guide/shell.md) ++ Install [EulerCopilot Command Line (Smart Shell) Client](../../../quick_start/smart_shell/user_guide/shell.md) in advance -+ 被诊断机器不能安装 crictl 和 isula,只能有 docker 一个容器管理工具 ++ The machine to be 
diagnosed cannot install crictl and isula, only docker as the container management tool -+ 在需要被诊断的机器上安装 gala-gopher 和 gala-anteater ++ Install gala-gopher and gala-anteater on the machine to be diagnosed -## 部署 gala-gopher +## Deploying gala-gopher -### 1. 准备 BTF 文件 +### 1. Prepare BTF File -**如果Linux内核支持 BTF,则不需要准备 BTF 文件。**可以通过以下命令来查看Linux内核是否已经支持 BTF: +**If the Linux kernel supports BTF, then there is no need to prepare a BTF file.** You can check whether the Linux kernel already supports BTF by the following command: ```bash cat /boot/config-$(uname -r) | grep CONFIG_DEBUG_INFO_BTF ``` -如果输出结果为`CONFIG_DEBUG_INFO_BTF=y`,则表示内核支持BTF。否则表示内核不支持BTF。 -如果内核不支持BTF,需要手动制作BTF文件。步骤如下: +If the output result is `CONFIG_DEBUG_INFO_BTF=y`, then it means the kernel supports BTF. Otherwise it means the kernel does not support BTF. +If the kernel does not support BTF, you need to manually create a BTF file. The steps are as follows: -1. 获取当前Linux内核版本的 vmlinux 文件 +1. Obtain the vmlinux file of the current Linux kernel version - vmlinux 文件存放在 `kernel-debuginfo` 包里面,存放路径为 `/usr/lib/debug/lib/modules/$(uname -r)/vmlinux`。 + The vmlinux file is stored in the `kernel-debuginfo` package, and the storage path is `/usr/lib/debug/lib/modules/$(uname -r)/vmlinux`. - 例如,对于 `kernel-debuginfo-5.10.0-136.65.0.145.oe2203sp1.aarch64`,对应的vmlinux路径为`/usr/lib/debug/lib/modules/5.10.0-136.65.0.145.oe2203sp1.aarch64/vmlinux`。 + For example, for `kernel-debuginfo-5.10.0-136.65.0.145.oe2203sp1.aarch64`, the corresponding vmlinux path is `/usr/lib/debug/lib/modules/5.10.0-136.65.0.145.oe2203sp1.aarch64/vmlinux`. -2. 制作 BTF 文件 +2. Create BTF File - 基于获取到 vmlinux 文件来制作 BTF 文件。这一步可以在自己的环境里操作。首先,需要安装相关的依赖包: + Based on the obtained vmlinux file to create a BTF file. This step can be performed in your own environment. 
First, install the relevant dependency packages: ```bash - # 说明:dwarves 包中包含 pahole 命令,llvm 包中包含 llvm-objcopy 命令 + # Note: the dwarves package contains the pahole command, and the llvm package contains the llvm-objcopy command yum install -y llvm dwarves ``` - 执行下面的命令行,生成 BTF 文件。 + Execute the following command line to generate the BTF file. ```bash - kernel_version=4.19.90-2112.8.0.0131.oe1.aarch64 # 说明:这里需要替换成目标内核版本,可通过 uname -r 命令获取 + kernel_version=4.19.90-2112.8.0.0131.oe1.aarch64 # Note: replace this with the target kernel version, which can be obtained by the uname -r command pahole -J vmlinux llvm-objcopy --only-section=.BTF --set-section-flags .BTF=alloc,readonly --strip-all vmlinux ${kernel_version}.btf strip -x ${kernel_version}.btf ``` - 生成的 BTF 文件名称为`.btf`格式,其中 ``为目标机器的内核版本,可通过 `uname -r` 命令获取。 + The generated BTF file is named `<kernel_version>.btf`, where `<kernel_version>` is the kernel version of the target machine, which can be obtained by the `uname -r` command. -### 2. 下载 gala-gopher 容器镜像 +### 2. Download gala-gopher Container Image -#### 在线下载 +#### Online Download -gala-gopher 容器镜像已归档到 仓库中,可通过如下命令获取。 +The gala-gopher container image has been archived to the repository, and can be obtained by the following command. ```bash -# 获取 aarch64 架构的镜像 +# Get the aarch64 architecture image docker pull hub.oepkgs.net/a-ops/gala-gopher-profiling-aarch64:latest -# 获取 x86_64 架构的镜像 +# Get the x86_64 architecture image docker pull hub.oepkgs.net/a-ops/gala-gopher-profiling-x86_64:latest ``` -#### 离线下载 +#### Offline Download -若无法通过在线下载的方式下载容器镜像,可联系我(何秀军 00465007)获取压缩包。 +If you cannot download the container image through the online download method, contact me (He XiuJun 00465007) to obtain the compressed package.
-拿到压缩包后,放到目标机器上,解压并加载容器镜像,命令行如下: +After obtaining the compressed package, place it on the target machine, decompress it and load the container image, the command line is as follows: ```bash tar -zxvf gala-gopher-profiling-aarch64.tar.gz docker load < gala-gopher-profiling-aarch64.tar ``` -### 3. 启动 gala-gopher 容器 +### 3. Start gala-gopher Container -容器启动命令: +Container startup command: ```shell docker run -d --name gala-gopher-profiling --privileged --pid=host --network=host -v /:/host -v /etc/localtime:/etc/localtime:ro -v /sys:/sys -v /usr/lib/debug:/usr/lib/debug -v /var/lib/docker:/var/lib/docker -v /tmp/$(uname -r).btf:/opt/gala-gopher/btf/$(uname -r).btf -e GOPHER_HOST_PATH=/host gala-gopher-profiling-aarch64:latest ``` -启动配置参数说明: +Startup configuration parameter description: -+ `-v /tmp/$(uname -r).btf:/opt/gala-gopher/btf/$(uname -r).btf` :如果内核支持 BTF,则删除该配置即可。如果内核不支持 BTF,则需要将前面准备好的 BTF 文件拷贝到目标机器上,并将 `/tmp/$(uname -r).btf` 替换为对应的路径。 -+ `gala-gopher-profiling-aarch64-0426` :gala-gopher容器对应的tag,替换成实际下载的tag。 ++ `-v /tmp/$(uname -r).btf:/opt/gala-gopher/btf/$(uname -r).btf` : If the kernel supports BTF, then you can simply remove this configuration. If the kernel does not support BTF, you need to copy the previously prepared BTF file to the target machine and replace `/tmp/$(uname -r).btf` with the corresponding path. ++ `gala-gopher-profiling-aarch64-0426` : The tag corresponding to the gala-gopher container, replace with the actual downloaded tag. 
-探针启动: +Probe Startup: -+ `container_id` 为需要观测的容器 id -+ 分别启动 sli 和 container 探针 ++ `container_id` is the ID of the container to be observed ++ Start the sli and container probes separately ```bash curl -X PUT http://localhost:9999/sli -d json='{"cmd":{"check_cmd":""},"snoopers":{"container_id":[""]},"params":{"report_period":5},"state":"running"}' @@ -107,33 +107,33 @@ curl -X PUT http://localhost:9999/sli -d json='{"state": "stopped"}' curl -X PUT http://localhost:9999/container -d json='{"state": "stopped"}' ``` -## 部署 gala-anteater +## Deploying gala-anteater -源码部署: +Source code deployment: ```bash -# 请指定分支为 930eulercopilot +# Please specify the branch as 930eulercopilot git clone https://gitee.com/GS-Stephen_Curry/gala-anteater.git ``` -安装部署请参考 -(请留意python版本导致执行setup.sh install报错) +For installation and deployment, please refer to +(please note that python version may cause setup.sh install to fail) -镜像部署: +Image deployment: ```bash docker pull hub.oepkgs.net/a-ops/gala-anteater:2.0.2 ``` -`/etc/gala-anteater/config/gala-anteater.yaml` 中 Kafka 和 Prometheus 的 `server` 和 `port` 需要按照实际部署修改,`model_topic`、`meta_topic`、`group_id` 自定义 +The `server` and `port` of Kafka and Prometheus in `/etc/gala-anteater/config/gala-anteater.yaml` need to be modified according to the actual deployment, and `model_topic`, `meta_topic`, `group_id` are customizable ```yaml Kafka: server: "xxxx" port: "xxxx" - model_topic: "xxxx" # 自定义,与rca配置中保持一致 - meta_topic: "xxxx" # 自定义,与rca配置中保持一致 - group_id: "xxxx" # 自定义,与rca配置中保持一致 + model_topic: "xxxx" # customizable, keep consistent with rca configuration + meta_topic: "xxxx" # customizable, keep consistent with rca configuration + group_id: "xxxx" # customizable, keep consistent with rca configuration # auth_type: plaintext/sasl_plaintext, please set "" for no auth auth_type: "" username: "" @@ -145,33 +145,33 @@ Prometheus: steps: "5" ``` -gala-anteater 中模型的训练依赖于 gala-gopher 采集的数据,因此请保证 gala-gopher 探针正常运行至少24小时,在运行 gala-anteater。 +The model 
training in gala-anteater depends on the data collected by gala-gopher, so please ensure that the gala-gopher probe runs normally for at least 24 hours before running gala-anteater. -## 部署 gala-ops +## Deploying gala-ops -每个中间件的大致介绍: +Introduction to each middleware: -kafka : 一个数据库中间件, 分布式数据分流作用, 可以配置为当前的管理节点。 +kafka: A database middleware with distributed data-distribution (stream-splitting) functionality, which can be configured as the current management node. -prometheus:性能监控, 配置需要监控的生产节点 ip list。 +prometheus: Performance monitoring, configure the ip list of production nodes to be monitored. -直接通过yum install安装kafka和prometheus,可参照安装脚本 +Install kafka and prometheus directly through yum install, refer to the installation script -只需要参照其中 kafka 和 prometheus 的安装即可 +You only need to refer to the installation of kafka and prometheus in it -## 部署 euler-copilot-rca +## Deploying euler-copilot-rca -镜像拉取 +Image pull ```bash docker pull hub.oepkgs.net/a-ops/euler-copilot-rca:0.9.1 ``` -+ 修改 `config/config.json` 文件,配置 gala-gopher 镜像的 `container_id` 以及 `ip`,Kafka 和 Prometheus 的 `ip` 和 `port`(与上述 gala-anteater 配置保持一致) ++ Modify the `config/config.json` file to configure gala-gopher image's `container_id` and `ip`, Kafka and Prometheus's `ip` and `port` (keep consistent with the above gala-anteater configuration) ```yaml -"gopher_container_id": "xxxx", # gala-gopher的容器id - "remote_host": "xxxx" # gala-gopher的部署机器ip +"gopher_container_id": "xxxx", # Container ID of gala-gopher + "remote_host": "xxxx" # Machine IP address of gala-gopher }, "kafka": { "server": "xxxx", diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md new file mode 100644 index 0000000000000000000000000000000000000000..8a7bf783c1cf1686258c00f313bcc17b7d67ef91 --- /dev/null +++ 
b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md @@ -0,0 +1,50 @@ +# Smart Plugin: Intelligent Diagnosis + +After deploying the intelligent diagnosis tool, you can use the EulerCopilot intelligent agent framework to perform diagnostics on your local machine. +In intelligent diagnosis mode, the intelligent agent framework service can call local diagnostic tools to diagnose abnormal conditions, analyze them, and generate reports. + +## Operation Steps + +**Step 1** Switch to "Smart Plugin" mode + +```bash +copilot -p +``` + +![Switch to Smart Plugin Mode](./pictures/shell-plugin-diagnose-switch-mode.png) + +**Step 2** Abnormal Event Detection + +```bash +Help me perform abnormal event detection +``` + +Press `Ctrl + O` to ask a question, then select "Intelligent Diagnosis" from the plugin list. + +![Abnormal Event Detection](./pictures/shell-plugin-diagnose-detect.png) + +**Step 3** View Abnormal Event Details + +```bash +View abnormal event details for XXX container +``` + +![View Abnormal Event Details](./pictures/shell-plugin-diagnose-detail.png) + +**Step 4** Execute Abnormal Event Analysis + +```bash +Please perform profiling analysis on XXX metrics for XXX container +``` + +![Abnormal Event Analysis](./pictures/shell-plugin-diagnose-profiling.png) + +**Step 5** View Abnormal Event Analysis Report + +Wait 5 to 10 minutes, then view the analysis report. 
+ +```bash +View the profiling report corresponding to +``` + +![Execute Optimization Script](./pictures/shell-plugin-diagnose-report.png) diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-ask.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-ask.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-ask.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-ask.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-exit.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-exit.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-exit.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-exit.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-multi-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-multi-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-multi-select.png rename to 
docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-multi-select.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-select.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-select.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-interact.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-interact.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-interact.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-interact.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-help.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-help.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-help.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-help.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-init.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-init.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-init.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-init.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detail.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detail.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detail.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detail.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detect.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detect.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detect.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detect.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-profiling.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-profiling.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-profiling.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-profiling.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-report.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-report.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-report.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-report.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-switch-mode.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-switch-mode.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-switch-mode.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-switch-mode.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-select.png rename to 
docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-select.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-metrics-collect.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-metrics-collect.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-metrics-collect.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-metrics-collect.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-report.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-report.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-report.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-report.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-exec.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-exec.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-exec.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-exec.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-gen.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-gen.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-gen.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-gen.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-view.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-view.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-view.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-view.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-switch-mode.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-switch-mode.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-switch-mode.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-switch-mode.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md similarity index 39% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md index a770bc81e7efd9ad9f3910d92396a97c62be7ea2..8a75b9e072ceb4216e8b54bd9fdc884f0ad8c219 100644 --- a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md +++ b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md @@ -1,23 +1,23 @@ -# 智能调优部署指南 +# Smart Tuning Deployment Guide -## 准备工作 +## Prerequisites -+ 提前安装 [EulerCopilot 命令行(智能 Shell)客户端](../../../quick_start/smart_shell/user_guide/shell.md)) ++ Install [EulerCopilot Command Line (Smart Shell) Client](../../../quick_start/smart_shell/user_guide/shell.md) in advance -+ 被调优机器需要为 openEuler 22.03 LTS-SP3 ++ The machine to be tuned must be openEuler 22.03 LTS-SP3 -+ 在需要被调优的机器上安装依赖 ++ Install dependencies on the machine that needs to be tuned ```bash yum install -y sysstat perf ``` -+ 被调优机器需要开启 SSH 22端口 ++ The machine to be tuned must have SSH port 22 enabled -## 编辑配置文件 +## Edit Configuration File -修改values.yaml文件的tune部分,将 `enable` 字段改为 `True` ,并配置大模型设置、 -Embedding模型文件地址、以及需要调优的机器和对应机器上的 mysql 的账号名以及密码 +Modify the tune section in the values.yaml file, change 
the `enable` field to `True`, and configure the large language model settings, +Embedding model file address, as well as the machines that need tuning and their corresponding MySQL account names and passwords ```bash vim /home/euler-copilot-framework/deploy/chart/agents/values.yaml @@ -25,75 +25,75 @@ vim /home/euler-copilot-framework/deploy/chart/agents/values.yaml ```yaml tune: - # 【必填】是否启用智能调优Agent + # [Required] Whether to enable Smart Tuning Agent enabled: true - # 镜像设置 + # Image settings image: - # 镜像仓库。留空则使用全局设置。 + # Image registry. Leave empty to use global settings. registry: "" - # 【必填】镜像名称 + # [Required] Image name name: euler-copilot-tune - # 【必填】镜像标签 + # [Required] Image tag tag: "0.9.1" - # 拉取策略。留空则使用全局设置。 + # Pull policy. Leave empty to use global settings. imagePullPolicy: "" - # 【必填】容器根目录只读 + # [Required] Container root directory read-only readOnly: false - # 性能限制设置 + # Performance limit settings resources: {} - # Service设置 + # Service settings service: - # 【必填】Service类型,ClusterIP或NodePort + # [Required] Service type, ClusterIP or NodePort type: ClusterIP nodePort: - # 大模型设置 + # Large language model settings llm: - # 【必填】模型地址(需要包含v1后缀) + # [Required] Model address (must include v1 suffix) url: - # 【必填】模型名称 + # [Required] Model name name: "" - # 【必填】模型API Key + # [Required] Model API Key key: "" - # 【必填】模型最大Token数 + # [Required] Maximum tokens for the model max_tokens: 8096 - # 【必填】Embedding模型文件地址 + # [Required] Embedding model file address embedding: "" - # 待优化机器信息 + # Target optimization machine information machine: - # 【必填】IP地址 + # [Required] IP address ip: "" - # 【必填】Root用户密码 - # 注意:必需启用Root用户以密码形式SSH登录 + # [Required] Root user password + # Note: Root user must be enabled for SSH login with password password: "" - # 待优化应用设置 + # Target optimization application settings mysql: - # 【必填】数据库用户名 + # [Required] Database username user: "root" - # 【必填】数据库密码 + # [Required] Database password password: "" ``` -## 安装智能调优插件 +## Install Smart Tuning Plugin 
```bash helm install -n euler-copilot agents . ``` -如果之前有执行过安装,则按下面指令更新插件服务 +If you have installed before, update the plugin service with the following command ```bash helm upgrade -n euler-copilot agents . ``` -如果 framework未重启,则需要重启framework配置 +If the framework has not been restarted, you need to restart the framework configuration ```bash kubectl delete pod framework-deploy-service-bb5b58678-jxzqr -n eulercopilot ``` -## 测试 +## Testing -+ 查看 tune 的 pod 状态 ++ Check the tune pod status ```bash NAME READY STATUS RESTARTS AGE @@ -108,19 +108,19 @@ kubectl delete pod framework-deploy-service-bb5b58678-jxzqr -n eulercopilot tune-deploy-agents-5d46bfdbd4-xph7b 1/1 Running 0 2d ``` -+ pod启动失败排查办法 - + 检查 euler-copilot-tune 目录下的 openapi.yaml 中 `servers.url` 字段,确保调优服务的启动地址被正确设置 - + 检查 `$plugin_dir` 插件文件夹的路径是否配置正确,该变量位于 `deploy/chart/euler_copilot/values.yaml` 中的 `framework`模块,如果插件目录不存在,需新建该目录,并需要将该目录下的 euler-copilot-tune 文件夹放到 `$plugin_dir` 中。 - + 检查sglang的地址和key填写是否正确,该变量位于 `vim /home/euler-copilot-framework/deploy/chart/euler_copilot/values.yaml` ++ Pod startup failure troubleshooting methods + + Check the `servers.url` field in the openapi.yaml under the euler-copilot-tune directory to ensure the tuning service startup address is correctly set + + Check if the `$plugin_dir` plugin folder path is correctly configured. This variable is located in the `framework` module in `deploy/chart/euler_copilot/values.yaml`. If the plugin directory doesn't exist, create it, and place the euler-copilot-tune folder from that directory into `$plugin_dir`. + + Check if the sglang address and key are correctly filled. 
This variable is located in `vim /home/euler-copilot-framework/deploy/chart/euler_copilot/values.yaml` ```yaml - # 用于Function Call的模型 + # Model for Function Call scheduler: - # 推理框架类型 + # Inference framework type backend: sglang - # 模型地址 + # Model address url: "" - # 模型 API Key + # Model API Key key: "" - # 数据库设置 + # Database settings ``` diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-ask.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-ask.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-ask.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-ask.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-exit.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-exit.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-exit.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-exit.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-multi-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-multi-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-multi-select.png rename to 
docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-multi-select.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-select.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-select.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-interact.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-interact.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-interact.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-interact.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-help.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-help.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-help.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-help.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-init.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-init.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-init.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-init.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detail.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detail.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detail.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detail.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detect.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detect.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detect.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detect.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-profiling.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-profiling.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-profiling.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-profiling.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-report.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-report.png similarity index 100% rename 
from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-report.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-report.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-switch-mode.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-switch-mode.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-switch-mode.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-switch-mode.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-select.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-select.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-metrics-collect.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-metrics-collect.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-metrics-collect.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-metrics-collect.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-report.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-report.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-report.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-report.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-exec.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-exec.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-exec.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-exec.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-gen.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-gen.png 
similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-gen.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-gen.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-view.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-view.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-view.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-view.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-switch-mode.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-switch-mode.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-switch-mode.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-switch-mode.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin.png b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin.png rename to docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin.png diff --git 
a/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md new file mode 100644 index 0000000000000000000000000000000000000000..fe4dd5f849827e460914d7db5378bc4aef68d994 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md @@ -0,0 +1,53 @@ +# Smart Plugin: Intelligent Tuning + +After deploying the intelligent tuning tool, you can use the EulerCopilot agent framework to perform tuning on the local machine. +In intelligent tuning mode, the agent framework service can call the local tuning tool to collect performance metrics and generate performance analysis reports and performance optimization recommendations. + +## Operation Steps + +**Step 1** Switch to "Intelligent Tuning" mode + +```bash +copilot -t +``` + +![Switch to Intelligent Tuning Mode](./pictures/shell-plugin-tuning-switch-mode.png) + +**Step 2** Collect Performance Metrics + +```bash +Help me collect performance metrics +``` + +![Performance Metrics Collection](./pictures/shell-plugin-tuning-metrics-collect.png) + +**Step 3** Generate Performance Analysis Report + +```bash +Help me generate a performance analysis report +``` + +![Performance Analysis Report](./pictures/shell-plugin-tuning-report.png) + +**Step 4** Generate Performance Optimization Recommendations + +```bash +Please generate a performance optimization script +``` + +![Performance Optimization Script](./pictures/shell-plugin-tuning-script-gen.png) + +**Step 5** Select "Execute Command" to run the optimization script + +![Execute Optimization Script](./pictures/shell-plugin-tuning-script-exec.png) + +- Script content as shown in the figure: + ![Optimization Script Content](./pictures/shell-plugin-tuning-script-view.png) + +## Remote Tuning + +If you need to perform remote tuning on other machines, please add the corresponding 
machine's IP address before the questions in the examples above. + +For example: `Please perform performance metrics collection on the machine 192.168.1.100.` + +Before performing remote tuning, please ensure that the target machine has deployed the intelligent tuning tool, and also ensure that the EulerCopilot agent framework can access the target machine. diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md b/docs/en/openeuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md new file mode 100644 index 0000000000000000000000000000000000000000..f3f584960e9b4c5f8d03ddaa7cdcff6bfa5c22f0 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md @@ -0,0 +1,3 @@ +# Workflow Orchestration Deployment Guide + +(Current content is being updated, please wait.) diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md b/docs/en/openeuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md new file mode 100644 index 0000000000000000000000000000000000000000..75f04c38f56d3a3ed6d6c3b696ffdf9cc0c3896a --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md @@ -0,0 +1,3 @@ +# Workflow Orchestration User Guide + +(Current content is being updated, please wait.) 
diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/api_key.md b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/api_key.md new file mode 100644 index 0000000000000000000000000000000000000000..9b7025519cbecc39ec79cf991cbf1ff7c1f1a2e7 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/api_key.md @@ -0,0 +1,29 @@ +# Getting API Key + +## Introduction + +EulerCopilot command-line assistant uses API Key to verify user identity and obtain API access permissions. +Therefore, before you start using it, you need to get an API Key first. + +## Important Notes + +- Please keep your API Key secure and do not share it with others. +- API Key is only used for command-line assistant and DevStation desktop client, not for other purposes. +- Each user can only have one API Key. Creating a duplicate API Key will invalidate the old key. +- API Key is only displayed once when created. Please save it immediately. If the key is lost, you need to recreate it. +- If you encounter a "request too frequent" error during use, your API Key may have been used by others. Please go to the official website to refresh or revoke the API Key immediately. + +## How to Get + +1. Log in to the EulerCopilot web interface. +2. Click on the avatar in the upper right corner and select "API KEY". +3. Click the "New" button. +4. **Please save the API Key immediately. It is only displayed once when created. Do not share it with others.** + +## Managing API Key + +1. Log in to the EulerCopilot web interface. +2. Click on the avatar in the upper right corner and select "API KEY". +3. Click the "Refresh" button to refresh the API Key; click the "Revoke" button to revoke the API Key. + +- After refreshing the API Key, the old key becomes invalid. Please save the newly generated API Key immediately. 
diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-ask.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-ask.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-ask.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-ask.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-exit.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-exit.png similarity index 100% rename from 
docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-exit.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-exit.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-multi-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-multi-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-multi-select.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-multi-select.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-select.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-select.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-interact.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-interact.png similarity index 100% rename from 
docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-interact.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-interact.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-help.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-help.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-help.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-help.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-init.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-init.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-init.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-init.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detail.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detail.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detail.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detail.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detect.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detect.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detect.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detect.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-profiling.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-profiling.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-profiling.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-profiling.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-report.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-report.png similarity index 100% rename from 
docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-report.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-report.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-switch-mode.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-switch-mode.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-switch-mode.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-switch-mode.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-select.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-select.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-metrics-collect.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-metrics-collect.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-metrics-collect.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-metrics-collect.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-report.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-report.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-report.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-report.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-exec.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-exec.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-exec.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-exec.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-gen.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-gen.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-gen.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-gen.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-view.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-view.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-view.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-view.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-switch-mode.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-switch-mode.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-switch-mode.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-switch-mode.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin.png similarity index 100% rename from 
docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin.png diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md new file mode 100644 index 0000000000000000000000000000000000000000..dcf6a97bb57ad4f66125330565d412315e653147 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md @@ -0,0 +1,157 @@ +# Command Line Assistant User Guide + +## Introduction + +EulerCopilot Command Line Assistant is a command line (Shell) AI assistant that allows you to quickly generate and execute Shell commands, thereby improving your work efficiency. Additionally, the standard version based on Gitee AI online service comes with built-in knowledge about openEuler, helping you learn and use the openEuler operating system. + +## System Requirements + +- Operating System: openEuler 22.03 LTS SP3, or openEuler 24.03 LTS and above +- Command Line Software: + - Linux Desktop Environment: Supports built-in terminals in desktop environments such as GNOME, KDE, DDE, etc. + - Remote SSH Connection: Supports terminals compatible with xterm-256 and UTF-8 character sets + +## Installation + +EulerCopilot Command Line Assistant can be installed through the OEPKGS repository. 
+ +### Configure OEPKGS Repository + +```bash +sudo dnf config-manager --add-repo https://repo.oepkgs.net/openeuler/rpm/`sed 's/release //;s/[()]//g;s/ /-/g' /etc/openEuler-release`/extras/`uname -m` +``` + +```bash +sudo dnf clean all +``` + +```bash +sudo dnf makecache +``` + +### Install Command Line Assistant + +```bash +sudo dnf install eulercopilot-cli +``` + +If you encounter `Error: GPG check FAILED`, use `--nogpgcheck` to skip the check. + +```bash +sudo dnf install --nogpgcheck eulercopilot-cli +``` + +## Initialization + +```bash +copilot --init +``` + +Then follow the prompts to enter your API Key to complete the configuration. + +![shell-init](./pictures/shell-init.png) + +Please exit the terminal or reconnect your SSH session before first use to make the configuration effective. + +- **View Assistant Help Page** + + ```bash + copilot --help + ``` + + ![shell-help](./pictures/shell-help.png) + +## Usage + +Enter your question in the terminal and press `Ctrl + O` to ask. + +### Shortcuts + +- After entering a natural language question, press `Ctrl + O` to directly ask the AI. +- Pressing `Ctrl + O` directly will automatically fill in the command prefix `copilot`, and you can press `Enter` after entering parameters to execute. + +### Smart Q&A + +After the command line assistant is initialized, it defaults to smart Q&A mode. +The current mode will be displayed in the **top-left corner** of the command prompt. +If the current mode is not "Smart Q&A", execute `copilot -c` (`copilot --chat`) to switch to smart Q&A mode. + +![chat-ask](./pictures/shell-chat-ask.png) + +After the AI answers, it will generate recommended questions based on the Q&A history. You can copy and paste them into the command line for follow-up questions. After entering a follow-up question, press `Enter` to ask. 
+ +![chat-next](./pictures/shell-chat-continue.png) + +![chat-next-result](./pictures/shell-chat-continue-result.png) + +Smart Q&A mode supports continuous follow-up questions, with each follow-up question able to associate with up to 3 historical Q&A contexts. + +Enter `exit` to exit smart Q&A mode and return to the Linux command line. + +![chat-exit](./pictures/shell-chat-exit.png) + +- If you encounter program errors during Q&A, you can press `Ctrl + C` to immediately exit the current Q&A, then try asking again. + +### Shell Commands + +The AI will return Shell commands based on your question. EulerCopilot Command Line Assistant can explain, edit, or execute these commands and display the command execution results. + +![shell-cmd](./pictures/shell-cmd.png) + +The command line assistant will automatically extract commands from the AI's response and display related operations. You can use the up and down arrow keys to select operations and press `Enter` to confirm. + +![shell-cmd-interact](./pictures/shell-cmd-interact.png) + +#### Explanation + +If the AI only returns one command, selecting explanation will directly request the AI to explain the command and display the answer. +If the AI answers with multiple commands, selecting will display a command list, and you can choose **one** command at a time to request AI explanation. + +![shell-cmd-explain-select](./pictures/shell-cmd-explain-select.png) + +After completing the explanation, you can continue to select other operations. + +![shell-cmd-explain-result](./pictures/shell-cmd-explain-result.png) + +#### Edit + +![shell-cmd-edit](./pictures/shell-cmd-edit.png) + +Select a command to edit, and press `Enter` to confirm after editing. + +![shell-cmd-edit-result](./pictures/shell-cmd-edit-result.png) + +After completing the edit, you can continue editing other commands or select other operations. 
+ +#### Execute + +If the AI only returns one command, selecting execute will directly execute the command and display the execution result. +If the AI answers with multiple commands, selecting will display a command list, and you can choose **multiple** commands to execute. + +You can use the up and down arrow keys to move the cursor, press the `Spacebar` to select commands, and press `Enter` to execute the selected commands. +Selected commands will display with **blue highlighting**, as shown in the figure. + +![shell-cmd-exec-multi-select](./pictures/shell-cmd-exec-multi-select.png) + +If no commands are selected and you press `Enter` directly, it will skip executing commands and proceed directly to the next round of Q&A. + +After pressing `Enter`, the selected commands will be executed from top to bottom in sequence. + +![shell-cmd-exec-result](./pictures/shell-cmd-exec-result.png) + +If errors occur during execution, the command line assistant will display error messages and **terminate command execution**, entering the next round of Q&A. +You can prompt the AI to correct commands or ask the AI to regenerate commands in the next round of Q&A. + +## Uninstallation + +```bash +sudo dnf remove eulercopilot-cli +``` + +Then use the following command to delete the configuration file. + +```bash +rm ~/.config/eulercopilot/config.json +``` + +After uninstallation is complete, please restart the terminal or reconnect your SSH session to restore the configuration. 
diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md new file mode 100644 index 0000000000000000000000000000000000000000..a21733c0336d65f8181e1656529e3ce77985654c --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md @@ -0,0 +1,620 @@ +# **EulerCopilot Intelligent Assistant Deployment Guide** + +Version Information +Current Version: v0.9.5 +Update Date: April 1, 2025 + +## Product Overview + +EulerCopilot is an intelligent Q&A tool that solves the convenience of acquiring operating system knowledge and empowers developers and operations personnel in the OS domain model. As a knowledge acquisition tool for operating systems that enables OS productivity (such as A-Ops / A-Tune / x2openEuler / EulerMaker / EulerDevOps / StratoVirt / iSulad, etc.), it transforms the traditional command delivery method to a natural semantic approach and combines with the intelligent agent task planning capabilities, lowering the threshold for developing and using operating system features. + +This guide provides system deployment instructions for the EulerCopilot intelligent assistant based on automated scripts, supporting both one-click automatic deployment and manual step-by-step deployment methods. 
+ +### Component Introduction + +| Component | Port | Description | +| ----------------------------- | --------------- | ----------------------- | +| euler-copilot-framework | 8002 (internal port) | Intelligent agent framework service | +| euler-copilot-web | 8080 | Intelligent agent frontend interface | +| euler-copilot-rag | 9988 (internal port) | Retrieval-Augmented Generation service | +| authhub-backend-service | 11120 (internal port) | Authentication service backend | +| authhub-web-service | 8000 | Authentication service frontend | +| mysql | 3306 (internal port) | MySQL database | +| redis | 6379 (internal port) | Redis database | +| minio | 9000 (internal port) 9001 (external port) | minio database | +| mongo | 27017 (internal port) | mongo database | +| postgres | 5432 (internal port) | Vector database | +| secret_inject | None | Secure configuration file replication tool | + +### Software Requirements + +| Type | Version Requirements | Description | +|------|----------------------|-------------| +| Operating System | openEuler 22.03 LTS and above | None | +| K3s | >= v1.30.2, with Traefik Ingress tool | K3s provides a lightweight Kubernetes cluster, easy to deploy and manage | +| Helm | >= v3.15.3 | Helm is a Kubernetes package management tool, aimed at quickly installing, upgrading, and uninstalling EulerCopilot services | +| python | >=3.9.9 | Python 3.9.9 and above provides the runtime environment for model download and installation | + +--- + +### Hardware Specifications + +| Hardware Resources | Minimum Configuration | Recommended Configuration | +|--------------------|-----------------------|-----------------------------| +| CPU | 4 cores | 16 cores or above | +| RAM | 4 GB | 64 GB | +| Storage | 32 GB | 64 GB | +| Large Model Name | deepseek-llm-7b-chat | DeepSeek-R1-Llama-8B | +| GPU Memory | NVIDIA RTX A4000 8GB | NVIDIA A100 80GB * 2 | + +**Key Notes**: + +- For pure CPU environments, it is recommended to implement functionality by calling the 
OpenAI interface or using the built-in model deployment method. +- If k8s cluster environment exists, there is no need to install k3s separately, requiring version >= 1.28 + +--- + +### Deployment View + +![Deployment Diagram](./pictures/deployment-overview.png) + +--- + +### Domain Name Configuration + +Two service domain names need to be prepared: + +- Authhub authentication service: `authhub.eulercopilot.local` +- EulerCopilot web service: `www.eulercopilot.local` + +```bash +# Configure on local Windows host +# Open C:\Windows\System32\drivers\etc\hosts to add records +# Replace 127.0.0.1 with the target server IP +127.0.0.1 authhub.eulercopilot.local +127.0.0.1 www.eulercopilot.local +``` + +## Quick Start + +### 1. Resource Acquisition + +- **Obtain Deployment Scripts** + [Official Git repository of EulerCopilot](https://gitee.com/openeuler/euler-copilot-framework) + Switch to the dev branch, click to download ZIP, and upload to the target server + + ```bash + unzip euler-copilot-framework.tar -d /home + ``` + +- **Resource List** + + - **Download Address** + [EulerCopilot Resource Repository](https://repo.oepkgs.net/openEuler/rpm/openEuler-22.03-LTS/contrib/eulercopilot) + - **Image Files** + + ```bash + # Use script to automatically download all images (execute in network-connected environment) + cd /home/euler-copilot-framework/deploy/scripts/9-other-script/ + # Execute image download (version number can be replaced) + ./save_images.sh 0.9.5 # Automatically saved to directory /home/eulercopilot/images + # Upload to target server + scp /home/eulercopilot/images/* root@target-server:/home/eulercopilot/images/ + ``` + + - **Model deployment files**: `bge-m3-Q4_K_M.gguf`, `deepseek-llm-7b-chat-Q4_K_M.gguf`, `ollama-linux-arm64.tgz/ollama-linux-amd64.tgz` + - **Tool packages**: `helm-v3.15.0-linux-arm64.tar.gz/helm-v3.15.0-linux-amd64.tar.gz`, `k3s-airgap-images-arm64.tar.zst/k3s-airgap-images-amd64.tar.zst`, `k3s-arm64/k3s-amd64`, `k3s-install.sh` + +- **Key 
Notes** + + - **Network Requirements** + - Manual download requires ensuring there is a Windows environment with access to external network files. All downloads must be completed and then transferred to the offline environment. + - Script download requires execution on a network-connected server, only completing image downloads, then transfer to the offline environment. + - **Ensure the target server has the following directories** + + ```bash + /home/eulercopilot/ + ├── images/ # Store image files + ├── models/ # Store model files + └── tools/ # Store tool packages + ``` + +### 2. Deploy EulerCopilot + +#### One-click Deployment + +```bash +cd /home/euler-copilot-framework/deploy/scripts +``` + +```bash +bash deploy.sh +``` + +```bash +# Enter 0 for one-click automatic deployment +============================== + Main Deployment Menu +============================== +0) One-click automatic deployment +1) Manual step-by-step deployment +2) Restart services +3) Uninstall all components and clear data +4) Exit program +============================== +Please enter option number (0-3): 0 +``` + +--- + +#### Manual Step-by-step Deployment + +```bash +# Choose 1 -> 1 to enter manual step-by-step deployment +============================== + Main Deployment Menu +============================== +0) One-click automatic deployment +1) Manual step-by-step deployment +2) Restart services +3) Uninstall all components and clear data +4) Exit program +============================== +Please enter option number (0-3): 1 +``` + +```bash +# Enter option number (0-9) for step-by-step deployment +============================== + Manual Step-by-step Deployment Menu +============================== +1) Execute environment check script +2) Install k3s and helm +3) Install Ollama +4) Deploy Deepseek model +5) Deploy Embedding model +6) Install databases +7) Install AuthHub +8) Install EulerCopilot +9) Return to main menu +============================== +Please enter option number (0-9): +``` 
+ +--- + +**Key Notes**: + +1. Resource preparation is required before installing and deploying +2. During the deployment process, you need to input the Authhub domain and EulerCopilot domain. If not input, default domains `authhub.eulercopilot.local`, `www.eulercopilot.local` will be used. + +#### Restart Services + +```bash +# Enter option to restart services +============================== + Service Restart Menu +============================== +Restartable service list: +1) authhub-backend +2) authhub +3) framework +4) minio +5) mongo +6) mysql +7) pgsql +8) rag +9) rag-web +10) redis +11) web +12) Return to main menu +============================== +Please enter the number of the service to restart (1-12): +``` + +#### Uninstall All Components + +```bash +sudo ./deploy.sh +# Choose 2 for complete uninstallation +============================== + Main Deployment Menu +============================== +0) One-click automatic deployment +1) Manual step-by-step deployment +2) Uninstall all components and clear data +3) Exit program +============================== +Please enter option number (0-3): 2 +``` + +--- + +**Key Notes**: + +- During the deployment process, you need to input the Authhub domain and EulerCopilot domain. If not input, default domains `authhub.eulercopilot.local`, `www.eulercopilot.local` will be used. +- When resources are insufficient, refer to FAQ to evaluate resource availability +- View component logs + +```bash +kubectl logs -n euler-copilot +``` + +- View service status + +```bash +kubectl get pod -n euler-copilot +``` + +- Modify large model configuration and update EulerCopilot + +```bash +cd /home/euler-copilot-framework/deploy/chart/euler-copilot +``` + +```bash +vim values.yaml +``` + +```bash +helm upgrade euler-copilot -n euler-copilot . +``` + +## Installation Verification + +Congratulations, **EulerCopilot** has been successfully deployed! 
To begin your usage, enter the `https://Your EulerCopilot domain` link in your browser to access the EulerCopilot web interface: + +When visiting for the first time, you need to click the **Register Now** button on the page to create a new account and complete the login process. + +![Web Login Interface](./pictures/web-login.png) +![Web Interface](./pictures/web.png) + +## Build Custom Domain-Specific Intelligent Q&A + +Click on the knowledge base to log into the local knowledge base management page. For detailed information, please refer to the documentation [Local Asset Repository Construction Guide](../../../advance/knowledge_base/deploy_guide/witchaind_deployment.md) +**Default account for knowledge base login `admin`, password `123456`** + +--- + +## Appendix + +### Large Model Preparation + +#### GPU Environment + +The deployed deepseek large model can be used directly. Refer to the following deployment method: + +1. Download model files + + ```bash + huggingface-cli download --resume-download Qwen/Qwen1.5-14B-Chat --local-dir Qwen1.5-14B-Chat + ``` + +2. Create terminal control + + ```bash + screen -S control + ``` + + ```bash + python3 -m fastchat.serve.controller + ``` + + Press Ctrl A+D to put it in the background + +3. Create new terminal api + + ```bash + screen -S api + ``` + + ```bash + python3 -m fastchat.serve.openai_api_server --host 0.0.0.0 --port 30000 --api-keys sk-123456 + ``` + + Press Ctrl A+D to put it in the background + If the current environment's Python version is 3.12 or 3.9, a conda virtual environment with Python 3.10 can be created + + ```bash + mkdir -p /root/py310 + ``` + + ```bash + conda create --prefix=/root/py310 python==3.10.14 + ``` + + ```bash + conda activate /root/py310 + ``` + +4. 
Create new terminal worker + + ```bash + screen -S worker + ``` + + ```bash + screen -r worker + ``` + + Install fastchat and vllm + + ```bash + pip install fschat vllm + ``` + + Install dependencies: + + ```bash + pip install fschat[model_worker] + ``` + + ```bash + python3 -m fastchat.serve.vllm_worker --model-path /root/models/Qwen1.5-14B-Chat/ --model-name qwen1.5 --num-gpus 8 --gpu-memory-utilization=0.7 --dtype=half + ``` + + Press Ctrl A+D to put it in the background + +5. Modify the large model parameters in the configuration as follows, and update the service. + + ```bash + vim /home/euler-copilot-framework/deploy/chart/euler_copilot/values.yaml + ``` + + Modify the following parts + + ```yaml + # Model settings + models: + # Large model for Q&A; must be OpenAI-compatible interface + answer: + # [Required] Interface URL (without "v1" suffix) + url: http://172.168.178.107:11434 + # [Required] Interface API Key; defaults to empty + key: sk-123456 + # [Required] Model name + name: deepseek-llm-7b-chat:latest + # [Required] Maximum context length for the model; recommended >= 8192 + ctx_length: 8192 + # Maximum output length for the model, recommended >= 2048 + max_tokens: 2048 + # Model for Function Call; recommended to use a specific inference framework + functioncall: + # Inference framework type, default is ollama + # Available framework types: ["vllm", "sglang", "ollama", "openai"] + backend: + # Model address; if not filled, same as Q&A model + url: ollama + # API Key; if not filled, same as Q&A model + key: + # Model name; if not filled, same as Q&A model + name: + # Maximum context length; if not filled, same as Q&A model + ctx_length: + # Maximum output length; if not filled, same as Q&A model + max_tokens: + # Model for data vectorization (Embedding) + embedding: + # Inference framework type, default is openai + # [Required] Embedding interface type: ["openai", "mindie"] + type: openai + # [Required] Embedding URL (with "v1" suffix) + url: 
http://172.168.178.107:11434 + # [Required] Embedding model API Key + key: sk-123456 + # [Required] Embedding model name + name: bge-m3:latest + ``` + + ```bash + # Update the service + helm upgrade -n euler-copilot euler-copilot . + # Restart framework service + kubectl get pod -n euler-copilot + kubectl delete pod framework-deploy-65b669fc58-q9bw7 -n euler-copilot + ``` + +#### NPU Environment + +NPU environment deployment can refer to the link [MindIE Installation Guide](https://www.hiascend.com/document/detail/zh/mindie/10RC2/whatismindie/mindie_what_0001.html) + +### FAQ + +#### 1. Resolve Hugging Face Connection Error + +If encountering connection errors like the following: + +```text +urllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 101] Network is unreachable +``` + +Try the following solutions: + +- Update the `huggingface_hub` package to the latest version. + + ```bash + pip3 install -U huggingface_hub + ``` + +- If network issues persist, try using a mirror site as the endpoint. + + ```bash + export HF_ENDPOINT=https://hf-mirror.com + ``` + +#### 2. Invoke Q&A Interface in RAG Container + +After entering the corresponding RAG Pod, you can send POST requests via the `curl` command to get Q&A results. Please ensure that the specific question text is provided in the request body. + +```bash +curl -k -X POST "http://localhost:9988/kb/get_answer" \ + -H "Content-Type: application/json" \ + -d '{ + "question": "Your question", + "kb_sn": "default_test", + "fetch_source": true + }' +``` + +#### 3. Resolve `helm upgrade` Error + +When the Kubernetes cluster is unreachable, you might encounter error messages similar to the following: + +```text +Error: UPGRADE FAILED: Kubernetes cluster unreachable +``` + +Ensure the correct KUBECONFIG environment variable is set to point to a valid configuration file. + +```bash +echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /root/.bashrc +source /root/.bashrc +``` + +#### 4. 
Failed to View Pod Logs + +If you encounter permission denied issues when viewing Pod logs, check if the proxy settings are correctly configured and add the local IP address to the `no_proxy` environment variable. + +```bash +cat /etc/systemd/system/k3s.service.env +``` + +Edit the file to ensure it includes: + +```bash +no_proxy=XXX.XXX.XXX.XXX +``` + +#### 5. Large Model Streaming Response Issue in GPU Environment + +For some services where curl to the large model fails to stream responses, try changing the `"stream"` parameter in the request to `false`. Also, confirm that the compatible version of the Pydantic library is installed. + +```bash +pip install pydantic==1.10.13 +``` + +#### 6. sglang Model Deployment Guide + +Deploy the sglang-based model following the steps below: + +```bash +# 1. Activate the Conda environment named `myenv`, which was created with Python 3.10: +conda activate myenv + +# 2. Install sglang and all of its dependencies, pinned to version 0.3.0 +pip install "sglang[all]==0.3.0" + +# 3. Install flashinfer from the dedicated index to ensure compatibility with your CUDA and PyTorch versions +pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ + +# 4. Start the server using sglang with the following configuration: +python -m sglang.launch_server \ + --served-model-name Qwen2.5-32B \ + --model-path Qwen2.5-32B-Instruct-AWQ \ + --host 0.0.0.0 \ + --port 8001 \ + --api-key "sk-12345" \ + --mem-fraction-static 0.5 \ + --tp 8 +``` + +- Verify installation + + ```bash + pip show sglang + pip show flashinfer + ``` + +**Important Notes:** + +- API Key: Please ensure the API key in the `--api-key` parameter is correct +- Model Path: Ensure the path in the `--model-path` parameter is correct and the model files exist at that path +- CUDA Version: Ensure your system has CUDA 12.1 and PyTorch 2.4 installed, as the `flashinfer` package depends on these specific versions +- Thread Pool Size: Adjust the thread pool size based on your GPU resources and expected load. If you have 8 GPUs, you can choose --tp 8 to fully utilize these resources + +#### 7. 
Get Embedding + +Use curl to send a POST request to get embedding results: + +```bash +curl -k -X POST http://localhost:11434/v1/embeddings \ + -H "Content-Type: application/json" \ + -d '{"input": "The food was delicious and the waiter...", "model": "bge-m3", "encoding_format": "float"}' +``` + +#### 8. Generate Certificates + +To generate self-signed certificates, first download the [mkcert](https://github.com/FiloSottile/mkcert/releases) tool, then run the following commands: + +```bash +mkcert -install +mkcert example.com +``` + +Finally, copy the generated certificate and private key to values.yaml and apply it to the Kubernetes Secret. + +```bash +vim /home/euler-copilot-framework_openeuler/deploy/chart_ssl/traefik-secret.yaml +``` + +```bash +kubectl apply -f traefik-secret.yaml +``` + +#### 9. Troubleshooting Methods + +1. **Get Cluster Event Information** + + To better identify the cause of Pod failures, first check the events (Events) in the Kubernetes cluster. This can provide contextual information about Pod state changes. + + ```bash + kubectl get events -n euler-copilot + ``` + +2. **Verify Image Pull Status** + + Confirm whether the container image was successfully pulled. If the image fails to load correctly, it may be due to network issues or incorrect image repository configuration. + + ```bash + k3s crictl images + ``` + +3. **Review Pod Logs** + + Check the logs of related Pods to look for possible error messages or abnormal behavior. This is particularly useful for diagnosing application-level issues. + + ```bash + kubectl logs rag-deploy-service-5b7887644c-sm58z -n euler-copilot + ``` + +4. **Assess Resource Availability** + + Ensure the Kubernetes cluster has sufficient resources (such as CPU, memory, and storage) to support Pod operation. Insufficient resources may cause image pull failures or other performance issues, or cause Pod status to change from Running to Pending or Completed. Check disk space and ensure at least 30% is available. 
This helps maintain stable Pod operation. Refer to this link to mount a disk with larger space [How to move k3s data to another location](https://mrkandreev.name/snippets/how_to_move_k3s_data_to_another_location/) + + ```bash + kubectl top nodes + ``` + +5. **Confirm k3s Version Compatibility** + + If you encounter image pull failures with image size 0, please check if your k3s version meets the minimum requirements (v1.30.2 or higher). Lower versions may have compatibility issues. + + ```bash + k3s -v + ``` + +6. **Check Configuration** + + Check if the OIDC configuration and domain configuration in the `values.yaml` file are filled in correctly, and update the service after ensuring the configuration is correct. + + ```bash + cat /home/euler-copilot-framework/deploy/chart/euler_copilot + ``` + + ```bash + vim values.yaml | grep oidc + ``` + + ```bash + helm upgrade euler-copilot -n euler-copilot . + ``` diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md new file mode 100644 index 0000000000000000000000000000000000000000..304f9ca3122c950a25e5f891b765509a2e16852e --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md @@ -0,0 +1,596 @@ +# **EulerCopilot Intelligent Assistant Deployment Guide** + +Version Information +Current Version: v0.9.5 +Update Date: April 1, 2025 + +## Product Overview + +EulerCopilot is an intelligent Q&A tool that uses EulerCopilot to solve the convenience of operating system knowledge acquisition and empowers developers and operations personnel with OS domain models. 
As a tool for acquiring operating system knowledge and enabling operating system productivity tools (such as A-Ops / A-Tune / x2openEuler / EulerMaker / EulerDevOps / StratoVirt / iSulad, etc.), it subverts traditional command delivery methods, evolving from traditional command delivery methods to natural semantics, and combines intelligent agent task planning capabilities to lower the threshold for developing and using operating system features. + +This guide provides deployment instructions for the EulerCopilot intelligent assistant system based on automated scripts, supporting both one-click automatic deployment and manual step-by-step deployment. + +### Component Introduction + +| Component | Port | Description | +| ----------------------------- | --------------- | -------------------- | +| euler-copilot-framework | 8002 (internal port) | Intelligent agent framework service | +| euler-copilot-web | 8080 | Intelligent agent frontend interface | +| euler-copilot-rag | 9988 (internal port) | Retrieval enhancement service | +| authhub-backend-service | 11120 (internal port) | Authentication service backend | +| authhub-web-service | 8000 | Authentication service frontend | +| mysql | 3306 (internal port) | MySQL database | +| redis | 6379 (internal port) | Redis database | +| minio | 9000 (internal port) 9001(external port) | MinIO database | +| mongo | 27017 (internal port) | MongoDB database | +| postgres | 5432 (internal port) | Vector database | +| secret_inject | None | Configuration file secure copy tool | + +### Software Requirements + +| Type | Version Requirements | Description | +|----------------| -------------------------------------|--------------------------------------| +| Operating System | openEuler 22.03 LTS and above | None | +| K3s | >= v1.30.2, with Traefik Ingress tools | K3s provides a lightweight Kubernetes cluster that is easy to deploy and manage | +| Helm | >= v3.15.3 | Helm is a Kubernetes package management tool designed to quickly 
install, upgrade, and uninstall EulerCopilot services | +| Python | >=3.9.9 | Python 3.9.9 and above provides the runtime environment for model download and installation | + +--- + +### Hardware Specifications + +| Hardware Resource | Minimum Configuration | Recommended Configuration | +|--------------|----------------------------|------------------------------| +| CPU | 4 cores | 16 cores and above | +| RAM | 4 GB | 64 GB | +| Storage | 32 GB | 64G | +| Large Model Name | deepseek-llm-7b-chat | DeepSeek-R1-Llama-8B | +| VRAM (GPU) | NVIDIA RTX A4000 8GB | NVIDIA A100 80GB * 2 | + +**Key Notes**: + +- For pure CPU environments, it is recommended to implement functionality by calling OpenAI interfaces or using the built-in model deployment method. +- If it's a k8s cluster environment, there's no need to install k3s separately, requiring version >= 1.28 + +--- + +### Deployment View + +![Deployment Diagram](./pictures/deployment-overview.png) + +--- + +### Domain Configuration + +Prepare the following two service domains: + +- AuthHub authentication service: `authhub.eulercopilot.local` +- EulerCopilot web service: `www.eulercopilot.local` + +```bash +# Configure in local Windows host +# Open C:\Windows\System32\drivers\etc\hosts to add records +# Replace 127.0.0.1 with the target server's IP +127.0.0.1 authhub.eulercopilot.local +127.0.0.1 www.eulercopilot.local +``` + +## Quick Start + +### 1. Get Deployment Scripts + +- Download the latest deployment repository from EulerCopilot's official Git repository [euler-copilot-framework](https://gitee.com/openeuler/euler-copilot-framework) +- If you are using Kubernetes, there's no need to install the k3s tool. + +```bash +# Use home directory as download example +cd /home +``` + +```bash +git clone https://gitee.com/openeuler/euler-copilot-framework.git -b dev +``` + +```bash +cd euler-copilot-framework/deploy/scripts +``` + +```bash +# Add executable permissions to script files +chmod -R +x ./* +``` + +### 2. 
Deploy EulerCopilot + +#### **One-Click Deployment** + +```bash +cd /home/euler-copilot-framework/deploy/scripts +``` + +```bash +bash deploy.sh +``` + +```bash +# Enter 0 for one-click automatic deployment +============================== + Main Deployment Menu +============================== +0) One-Click Automatic Deployment +1) Manual Step-by-Step Deployment +2) Restart Services +3) Uninstall All Components and Clear Data +4) Exit Program +============================== +Please enter option number (0-3): 0 +``` + +--- + +#### **Step-by-Step Deployment** + +```bash +# Select 1 -> 1 to enter manual step-by-step deployment +============================== + Main Deployment Menu +============================== +0) One-Click Automatic Deployment +1) Manual Step-by-Step Deployment +2) Restart Services +3) Uninstall All Components and Clear Data +4) Exit Program +============================== +Please enter option number (0-3): 1 +``` + +```bash +# Enter option number (0-9) to deploy step by step +============================== + Manual Step-by-Step Deployment Menu +============================== +1) Execute Environment Check Script +2) Install k3s and helm +3) Install Ollama +4) Deploy Deepseek Model +5) Deploy Embedding Model +6) Install Databases +7) Install AuthHub +8) Install EulerCopilot +9) Return to Main Menu +============================== +Please enter option number (0-9): +``` + +--- + +#### **Restart Services** + +```bash +# Enter option to restart services +============================== + Service Restart Menu +============================== +List of services that can be restarted: +1) authhub-backend +2) authhub +3) framework +4) minio +5) mongo +6) mysql +7) pgsql +8) rag +9) rag-web +10) redis +11) web +12) Return to Main Menu +============================== +Please enter the service number to restart (1-12): +``` + +#### **Uninstall All Components** + +```bash +sudo ./deploy.sh +# Select 2 for complete uninstallation +============================== + 
Main Deployment Menu +============================== +0) One-Click Automatic Deployment +1) Manual Step-by-Step Deployment +2) Uninstall All Components and Clear Data +3) Exit Program +============================== +Please enter option number (0-3): 2 +``` + +--- + +**Key Notes**: + +- During deployment, you need to enter the AuthHub domain and EulerCopilot domain. If not entered, the default domains `authhub.eulercopilot.local` and `www.eulercopilot.local` will be used. +- When resources are insufficient, refer to the FAQ section for solutions on evaluating resource availability +- View component logs + +```bash +kubectl logs <pod-name> -n euler-copilot +``` + +- View service status + +```bash +kubectl get pod -n euler-copilot +``` + +- Modify large model configuration and update EulerCopilot + +```bash +cd /home/euler-copilot-framework/deploy/chart/euler-copilot +``` + +```bash +vim values.yaml +``` + +```bash +helm upgrade euler-copilot -n euler-copilot . +``` + +## Verify Installation + +Congratulations! **EulerCopilot** has been successfully deployed! To start your experience, please enter the link `https://your-eulercopilot-domain` in your browser to access the EulerCopilot web interface: + +On your first visit, you need to click the **Register Now** button on the page to create a new account and complete the login process. + +![Web Login Interface](./pictures/web-login.png) +![Web Interface](./pictures/web.png) + +## Build Domain-Specific Intelligent Q&A + +Click on Knowledge Base to log into the local knowledge base management page. For detailed information, please refer to the document [Local Asset Library Construction Guide](../../../advance/knowledge_base/deploy_guide/witchaind_deployment.md) +**Knowledge Base default login account: `admin`, password: `123456`** + +--- + +## Appendix + +### Large Model Preparation + +#### GPU Environment + +You can directly use the deployed deepseek large model. Refer to the following deployment method: + +1. 
Download model files: + + ```bash + huggingface-cli download --resume-download Qwen/Qwen1.5-14B-Chat --local-dir Qwen1.5-14B-Chat + ``` + +2. Create terminal control + + ```bash + screen -S control + ``` + + ```bash + python3 -m fastchat.serve.controller + ``` + + Press Ctrl A+D to put it in the background + +3. Create new terminal api + + ```bash + screen -S api + ``` + + ```bash + python3 -m fastchat.serve.openai_api_server --host 0.0.0.0 --port 30000 --api-keys sk-123456 + ``` + + Press Ctrl A+D to put it in the background + If the current environment's Python version is 3.12 or 3.9, you can create a conda virtual environment with Python 3.10 + + ```bash + mkdir -p /root/py310 + ``` + + ```bash + conda create --prefix=/root/py310 python==3.10.14 + ``` + + ```bash + conda activate /root/py310 + ``` + +4. Create new terminal worker + + ```bash + screen -S worker + ``` + + ```bash + screen -r worker + ``` + + Install fastchat and vllm + + ```bash + pip install fschat vllm + ``` + + Install dependencies: + + ```bash + pip install fschat[model_worker] + ``` + + ```bash + python3 -m fastchat.serve.vllm_worker --model-path /root/models/Qwen1.5-14B-Chat/ --model-name qwen1.5 --num-gpus 8 --gpu-memory-utilization=0.7 --dtype=half + ``` + + Press Ctrl A+D to put it in the background + +5. Modify the configured large model parameters as follows and update the service. 
+ + ```bash + vim /home/euler-copilot-framework/deploy/chart/euler_copilot/values.yaml + ``` + + Modify the following section + + ```yaml + # Model Settings + models: + # Large model for Q&A; needs to be OpenAI-compatible interface + answer: + # [Required] Interface URL (no need to include "v1" suffix) + url: http://172.168.178.107:11434 + # [Required] Interface API Key; default is empty + key: sk-123456 + # [Required] Model name + name: deepseek-llm-7b-chat:latest + # [Required] Model maximum context length; recommended >=8192 + ctx_length: 8192 + # Model maximum output length, recommended >=2048 + max_tokens: 2048 + # Model for Function Call; recommended to use specific inference framework + functioncall: + # Inference framework type, default is ollama + # Available framework types: ["vllm", "sglang", "ollama", "openai"] + backend: ollama + # Model address; if not filled, same as Q&A model + url: + # API Key; if not filled, same as Q&A model + key: + # Model name; if not filled, same as Q&A model + name: + # Model maximum context length; if not filled, same as Q&A model + ctx_length: + # Model maximum output length; if not filled, same as Q&A model + max_tokens: + # Model for data vectorization (Embedding) + embedding: + # Inference framework type, default is openai + # [Required] Embedding interface type: ["openai", "mindie"] + type: openai + # [Required] Embedding URL (needs to include "v1" suffix) + url: http://172.168.178.107:11434 + # [Required] Embedding model API Key + key: sk-123456 + # [Required] Embedding model name + name: bge-m3:latest + ``` + + ```bash + # Update service + helm upgrade -n euler-copilot euler-copilot . 
+ # Restart framework service + kubectl get pod -n euler-copilot + kubectl delete pod framework-deploy-65b669fc58-q9bw7 -n euler-copilot + ``` + +#### NPU Environment + +For NPU environment deployment, refer to the link [MindIE Installation Guide](https://www.hiascend.com/document/detail/zh/mindie/10RC2/whatismindie/mindie_what_0001.html) + +### FAQ + +#### 1. Resolve Hugging Face Connection Errors + +If you encounter the following connection error: + +```text +urllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 101] Network is unreachable +``` + +Try the following solutions: + +- Update the `huggingface_hub` package to the latest version. + + ```bash + pip3 install -U huggingface_hub + ``` + +- If network issues persist, try using mirror sites as endpoints. + + ```bash + export HF_ENDPOINT=https://hf-mirror.com + ``` + +#### 2. Call Q&A Interface in RAG Container + +After entering the corresponding RAG Pod, you can send POST requests via `curl` commands to get Q&A results. Please ensure to provide specific question text in the request body. + +```bash +curl -k -X POST "http://localhost:9988/kb/get_answer" \ + -H "Content-Type: application/json" \ + -d '{ + "question": "Your question", + "kb_sn": "default_test", + "fetch_source": true + }' +``` + +#### 3. Resolve `helm upgrade` Errors + +When the Kubernetes cluster is unreachable, you may encounter error messages similar to the following: + +```text +Error: UPGRADE FAILED: Kubernetes cluster unreachable +``` + +Ensure that the correct KUBECONFIG environment variable is set to point to a valid configuration file. + +```bash +echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /root/.bashrc +source /root/.bashrc +``` + +#### 4. Failed to View Pod Logs + +If you encounter permission denied issues when viewing Pod logs, check if proxy settings are correctly configured and add your local IP address to the `no_proxy` environment variable. 
+ +```bash +cat /etc/systemd/system/k3s.service.env +``` + +Edit the file and ensure it contains: + +```bash +no_proxy=XXX.XXX.XXX.XXX +``` + +#### 5. Large Model Streaming Response Issues in GPU Environment + +For cases where certain services cannot perform streaming responses when executing curl with large models, try modifying the `"stream"` parameter in the request to `false`. Additionally, confirm that a compatible version of the Pydantic library is installed. + +```bash +pip install pydantic==1.10.13 +``` + +#### 6. sglang Model Deployment Guide + +Follow these steps to deploy models based on sglang: + +```bash +# 1. Activate the conda environment named `myenv`, which is created based on Python 3.10: +conda activate myenv + +# 2. Install sglang and all its dependencies, specifying version 0.3.0 +pip install "sglang[all]==0.3.0" + +# 3. Install flashinfer from a specific index to ensure compatibility with your CUDA and PyTorch versions +pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ + +# 4. Start the server using sglang with the following configuration: +python -m sglang.launch_server \ + --served-model-name Qwen2.5-32B \ + --model-path Qwen2.5-32B-Instruct-AWQ \ + --host 0.0.0.0 \ + --port 8001 \ + --api-key "sk-12345" \ + --mem-fraction-static 0.5 \ + --tp 8 +``` + +- Verify installation + + ```bash + pip show sglang + pip show flashinfer + ``` + +**Important Notes:** + +- API Key: Please ensure the API key in the `--api-key` parameter is correct +- Model Path: Ensure the path in the `--model-path` parameter is correct and that the model files exist at that path. +- CUDA Version: Ensure you have CUDA 12.1 and PyTorch 2.4 installed on your system, as the `flashinfer` package depends on these specific versions. +- Tensor Parallelism: Adjust the tensor parallel size (`--tp`) based on your GPU resources and expected load. If you have 8 GPUs, you can choose --tp 8 to fully utilize these resources. + +#### 7. 
Get Embedding + +Use curl to send POST requests to get embedding results: + +```bash +curl -k -X POST http://localhost:11434/v1/embeddings \ + -H "Content-Type: application/json" \ + -d '{"input": "The food was delicious and the waiter...", "model": "bge-m3", "encoding_format": "float"}' +``` + +#### 8. Generate Certificates + +To generate self-signed certificates, first download the [mkcert](https://github.com/FiloSottile/mkcert/releases) tool, then run the following commands: + +```bash +mkcert -install +mkcert example.com +``` + +Finally, copy the generated certificate and private key to values.yaml and apply them to the Kubernetes Secret. + +```bash +vim /home/euler-copilot-framework_openeuler/deploy/chart_ssl/traefik-secret.yaml +``` + +```bash +kubectl apply -f traefik-secret.yaml +``` + +#### 9. Troubleshooting Methods + +1. **Get Cluster Event Information** + + To better locate the cause of Pod failures, first check the events in the Kubernetes cluster. This can provide contextual information about Pod state changes. + + ```bash + kubectl get events -n euler-copilot + ``` + +2. **Verify Image Pull Status** + + Confirm whether container images are successfully pulled. If images fail to load correctly, it may be due to network issues or incorrect image repository configuration. + + ```bash + k3s crictl images + ``` + +3. **Review Pod Logs** + + Check the logs of relevant Pods to look for possible error messages or abnormal behavior. This is particularly useful for diagnosing application-level issues. + + ```bash + kubectl logs rag-deploy-service-5b7887644c-sm58z -n euler-copilot + ``` + +4. **Evaluate Resource Availability** + + Ensure the Kubernetes cluster has sufficient resources (such as CPU, memory, and storage) to support Pod operation. Insufficient resources may cause image pull failures or other performance issues, or cause Pod status to change from Running to Pending or Completed. Check disk space and ensure at least 30% free space. 
This helps maintain stable Pod operation. Refer to this link to mount disks with larger space [How to move k3s data to another location](https://mrkandreev.name/snippets/how_to_move_k3s_data_to_another_location/) + + ```bash + kubectl top nodes + ``` + +5. **Confirm k3s Version Compatibility** + + If you encounter image pull failures with image size 0, please check if your k3s version meets the minimum requirements (v1.30.2 or higher). Lower versions may have compatibility issues. + + ```bash + k3s -v + ``` + +6. **Check Configuration** + + Check whether the OIDC configuration and domain configuration in the `values.yaml` file are filled in correctly, and update the service after ensuring the configuration is correct. + + ```bash + cat /home/euler-copilot-framework/deploy/chart/euler_copilot + ``` + + ```bash + vim values.yaml | grep oidc + ``` + + ```bash + helm upgrade euler-copilot -n euler-copilot . + ``` diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/deployment-overview.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/deployment-overview.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/web-login.png similarity index 100% rename from 
"docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/web-login.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/web.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/web.png diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/_toc.yaml b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/_toc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..594dec7f4ea81089052df0c0a83d9bb0aa002cc0 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/_toc.yaml @@ -0,0 +1,17 @@ +# User Guide for Intelligent Assistant +label: Intelligent Assistant User Guide +isManual: true +description: Guide for using the openEuler Intelligent Assistant +sections: + - label: Quick Start + sections: + - label: Web Q&A + sections: + - label: User Guide + sections: + - label: Introduction + href: introduction.md + - label: Registration and Login + href: registration_and_login.md + - label: Q&A Usage + href: qa_guide.md \ No newline at end of file diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md new file mode 100644 index 
0000000000000000000000000000000000000000..c43fec83f822b106579e6ee36b6df4096d266fa0 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md @@ -0,0 +1,68 @@ +# Introduction + +## Overview + +This document introduces the usage of EulerCopilot, providing detailed descriptions of various features of the EulerCopilot online service web interface, along with common Q&A. For detailed information, please refer to the corresponding manuals. + +## Target Audience + +This document is primarily intended for EulerCopilot users. Users should possess the following experience and skills: + +- Familiarity with openEuler operating system related information. +- Experience with AI dialogue systems. + +## Revision History + +| Document Version | Release Date | Revision Description | +|------------------|--------------|---------------------| +| 03 | 2024-09-19 | Updated new interface. | +| 02 | 2024-05-13 | Optimized intelligent dialogue operation guidance. | +| 01 | 2024-01-28 | First official release. | + +## Introductions + +### Disclaimer + +- Usernames and passwords used during the process that are not part of the tool's own verification functionality are not used for other purposes and will not be saved in the system environment. +- Before engaging in dialogue or operations, you should confirm that you are the owner of the application or have obtained sufficient authorization consent from the owner. +- Dialogue results may contain internal information and related data of the applications you analyze. Please manage them appropriately. +- Unless otherwise stipulated by laws and regulations or bilateral contracts, the openEuler community makes no express or implied statements and warranties regarding analysis results, and makes no guarantees or commitments regarding the merchantability, satisfaction, non-infringement, or fitness for a particular purpose of analysis results. 
+- Any actions you take based on analysis records should comply with legal and regulatory requirements, and you bear the risks yourself. +- Without owner authorization, no individual or organization may use applications and related analysis records for any activities. The openEuler community is not responsible for any consequences arising there from, nor does it assume any legal liability. Legal liability will be pursued when necessary. + +### EulerCopilot Introduction + +EulerCopilot is an artificial intelligence assistant based on the openEuler operating system that helps users solve various technical problems and provides technical support and consulting services. It uses the most advanced natural language processing technology and machine learning algorithms to understand user questions and provide corresponding solutions. + +### Application Scenarios + +1. **OS Domain General Knowledge**: EulerCopilot can provide consultation on Linux general knowledge, upstream information, and toolchain introduction and guidance. +2. **openEuler Professional Knowledge**: EulerCopilot can provide consultation on openEuler community information, technical principles, and usage guidance. +3. **openEuler Extended Knowledge**: EulerCopilot can provide consultation on openEuler peripheral hardware feature knowledge and ISV, OSV related information. +4. **openEuler Application Cases**: EulerCopilot can provide openEuler technical cases and industry application cases. +5. **Shell Command Generation**: EulerCopilot can help users generate single shell commands or complex commands. + +In summary, EulerCopilot can be applied to various scenarios, helping users improve work efficiency and understand related knowledge about Linux, openEuler, and more. + +### Access and Usage + +EulerCopilot is accessed and used through a web page via URL. For account registration and login, please refer to [Registration and Login](./registration_and_login.md). 
For usage methods, please refer to [Intelligent Q&A User Guide](./qa_guide.md). + +### Interface Description + +#### Interface Layout + +The EulerCopilot interface mainly consists of the areas shown in Figure 1, with the functions of each area described in Table 1. + +- Figure 1: EulerCopilot Interface + +![Copilot Interface](./pictures/main-page-sections.png) + +- Table 1: EulerCopilot Homepage Interface Area Description + +| Area | Name | Description | +|------|------|-------------| +| 1 | Settings Management Area | Provides account login and logout operation entries and light/dark mode toggle functionality | +| 2 | Dialogue Management Area | Used for users to create new dialogues, manage dialogue history, and perform batch deletion of dialogue history | +| 3 | Dialogue Area | Used for dialogue chat between users and EulerCopilot | +| 4 | Service Agreement and Privacy Policy Area | Provides access to view service agreements and privacy policies | diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login-click2signup.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login-click2signup.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login-click2signup.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login-click2signup.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login.png rename to 
docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-signup.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-signup.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-signup.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-signup.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-confirmation.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-confirmation.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-confirmation.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-confirmation.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-multi-select.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-multi-select.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-multi-select.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-multi-select.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete.png similarity index 100% 
rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/chat-area.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/chat-area.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/chat-area.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/chat-area.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/context-support.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/context-support.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/context-support.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/context-support.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session-confirmation.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session-confirmation.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session-confirmation.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session-confirmation.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback-illegal.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback-illegal.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback-illegal.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback-illegal.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback-misinfo.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback-misinfo.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback-misinfo.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback-misinfo.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/feedback.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-ask-against-file.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-ask-against-file.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-ask-against-file.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-ask-against-file.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn-prompt.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn-prompt.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn-prompt.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn-prompt.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-history-tag.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-history-tag.png similarity index 100% rename from 
docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-history-tag.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-history-tag.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-parsing.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-parsing.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-parsing.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-parsing.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-showcase.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-showcase.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-showcase.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-showcase.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-uploading.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-uploading.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-uploading.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-uploading.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-arrow-next.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-arrow-next.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-arrow-next.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-arrow-next.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-arrow-prev.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-arrow-prev.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-arrow-prev.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-arrow-prev.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-cancel.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-cancel.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-cancel.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-cancel.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-confirm.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-confirm.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-confirm.png rename to 
docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-confirm.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-edit.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-edit.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-edit.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-edit.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-search.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-search.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-search.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-search.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-thumb-down.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-thumb-down.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-thumb-down.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-thumb-down.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-thumb-up.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-thumb-up.png similarity index 100% rename from 
docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-thumb-up.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-thumb-up.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-user.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-user.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-user.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-user.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/login-popup.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/login-popup.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/login-popup.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/login-popup.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/logout.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/logout.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/logout.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/logout.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-clean-ref.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-clean-ref.png similarity index 100% rename 
from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-clean-ref.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-clean-ref.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-sections.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-sections.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-sections.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-sections.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/new-chat.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/new-chat.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/new-chat.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/new-chat.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-list.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-list.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-list.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-list.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-result.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-selected.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-selected.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-selected.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-selected.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-suggestion.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-suggestion.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-suggestion.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-suggestion.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-1.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-1.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-1.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-1.png 
diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-2-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-2-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-2-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-2-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-2.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-2.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-2.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-case-step-2.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-fill-in-param-result.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-fill-in-param-result.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-fill-in-param-result.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-fill-in-param-result.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-fill-in-param.png 
b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-fill-in-param.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-fill-in-param.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/plugin-workflow-fill-in-param.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/privacy-policy-entry.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/privacy-policy-entry.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/privacy-policy-entry.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/privacy-policy-entry.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/privacy-policy.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/privacy-policy.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/privacy-policy.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/privacy-policy.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/recommend-questions.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/recommend-questions.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/recommend-questions.png rename to 
docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/recommend-questions.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/regenerate.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/regenerate.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/regenerate.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/regenerate.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session-confirmation.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session-confirmation.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session-confirmation.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session-confirmation.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/report-options.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/report-options.png similarity index 100% rename from 
docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/report-options.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/report-options.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/report.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/report.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/report.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/report.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/search-history.png b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/search-history.png similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/search-history.png rename to docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/search-history.png diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..6ed27a63caa6e1fb7561df611a29e0b489287b03 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md @@ -0,0 +1,179 @@ +# Intelligent Q&A User Guide + +## Start a Conversation + +In the input box below the conversation area, you can enter the content you want to ask. Press `Shift + Enter` for line breaks, press `Enter` to send your question, or click "Send" to submit your question. 
+ +> **Note** +> +> The conversation area is located in the main part of the page, as shown in Figure 1. + +- Figure 1 Conversation Area + ![Conversation Area](./pictures/chat-area.png) + +### Multi-turn Continuous Conversation + +EulerCopilot intelligent Q&A supports multi-turn continuous conversations. You can simply continue asking follow-up questions in the same conversation, as shown in Figure 2. + +- Figure 2 Multi-turn Conversation + ![Multi-turn Conversation](./pictures/context-support.png) + +### Regenerate Response + +If you encounter situations where AI-generated content is incorrect or incomplete, you can ask AI to regenerate the answer. Click the "Regenerate" text on the bottom left of the AI response to have EulerCopilot answer your question again. After regenerating, pagination icons ![Previous Page](./pictures/icon-arrow-prev.png) and ![Next Page](./pictures/icon-arrow-next.png) will appear on the bottom right of the response. Click ![Previous Page](./pictures/icon-arrow-prev.png) or ![Next Page](./pictures/icon-arrow-next.png) to view different responses, as shown in Figure 3. + +- Figure 3 Regenerate Response + ![Regenerate Response](./pictures/regenerate.png) + +### Recommended Questions + +Below the AI response, some recommended questions will be displayed. Click on them to ask questions, as shown in Figure 4. + +- Figure 4 Recommended Questions + ![Recommended Questions](./pictures/recommend-questions.png) + +## Custom Background Knowledge + +EulerCopilot supports file upload functionality. After uploading files, AI will use the uploaded file content as background knowledge and incorporate it when answering questions. The uploaded background knowledge only applies to the current conversation and will not affect other conversations. + +### Upload Files + +**Step 1** Click the "Upload File" button in the bottom left corner of the conversation area, as shown in Figure 5. 
+ +- Figure 5 Upload File Button + ![Upload File](./pictures/file-upload-btn.png) + +> **Note** +> +> Hover your mouse over the "Upload File" button to see prompts about the allowed file specifications and formats, as shown in Figure 6. + +- Figure 6 Mouse Hover Shows Upload File Specification Prompts + ![Upload File Prompts](./pictures/file-upload-btn-prompt.png) + +**Step 2** In the popup file selection dialog, select the file you want to upload and click "Open" to upload. You can upload up to 10 files with a total size limit of 64MB. Accepted formats include PDF, docx, doc, txt, md, and xlsx. + +After starting the upload, the upload progress will be displayed below the conversation area, as shown in Figure 7. + +- Figure 7 All Files Being Uploaded Simultaneously Are Arranged Below the Q&A Input Box + ![Upload File](./pictures/file-upload-uploading.png) + +After file upload is complete, it will be automatically parsed, as shown in Figure 8. After parsing is complete, the file size information for each file will be displayed below the conversation area. + +- Figure 8 After File Upload to Server, "Parsing" Will Be Displayed + ![File Parsing](./pictures/file-upload-parsing.png) + +After successful file upload, the left history record area will display the number of uploaded files, as shown in Figure 9. + +- Figure 9 The Uploaded File Count Will Be Displayed on the Conversation History Record Tab + ![History Record Mark](./pictures/file-upload-history-tag.png) + +### Ask Questions About Files + +After file upload is complete, you can ask questions about the files. The questioning method is the same as the regular conversation mode, as shown in Figure 10. +The answer result is shown in Figure 11. 
+ +- Figure 10 Ask Questions Related to the Uploaded Files + ![Ask Questions About Files](./pictures/file-upload-ask-against-file.png) + +- Figure 11 AI Answers Based on Uploaded Background Knowledge + ![Answer Based on Custom Background Knowledge](./pictures/file-upload-showcase.png) + +## Manage Conversations + +> **Note** +> +> The conversation management area is on the left side of the page. + +### Create New Conversation + +Click the "New Conversation" button to create a new conversation, as shown in Figure 12. + +- Figure 12 "New Conversation" Button is Located in the Upper Left of the Page + ![New Conversation](./pictures/new-chat.png) + +### Search Conversation History + +Enter keywords in the history search input box on the left side of the page, then click ![Search](./pictures/icon-search.png) to search conversation history, as shown in Figure 13. + +- Figure 13 Conversation History Search Box + ![Conversation History Search](./pictures/search-history.png) + +### Manage Individual Conversation History Records + +The history record list is located below the history search bar. On the right side of each conversation history record, click ![Edit](./pictures/icon-edit.png) to edit the name of the conversation history record, as shown in Figure 14. + +- Figure 14 Click "Edit" Icon to Rename History Record + ![Rename History Record](./pictures/rename-session.png) + +After rewriting the conversation history record name, click ![Confirm](./pictures/icon-confirm.png) on the right to complete the renaming, or click ![Cancel](./pictures/icon-cancel.png) on the right to abandon this renaming, as shown in Figure 15. + +- Figure 15 Complete/Cancel Renaming History Record + ![Complete/Cancel Renaming History Record](./pictures/rename-session-confirmation.png) + +Additionally, click the delete icon on the right side of the conversation history record, as shown in Figure 16, to perform secondary confirmation for deleting a single conversation history record. 
In the secondary confirmation popup, as shown in Figure 17, click "Confirm" to confirm deletion of the single conversation history record, or click "Cancel" to cancel this deletion. + +- Figure 16 Click "Trash" Icon to Delete Single History Record + ![Delete Single History Record](./pictures/delete-session.png) + +- Figure 17 Secondary Confirmation Before Deleting History Record + ![Secondary Confirmation for Deleting Single History Record](./pictures/delete-session-confirmation.png) + +### Batch Delete Conversation History Records + +First, click "Batch Delete", as shown in Figure 18. + +- Figure 18 Batch Delete Function is Located Above the Right Side of the History Search Box + ![Batch Delete](./pictures/bulk-delete.png) + +Then you can select history records for deletion, as shown in Figure 19. Click "Select All" to select all history records, or click on a single history record or the selection box on the left side of the history record to select individual history records. + +- Figure 19 Check the Box on the Left to Select History Records for Batch Deletion + ![Batch Delete History Record Selection](./pictures/bulk-delete-multi-select.png) + +Finally, secondary confirmation is required for batch deleting history records, as shown in Figure 20. Click "Confirm" to delete, or click "Cancel" to cancel this deletion. + +- Figure 20 Secondary Confirmation Before Deleting Selected History Records + ![Batch Delete Secondary Confirmation](./pictures/bulk-delete-confirmation.png) + +## Feedback and Report + +In the conversation record area, on the bottom right side of the AI response, you can provide feedback on the conversation response, as shown in Figure 21. Click ![Thumbs Up](./pictures/icon-thumb-up.png) to give the conversation response a thumbs up; click ![Thumbs Down](./pictures/icon-thumb-down.png) to provide feedback on why you're dissatisfied with the response. 
+ +- Figure 21 Thumbs Up and Dissatisfaction Feedback + ![Thumbs Up and Dissatisfaction Feedback](./pictures/feedback.png) + +For feedback on dissatisfaction reasons, as shown in Figure 22, after clicking ![Thumbs Down](./pictures/icon-thumb-down.png), the chatbot will display a dialog box for filling in feedback content, where you can choose relevant options for dissatisfaction reasons. + +- Figure 22 Dissatisfaction Feedback for Response + ![Dissatisfaction Feedback for Response](./pictures/feedback-illegal.png) + +Among them, clicking "Contains Incorrect Information" requires filling in reference answer links and descriptions, as shown in Figure 23. + +- Figure 23 Dissatisfaction Feedback for Response - Contains Incorrect Information + ![Dissatisfaction Feedback for Response - Contains Incorrect Information](./pictures/feedback-misinfo.png) + +### Report + +If you find that AI-generated content contains inappropriate information, you can click the report button in the bottom right corner, as shown in Figure 24. After clicking report, select the report type and submit. If there are no suitable options, please select "Other" and enter the reason, as shown in Figure 25. + +- Figure 24 Report Button is Located in the Bottom Right Corner of the Conversation Block + ![Report 1](./pictures/report.png) + +- Figure 25 After Clicking, You Can Select Report Type + ![Report 2](./pictures/report-options.png) + +## View Service Agreement and Privacy Policy + +Click on the text "Service Agreement" to view the service agreement, and click on the text "Privacy Policy" to view the privacy policy, as shown in Figures 26 and 27. 
+ +- Figure 26 Service Agreement and Privacy Policy Entry Points are Located in the Bottom Information Bar of the Page + ![Service Agreement and Privacy Policy Entry](./pictures/privacy-policy-entry.png) + +- Figure 27 After Clicking, Service Agreement or Privacy Policy Popup Will Be Displayed + ![Service Agreement and Privacy Policy](./pictures/privacy-policy.png) + +## Appendix + +### User Information Export Instructions + +EulerCopilot backend has user information export functionality. If users need it, they must actively contact us through the email. Operations staff will send the exported user information back to users via email. diff --git a/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md new file mode 100644 index 0000000000000000000000000000000000000000..4bf9fffa5f0df1bf238c4c5b5637fccd99ec2cfd --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md @@ -0,0 +1,60 @@ +# Logging into EulerCopilot + +This section introduces the specific steps to log into the deployed EulerCopilot web client. + +## Browser Requirements + +The browser requirements are shown in Table 1. + +- Table 1 Browser Requirements + +| Browser Type | Minimum Version | Recommended Version | +| ----- | ----- | ----- | +| Google Chrome | 72 | 121 or higher | +| Mozilla Firefox | 89 | 122 or higher | +| Apple Safari | 11.0 | 16.3 or higher | + +## Procedure + +**Step 1** Open the browser on your local PC, enter the configured domain name from the deployment guide in the address bar, and press `Enter`. When not logged in, accessing EulerCopilot will display a login prompt pop-up box, as shown in Figure 1. + +- Figure 1 Not Logged In + +![Not Logged In](./pictures/login-popup.png) + +**Step 2** Log into EulerCopilot (with an existing account). 
+ +Open the login interface, as shown in Figure 2. + +- Figure 2 Logging into EulerCopilot + +![Logging into EulerCopilot](./pictures/authhub-login.png) + +## Registering an EulerCopilot Account + +**Step 1** Click "Register Now" in the bottom right corner of the login information input box, as shown in Figure 3. + +- Figure 3 Register Now + +![Register Now](./pictures/authhub-login-click2signup.png) + +**Step 2** Go to the account registration page and fill in the relevant information according to the page prompts, as shown in Figure 4. + +- Figure 4 Account Registration + +![Account Registration](./pictures/authhub-signup.png) + +**Step 3** After filling in the account information as required by the page, click "Register" to successfully register. After registration, you can return to the login page. + +## Logging Out + +**Step 1** Click ![Logout](./pictures/icon-user.png) to display the logout dropdown menu, as shown in Figure 5. + +> **Note** +> The account management area is located at the top right of the page, as shown in Figure 5. + +- Figure 5 Account Management Area + +![Account Management Area](./pictures/logout.png) + +**Step 2** Click "Logout" to log out, as shown in Figure 5. 
diff --git a/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/_toc.yaml b/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/_toc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2b8a86cf7894955fc8e5c8bba34b1deda2aacf24 --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/_toc.yaml @@ -0,0 +1,6 @@ +label: Intelligent Vulnerability Patching User Guide +isManual: true +description: Supports vulnerability patching for the kernel repository +sections: + - label: openEuler Intelligent Vulnerability Patching User Guide + href: ./intelligent_vulnerability_patching_user_guide.md diff --git a/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/intelligent_vulnerability_patching_user_guide.md b/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/intelligent_vulnerability_patching_user_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..0ee977c12da96fb47aa83ea0fe2a990dfe4c136d --- /dev/null +++ b/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/intelligent_vulnerability_patching_user_guide.md @@ -0,0 +1,59 @@ +# openEuler Intelligent Vulnerability Patching User Guide + +## Introduction + +Intelligent vulnerability patching provides the capability to intelligently patch the openEuler kernel repository ([https://gitee.com/openeuler/kernel](https://gitee.com/openeuler/kernel)). Currently, it offers CVE impact scope analysis and patch PR creation functionality. Use the `/analyze` and `/create_pr` commands in the comments below CVE-representing issues to execute these features. + +## Feature Entry + +In the src-openEuler kernel repository ([https://gitee.com/src-openeuler/kernel.git](https://gitee.com/src-openeuler/kernel.git)), comment below the CVE-representing issues. + +![CVE Screenshot](pictures/cve.png) + +## /analyze Command + +The `/analyze` command provides the capability to analyze the impact scope of CVEs. 
By commenting `/analyze` below an issue, it will automatically analyze the current maintenance scope of openEuler versions, determining whether each openEuler version introduces the CVE and whether it has been patched. + +![/analyze Command](pictures/analyze.png) + +> [!NOTE]Note +> The /analyze command takes no parameters + +CVE introduction status includes the following scenarios: + +* No impact +* Affected + +CVE patching status includes the following scenarios: + +* Not patched +* Patched + +At the end of the comment, links to the introduction patch and the fix patch will be attached. + +## /create_pr Command + +The `/create_pr` command provides the capability to intelligently fix CVE patches. By commenting `/create_pr ` below an issue, it will automatically obtain vulnerability patches and merge them into the openEuler linux repository ([https://gitee.com/openeuler/kernel.git](https://gitee.com/openeuler/kernel.git)) by creating PRs. + +![/create_pr Command](pictures/create_pr.png) + +The `/create_pr` command accepts parameters, including the following scenarios: + +```shell +# Create a patch PR for the OLK-5.10 branch +/create_pr OLK-5.10 + +# Create PRs for OLK-5.10 and OLK-6.6 branches +/create_pr OLK-5.10 OLK-6.6 + +# For all current upstream branches, including openEuler-1.0-LTS, OLK-5.10, and OLK-6.6 branches +/create_pr +``` + +Return results include: + +* PR creation successful +* No fix patch available +* Cannot fix, conflicts exist + +If there are conflicts between the patch code and the fix branch, it will prompt `Cannot fix, conflicts exist`. This capability will be iteratively enhanced in future versions. 
diff --git "a/docs/en/openEuler_intelligence/intelligent_vulnerability_patching/pictures/analyze\345\221\275\344\273\244.png" b/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/pictures/analyze.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_vulnerability_patching/pictures/analyze\345\221\275\344\273\244.png" rename to docs/en/openeuler_intelligence/intelligent_vulnerability_patching/pictures/analyze.png diff --git "a/docs/en/openEuler_intelligence/intelligent_vulnerability_patching/pictures/create_pr\345\221\275\344\273\244.png" b/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/pictures/create_pr.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_vulnerability_patching/pictures/create_pr\345\221\275\344\273\244.png" rename to docs/en/openeuler_intelligence/intelligent_vulnerability_patching/pictures/create_pr.png diff --git "a/docs/en/openEuler_intelligence/intelligent_vulnerability_patching/pictures/\344\273\243\350\241\250CVE\347\232\204issue.png" b/docs/en/openeuler_intelligence/intelligent_vulnerability_patching/pictures/cve.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_vulnerability_patching/pictures/\344\273\243\350\241\250CVE\347\232\204issue.png" rename to docs/en/openeuler_intelligence/intelligent_vulnerability_patching/pictures/cve.png diff --git a/docs/en/openeuler_intelligence/mcp_agent/_toc.yaml b/docs/en/openeuler_intelligence/mcp_agent/_toc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7af448ed9960b64b9cf3359549c785b3ca155d09 --- /dev/null +++ b/docs/en/openeuler_intelligence/mcp_agent/_toc.yaml @@ -0,0 +1,6 @@ +label: openEuler MCP Service Guide +isManual: true +description: openEuler Intelligent MCP +sections: + - label: openEuler MCP Service Guide + href: ./mcp_guide.md \ No newline at end of file diff --git a/docs/en/openeuler_intelligence/mcp_agent/mcp_guide.md 
b/docs/en/openeuler_intelligence/mcp_agent/mcp_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..68b35325d03ddabed51005746b9008b6fb89a0fc --- /dev/null +++ b/docs/en/openeuler_intelligence/mcp_agent/mcp_guide.md @@ -0,0 +1,3 @@ +# MCP Service Guide + +(Current content is being updated, please wait.) diff --git a/docs/zh/ai_full_stack/ai_container_image_userguide/_toc.yaml b/docs/zh/ai_full_stack/ai_container_image_userguide/_toc.yaml index 8782a2915974178dadfa7b97efcb19df99069a72..6cec8dda55332335d1fe0c70bce89728f9cb1694 100644 --- a/docs/zh/ai_full_stack/ai_container_image_userguide/_toc.yaml +++ b/docs/zh/ai_full_stack/ai_container_image_userguide/_toc.yaml @@ -3,4 +3,4 @@ isManual: true description: openEuler AI 容器镜像封装了 AI 框架等软件,提高 AI 应用开发或使用效率 sections: - label: AI容器镜像用户指南 - href: ./ai-container-image-user-guide.md + href: ./ai_container_image_user_guide.md diff --git a/docs/zh/ai_full_stack/ai_container_image_userguide/ai-container-image-user-guide.md b/docs/zh/ai_full_stack/ai_container_image_userguide/ai_container_image_user_guide.md similarity index 74% rename from docs/zh/ai_full_stack/ai_container_image_userguide/ai-container-image-user-guide.md rename to docs/zh/ai_full_stack/ai_container_image_userguide/ai_container_image_user_guide.md index b433f2f4dcb8d609d014e8ecf3f77d37d5fe5773..c28f36bbe86b9156f3c35974a93c3746110fee57 100644 --- a/docs/zh/ai_full_stack/ai_container_image_userguide/ai-container-image-user-guide.md +++ b/docs/zh/ai_full_stack/ai_container_image_userguide/ai_container_image_user_guide.md @@ -8,11 +8,11 @@ openEuler AI 容器镜像封装了不同硬件算力的 SDK 以及 AI 框架、 目前,openEuler 已发布支持 Ascend 和 NVIDIA 平台的容器镜像,获取路径如下: -- [openeuler/cann](https://hub.docker.com/r/openeuler/cann) 存放 SDK 类镜像,在 openEuler 基础镜像之上安装 CANN 系列软件,适用于 Ascend 环境。 -- [openeuler/cuda](https://hub.docker.com/r/openeuler/cuda) 存放 SDK 类镜像,在 openEuler 基础镜像之上安装 CUDA 系列软件,适用于 NVIDIA 环境。 -- [openeuler/pytorch](https://hub.docker.com/r/openeuler/pytorch) 存放 AI 框架类镜像,在 
SDK 镜像基础之上安装 PyTorch,根据安装的 SDK 软件内容区分适用平台。 -- [openeuler/tensorflow](https://hub.docker.com/r/openeuler/tensorflow) 存放 AI 框架类镜像,在 SDK 镜像基础之上安装 TensorFlow,根据安装的 SDK 软件内容区分适用平台。 -- [openeuler/llm](https://hub.docker.com/r/openeuler/tensorrt) 存放模型应用类镜像,在 AI 框架镜像之上包含特定大模型及工具链,根据安装的 SDK 软件内容区分适用平台。 +- `docker.io/openeuler/cann` 存放 SDK 类镜像,在 openEuler 基础镜像之上安装 CANN 系列软件,适用于 Ascend 环境。 +- `docker.io/openeuler/cuda` 存放 SDK 类镜像,在 openEuler 基础镜像之上安装 CUDA 系列软件,适用于 NVIDIA 环境。 +- `docker.io/openeuler/pytorch` 存放 AI 框架类镜像,在 SDK 镜像基础之上安装 PyTorch,根据安装的 SDK 软件内容区分适用平台。 +- `docker.io/openeuler/tensorflow` 存放 AI 框架类镜像,在 SDK 镜像基础之上安装 TensorFlow,根据安装的 SDK 软件内容区分适用平台。 +- `docker.io/openeuler/llm` 存放模型应用类镜像,在 AI 框架镜像之上包含特定大模型及工具链,根据安装的 SDK 软件内容区分适用平台。 详细的 AI 容器镜像分类和镜像 tag 的规范说明见[oEEP-0014](https://gitee.com/openeuler/TC/blob/master/oEEP/oEEP-0014%20openEuler%20AI容器镜像软件栈规范.md)。 @@ -26,7 +26,7 @@ docker pull image:tag ## 启动容器 -1. 在环境中安装`docker`,官方安装方法见[Install Docker Engine](https://docs.docker.com/engine/install/),也可直接通过如下命令进行安装。 +1. 
在环境中安装`docker`,官方安装方法见 `https://docs.docker.com/engine/install/`,也可直接通过如下命令进行安装。 ```sh yum install -y docker diff --git a/docs/zh/ai_full_stack/ai_large_model_service_images_userguide/_toc.yaml b/docs/zh/ai_full_stack/ai_large_model_service_images_userguide/_toc.yaml index bed3b23bcec452195f1b6a830c6c7e3a017413b0..e4765568ccc2d1478573f457bf3fe636ff583949 100644 --- a/docs/zh/ai_full_stack/ai_large_model_service_images_userguide/_toc.yaml +++ b/docs/zh/ai_full_stack/ai_large_model_service_images_userguide/_toc.yaml @@ -3,4 +3,4 @@ isManual: true description: 支持百川、chatglm、星火等AI大模型的容器化封装 sections: - label: AI大模型服务镜像使用指南 - href: ./llm-service-image-user-guide.md + href: ./llm_service_image_user_guide.md diff --git a/docs/zh/ai_full_stack/ai_large_model_service_images_userguide/llm-service-image-user-guide.md b/docs/zh/ai_full_stack/ai_large_model_service_images_userguide/llm_service_image_user_guide.md similarity index 100% rename from docs/zh/ai_full_stack/ai_large_model_service_images_userguide/llm-service-image-user-guide.md rename to docs/zh/ai_full_stack/ai_large_model_service_images_userguide/llm_service_image_user_guide.md diff --git a/docs/zh/intelligent_foundation/sysHAX/deploy_guide/_toc.yaml b/docs/zh/intelligent_foundation/syshax/deploy_guide/_toc.yaml similarity index 75% rename from docs/zh/intelligent_foundation/sysHAX/deploy_guide/_toc.yaml rename to docs/zh/intelligent_foundation/syshax/deploy_guide/_toc.yaml index 6bf313f0c1c4a0b7b05b5f1c2814c8f083f6d4db..92a32e90ccf0368a8970930a40d85609c9631b03 100644 --- a/docs/zh/intelligent_foundation/sysHAX/deploy_guide/_toc.yaml +++ b/docs/zh/intelligent_foundation/syshax/deploy_guide/_toc.yaml @@ -3,4 +3,4 @@ isManual: true description: 异构协同加速运行 sections: - label: sysHAX部署指南 - href: ./sysHax-deployment-guide.md + href: ./syshax_deployment_guide.md diff --git a/docs/zh/intelligent_foundation/syshax/deploy_guide/pictures/syshax-deploy.png 
b/docs/zh/intelligent_foundation/syshax/deploy_guide/pictures/syshax-deploy.png new file mode 100644 index 0000000000000000000000000000000000000000..466c24fca4227dd00fc450cf9e405debe60811a5 Binary files /dev/null and b/docs/zh/intelligent_foundation/syshax/deploy_guide/pictures/syshax-deploy.png differ diff --git a/docs/zh/intelligent_foundation/sysHAX/deploy_guide/sysHax-deployment-guide.md b/docs/zh/intelligent_foundation/syshax/deploy_guide/syshax_deployment_guide.md similarity index 99% rename from docs/zh/intelligent_foundation/sysHAX/deploy_guide/sysHax-deployment-guide.md rename to docs/zh/intelligent_foundation/syshax/deploy_guide/syshax_deployment_guide.md index a8573f7b4368a2d3a0a2e85928507b1d6425e516..452ccd2f4885015bcbf12a0087d6409e3f4f3b8d 100644 --- a/docs/zh/intelligent_foundation/sysHAX/deploy_guide/sysHax-deployment-guide.md +++ b/docs/zh/intelligent_foundation/syshax/deploy_guide/syshax_deployment_guide.md @@ -12,7 +12,7 @@ sysHAX功能定位为K+X异构融合推理加速,主要包含两部分功能 sysHAX共包含两部分交付件: -![syshax-deploy](pictures/syshax-deploy.png "syshax-deploy") +![syshax-deploy](pictures/syshax-deploy.png) 交付件包括: - sysHAX:负责请求的处理和prefill、decode请求的调度 diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" deleted file mode 100644 index 74ae942b5a5217b8a5e34a2b2cd8d32a49be7a00..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" deleted file mode 100644 index 8f1de7892e04be698310691d2cfdeb07cbfa579d..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" deleted file mode 100644 index e59e8b669c3039341655eadd75ce1fda5cda1776..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" deleted file mode 100644 index 68ae1c7cb11e663cabbf1225b188fdfd628bf549..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" deleted file mode 100644 index d90f6182fb6ec63f868a5c2598de73db093775f2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" deleted file mode 100644 index 
374c3a2cc0be67a012ef8bf0ddc7688f97702d79..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" deleted file mode 100644 index 297ad86cac9226084483816f0c88c9116071b675..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" deleted file mode 100644 index bb9be4e33ce470865fe5a07decbc056b9ee4e9bb..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" deleted file mode 100644 index fddbab4df70b940d5d5ed26fb8ec688f1592b5e8..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" deleted file mode 100644 index 341828b1b6f728888d1dd52eec755033680155da..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" deleted file mode 100644 index dfc28f4046fd4d61f48a0b0903ae2cf565ec5bc3..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" deleted file mode 100644 index 92685c5d977abe55f5d201aa57da479c8af84561..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" deleted file mode 100644 index c4b71d6def0b6407f721cf3c137d714d923f86f1..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" deleted file mode 100644 index 3458c5330fad7b8c89cb0bc8efb70f875d6f17d2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" deleted file mode 100644 index 469871fa9483a698b03374c3686b22156ad6e33a..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" deleted file mode 100644 index 8aba84e49c981c8f81cb91b14eee64f179bf0b38..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" deleted file mode 100644 index 
7932773ccf59f58a283caccb92bd5af9475a7be9..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" deleted file mode 100644 index 50805afdb4764b74d9d16067999d7b39ce901d2a..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" deleted file mode 100644 index 8eb29b167f6ff1c2d951cd841f2340b027dec808..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" and 
/dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" deleted file mode 100644 index 9da6121b1c1271c5b09c9292690ba3ab8d0a6cd2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" deleted file mode 100644 index a533772ce715bbf2c4a9f374b03e7fe20bf470a1..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" deleted file mode 100644 index 
659ebeae5b25738043f7750c7cc44a1e80557ed8..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" deleted file mode 100644 index 22b02fff81fe1db3232b80607da6f10f710c8c64..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" deleted file mode 100644 index 913a5ce34a0a3e95af29e7c4433e5367c0adf008..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" deleted file mode 100644 index a1c6dc638d0dbd51abc374d563da150ff328cbe3..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" deleted file mode 100644 index f0449b134e1ebe5d54ca46099b57c6ad0b949eca..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" deleted file mode 100644 index e3d3ba7727d53490b22ecc7a1b422d5ae03390d3..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" deleted file mode 100644 index e018cb0904b414d63e1008209adb47c0b8afb858..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" deleted file mode 100644 index 7bf98b8217dda2358621fe9b11164407e2040ae8..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" deleted file mode 100644 index 795e99cdad03b2a3377fe77e51e336c6a6ca5b29..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" deleted file mode 100644 index 8031fec14e15b0e80e596f21cf79fe2b58ff7293..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" deleted file mode 100644 index a871907f348317e43633cf05f5241cb978476fb4..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" deleted file mode 100644 index d82c736a94b106a30fd8d1f7b781f9e335bb441f..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" deleted file mode 100644 index 473a0006c9710c92375e226a760c3a79989312f9..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" deleted file mode 100644 index cfee6d88da56bc939886caece540f7de8cf77bbc..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" deleted file mode 100644 index b1d93f9c9d7587aa88a27d7e0bf185586583d438..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" deleted file mode 100644 index fec3cdaa2b260e50f5523477da3e58a9e14e2130..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 624459821de4542b635eeffa115eeba780929a4e..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" deleted file mode 100644 index 3104717bfa8f6615ad6726577a24938bc29884b2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" deleted file mode 100644 index 454b9fdfa4b7f209dc370f78677a2f4e71ea49be..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" deleted file mode 100644 index d52d25d4778f6db2d2ec076d65018c40cd1da4d3..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 82ed79c0154bd8e406621440c4e4a7caaab7e06e..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" deleted file mode 100644 index 
7dd2dea945f39ada1d7dd053d150a995b160f203..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 84737b4185ce781d7b32ab42d39b8d2452138dad..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" deleted file mode 100644 index be89bdfde2518bba3941eee5d475f52ad9124343..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" deleted file mode 100644 index 27530840aaa5382a226e1ed8baea883895d9d75e..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" deleted file mode 100644 index aa04e6f7f0648adfca1240c750ca5b79b88da5f9..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" deleted file mode 100644 index 74905172c0c0a0acc4c4d0e35efd2493dc421c4e..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" deleted file mode 100644 index 432fbfcd02f6d2220e7d2a8512aee893d67be24d..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" deleted file mode 100644 index a4f4ea8a3999a9ab659ccd9ea39b80b21ff46e84..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" deleted file mode 100644 index 675b40297363664007f96948fb21b1cb90d6beea..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" deleted file mode 100644 index 8fc0c988e8b3830c550c6be6e42b88ac13448d1a..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" deleted file mode 100644 index 5c897e9883e868bf5160d92cb106ea4e4e9bc356..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" deleted file mode 100644 index 407e49b929b7ff4cf14703046a4ba0bfe1bb441e..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" deleted file mode 100644 index a4f4ea8a3999a9ab659ccd9ea39b80b21ff46e84..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 45ab521ec5f5afbd81ad54f023aae3b7a867dbf2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" deleted file mode 100644 index 90ed5624ae93ff9784a750514c53293df4e961f0..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" deleted file mode 100644 index 7b2cc38a931c9c236517c14c86fa93e3eb2b6dcd..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" deleted file mode 100644 index 1365a8d69467dec250d3451ac63e2615a2194c18..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" deleted file mode 100644 index 1bd944264baa9369e6f8fbfd04cabcd12730c0e9..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" deleted file mode 100644 index 58bcd320e145dd29d9e5d49cb6d86964ebb83b51..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" deleted file mode 100644 index 809b785b999b6663d9e9bd41fed953925093d6bd..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" deleted file mode 100644 index 62ba5f6615f18deb3d5a71fd68ee8c929638d814..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" deleted file mode 100644 index d32c672fafcb0ef665bda0bcfdce19d2df44db01..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" deleted file mode 100644 index a5ecd6b65abc97320e7467f00d82ff1fd9bf0e44..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\351\200\211\346\213\251\350\265\204\344\272\247\345\272\223.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\351\200\211\346\213\251\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index bab7e2b93757c71659836cbcd9d842905f74b4f4..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\351\200\211\346\213\251\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" deleted file mode 100644 index 181bf1d2ddbe15cfd296c27df27d865bdbce8d69..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" and /dev/null differ diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witChainD_guidance.md b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witChainD_guidance.md deleted file mode 100644 index 4759a57baa4e35ee529e9f4da70e1d1405612e6e..0000000000000000000000000000000000000000 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witChainD_guidance.md +++ /dev/null @@ -1,87 +0,0 @@ -# witChainD 使用指南——知识库管理 - -完成 witChainD 部署之后,即可使用 witChainD 进行知识库管理管理。 - -下文会从页面的维度进行 witChainD 的功能介绍。 - -## 1. 
资产库管理界面 - -该页面为资产库管理界面,用户登录后将会进入该界面。 - -![资产库管理界面](./pictures/资产库管理界面.png) - -**支持操作:** - -- 配置模型:点击右上角的设置按键,可以修改模型相关的配置。 - - ![模型配置](./pictures/模型配置.png) - -- 新增资产库:点击新建资产库按钮新建,支持自定义名称、描述、语言、嵌入模型、解析方法、文件分块大小、文档类别。注意:重复名称会自动将名称修改成资产库id。 - - ![新增资产库](./pictures/新增资产库.png) - -- 编辑资产库:点击资产库的编辑按钮编辑,支持修改名称、描述、语言、解析方法、文件分块大小、文档类别。注意:不能修改成重复名称。 - - ![编辑资产库](./pictures/编辑资产库0.png) - - ![编辑资产库](./pictures/编辑资产库.png) - -- 导出资产库:点击资产库的导出按钮导出,导出完成后需要按任务列表中的下载任务下载对应资产库到本地。 - - ![导出资产库](./pictures/导出资产库.png) - -- 批量导入资产库:点击批量导入,上传本地文件后选中即可导入。 - - ![批量导入资产库](./pictures/批量导入资产库.png) - -- 搜索资产库:在搜索栏中键入文本,可以搜索得到名称包含对应文本的资产库。 - -## 2. 文档管理界面 - -在资产管理界面点击对应资产库,可以进入文档管理界面。 - -![文档管理界面](./pictures/文档管理界面.png) - -**支持操作:** - -- 导入文档:点击导入文档,从本地上传文件导入,导入后会自动以该资产库默认配置开始解析。 - - ![导入文档](./pictures/导入文档.png) - -- 解析文档:点击操作中的解析,对文档进行解析。也可以选中多个文档批量解析。 - - ![文档解析](./pictures/文档解析.png) - - ![文档解析2](./pictures/文档解析2.png) - - ![解析完成](./pictures/解析完成.png) - -- 编辑文档配置:点击编辑对文档配置进行编辑,支持编辑文档名称、解析方法、类别、文件分块大小。 - - ![编辑文档配置](./pictures/编辑文档配置.png) - -- 下载文档:点击下载即可将文档下载至本地,也可以选中多个文档批量下载。 - -- 删除文档:点击删除即可将文档从资产库中删除,也可以选中多个文档批量删除。 - -- 搜索文档:点击文档名称旁的搜索键,在弹出的搜索框中键入搜索的文本,可以搜索得到名称包含这些文本的文档。 - - ![搜索文档](./pictures/搜索文档.png) - -- 编辑资产库配置:支持编辑资产库名称、描述、语言、默认解析方法、文件分块大小、文档信息类别。 - - ![编辑资产库配置](./pictures/编辑资产库配置.png) - -## 3. 
解析结果管理界面 - -点击解析完成的文档,可以进入文档的解析结果管理界面。界面中会按照顺序显示文档解析后的文本块内容预览,每个文本块会附带一个标签,表示该文本块中的信息来源于文档中的段落、列表或者是图片。右侧的开关表示该文本块是否被启用。 - -![文本块结果预览](./pictures/文本块结果预览.png) - -**支持操作**: - -- 关闭/启用文本块:点击文本块右侧的开关即可关闭/启用对应文本块,也可以选中多个文本块批量关闭/启用。 - - ![批量启用](./pictures/批量启用.png) - -- 搜索文本块:在搜索框中键入内容,可以查找包含对应内容的文本块。 diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md deleted file mode 100644 index eb428ada7017effc1e0d58c18c71eae7ffe88050..0000000000000000000000000000000000000000 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md +++ /dev/null @@ -1,50 +0,0 @@ -# 智能插件:智能诊断 - -部署智能诊断工具后,可以通过 EulerCopilot 智能体框架实现对本机进行诊断。 -在智能诊断模式提问,智能体框架服务可以调用本机的诊断工具诊断异常状况、分析并生成报告。 - -## 操作步骤 - -**步骤1** 切换到“智能插件”模式 - -```bash -copilot -p -``` - -![切换到智能插件模式](./pictures/shell-plugin-diagnose-switch-mode.png) - -**步骤2** 异常事件检测 - -```bash -帮我进行异常事件检测 -``` - -按下 `Ctrl + O` 键提问,然后在插件列表中选择“智能诊断”。 - -![异常事件检测](./pictures/shell-plugin-diagnose-detect.png) - -**步骤3** 查看异常事件详情 - -```bash -查看 XXX 容器的异常事件详情 -``` - -![查看异常事件详情](./pictures/shell-plugin-diagnose-detail.png) - -**步骤4** 执行异常事件分析 - -```bash -请对 XXX 容器的 XXX 指标执行 profiling 分析 -``` - -![异常事件分析](./pictures/shell-plugin-diagnose-profiling.png) - -**步骤5** 查看异常事件分析报告 - -等待 5 至 10 分钟,然后查看分析报告。 - -```bash -查看 对应的 profiling 报告 -``` - -![执行优化脚本](./pictures/shell-plugin-diagnose-report.png) diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md b/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md deleted file mode 100644 index a9e915e1d80a36e8483d84729d319be5c8e873ec..0000000000000000000000000000000000000000 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md 
+++ /dev/null @@ -1,53 +0,0 @@ -# 智能插件:智能调优 - -部署智能调优工具后,可以通过 EulerCopilot 智能体框架实现对本机进行调优。 -在智能调优模式提问,智能体框架服务可以调用本机的调优工具采集性能指标,并生成性能分析报告和性能优化建议。 - -## 操作步骤 - -**步骤1** 切换到“智能调优”模式 - -```bash -copilot -t -``` - -![切换到智能调优模式](./pictures/shell-plugin-tuning-switch-mode.png) - -**步骤2** 采集性能指标 - -```bash -帮我进行性能指标采集 -``` - -![性能指标采集](./pictures/shell-plugin-tuning-metrics-collect.png) - -**步骤3** 生成性能分析报告 - -```bash -帮我生成性能分析报告 -``` - -![性能分析报告](./pictures/shell-plugin-tuning-report.png) - -**步骤4** 生成性能优化建议 - -```bash -请生成性能优化脚本 -``` - -![性能优化脚本](./pictures/shell-plugin-tuning-script-gen.png) - -**步骤5** 选择“执行命令”,运行优化脚本 - -![执行优化脚本](./pictures/shell-plugin-tuning-script-exec.png) - -- 脚本内容如图: - ![优化脚本内容](./pictures/shell-plugin-tuning-script-view.png) - -## 远程调优 - -如果需要对其他机器进行远程调优,请在上文示例的问题前面加上对应机器的 IP 地址。 - -例如:`请对 192.168.1.100 这台机器进行性能指标采集。` - -进行远程调优前请确保目标机器已部署智能调优工具,同时请确保 EulerCopilot 智能体框架能够访问目标机器。 diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/API_key.md b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/API_key.md deleted file mode 100644 index 5fc8699eb097e6a7dcb17409519343c46b9801ca..0000000000000000000000000000000000000000 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/API_key.md +++ /dev/null @@ -1,29 +0,0 @@ -# 获取 API Key - -## 前言 - -EulerCopilot 命令行助手使用 API Key 来验证用户身份,并获取 API 访问权限。 -因此,开始使用前,您需要先获取 API Key。 - -## 注意事项 - -- 请妥善保管您的 API Key,不要泄露给他人。 -- API Key 仅用于命令行助手与 DevStation 桌面端,不用于其他用途。 -- 每位用户仅可拥有一个 API Key,重复创建 API Key 将导致旧密钥失效。 -- API Key 仅在创建时显示一次,请务必及时保存。若密钥丢失,您需要重新创建。 -- 若您在使用过程中遇到“请求过于频繁”的错误,您的 API Key 可能已被他人使用,请及时前往官网刷新或撤销 API Key。 - -## 获取方法 - -1. 登录 EulerCopilot 网页端。 -2. 点击右上角头像,选择“API KEY”。 -3. 点击“新建”按钮。 -4. **请立即保存 API Key,它仅在创建时显示一次,请勿泄露给他人。** - -## 管理 API Key - -1. 登录 EulerCopilot 网页端。 -2. 点击右上角头像,选择“API KEY”。 -3. 
点击“刷新”按钮,刷新 API Key;点击“撤销”按钮,撤销 API Key。 - -- 刷新 API Key 后,旧密钥失效,请立即保存新生成的 API Key。 diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md deleted file mode 100644 index bb88c52f55a984d8115227454e67ed1970423583..0000000000000000000000000000000000000000 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md +++ /dev/null @@ -1,157 +0,0 @@ -# 命令行助手使用指南 - -## 简介 - -EulerCopilot 命令行助手是一个命令行(Shell)AI 助手,您可以通过它来快速生成 Shell 命令并执行,从而提高您的工作效率。除此之外,基于 Gitee AI 在线服务的标准版本还内置了 openEuler 的相关知识,可以助力您学习与使用 openEuler 操作系统。 - -## 环境要求 - -- 操作系统:openEuler 22.03 LTS SP3,或者 openEuler 24.03 LTS 及以上版本 -- 命令行软件: - - Linux 桌面环境:支持 GNOME、KDE、DDE 等桌面环境的内置终端 - - 远程 SSH 链接:支持兼容 xterm-256 与 UTF-8 字符集的终端 - -## 安装 - -EulerCopilot 命令行助手支持通过 OEPKGS 仓库进行安装。 - -### 配置 OEPKGS 仓库 - -```bash -sudo dnf config-manager --add-repo https://repo.oepkgs.net/openeuler/rpm/`sed 's/release //;s/[()]//g;s/ /-/g' /etc/openEuler-release`/extras/`uname -m` -``` - -```bash -sudo dnf clean all -``` - -```bash -sudo dnf makecache -``` - -### 安装命令行助手 - -```bash -sudo dnf install eulercopilot-cli -``` - -若遇到 `Error: GPG check FAILED` 错误,使用 `--nogpgcheck` 跳过检查。 - -```bash -sudo dnf install --nogpgcheck eulercopilot-cli -``` - -## 初始化 - -```bash -copilot --init -``` - -然后根据提示输入 API Key 完成配置。 - -![shell-init](./pictures/shell-init.png) - -初次使用前请先退出终端或重新连接 SSH 会话使配置生效。 - -- **查看助手帮助页面** - - ```bash - copilot --help - ``` - - ![shell-help](./pictures/shell-help.png) - -## 使用 - -在终端中输入问题,按下 `Ctrl + O` 提问。 - -### 快捷键 - -- 输入自然语言问题后,按下 `Ctrl + O` 可以直接向 AI 提问。 -- 直接按下 `Ctrl + O` 可以自动填充命令前缀 `copilot`,输入参数后按下 `Enter` 即可执行。 - -### 智能问答 - -命令行助手初始化完成后,默认处于智能问答模式。 -命令提示符**左上角**会显示当前模式。 -若当前模式不是“智能问答”,执行 `copilot -c` (`copilot --chat`) 切换到智能问答模式。 - -![chat-ask](./pictures/shell-chat-ask.png) - -AI 
回答完毕后,会根据历史问答生成推荐问题,您可以复制、粘贴到命令行中进行追问。输入追问的问题后,按下 `Enter` 提问。 - -![chat-next](./pictures/shell-chat-continue.png) - -![chat-next-result](./pictures/shell-chat-continue-result.png) - -智能问答模式下支持连续追问,每次追问最多可以关联3条历史问答的上下文。 - -输入 `exit` 可以退出智能问答模式,回到 Linux 命令行。 - -![chat-exit](./pictures/shell-chat-exit.png) - -- 若问答过程中遇到程序错误,可以按下 `Ctrl + C` 立即退出当前问答,再尝试重新提问。 - -### Shell 命令 - -AI 会根据您的问题返回 Shell 命令,EulerCopilot 命令行助手可以解释、编辑或执行这些命令,并显示命令执行结果。 - -![shell-cmd](./pictures/shell-cmd.png) - -命令行助手会自动提取 AI 回答中的命令,并显示相关操作。您可以通过键盘上下键选择操作,按下 `Enter` 确认。 - -![shell-cmd-interact](./pictures/shell-cmd-interact.png) - -#### 解释 - -如果 AI 仅返回了一条命令,选择解释后会直接请求 AI 解释命令,并显示回答。 -若 AI 回答了多条命令,选择后会显示命令列表,您每次可以选择**一条**请求 AI 解释。 - -![shell-cmd-explain-select](./pictures/shell-cmd-explain-select.png) - -完成解释后,您可以继续选择其他操作。 - -![shell-cmd-explain-result](./pictures/shell-cmd-explain-result.png) - -#### 编辑 - -![shell-cmd-edit](./pictures/shell-cmd-edit.png) - -选择一条命令进行编辑,编辑完成后按下 `Enter` 确认。 - -![shell-cmd-edit-result](./pictures/shell-cmd-edit-result.png) - -完成编辑后,您可以继续编辑其他命令或选择其他操作。 - -#### 执行 - -如果 AI 仅返回了一条命令,选择执行后会直接执行命令,并显示执行结果。 -若 AI 回答了多条命令,选择后会显示命令列表,您每次可以选择**多条**命令来执行。 - -您可以通过键盘上下键移动光标,按下 `空格键` 选择命令,按下 `Enter` 执行所选命令。 -被选中的命令会显示**蓝色高亮**,如图所示。 - -![shell-cmd-exec-multi-select](./pictures/shell-cmd-exec-multi-select.png) - -若不选择任何命令,直接按下 `Enter`,则会跳过执行命令,直接进入下一轮问答。 - -按下 `Enter` 后,被选中的命令会从上到下依次执行。 - -![shell-cmd-exec-result](./pictures/shell-cmd-exec-result.png) - -若执行过程中遇到错误,命令行助手会显示错误信息,并**终止执行命令**,进入下一轮问答。 -您可以在下一轮问答中提示 AI 更正命令,或要求 AI 重新生成命令。 - -## 卸载 - -```bash -sudo dnf remove eulercopilot-cli -``` - -然后使用以下命令删除配置文件。 - -```bash -rm ~/.config/eulercopilot/config.json -``` - -卸载完成后请重启终端或重新连接 SSH 会话使配置还原。 diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md deleted file mode 100644 index 
8167907479569db279a838dc9dcf3e17597abbfd..0000000000000000000000000000000000000000 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md +++ /dev/null @@ -1,620 +0,0 @@ -# **EulerCopilot 智能助手部署指南** - -版本信息 -当前版本:v0.9.5 -更新日期:2025年4月1日 - -## 产品概述 - -EulerCopilot 是一款智能问答工具,使用 EulerCopilot 可以解决操作系统知识获取的便捷性,并且为OS领域模型赋能开发者及运维人员。作为获取操作系统知识,使能操作系统生产力工具 (如 A-Ops / A-Tune / x2openEuler / EulerMaker / EulerDevOps / StratoVirt / iSulad 等),颠覆传统命令交付方式,由传统命令交付方式向自然语义进化,并结合智能体任务规划能力,降低开发、使用操作系统特性的门槛。 - -本指南提供基于自动化脚本的EulerCopilot智能助手系统部署说明,支持一键自动部署和手动分步部署两种方式。 - -### 组件介绍 - -| 组件 | 端口 | 说明 | -| ----------------------------- | --------------- | -------------------- | -| euler-copilot-framework | 8002 (内部端口) | 智能体框架服务 | -| euler-copilot-web | 8080 | 智能体前端界面 | -| euler-copilot-rag | 9988 (内部端口) | 检索增强服务 | -| authhub-backend-service | 11120 (内部端口) | 鉴权服务后端 | -| authhub-web-service | 8000 | 鉴权服务前端 | -| mysql | 3306 (内部端口) | MySQL数据库 | -| redis | 6379 (内部端口) | Redis数据库 | -| minio | 9000 (内部端口) 9001(外部部端口) | minio数据库 | -| mongo | 27017 (内部端口) | mongo数据库 | -| postgres | 5432 (内部端口) | 向量数据库 | -| secret_inject | 无 | 配置文件安全复制工具 | - -### 软件要求 - -| 类型 | 版本要求 | 说明 | -|----------------| -------------------------------------|--------------------------------------| -| 操作系统 | openEuler 22.03 LTS 及以上版本 | 无 | -| K3s | >= v1.30.2,带有 Traefik Ingress 工具 | K3s 提供轻量级的 Kubernetes 集群,易于部署和管理 | -| Helm | >= v3.15.3 | Helm 是一个 Kubernetes 的包管理工具,其目的是快速安装、升级、卸载 EulerCopilot 服务 | -| python | >=3.9.9 | python3.9.9 以上版本为模型的下载和安装提供运行环境 | - ---- - -### 硬件规格 - -| 硬件资源 | 最小配置 | 推荐配置 | -|--------------|----------------------------|------------------------------| -| CPU | 4 核心 | 16 核心及以上 | -| RAM | 4 GB | 64 GB | -| 存储 | 32 GB | 64G | -| 大模型名称 | deepseek-llm-7b-chat | DeepSeek-R1-Llama-8B | -| 显存 (GPU) | NVIDIA RTX A4000 8GB | NVIDIA A100 80GB * 2 | - -**关键说明**: - -- 纯CPU环境,建议通过调用 OpenAI 接口或使用自带的模型部署方式来实现功能。 -- 如果k8s集群环境,则不需要单独安装k3s,要求version >= 1.28 - ---- - -### 部署视图 
- -![部署图](./pictures/部署视图.png) - ---- - -### 域名配置 - -需准备以下两个服务域名: - -- authhub认证服务:authhub.eulercopilot.local -- EulerCopilot web服务:www.eulercopilot.local - -```bash -# 本地Windows主机中进行配置 -# 打开 C:\Windows\System32\drivers\etc\hosts 添加记录 -# 替换127.0.0.1为目标服务器的IP -127.0.0.1 authhub.eulercopilot.local -127.0.0.1 www.eulercopilot.local -``` - -## 快速开始 - -### 1. 资源获取 - -- **获取部署脚本** - [EulerCopilot 的官方Git仓库](https://gitee.com/openeuler/euler-copilot-framework) - 切换至dev分支点击下载ZIP, 并上传至目标服务器 - - ```bash - unzip euler-copilot-framework.tar -d /home - ``` - -- **资源清单** - - - **下载地址** - [EulerCopilot 资源仓库](https://repo.oepkgs.net/openEuler/rpm/openEuler-22.03-LTS/contrib/eulercopilot) - - **镜像文件** - - ```bash - # 使用脚本自动下载所有镜像(需在联网环境执行) - cd /home/euler-copilot-framework/deploy/scripts/9-other-script/ - # 执行镜像下载(版本号可替换) - ./save_images.sh 0.9.5 # 自动保存到目录/home/eulercopilot/images - # 上传至目标服务器 - scp /home/eulercopilot/images/* root@target-server:/home/eulercopilot/images/ - ``` - - - **模型部署文件**:`bge-m3-Q4_K_M.gguf`, `deepseek-llm-7b-chat-Q4_K_M.gguf`, `ollama-linux-arm64.tgz/ollama-linux-amd64.tgz` - - **工具包**:`helm-v3.15.0-linux-arm64.tar.gz/helm-v3.15.0-linux-amd64.tar.gz`, `k3s-airgap-images-arm64.tar.zst/k3s-airgap-images-amd64.tar.zst`,`k3s-arm64/k3s-amd64`, `k3s-install.sh` - -- **关键说明** - - - **网络要求** - - 手动下载需确保存在可访问外网文件的Windows环境,全部下载完成后传输至离线环境 - - 脚本下载需在联网服务器执行,仅完成镜像下载,完成传输至离线环境 - - **确保目标服务器存在以下目录** - - ```bash - /home/eulercopilot/ - ├── images/ # 存放镜像文件 - ├── models/ # 存放模型文件 - └── tools/ # 存放工具包 - ``` - -### 2. 
部署EulerCopilot - -#### 一键部署 - -```bash -cd /home/euler-copilot-framework/deploy/scripts -``` - -```bash -bash deploy.sh -``` - -```bash -# 输入0进行一键自动部署 -============================== - 主部署菜单 -============================== -0) 一键自动部署 -1) 手动分步部署 -2) 重启服务 -3) 卸载所有组件并清除数据 -4) 退出程序 -============================== -请输入选项编号(0-3): 0 -``` - ---- - -#### 手动分步部署 - -```bash -# 选择1 -> 1 进入手动分步部署 -============================== - 主部署菜单 -============================== -0) 一键自动部署 -1) 手动分步部署 -2) 重启服务 -3) 卸载所有组件并清除数据 -4) 退出程序 -============================== -请输入选项编号(0-3): 1 -``` - -```bash -# 输入选项编号(0-9),逐步部署 -============================== - 手动分步部署菜单 -============================== -1) 执行环境检查脚本 -2) 安装k3s和helm -3) 安装Ollama -4) 部署Deepseek模型 -5) 部署Embedding模型 -6) 安装数据库 -7) 安装AuthHub -8) 安装EulerCopilot -9) 返回主菜单 -============================== -请输入选项编号(0-9): -``` - ---- - -**关键说明**: - -1. 安装部署前需要预先准备好部署所需的资源 -2. 在部署过程中,您需要输入 Authhub 域名和 EulerCopilot 域名, 不输入则使用默认域名`authhub.eulercopilot.local`, `www.eulercopilot.local`。 - -#### 重启服务 - -```bash -# 输入选项重启服务 -============================== - 服务重启菜单 -============================== -可重启的服务列表: -1) authhub-backend -2) authhub -3) framework -4) minio -5) mongo -6) mysql -7) pgsql -8) rag -9) rag-web -10) redis -11) web -12) 返回主菜单 -============================== -请输入要重启的服务编号(1-12): -``` - -#### 卸载所有组件 - -```bash -sudo ./deploy.sh -# 选择2进行完全卸载 -============================== - 主部署菜单 -============================== -0) 一键自动部署 -1) 手动分步部署 -2) 卸载所有组件并清除数据 -3) 退出程序 -============================== -请输入选项编号(0-3): 2 -``` - ---- - -**关键说明**: - -- 在部署过程中,您需要输入 Authhub 域名和 EulerCopilot 域名, 不输入则使用默认域名`authhub.eulercopilot.local`, `www.eulercopilot.local`。 -- 资源不足时可参考 FAQ 中的评估资源可用性解决 -- 查看组件日志 - -```bash -kubectl logs -n euler-copilot -``` - -- 查看服务状态 - -```bash -kubectl get pod -n euler-copilot -``` - -- 大模型配置修改并更新EulerCopilot - -```bash -cd /home/euler-copilot-framework/deploy/chart/euler-copilot -``` - -```bash -vim values.yaml -``` - -```bash -helm 
upgrade euler-copilot -n euler-copilot . -``` - -## 验证安装 - -恭喜您,**EulerCopilot** 已成功部署!为了开始您的体验,请在浏览器中输入 `https://您的EulerCopilot域名` 链接访问 EulerCopilot 的网页界面: - -首次访问时,您需要点击页面上的 **立即注册** 按钮来创建一个新的账号,并完成登录过程。 - -![Web登录界面](./pictures/WEB登录界面.png) -![Web 界面](./pictures/WEB界面.png) - -## 构建专有领域智能问答 - -点击知识库,可登录本地知识库管理页面,详细信息请参考文档 [本地资产库构建指南](../../../advance/knowledge_base/deploy_guide/witChainD_deployment.md) -**知识库登录默认账号 `admin`, 密码 `123456`** - ---- - -## 附录 - -### 大模型准备 - -#### GPU 环境 - -可直接使用部署的deepseek大模型参考以下方式进行部署 - -1. 下载模型文件 - - ```bash - huggingface-cli download --resume-download Qwen/Qwen1.5-14B-Chat --local-dir Qwen1.5-14B-Chat - ``` - -2. 创建终端 control - - ```bash - screen -S control - ``` - - ```bash - python3 -m fastchat.serve.controller - ``` - - 按 Ctrl A+D 置于后台 - -3. 创建新终端 api - - ```bash - screen -S api - ``` - - ```bash - python3 -m fastchat.serve.openai_api_server --host 0.0.0.0 --port 30000 --api-keys sk-123456 - ``` - - 按 Ctrl A+D 置于后台 - 如果当前环境的 Python 版本是 3.12 或者 3.9 可以创建 python3.10 的 conda 虚拟环境 - - ```bash - mkdir -p /root/py310 - ``` - - ```bash - conda create --prefix=/root/py310 python==3.10.14 - ``` - - ```bash - conda activate /root/py310 - ``` - -4. 创建新终端 worker - - ```bash - screen -S worker - ``` - - ```bash - screen -r worker - ``` - - 安装 fastchat 和 vllm - - ```bash - pip install fschat vllm - ``` - - 安装依赖: - - ```bash - pip install fschat[model_worker] - ``` - - ```bash - python3 -m fastchat.serve.vllm_worker --model-path /root/models/Qwen1.5-14B-Chat/ --model-name qwen1.5 --num-gpus 8 --gpu-memory-utilization=0.7 --dtype=half - ``` - - 按 Ctrl A+D 置于后台 - -5. 
按照如下方式修改配置的大模型参数,并更新服务。 - - ```bash - vim /home/euler-copilot-framework/deploy/chart/euler_copilot/values.yaml - ``` - - 修改如下部分 - - ```yaml - # 模型设置 - models: - # 用于问答的大模型;需要为OpenAI兼容接口 - answer: - # [必填] 接口URL(无需带上“v1”后缀) - url: http://172.168.178.107:11434 - # [必填] 接口API Key;默认置空 - key: sk-123456 - # [必填] 模型名称 - name: deepseek-llm-7b-chat:latest - # [必填] 模型最大上下文数;建议>=8192 - ctx_length: 8192 - # 模型最大输出长度,建议>=2048 - max_tokens: 2048 - # 用于Function Call的模型;建议使用特定推理框架 - functioncall: - # 推理框架类型,默认为ollama - # 可用的框架类型:["vllm", "sglang", "ollama", "openai"] - backend: - # 模型地址;不填则与问答模型一致 - url: ollama - # API Key;不填则与问答模型一致 - key: - # 模型名称;不填则与问答模型一致 - name: - # 模型最大上下文数;不填则与问答模型一致 - ctx_length: - # 模型最大输出长度;不填则与问答模型一致 - max_tokens: - # 用于数据向量化(Embedding)的模型 - embedding: - # 推理框架类型,默认为openai - # [必填] Embedding接口类型:["openai", "mindie"] - type: openai - # [必填] Embedding URL(需要带上“v1”后缀) - url: http://172.168.178.107:11434 - # [必填] Embedding 模型API Key - key: sk-123456 - # [必填] Embedding 模型名称 - name: bge-m3:latest - ``` - - ```bash - # 更新服务 - helm upgrade -n euler-copilot euler-copilot . - # 重启framework服务 - kubectl get pod -n euler-copilot - kubectl delete pod framework-deploy-65b669fc58-q9bw7 -n euler-copilot - ``` - -#### NPU 环境 - -NPU 环境部署可参考链接 [MindIE安装指南](https://www.hiascend.com/document/detail/zh/mindie/10RC2/whatismindie/mindie_what_0001.html) - -### FAQ - -#### 1. 解决 Hugging Face 连接错误 - -如果遇到如下连接错误: - -```text -urllib3.exceptions.NewConnectionError: : Failed to establish a new connection: [Errno 101] Network is unreachable -``` - -尝试以下解决方案: - -- 更新 `huggingface_hub` 包到最新版本。 - - ```bash - pip3 install -U huggingface_hub - ``` - -- 如果网络问题依旧存在,可以尝试使用镜像站点作为端点。 - - ```bash - export HF_ENDPOINT=https://hf-mirror.com - ``` - -#### 2. 
在 RAG 容器中调用问答接口 - -进入对应的 RAG Pod 后,可以通过 `curl` 命令发送 POST 请求来获取问答结果。请确保在请求体中提供具体的问题文本。 - -```bash -curl -k -X POST "http://localhost:9988/kb/get_answer" \ - -H "Content-Type: application/json" \ - -d '{ - "question": "您的问题", - "kb_sn": "default_test", - "fetch_source": true - }' -``` - -#### 3. 解决 `helm upgrade` 错误 - -当 Kubernetes 集群不可达时,您可能会遇到类似下面的错误信息: - -```text -Error: UPGRADE FAILED: Kubernetes cluster unreachable -``` - -确保设置了正确的 KUBECONFIG 环境变量指向有效的配置文件。 - -```bash -echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /root/.bashrc -source /root/.bashrc -``` - -#### 4. 查看 Pod 日志失败 - -如果您遇到查看 Pod 日志时权限被拒绝的问题,检查是否正确配置了代理设置,并将本机 IP 地址添加到 `no_proxy` 环境变量中。 - -```bash -cat /etc/systemd/system/k3s.service.env -``` - -编辑文件并确保包含: - -```bash -no_proxy=XXX.XXX.XXX.XXX -``` - -#### 5. GPU环境中大模型流式回复问题 - -对于某些服务执行 curl 大模型时无法进行流式回复的情况,尝试修改请求中的 `"stream"` 参数为 `false`。此外,确认已安装兼容版本的 Pydantic 库。 - -```bash -pip install pydantic==1.10.13 -``` - -#### 6. sglang 模型部署指南 - -按照以下步骤部署基于 sglang 的模型: - -```bash -# 1. 激活名为 `myenv` 的 Conda 环境,该环境基于 Python 3.10 创建: -conda activate myenv - -# 2. 安装 sglang 及其所有依赖项,指定版本为 0.3.0 -pip install "sglang[all]==0.3.0" - -# 3. 从特定索引安装 flashinfer,确保与您的 CUDA 和 PyTorch 版本兼容 -pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/ - -# 4. 使用 sglang 启动服务器,配置如下: -python -m sglang.launch_server \ - --served-model-name Qwen2.5-32B \ - --model-path Qwen2.5-32B-Instruct-AWQ \ - --host 0.0.0.0 \ - --port 8001 \ - --api-key "sk-12345" \ - --mem-fraction-static 0.5 \ - --tp 8 -``` - -- 验证安装 - - ```bash - pip show sglang - pip show flashinfer - ``` - -**注意事项:** - -- API Key:请确保 `--api-key` 参数中的 API 密钥是正确的 -- 模型路径: 确保 `--model-path` 参数中的路径是正确的,并且模型文件存在于该路径下。 -- CUDA 版本:确保你的系统上安装了 CUDA 12.1 和 PyTorch 2.4,因为 `flashinfer` 包依赖于这些特定版本。 -- 线程池大小:根据你的GPU资源和预期负载调整线程池大小。如果你有 8 个 GPU,那么可以选择 --tp 8 来充分利用这些资源。 - -#### 7. 
获取 Embedding - -使用 curl 发送 POST 请求以获取 embedding 结果: - -```bash -curl -k -X POST http://localhost:11434/v1/embeddings \ - -H "Content-Type: application/json" \ - -d {"input": "The food was delicious and the waiter...", "model": "bge-m3", "encoding_format": "float"} -``` - -#### 8. 生成证书 - -为了生成自签名证书,首先下载 [mkcert](https://github.com/FiloSottile/mkcert/releases)工具,然后运行以下命令: - -```bash -mkcert -install -mkcert example.com -``` - -最后,将生成的证书和私钥拷贝到 values.yaml 中, 并应用至 Kubernetes Secret。 - -```bash -vim /home/euler-copilot-framework_openeuler/deploy/chart_ssl/traefik-secret.yaml -``` - -```bash -kubectl apply -f traefik-secret.yaml -``` - -#### 9. 问题排查方法 - -1. **获取集群事件信息** - - 为了更好地定位 Pod 失败的原因,请首先检查 Kubernetes 集群中的事件 (Events)。这可以提供有关 Pod 状态变化的上下文信息。 - - ```bash - kubectl get events -n euler-copilot - ``` - -2. **验证镜像拉取状态** - - 确认容器镜像是否成功拉取。如果镜像未能正确加载,可能是由于网络问题或镜像仓库配置错误。 - - ```bash - k3s crictl images - ``` - -3. **审查 Pod 日志** - - 检查相关 Pod 的日志,以寻找可能的错误信息或异常行为。这对于诊断应用程序级别的问题特别有用。 - - ```bash - kubectl logs rag-deploy-service-5b7887644c-sm58z -n euler-copilot - ``` - -4. **评估资源可用性** - - 确保 Kubernetes 集群有足够的资源(如 CPU、内存和存储)来支持 Pod 的运行。资源不足可能导致镜像拉取失败或其他性能问题,或使得 Pod 状态从 Running 变为 Pending 或 Completed。可查看磁盘空间并保证至少有 30% 的可用空间。这有助于维持 Pod 的稳定运行状态。参考该链接挂载空间较大的磁盘[How to move k3s data to another location](https://mrkandreev.name/snippets/how_to_move_k3s_data_to_another_location/) - - ```bash - kubectl top nodes - ``` - -5. **确认 k3s 版本兼容性** - - 如果遇到镜像拉取失败且镜像大小为 0 的问题,请检查您的 k3s 版本是否符合最低要求(v1.30.2 或更高)。较低版本可能存在不兼容的问题。 - - ```bash - k3s -v - ``` - -6. **检查配置** - - 检查 `values.yaml` 文件中关于 OIDC 配置和域名配置是否填写正确,确保配置无误后更新服务。 - - ```bash - cat /home/euler-copilot-framework/deploy/chart/euler_copilot - ``` - - ```bash - vim values.yaml | grep oidc - ``` - - ```bash - helm upgrade euler-copilot -n euler-copilot . 
- ``` diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" deleted file mode 100644 index 74ae942b5a5217b8a5e34a2b2cd8d32a49be7a00..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/CPU\351\200\273\350\276\221\346\240\270\345\277\203.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" deleted file mode 100644 index 8f1de7892e04be698310691d2cfdeb07cbfa579d..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/Copilot\345\244\247\346\250\241\345\236\213\351\203\250\347\275\262\345\267\256\345\274\202.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" deleted file mode 100644 index e59e8b669c3039341655eadd75ce1fda5cda1776..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2761.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" deleted file mode 100644 index 68ae1c7cb11e663cabbf1225b188fdfd628bf549..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2762.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" deleted file mode 100644 index 
d90f6182fb6ec63f868a5c2598de73db093775f2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\216\245\345\205\245copilot\346\225\210\346\236\234\345\233\2763.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" deleted file mode 100644 index 374c3a2cc0be67a012ef8bf0ddc7688f97702d79..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\346\265\213\350\257\225\346\216\245\345\217\243\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" deleted file mode 100644 index 297ad86cac9226084483816f0c88c9116071b675..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/CPU\346\216\250\347\220\206\351\203\250\347\275\262/\350\275\273\351\207\217\345\214\226\351\203\250\347\275\262\350\247\206\345\233\276.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" deleted file mode 100644 index bb9be4e33ce470865fe5a07decbc056b9ee4e9bb..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" deleted file mode 100644 index fddbab4df70b940d5d5ed26fb8ec688f1592b5e8..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" deleted file mode 100644 index 341828b1b6f728888d1dd52eec755033680155da..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/authhub\347\231\273\345\275\225\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" deleted file mode 100644 index dfc28f4046fd4d61f48a0b0903ae2cf565ec5bc3..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" deleted file mode 100644 index 92685c5d977abe55f5d201aa57da479c8af84561..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" deleted file mode 100644 index 
c4b71d6def0b6407f721cf3c137d714d923f86f1..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" deleted file mode 100644 index 3458c5330fad7b8c89cb0bc8efb70f875d6f17d2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" deleted file mode 100644 index 469871fa9483a698b03374c3686b22156ad6e33a..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" deleted file mode 100644 index 8aba84e49c981c8f81cb91b14eee64f179bf0b38..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" deleted file mode 100644 index 7932773ccf59f58a283caccb92bd5af9475a7be9..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" deleted file mode 100644 index 50805afdb4764b74d9d16067999d7b39ce901d2a..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" deleted file mode 100644 index 8eb29b167f6ff1c2d951cd841f2340b027dec808..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" deleted file mode 100644 index 9da6121b1c1271c5b09c9292690ba3ab8d0a6cd2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" deleted file mode 100644 index a533772ce715bbf2c4a9f374b03e7fe20bf470a1..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" deleted file mode 100644 index 659ebeae5b25738043f7750c7cc44a1e80557ed8..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" deleted file mode 100644 index 22b02fff81fe1db3232b80607da6f10f710c8c64..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\347\202\271\345\207\273\351\200\200\345\207\272\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" deleted file mode 100644 index 913a5ce34a0a3e95af29e7c4433e5367c0adf008..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" deleted file mode 100644 index a1c6dc638d0dbd51abc374d563da150ff328cbe3..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" deleted file mode 100644 index f0449b134e1ebe5d54ca46099b57c6ad0b949eca..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" deleted file mode 100644 index e3d3ba7727d53490b22ecc7a1b422d5ae03390d3..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" deleted file mode 100644 index e018cb0904b414d63e1008209adb47c0b8afb858..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" deleted file mode 100644 index 7bf98b8217dda2358621fe9b11164407e2040ae8..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" deleted file mode 100644 index 795e99cdad03b2a3377fe77e51e336c6a6ca5b29..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" deleted file mode 100644 index 8031fec14e15b0e80e596f21cf79fe2b58ff7293..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" deleted file mode 100644 index a871907f348317e43633cf05f5241cb978476fb4..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\346\210\220\345\212\237\347\225\214\351\235\242.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" deleted file mode 100644 index d82c736a94b106a30fd8d1f7b781f9e335bb441f..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\345\210\233\345\273\272\345\272\224\347\224\250\347\225\214\351\235\242.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" deleted file mode 100644 index 473a0006c9710c92375e226a760c3a79989312f9..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/k8s\351\233\206\347\276\244\344\270\255postgres\346\234\215\345\212\241\347\232\204\345\220\215\347\247\260.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" deleted file mode 100644 index cfee6d88da56bc939886caece540f7de8cf77bbc..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/postgres\346\234\215\345\212\241\347\253\257\345\217\243.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" deleted file mode 100644 index b1d93f9c9d7587aa88a27d7e0bf185586583d438..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag_port.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" deleted file mode 100644 index fec3cdaa2b260e50f5523477da3e58a9e14e2130..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/rag\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 624459821de4542b635eeffa115eeba780929a4e..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\347\224\261\344\272\216\347\273\237\344\270\200\350\265\204\344\272\247\344\270\213\345\255\230\345\234\250\345\220\214\345\220\215\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" deleted file mode 100644 index 3104717bfa8f6615ad6726577a24938bc29884b2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\233\345\273\272\350\265\204\344\272\247\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" deleted file mode 100644 index 454b9fdfa4b7f209dc370f78677a2f4e71ea49be..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\244\261\350\264\245.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" deleted file mode 100644 index d52d25d4778f6db2d2ec076d65018c40cd1da4d3..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 82ed79c0154bd8e406621440c4e4a7caaab7e06e..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245\357\274\214\350\265\204\344\272\247\344\270\213\344\270\215\345\255\230\345\234\250\345\257\271\345\272\224\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" deleted file mode 100644 index 7dd2dea945f39ada1d7dd053d150a995b160f203..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\210\240\351\231\244\350\265\204\344\272\247\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 
84737b4185ce781d7b32ab42d39b8d2452138dad..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\345\273\272\347\253\213\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" deleted file mode 100644 index be89bdfde2518bba3941eee5d475f52ad9124343..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\214\207\345\256\232\344\270\215\345\255\230\345\234\250\347\232\204\350\265\204\344\272\247\345\210\233\345\273\272\350\265\204\344\272\247\345\272\223\345\244\261\350\264\245.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" deleted file mode 100644 index 27530840aaa5382a226e1ed8baea883895d9d75e..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\345\210\235\345\247\213\345\214\226.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" deleted file mode 100644 index aa04e6f7f0648adfca1240c750ca5b79b88da5f9..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\225\260\346\215\256\345\272\223\351\205\215\347\275\256\344\277\241\346\201\257\346\210\220\345\212\237.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" deleted file mode 100644 index 74905172c0c0a0acc4c4d0e35efd2493dc421c4e..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\227\240\350\265\204\344\272\247\346\227\266\346\237\245\350\257\242\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" deleted file mode 100644 index 432fbfcd02f6d2220e7d2a8512aee893d67be24d..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\347\234\213\346\226\207\346\241\243\344\272\247\347\224\237\347\211\207\346\256\265\346\200\273\346\225\260\345\222\214\344\270\212\344\274\240\346\210\220\345\212\237\346\200\273\346\225\260.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" deleted file mode 100644 index a4f4ea8a3999a9ab659ccd9ea39b80b21ff46e84..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\345\205\250\351\203\250\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" deleted file mode 100644 index 675b40297363664007f96948fb21b1cb90d6beea..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\346\237\245\350\257\242\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" deleted file mode 100644 index 8fc0c988e8b3830c550c6be6e42b88ac13448d1a..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\216\267\345\217\226\346\225\260\346\215\256\345\272\223pod\345\220\215\347\247\260.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" deleted file mode 100644 index 5c897e9883e868bf5160d92cb106ea4e4e9bc356..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\344\270\212\344\274\240\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" deleted file mode 100644 index 407e49b929b7ff4cf14703046a4ba0bfe1bb441e..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\346\234\252\346\237\245\350\257\242\345\210\260\347\233\270\345\205\263\350\257\255\346\226\231.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" deleted file mode 100644 index a4f4ea8a3999a9ab659ccd9ea39b80b21ff46e84..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\257\255\346\226\231\346\237\245\350\257\242\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" deleted file mode 100644 index 45ab521ec5f5afbd81ad54f023aae3b7a867dbf2..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\234\252\346\237\245\350\257\242\345\210\260\350\265\204\344\272\247\345\272\223.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" deleted file mode 100644 index 90ed5624ae93ff9784a750514c53293df4e961f0..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\344\270\213\346\237\245\350\257\242\350\265\204\344\272\247\345\272\223\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" deleted file mode 100644 index 7b2cc38a931c9c236517c14c86fa93e3eb2b6dcd..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\346\210\220\345\212\237.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" deleted file mode 100644 index 1365a8d69467dec250d3451ac63e2615a2194c18..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" 
"b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" deleted file mode 100644 index 1bd944264baa9369e6f8fbfd04cabcd12730c0e9..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\345\210\240\351\231\244\346\210\220\345\212\237png.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" deleted file mode 100644 index 58bcd320e145dd29d9e5d49cb6d86964ebb83b51..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\350\265\204\344\272\247\345\272\223\346\237\245\350\257\242\345\244\261\350\264\245\357\274\214\344\270\215\345\255\230\345\234\250\350\265\204\344\272\247.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" deleted file mode 100644 index 809b785b999b6663d9e9bd41fed953925093d6bd..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\344\270\255\351\227\264\345\261\202.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" deleted file mode 100644 index 62ba5f6615f18deb3d5a71fd68ee8c929638d814..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\346\272\220\347\233\256\345\275\225.png" and /dev/null differ diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" deleted file mode 100644 index d32c672fafcb0ef665bda0bcfdce19d2df44db01..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\205\215\347\275\256\346\230\240\345\260\204\347\233\256\346\240\207\347\233\256\345\275\225.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" deleted file mode 100644 index a5ecd6b65abc97320e7467f00d82ff1fd9bf0e44..0000000000000000000000000000000000000000 Binary files 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\346\234\254\345\234\260\350\265\204\344\272\247\345\272\223\346\236\204\345\273\272/\351\207\215\345\244\215\345\210\233\345\273\272\350\265\204\344\272\247\345\244\261\350\264\245.png" and /dev/null differ diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" "b/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" deleted file mode 100644 index 181bf1d2ddbe15cfd296c27df27d865bdbce8d69..0000000000000000000000000000000000000000 Binary files "a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" and /dev/null differ diff --git a/docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/_toc.yaml b/docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/_toc.yaml deleted file mode 100644 index b1a4ea7f0bc6c9169f6c9cc1ab084ad03074091b..0000000000000000000000000000000000000000 --- a/docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/_toc.yaml +++ /dev/null @@ -1,6 +0,0 @@ -label: 智能化漏洞修补用户指南 -isManual: true -description: 支持kernel仓库的漏洞修补 -sections: - - label: 智能化漏洞修补用户指南 - href: ./intelligent-vulnerability-patching-user-guide.md diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/_toc.yaml b/docs/zh/openeuler_intelligence/intelligent_assistant/_toc.yaml similarity index 90% rename from docs/zh/openEuler_intelligence/intelligent_assistant/_toc.yaml rename to docs/zh/openeuler_intelligence/intelligent_assistant/_toc.yaml index f6172785fb448f5ae708916e5bcb01a6d652e91e..9bf30db6740b15d1cab02614f9ae50581d328f80 100644 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/_toc.yaml +++ 
b/docs/zh/openeuler_intelligence/intelligent_assistant/_toc.yaml @@ -25,7 +25,7 @@ sections: - label: 使用指导 sections: - label: API Key 获取 - href: ./quick_start/smart_shell/user_guide/API_key.md + href: ./quick_start/smart_shell/user_guide/api_key.md - label: 命令行助手使用 href: ./quick_start/smart_shell/user_guide/shell.md - label: 进阶使用 @@ -45,9 +45,9 @@ sections: - label: 知识库管理 sections: - label: 部署指南 - href: ./advance/knowledge_base/deploy_guide/witChainD_deployment.md + href: ./advance/knowledge_base/deploy_guide/witchaind_deployment.md - label: 使用指导 - href: ./advance/knowledge_base/user_guide/witChainD_guidance.md + href: ./advance/knowledge_base/user_guide/witchaind_guidance.md - label: 工作流编排 sections: - label: 部署指南 diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/asset_library_creation_completed_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\265\204\344\272\247\345\272\223\345\210\233\345\273\272\345\256\214\346\210\220\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/asset_library_creation_completed_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_import_interface.png similarity index 100% rename from 
"docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_import_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_import_successful_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\345\257\274\345\205\245\346\210\220\345\212\237\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_import_successful_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_importing_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\211\271\351\207\217\346\255\243\345\234\250\345\257\274\345\205\245\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/batch_importing_interface.png diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/create_new_asset_library_fill_display_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\345\241\253\345\206\231\345\261\225\347\244\272\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/create_new_asset_library_fill_display_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/create_new_asset_library_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\226\260\345\273\272\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/create_new_asset_library_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/enter_asset_library_interface.png similarity index 100% rename from 
"docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\277\233\345\205\245\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/enter_asset_library_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/export_assets_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\350\265\204\344\272\247\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/export_assets_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/export_successful.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\207\272\346\210\220\345\212\237.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/export_successful.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" 
b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/exporting_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\255\243\345\234\250\345\257\274\345\207\272\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/exporting_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/import_document_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\257\274\345\205\245\346\226\207\346\241\243\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/import_document_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_details_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\350\257\246\346\203\205\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_details_interface.png diff --git 
"a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_result_filter_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\347\273\223\346\236\234\350\277\207\346\273\244\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_result_filter_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_successful_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\210\220\345\212\237\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parse_successful_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parsing_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\344\270\255\347\225\214\351\235\242.png" rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parsing_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parsing_method_selection_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\350\247\243\346\236\220\346\226\271\346\263\225\351\200\211\346\213\251\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/parsing_method_selection_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/return_to_homepage.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\345\233\236\345\210\260\351\246\226\351\241\265.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/return_to_homepage.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/select_file.png similarity index 100% rename from 
"docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\351\200\211\346\213\251\346\226\207\344\273\266.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/select_file.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/view_asset_library_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/\346\237\245\347\234\213\350\265\204\344\272\247\345\272\223\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/view_asset_library_interface.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/witchaind_login_interface.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/witChainD/witchaind\347\231\273\345\275\225\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/pictures/witchaind/witchaind_login_interface.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witChainD_deployment.md b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witchaind_deployment.md similarity index 58% rename from 
docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witChainD_deployment.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witchaind_deployment.md index 7aa0d750ae4421365b42ed38e45d3e06d88767ff..cfb912bb95777b0473afe6d84e0020e0360ff19c 100644 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witChainD_deployment.md +++ b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/deploy_guide/witchaind_deployment.md @@ -9,9 +9,9 @@ 通过指定资产名称、资产简介、资产默认解析方法、资产默认分块大小和embedding模型等条目创建、删除、导入资产、编辑资产或资产信息 - 文档管理 - - 支持用户上传限定大小和限定数量的文件,也允许用户下载自己上传的文件,用户可以通过点击资产卡片的形式进入资产,此时文档以条目的形式展示。 - - 支持txt、md、xlsx、docx和doc以及pdf文档的文档解析 - - 文档解析方式有三种general、ocr和enhanced模式,general模式下只提取文字和表格,ocr模式下不仅提取文字和表格嗨提取部分文档的图片内容,enhanced在ocr的基础上对图片中提取的内容进行总结。 + - 支持用户上传限定大小和限定数量的文件,也允许用户下载自己上传的文件,用户可以通过点击资产卡片的形式进入资产,此时文档以条目的形式展示。 + - 支持txt、md、xlsx、docx和doc以及pdf文档的文档解析 + - 文档解析方式有三种general、ocr和enhanced模式,general模式下只提取文字和表格,ocr模式下不仅提取文字和表格嗨提取部分文档的图片内容,enhanced在ocr的基础上对图片中提取的内容进行总结。 - 文档解析结果管理: @@ -28,7 +28,7 @@ 请在浏览器中输入 `https://$(wichaind域名)`访问 EulerCopilot 的 WitChainD 网页, 登入界面如下,输入账号(admin)、密码(123456)点击登录按钮登录系统。 -![witchaind登录界面](./pictures/witChainD/witchaind登录界面.png) +![witchaind登录界面](./pictures/witchaind/witchaind_login_interface.png) ## 新建资产 @@ -36,7 +36,7 @@ 进入资产卡片显示页面,卡片展示了资产的名称、简介、文档篇数、创建时间和资产ID。 -![查看资产库界面](./pictures/witChainD/查看资产库界面.png) +![查看资产库界面](./pictures/witchaind/view_asset_library_interface.png) 可通过红框中的按钮将卡片展示的资产转换为条目显示。 @@ -44,18 +44,18 @@ 点击新建资产,会跳出资产信息配置的框图 -![新建资产库界面](./pictures/witChainD/新建资产库界面.png) +![新建资产库界面](./pictures/witchaind/create_new_asset_library_interface.png) 填写资产库名称、资产库描述(可选)、语言、嵌入模型、默认解析方法和默认文件分块大小后,点击确定。 -![新建资产库填写展示界面](./pictures/witChainD/新建资产库填写展示界面.png) +![新建资产库填写展示界面](./pictures/witchaind/create_new_asset_library_fill_display_interface.png) 资产库建立完毕之后会显示是否添加文档,点击确定 
-![资产库创建完成界面](./pictures/witChainD/资产库创建完成界面.png) +![资产库创建完成界面](./pictures/witchaind/asset_library_creation_completed_interface.png) 点击确定完成后进入资产库 -![资产库创建完成界面](./pictures/witChainD/进入资产库界面.png) +![资产库创建完成界面](./pictures/witchaind/enter_asset_library_interface.png) ## 导入文档 @@ -63,60 +63,60 @@ 点击导入文档按钮跳出文档上传框,选择本地文件并勾选进行上传 -![导入文档界面](./pictures/witChainD/导入文档界面.png) +![导入文档界面](./pictures/witchaind/import_document_interface.png) -![选择文件](./pictures/witChainD/选择文件.png) +![选择文件](./pictures/witchaind/select_file.png) ### 批量导入 点击1批量导入资产,2选择本地资产,3勾选本地资产,最后点击确定进行资产导入。 -![批量导入界面](./pictures/witChainD/批量导入界面.png) +![批量导入界面](./pictures/witchaind/batch_import_interface.png) 资产导入中 -![批量正在导入界面](./pictures/witChainD/批量正在导入界面.png) +![批量正在导入界面](./pictures/witchaind/batch_importing_interface.png) 资产导入成功 -![批量导入成功界面](./pictures/witChainD/批量导入成功界面.png) +![批量导入成功界面](./pictures/witchaind/batch_import_successful_interface.png) ## 解析文档 等待解析中,点击取消可以停止文档解析。 -![解析中界面](./pictures/witChainD/解析中界面.png) +![解析中界面](./pictures/witchaind/parsing_interface.png) 解析完成后,解析状态会显示解析成功。 -![解析成功界面](./pictures/witChainD/解析成功界面.png) +![解析成功界面](./pictures/witchaind/parse_successful_interface.png) 文档解析方式有三种general、ocr和enhanced模式,请根据需要选择合适的文档解析方法 -![解析方法选择界面](./pictures/witChainD/解析方法选择界面.png) +![解析方法选择界面](./pictures/witchaind/parsing_method_selection_interface.png) 解析完毕可以通过点击文件名进入文档解析结果展示详情,可以查看文档解析结果,如下图所示: -![解析详情界面](./pictures/witChainD/解析详情界面.png) +![解析详情界面](./pictures/witchaind/parse_details_interface.png) 可以通过1过滤文档解析的片段、表格和图片等内容,通过2可以通过关键字检索模糊检索对应的片段,通过3可以设定是否在检索中是否启用片段,如下图所示: -![解析结果过滤界面](./pictures/witChainD/解析结果过滤界面.png) +![解析结果过滤界面](./pictures/witchaind/parse_result_filter_interface.png) ## 导出资产 点击回到首页 -![回到首页](./pictures/witChainD/回到首页.png) +![回到首页](./pictures/witchaind/return_to_homepage.png) 再点击导出资产 -![导出资产界面](./pictures/witChainD/导出资产界面.png) +![导出资产界面](./pictures/witchaind/export_assets_interface.png) 显示资产正在导出中,如下图所示: -![正在导出界面](./pictures/witChainD/正在导出界面.png) 
+![正在导出界面](./pictures/witchaind/exporting_interface.png) 导出成功点击下载,显示下载成功 -![导出成功](./pictures/witChainD/导出成功.png) +![导出成功](./pictures/witchaind/export_successful.png) diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\350\265\204\344\272\247\345\272\223\347\256\241\347\220\206\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/asset_library_management_interface.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\350\265\204\344\272\247\345\272\223\347\256\241\347\220\206\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/asset_library_management_interface.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\211\271\351\207\217\345\220\257\347\224\250.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/batch_enable.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\211\271\351\207\217\345\220\257\347\224\250.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/batch_enable.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\211\271\351\207\217\345\257\274\345\205\245\350\265\204\344\272\247\345\272\223.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/batch_import_asset_libraries.png similarity index 100% rename from 
"docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\211\271\351\207\217\345\257\274\345\205\245\350\265\204\344\272\247\345\272\223.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/batch_import_asset_libraries.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\260\345\242\236\350\265\204\344\272\247\345\272\223.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/create_new_asset_library.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\260\345\242\236\350\265\204\344\272\247\345\272\223.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/create_new_asset_library.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\347\256\241\347\220\206\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_management_interface.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\347\256\241\347\220\206\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_management_interface.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\350\247\243\346\236\220.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_parsing.png similarity index 100% rename from 
"docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\350\247\243\346\236\220.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_parsing.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\350\247\243\346\236\2202.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_parsing_2.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\241\243\350\247\243\346\236\2202.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/document_parsing_2.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\223.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\223.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\2230.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library_0.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\2230.png" 
rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library_0.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\223\351\205\215\347\275\256.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library_configuration.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\350\265\204\344\272\247\345\272\223\351\205\215\347\275\256.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_asset_library_configuration.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\346\226\207\346\241\243\351\205\215\347\275\256.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_document_configuration.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\347\274\226\350\276\221\346\226\207\346\241\243\351\205\215\347\275\256.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/edit_document_configuration.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\345\257\274\345\207\272\350\265\204\344\272\247\345\272\223.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/export_asset_library.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\345\257\274\345\207\272\350\265\204\344\272\247\345\272\223.png" rename 
to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/export_asset_library.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\345\257\274\345\205\245\346\226\207\346\241\243.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/import_documents.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\345\257\274\345\205\245\346\226\207\346\241\243.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/import_documents.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\250\241\345\236\213\351\205\215\347\275\256.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/model_configuration.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\250\241\345\236\213\351\205\215\347\275\256.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/model_configuration.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\350\247\243\346\236\220\345\256\214\346\210\220.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/parsing_complete.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\350\247\243\346\236\220\345\256\214\346\210\220.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/parsing_complete.png diff --git 
"a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\220\234\347\264\242\346\226\207\346\241\243.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/search_documents.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\220\234\347\264\242\346\226\207\346\241\243.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/search_documents.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\234\254\345\235\227\347\273\223\346\236\234\351\242\204\350\247\210.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/text_block_result_preview.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/\346\226\207\346\234\254\345\235\227\347\273\223\346\236\234\351\242\204\350\247\210.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/pictures/text_block_result_preview.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witChainD_guidance.md b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witchaind_guidance.md similarity index 73% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witChainD_guidance.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witchaind_guidance.md index 4759a57baa4e35ee529e9f4da70e1d1405612e6e..9caf3aaebfe2eef9f9645ba5127778ff8c8a9c44 100644 --- a/docs/en/openEuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witChainD_guidance.md +++ 
b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/knowledge_base/user_guide/witchaind_guidance.md @@ -8,31 +8,31 @@ 该页面为资产库管理界面,用户登录后将会进入该界面。 -![资产库管理界面](./pictures/资产库管理界面.png) +![资产库管理界面](./pictures/asset_library_management_interface.png) **支持操作:** - 配置模型:点击右上角的设置按键,可以修改模型相关的配置。 - ![模型配置](./pictures/模型配置.png) + ![模型配置](./pictures/model_configuration.png) - 新增资产库:点击新建资产库按钮新建,支持自定义名称、描述、语言、嵌入模型、解析方法、文件分块大小、文档类别。注意:重复名称会自动将名称修改成资产库id。 - ![新增资产库](./pictures/新增资产库.png) + ![新增资产库](./pictures/create_new_asset_library.png) - 编辑资产库:点击资产库的编辑按钮编辑,支持修改名称、描述、语言、解析方法、文件分块大小、文档类别。注意:不能修改成重复名称。 - ![编辑资产库](./pictures/编辑资产库0.png) + ![编辑资产库](./pictures/edit_asset_library_0.png) - ![编辑资产库](./pictures/编辑资产库.png) + ![编辑资产库](./pictures/edit_asset_library.png) - 导出资产库:点击资产库的导出按钮导出,导出完成后需要按任务列表中的下载任务下载对应资产库到本地。 - ![导出资产库](./pictures/导出资产库.png) + ![导出资产库](./pictures/export_asset_library.png) - 批量导入资产库:点击批量导入,上传本地文件后选中即可导入。 - ![批量导入资产库](./pictures/批量导入资产库.png) + ![批量导入资产库](./pictures/batch_import_asset_libraries.png) - 搜索资产库:在搜索栏中键入文本,可以搜索得到名称包含对应文本的资产库。 @@ -40,25 +40,25 @@ 在资产管理界面点击对应资产库,可以进入文档管理界面。 -![文档管理界面](./pictures/文档管理界面.png) +![文档管理界面](./pictures/document_management_interface.png) **支持操作:** - 导入文档:点击导入文档,从本地上传文件导入,导入后会自动以该资产库默认配置开始解析。 - ![导入文档](./pictures/导入文档.png) + ![导入文档](./pictures/import_documents.png) - 解析文档:点击操作中的解析,对文档进行解析。也可以选中多个文档批量解析。 - ![文档解析](./pictures/文档解析.png) + ![文档解析](./pictures/document_parsing.png) - ![文档解析2](./pictures/文档解析2.png) + ![文档解析2](./pictures/document_parsing_2.png) - ![解析完成](./pictures/解析完成.png) + ![解析完成](./pictures/parsing_complete.png) - 编辑文档配置:点击编辑对文档配置进行编辑,支持编辑文档名称、解析方法、类别、文件分块大小。 - ![编辑文档配置](./pictures/编辑文档配置.png) + ![编辑文档配置](./pictures/edit_document_configuration.png) - 下载文档:点击下载即可将文档下载至本地,也可以选中多个文档批量下载。 @@ -66,22 +66,22 @@ - 搜索文档:点击文档名称旁的搜索键,在弹出的搜索框中键入搜索的文本,可以搜索得到名称包含这些文本的文档。 - ![搜索文档](./pictures/搜索文档.png) + ![搜索文档](./pictures/search_documents.png) - 编辑资产库配置:支持编辑资产库名称、描述、语言、默认解析方法、文件分块大小、文档信息类别。 - 
![编辑资产库配置](./pictures/编辑资产库配置.png) + ![编辑资产库配置](./pictures/edit_asset_library_configuration.png) ## 3. 解析结果管理界面 点击解析完成的文档,可以进入文档的解析结果管理界面。界面中会按照顺序显示文档解析后的文本块内容预览,每个文本块会附带一个标签,表示该文本块中的信息来源于文档中的段落、列表或者是图片。右侧的开关表示该文本块是否被启用。 -![文本块结果预览](./pictures/文本块结果预览.png) +![文本块结果预览](./pictures/text_block_result_preview.png) **支持操作**: - 关闭/启用文本块:点击文本块右侧的开关即可关闭/启用对应文本块,也可以选中多个文本块批量关闭/启用。 - ![批量启用](./pictures/批量启用.png) + ![批量启用](./pictures/batch_enable.png) - 搜索文本块:在搜索框中键入内容,可以查找包含对应内容的文本块。 diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/deploy_guide/diagnosis_deployment.md diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/diagnosis_guidance.md diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-ask.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-ask.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-ask.png rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-ask.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-continue.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-exit.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-exit.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-exit.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-chat-exit.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit-result.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-edit.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-multi-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-multi-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-multi-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-multi-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-result.png rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-exec-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-explain-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-interact.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-interact.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-interact.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd-interact.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-cmd.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-help.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-help.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-help.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-help.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-init.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-init.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-init.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-init.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detail.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detail.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detail.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detail.png diff --git 
a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detect.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detect.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detect.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-detect.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-profiling.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-profiling.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-profiling.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-profiling.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-report.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-report.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-report.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-report.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-switch-mode.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-switch-mode.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-switch-mode.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-diagnose-switch-mode.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-metrics-collect.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-metrics-collect.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-metrics-collect.png 
rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-metrics-collect.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-report.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-report.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-report.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-report.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-exec.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-exec.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-exec.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-exec.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-gen.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-gen.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-gen.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-gen.png diff --git 
a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-view.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-view.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-view.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-script-view.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-switch-mode.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-switch-mode.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-switch-mode.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin-tuning-switch-mode.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_diagnosis/user_guide/pictures/shell-plugin.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md similarity index 85% rename from 
docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md index a770bc81e7efd9ad9f3910d92396a97c62be7ea2..5f986a6e7d612d5637ccd3b0bba50ffbafb6d6f2 100644 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md +++ b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/deploy_guide/tune_deployment.md @@ -109,9 +109,9 @@ kubectl delete pod framework-deploy-service-bb5b58678-jxzqr -n eulercopilot ``` + pod启动失败排查办法 - + 检查 euler-copilot-tune 目录下的 openapi.yaml 中 `servers.url` 字段,确保调优服务的启动地址被正确设置 - + 检查 `$plugin_dir` 插件文件夹的路径是否配置正确,该变量位于 `deploy/chart/euler_copilot/values.yaml` 中的 `framework`模块,如果插件目录不存在,需新建该目录,并需要将该目录下的 euler-copilot-tune 文件夹放到 `$plugin_dir` 中。 - + 检查sglang的地址和key填写是否正确,该变量位于 `vim /home/euler-copilot-framework/deploy/chart/euler_copilot/values.yaml` + + 检查 euler-copilot-tune 目录下的 openapi.yaml 中 `servers.url` 字段,确保调优服务的启动地址被正确设置 + + 检查 `$plugin_dir` 插件文件夹的路径是否配置正确,该变量位于 `deploy/chart/euler_copilot/values.yaml` 中的 `framework`模块,如果插件目录不存在,需新建该目录,并需要将该目录下的 euler-copilot-tune 文件夹放到 `$plugin_dir` 中。 + + 检查sglang的地址和key填写是否正确,该变量位于 `vim /home/euler-copilot-framework/deploy/chart/euler_copilot/values.yaml` ```yaml # 用于Function Call的模型 diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-ask.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-ask.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-ask.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-ask.png diff --git 
a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-continue.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-exit.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-exit.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-exit.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-chat-exit.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit-result.png similarity index 100% rename from 
docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-edit.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-multi-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-multi-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-multi-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-multi-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-exec-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-result.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-explain-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-interact.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-interact.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-interact.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd-interact.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-cmd.png diff --git 
a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-help.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-help.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-help.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-help.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-init.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-init.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-init.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-init.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detail.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detail.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detail.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detail.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detect.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detect.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detect.png rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-detect.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-profiling.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-profiling.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-profiling.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-profiling.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-report.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-report.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-report.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-report.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-switch-mode.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-switch-mode.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-switch-mode.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-diagnose-switch-mode.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-result.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-metrics-collect.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-metrics-collect.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-metrics-collect.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-metrics-collect.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-report.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-report.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-report.png rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-report.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-exec.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-exec.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-exec.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-exec.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-gen.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-gen.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-gen.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-gen.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-view.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-view.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-view.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-script-view.png diff --git 
a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-switch-mode.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-switch-mode.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-switch-mode.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin-tuning-switch-mode.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin.png b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/pictures/shell-plugin.png diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/smart_tuning/user_guide/tune_guidance.md diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md similarity index 43% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md index 28a85e9089b05bd769a8e08e2aa5b3b66fdfc10e..6c72ebd1a0adf0f25172ca4e8bdb40698cb2715d 100644 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md +++ b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/work_flow/deploy_guide/workflow_deployment.md @@ -1,3 +1,3 @@ # 工作流编排部署指导 -(当前内容待更新,请等待) \ No newline at end of file +(当前内容待更新,请等待) diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md similarity index 43% rename from docs/zh/openEuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md index cbb83f3811d35d76e2b115f7a7b82dc2f74760aa..bc4d8e02e5f546947b165b6efee7288998dcbac2 100644 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md +++ b/docs/zh/openeuler_intelligence/intelligent_assistant/advance/work_flow/user_guide/workflow_guidance.md @@ -1,3 +1,3 @@ # 工作流编排使用指导 -(当前内容待更新,请等待) \ No newline at end of file +(当前内容待更新,请等待) diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/API_key.md b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/api_key.md similarity index 100% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/API_key.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/api_key.md diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-ask.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-ask.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-ask.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-ask.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-continue.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-exit.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-exit.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-exit.png rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-chat-exit.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-edit.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-multi-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-multi-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-multi-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-multi-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-result.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-exec-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-explain-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-interact.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-interact.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-interact.png rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd-interact.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-cmd.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-help.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-help.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-help.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-help.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-init.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-init.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-init.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-init.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detail.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detail.png similarity index 100% rename from 
docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detail.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detail.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detect.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detect.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detect.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-detect.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-profiling.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-profiling.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-profiling.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-profiling.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-report.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-report.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-report.png rename to 
docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-report.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-switch-mode.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-switch-mode.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-switch-mode.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-diagnose-switch-mode.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-result.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-result.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-result.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-result.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-metrics-collect.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-metrics-collect.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-metrics-collect.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-metrics-collect.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-report.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-report.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-report.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-report.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-exec.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-exec.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-exec.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-exec.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-gen.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-gen.png similarity index 100% rename from 
docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-gen.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-gen.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-view.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-view.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-view.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-script-view.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-switch-mode.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-switch-mode.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-switch-mode.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin-tuning-switch-mode.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/pictures/shell-plugin.png diff --git 
a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md similarity index 96% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md index bb88c52f55a984d8115227454e67ed1970423583..b0f8dde5ed888536483cd2c08519d80a4a30981a 100644 --- a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md +++ b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_shell/user_guide/shell.md @@ -8,8 +8,8 @@ EulerCopilot 命令行助手是一个命令行(Shell)AI 助手,您可以 - 操作系统:openEuler 22.03 LTS SP3,或者 openEuler 24.03 LTS 及以上版本 - 命令行软件: - - Linux 桌面环境:支持 GNOME、KDE、DDE 等桌面环境的内置终端 - - 远程 SSH 链接:支持兼容 xterm-256 与 UTF-8 字符集的终端 + - Linux 桌面环境:支持 GNOME、KDE、DDE 等桌面环境的内置终端 + - 远程 SSH 链接:支持兼容 xterm-256 与 UTF-8 字符集的终端 ## 安装 diff --git a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md similarity index 97% rename from docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md index 8167907479569db279a838dc9dcf3e17597abbfd..c79e5f562f408d50828876c54f5e466830706434 100644 --- a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md +++ b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/offline.md @@ -56,7 +56,7 @@ EulerCopilot 是一款智能问答工具,使用 EulerCopilot 可以解决操 ### 部署视图 -![部署图](./pictures/部署视图.png) +![部署图](./pictures/deployment-overview.png) --- @@ -89,9 +89,9 @@ EulerCopilot 是一款智能问答工具,使用 
EulerCopilot 可以解决操 - **资源清单** - - **下载地址** + - **下载地址** [EulerCopilot 资源仓库](https://repo.oepkgs.net/openEuler/rpm/openEuler-22.03-LTS/contrib/eulercopilot) - - **镜像文件** + - **镜像文件** ```bash # 使用脚本自动下载所有镜像(需在联网环境执行) @@ -107,10 +107,10 @@ EulerCopilot 是一款智能问答工具,使用 EulerCopilot 可以解决操 - **关键说明** - - **网络要求** - - 手动下载需确保存在可访问外网文件的Windows环境,全部下载完成后传输至离线环境 - - 脚本下载需在联网服务器执行,仅完成镜像下载,完成传输至离线环境 - - **确保目标服务器存在以下目录** + - **网络要求** + - 手动下载需确保存在可访问外网文件的Windows环境,全部下载完成后传输至离线环境 + - 脚本下载需在联网服务器执行,仅完成镜像下载,完成传输至离线环境 + - **确保目标服务器存在以下目录** ```bash /home/eulercopilot/ @@ -266,12 +266,12 @@ helm upgrade euler-copilot -n euler-copilot . 首次访问时,您需要点击页面上的 **立即注册** 按钮来创建一个新的账号,并完成登录过程。 -![Web登录界面](./pictures/WEB登录界面.png) -![Web 界面](./pictures/WEB界面.png) +![Web登录界面](./pictures/web-login.png) +![Web 界面](./pictures/web.png) ## 构建专有领域智能问答 -点击知识库,可登录本地知识库管理页面,详细信息请参考文档 [本地资产库构建指南](../../../advance/knowledge_base/deploy_guide/witChainD_deployment.md) +点击知识库,可登录本地知识库管理页面,详细信息请参考文档 [本地资产库构建指南](../../../advance/knowledge_base/deploy_guide/witchaind_deployment.md) **知识库登录默认账号 `admin`, 密码 `123456`** --- diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md similarity index 98% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md index 3e946173658dffbc296f616d94bfbd92a3d70890..59527296199c2a1f06b81575815e481cbf54c6c2 100644 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md +++ b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/online.md @@ -56,7 +56,7 @@ EulerCopilot 是一款智能问答工具,使用 EulerCopilot 可以解决操 ### 部署视图 -![部署图](./pictures/部署视图.png) +![部署图](./pictures/deployment-overview.png) --- @@ -242,12 
+242,12 @@ helm upgrade euler-copilot -n euler-copilot . 首次访问时,您需要点击页面上的 **立即注册** 按钮来创建一个新的账号,并完成登录过程。 -![Web登录界面](./pictures/WEB登录界面.png) -![Web 界面](./pictures/WEB界面.png) +![Web登录界面](./pictures/web-login.png) +![Web 界面](./pictures/web.png) ## 构建专有领域智能问答 -点击知识库,可登录本地知识库管理页面,详细信息请参考文档 [本地资产库构建指南](../../../advance/knowledge_base/deploy_guide/witChainD_deployment.md) +点击知识库,可登录本地知识库管理页面,详细信息请参考文档 [本地资产库构建指南](../../../advance/knowledge_base/deploy_guide/witchaind_deployment.md) **知识库登录默认账号 `admin`, 密码 `123456`** --- diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/deployment-overview.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/\351\203\250\347\275\262\350\247\206\345\233\276.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/deployment-overview.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/web-login.png similarity index 100% rename from "docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\231\273\345\275\225\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/web-login.png diff --git "a/docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/web.png similarity index 100% rename from 
"docs/en/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/WEB\347\225\214\351\235\242.png" rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/deploy_guide/pictures/web.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/_toc.yaml b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/_toc.yaml similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/_toc.yaml rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/_toc.yaml diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/introduction.md diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login-click2signup.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login-click2signup.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login-click2signup.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login-click2signup.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login.png similarity index 100% rename from 
docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-login.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-signup.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-signup.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-signup.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/authhub-signup.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-confirmation.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-confirmation.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-confirmation.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-confirmation.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-multi-select.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-multi-select.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-multi-select.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete-multi-select.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/bulk-delete.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/chat-area.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/chat-area.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/chat-area.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/chat-area.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session-confirmation.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session-confirmation.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session-confirmation.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session-confirmation.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/delete-session.png diff --git 
a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-ask-against-file.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-ask-against-file.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-ask-against-file.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-ask-against-file.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn-prompt.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn-prompt.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn-prompt.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn-prompt.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-btn.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-history-tag.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-history-tag.png similarity index 100% rename from 
docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-history-tag.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-history-tag.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-uploading.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-uploading.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-uploading.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/file-upload-uploading.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-cancel.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-cancel.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-cancel.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-cancel.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-confirm.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-confirm.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-confirm.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-confirm.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-edit.png 
b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-edit.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-edit.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-edit.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-search.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-search.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-search.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-search.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-user.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-user.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-user.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/icon-user.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/knowledge-base-setting.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/knowledge-base-setting.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/knowledge-base-setting.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/knowledge-base-setting.png diff --git 
a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/llm_creating.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/llm_creating.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/llm_creating.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/llm_creating.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/llm_setting.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/llm_setting.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/llm_setting.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/llm_setting.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/login-popup.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/login-popup.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/login-popup.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/login-popup.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/logout.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/logout.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/logout.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/logout.png diff --git 
a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-sections.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-sections.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-sections.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/main-page-sections.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/mulit-roud-of-converstation.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/mulit-roud-of-converstation.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/mulit-roud-of-converstation.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/mulit-roud-of-converstation.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/new-chat.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/new-chat.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/new-chat.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/new-chat.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session-confirmation.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session-confirmation.png similarity index 100% rename from 
docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session-confirmation.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session-confirmation.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/rename-session.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/search-history.png b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/search-history.png similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/search-history.png rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/pictures/search-history.png diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md similarity index 99% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md index 5c6bc55ea795b0aa2d9ba576904fdd3855998566..ec525ab7e9c16680cc462cbf68af87a3c0b7db82 100644 --- a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md +++ 
b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/qa_guide.md @@ -9,6 +9,7 @@ > 对话区位于页面的主体部分,如图 1 所示。 - 图 1 对话区 + ![对话区](./pictures/chat-area.png) > **说明** @@ -18,6 +19,7 @@ - 图二 知识库配置 ![知识库设置](./pictures/knowledge-base-setting.png) + > **说明** > > 用户可以点击左下角的设置按钮进入模型配置页面,现提供8中供应商的基础模板用于大模型创建 diff --git a/docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md b/docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md similarity index 100% rename from docs/zh/openEuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md rename to docs/zh/openeuler_intelligence/intelligent_assistant/quick_start/smart_web/user_guide/registration_and_login.md diff --git a/docs/en/openEuler_intelligence/intelligent_vulnerability_patching/_toc.yaml b/docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/_toc.yaml similarity index 72% rename from docs/en/openEuler_intelligence/intelligent_vulnerability_patching/_toc.yaml rename to docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/_toc.yaml index b1a4ea7f0bc6c9169f6c9cc1ab084ad03074091b..dd80caddc875c8a9e4fd510c148ed27bae11063a 100644 --- a/docs/en/openEuler_intelligence/intelligent_vulnerability_patching/_toc.yaml +++ b/docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/_toc.yaml @@ -3,4 +3,4 @@ isManual: true description: 支持kernel仓库的漏洞修补 sections: - label: 智能化漏洞修补用户指南 - href: ./intelligent-vulnerability-patching-user-guide.md + href: ./intelligent_vulnerability_patching_user_guide.md diff --git a/docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/intelligent-vulnerability-patching-user-guide.md b/docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/intelligent_vulnerability_patching_user_guide.md similarity index 93% rename from 
docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/intelligent-vulnerability-patching-user-guide.md rename to docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/intelligent_vulnerability_patching_user_guide.md index b19e2c0025cf962506a663c72c6c484b67944a69..3c14b68913e6bd6b3ed1b534a55fc6720983fb58 100644 --- a/docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/intelligent-vulnerability-patching-user-guide.md +++ b/docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/intelligent_vulnerability_patching_user_guide.md @@ -8,13 +8,13 @@ 在src-openEuler的kernel仓库([https://gitee.com/src-openeuler/kernel.git](https://gitee.com/src-openeuler/kernel.git))中,对代表CVE的issue下面进行评论。 -![CVE截图](pictures/代表CVE的issue.png) +![CVE截图](pictures/cve.png) ## /analyze命令 `/analyze`命令提供了对CVE影响范围进行分析的能力。通过在issue下面评论`/analyze`,即可自动对当前维护范围内的openEuler版本进行分析,判断每一个openEuler版本是否引入该CVE,是否修复该CVE。 -![/analyze命令](pictures/analyze命令.png) +![/analyze命令](pictures/analyze.png) > [!NOTE]说明 > /analyze命令无参数 @@ -34,7 +34,7 @@ CVE是否修复存在如下几种情况: ## /create_pr命令 `/create_pr`命令提供了对CVE的补丁进行智能化修复的能力。通过在issue下面评论`/create_pr `,即可自动获得漏洞补丁,并通过创建PR来合入openEuler下的linux仓库([https://gitee.com/openeuler/kernel.git](https://gitee.com/openeuler/kernel.git))中。 -![/create_pr命令](pictures/create_pr命令.png) +![/create_pr命令](pictures/create_pr.png) `/create_pr`命令存在参数,包括如下几种情况: diff --git "a/docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/pictures/analyze\345\221\275\344\273\244.png" b/docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/pictures/analyze.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/pictures/analyze\345\221\275\344\273\244.png" rename to docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/pictures/analyze.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/pictures/create_pr\345\221\275\344\273\244.png" 
b/docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/pictures/create_pr.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/pictures/create_pr\345\221\275\344\273\244.png" rename to docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/pictures/create_pr.png diff --git "a/docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/pictures/\344\273\243\350\241\250CVE\347\232\204issue.png" b/docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/pictures/cve.png similarity index 100% rename from "docs/zh/openEuler_intelligence/intelligent_vulnerability_patching/pictures/\344\273\243\350\241\250CVE\347\232\204issue.png" rename to docs/zh/openeuler_intelligence/intelligent_vulnerability_patching/pictures/cve.png diff --git a/docs/zh/openEuler_intelligence/mcp_agent/_toc.yaml b/docs/zh/openeuler_intelligence/mcp_agent/_toc.yaml similarity index 100% rename from docs/zh/openEuler_intelligence/mcp_agent/_toc.yaml rename to docs/zh/openeuler_intelligence/mcp_agent/_toc.yaml diff --git a/docs/zh/openEuler_intelligence/mcp_agent/mcp_guide.md b/docs/zh/openeuler_intelligence/mcp_agent/mcp_guide.md similarity index 33% rename from docs/zh/openEuler_intelligence/mcp_agent/mcp_guide.md rename to docs/zh/openeuler_intelligence/mcp_agent/mcp_guide.md index c30fbf395d7b81d0df69b9d7cb5565e3cbee7e18..1518c6b91c4729d8fc0b7d4234b62a6cb78df5b2 100644 --- a/docs/zh/openEuler_intelligence/mcp_agent/mcp_guide.md +++ b/docs/zh/openeuler_intelligence/mcp_agent/mcp_guide.md @@ -1,3 +1,3 @@ # MCP 服务指南 -(当前内容待更新,请等待) \ No newline at end of file +(当前内容待更新,请等待) diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-chat-area.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-chat-area.png" new file mode 
100644 index 0000000000000000000000000000000000000000..9802dc8c11084cef2ba1b449e4ebc26edd6492b1 Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-chat-area.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-delete-session.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-delete-session.png" new file mode 100644 index 0000000000000000000000000000000000000000..4f98fa0529b9899dfeac70ec050b11ac54bb400a Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-delete-session.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-rename-delete-session.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-rename-delete-session.png" new file mode 100644 index 0000000000000000000000000000000000000000..bfd227b2831a75314d40c006255530f62f1cfdb2 Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-rename-delete-session.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-rename-session.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-rename-session.png" new file mode 100644 index 
0000000000000000000000000000000000000000..da1f91a4d62b703f52335f1aa9d7751e033895e5 Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research-rename-session.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research.png" new file mode 100644 index 0000000000000000000000000000000000000000..359f23c4ef5300e44bb9e1f7cadb9ddc912d222b Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/deep-research.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/generate-cite.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/generate-cite.png" new file mode 100644 index 0000000000000000000000000000000000000000..bff0eea6b5a961bfa070c7f610f5fa33518529ef Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/generate-cite.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/generate-report.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/generate-report.png" new file mode 100644 index 0000000000000000000000000000000000000000..806af26c9833ad6bc0219baa7129aee5d5d8dc32 Binary files /dev/null and 
"b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/generate-report.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/knowledge-base.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/knowledge-base.png" new file mode 100644 index 0000000000000000000000000000000000000000..285c915d5138067eaeb98bbe0ba4abd72c06cbe0 Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/knowledge-base.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/search-mode.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/search-mode.png" new file mode 100644 index 0000000000000000000000000000000000000000..a2552df7d20a3a99b8e239584970da39d5d42dd0 Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/search-mode.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/search-plan.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/search-plan.png" new file mode 100644 index 0000000000000000000000000000000000000000..2498aec18ef41ba88ea4486a2808554318b1e65b Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/search-plan.png" differ diff --git 
"a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/thought-process.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/thought-process.png" new file mode 100644 index 0000000000000000000000000000000000000000..7fb624886313acbf45387477bd613c8e9c28cf82 Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/thought-process.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/update-search-plan.png" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/update-search-plan.png" new file mode 100644 index 0000000000000000000000000000000000000000..f880a01c316c83ccfcf70a6dfcf5958ceabedd4e Binary files /dev/null and "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/pictures/update-search-plan.png" differ diff --git "a/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/\346\267\261\345\272\246\347\240\224\347\251\266\344\275\277\347\224\250\346\214\207\345\215\227.md" "b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/\346\267\261\345\272\246\347\240\224\347\251\266\344\275\277\347\224\250\346\214\207\345\215\227.md" new file mode 100644 index 0000000000000000000000000000000000000000..9b0fcb62afb8a6ab45cdb3d2e0db2b56faf5db94 --- /dev/null +++ 
"b/documents/user-guide/\344\275\277\347\224\250\346\214\207\345\215\227/\347\272\277\344\270\212\346\234\215\345\212\241/\346\267\261\345\272\246\347\240\224\347\251\266\344\275\277\347\224\250\346\214\207\345\215\227.md" @@ -0,0 +1,168 @@ +# 深度研究使用指南 + +## 开始对话 + +在对话区下侧输入框即可输入想要研究的内容,输入 `Shift + Enter` 可进行换行,输入 `Enter` 即可发送研究内容,或者单击“发送”也可发送研究内容。 + +> **说明** +> +> 对话区位于页面的下方,如图 1 所示。 + +- 图 1 对话区 + ![对话区](./pictures/deep-research-chat-area.png) + +### 搜索模式 + +深度研究功能支持互联网、客户内部网络、知识库三种搜索源搜索信息,三种搜索源可随意搭配。 +其中: +- 互联网搜索:使用tavilt搜索引擎进行搜索,需要在MCP配置中配置后使用,详见配置说明。 +- 内部网络搜索:支持内部网络进行搜索。需要在MCP配置中配置后使用,详见配置说明。 +- 知识库搜索:支持Ragflow知识检索。需要在MCP配置中配置后使用,详见配置说明。 + +搜索源选择方式如图2所示。 + +- 图 2 外部数据源选择 + ![外部数据源选择](./pictures/search-mode.png) + + +### 深度研究 + +选择完搜索源之后,在对话框输入待分析的问题,点击发送。首先会生成如下研究计划: + +- 图 3 研究计划 + ![研究计划](./pictures/search-plan.png) + +此研究计划用于指导信息搜集过程,如果想要修改计划,可以直接点击修改计划后在对话框中输入修改内容或者直接在对话框中输入想要修改的内容,如图4示例: + +- 图 4 研究计划 + ![修改研究计划](./pictures/update-search-plan.png) + +计划可以按照如上步骤进行多次修改,修改完成后,点击开始研究,即可开始深度研究,右侧会实时显示思考过程以及最终报告。如图5-7所示: + +- 图 5 思考过程 + ![思考过程](./pictures/thought-process.png) + +- 图 6 报告生成 + ![报告生成](./pictures/generate-report.png) + +- 图 7 引用文献 + ![引用文献](./pictures/generate-cite.png) + + +## 管理对话 + +> **说明** +> +> 对话管理区在页面左侧。 + +### 新建对话 + +单击“新建会话”按钮即可新建对话,如图 8 所示。 + +- 图 8 “新建对话”按钮在页面左上方 + ![新建对话](./pictures/deep-research-chat-area.png) + + +### 对话历史记录管理 + +历史记录的列表位于新建会话的下方,在每条对话历史记录的右侧,单击图标即可编辑对话历史记录的名字或删除,如图 9 所示。 + +- 图 9 点击图标重命名或删除历史记录 + ![点击图标重命名或删除历史记录](./pictures/deep-research-rename-session.png) + +点击重命名后重新输入名字,并确认后即可完成历史记录重命名,单击图 10右侧![OK]即可完成重命名,或者单击图 10左侧![Cancel]放弃本次重命名,如图 10 所示。 + +- 图 10 完成/取消重命名历史记录 + ![完成/取消重命名历史记录](./pictures/deep-research-rename-delete-session.png) + +另外,单击对话历史记录右侧的删除按钮,如图 11 所示,即可对删除单条对话历史记录进行二次确认,在二次确认弹出框,如图 11 所示,单击“是”,可确认删除单条对话历史记录,或者单击“否”,取消本次删除。 + +- 图 11 点击删除按钮删除单条历史记录 + ![删除单条历史记录](./pictures/deep-research-delete-session.png) + +## 配置说明 +config.yaml文件配置说明: +```yaml +model: + model_platform: deepseek # 
支持列表见camel官方文档:https://docs.camel-ai.org/key_modules/models#direct-integrations + model_type: deepseek-chat # 支持列表见camel官方文档:https://docs.camel-ai.org/key_modules/models#direct-integrations + api_key: # 对应模型提供商的api key + base_url: # 对应模型提供商的base url + model_config_dict: # 额外的模型配置 + stream: true + +# 数据源配置项:每个数据源对应一个或多个MCP服务,用户可指定数据源 +# web_search:对应于互联网搜索,下面的服务名tavily即为mcp_config.json配置中的服务名 +# knowledge:对应于知识检索服务,下面的服务名ragflow_retrieval即为mcp_config.json配置中的服务名 +# intra_search:对应于互联网搜索,下面的服务名intra_search即为mcp_config.json配置中的服务名 +datasource: + web_search: + mcp_server_names: + - tavily + knowledge: + mcp_server_names: + - ragflow_retrieval + intra_search: + mcp_server_names: + - intra_search +``` +mcp_config.json配置说明 +支持stdio、streamable_http方式。 +```json +{ + "mcpServers": { + "tavily": { + "command": "python", + "args": [ + "integrations/mcps/tavily.py", + "--mode", + "stdio" + ], + "env": { + "PYTHONPATH": "./", + "TAVILY_API_KEY": "YOUR API KEY" + } + }, + "ragflow_retrieval": {}, + "intra_search": {} + } +} +``` +其中ragflow_retrieval知识检索工具返回的格式要求为: +```json +{ + "query": "用户查询", + "results": [ + { + "url": "文档预览链接或者文档名", + "title": "文档标题", + "description": "文档描述", + "content": "文档内容" + } + ] +} +``` +用户画像功能配置: +此功能用于为深度研究构建定制化上下文,用户可以开启该功能提升深度研究在专业领域的表现。此功能基于Ragflow知识检索工具实现。 +开启此功能:环境变量中配置USE_CONTEXT=True,默认不开启。 +此功能需要配置文件mcp_context_config.json,内容如下: +```json +{ + "mcpServers": { + "context": { + "command": "python", + "args": [ + "integrations/mcps/user_profile.py", + "--mode", + "stdio" + ], + "env": { + "PYTHONPATH": "./", + "RAGFLOW_DATASET_ID" :"ragflow知识检索服务的目标知识库id", + "RAGFLOW_API_KEY" : "ragflow的用户个人api_key", + "DEFAULT_BASE_URL" : "ragflow知识检索服务地址" + } + } + } +} +``` diff --git a/pyproject.toml b/pyproject.toml index 681ea9fb4552650da9148a22dd55d2063dca2410..22b07eacd07d19ac7478b6af20fb0f972da742db 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,4 +50,4 @@ dev = [ "ruff==0.11.2", "sphinx==8.2.3", "sphinx-rtd-theme==3.0.2", -] +] \ No newline at 
end of file diff --git a/tests/common/test_queue.py b/tests/common/test_queue.py index 5375180a3f453a9d9d18d17b95b0ed9c39d8c5c4..db1f5ead746f35dd510b30698746e1a25f08d02d 100644 --- a/tests/common/test_queue.py +++ b/tests/common/test_queue.py @@ -74,8 +74,8 @@ async def test_push_output_with_flow(message_queue, mock_task): mock_task.state.flow_id = "flow_id" mock_task.state.step_id = "step_id" mock_task.state.step_name = "step_name" - mock_task.state.status = "running" - + mock_task.state.step_status = "running" + await message_queue.init("test_task") await message_queue.push_output(mock_task, EventType.TEXT_ADD, {}) diff --git a/tests/manager/test_user.py b/tests/manager/test_user.py index d350ca307a9f0aa2d6532cae614b8633b1ae0f81..ab6eeab4c498a2619c4f08c5b548a6096611aeaa 100644 --- a/tests/manager/test_user.py +++ b/tests/manager/test_user.py @@ -73,7 +73,7 @@ class TestUserManager(unittest.TestCase): mock_mysql_db_instance.get_session.return_value = mock_session # 调用被测方法 - updated_userinfo = UserManager.update_userinfo_by_user_sub(userinfo, refresh_revision=True) + updated_userinfo = UserManager.update_refresh_revision_by_user_sub(userinfo, refresh_revision=True) # 断言返回的用户信息的 revision_number 是否与原始用户信息一致 self.assertEqual(updated_userinfo.revision_number, userinfo.revision_number)