From e04c168a8f879e875e8e9d1f1418a3ea5e2f863d Mon Sep 17 00:00:00 2001
From: shengxiao
Date: Fri, 13 Dec 2024 17:25:47 +0800
Subject: [PATCH 1/4] [feat] 1. Add a new sample boilerplate project & quick-start tutorial docs 2. Optimization: the agent node in workflow supports custom input parameters
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 agentuniverse/workflow/node/agent_node.py | 17 +-
 au_sample_standard_app/__init__.py | 7 +
 au_sample_standard_app/bootstrap/__init__.py | 7 +
 .../bootstrap/intelligence/__init__.py | 7 +
 .../intelligence/server_application.py | 24 +
 au_sample_standard_app/config/config.toml | 65 +++
 au_sample_standard_app/config/custom_key.toml | 43 ++
 .../config/gunicorn_config.toml | 8 +
 au_sample_standard_app/config/log_config.toml | 32 ++
 .../intelligence/__init__.py | 7 +
 .../intelligence/agentic/__init__.py | 7 +
 .../intelligence/agentic/agent/__init__.py | 7 +
 .../agentic/agent/agent_instance/__init__.py | 7 +
 .../multi_agent_case/__init__.py | 6 +
 .../pet_insurance_consult_agent.py | 228 +++++++++
 .../pet_insurance_consult_agent.yaml | 15 +
 .../pet_question_planning_agent.py | 30 ++
 .../pet_question_planning_agent.yaml | 12 +
 .../pet_question_rewrite_agent.py | 119 +++++
 .../pet_question_rewrite_agent.yaml | 12 +
 .../single_agent_case/__init__.py | 6 +
 .../pet_insurance_react_agent.py | 227 +++++++++
 .../pet_insurance_react_agent.yaml | 18 +
 .../template_agent_case/__init__.py | 6 +
 .../pet_consult_pro_agent.py | 58 +++
 .../pet_consult_pro_agent.yaml | 15 +
 .../agentic/agent/agent_template/__init__.py | 7 +
 .../agent_template/pet_agent_template.py | 206 ++++++++
 .../agentic/knowledge/__init__.py | 7 +
 .../knowledge/doc_processor/__init__.py | 7 +
 .../query_keyword_extractor.yaml | 7 +
 .../knowledge/query_paraphraser/__init__.py | 7 +
 .../custom_query_keyword_extractor.yaml | 7 +
 .../agentic/knowledge/rag_router/__init__.py | 7 +
 .../knowledge/rag_router/nlu_rag_router.yaml | 10 +
 .../agentic/knowledge/store/__init__.py | 7 +
 .../intelligence/agentic/llm/__init__.py | 7 +
 .../intelligence/agentic/llm/maya/__init__.py | 6 +
 .../llm/maya/pet_insurance_maya_llm.py | 247 ++++++++++
 .../agentic/llm/maya/pet_qwen_72b_stream.yaml | 21 +
 .../intelligence/agentic/llm/qwen_llm.yaml | 8 +
 .../intelligence/agentic/memory/__init__.py | 7 +
 .../agentic/memory/demo_memory_a.yaml | 12 +
 .../agentic/memory/demo_memory_b.yaml | 12 +
 .../memory/memory_compressor/__init__.py | 7 +
 .../agentic/memory/memory_storage/__init__.py | 7 +
 .../memory_storage/chroma_memory_storage.yaml | 9 +
 .../intelligence/agentic/prompt/__init__.py | 7 +
 .../pet_insurance_multi_agent/__init__.py | 6 +
 .../pet_insurance_multi_agent_cn.yaml | 43 ++
 .../pet_planning_query_agent_cn.yaml | 56 +++
 .../pet_rewrite_query_agent_cn.yaml | 27 ++
 .../prompt/pet_react_agent/__init__.py | 6 +
 .../pet_insurance_react_prompt.yaml | 37 ++
 .../intelligence/agentic/tool/__init__.py | 7 +
 .../agentic/tool/google_search_tool.py | 34 ++
 .../agentic/tool/google_search_tool.yaml | 12 +
 .../agentic/tool/mock_search_tool.py | 45 ++
 .../agentic/tool/pet_ins/__init__.py | 6 +
 .../tool/pet_ins/pet_insurance_info_tool.py | 19 +
 .../tool/pet_ins/pet_insurance_info_tool.yaml | 12 +
 .../pet_insurance_search_context_tool.py | 78 +++
 .../pet_insurance_search_context_tool.yaml | 
31 ++ .../agentic/work_pattern/__init__.py | 7 + .../intelligence/service/__init__.py | 7 + .../service/agent_service/__init__.py | 7 + .../pet_insurance_react_service.yaml | 5 + .../service/classic_service/__init__.py | 7 + .../intelligence/test/__init__.py | 7 + .../pet_insurance_consult_pro_agent_test.py | 23 + .../test/pet_insurance_multi_agent_test.py | 20 + .../test/pet_insurance_react_agent_test.py | 20 + .../intelligence/utils/__init__.py | 7 + .../intelligence/utils/common/__init__.py | 7 + .../utils/common/jsonl_file_util.py | 91 ++++ .../utils/common/txt_file_util.py | 50 ++ .../intelligence/utils/constant/__init__.py | 6 + .../utils/constant/prod_description.py | 160 ++++++ ...67\344\276\213\346\226\207\346\241\243.md" | 456 ++++++++++++++++++ ...53\351\200\237\346\220\255\345\273\272.md" | 177 +++++++ 80 files changed, 3072 insertions(+), 16 deletions(-) create mode 100644 au_sample_standard_app/__init__.py create mode 100644 au_sample_standard_app/bootstrap/__init__.py create mode 100644 au_sample_standard_app/bootstrap/intelligence/__init__.py create mode 100644 au_sample_standard_app/bootstrap/intelligence/server_application.py create mode 100644 au_sample_standard_app/config/config.toml create mode 100644 au_sample_standard_app/config/custom_key.toml create mode 100644 au_sample_standard_app/config/gunicorn_config.toml create mode 100644 au_sample_standard_app/config/log_config.toml create mode 100644 au_sample_standard_app/intelligence/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/agent/agent_template/__init__.py create mode 100644 
au_sample_standard_app/intelligence/agentic/agent/agent_template/pet_agent_template.py create mode 100644 au_sample_standard_app/intelligence/agentic/knowledge/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/knowledge/rag_router/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/knowledge/store/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/llm/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/llm/maya/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py create mode 100644 au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/llm/qwen_llm.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/memory/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/memory/demo_memory_a.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/memory/demo_memory_b.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/memory/memory_compressor/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/memory/memory_storage/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/prompt/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/tool/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/tool/google_search_tool.py create mode 100644 au_sample_standard_app/intelligence/agentic/tool/google_search_tool.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/tool/mock_search_tool.py create mode 100644 au_sample_standard_app/intelligence/agentic/tool/pet_ins/__init__.py create mode 100644 au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py create mode 100644 au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py create mode 100644 
au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml create mode 100644 au_sample_standard_app/intelligence/agentic/work_pattern/__init__.py create mode 100644 au_sample_standard_app/intelligence/service/__init__.py create mode 100644 au_sample_standard_app/intelligence/service/agent_service/__init__.py create mode 100644 au_sample_standard_app/intelligence/service/agent_service/pet_insurance_react_service.yaml create mode 100644 au_sample_standard_app/intelligence/service/classic_service/__init__.py create mode 100644 au_sample_standard_app/intelligence/test/__init__.py create mode 100644 au_sample_standard_app/intelligence/test/pet_insurance_consult_pro_agent_test.py create mode 100644 au_sample_standard_app/intelligence/test/pet_insurance_multi_agent_test.py create mode 100644 au_sample_standard_app/intelligence/test/pet_insurance_react_agent_test.py create mode 100644 au_sample_standard_app/intelligence/utils/__init__.py create mode 100644 au_sample_standard_app/intelligence/utils/common/__init__.py create mode 100644 au_sample_standard_app/intelligence/utils/common/jsonl_file_util.py create mode 100644 au_sample_standard_app/intelligence/utils/common/txt_file_util.py create mode 100644 au_sample_standard_app/intelligence/utils/constant/__init__.py create mode 100644 au_sample_standard_app/intelligence/utils/constant/prod_description.py create mode 100644 "docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/API\351\200\202\351\205\215\346\240\267\344\276\213\346\226\207\346\241\243.md" create mode 100644 "docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/AgentUniverse\345\277\253\351\200\237\346\220\255\345\273\272.md" diff --git a/agentuniverse/workflow/node/agent_node.py b/agentuniverse/workflow/node/agent_node.py index ac0aa74b..fa66d577 100644 --- a/agentuniverse/workflow/node/agent_node.py +++ b/agentuniverse/workflow/node/agent_node.py @@ -35,7 +35,6 @@ def _run(self, workflow_output: WorkflowOutput) -> NodeOutput: inputs: AgentNodeInputParams = self._data.inputs param_map = { - 'prompt': None, 'id': None } @@ -47,28 +46,14 @@ def _run(self, workflow_output: WorkflowOutput) -> NodeOutput: param_map[agent_param.name] = agent_param.value.get('content', None) agent_id = param_map['id'] - prompt = param_map['prompt'] agent: Agent = AgentManager().get_instance_obj(agent_id) if agent is None: raise ValueError("No agent with id {} was found.".format(agent_id)) - # Extract variables from the prompt template - template_variables = re.findall(r'\{\{(.*?)\}\}', prompt) - agent_input_params = self._resolve_input_params(inputs.input_param, workflow_output) - # Replace variables in the prompt - try: - for var in template_variables: - if var not in agent_input_params: - raise KeyError(f"The variable '{var}' is not found in the input params.") - prompt = prompt.replace(f'{{{{{var}}}}}', - str(agent_input_params[var]) if agent_input_params[var] else '') - except KeyError as e: - raise ValueError(f"Error processing template variables: {e}") - - agent_output: OutputObject = agent.run(input=prompt) + agent_output: OutputObject = agent.run(**agent_input_params) agent_output_dict = agent_output.to_dict() output_params: List[NodeOutputParams] = self._data.outputs diff --git a/au_sample_standard_app/__init__.py b/au_sample_standard_app/__init__.py new file mode 100644 index 00000000..21bea939 --- /dev/null +++ b/au_sample_standard_app/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 
2024/4/9 16:29 +# @Author : jerry.zzw +# @Email : jerry.zzw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/bootstrap/__init__.py b/au_sample_standard_app/bootstrap/__init__.py new file mode 100644 index 00000000..757166c4 --- /dev/null +++ b/au_sample_standard_app/bootstrap/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:20 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/bootstrap/intelligence/__init__.py b/au_sample_standard_app/bootstrap/intelligence/__init__.py new file mode 100644 index 00000000..cc82af9b --- /dev/null +++ b/au_sample_standard_app/bootstrap/intelligence/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:21 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/bootstrap/intelligence/server_application.py b/au_sample_standard_app/bootstrap/intelligence/server_application.py new file mode 100644 index 00000000..3f59fb94 --- /dev/null +++ b/au_sample_standard_app/bootstrap/intelligence/server_application.py @@ -0,0 +1,24 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/4/8 20:58 +# @Author : jerry.zzw +# @Email : jerry.zzw@antgroup.com +# @FileName: server_application.py +from agentuniverse.agent_serve.web.web_booster import start_web_server +from agentuniverse.base.agentuniverse import AgentUniverse + + +class ServerApplication: + """ + Server application. + """ + + @classmethod + def start(cls): + AgentUniverse().start() + start_web_server() + + +if __name__ == "__main__": + ServerApplication.start() diff --git a/au_sample_standard_app/config/config.toml b/au_sample_standard_app/config/config.toml new file mode 100644 index 00000000..a846589d --- /dev/null +++ b/au_sample_standard_app/config/config.toml @@ -0,0 +1,65 @@ +[BASE_INFO] +# The app name will be applied to all processes including agent service integration. +appname = 'demo_app' + +[CORE_PACKAGE] +# Perform a full component scan and registration for all the paths under this list. +default = ['au_sample_standard_app.intelligence.agentic'] +# Scan and register agent components for all paths under this list, with priority over the default. +agent = ['au_sample_standard_app.intelligence.agentic.agent'] +# Scan and register knowledge components for all paths under this list, with priority over the default. +knowledge = ['au_sample_standard_app.intelligence.agentic.knowledge'] +# Scan and register llm components for all paths under this list, with priority over the default. +llm = ['au_sample_standard_app.intelligence.agentic.llm'] +# Scan and register planner components for all paths under this list, with priority over the default. +planner = [] +# Scan and register tool components for all paths under this list, with priority over the default. +tool = ['au_sample_standard_app.intelligence.agentic.tool'] +# Scan and register memory components for all paths under this list, with priority over the default. +memory = ['au_sample_standard_app.intelligence.agentic.memory'] +# Scan and register service components for all paths under this list, with priority over the default. +service = ['au_sample_standard_app.intelligence.service.agent_service'] +# Scan and register prompt components for all paths under this list, with priority over the default. 
+prompt = ['au_sample_standard_app.intelligence.agentic.prompt'] +# Scan and register product components for all paths under this list, with priority over the default. +#product = ['au_sample_standard_app.platform.difizen.product'] +# Scan and register workflow components for all paths under this list, with priority over the default. +#workflow = ['au_sample_standard_app.platform.difizen.workflow'] +# Scan and register store components for all paths under this list, with priority over the default. +store = ['au_sample_standard_app.intelligence.agentic.knowledge.store'] +# Scan and register rag_router components for all paths under this list, with priority over the default. +rag_router = ['au_sample_standard_app.intelligence.agentic.knowledge.rag_router'] +# Scan and register doc_processor components for all paths under this list, with priority over the default. +doc_processor = ['au_sample_standard_app.intelligence.agentic.knowledge.doc_processor'] +# Scan and register query_paraphraser components for all paths under this list, with priority over the default. +query_paraphraser = ['au_sample_standard_app.intelligence.agentic.knowledge.query_paraphraser'] +# Scan and register memory_compressor components for all paths under this list, with priority over the default. +memory_compressor = ['au_sample_standard_app.intelligence.agentic.memory.memory_compressor'] +# Scan and register memory_storage components for all paths under this list, with priority over the default. +memory_storage = ['au_sample_standard_app.intelligence.agentic.memory.memory_storage'] + +[SUB_CONFIG_PATH] +# Log config file path, an absolute path or a relative path based on the dir where the current config file is located. +log_config_path = './log_config.toml' +# Custom key file path, use to save your own secret key like open ai or sth else. REMEMBER TO ADD IT TO .gitignore. +custom_key_path = './custom_key.toml' + +[DB] +# A sqlalchemy db uri used for storing various info, for example, service request, generated during application running. +# If it's empty, agentUniverse will create a local sqlite db as default choice. +system_db_uri = '' + +[GUNICORN] +# Use gunicorn as http server when activate is 'true', or only use flask. +activate = 'false' +# Gunicorn config file path, an absolute path or a relative path based on the dir where the current config file is located. +gunicorn_config_path = './gunicorn_config.toml' + +[GRPC] +activate = 'false' +max_workers = 10 +server_port = 50051 + +[MONITOR] +activate = false +dir = './monitor' diff --git a/au_sample_standard_app/config/custom_key.toml b/au_sample_standard_app/config/custom_key.toml new file mode 100644 index 00000000..674294a7 --- /dev/null +++ b/au_sample_standard_app/config/custom_key.toml @@ -0,0 +1,43 @@ +# Example file of custom_key.toml. Rename to custom_key.toml while using. +[KEY_LIST] +# Perform a full component scan and registration for all the paths under this list. 
+#example_key = 'AnExampleKey' + +# models +#kimi default name: default_kimi_llm +#KIMI_API_KEY='' +# +##Qwen default name: default_qwen_llm +DASHSCOPE_API_KEY = 'sk-xxxxx' +# +##Opean default name: default_openai_llm +#OPENAI_API_KEY='sk-xxxxxx' +# +##DEEPSEEK default name: default_deepseek_llm +#DEEPSEEK_API_KEY='' +#DEEPSEEK_API_BASE='' +# +## WenXin default name: default_wenxin_llm +#QIANFAN_AK='' +#QIANFAN_SK='' +# +##Ollama default name: default_ollama_llm +#OLLAMA_BASE_URL='' +# +##claude default name: default_claude_llm +#ANTHROPIC_API_KEY='' +#ANTHROPIC_API_URL='xxxxxx' +# +##baichuan default name: default_baichuan_llm +#BAICHUAN_API_KEY='xxxxxx' + + +# search +#Google search +#SERPER_API_KEY='' +# +##search api +SEARCHAPI_API_KEY = '3KXgFLMt7r7RhwWe4JNK7GQr' +# +##bing seacrh +#BING_SUBSCRIPTION_KEY='xxxxxx' \ No newline at end of file diff --git a/au_sample_standard_app/config/gunicorn_config.toml b/au_sample_standard_app/config/gunicorn_config.toml new file mode 100644 index 00000000..3b8836de --- /dev/null +++ b/au_sample_standard_app/config/gunicorn_config.toml @@ -0,0 +1,8 @@ +[GUNICORN_CONFIG] +bind = '0.0.0.0:8888' +backlog = 2048 +worker_class = 'gthread' +threads = 4 +workers = 5 +timeout = 60 +keepalive = 10 diff --git a/au_sample_standard_app/config/log_config.toml b/au_sample_standard_app/config/log_config.toml new file mode 100644 index 00000000..d8d1c09e --- /dev/null +++ b/au_sample_standard_app/config/log_config.toml @@ -0,0 +1,32 @@ +[LOG_CONFIG] +[LOG_CONFIG.BASIC_CONFIG] +# Loguru log level. +log_level = "INFO" +# Output path of the log file. If value is empty, agentuniverse will create a subdir under your workdir to save logs. +log_path = "./.test_log_dir" +# Specifies the log rotation policy, controlling when a new log file is created. It can be a time period +# (e.g., "1 week"), a file size (e.g., "100 MB"), or a function returning True when rotation should occur. +log_rotation = "100 MB" +# Specifies the duration to keep old log files. It can be a time span (e.g., "30 days") or a function to filter the +# files to be retained. Files outside this policy are purged. +log_retention = "7 days" + +[LOG_CONFIG.EXTEND_MODULE] +# Whether you use Aliyun Simple Log Service (SLS), if the value is "True", you should fill in the ALIYUN_SLS_CONFIG below. +sls_log = "False" + +[LOG_CONFIG.ALIYUN_SLS_CONFIG] +# Aliyun sls endpoint. +sls_endpoint = "mock_endpoint" +# Your sls log project name. +sls_project = "mock_project" +# Your sls log store name. +sls_log_store = "mock_log_store" +# Aliyun sls access_key_id. +access_key_id = "mock_key_id" +# Aliyun sls access_key_secret. +access_key_secret = "mock_key_secret" +# Log queue max size, agentuniverse uses a queue to save the logs to be sent, they will be sent periodically. +sls_log_queue_max_size = 1000 +# Interval of sending logs to aliyun sls. 
+sls_log_send_interval = 3.0 diff --git a/au_sample_standard_app/intelligence/__init__.py b/au_sample_standard_app/intelligence/__init__.py new file mode 100644 index 00000000..3cf5b665 --- /dev/null +++ b/au_sample_standard_app/intelligence/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:19 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/__init__.py b/au_sample_standard_app/intelligence/agentic/__init__.py new file mode 100644 index 00000000..8089b750 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:41 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/__init__.py b/au_sample_standard_app/intelligence/agentic/agent/__init__.py new file mode 100644 index 00000000..f8808ca8 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:42 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/__init__.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/__init__.py new file mode 100644 index 00000000..06a05cdb --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/30 19:31 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py new file mode 100644 index 00000000..ca99b8ba --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py @@ -0,0 +1,6 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 11:04 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py new file mode 100644 index 00000000..b9200b71 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py @@ -0,0 +1,228 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 19:57 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: pet_insurance_consult_agent.py +import json +from typing import Optional, Any, List +from queue import Queue + +from agentuniverse.agent.agent_manager import AgentManager +from langchain_core.output_parsers import StrOutputParser +from langchain_core.runnables import RunnableSerializable + +from agentuniverse.agent.action.knowledge.knowledge import Knowledge +from agentuniverse.agent.action.knowledge.knowledge_manager import KnowledgeManager +from agentuniverse.agent.action.knowledge.store.document import Document +from agentuniverse.agent.action.tool.tool import 
Tool +from agentuniverse.agent.action.tool.tool_manager import ToolManager +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.input_object import InputObject +from agentuniverse.agent.memory.memory import Memory +from agentuniverse.agent.memory.memory_manager import MemoryManager +from agentuniverse.agent.memory.message import Message +from agentuniverse.base.config.component_configer.configers.agent_configer import AgentConfiger +from agentuniverse.base.util.agent_util import assemble_memory_input, assemble_memory_output +from agentuniverse.base.util.common_util import stream_output +from agentuniverse.base.util.memory_util import generate_messages +from agentuniverse.base.util.prompt_util import process_llm_token +from agentuniverse.llm.llm import LLM +from agentuniverse.llm.llm_manager import LLMManager +from agentuniverse.prompt.chat_prompt import ChatPrompt +from agentuniverse.prompt.prompt import Prompt +from agentuniverse.prompt.prompt_manager import PromptManager +from agentuniverse.prompt.prompt_model import AgentPromptModel + + +class PetInsuranceConsultAgent(Agent): + llm_name: Optional[str] = '' + memory_name: Optional[str] = None + tool_names: Optional[list[str]] = None + knowledge_names: Optional[list[str]] = None + prompt_version: Optional[str] = None + + def input_keys(self) -> list[str]: + """Return the input keys of the Agent.""" + return ['input'] + + def output_keys(self) -> list[str]: + """Return the output keys of the Agent.""" + return ['output'] + + def parse_input(self, input_object: InputObject, agent_input: dict) -> dict: + agent_input['input'] = input_object.get_data('input') + return agent_input + + def parse_result(self, agent_result: dict) -> dict: + return agent_result + + def execute(self, input_object: InputObject, agent_input: dict, **kwargs) -> dict: + memory: Memory = self.process_memory(agent_input, **kwargs) + llm: LLM = self.process_llm(**kwargs) + prompt: Prompt = self.process_prompt(agent_input, **kwargs) + return self.customized_execute(input_object, agent_input, memory, llm, prompt, **kwargs) + + def customized_execute(self, input_object: InputObject, agent_input: dict, memory: Memory, llm: LLM, prompt: Prompt, + **kwargs) -> dict: + assemble_memory_input(memory, agent_input) + + # 改写问题 + detail_tool = ToolManager().get_instance_obj('pet_insurance_info_tool') + tool_res = detail_tool.run(query='宠物医保') + agent_input['prod_description'] = tool_res + rewrite_agent: Agent = AgentManager().get_instance_obj('pet_question_rewrite_agent') + rewrite_agent_res = rewrite_agent.run(**agent_input) + agent_input['rewrite_question'] = rewrite_agent_res.get_data('rewrite_output') + # 问题拆分 + planning_agent_res = AgentManager().get_instance_obj('pet_question_planning_agent').run(**agent_input) + split_questions = planning_agent_res.get_data('planning_output') + sub_query_list = json.loads(split_questions).get('sub_query_list') + + # 问题检索 + search_tool: Tool = ToolManager().get_instance_obj('pet_insurance_search_context_tool') + search_res = '' + for sub_query in sub_query_list: + search_res += search_tool.run(input=sub_query) + '\n' + + agent_input['search_context'] = search_res + + # llm表达 + process_llm_token(llm, prompt.as_langchain(), self.agent_model.profile, agent_input) + chain = prompt.as_langchain() | llm.as_langchain_runnable( + self.agent_model.llm_params()) | StrOutputParser() + res = self.invoke_chain(chain, agent_input, input_object, **kwargs) + assemble_memory_output(memory=memory, + agent_input=agent_input, + content=f"Human: 
{agent_input.get('input')}, AI: {res}") + self.add_output_stream(input_object.get_data('output_stream'), res) + return {**agent_input, 'output': res} + + def process_llm(self, **kwargs) -> LLM: + return LLMManager().get_instance_obj(self.llm_name) + + def process_memory(self, agent_input: dict, **kwargs) -> Memory | None: + memory: Memory = MemoryManager().get_instance_obj(component_instance_name=self.memory_name) + if memory is None: + return None + + chat_history: list = agent_input.get('chat_history') + # generate a list of temporary messages from the given chat history and add them to the memory instance. + temporary_messages: list[Message] = generate_messages(chat_history) + if temporary_messages: + memory.add(temporary_messages, **agent_input) + + params: dict = dict() + params['agent_llm_name'] = self.llm_name + return memory.set_by_agent_model(**params) + + def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt: + expert_framework = agent_input.pop('expert_framework', '') or '' + + profile: dict = self.agent_model.profile + + profile_instruction = profile.get('instruction') + profile_instruction = expert_framework + profile_instruction if profile_instruction else profile_instruction + + profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'), + target=profile.get('target'), + instruction=profile_instruction) + + # get the prompt by the prompt version + version_prompt: Prompt = PromptManager().get_instance_obj(self.prompt_version) + + if version_prompt is None and not profile_prompt_model: + raise Exception("Either the `prompt_version` or `introduction & target & instruction`" + " in agent profile configuration should be provided.") + if version_prompt: + version_prompt_model: AgentPromptModel = AgentPromptModel( + introduction=getattr(version_prompt, 'introduction', ''), + target=getattr(version_prompt, 'target', ''), + instruction=expert_framework + getattr(version_prompt, 'instruction', '')) + profile_prompt_model = profile_prompt_model + version_prompt_model + + chat_prompt = ChatPrompt().build_prompt(profile_prompt_model, ['introduction', 'target', 'instruction']) + image_urls: list = agent_input.pop('image_urls', []) or [] + if image_urls: + chat_prompt.generate_image_prompt(image_urls) + return chat_prompt + + def invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, input_object: InputObject, + **kwargs): + if not input_object.get_data('output_stream'): + res = chain.invoke(input=agent_input) + return res + result = [] + for token in chain.stream(input=agent_input): + stream_output(input_object.get_data('output_stream', None), { + 'type': 'token', + 'data': { + 'chunk': token, + 'agent_info': self.agent_model.info + } + }) + result.append(token) + return "".join(result) + + async def async_invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, + input_object: InputObject, **kwargs): + if not input_object.get_data('output_stream'): + res = await chain.ainvoke(input=agent_input) + return res + result = [] + async for token in chain.astream(input=agent_input): + stream_output(input_object.get_data('output_stream', None), { + 'type': 'token', + 'data': { + 'chunk': token, + 'agent_info': self.agent_model.info + } + }) + result.append(token) + return "".join(result) + + def invoke_tools(self, input_object: InputObject, **kwargs): + if not self.tool_names: + return '' + + tool_results: list = list() + + for tool_name in self.tool_names: + tool: Tool = 
ToolManager().get_instance_obj(tool_name) + if tool is None: + continue + tool_input = {key: input_object.get_data(key) for key in tool.input_keys} + tool_results.append(str(tool.run(**tool_input))) + return "\n\n".join(tool_results) + + def invoke_knowledge(self, query_str: str, input_object: InputObject, **kwargs): + if not self.knowledge_names or not query_str: + return '' + + knowledge_results: list = list() + + for knowledge_name in self.knowledge_names: + knowledge: Knowledge = KnowledgeManager().get_instance_obj(knowledge_name) + if knowledge is None: + continue + knowledge_res: List[Document] = knowledge.query_knowledge( + query_str=query_str, + **input_object.to_dict() + ) + knowledge_results.append(knowledge.to_llm(knowledge_res)) + return "\n\n".join(knowledge_results) + + def validate_required_params(self): + pass + + def add_output_stream(self, output_stream: Queue, agent_output: str) -> None: + pass + + def initialize_by_component_configer(self, component_configer: AgentConfiger) -> 'PetInsuranceConsultAgent': + super().initialize_by_component_configer(component_configer) + self.prompt_version = self.agent_model.profile.get('prompt_version', 'default_rag_agent.cn') + self.llm_name = self.agent_model.profile.get('llm_model', {}).get('name') + self.memory_name = self.agent_model.memory.get('name') + self.tool_names = self.agent_model.action.get('tool', []) + self.knowledge_names = self.agent_model.action.get('knowledge', []) + return self diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml new file mode 100644 index 00000000..0b459268 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml @@ -0,0 +1,15 @@ +info: + name: 'pet_insurance_consult_agent' + description: '宠物险智能顾问,提供“宠物医保”产品的智能问答' +profile: + prompt_version: pet_insurance_multi_agent.cn + llm_model: + name: 'ant_maya_qwen_72b_stream' + temperature: 0.1 +action: + tool: + - search_context_tool +metadata: + type: 'AGENT' + module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_insurance_consult_agent' + class: 'PetInsuranceConsultAgent' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py new file mode 100644 index 00000000..525f13a7 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py @@ -0,0 +1,30 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 20:58 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: pet_insurance_rewrite_agent.py +from agentuniverse.agent.input_object import InputObject +from agentuniverse.base.util.logging.logging_util import LOGGER + +from au_sample_standard_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_rewrite_agent import \ + PetInsuranceRewriteAgent + + +class PetInsurancePlanningAgent(PetInsuranceRewriteAgent): + + def input_keys(self) -> list[str]: + return ['input', 'prod_description'] + + def output_keys(self) -> list[str]: + return ['planning_output'] + + def parse_input(self, input_object: InputObject, agent_input: 
dict) -> dict: + agent_input['input'] = input_object.get_data('input') + agent_input['prod_description'] = input_object.get_data('prod_description') + return agent_input + + def parse_result(self, agent_result: dict) -> dict: + planning_output = agent_result['output'] + LOGGER.info(f'智能体 pet_question_planning_agent 执行结果为: {planning_output}') + return {**agent_result, 'planning_output': agent_result['output']} diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml new file mode 100644 index 00000000..5ecb86b5 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml @@ -0,0 +1,12 @@ +info: + name: 'pet_question_planning_agent' + description: '问题策划,对改写后的问题进行拆分' +profile: + prompt_version: 'pet_planning_query_agent.cn' + llm_model: + name: 'ant_maya_qwen_72b_stream' + temperature: 0.3 +metadata: + type: 'AGENT' + module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_planning_agent' + class: 'PetInsurancePlanningAgent' diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py new file mode 100644 index 00000000..db089ee8 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py @@ -0,0 +1,119 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 20:58 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: pet_insurance_rewrite_agent.py +from typing import Optional, Any + +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.input_object import InputObject +from agentuniverse.base.config.component_configer.configers.agent_configer import AgentConfiger +from agentuniverse.base.util.common_util import stream_output +from agentuniverse.base.util.logging.logging_util import LOGGER +from agentuniverse.base.util.prompt_util import process_llm_token +from agentuniverse.llm.llm import LLM +from agentuniverse.llm.llm_manager import LLMManager +from agentuniverse.prompt.chat_prompt import ChatPrompt +from agentuniverse.prompt.prompt import Prompt +from agentuniverse.prompt.prompt_manager import PromptManager +from agentuniverse.prompt.prompt_model import AgentPromptModel +from langchain_core.output_parsers import StrOutputParser +from langchain_core.runnables import RunnableSerializable + + +class PetInsuranceRewriteAgent(Agent): + llm_name: Optional[str] = '' + memory_name: Optional[str] = None + tool_names: Optional[list[str]] = None + knowledge_names: Optional[list[str]] = None + prompt_version: Optional[str] = None + + def input_keys(self) -> list[str]: + return ['input', 'prod_description'] + + def output_keys(self) -> list[str]: + return ['rewrite_output'] + + def parse_input(self, input_object: InputObject, agent_input: dict) -> dict: + agent_input['input'] = input_object.get_data('input') + agent_input['prod_description'] = input_object.get_data('prod_description') + return agent_input + + def parse_result(self, agent_result: dict) -> dict: + rewrite_output = agent_result['output'] + LOGGER.info(f'智能体 pet_question_planning_agent 执行结果为: {rewrite_output}') + return {**agent_result, 
'rewrite_output': agent_result['output']} + + def execute(self, input_object: InputObject, agent_input: dict, **kwargs) -> dict: + llm: LLM = self.process_llm(**kwargs) + prompt: Prompt = self.process_prompt(agent_input, **kwargs) + return self.customized_execute(input_object, agent_input, llm, prompt, **kwargs) + + def customized_execute(self, input_object: InputObject, agent_input: dict, llm: LLM, prompt: Prompt, + **kwargs) -> dict: + process_llm_token(llm, prompt.as_langchain(), self.agent_model.profile, agent_input) + chain = prompt.as_langchain() | llm.as_langchain_runnable( + self.agent_model.llm_params()) | StrOutputParser() + res = self.invoke_chain(chain, agent_input, input_object, **kwargs) + return {**agent_input, 'output': res} + + def process_llm(self, **kwargs) -> LLM: + return LLMManager().get_instance_obj(self.llm_name) + + def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt: + expert_framework = agent_input.pop('expert_framework', '') or '' + + profile: dict = self.agent_model.profile + + profile_instruction = profile.get('instruction') + profile_instruction = expert_framework + profile_instruction if profile_instruction else profile_instruction + + profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'), + target=profile.get('target'), + instruction=profile_instruction) + + # get the prompt by the prompt version + version_prompt: Prompt = PromptManager().get_instance_obj(self.prompt_version) + + if version_prompt is None and not profile_prompt_model: + raise Exception("Either the `prompt_version` or `introduction & target & instruction`" + " in agent profile configuration should be provided.") + if version_prompt: + version_prompt_model: AgentPromptModel = AgentPromptModel( + introduction=getattr(version_prompt, 'introduction', ''), + target=getattr(version_prompt, 'target', ''), + instruction=expert_framework + getattr(version_prompt, 'instruction', '')) + profile_prompt_model = profile_prompt_model + version_prompt_model + + chat_prompt = ChatPrompt().build_prompt(profile_prompt_model, ['introduction', 'target', 'instruction']) + image_urls: list = agent_input.pop('image_urls', []) or [] + if image_urls: + chat_prompt.generate_image_prompt(image_urls) + return chat_prompt + + def invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, input_object: InputObject, + **kwargs): + if not input_object.get_data('output_stream'): + res = chain.invoke(input=agent_input) + return res + result = [] + for token in chain.stream(input=agent_input): + stream_output(input_object.get_data('output_stream', None), { + 'type': 'token', + 'data': { + 'chunk': token, + 'agent_info': self.agent_model.info + } + }) + result.append(token) + return "".join(result) + + def initialize_by_component_configer(self, component_configer: AgentConfiger) -> 'RagAgentTemplate': + super().initialize_by_component_configer(component_configer) + self.llm_name = self.agent_model.profile.get('llm_model', {}).get('name') + self.memory_name = self.agent_model.memory.get('name') + self.tool_names = self.agent_model.action.get('tool', []) + self.knowledge_names = self.agent_model.action.get('knowledge', []) + self.prompt_version = self.agent_model.profile.get('prompt_version', 'default_rag_agent.cn') + return self diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml 
b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml new file mode 100644 index 00000000..a8cd2a1b --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml @@ -0,0 +1,12 @@ +info: + name: 'pet_question_rewrite_agent' + description: '问题改写智能体' +profile: + prompt_version: 'pet_rewrite_query_agent.cn' + llm_model: + name: 'ant_maya_qwen_72b_stream' + temperature: 0.3 +metadata: + type: 'AGENT' + module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_rewrite_agent' + class: 'PetInsuranceRewriteAgent' diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py new file mode 100644 index 00000000..ca99b8ba --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py @@ -0,0 +1,6 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 11:04 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py new file mode 100644 index 00000000..78cddd4c --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py @@ -0,0 +1,227 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 11:24 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: pet_insurance_react_agent.py +from typing import Sequence, Optional, Union, List + +from agentuniverse.agent.memory.memory_manager import MemoryManager +from agentuniverse.base.util.memory_util import generate_messages +from agentuniverse.llm.llm_manager import LLMManager +from agentuniverse.prompt.chat_prompt import ChatPrompt +from agentuniverse.prompt.prompt_manager import PromptManager +from agentuniverse.prompt.prompt_model import AgentPromptModel +from langchain.agents.format_scratchpad import format_log_to_str +from langchain.agents.output_parsers import ReActSingleInputOutputParser +from langchain.agents import AgentExecutor, AgentOutputParser +from langchain.tools import Tool as LangchainTool +from langchain_core.language_models import BaseLanguageModel +from langchain_core.prompts import BasePromptTemplate +from langchain_core.runnables import RunnableConfig, RunnablePassthrough, Runnable +from langchain_core.tools import BaseTool, ToolsRenderer, render_text_description + +from agentuniverse.agent.memory.message import Message +from agentuniverse.base.config.component_configer.configers.agent_configer import AgentConfiger +from agentuniverse.base.util.agent_util import assemble_memory_input, assemble_memory_output +from agentuniverse.agent.action.knowledge.knowledge import Knowledge +from agentuniverse.agent.action.knowledge.knowledge_manager import KnowledgeManager +from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.agent.action.tool.tool_manager import ToolManager +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.agent_manager import AgentManager +from agentuniverse.agent.input_object import InputObject +from agentuniverse.agent.memory.memory 
import Memory +from agentuniverse.agent.plan.planner.react_planner.stream_callback import StreamOutPutCallbackHandler +from agentuniverse.base.util.prompt_util import process_llm_token +from agentuniverse.llm.llm import LLM +from agentuniverse.prompt.prompt import Prompt + + +class PetInsuranceReactAgent(Agent): + agent_names: Optional[list[str]] = None + stop_sequence: Optional[list[str]] = None + max_iterations: Optional[int] = None + llm_name: Optional[str] = '' + memory_name: Optional[str] = None + tool_names: Optional[list[str]] = None + knowledge_names: Optional[list[str]] = None + prompt_version: Optional[str] = None + + def input_keys(self) -> list[str]: + return ['input'] + + def output_keys(self) -> list[str]: + return ['output'] + + def parse_input(self, input_object: InputObject, agent_input: dict) -> dict: + agent_input['input'] = input_object.get_data('input') + tools_context = self.build_tools_context() + agent_input['tools'] = tools_context[0] + agent_input['tool_names'] = tools_context[1] + agent_input['agent_scratchpad'] = '' + return agent_input + + def parse_result(self, agent_result: dict) -> dict: + return {**agent_result, 'output': agent_result['output']} + + def execute(self, input_object: InputObject, agent_input: dict, **kwargs) -> dict: + """执行主体""" + memory: Memory = self.process_memory(agent_input, **kwargs) + llm: LLM = self.process_llm(**kwargs) + prompt: Prompt = self.process_prompt(agent_input, **kwargs) + return self.customized_execute(input_object, agent_input, memory, llm, prompt, **kwargs) + + def process_llm(self, **kwargs) -> LLM: + return LLMManager().get_instance_obj(self.llm_name) + + def customized_execute(self, input_object: InputObject, agent_input: dict, memory: Memory, llm: LLM, prompt: Prompt, + **kwargs) -> dict: + assemble_memory_input(memory, agent_input) + process_llm_token(llm, prompt.as_langchain(), self.agent_model.profile, agent_input) + lc_tools: List[LangchainTool] = self._convert_to_langchain_tool() + agent = self.create_react_agent(llm.as_langchain(), lc_tools, prompt.as_langchain(), + stop_sequence=self.stop_sequence, + bind_params=self.agent_model.llm_params()) + agent_executor = AgentExecutor(agent=agent, tools=lc_tools, + verbose=True, + handle_parsing_errors=True, + max_iterations=self.max_iterations) + res = agent_executor.invoke(input=agent_input, memory=memory.as_langchain() if memory else None, + chat_history=agent_input.get(memory.memory_key) if memory else '', + config=self._get_run_config(input_object)) + assemble_memory_output(memory=memory, + agent_input=agent_input, + content=f"Human: {agent_input.get('input')}, AI: {res.get('output')}") + return res + + def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt: + """处理提示词""" + expert_framework = agent_input.pop('expert_framework', '') or '' + + profile: dict = self.agent_model.profile + + profile_instruction = profile.get('instruction') + profile_instruction = expert_framework + profile_instruction if profile_instruction else profile_instruction + + profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'), + target=profile.get('target'), + instruction=profile_instruction) + + # get the prompt by the prompt version + version_prompt: Prompt = PromptManager().get_instance_obj(self.prompt_version) + + if version_prompt is None and not profile_prompt_model: + raise Exception("Either the `prompt_version` or `introduction & target & instruction`" + " in agent profile configuration should be provided.") + if 
version_prompt: + version_prompt_model: AgentPromptModel = AgentPromptModel( + introduction=getattr(version_prompt, 'introduction', ''), + target=getattr(version_prompt, 'target', ''), + instruction=expert_framework + getattr(version_prompt, 'instruction', '')) + profile_prompt_model = profile_prompt_model + version_prompt_model + + chat_prompt = ChatPrompt().build_prompt(profile_prompt_model, ['introduction', 'target', 'instruction']) + image_urls: list = agent_input.pop('image_urls', []) or [] + if image_urls: + chat_prompt.generate_image_prompt(image_urls) + return chat_prompt + + def process_memory(self, agent_input: dict, **kwargs) -> Memory | None: + """处理记忆""" + memory: Memory = MemoryManager().get_instance_obj(component_instance_name=self.memory_name) + if memory is None: + return None + + chat_history: list = agent_input.get('chat_history') + # generate a list of temporary messages from the given chat history and add them to the memory instance. + temporary_messages: list[Message] = generate_messages(chat_history) + if temporary_messages: + memory.add(temporary_messages, **agent_input) + + params: dict = dict() + params['agent_llm_name'] = self.llm_name + return memory.set_by_agent_model(**params) + + def create_react_agent( + self, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + prompt: BasePromptTemplate, + output_parser: Optional[AgentOutputParser] = None, + tools_renderer: ToolsRenderer = render_text_description, + *, + stop_sequence: Union[bool, List[str]] = True, + bind_params: Optional[dict], + ) -> Runnable: + missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference( + prompt.input_variables + list(prompt.partial_variables) + ) + if missing_vars: + raise ValueError(f"Prompt missing required variables: {missing_vars}") + + prompt = prompt.partial( + tools=tools_renderer(list(tools)), + tool_names=", ".join([t.name for t in tools]), + ) + if stop_sequence: + stop = ["\nObservation"] if stop_sequence is True else stop_sequence + llm_with_stop = llm.bind(stop=stop, **(bind_params or {})) + else: + llm_with_stop = llm.bind(**(bind_params or {})) + output_parser = output_parser or ReActSingleInputOutputParser() + agent = ( + RunnablePassthrough.assign( + agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]), + ) + | prompt + | llm_with_stop + | output_parser + ) + return agent + + def build_tools_context(self) -> tuple[str, str]: + lc_tools: [LangchainTool] = self._convert_to_langchain_tool() + tools_context = '' + if not lc_tools: + return '', '' + tool_names = [] + for lc_tool in lc_tools: + tools_context += f"tool name: {lc_tool.name}, tool description: {lc_tool.description}\n" + return tools_context, "|".join(tool_names) + + def _convert_to_langchain_tool(self) -> list[LangchainTool]: + lc_tools = [] + if self.tool_names: + for tool_name in self.tool_names: + tool: Tool = ToolManager().get_instance_obj(tool_name) + lc_tools.append(tool.as_langchain()) + if self.knowledge_names: + for knowledge_name in self.knowledge_names: + knowledge: Knowledge = KnowledgeManager().get_instance_obj(knowledge_name) + lc_tools.append(knowledge.as_langchain_tool()) + if self.agent_names: + for agent_name in self.agent_names: + agent: Agent = AgentManager().get_instance_obj(agent_name) + lc_tools.append(agent.as_langchain_tool()) + return lc_tools + + def _get_run_config(self, input_object: InputObject) -> RunnableConfig: + config = RunnableConfig() + callbacks = [] + output_stream = input_object.get_data('output_stream') + 
callbacks.append(StreamOutPutCallbackHandler(output_stream, agent_info=self.agent_model.info)) + config.setdefault("callbacks", callbacks) + return config + + def initialize_by_component_configer(self, component_configer: AgentConfiger) -> 'ReActAgentTemplate': + super().initialize_by_component_configer(component_configer) + self.llm_name = self.agent_model.profile.get('llm_model', {}).get('name') + self.memory_name = self.agent_model.memory.get('name') + self.tool_names = self.agent_model.action.get('tool', []) + self.knowledge_names = self.agent_model.action.get('knowledge', []) + self.prompt_version = self.agent_model.profile.get('prompt_version', 'default_react_agent.cn') + self.stop_sequence = self.agent_model.profile.get('stop_sequence') + self.max_iterations = self.agent_model.profile.get('max_iterations', 5) + self.agent_names = self.agent_model.action.get('agent', []) + return self diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml new file mode 100644 index 00000000..db6e4bed --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml @@ -0,0 +1,18 @@ +info: + name: 'pet_insurance_react_agent' + description: 'react模式的宠物险咨询智能体' +profile: + prompt_version: pet_insurance_react_agent.cn + llm_model: + name: 'qwen_llm' + model_name: 'qwen2-72b-instruct' + stop: 'Observation' + temperature: 0.1 +action: + tool: + - 'pet_insurance_info_tool' + - 'pet_insurance_search_context_tool' +metadata: + type: 'AGENT' + module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.single_agent_case.pet_insurance_react_agent' + class: 'PetInsuranceReactAgent' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py new file mode 100644 index 00000000..069ed4f1 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py @@ -0,0 +1,6 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 11:05 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py new file mode 100644 index 00000000..71291654 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py @@ -0,0 +1,58 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 22:59 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: pet_consult_agent_pro.py +import json + +from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.agent.action.tool.tool_manager import ToolManager +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.agent_manager import AgentManager +from agentuniverse.agent.input_object import InputObject +from agentuniverse.agent.memory.memory import Memory +from agentuniverse.base.util.agent_util import assemble_memory_output +from agentuniverse.base.util.prompt_util import process_llm_token +from 
agentuniverse.llm.llm import LLM +from agentuniverse.prompt.prompt import Prompt +from langchain_core.output_parsers import StrOutputParser + +from au_sample_standard_app.intelligence.agentic.agent.agent_template.pet_agent_template import \ + PetRagAgentTemplate + + +class PetInsuranceConsultProAgent(PetRagAgentTemplate): + + def customized_execute(self, input_object: InputObject, agent_input: dict, memory: Memory, llm: LLM, prompt: Prompt, + **kwargs) -> dict: + # 改写问题 + detail_tool = ToolManager().get_instance_obj('pet_insurance_info_tool') + tool_res = detail_tool.run(query='宠物医保') + agent_input['prod_description'] = tool_res + rewrite_agent: Agent = AgentManager().get_instance_obj('pet_question_rewrite_agent') + rewrite_agent_res = rewrite_agent.run(**agent_input) + agent_input['rewrite_question'] = rewrite_agent_res.get_data('rewrite_output') + # 问题拆分 + planning_agent_res = AgentManager().get_instance_obj('pet_question_planning_agent').run(**agent_input) + split_questions = planning_agent_res.get_data('planning_output') + sub_query_list = json.loads(split_questions).get('sub_query_list') + + # 问题检索 + search_tool: Tool = ToolManager().get_instance_obj('pet_insurance_search_context_tool') + search_res = '' + for sub_query in sub_query_list: + search_res += search_tool.run(input=sub_query) + '\n' + + agent_input['search_context'] = search_res + + # llm表达 + process_llm_token(llm, prompt.as_langchain(), self.agent_model.profile, agent_input) + chain = prompt.as_langchain() | llm.as_langchain_runnable( + self.agent_model.llm_params()) | StrOutputParser() + res = self.invoke_chain(chain, agent_input, input_object, **kwargs) + assemble_memory_output(memory=memory, + agent_input=agent_input, + content=f"Human: {agent_input.get('input')}, AI: {res}") + self.add_output_stream(input_object.get_data('output_stream'), res) + return {**agent_input, 'output': res} diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml new file mode 100644 index 00000000..03637149 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml @@ -0,0 +1,15 @@ +info: + name: 'pet_insurance_consult_pro_agent' + description: '宠物险智能顾问,提供“宠物医保”产品的智能问答' +profile: + prompt_version: pet_insurance_multi_agent.cn + llm_model: + name: 'ant_maya_qwen_72b_stream' + temperature: 0.1 +action: + tool: + - search_context_tool +metadata: + type: 'AGENT' + module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.template_agent_case.pet_consult_pro_agent' + class: 'PetInsuranceConsultProAgent' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_template/__init__.py b/au_sample_standard_app/intelligence/agentic/agent/agent_template/__init__.py new file mode 100644 index 00000000..90f33988 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/agent/agent_template/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:59 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_template/pet_agent_template.py b/au_sample_standard_app/intelligence/agentic/agent/agent_template/pet_agent_template.py new file mode 100644 index 00000000..945c8601 --- /dev/null +++ 
b/au_sample_standard_app/intelligence/agentic/agent/agent_template/pet_agent_template.py @@ -0,0 +1,206 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 22:54 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: PetInsuranceRagAgentTemplate.py +import json +from typing import Optional, Any, List +from queue import Queue + +from agentuniverse.agent.agent_manager import AgentManager +from langchain_core.output_parsers import StrOutputParser +from langchain_core.runnables import RunnableSerializable + +from agentuniverse.agent.action.knowledge.knowledge import Knowledge +from agentuniverse.agent.action.knowledge.knowledge_manager import KnowledgeManager +from agentuniverse.agent.action.knowledge.store.document import Document +from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.agent.action.tool.tool_manager import ToolManager +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.input_object import InputObject +from agentuniverse.agent.memory.memory import Memory +from agentuniverse.agent.memory.memory_manager import MemoryManager +from agentuniverse.agent.memory.message import Message +from agentuniverse.base.config.component_configer.configers.agent_configer import AgentConfiger +from agentuniverse.base.util.agent_util import assemble_memory_input, assemble_memory_output +from agentuniverse.base.util.common_util import stream_output +from agentuniverse.base.util.memory_util import generate_messages +from agentuniverse.base.util.prompt_util import process_llm_token +from agentuniverse.llm.llm import LLM +from agentuniverse.llm.llm_manager import LLMManager +from agentuniverse.prompt.chat_prompt import ChatPrompt +from agentuniverse.prompt.prompt import Prompt +from agentuniverse.prompt.prompt_manager import PromptManager +from agentuniverse.prompt.prompt_model import AgentPromptModel + + +class PetRagAgentTemplate(Agent): + llm_name: Optional[str] = '' + memory_name: Optional[str] = None + tool_names: Optional[list[str]] = None + knowledge_names: Optional[list[str]] = None + prompt_version: Optional[str] = None + + def input_keys(self) -> list[str]: + """Return the input keys of the Agent.""" + return ['input'] + + def output_keys(self) -> list[str]: + """Return the output keys of the Agent.""" + return ['output'] + + def parse_input(self, input_object: InputObject, agent_input: dict) -> dict: + agent_input['input'] = input_object.get_data('input') + return agent_input + + def parse_result(self, agent_result: dict) -> dict: + return agent_result + + def execute(self, input_object: InputObject, agent_input: dict, **kwargs) -> dict: + memory: Memory = self.process_memory(agent_input, **kwargs) + llm: LLM = self.process_llm(**kwargs) + prompt: Prompt = self.process_prompt(agent_input, **kwargs) + return self.customized_execute(input_object, agent_input, memory, llm, prompt, **kwargs) + + def customized_execute(self, input_object: InputObject, agent_input: dict, memory: Memory, llm: LLM, prompt: Prompt, + **kwargs) -> dict: + assemble_memory_input(memory, agent_input) + process_llm_token(llm, prompt.as_langchain(), self.agent_model.profile, agent_input) + chain = prompt.as_langchain() | llm.as_langchain_runnable( + self.agent_model.llm_params()) | StrOutputParser() + res = self.invoke_chain(chain, agent_input, input_object, **kwargs) + assemble_memory_output(memory=memory, + agent_input=agent_input, + content=f"Human: {agent_input.get('input')}, AI: {res}") + 
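# persist the completed Q&A turn to memory before streaming the final answer back +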
self.add_output_stream(input_object.get_data('output_stream'), res) + return {**agent_input, 'output': res} + + def process_llm(self, **kwargs) -> LLM: + return LLMManager().get_instance_obj(self.llm_name) + + def process_memory(self, agent_input: dict, **kwargs) -> Memory | None: + memory: Memory = MemoryManager().get_instance_obj(component_instance_name=self.memory_name) + if memory is None: + return None + + chat_history: list = agent_input.get('chat_history') + # generate a list of temporary messages from the given chat history and add them to the memory instance. + temporary_messages: list[Message] = generate_messages(chat_history) + if temporary_messages: + memory.add(temporary_messages, **agent_input) + + params: dict = dict() + params['agent_llm_name'] = self.llm_name + return memory.set_by_agent_model(**params) + + def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt: + expert_framework = agent_input.pop('expert_framework', '') or '' + + profile: dict = self.agent_model.profile + + profile_instruction = profile.get('instruction') + profile_instruction = expert_framework + profile_instruction if profile_instruction else profile_instruction + + profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'), + target=profile.get('target'), + instruction=profile_instruction) + + # get the prompt by the prompt version + version_prompt: Prompt = PromptManager().get_instance_obj(self.prompt_version) + + if version_prompt is None and not profile_prompt_model: + raise Exception("Either the `prompt_version` or `introduction & target & instruction`" + " in agent profile configuration should be provided.") + if version_prompt: + version_prompt_model: AgentPromptModel = AgentPromptModel( + introduction=getattr(version_prompt, 'introduction', ''), + target=getattr(version_prompt, 'target', ''), + instruction=expert_framework + getattr(version_prompt, 'instruction', '')) + profile_prompt_model = profile_prompt_model + version_prompt_model + + chat_prompt = ChatPrompt().build_prompt(profile_prompt_model, ['introduction', 'target', 'instruction']) + image_urls: list = agent_input.pop('image_urls', []) or [] + if image_urls: + chat_prompt.generate_image_prompt(image_urls) + return chat_prompt + + def invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, input_object: InputObject, + **kwargs): + if not input_object.get_data('output_stream'): + res = chain.invoke(input=agent_input) + return res + result = [] + for token in chain.stream(input=agent_input): + stream_output(input_object.get_data('output_stream', None), { + 'type': 'token', + 'data': { + 'chunk': token, + 'agent_info': self.agent_model.info + } + }) + result.append(token) + return "".join(result) + + async def async_invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, + input_object: InputObject, **kwargs): + if not input_object.get_data('output_stream'): + res = await chain.ainvoke(input=agent_input) + return res + result = [] + async for token in chain.astream(input=agent_input): + stream_output(input_object.get_data('output_stream', None), { + 'type': 'token', + 'data': { + 'chunk': token, + 'agent_info': self.agent_model.info + } + }) + result.append(token) + return "".join(result) + + def invoke_tools(self, input_object: InputObject, **kwargs): + if not self.tool_names: + return '' + + tool_results: list = list() + + for tool_name in self.tool_names: + tool: Tool = ToolManager().get_instance_obj(tool_name) + if tool is None: + continue 
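+ # assemble the tool input from the InputObject, using each tool's declared input_keys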
+ tool_input = {key: input_object.get_data(key) for key in tool.input_keys} + tool_results.append(str(tool.run(**tool_input))) + return "\n\n".join(tool_results) + + def invoke_knowledge(self, query_str: str, input_object: InputObject, **kwargs): + if not self.knowledge_names or not query_str: + return '' + + knowledge_results: list = list() + + for knowledge_name in self.knowledge_names: + knowledge: Knowledge = KnowledgeManager().get_instance_obj(knowledge_name) + if knowledge is None: + continue + knowledge_res: List[Document] = knowledge.query_knowledge( + query_str=query_str, + **input_object.to_dict() + ) + knowledge_results.append(knowledge.to_llm(knowledge_res)) + return "\n\n".join(knowledge_results) + + def validate_required_params(self): + pass + + def add_output_stream(self, output_stream: Queue, agent_output: str) -> None: + pass + + def initialize_by_component_configer(self, component_configer: AgentConfiger) -> 'PetInsuranceConsultAgent': + super().initialize_by_component_configer(component_configer) + self.prompt_version = self.agent_model.profile.get('prompt_version', 'default_rag_agent.cn') + self.llm_name = self.agent_model.profile.get('llm_model', {}).get('name') + self.memory_name = self.agent_model.memory.get('name') + self.tool_names = self.agent_model.action.get('tool', []) + self.knowledge_names = self.agent_model.action.get('knowledge', []) + return self diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/__init__.py b/au_sample_standard_app/intelligence/agentic/knowledge/__init__.py new file mode 100644 index 00000000..e739cfc4 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/knowledge/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:43 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/__init__.py b/au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/__init__.py new file mode 100644 index 00000000..57356490 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/8/14 15:58 +# @Author : fanen.lhy +# @Email : fanen.lhy@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml b/au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml new file mode 100644 index 00000000..ec1c105e --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml @@ -0,0 +1,7 @@ +name: 'query_keyword_extractor' +description: 'extract keywords from query' +top_k: 6 +metadata: + type: 'DOC_PROCESSOR' + module: 'agentuniverse.agent.action.knowledge.doc_processor.jieba_keyword_extractor' + class: 'JiebaKeywordExtractor' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/__init__.py b/au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/__init__.py new file mode 100644 index 00000000..afb8eef8 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/8/14 15:59 +# @Author : fanen.lhy +# @Email : fanen.lhy@antgroup.com +# @FileName: __init__.py.py diff 
--git a/au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml b/au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml new file mode 100644 index 00000000..91010189 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml @@ -0,0 +1,7 @@ +name: 'custom_query_keyword_extractor' +description: 'extract keywords from query origin str' +keyword_extractor: 'query_keyword_extractor' +metadata: + type: 'QUERY_PARAPHRASER' + module: 'agentuniverse.agent.action.knowledge.query_paraphraser.query_keyword_extractor' + class: 'QueryKeywordExtractor' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/rag_router/__init__.py b/au_sample_standard_app/intelligence/agentic/knowledge/rag_router/__init__.py new file mode 100644 index 00000000..c4051841 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/knowledge/rag_router/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/8/13 17:07 +# @Author : fanen.lhy +# @Email : fanen.lhy@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml b/au_sample_standard_app/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml new file mode 100644 index 00000000..f945a022 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml @@ -0,0 +1,10 @@ +name: 'nlu_rag_router' +description: 'base rag router map query to all store' +store_amount: 2 +llm: + name: qwen_llm + model_name: qwen-max +metadata: + type: 'RAG_ROUTER' + module: 'agentuniverse.agent.action.knowledge.rag_router.nlu_rag_router' + class: 'NluRagRouter' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/store/__init__.py b/au_sample_standard_app/intelligence/agentic/knowledge/store/__init__.py new file mode 100644 index 00000000..72e19321 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/knowledge/store/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/8/6 14:55 +# @Author : fanen.lhy +# @Email : fanen.lhy@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/llm/__init__.py b/au_sample_standard_app/intelligence/agentic/llm/__init__.py new file mode 100644 index 00000000..3ff93ea5 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/llm/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/4/2 17:35 +# @Author : jerry.zzw +# @Email : jerry.zzw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/llm/maya/__init__.py b/au_sample_standard_app/intelligence/agentic/llm/maya/__init__.py new file mode 100644 index 00000000..41baaadf --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/llm/maya/__init__.py @@ -0,0 +1,6 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/10 10:51 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py b/au_sample_standard_app/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py new file mode 100644 index 00000000..aebd24c3 --- /dev/null +++ 
b/au_sample_standard_app/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py @@ -0,0 +1,247 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/5/7 15:46 +# @Author : weizjajj +# @Email : weizhongjie.wzj@antgroup.com +# @FileName: ant_maya_llm.py +import json +from typing import Any, Optional, List, Union, Iterator + +import requests +import tiktoken +from agentuniverse.base.annotation.trace import trace_llm +from agentuniverse_ant_ext.llm.langchian_instance.langchain_instance import LangChainInstance +from langchain_core.callbacks import AsyncCallbackManagerForLLMRun +from langchain_core.language_models import BaseLanguageModel + +from agentuniverse.base.config.component_configer.configers.llm_configer import LLMConfiger +from agentuniverse.base.util.logging.logging_util import LOGGER +from agentuniverse.llm.llm import LLM +from agentuniverse.llm.llm_output import LLMOutput + + +class PetInsuranceMayaLLM(LLM): + model_name: Optional[str] = "AntGLM" + sceneName: Optional[str] = None + chainName: Optional[str] = None + publishDomain: Optional[str] = None + serviceId: Optional[str] = None + message_id: str = 'default' + endpoint: str = "xxx" + streaming: Optional[bool] = False + # max tokens to generate + max_tokens: int = 1024 + # max content length + max_length: int = 4096 + streaming_full: bool = True + params_filed: str = "data" + query_field: str = "query" + scene_code: Optional[str] = None + + @trace_llm + def call(self, *args: Any, **kwargs: Any) -> Union[LLMOutput, Iterator[LLMOutput]]: + """ + Call the model on the inputs. + """ + streaming = kwargs.pop("streaming") if "streaming" in kwargs else self.streaming + kwargs["streaming"] = streaming + if not streaming: + return self.no_streaming_call(*args, **kwargs) + else: + return self.streaming_call(*args, **kwargs) + + @staticmethod + def parse_output(result: dict) -> LLMOutput: + """ + Parse the output of the model. + """ + if "data" in result: + if "output_string" in result["data"]: + text = result["data"]["output_string"] + elif "result" in result["data"]: + text = result["data"]["result"] + else: + text = result["data"]["out_string"] + elif "result" in result: + if "output_string" in result["result"]: + text = result["result"]["output_string"] + elif "result" in result["result"]: + text = result["result"]["result"] + else: + text = result["result"]["out_string"] + else: + raise ValueError("No output found in response.") + return LLMOutput(text=text, raw=result) + + @staticmethod + def parse_stream_output(line: bytes, cursor: int) -> tuple[None, int] | tuple[LLMOutput, int]: + """ + Parse the output of the model. + """ + line = line.decode("utf-8") + if not line or "out_string" not in line: + return None, cursor + line_json = json.loads(line) + line_json['out_string'] = line_json['out_string'][cursor:] + size = len(line_json["out_string"]) + cursor += size + return LLMOutput(text=line_json["out_string"], raw=line_json), cursor + + def _call(self, *args: Any, **kwargs: Any) -> Union[LLMOutput, Iterator[LLMOutput]]: + return self.call(*args, **kwargs) + + async def _acall(self, *args: Any, **kwargs: Any) -> Union[LLMOutput, Iterator[LLMOutput]]: + return await self.acall(*args, **kwargs) + + def as_langchain(self) -> BaseLanguageModel: + """ + Return the LangChain representation of this LLM. + """ + return LangChainInstance(streaming=self.streaming, llm=self, llm_type="Maya") + + def max_context_length(self) -> int: + """Max context length. 
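Returns the configured max_length for this endpoint.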
+ + The total length of input tokens and generated tokens is limited by the openai model's context length. + """ + return self.max_length + + def get_num_tokens(self, text: str) -> int: + """Get the number of tokens present in the text. + + Useful for checking if an input will fit in an openai model's context window. + + Args: + text: The string input to tokenize. + + Returns: + The integer number of tokens in the text. + """ + + encoding = tiktoken.get_encoding("cl100k_base") + return len(encoding.encode(text)) + + def request_stream_data(self, prompt: str, stop: str = ''): + return { + "sceneName": self.sceneName, + "sceneCode": self.scene_code, + "chainName": self.chainName, + "publishDomain": self.publishDomain, + "serviceId": self.serviceId, + "features": {self.params_filed: json.dumps({self.query_field: prompt, "sync": False}), + "temperature": self.temperature, + "stop_words": stop, + "max_output_length": self.max_length}, + } + + def request_data(self, prompt: str, stop: str = None): + return { + "sceneName": self.sceneName, + "sceneCode": self.scene_code, + "chainName": self.chainName, + "publishDomain": self.publishDomain, + "serviceId": self.serviceId, + "features": {self.params_filed: json.dumps({self.query_field: prompt, "sync": False}), + "temperature": self.temperature, + "stop_words": stop, + "max_output_length": self.max_length}, + } + + def no_streaming_call(self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs) -> LLMOutput: + suffix = f"?sceneCode={self.scene_code}&model_name={self.model_name}" \ + if self.scene_code else f"?model_name={self.model_name}" + # 同步http包发送http请求 + resp = requests.post( + url=self.endpoint + suffix, + headers={"Content-Type": "application/json", "x-fincopilotcore-signature": "LmHJoTYJxDh3yq@2dQ"}, + data=json.dumps(self.request_data(prompt, stop[0] if stop else ''), ensure_ascii=False).encode("utf-8"), + timeout=self.request_timeout, + ) + resp = resp.json() + try: + if resp and resp["success"]: + return self.parse_output(resp) + else: + LOGGER.debug("请求ChatGLM失败:", resp) + raise Exception(resp) + except Exception as e: + LOGGER.exception("请求ChatGLM失败") + raise e + + def streaming_call(self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs): + suffix = f"?sceneCode={self.scene_code}&model_name={self.model_name}" \ + if self.scene_code else f"?model_name={self.model_name}" + # 异步http包发送http请求 + with requests.post( + url=self.endpoint + suffix, + data=json.dumps(self.request_stream_data(prompt, stop[0] if stop else ''), ensure_ascii=False).encode( + "utf-8"), + timeout=self.request_timeout, + headers={"Content-Type": "application/json", "x-fincopilotcore-signature": "LmHJoTYJxDh3yq@2dQ"}, + stream=True + ) as resp: + cursor = 0 + for line in resp.iter_lines(): + if not self.streaming_full: + cursor = 0 + output, cursor = self.parse_stream_output(line, cursor) + if output: + yield output + + def set_by_agent_model(self, **kwargs) -> 'AntMayaLLM': + copied_obj = super().set_by_agent_model(**kwargs) + """Set the parameters of the agent model.""" + if "ext_info" in kwargs: + ext_info = kwargs.get("ext_info", self.ext_info) + if "sceneName" in ext_info: + copied_obj.sceneName = ext_info.get("sceneName", self.sceneName) + if "chainName" in ext_info: + copied_obj.chainName = ext_info.get("chainName", self.chainName) + if "publishDomain" in ext_info: + copied_obj.publishDomain = 
ext_info.get("publishDomain", self.publishDomain) + if "serviceId" in ext_info: + copied_obj.serviceId = ext_info.get("serviceId", self.serviceId) + if "endpoint" in ext_info: + copied_obj.endpoint = ext_info.get("endpoint", self.endpoint) + if "max_length" in ext_info: + copied_obj.max_length = ext_info.get("max_length", self.max_length) + if "params_filed" in ext_info: + copied_obj.params_filed = ext_info.get("params_filed", self.params_filed) + if "query_field" in ext_info: + copied_obj.query_field = ext_info.get("query_field", self.query_field) + return copied_obj + + def initialize_by_component_configer(self, configer: LLMConfiger): + """Initialize the agent model by component configer.""" + self.scene_code = configer.configer.value.get('scene_code', None) + ext_info = configer.ext_info + if not ext_info: + return super().initialize_by_component_configer(configer) + if "sceneName" in ext_info: + self.sceneName = ext_info["sceneName"] + if "chainName" in ext_info: + self.chainName = ext_info["chainName"] + if "publishDomain" in ext_info: + self.publishDomain = ext_info["publishDomain"] + if "serviceId" in ext_info: + self.serviceId = ext_info["serviceId"] + if "endpoint" in ext_info: + self.endpoint = ext_info["endpoint"] + if "max_length" in ext_info: + self.max_length = ext_info["max_length"] + if "params_filed" in ext_info: + self.params_filed = ext_info["params_filed"] + if "query_field" in ext_info: + self.query_field = ext_info["query_field"] + if "streaming_full" in ext_info: + self.streaming_full = ext_info["streaming_full"] + super().initialize_by_component_configer(configer) + return self diff --git a/au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml b/au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml new file mode 100644 index 00000000..a23addc6 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml @@ -0,0 +1,21 @@ +name: 'ant_maya_qwen_72b_stream' +description: 'ant group openai llm with spi' +model_name: 'qwen_test_72b"' +max_tokens: 3000 +max_length: 1024 +streaming: True +ext_info: + sceneName: 'xxxxxx' + chainName: 'xxxxxx' + publishDomain: 'xxxx' + serviceId: 'xxxxx' + endpoint: 'xxxx' + streaming_full: False + params_filed: "data" + query_field: "query" + streaming: True +request_timeout: 60 +metadata: + type: 'LLM' + module: 'au_sample_standard_app.intelligence.agentic.llm.maya.pet_insurance_maya_llm' + class: 'PetInsuranceMayaLLM' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/llm/qwen_llm.yaml b/au_sample_standard_app/intelligence/agentic/llm/qwen_llm.yaml new file mode 100644 index 00000000..d5c01b52 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/llm/qwen_llm.yaml @@ -0,0 +1,8 @@ +name: 'qwen_llm' +description: 'demo qwen llm with spi' +model_name: 'qwen2-72b-instruct' +max_tokens: 2500 +metadata: + type: 'LLM' + module: 'agentuniverse.llm.default.qwen_openai_style_llm' + class: 'QWenOpenAIStyleLLM' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/memory/__init__.py b/au_sample_standard_app/intelligence/agentic/memory/__init__.py new file mode 100644 index 00000000..e739cfc4 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/memory/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:43 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git 
a/au_sample_standard_app/intelligence/agentic/memory/demo_memory_a.yaml b/au_sample_standard_app/intelligence/agentic/memory/demo_memory_a.yaml new file mode 100644 index 00000000..94a4b7cc --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/memory/demo_memory_a.yaml @@ -0,0 +1,12 @@ +name: 'demo_memory_a' +description: 'demo memory with chroma storage' +type: 'long_term' +memory_key: 'chat_history' +max_tokens: 3000 +memory_compressor: default_memory_compressor +memory_storages: + - chroma_memory_storage +metadata: + type: 'MEMORY' + module: 'agentuniverse.agent.memory.memory' + class: 'Memory' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/memory/demo_memory_b.yaml b/au_sample_standard_app/intelligence/agentic/memory/demo_memory_b.yaml new file mode 100644 index 00000000..35162109 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/memory/demo_memory_b.yaml @@ -0,0 +1,12 @@ +name: 'demo_memory_b' +description: 'demo memory with local storage' +type: 'short_term' +memory_key: 'chat_history' +max_tokens: 3000 +memory_compressor: default_memory_compressor +memory_storages: + - local_memory_storage +metadata: + type: 'MEMORY' + module: 'agentuniverse.agent.memory.memory' + class: 'Memory' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/memory/memory_compressor/__init__.py b/au_sample_standard_app/intelligence/agentic/memory/memory_compressor/__init__.py new file mode 100644 index 00000000..0fafc063 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/memory/memory_compressor/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:55 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/memory/memory_storage/__init__.py b/au_sample_standard_app/intelligence/agentic/memory/memory_storage/__init__.py new file mode 100644 index 00000000..f0de8d2e --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/memory/memory_storage/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/11 16:49 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml b/au_sample_standard_app/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml new file mode 100644 index 00000000..bc4d0877 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml @@ -0,0 +1,9 @@ +name: 'chroma_memory_storage' +description: 'demo chroma memory storage' +collection_name: 'memory' +persist_path: '../../db/memory.db' +embedding_model: 'dashscope_embedding' +metadata: + type: 'MEMORY_STORAGE' + module: 'agentuniverse.agent.memory.memory_storage.chroma_memory_storage' + class: 'ChromaMemoryStorage' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/prompt/__init__.py b/au_sample_standard_app/intelligence/agentic/prompt/__init__.py new file mode 100644 index 00000000..ebc407e0 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/prompt/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/5/7 20:04 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git 
a/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py b/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py new file mode 100644 index 00000000..11dc945f --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py @@ -0,0 +1,6 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 20:56 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml b/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml new file mode 100644 index 00000000..1d65b618 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml @@ -0,0 +1,43 @@ +introduction: | + # 身份设定 + 从现在开始,你的角色是一名宠物保险的保险顾问,用户从某个保险产品投保页面进来问一些宠物保险相关的问题。 +target: | + # 目标 + 需要参考当前保险产品投保页给定的产品信息和根据用户问题检索到的相关知识,有针对性、正面回答用户关于投保、增值服务、保单管理、理赔、续保、退保等宠物保险咨询问题。 + +instruction: | + # 当前保险产品投保页保险产品名称是宠物医保产品信息,产品信息是: + 检索触发句:宠物医保产品信息 + 参考信息1:宠物医保产品信息 + {prod_description} + + # 根据用户问题检索到的相关知识是: + {search_context} + + # 回答问题需要的知识 + 回答问题,给定你两种知识信息来源: + 1. 当前保险产品投保页给定的产品信息 + 2. 根据用户问题检索到的相关知识 + + # 回答问题的要求和限制: + 1. 回答内容精炼,不要有重复信息,也不要重复回答! + 2. 参考两种知识来源中的信息,简要、正面回答用户的问题。 + 3. 你在回答问题时要具备严谨性,回答内容严格以上面提到的两个知识来源内容为准,不要做这两部分给定内容外的臆想和推理, 严格按照两个知识来源内容进行输出! + 4. 请保证全面输出,不要对某条知识内容做截断和部分输出! + + #回答问题的思路 + 1.先理解用户的问题和改写的完整问题。 + 2.再理解产品信息和检索到的相关知识。 + 2.选择以下一种回答策略进行回答: + a.采纳回答:如果产品信息中的某些信息或者检索到的相关知识中的某一条knowledgeTitle对应的knowledgeContent能够完全回答该问题,则直接输出相关的knowledgeContent + b.融合回答:如果单条的知识无法完全回答用户的问题,需要参照产品信息和检索到的多条知识进行理解、融合并回答,但不要做融合回答之外的臆想和推理 + + 开始! + + 不要向用户透露你的知识来源和回答策略。 + + 用户的原问题是: {input} + 你需要回答的答案是: +metadata: + type: 'PROMPT' + version: 'pet_insurance_multi_agent.cn' diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml b/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml new file mode 100644 index 00000000..ba5a7465 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml @@ -0,0 +1,56 @@ +introduction: | + # 身份设定 + 从现在开始,你的角色是一名宠物保险的保险顾问,用户从某个保险产品投保页面进来问一些宠物保险相关的问题。 +target: | + # 目标 + 参考当前保险产品投保页对应的产品信息,用户向你提问,你负责将用户提出的问题进行拆解,生成多个子问题,以便通过搜索引擎搜索出关键信息。 + +instruction: | + + ######################################## + few shot + ## 示例1: + ------------------------ + 当前投保页的保险产品名称是: 宠物医保 + + 当前投保页的保险产品信息是: + 宠物医保 + 1、保险产品: + "宠物医保"(以下简称"商险")保障期限12个月,是付费版商业险,有三个保障:基础版、升级版、尊享版。 + ------------------------ + 用户的原问题是: 满多大才能买保险 + ------------------------ + 拆分的子问题列表为: + {{'sub_query_list': ['宠物满多大才能买宠物医保?', '宠物医保投保对宠物年龄的要求是多少?', '宠物医保投保时对宠物有哪些要求?']}} + ######################################## + + # 当前投保页的保险产品名称是: 宠物医保 + + # 当前投保页的保险产品信息是: + {prod_description} + + # 拆分问题的规则: + 1. 每个拆分后的子问题适合作为独立搜索或执行的内容。 + 2. 拆分的每个子问题必须严格遵循用户的原问题,不可以延伸问题,也不可以直接回答问题。 + 3. 拆分的每个子问题都是一个完整的问题,能够独立通过比如google搜索信息并做出解答。 + 4. 每个子问题必须是有答案的,不能是开放性的问题。 + 5. 每个子问题必须是完整的句子,不能有任何歧义。 + 6. 每个子问题应包含明确的主体和客体信息,以便通过搜索引擎搜索出关键信息。 + 7. 尽量拆分为2-3个子问题,如果是复杂的场景,全面拆分后子问题数量为3-5个。 + + # 输出格式 + 输出必须是按照以下格式化的Json代码片段,sub_query_list字段代表拆解后的子问题列表。 + {{ + "sub_query_list": list[string] + }} + + 开始! 
+ 用户的原问题是: {input} + + 注意: + 拆分的每个子问题必须严格遵循用户的原问题,不可以延伸问题,也不可以直接回答问题。 + + 你拆解后的问题是: +metadata: + type: 'PROMPT' + version: 'pet_planning_query_agent.cn' diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml b/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml new file mode 100644 index 00000000..d218216e --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml @@ -0,0 +1,27 @@ +introduction: | + # 身份设定 + 从现在开始,你的角色是一名宠物保险的保险顾问,用户从某个保险产品投保页面进来问一些宠物保险相关的问题。 +target: | + # 目标 + 用户向你提问,你负责将问题,参考对话上文,以及当前保险产品投保页对应的产品信息,将用户的问题改写成完整的问题。 + +instruction: | + 当前投保页的保险产品名称是: + 宠物医保 + + 当前投保页的保险产品信息是: + {prod_description} + + # 要求 + 1. 改写后的问题不要过于冗余、复杂。 + 2. 改写后的问题适合作为独立搜索或执行的内容。 + 3. 改写后的问题必须是有答案的,不能是开放性的问题。 + + 开始! + + 用户的原问题是: {input} + + 你改写后的问题是: +metadata: + type: 'PROMPT' + version: 'pet_rewrite_query_agent.cn' diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/__init__.py b/au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/__init__.py new file mode 100644 index 00000000..19635cca --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/__init__.py @@ -0,0 +1,6 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 20:55 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml b/au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml new file mode 100644 index 00000000..4dab6165 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml @@ -0,0 +1,37 @@ +introduction: 你是一个精通工具使用的宠物险顾问。 +target: 你的目标是根据用户的问题,使用工具回答用户关于宠物险产品的问题。尤其擅长使用宠物险咨询相关问题。 +instruction: | + 你必须优先选择使用提供的工具回答用户提出的问题,若用户没有提供工具可以根据你的通识能力解决问题。 + 你在回答时问题必须使用中文回答。 + 你必须从多个角度、维度分析用户的问题,帮助用户获取最全面的信息,需要根据背景和问题,决定搜索哪些信息可以回答问题。 + 你必须把大问题拆解为多个小问题,并规划解决步骤。 + + 您可以使用以下工具: + {tools} + + 你的回答必须严格使用以下格式: + + Question: 您必须回答的问题 + Thought: 我这一步应该做什么,为什么要这么做,我现在要使用一个工具, 不允许回答Final Answer + Action: 要使用的工具应该,值必须是 [{tool_names}] 之一 + Action Input: 工具的输入 + Observation: 工具的执行结果 + ... (Thought/Action/Action Input/Observation 的过程可以重复 N 次) + Thought: 我现在知道所有问题的最终答案了 + Final Answer: 所有问题的最终答案 + + 背景信息是: + {background} + + 开始! 
+ 注意: + 1.你的回答必须是(Thought/Action/Observation)与(Thought/Final Answer)两种格式之一 + 2.你现在必须根据上一步Observation的结果(成功、失败、报错,信息不完整),判断下一步要执行的动作 + + Question: {input} + Thought: {agent_scratchpad} + + +metadata: + type: 'PROMPT' + version: 'pet_insurance_react_agent.cn' diff --git a/au_sample_standard_app/intelligence/agentic/tool/__init__.py b/au_sample_standard_app/intelligence/agentic/tool/__init__.py new file mode 100644 index 00000000..ce960370 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/4/2 17:36 +# @Author : jerry.zzw +# @Email : jerry.zzw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.py b/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.py new file mode 100644 index 00000000..c453f81d --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.py @@ -0,0 +1,34 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/3/31 11:00 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: google_search_tool.py +from typing import Optional + +from pydantic import Field +from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper +from agentuniverse.agent.action.tool.tool import Tool, ToolInput +from agentuniverse.base.util.env_util import get_from_env + +from au_sample_standard_app.intelligence.agentic.tool.mock_search_tool import MockSearchTool + + +class GoogleSearchTool(Tool): + """The demo google search tool. + + Implement the execute method of demo google search tool, using the `GoogleSerperAPIWrapper` to implement a simple Google search. + + Note: + You need to sign up for a free account at https://serper.dev and get the serpher api key (2500 free queries). + """ + + serper_api_key: Optional[str] = Field(default_factory=lambda: get_from_env("SERPER_API_KEY")) + + def execute(self, tool_input: ToolInput): + input = tool_input.get_data("input") + if self.serper_api_key is None: + return MockSearchTool().execute(tool_input=tool_input) + # get top10 results from Google search. 
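+ # k sets the number of Serper results to fetch; gl/hl pin the search locale to US English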
+ search = GoogleSerperAPIWrapper(serper_api_key=self.serper_api_key, k=10, gl="us", hl="en", type="search") + return search.run(query=input) \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.yaml b/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.yaml new file mode 100644 index 00000000..ba87a52d --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.yaml @@ -0,0 +1,12 @@ +name: 'google_search_tool' +description: | + 该工具可以用来进行谷歌搜索,工具的输入是你想搜索的内容。 + 工具输入示例: + 示例1: 你想要搜索上海的天气时,工具的输入应该是:上海今天的天气 + 示例2: 你想要搜索日本的天气时,工具的输入应该是:日本的天气 +tool_type: 'api' +input_keys: ['input'] +metadata: + type: 'TOOL' + module: 'au_sample_standard_app.intelligence.agentic.tool.google_search_tool' + class: 'GoogleSearchTool' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/tool/mock_search_tool.py b/au_sample_standard_app/intelligence/agentic/tool/mock_search_tool.py new file mode 100644 index 00000000..ebd78516 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/mock_search_tool.py @@ -0,0 +1,45 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/4/15 13:50 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: mock_search_tool.py +from agentuniverse.agent.action.tool.tool import Tool, ToolInput + +MOCK_SEARCH_RESULT = """ +巴菲特旗下的伯克希尔·哈撒韦公司自2022年8月24日首次减持比亚迪股票以来,已经披露了13次减持行为,包括2022年9月1日、11月1日、11月8日、11月17日、12月8日、2023年1月3日、1月27日、2月9日、3月31日、5月2日等时间点。 +最近的一次减持是在2023年10月25日,当时伯克希尔·哈撒韦公司出售82.05万比亚迪H股,使其持股比例降至7.98%。 +这次减持的平均价格为每股245.86港元,但整体来看,比亚迪的股价并未受到太大影响。 + +巴菲特的投资策略包括: +1. 长期投资:巴菲特主张买入并持有优质股票,而非短期交易。他的投资策略往往以做多绩优股为主,不排除出现长期慢牛的可能。 +2. 价值投资:巴菲特注重企业的内在价值,而不是短期的股价波动。他会深入研究公司的基本面,包括其盈利能力、市场地位、管理层质量等。 +3. 能力圈原则:巴菲特建议投资者只投资于自己理解的领域,即自己的“能力圈”内。这样可以更好地评估企业的真实价值和未来前景。 +巴菲特的减持并不会改变比亚迪公司优质的基本面,比亚迪依然是中国新能源汽车行业的龙头。 + +放眼中国新能源产业版图,比亚迪绝对是举足轻重的一员。比亚迪在新能源汽车领域形成了上中下游全产业链的完整布局,从电池原材料到新能源汽车三电系统,再到动力电池回收利用,各板块协同效应显著: +1. 在中游零部件领域,公司自产自研汽车核心零部件以及三电系统,在动力电池、发动机、变速箱等关键部件上均实现自主生产,2020年3月成立的弗迪公司,进一步加快了新能源汽车核心零部件的对外销售; +2. 在下游整车领域,公司具备完成的整车制造及研发体系,在不同价格区间陆续推出多款不同车型,丰富的产品类型拉动终端需求,销量在国内自主品牌中常年稳居首位。 + +巴菲特最近的一次减持是在2023年10月25日,10月30日晚,比亚迪披露了一份亮眼的三季报。三季报显示比亚迪前三季度实现营业收入4222.75亿元,同比增长58%,实现净利润213.67亿元,同比增长130%,业绩保持高速增长态势;其中,第三季度营收1621.51亿元,同比增长38.49%;净利润104.13亿元,平均每天挣1.13亿元。 +截至10月31日,比亚迪H股报收237.4港元/股,A股报收238.54元/股,合计总市值约6568亿元。 + +尽管减持比亚迪股票,巴菲特与搭档查理芒格在2023年对比亚迪仍有极高评价,芒格2023年演讲也提及比亚迪是至今最爱的股票,且相较于美国特斯拉 (TSLA-US) 更看好比亚迪发展。巴菲特对比亚迪这家公司,对王传福这个创业者始终表现出了充分的尊重。2008年以后每年的股东大会上,巴菲特一直都为比亚迪站台。 +早在2008年9月,巴菲特就发现了新能源汽车的市场潜力,并与比亚迪签署协议,以每股港元8元的价格认购2.25亿股比亚迪的股份,约占其配售后10%的股份比例,总金额约为18亿港元,就此开启了“股神”长达14年的持股之旅。 +时至今日,比亚迪的股价早已今非昔比,按照8月30日比亚迪收盘的263港元/股计,即便不算分红,巴菲特所持比亚迪股票总体增值约31倍,价值近600亿港币,这一投资收益已足够令所有人称赞和羡慕。 +""" + + +class MockSearchTool(Tool): + """The mock search tool. + + In this tool, we mocked the search engine's answers to search for information about BYD and Warren Buffett. + + Note: + The tool is only suitable for users searching for Buffett or BYD related queries. + We recommend that you configure your `SERPER_API_KEY` and use google_search_tool to get information. 
+ """ + + def execute(self, tool_input: ToolInput): + """Demonstrates the execute method of the Tool class.""" + return MOCK_SEARCH_RESULT diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/__init__.py b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/__init__.py new file mode 100644 index 00000000..81953bc4 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/__init__.py @@ -0,0 +1,6 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/11/28 17:16 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py new file mode 100644 index 00000000..f60fd0ae --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py @@ -0,0 +1,19 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/11/28 17:17 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: pet_insurance_tool.py +from agentuniverse.agent.action.tool.tool import Tool, ToolInput + +from au_sample_standard_app.intelligence.utils.constant.prod_description import PROD_A_DESCRIPTION, PROD_B_DESCRIPTION + + +class PetInsuranceInfoTool(Tool): + def execute(self, tool_input: ToolInput): + ins_name = tool_input.get_data('ins_name') + if ins_name == '宠物医保(体验版)': + return PROD_A_DESCRIPTION + if ins_name == '宠物医保': + return PROD_B_DESCRIPTION + return PROD_B_DESCRIPTION diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml new file mode 100644 index 00000000..5c63a4ed --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml @@ -0,0 +1,12 @@ +name: 'pet_insurance_info_tool' +description: | + 该工具提供“宠物医保(体验版)”或者“宠物医保”的详细信息(仅限这两款产品),输入为产品名,输出为产品详细描述。 + 工具输入示例: + 输入: 宠物医保 + 输出: "宠物医保"(以下简称"商险")保障期限12个月,是付费版商业险,有三个保障:基础版、升级版、尊享版。 +tool_type: 'api' +input_keys: ['query'] +metadata: + type: 'TOOL' + module: 'au_sample_standard_app.intelligence.agentic.tool.pet_ins.pet_insurance_info_tool' + class: 'PetInsuranceInfoTool' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py new file mode 100644 index 00000000..4fcb7815 --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py @@ -0,0 +1,78 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/11/12 11:59 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: search_context_tool.py +import json + +import requests + +from agentuniverse.agent.action.tool.tool import Tool, ToolInput +from agentuniverse.base.util.logging.logging_util import LOGGER + +PRE_API_URL = "https://fincopilotcore.antgroup-inc.cn/api/copilot/runMxc/faq" + + +class SearchContextTool(Tool): + + def execute(self, tool_input: ToolInput): + question = tool_input.get_data('input') + try: + headers = { + "Content-Type": "application/json", + "x-fincopilotcore-signature": "LmHJoTYJxDh3yq@2dQ", + } + # 要发送的数据 + data = { + "chatId": "6bc634d8dbf049feb9b64c91e35832fc-c", + "sessionId": "e5cdeb076d5b40eda74071e8d33c3594-s", + "userId": 
"2088942002730533", + "sceneCode": "ant_fortune_insurance_property", + "query": question, + "decoderType": "ins_slot_v2", + "inputMethod": "user_input", + "userInfoMap": { + "userId": "2088942002730533", + "consultantSceneCode": "ant_fortune_insurance_property", + "spNo": "36763", + "prodNo": "36763", + }, + "enterScene": { + "sceneCode": "ant_fortune_insurance_property", + "productNo": "36763", + } + } + top_k = tool_input.get_data('top_k') if tool_input.get_data('top_k') else 2 + LOGGER.info(f"search context tool input: {data}") + response = requests.post(PRE_API_URL, headers=headers, data=json.dumps(data, ensure_ascii=False)) + result = response.json()['result'] + recallResultTuples = result.get('recallResultTuples') + + context = f"提出的问题是:{question}\n\n这个问题检索到的答案相关内容是:\n\n" + index = 0 + for recallResult in recallResultTuples: + if index == top_k: + return context + if recallResult.get('content'): + context += (f"knowledgeTitle: {recallResult.get('knowledgeTitle')}\n" + f"knowledgeContent: {recallResult.get('content')}\n\n") + index += 1 + return context + except Exception as e: + LOGGER.error(f"invoke search context tool failed: {str(e)}") + raise e + + +def main(): + tool = SearchContextTool() + tool_input_dict = { + "input": "宠物医保投保对宠物年龄的要求是多少?" + } + response = tool.run(**tool_input_dict) + print(response) + + +if __name__ == '__main__': + main() diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml new file mode 100644 index 00000000..de556dfa --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml @@ -0,0 +1,31 @@ +name: 'pet_insurance_search_context_tool' +description: | + 针对宠物险相关问题,给出相关问题答案的检索结果,非宠物险问题无法检索。 + + 工具输入:待检索的问题 + + 工具输出:格式如下 + 提出的问题是:xxx + + 这个问题检索到的答案相关内容是: + + knowledgeTitle: xxx + knowledgeContent: xxx + + 工具输入输出示例: + #工具输入: + 你想要搜索上海的天气时,工具的输入应该是:上海今天的天气 + #工具输出: + 提出的问题是:宠物医保投保对宠物年龄的要求是多少? + + 这个问题检索到的答案相关内容是: + + knowledgeTitle: 多大年龄可以投保 + knowledgeContent:

宠物医保这款产品的投、被保险人为具备完全民事行为的个人,且须为同一人,本产品仅限宠物主本人购买,其承保的宠物须为被保险人以玩赏、陪伴为目的而合法饲养的、可明确鉴别身份的年龄为60天-10周岁的犬类或猫类宠物。

+ +tool_type: 'api' +input_keys: ['input'] +metadata: + type: 'TOOL' + module: 'au_sample_standard_app.intelligence.agentic.tool.pet_ins.pet_insurance_search_context_tool' + class: 'SearchContextTool' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/work_pattern/__init__.py b/au_sample_standard_app/intelligence/agentic/work_pattern/__init__.py new file mode 100644 index 00000000..9b0b281e --- /dev/null +++ b/au_sample_standard_app/intelligence/agentic/work_pattern/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:44 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/service/__init__.py b/au_sample_standard_app/intelligence/service/__init__.py new file mode 100644 index 00000000..8c2d00fe --- /dev/null +++ b/au_sample_standard_app/intelligence/service/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:29 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/service/agent_service/__init__.py b/au_sample_standard_app/intelligence/service/agent_service/__init__.py new file mode 100644 index 00000000..b40d6d53 --- /dev/null +++ b/au_sample_standard_app/intelligence/service/agent_service/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/4/2 18:52 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/service/agent_service/pet_insurance_react_service.yaml b/au_sample_standard_app/intelligence/service/agent_service/pet_insurance_react_service.yaml new file mode 100644 index 00000000..48a9263d --- /dev/null +++ b/au_sample_standard_app/intelligence/service/agent_service/pet_insurance_react_service.yaml @@ -0,0 +1,5 @@ +name: 'pet_insurance_react_service' +description: 'demo react service of demo agent' +agent: 'pet_insurance_react_agent' +metadata: + type: 'SERVICE' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/service/classic_service/__init__.py b/au_sample_standard_app/intelligence/service/classic_service/__init__.py new file mode 100644 index 00000000..5b9c1300 --- /dev/null +++ b/au_sample_standard_app/intelligence/service/classic_service/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:30 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/test/__init__.py b/au_sample_standard_app/intelligence/test/__init__.py new file mode 100644 index 00000000..e902fb70 --- /dev/null +++ b/au_sample_standard_app/intelligence/test/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/4/2 17:32 +# @Author : jerry.zzw +# @Email : jerry.zzw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/test/pet_insurance_consult_pro_agent_test.py b/au_sample_standard_app/intelligence/test/pet_insurance_consult_pro_agent_test.py new file mode 100644 index 00000000..0365ea1b --- /dev/null +++ b/au_sample_standard_app/intelligence/test/pet_insurance_consult_pro_agent_test.py @@ -0,0 +1,23 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/12/12 23:03 +# @Author : jijiawei +# @Email : 
jijiawei.jjw@antgroup.com +# @FileName: pet_insurance_consult_agent_test.py + +from agentuniverse.base.agentuniverse import AgentUniverse +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.agent_manager import AgentManager + +AgentUniverse().start(config_path='../../config/config.toml', core_mode=True) + + +def chat(question: str): + instance: Agent = AgentManager().get_instance_obj('pet_insurance_consult_pro_agent') + output = instance.run(input=question) + return output.get_data('output') + + +if __name__ == '__main__': + res = chat("医保怎么升级") + print(f'最终执行结果:{res}') \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/test/pet_insurance_multi_agent_test.py b/au_sample_standard_app/intelligence/test/pet_insurance_multi_agent_test.py new file mode 100644 index 00000000..77f6e360 --- /dev/null +++ b/au_sample_standard_app/intelligence/test/pet_insurance_multi_agent_test.py @@ -0,0 +1,20 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/11/28 17:17 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: pet_insurance_multi_agent_test.py +from agentuniverse.base.agentuniverse import AgentUniverse +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.agent_manager import AgentManager + +AgentUniverse().start(config_path='../../config/config.toml', core_mode=True) + + +def chat(question: str): + instance: Agent = AgentManager().get_instance_obj('pet_insurance_consult_agent') + return instance.run(input=question) + + +if __name__ == '__main__': + chat("医保怎么升级") diff --git a/au_sample_standard_app/intelligence/test/pet_insurance_react_agent_test.py b/au_sample_standard_app/intelligence/test/pet_insurance_react_agent_test.py new file mode 100644 index 00000000..0e4b765f --- /dev/null +++ b/au_sample_standard_app/intelligence/test/pet_insurance_react_agent_test.py @@ -0,0 +1,20 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/11/28 17:17 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: pet_insurance_react_agent_test.py +from agentuniverse.base.agentuniverse import AgentUniverse +from agentuniverse.agent.agent import Agent +from agentuniverse.agent.agent_manager import AgentManager + +AgentUniverse().start(config_path='../../config/config.toml', core_mode=True) + + +def chat(question: str): + instance: Agent = AgentManager().get_instance_obj('pet_insurance_react_agent') + instance.run(input=question) + + +if __name__ == '__main__': + chat("医保怎么升级") diff --git a/au_sample_standard_app/intelligence/utils/__init__.py b/au_sample_standard_app/intelligence/utils/__init__.py new file mode 100644 index 00000000..51890591 --- /dev/null +++ b/au_sample_standard_app/intelligence/utils/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:36 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/utils/common/__init__.py b/au_sample_standard_app/intelligence/utils/common/__init__.py new file mode 100644 index 00000000..b2493d93 --- /dev/null +++ b/au_sample_standard_app/intelligence/utils/common/__init__.py @@ -0,0 +1,7 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/10/29 10:39 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/utils/common/jsonl_file_util.py 
b/au_sample_standard_app/intelligence/utils/common/jsonl_file_util.py new file mode 100644 index 00000000..b8b16fd9 --- /dev/null +++ b/au_sample_standard_app/intelligence/utils/common/jsonl_file_util.py @@ -0,0 +1,91 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/7/1 21:09 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: jsonl_file_util.py +import json +import os +import sys + +from agentuniverse.base.util.logging.logging_util import LOGGER + +DATA_DIR = './data/' + +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) + + +class JsonFileOps(object): + def __init__(self): + return + + @classmethod + def is_file_exist(cls, file_path): + file_name, ext = os.path.splitext(file_path) + if ext.lower() != '.jsonl': + raise Exception('Unsupported file extension') + return os.path.exists(file_path) + + +class JsonFileReader(object): + def __init__(self, file_path: str): + self.file_handler = None + self.file_name = file_path + if JsonFileOps.is_file_exist(file_path): + self.file_handler = open(file_path, 'r', encoding='utf-8') + + def read_json_obj(self): + if not self.file_handler: + raise Exception(f"None json file to read: {self.file_name}") + json_line = self.file_handler.readline() + if json_line: + try: + json_obj = json.loads(json_line.strip()) + return json_obj + except Exception as e: + LOGGER.warn(f"except[read_json_line]>>>{e}:{json_line}") + return json.loads('{}') + else: + return None + + def read_json_obj_list(self): + obj_list = [] + while True: + obj = self.read_json_obj() + if obj is None: + break + obj_list.append(obj) + return obj_list + + +class JsonFileWriter(object): + def __init__(self, output_file_name: str, extension='jsonl', directory=DATA_DIR): + self.outfile_path = directory + output_file_name + '.' + extension + directory = os.path.dirname(self.outfile_path) + if not os.path.exists(directory): + os.makedirs(directory) + self.outfile_handler = open(self.outfile_path, 'w', encoding='utf-8') + + def write_json_obj(self, json_obj: dict): + try: + # confirm that it's a json string and then write. 
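+ # ensure_ascii=False keeps Chinese text readable in the .jsonl output instead of escaping it to \uXXXX sequences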
+ json_line = json.dumps(json_obj, ensure_ascii=False) + self.outfile_handler.write(json_line.strip() + '\n') + self.outfile_handler.flush() + except Exception as e: + LOGGER.warn(f"except[write_json_obj]>>>{e}:{json_obj}") + return + + def write_json_obj_list(self, json_obj_list: list): + for i in range(0, len(json_obj_list)): + self.write_json_obj(json_obj_list[i]) + return + + def write_json_query_answer(self, query: str, answer: str): + json_obj = {"query": query, "answer": answer} + self.write_json_obj(json_obj) + + def write_json_query_answer_list(self, query_answer_list: list): + for i in range(0, len(query_answer_list)): + self.write_json_query_answer(query_answer_list[i][0], query_answer_list[i][1]) diff --git a/au_sample_standard_app/intelligence/utils/common/txt_file_util.py b/au_sample_standard_app/intelligence/utils/common/txt_file_util.py new file mode 100644 index 00000000..281bf715 --- /dev/null +++ b/au_sample_standard_app/intelligence/utils/common/txt_file_util.py @@ -0,0 +1,50 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/7/1 16:05 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: txt_file_util.py +import os +import sys + +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) + + +class TxtFileOps(object): + def __init__(self): + return + + @classmethod + def is_file_exist(cls, file_path): + file_name, ext = os.path.splitext(file_path) + if ext.lower() != '.txt': + raise Exception('Unsupported file extension') + return os.path.exists(file_path) + + +class TxtFileReader(object): + + def __init__(self, file_path: str): + self.file_handler = None + self.file_name = file_path + if TxtFileOps.is_file_exist(file_path): + self.file_handler = open(file_path, 'r', encoding='utf-8') + + def read_txt_obj(self): + if not self.file_handler: + raise Exception(f"No txt file to read: {self.file_name}") + txt_line = self.file_handler.readline() + if txt_line: + return txt_line.strip() + else: + return None + + def read_txt_obj_list(self): + obj_list = [] + while True: + obj = self.read_txt_obj() + if obj is None: + break + obj_list.append(obj) + return obj_list diff --git a/au_sample_standard_app/intelligence/utils/constant/__init__.py b/au_sample_standard_app/intelligence/utils/constant/__init__.py new file mode 100644 index 00000000..33d17765 --- /dev/null +++ b/au_sample_standard_app/intelligence/utils/constant/__init__.py @@ -0,0 +1,6 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- +# @Time : 2024/11/28 17:20 +# @Author : jijiawei +# @Email : jijiawei.jjw@antgroup.com +# @FileName: __init__.py.py diff --git a/au_sample_standard_app/intelligence/utils/constant/prod_description.py b/au_sample_standard_app/intelligence/utils/constant/prod_description.py new file mode 100644 index 00000000..cfb6c56c --- /dev/null +++ b/au_sample_standard_app/intelligence/utils/constant/prod_description.py @@ -0,0 +1,160 @@ +# !/usr/bin/env python3 +# -*- coding:utf-8 -*- + +# @Time : 2024/11/12 15:42 +# @Author : wangchongshi +# @Email : wangchongshi.wcs@antgroup.com +# @FileName: prod_description.py + +PROD_A_DESCRIPTION = """ +宠物医保(体验版) +1、保险产品: + "宠物医保(体验版)"(以下简称"赠险")免费体验30天,到期后会自动承接付费版"宠物医保"(以下简称"商险"),商险保障期12个月。 +2、投/被保险人: + 在中华人民共和国境内(不包括港、澳、台地区),饲养或受他人委托临时代为饲养(以下统称"饲养")宠物的年满16周岁的具备完全民事行为的个人,可作为本保险的投被保险人。 +3、被保险宠物: + 本保险合同承保的宠物(以下简称"被保险宠物")为被保险人以玩赏、陪伴为目的而合法饲养的、可明确鉴别身份的年龄为60天-10周岁的犬类或猫类宠物。 +4、保险期限: + "赠险":自投保申请核保通过后次日零时生效,保险期间30天(具体以保单载明的起讫时间为准),若被保险宠物年龄不满足60天也可进行领取并约定保险生效日期(约定的保险起讫时间将在保单中载明),等待期从生效之日起算; + 
"商险":自投保申请核保通过后,于30天后(或者"赠险"保险期间结束后)的次日零时生效,保险期间12个月(具体以保单载明的起讫时间为准)。 +5、"赠险"保障责任: + 用于报销猫、狗宠物因常见疾病、意外事故而导致的医疗费用。先天、遗传性疾病不在本产品保障范围内。具体保障方案如下: + 1. 保障总额:20000元 + 2. 免赔额:门诊0元,手术0元 + 3. 报销比例: 定点医院70%(若上一年未发生理赔,赔付比例上调10%,最高可达90%),非定点医院40%(若上一年未发生理赔,赔付比例上调10%,最高可达60%) + 4. 单次事故赔付上限:门诊1200元,手术2000元 + 5. 等待期: 意外医疗保险责任10天,肿瘤和口腔疾病60天,其他疾病30天。 + 6. 保障详情:在保险期间内,被保险人饲养的、在本保险合同中载明的被保险宠物在等待期届满后遭受意外伤害事故或罹患疾病,因此在本保险合同中载明的宠物医疗机构接受本保险合同载明的医疗项目治疗的,对于被保险人支付的必要的、合理的医疗费用,保险人按照本合同的约定负责赔偿。被保险宠物在保险期间内接受上述治疗,本合同保险期间届满时,被保险宠物未结束本次治疗的,保险人根据本保险合同约定继续承担保险责任,延长保险责任期间由投保人与保险人协商确定,但最长不超过30天,并在本保险合同中载明。 + + 注意点: + 1. "赠险"和"商险"是各自独立保单,"赠险"保障期限30天,"商险"保障期限12个月。 + 2. "赠险"体验到期自动承保"商险"的保障方案是升级版,保障责任不变,是一致的。 + 3. "赠险"等待期是意外医疗保险责任10天(赠险30天保障期限内,10天等待期过后意外医疗保险即可享理赔服务),肿瘤和口腔疾病60天,其他疾病30天。若免费体验30天后转为付费版"商险",免费体验的30天保障期限也计入等待期,"商险"生效后意外伤害、普通疾病免等待期,肿瘤及口腔疾病剩余30天等待期。 + 4. 第二年"商险"续保成功的话无等待期。 +6、增值服务: + "赠险"可享受每月200元消费额度(本折扣额度非现金,为权益抵扣价值),在蚂蚁宠物商城买药、买服务可享优惠。体验30天到期转付费版商险可额外再享受三项增值服务:犬多联/猫三联疫苗一针、体外驱虫一支、宠物医师电话咨询。 + + 注意点: + 1. "赠险"下发的200元消费额度,未使用部分不可折现,若赠险保单失效则消费额度一同失效。后续如果自动承接"商险",则"商险"每月下发的200元消费额度在"商险"保障期间(12个月)内可累积,最高可达2400元。 + 2. 本产品赠送的增值服务使用者需与投保宠物为同一只宠物,不得转赠其他宠物。 + 3. 赠险体验30天到期转付费版商险,商险使用增值服务后(消费额度除外),视为用户同意在保至少4个月及以上(期间不办理退保)。即对于商险月缴用户,视为同意在保单完成缴纳4期保费前不进行退保;对于商险年缴用户视为同意在投保4个月前不进行退保。 + +7、缴费方式及金额: + 30天的"赠险"产品属于赠送福利,免费体验,无需缴纳保险费。 + 本产品保障期满会自动承接"商险",有月缴和年缴两种缴费方式,可根据需求自行选择: + a. 月缴:猫类月保费为34.08元,犬类月保费为37.5元。总共12期,首期扣款时间为前序"赠险"30天保障期结束的前10天(含终止日)。剩余11期的扣款日,是每月的"商险"保单生效日,如果当月该天不存在,就提前到当月的最后一天。举例,"赠险"是2024年1月29号保障期终止,那么后续"商险"的月缴首期扣款日是2024年1月20号,"商险"是2024年1月30号生效,第二期扣款日是2024年2月29号(因为当月30号不存在),第三期扣款日是2024年3月30号,以此类推。 + b. 年缴:猫类年保费为409元,犬类年保费为450元,一次性为12个月的"商险"全额缴费。扣款时间为前序"赠险"30天保障期结束的前10天(含终止日)。举例,"赠险"是2024年1月29号保障期终止,那么后续"商险"的年缴扣款日是2024年1月20号。 + + 注意点: + 1. 缴费方式选定后,保障期限内不可更改。 + 2. 缴费账号是被保人的支付宝账号,支持与其支付宝账号绑定的多种支付渠道(例如余额、银行卡、信用卡等)。 + 3. 基于金额的四舍五入,月缴和年缴的最终的保费总额是一致的。 +8、产品犹豫期: +本产品无犹豫期。 +9、续保规则: + 赠险不存在续保的情况,赠险保障期限30天,保障到期自动承接商险。 + 商险续保规则如下: + 1. 不保证续保。 + 2. 您在本年度投保时选择了开通下一年延续保障(以下简称"续保")服务并同意相关服务协议,或在该保单到期前30天内或到期后15天内为同一被保险人重新申请投保的,经保险人审核同意后缴纳保费,重新获得新的保险合同,续保时会根据上一年度风险重新厘定具体缴纳保费以保单展示为准、保障范围以重新出具的保单约定及对应投保须知内容为准。 + 3. 本年度未出险的,续保保单赔付比例提高10%(定点医院最高赔付比例为90%,非定点医院最高赔付比例为60%);若续保保单出险,第三年经保险人审核同意续保的,定点医院赔付比例恢复到70%,非定点医院赔付比例恢复到40%。 +10、产品合同解除(退保)规则: + 赠险期间可以随时取消或退保,可以在【蚂蚁保】-【我的】-【全部保单】中找到对应保单进行操作,退还保费为0元。 + "赠险"生效后,免费体验期为30天,到期后会自动承接至付费版"商险","商险"退保规则如下: + 1. 保单未生效:保单未生效申请退保,将全额退还已缴纳的保险费; + 2. 保单已生效: + a. 年缴保单:在保单生效后申请退保的,将退还未满期保险费。计算公式为:未满期保险费=保险费×(1- 保险单已经过天数/保险期间天数)。 + b. 月缴保单:在保单生效后申请退保的,将退还未满期保险费。计算公式为:退还未满期保险费=本合同当期月度保险费×(1-当月实际经过天数/当月实际天数)。 + 注意点: + 1. 商险保障期限为12个月,期间支持退保,但若使用增值服务后(消费额度除外),视为用户同意连续在保4个月及以上(期间不办理退保) + +11、其他说明: + 1. 等待期:指自本合同保险期间开始之日起连续计算的一段时间,具体天数由保险人和投保人在投保时约定并在本保险合同上载明,连续性投保无等待期。在等待期内发生保险事故的,保险人不承担给付保险金的责任。 + 2. 宽限期:若你选择月缴分期付款,则缴费宽限期为15天,若超过缴费宽限期仍未能完成保费缴纳,保单及增值服务均自动失效,保单终止日期为上一缴费期最后一天。宽限期内保障不变,宽限期内,即使没有交费,出险依然能赔。过了宽限期没有交费,保单效力中止,出险不赔。 + 3. 消费额度:随保每月下发200元消费额度(也可以叫做消费折扣特权或健康服务折扣权益),只在当前保单有效期内可累计。该额度可用于在"蚂蚁宠物"内以优惠价格购买药品、服务等。本折扣额度非现金,为权益抵扣价值。 + 4. 先天性疾病/畸形、遗传性疾病释义及具体病种清单: + (1)先天性疾病、先天性畸形:指被保险宠物出生时就具有的疾病、畸形或身体健康异常。 + (2)遗传性疾病:指生殖细胞或受精卵的遗传物质(染色体和基因)发生突变或畸变所引起的疾病,通常具有由亲代传至后代的垂直传递的特征。 + (3)具体病种清单: + a. 代谢性等疾病:血友病,尼曼匹克综合征先天性凝血因子缺乏,先天肌无力,丙酮酸肌酸缺乏,甘露糖贮积症,II型糖尿病,肢端肥大,胱氨酸尿症,新生犬/猫黄疸症 + b. 心血管系统疾病:先天性门静脉短路心室间隔缺损,心膜内壁缺损,室中隔缺损,二尖瓣发育不良三尖瓣发育不良,瓣膜闭锁不全,法洛氏四重畸形,房中隔缺损,右主动脉弓,三心房,法洛四联症,猫肥厚性心肌病,动脉导管未闭(PDA、永久性动脉导管),肺动脉瓣狭窄 + c. 眼部疾病:眼睑内翻,眼睑外翻,倒睫,异位睫,双行睫,小眼球症,先天性视网膜失养症,视网膜发育不全先天性白内障,视网膜萎缩,第三眼睑脱出,瞬膜软骨变形(内翻、外翻),Merle 眼发育异常,遗传性视网膜变性症,原发性青光眼,无晶状体症小晶状体症,晶状体缺损,圆锥晶状体,永存玻璃体动脉,原始玻璃体持续增生症,晶状体血管膜持续增生症,永存瞳孔膜,虹膜发育不良,虹膜缺损,眼前节发育不良脉络膜发育不良,视(神经)盘发育不良 + d. 
骨关节系统:腭裂,肘关节发育不良,髋关节发育不良,髌骨脱位,软骨病,折耳猫骨骼疾病,斜颈,多指畸形,品种特异性关节炎综合征,漏斗胸,半椎体,骶尾发育不全。 + e. 神经性疾病:先天性前庭综合征,先天性癫痫,小脑发育不良,非获得性脑积水 + f. 口腔疾病:双排牙,咬合不正 + g. 泌尿系统疾病:输尿管异位,先天性尿道狭窄 + h. 呼吸系统疾病:哮喘,短鼻综合征,软腭过长症,气管塌陷,鼻塌陷,喉管塌陷,气管狭窄,扁平胸,漏斗胸 + i. 其他先天性、遗传性疾病:任何部位的发育不良,血小板减少性紫癜,隐睾,先天性凝血功能障碍,唇裂,晕车症,腹壁疝,食物性变态反应,脐疝,自身免疫介导性溶血,腹股沟阴囊疝,重症肌无力,会阴疝,先天性食道无力,膈疝,门静脉高压症 +""" + +PROD_B_DESCRIPTION = """ +宠物医保 +1、保险产品: + "宠物医保"(以下简称"商险")保障期限12个月,是付费版商业险,有三个保障:基础版、升级版、尊享版。 +2、投/被保险人: + 在中华人民共和国境内(不包括港、澳、台地区),饲养或受他人委托临时代为饲养(以下统称"饲养")宠物的年满16周岁的具备完全民事行为的个人,可作为本保险的投被保险人。 +3、被保险宠物: + 本保险合同承保的宠物(以下简称"被保险宠物")为被保险人以玩赏、陪伴为目的而合法饲养的、可明确鉴别身份的年龄为60天-10周岁的犬类或猫类宠物。 +4、保险期限: + "商险":自投保申请核保通过后次日零时生效,保险期间12个月(具体以保单载明的起讫时间为准)。 +5、"商险"保障责任: + 用于报销猫、狗宠物因常见疾病、意外事故而导致的医疗费用。先天、遗传性疾病不在本产品保障范围内。具体责任上,分为三个保障方案:基础版、升级版、尊享版,具体方案如下: + 1. 保障总额:基础版10000元,升级版20000元,尊享版30000元 + 2. 免赔额:基础版、升级版、尊享版均一样,门诊0元,手术0元 + 3. 报销比例: 定点医院70%(若上一年未发生理赔,赔付比例上调10%,最高可达90%),非定点医院40%(若上一年未发生理赔,赔付比例上调10%,最高可达90%) + 4. 单次事故赔付上限:1)基础版:门诊500元,手术1000元;2)升级版:门诊1200元,手术2000元;3)尊享版:门诊2000元,手术3000元 + 5. 等待期: 首次或非连续投保本保险时,意外医疗保险责任10天,肿瘤和口腔疾病60天,其他疾病30天。 + 6. 保障详情:在保险期间内,被保险人饲养的、在本合同中载明的被保险宠物在等待期届满后遭受意外伤害事故或罹患疾病,因此在本合同中载明的宠物医疗机构接受本合同载明的医疗项目治疗的,对于被保险人支付的必要的、合理的医疗费用,保险人按照本合同的约定负责赔偿。保险期满时,被保险宠物仍然需要接受治疗的,保险人根据本合同的约定继续承担保险责任,最长截至保险期满之日起第30日止。 + 注意点: + 1. 第二年"商险"续保成功的话无等待期。 + +6、增值服务: + 增值服务是本产品赠送的高价值宠物健康服务。具体包括包括以下服务: + 1)基础版:邮寄体外驱虫药1支;电话问诊咨询;每月200元消费额度。 + 2)升级版:邮寄体外驱虫药1支;电话问诊咨询;犬多联/猫三联疫苗一针;每月200元消费额度。 + 3)尊享版:邮寄体外驱虫药1支;电话问诊咨询;犬多联/猫三联疫苗一针;狂犬疫苗1针;全面体检1次(含粪检、血常规);每月200元消费额度。 + + 注意点: + 1. 每月下发200元消费额度(本折扣额度非现金,为权益抵扣价值),该额度在"蚂蚁宠物"小程序中购买药品、健康护理可享一定优惠,该额度在"商险"12个月保障期间内可每月累积,未使用部分不可折现,若保单失效,则消费额度一同失效。 + 2. 本产品赠送的增值服务使用者需与投保宠物为同一只宠物,不得转赠其他宠物。 + 3. "商险"使用增值服务后(消费额度除外),视为用户同意在保至少4个月及以上(期间不办理退保)。即对于月缴用户,视为同意在保单完成缴纳4期保费前不进行退保;对于年缴用户视为同意在投保4个月前不进行退保。 + 4. 增值服务有效期同保单有效期。 + +7、缴费方式及金额: + 本保险方案有月缴和年缴两种缴费方式,可根据需求自行选择: + a. 月缴:基础版,猫类月保费为17.33元,犬类月保费为21.58元;升级版,猫类月保费为34.08元,犬类月保费为37.5元;尊享版,猫类月保费67.08元,犬类月保费73元。总共12期,首期付款时间为投保当日。剩余11期的扣款日,是每月的"商险"保单生效日,如果当月该天不存在,就提前到当月的最后一天。举例,被保人2024年1月29号投保"商险"并选择月缴方式,则当天2024年1月29号进行首期付款,"商险"于次日2024年1月30号生效,第二期扣款日是2024年2月29号(因为当月30号不存在),第三期扣款日是2024年3月30号,以此类推。 + b. 年缴:基础版,猫类年保费为208元,犬类年保费为259元;升级版,猫类年保费为409元,犬类年保费为450元;尊享版,猫类年保费805元,犬类年保费876元。需要在投保当日一次性全额扣款。 + 注意点: + 1. 缴费方式选定后,保障期限内不可更改。 + 2. 缴费账号是被保人的支付宝账号,支持与其支付宝账号绑定的多种支付渠道(例如余额、银行卡、信用卡等)。 + 3. 基于金额的四舍五入,月缴和年缴的最终的保费总额是一致的。 +8、产品犹豫期: +本产品无犹豫期。 +9、续保规则: + 商险是不保证续保的。 + 您在本年度投保时选择了开通下一年延续保障(以下简称"续保")服务并同意相关服务协议,或在该保单到期前30天内或到期后15天内为同一被保险人重新申请投保的,经保险人审核同意后缴纳保费,重新获得新的保险合同,续保时会根据上一年度风险重新厘定具体缴纳保费以保单展示为准、保障范围以重新出具的保单约定及对应投保须知内容为准。 + 本年度未出险的,续保保单赔付比例提高10%(定点医院最高赔付比例为90%,非定点医院最高赔付比例为60%);若续保保单出险,第三年经保险人审核同意续保的,定点医院赔付比例恢复到70%,非定点医院赔付比例恢复到40%。 +10、产品合同解除(退保)规则: + 1. 保单未生效:保单未生效申请退保,将全额退还已缴纳的保险费; + 2. 保单已生效: + a. 年缴保单:在保单生效后申请退保的,将退还未满期保险费。计算公式为:未满期保险费=保险费×(1- 保险单已经过天数/保险期间天数)。 + b. 月缴保单:在保单生效后申请退保的,将退还未满期保险费。计算公式为:退还未满期保险费=本合同当期月度保险费×(1-当月实际经过天数/当月实际天数)。 + 注意点: + 1. 商险保障期限为12个月,期间支持退保,但若使用增值服务后(消费额度除外),视为用户同意连续在保4个月及以上(期间不办理退保) +11、其他说明: + 1. 等待期:指自本合同保险期间开始之日起连续计算的一段时间,具体天数由保险人和投保人在投保时约定并在本保险合同上载明,连续性投保无等待期。在等待期内发生保险事故的,保险人不承担给付保险金的责任。 + 2. 宽限期:若你选择月缴分期付款,则缴费宽限期为15天,若超过缴费宽限期仍未能完成保费缴纳,保单及增值服务均自动失效,保单终止日期为上一缴费期最后一天。宽限期内保障不变,宽限期内,即使没有交费,出险依然能赔。过了宽限期没有交费,保单效力中止,出险不赔。 + 3. 消费额度:随保每月下发200元消费额度(也可叫做消费折扣特权或健康服务折扣权益),只在当前保单有效期内可累计。该额度可用于在"蚂蚁宠物"内以优惠价格购买药品、服务等。 + 4. 先天性疾病/畸形、遗传性疾病释义及具体病种清单: + (1)先天性疾病、先天性畸形:指被保险宠物出生时就具有的疾病、畸形或身体健康异常。 + (2)遗传性疾病:指生殖细胞或受精卵的遗传物质(染色体和基因)发生突变或畸变所引起的疾病,通常具有由亲代传至后代的垂直传递的特征。 + (3)具体病种清单: + a. 代谢性等疾病:血友病,尼曼匹克综合征先天性凝血因子缺乏,先天肌无力,丙酮酸肌酸缺乏,甘露糖贮积症,II型糖尿病,肢端肥大,胱氨酸尿症,新生犬/猫黄疸症 + b. 
心血管系统疾病:先天性门静脉短路心室间隔缺损,心膜内壁缺损,室中隔缺损,二尖瓣发育不良三尖瓣发育不良,瓣膜闭锁不全,法洛氏四重畸形,房中隔缺损,右主动脉弓,三心房,法洛四联症,猫肥厚性心肌病,动脉导管未闭(PDA、永久性动脉导管),肺动脉瓣狭窄 + c. 眼部疾病:眼睑内翻,眼睑外翻,倒睫,异位睫,双行睫,小眼球症,先天性视网膜失养症,视网膜发育不全先天性白内障,视网膜萎缩,第三眼睑脱出,瞬膜软骨变形(内翻、外翻),Merle 眼发育异常,遗传性视网膜变性症,原发性青光眼,无晶状体症小晶状体症,晶状体缺损,圆锥晶状体,永存玻璃体动脉,原始玻璃体持续增生症,晶状体血管膜持续增生症,永存瞳孔膜,虹膜发育不良,虹膜缺损,眼前节发育不良脉络膜发育不良,视(神经)盘发育不良 + d. 骨关节系统:腭裂,肘关节发育不良,髋关节发育不良,髌骨脱位,软骨病,折耳猫骨骼疾病,斜颈,多指畸形,品种特异性关节炎综合征,漏斗胸,半椎体,骶尾发育不全。 + e. 神经性疾病:先天性前庭综合征,先天性癫痫,小脑发育不良,非获得性脑积水 + f. 口腔疾病:双排牙,咬合不正 + g. 泌尿系统疾病:输尿管异位,先天性尿道狭窄 + h. 呼吸系统疾病:哮喘,短鼻综合征,软腭过长症,气管塌陷,鼻塌陷,喉管塌陷,气管狭窄,扁平胸,漏斗胸 + i. 其他先天性、遗传性疾病:任何部位的发育不良,血小板减少性紫癜,隐睾,先天性凝血功能障碍,唇裂,晕车症,腹壁疝,食物性变态反应,脐疝,自身免疫介导性溶血,腹股沟阴囊疝,重症肌无力,会阴疝,先天性食道无力,膈疝,门静脉高压症 +""" \ No newline at end of file diff --git "a/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/API\351\200\202\351\205\215\346\240\267\344\276\213\346\226\207\346\241\243.md" "b/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/API\351\200\202\351\205\215\346\240\267\344\276\213\346\226\207\346\241\243.md" new file mode 100644 index 00000000..8e0c2517 --- /dev/null +++ "b/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/API\351\200\202\351\205\215\346\240\267\344\276\213\346\226\207\346\241\243.md" @@ -0,0 +1,456 @@ +# Bing并发搜索api +## api接口描述 +使用该工具可以在bing中并发检索多条信息。 + +bing搜索需要[https://www.searchapi.io/](https://www.searchapi.io/)申请相应的api_key。该工具基于单个问题检索实现了并发多信息检索。 + +## 输入描述 +入参inputs是一个json字符串,内容是一个待检索list,每个元素表示一条需要搜索的信息。 + +```json +["贝多芬 交响乐", "肖邦 随想曲", "舒伯特 夜曲"] +``` + +## 输出描述 +输出是对每个检索问题和结果的封装,以“--------------------------”隔开,格式如下 + +```text +query: 贝多芬 交响乐 +result: 世界音乐史上最伟大交响曲的缔造,被誉为“乐圣”的贝多芬,其一生中所创作的九大交响曲,“无人能望其项背”。其中有三首被世界纳入世界音乐史最伟大的十首交响曲之中,其余六首也在世界交响曲排名中名列前茅。 +-------------------------- +query: 肖邦 随想曲 +result: E大调随想曲是肖邦作品中最著名的一首,也是浪漫时期钢琴曲中的代表之一。 +------------------------- +query: 舒伯特 夜曲 +result: 《舒伯特小夜曲》是奥地利作曲家舒伯特(1797-1828)创作的歌曲,他在欧洲音乐史上有“歌曲之王”的称誉。当时的民间传说认为,天鹅将死的时候,会唱出最动人的歌。 +``` + +## AU中封装该api工具 +**yaml配置** + +```yaml +name: 'parallel_search_detail_api' +description: '使用该工具可以在bing中并发搜索多条信息 + <输入描述> + 入参inputs是一个json字符串,内容是一个待检索list,每个元素表示一条需要搜索的信息。 + + <输出描述> + 输出是对每个检索问题和结果的封装,以“--------------------------”隔开。 + query: xxx + result: xxx + ------------------------- + query: xxx + result: xxx + ------------------------- + + <工具输入示例> + 你想要搜索信息,如不同的几位音乐家的作品,且不需要返回详细结果的话,工具的输入应该是: + ["贝多芬 交响乐", "肖邦 随想曲", "舒伯特 夜曲"] + + <工具输出示例> + 工具的输出应该是: + query: 贝多芬 交响乐 + result: 世界音乐史上最伟大交响曲的缔造,被誉为“乐圣”的贝多芬,其一生中所创作的九大交响曲,“无人能望其项背”。其中有三首被世界纳入世界音乐史最伟大的十首交响曲之中,其余六首也在世界交响曲排名中名列前茅。 + -------------------------- + query: 肖邦 随想曲 + result: E大调随想曲是肖邦作品中最著名的一首,也是浪漫时期钢琴曲中的代表之一。 + ------------------------- + query: 舒伯特 夜曲 + result: 《舒伯特小夜曲》是奥地利作曲家舒伯特(1797-1828)创作的歌曲,他在欧洲音乐史上有“歌曲之王”的称誉。当时的民间传说认为,天鹅将死的时候,会唱出最动人的歌。 + +tool_type: 'api' +input_keys: ['inputs'] +metadata: + type: 'TOOL' + module: 'au_expert_assistant.intelligence.agentic.tool.search.parallel_search_detail_api' + class: 'ParallelSearchDetailApi' +``` + +**代码实现参考** + +```text +class ParallelSearchDetailApi(ZxzTool): + async def execute(self, tool_input: ToolInput): + try: + json_input = tool_input.get_data("input") + json_input = parse_and_check_json_markdown(json_input, ["input_params", "save_params"]) + query_list = json_input['input_params'].get('query', []) + search_tool = ToolManager().get_instance_obj('knowledge_search_detail_api') + + executor_res = await asyncio.gather( + *[search_tool.run( + query=query, + search_top=3 + ) for query in query_list] + ) + result_str = '' + + for res in executor_res: + result_str += f'query:' + 
res['query'] + '\n' + result_str += f'result:' + res['search_results'][0] + '\n' + result_str += '--------------------------\n' + + update_react_memory( + name=json_input["save_params"]["name"], + data=result_str, + description=json_input["save_params"]["description"] + ) + if json_input["save_params"].get("full_return"): + return result_str + return '执行成功,可继续下一步' + except Exception as e: + error_message = traceback.format_exc() + return error_message + +``` + +# 谷歌酒店搜索api +## api接口描述 +该api调用谷歌的开放api接口https://serpapi.com/search中的google_hotels引擎服务查询酒店相关信息(该接口需要去谷歌api开放官网申请相应的api-key即可访问)。 + +## 输入描述 +工具的输入input为json_markdown格式的字符串,内容是下面的结构 + +```json +{ + "input_params": { + "query": "怡莱酒店南昌胜利路步行街店", + "check_in_date": "2024-11-24", + "check_out_date": "2024-11-25", + "search_type": "name", + "hotel_class": 3 + }, + "save_params":{ + "name": "怡莱酒店南昌胜利路步行街店", + "description":"记载了怡莱酒店南昌胜利路步行街店的具体住宿信息", + "full_return": false + } +} + +``` + +**必选参数:** + +query:可以是具体酒店名称或者地名,注意地名只能是巴黎、南昌这种地名。这个工具不支持复杂的搜索如巴黎民宿、南昌高档酒店 + +search_type:有两种取值location和name,当query是具体酒店名称时,该取值为name,地名的话则是location。 + +check_in_date和check_out_date为YYYY-MM-DD格式的入住和离店日期 + +**可选参数:** + +hotel_class取值为范围为2-5表示酒店档次。 + +min_price和max_price表示筛选的价格区间,但min_price和max_price取值不能为0。 + +save_params参数:用于将最后结果归档。save_params包含三个属性,归档名称name和结果内容的简要描述description以及是否表示需要返回完整结果的full_return + +## 输出描述 +输出为json字符串,内容为检索到的hotel信息列表,按匹配度从高到低排列,每个hotel结构包含描述,链接,地址,联系方式,图片,定位,价格等信息。下面为示例: + +```json +[{ + "type": "hotel", + "name": "怡莱酒店(南昌胜利路步行街店)", + "description": "怡莱酒店(南昌胜利路步行街店)位于南昌繁华市中心步行街建德观街,门即是中山路——胜利路步行街,门前是南昌知名小吃夜宵街——建德观街;紧靠八一大桥,距江南三大名...", + "link": "https://www.h10hotels.com/en/barcelona-hotels/h10-port-vell?utm_source=google_my_business&utm_medium=boton_sitio_web&utm_campaign=hpv", + "address": "0791-82075888", + "phone": "+34 933 10 30 65", + "phone_link": "tel:+34933103065", + "gps_coordinates": { + "latitude": 41.381571799999996, + "longitude": 2.1838414999999998 + }, + "check_in_time": "3:00 PM", + "check_out_time": "12:00 PM", + "rate_per_night": { + "lowest": "$123", + "extracted_lowest": 123, + "before_taxes_fees": "$100", + "extracted_before_taxes_fees": 100 + }, + "total_rate": { + "lowest": "$123", + "extracted_lowest": 123, + "before_taxes_fees": "$100", + "extracted_before_taxes_fees": 100 + } +}] +``` + +## AU中封装该api工具 +```yaml +name: 'hotel_search_api' +description: '使用该工具可以搜索酒店相关的信息。 + <输入描述> + query可以是具体酒店名称或者地名,注意地名只能是巴黎、南昌这种地名。这个工具不支持复杂的搜索如巴黎民宿、南昌高档酒店 + search_type有两种取值location和name,当query是具体酒店名称时,该取值为name,地名的话则是location。 + check_in_date和check_out_date为YYYY-MM-DD格式的入住和离店日期 + 可选参数: + hotel_class取值为范围为2-5表示酒店档次。 + min_price和max_price表示筛选的价格区间,但min_price和max_price取值不能为0。 + 同时你也应当提供一个save_params参数,用于将最后结果归档。save_params包含三个属性,归档名称name和结果内容的简要描述description以及是否表示需要返回完整结果的full_return + + <输出描述> + 输出为json字符串,内容为检索到的hotel信息列表,按匹配度从高到低排列,每个hotel结构包含描述,链接,地址,联系方式,图片,定位,价格等信息。 + + <工具输入示例> + 你想要搜索指定酒店住宿信息时如怡莱酒店南昌胜利路步行街店,2024-11-24到2024-11-25的房间信息,且不需要返回详细结果的话,工具的输入应该是: + ```json + { + "input_params": { + "query": "怡莱酒店南昌胜利路步行街店", + "check_in_date": "2024-11-24", + "check_out_date": "2024-11-25", + "search_type": "name", + "hotel_class": 3 + }, + "save_params":{ + "name": "怡莱酒店南昌胜利路步行街店", + "description":"记载了怡莱酒店南昌胜利路步行街店的具体住宿信息", + "full_return": false + } + } + ``` + <工具输出示例> + 返回莱酒店南昌胜利路步行街店的检索结果为: + [{ + "type": "hotel", + "name": "怡莱酒店(南昌胜利路步行街店)", + "description": "怡莱酒店(南昌胜利路步行街店)位于南昌繁华市中心步行街建德观街,门即是中山路——胜利路步行街,门前是南昌知名小吃夜宵街——建德观街;紧靠八一大桥,距江南三大名..." 
+ "link": "https://www.h10hotels.com/en/barcelona-hotels/h10-port-vell?utm_source=google_my_business&utm_medium=boton_sitio_web&utm_campaign=hpv", + "address": "0791-82075888", + "phone": "+34 933 10 30 65", + "phone_link": "tel:+34933103065", + "gps_coordinates": { + "latitude": 41.381571799999996, + "longitude": 2.1838414999999998 + }, + "check_in_time": "3:00 PM", + "check_out_time": "12:00 PM", + "rate_per_night": { + "lowest": "$123", + "extracted_lowest": 123, + "before_taxes_fees": "$100", + "extracted_before_taxes_fees": 100 + }, + "total_rate": { + "lowest": "$123", + "extracted_lowest": 123, + "before_taxes_fees": "$100", + "extracted_before_taxes_fees": 100 + } + }] + ' +tool_type: 'api' +input_keys: ['input'] +metadata: + type: 'TOOL' + module: 'au_expert_assistant.intelligence.agentic.tool.search.hotel_search_api' + class: 'HotelSearchApi' +``` + +**代码实现参考** + +```text +class HotelSearchApi(Tool): + api_key: Optional[str] = Field( + default_factory=lambda: get_from_env("TOUR_SERP_API_KEY")) + base_url: str = "https://serpapi.com/search" + + async def request_serpapi(self, url, name, check_in_date, check_out_date, **kwargs): + language_version = FrameworkContextManager().get_context( + "language_version") + params = { + "engine": "google_hotels", + "q": name, + "check_in_date": check_in_date, + "check_out_date": check_out_date, + "currency": "CNY" if language_version == "cn" else "USD", + "gl": "cn", + "hl": "zh-CN" if language_version == "cn" else "en", + "api_key": self.api_key, + **kwargs + } + for i in range(3): + try: + async with httpx.AsyncClient(verify=False) as client: + response = await client.get(url, params=params) + return json.loads(response.text) + except Exception as e: + print(e) + return {'properties':[]} + async def execute(self, tool_input: ToolInput): + try: + json_input = tool_input.get_data("input") + json_input = parse_and_check_json_markdown(json_input, ["input_params", "save_params"]) + + # 先获取酒店详细信息链接 + current_date = datetime.now() + current_date = current_date.strftime("%Y-%m-%d") + search_type = json_input["input_params"].pop('search_type') + if search_type == 'name': + json_input["input_params"].pop('hotel_class', None) + if 'min_price' in json_input["input_params"]: + json_input["input_params"]['min_price'] += 1 + check_in_date = json_input["input_params"].pop('check_in_date', current_date) + check_in_date = check_in_date if check_in_date > current_date else current_date + check_out_date = json_input["input_params"].pop('check_out_date', current_date) + check_out_date = check_out_date if check_out_date > current_date else current_date + + result_dict = await self.request_serpapi( + self.base_url, + json_input["input_params"].pop('query'), + check_in_date, + check_out_date, + **json_input["input_params"] + ) + hotel_details = [] + + keys = ['name', 'nearby_places', 'total_rate', 'amenities', + 'excluded_amenities', "prices", "rate_per_night"] + if not 'properties' in result_dict: + print(result_dict) + return "无相关酒店信息" + for hotel in result_dict['properties']: + if hotel['type'] in ['hotel', 'vacation rental'] and 'total_rate' in hotel and 'serpapi_property_details_link' in hotel: + hotel_details.append({key: hotel[key] for key in keys if key in hotel}) + if search_type == 'name': + break + elif len(hotel_details)>7: + break + + if not hotel_details: + return "无相关酒店信息" + + result_str = json.dumps(hotel_details, ensure_ascii=False) + + update_react_memory( + name=json_input["save_params"]["name"], + data=result_str, + 
description=json_input["save_params"]["description"] + ) + if json_input["save_params"].get("full_return"): + return result_str + return '执行成功,可继续下一步' + except Exception as e: + error_message = traceback.format_exc() + return error_message + +``` + +# 自定义api-宠物险咨询检索工具 +## api接口描述 +基于ES自建索引实现对宠物险产品相关问题答案的检索。只针对“宠物医保”及“宠物医保(体验版)”产品有效。 + +## 输入描述 +入参input为待检索的问题 + +```text +“宠物医保”如何从基础版升级到尊享版? +``` + +## 输出描述 +输出包含“提出的问题”以及“检索到的答案”两部分内容 + +```plain +提出的问题是:宠物医保投保对宠物年龄的要求是多少? + +这个问题检索到的答案相关内容是: +knowledgeTitle: 多大年龄可以投保 +knowledgeContent:

宠物医保这款产品的投、被保险人为具备完全民事行为能力的个人,且须为同一人,本产品仅限宠物主本人购买,其承保的宠物须为被保险人以玩赏、陪伴为目的而合法饲养的、可明确鉴别身份的年龄为60天-10周岁的犬类或猫类宠物。

+ +``` + +检索答案包含knowledgeTitle和knowledgeContent,分别代表检索的知识标题和内容,最终回答一般取knowledgeContent即可。 + +## AU中封装该api工具 +**yaml配置** + +```yaml +name: 'pet_insurance_search_context_tool' +description: | + #工具名称:宠物险产品信息检索工具 + + #功能描述:提供宠物险产品相关问题答案的检索。只针对“宠物医保”及“宠物医保(体验版)”产品有效。 + + #工具输入:待检索的问题。 + + #工具输出:输出格式如下 + ------------------------ + 提出的问题是:xxx + + 这个问题检索到的答案相关内容是: + knowledgeTitle: xxx + knowledgeContent: xxx + ------------------------ + 其中检索答案包含knowledgeTitle和knowledgeContent,分别代表检索的知识标题和内容,最终回答一般取knowledgeContent即可。 + + #工具输入输出示例: + 工具输入: + 宠物医保投保对宠物年龄的要求是多少? + + 工具输出: + 提出的问题是:宠物医保投保对宠物年龄的要求是多少? + + 这个问题检索到的答案相关内容是: + + knowledgeTitle: 多大年龄可以投保 + knowledgeContent:

宠物医保这款产品的投、被保险人为具备完全民事行为能力的个人,且须为同一人,本产品仅限宠物主本人购买,其承保的宠物须为被保险人以玩赏、陪伴为目的而合法饲养的、可明确鉴别身份的年龄为60天-10周岁的犬类或猫类宠物。

+ +tool_type: 'api' +input_keys: ['input'] +metadata: + type: 'TOOL' + module: 'sample_standard_app.intelligence.agentic.tool.pet_ins.pet_insurance_search_context_tool' + class: 'SearchContextTool' +``` + +**代码实现参考** + +```text +PRE_API_URL = "https://fincopilotcore.antgroup-inc.cn/api/copilot/runMxc/faq" + + +class SearchContextTool(Tool): + + def execute(self, tool_input: ToolInput): + question = tool_input.get_data('input') + try: + headers = { + "Content-Type": "application/json", + } + # 要发送的数据 + data = { + "sceneCode": "ant_fortune_insurance_property", + "query": question, + "decoderType": "ins_slot_v2", + "inputMethod": "user_input", + "userInfoMap": { + "consultantSceneCode": "ant_fortune_insurance_property", + }, + "enterScene": { + "sceneCode": "ant_fortune_insurance_property", + } + } + top_k = tool_input.get_data('top_k') if tool_input.get_data('top_k') else 2 + LOGGER.info(f"search context tool input: {data}") + response = requests.post(PRE_API_URL, headers=headers, data=json.dumps(data, ensure_ascii=False)) + result = response.json()['result'] + recallResultTuples = result.get('recallResultTuples') + + context = f"提出的问题是:{question}\n\n这个问题检索到的答案相关内容是:\n\n" + index = 0 + for recallResult in recallResultTuples: + if index == top_k: + return context + if recallResult.get('content'): + context += (f"knowledgeTitle: {recallResult.get('knowledgeTitle')}\n" + f"knowledgeContent: {recallResult.get('content')}\n\n") + index += 1 + return context + except Exception as e: + LOGGER.error(f"invoke search context tool failed: {str(e)}") + raise e +``` + diff --git "a/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/AgentUniverse\345\277\253\351\200\237\346\220\255\345\273\272.md" "b/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/AgentUniverse\345\277\253\351\200\237\346\220\255\345\273\272.md" new file mode 100644 index 00000000..bb26be4a --- /dev/null +++ "b/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/AgentUniverse\345\277\253\351\200\237\346\220\255\345\273\272.md" @@ -0,0 +1,177 @@ +# 文档说明 +为了让大家快速上手AU框架,我们提供了样板工程(可以直接运行)并配套详细的文字说明,在实际研发过程中直接基于其中的某些智能体结合自己业务进行一小部分代码/配置改写即可完成应用的搭建。 + +样板工程地址:[au_sample_standard_app](../../../../au_sample_standard_app) + +样板工程拟定了业务是关于宠物险产品的智能顾问,对用户提出的产品问题进行解答。 + +# 从一个智能体搭建开始 +[single_agent_case](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case)下构建一个宠物险顾问智能体,使用qwen72b模型,提供宠物医保的检索以及信息补充工具,react模式自行让模型选择工具帮助回答用户的问题。 + +从0开始搭建的整体流程如下: + +![画板](https://intranetproxy.alipay.com/skylark/lark/0/2024/jpeg/18056974/1733987419009-14ac52dc-dc1b-4998-a7ff-fc2176083720.jpeg) + +## 构建agent所依赖的组件 +### 构建tool +**<配置封装>** +[pet_insurance_search_context_tool.yaml](../../../../au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml) + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1733990583392-c4df1a8b-51d1-44a9-84e9-ebf7c4251c0b.png) + +**<代码实现>** +[pet_insurance_search_context_tool.py](../../../../au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py) + +所有的自定义工具都都继承自agentuniverse.agent.action.tool.tool的Tool基类,核心执行方法为execute(你需要overwrite的地方),入参类型为ToolInput(key-value经过封装后的结构)。 + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1733988837626-fd96612d-6439-4aa0-841a-916031b51bdf.png) + +API适配AU样例可参考 [API适配样例文档.md](API适配样例文档.md) + +### 构建llm +#### 配置封装 
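+下面先给出该llm配置的一个极简示意(其中model_name、max_tokens等取值以及ext_info内的参数均为假设的占位值,实际字段以下方链接的yaml文件为准):
+
+```yaml
+name: 'pet_qwen_72b_stream'
+description: '宠物险样例使用的qwen72b流式llm(示意)'
+model_name: 'qwen72b'                 # 占位值,按实际模型填写
+max_tokens: 1000                      # 占位值
+ext_info:                             # 自定义拓展参数,在初始化方法中解析
+  sceneName: 'your_scene_name'        # 占位值:内部maya服务所需参数
+  chainName: 'your_chain_name'        # 占位值
+metadata:
+  type: 'LLM'
+  module: 'au_sample_standard_app.intelligence.agentic.llm.maya.pet_insurance_maya_llm'
+  class: 'PetInsuranceMayaLLM'
+```
+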
+[pet_qwen_72b_stream.yaml](../../../../au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml) +拓展字段ext_info中可以添加自定义参数,并在代码初始化方法中解析,成为PetInsuranceMayaLLM的属性。 + +#### 代码实现 +所有的llm都继承自agentuniverse.llm.llm的LLM基类。 + +**初始化** + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1733990996692-856663e2-1618-4d5a-b837-0cbdaac6e092.png) + +overwrite方法initialize_by_component_configer,基于父类的方法,在ext_info中添加一些自定义参数。如llm走的是企业内部maya服务,需要sceneName,chainName等参数,在初始化中注入之后就可以通过在yaml中配置生效。 + +**执行主体逻辑** + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1733998236348-4ab62c1e-c05b-4e24-b0c9-f306a6eecb3b.png) + +llm需要封装成langchain实例,实际调度过程中由该实例唤起执行call方法。call方法为主体逻辑,在样例中对流式和非流式分别实现了http的模型请求调用。 + +注意:样例所用的llm为 + +## 构建智能体 +#### 配置封装 +[pet_insurance_react_agent.yaml](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml) + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734003384746-701f2ff8-46b7-4d32-9cdb-0bb7594594b1.png) + +该智能体依赖的prompt定义 [pet_insurance_react_prompt.yaml](../../../../au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml) + + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734071728918-f8608995-b782-4fdd-9374-072845c839d7.png) + +react模式完全依赖llm自行思考问题,选择工具调度组装结果,在实际使用过程中需要注意思维链的迭代,尤其是停止词stop的配置(一般为Observation),不同模型对prompt需要进行微调去适配。 + +#### 代码实现 +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1733985532204-5f738d83-7163-4b1e-a948-cd413699c579.png) + +智能体继承Agent类,框架会对input_keys进行校验,一定要注意输入和输出字段。 + +上图示例是一个react智能体,执行主体execute方法中依次对memory,token,llm,tool加工处理后基于langchain构建了reAct实例,该实例通过invoke方法唤起。 + +## 运行测试 +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734001509704-bd0c67c8-e98e-44f3-8c36-a2a78bce2b09.png) + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734001585855-35f64dd1-1af1-4e79-bcbc-f30b836f0a21.png) + +能看到这个思维链是先去调用pet_insurance_info_tool来获取“宠物医保”的详细信息,再去使用pet_insurance_search_context_tool尝试找到关于升级流程的直接信息。 + +最终的回答为: “宠物医保”在保障期间内不支持升级或更改版本。如果您希望获得更高一级的保障,您需要在当前保险周期结束时,在续保时选择您想要的更高保障版本。例如,如果您目前投保的是基础版,那么在保险到期后,您可以选择续保时升级到升级版或尊享版。请记得在续保时仔细阅读条款,以确保您了解新版本的所有细节和费用。 + +## 效果微调-优化prompt +优化prompt是调整智能体效果的常用手段。2.3中运行结果的表达相对啰嗦和重复,希望在表达上能够更精简,我们可以适当修改pet_insurance_react_agent.cn,优化llm的结果输出。 + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734072545230-55d74f48-f555-4beb-9087-a4d4b5f031a2.png) + +运行后的效果如下,对比2.3要好些。 + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734072518223-8d7966d4-cd3c-4268-8395-e572f1ae5432.png) + +## 服务化 +[pet_insurance_react_service.yaml](../../../../au_sample_standard_app/intelligence/service/agent_service/pet_insurance_react_service.yaml) + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734005968227-3039b182-09e2-49d2-982b-60e3df1e265e.png) + +本地起服务调试,构建下面的请求 + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734005989173-7b555ca3-614e-4717-baa9-2012e6a9c57a.png) + +# 从单智能体到多智能体 +对于比较复杂的场景,单智能可能不足以完成任何,需要多个智能体协作。 + +以样板工程的宠物险问答为例,由于用户的问题可能比较发散,单智能体的回答有时候会产生偏离。在部分专家经验的驱动下,对于这种场景我们拆分以下几个智能体。 + +![画板](https://intranetproxy.alipay.com/skylark/lark/0/2024/jpeg/18056974/1734014243591-e9539d7d-4bb6-45c0-95f3-698093449058.jpeg) + +## 子智能体 +改写和策划两个子智能体不再单独介绍了,参考2.1进行构建 + +改写智能体: 
+[pet_question_rewrite_agent.yaml](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml) + + +策划智能体(拆解问题): +[pet_question_planning_agent.yaml](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml) + + +这里要注意的是出入参的定义和转换,要能和下个智能体衔接起来。这个可以在主智能体串接时适配。 + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734014843998-a826cd8b-3d4f-4fb4-9ef0-65e622983368.png) + +## 主智能体 +[pet_insurance_consult_agent.yaml](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml) + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734014662057-cbd2eae4-8d92-4a28-8717-fa42bf6229c0.png) + +实际上就是把几个智能体和工具按照一定的逻辑串接起来,这里要注意的是参数的传递,agent_input作为线程上下文可以存储中间产物给后面的智能体使用,每个智能体的出入参字段是可以按照需求自定义,建议不要重复。 + +## 运行测试 + +运行 [pet_insurance_consult_pro_agent_test.py](../../../../au_sample_standard_app/intelligence/test/pet_insurance_consult_pro_agent_test.py) + + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734073361810-67db902b-4ede-4bbf-bd4d-c9f937434536.png) + +**智能体 pet_question_planning_agent 执行结果为: ** + +```text +如何将宠物医保的基础版升级到升级版或尊享版?升级的具体流程和条件是什么? +``` + +**智能体 pet_question_planning_agent 执行结果为:** + +```json + { + "sub_query_list": [ + "如何将宠物医保的基础版升级到升级版或尊享版?", + "宠物医保升级版和尊享版的具体保障内容有什么区别?", + "升级宠物医保版本是否需要额外费用,费用如何计算?" + ] +} +``` + +**pet_insurance_consult_agent 最终执行结果:** + +```text +宠物医保在保障期间内不支持从基础版升级到升级版或尊享版。您如果希望获得更高版本的保障,可以在当前保单到期后,在续保时选择升级到所需的版本。 +请注意,续保时的保费可能会根据宠物的当前状况和保险公司的评估有所调整。 +``` + + + +# 封装智能体模版 +在上面的多智能体应用中我们会发现,三个智能体在实现上有很多相同的逻辑,甚至策划智能体继承自改写智能体。为了提升智能体的复用性,我们可以抽像出一些通用的逻辑和方法,封装成一个智能体模版AgentTemplate,大部分智能体基于这个模版做一些微小的调整,甚至只是yaml配置的改动即可实现多种智能体逻辑。 + +参考 [pet_agent_template.py](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_template/pet_agent_template.py) + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734016162848-dc15f8cf-c362-4834-b99b-abe9e0ba2cdc.png) + +这里将智能体会用到的包括注册agent,处理记忆/llm/工具/prompt以及执行主体的一些通用方法进行了一定的抽象,这些方法在宠物险的智能体中大部分都可以复用。以主智能体为例,只要overwrite方法customized_execute对主流程部分逻辑进行定制化即可。 + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734016447429-99561e21-98fa-45f2-8e55-8a9a696f8022.png) + +实际上AU框架提供了相当多的智能体模版,包括Rag,React,PEER模式等,建议使用这些现有的模版构建自己的智能体。 + +![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734016549408-58b1b46e-6f09-4339-b819-7efccab3d78b.png) + From acb4aec499f730b0f45fcd10b4e056a445c0b52e Mon Sep 17 00:00:00 2001 From: wangchongshi Date: Fri, 13 Dec 2024 17:50:40 +0800 Subject: [PATCH 2/4] improvement: refactoring the sample project in agentUniverse. 
--- README.md | 2 +- agentuniverse/agent/agent.py | 134 +++++++++++++++++- .../executing_planner/executing_planner.py | 11 +- .../expressing_planner/expressing_planner.py | 11 +- .../planner/nl2api_planner/nl2api_planner.py | 11 +- .../planning_planner/planning_planner.py | 11 +- .../plan/planner/rag_planner/rag_planner.py | 11 +- .../reviewing_planner/reviewing_planner.py | 11 +- .../agent/template/agent_template.py | 130 +---------------- agentuniverse/base/util/prompt_util.py | 2 +- .../guidebook/en/Examples/Discussion_Group.md | 4 +- docs/guidebook/en/Examples/Legal_Advice.md | 2 +- ...ation_Project_Structure_and_Explanation.md | 8 +- docs/guidebook/en/Get_Start/Start_Tutorial.md | 6 +- .../en/How-to/How_To_Build_A_RAG_Agent.md | 2 +- .../en/How-to/Product_Platform_Quick_Start.md | 4 +- .../Tutorials/Agent/Agent_Create_And_Use.md | 10 +- .../Tutorials/Data_Autonomous_Agent.md | 6 +- .../Tutorials/Plan/Planner_Define_And_Use.md | 2 +- ...11\344\270\216\344\275\277\347\224\250.md" | 2 +- ...04\345\217\212\350\257\264\346\230\216.md" | 12 +- ...00\345\247\213\344\275\277\347\224\250.md" | 6 +- ...50\350\257\242\346\241\210\344\276\213.md" | 2 +- .../{bootstrap => boostrap}/__init__.py | 0 .../intelligence/__init__.py | 0 .../intelligence/server_application.py | 0 .../platform/__init__.py | 0 .../platform/product_application.py | 2 +- .../discussion_group_template.py | 6 +- sample_standard_app/quick_start.bat | 2 +- sample_standard_app/quick_start.sh | 2 +- 31 files changed, 204 insertions(+), 208 deletions(-) rename sample_standard_app/{bootstrap => boostrap}/__init__.py (100%) rename sample_standard_app/{bootstrap => boostrap}/intelligence/__init__.py (100%) rename sample_standard_app/{bootstrap => boostrap}/intelligence/server_application.py (100%) rename sample_standard_app/{bootstrap => boostrap}/platform/__init__.py (100%) rename sample_standard_app/{bootstrap => boostrap}/platform/product_application.py (94%) diff --git a/README.md b/README.md index 6f7c29b8..bd186228 100644 --- a/README.md +++ b/README.md @@ -107,7 +107,7 @@ pip install magent-ui ruamel.yaml **One-click Run** -Run [product_application.py](sample_standard_app/boostrap/platform/product_application.py) in sample_standard_app/app/bootstrap for quick startup. +Run [product_application.py](sample_standard_app/boostrap/platform/product_application.py) in sample_standard_app/boostrap/platform for quick startup. For more details, refer to [Quick Start for Product Platform](docs/guidebook/en/How-to/Product_Platform_Quick_Start.md) and the [Advanced Guide](docs/guidebook/en/How-to/Product_Platform_Advancement_Guide.md). 
diff --git a/agentuniverse/agent/agent.py b/agentuniverse/agent/agent.py index 878a1073..15797c23 100644 --- a/agentuniverse/agent/agent.py +++ b/agentuniverse/agent/agent.py @@ -5,16 +5,24 @@ # @Author : heji # @Email : lc299034@antgroup.com # @FileName: agent.py -"""The definition of agent paradigm.""" import json from abc import abstractmethod, ABC from datetime import datetime -from typing import Optional +from typing import Optional, Any, List +from langchain_core.runnables import RunnableSerializable from langchain_core.utils.json import parse_json_markdown +from agentuniverse.agent.action.knowledge.knowledge import Knowledge +from agentuniverse.agent.action.knowledge.knowledge_manager import KnowledgeManager +from agentuniverse.agent.action.knowledge.store.document import Document +from agentuniverse.agent.action.tool.tool import Tool +from agentuniverse.agent.action.tool.tool_manager import ToolManager from agentuniverse.agent.agent_model import AgentModel from agentuniverse.agent.input_object import InputObject +from agentuniverse.agent.memory.memory import Memory +from agentuniverse.agent.memory.memory_manager import MemoryManager +from agentuniverse.agent.memory.message import Message from agentuniverse.agent.output_object import OutputObject from agentuniverse.agent.plan.planner.planner import Planner from agentuniverse.agent.plan.planner.planner_manager import PlannerManager @@ -25,7 +33,15 @@ import ApplicationConfigManager from agentuniverse.base.config.component_configer.configers.agent_configer \ import AgentConfiger +from agentuniverse.base.util.common_util import stream_output from agentuniverse.base.util.logging.logging_util import LOGGER +from agentuniverse.base.util.memory_util import generate_messages +from agentuniverse.llm.llm import LLM +from agentuniverse.llm.llm_manager import LLMManager +from agentuniverse.prompt.chat_prompt import ChatPrompt +from agentuniverse.prompt.prompt import Prompt +from agentuniverse.prompt.prompt_manager import PromptManager +from agentuniverse.prompt.prompt_model import AgentPromptModel class Agent(ComponentBase, ABC): @@ -211,3 +227,117 @@ def as_langchain_tool(self): func=self.langchain_run, description=self.agent_model.info.get("description") + args_description ) + + def process_llm(self, **kwargs) -> LLM: + return LLMManager().get_instance_obj(self.llm_name) + + def process_memory(self, agent_input: dict, **kwargs) -> Memory | None: + memory: Memory = MemoryManager().get_instance_obj(component_instance_name=self.memory_name) + if memory is None: + return None + + chat_history: list = agent_input.get('chat_history') + # generate a list of temporary messages from the given chat history and add them to the memory instance. 
+ temporary_messages: list[Message] = generate_messages(chat_history) + if temporary_messages: + memory.add(temporary_messages, **agent_input) + + params: dict = dict() + params['agent_llm_name'] = self.llm_name + return memory.set_by_agent_model(**params) + + def invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, input_object: InputObject, + **kwargs): + if not input_object.get_data('output_stream'): + res = chain.invoke(input=agent_input) + return res + result = [] + for token in chain.stream(input=agent_input): + stream_output(input_object.get_data('output_stream', None), { + 'type': 'token', + 'data': { + 'chunk': token, + 'agent_info': self.agent_model.info + } + }) + result.append(token) + return "".join(result) + + async def async_invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, + input_object: InputObject, **kwargs): + if not input_object.get_data('output_stream'): + res = await chain.ainvoke(input=agent_input) + return res + result = [] + async for token in chain.astream(input=agent_input): + stream_output(input_object.get_data('output_stream', None), { + 'type': 'token', + 'data': { + 'chunk': token, + 'agent_info': self.agent_model.info + } + }) + result.append(token) + return "".join(result) + + def invoke_tools(self, input_object: InputObject, **kwargs) -> str: + if not self.tool_names: + return '' + + tool_results: list = list() + + for tool_name in self.tool_names: + tool: Tool = ToolManager().get_instance_obj(tool_name) + if tool is None: + continue + tool_input = {key: input_object.get_data(key) for key in tool.input_keys} + tool_results.append(str(tool.run(**tool_input))) + return "\n\n".join(tool_results) + + def invoke_knowledge(self, query_str: str, input_object: InputObject, **kwargs) -> str: + if not self.knowledge_names or not query_str: + return '' + + knowledge_results: list = list() + + for knowledge_name in self.knowledge_names: + knowledge: Knowledge = KnowledgeManager().get_instance_obj(knowledge_name) + if knowledge is None: + continue + knowledge_res: List[Document] = knowledge.query_knowledge( + query_str=query_str, + **input_object.to_dict() + ) + knowledge_results.append(knowledge.to_llm(knowledge_res)) + return "\n\n".join(knowledge_results) + + def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt: + expert_framework = agent_input.pop('expert_framework', '') or '' + + profile: dict = self.agent_model.profile + + profile_instruction = profile.get('instruction') + profile_instruction = expert_framework + profile_instruction if profile_instruction else profile_instruction + + profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'), + target=profile.get('target'), + instruction=profile_instruction) + + # get the prompt by the prompt version + version_prompt: Prompt = PromptManager().get_instance_obj(self.prompt_version) + + if version_prompt is None and not profile_prompt_model: + raise Exception("Either the `prompt_version` or `introduction & target & instruction`" + " in agent profile configuration should be provided.") + if version_prompt: + version_prompt_model: AgentPromptModel = AgentPromptModel( + introduction=getattr(version_prompt, 'introduction', ''), + target=getattr(version_prompt, 'target', ''), + instruction=expert_framework + getattr(version_prompt, 'instruction', '')) + profile_prompt_model = profile_prompt_model + version_prompt_model + + chat_prompt = ChatPrompt().build_prompt(profile_prompt_model, ['introduction', 'target', 
'instruction']) + image_urls: list = agent_input.pop('image_urls', []) or [] + if image_urls: + chat_prompt.generate_image_prompt(image_urls) + return chat_prompt diff --git a/agentuniverse/agent/plan/planner/executing_planner/executing_planner.py b/agentuniverse/agent/plan/planner/executing_planner/executing_planner.py index 90b11c7f..5c7bcf24 100644 --- a/agentuniverse/agent/plan/planner/executing_planner/executing_planner.py +++ b/agentuniverse/agent/plan/planner/executing_planner/executing_planner.py @@ -41,16 +41,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: Inp prompt: Prompt = self.handle_prompt(agent_model, planner_input) process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) - memory_messages = assemble_memory_input(memory, planner_input) + assemble_memory_input(memory, planner_input) chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser() res = chain.invoke(input=planner_input) - memory_messages = assemble_memory_output(memory=memory, - agent_input=planner_input, - content=f"Human: {planner_input.get(self.input_key)}, AI: {res}", - memory_messages=memory_messages) - return {**planner_input, self.output_key: res, 'chat_history': memory_messages} + assemble_memory_output(memory=memory, + agent_input=planner_input, + content=f"Human: {planner_input.get(self.input_key)}, AI: {res}") + return {**planner_input, self.output_key: res} def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt: """Prompt module processing. diff --git a/agentuniverse/agent/plan/planner/expressing_planner/expressing_planner.py b/agentuniverse/agent/plan/planner/expressing_planner/expressing_planner.py index afe1b396..960dc6d7 100644 --- a/agentuniverse/agent/plan/planner/expressing_planner/expressing_planner.py +++ b/agentuniverse/agent/plan/planner/expressing_planner/expressing_planner.py @@ -39,16 +39,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: Inp prompt: Prompt = self.handle_prompt(agent_model, planner_input) process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) - memory_messages = assemble_memory_input(memory, planner_input) + assemble_memory_input(memory, planner_input) chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser() res = self.invoke_chain(agent_model, chain, planner_input, None, input_object) - memory_messages = assemble_memory_output(memory=memory, - agent_input=planner_input, - content=f"Human: {planner_input.get(self.input_key)}, AI: {res}", - memory_messages=memory_messages) - return {**planner_input, self.output_key: res, 'chat_history': memory_messages} + assemble_memory_output(memory=memory, + agent_input=planner_input, + content=f"Human: {planner_input.get(self.input_key)}, AI: {res}") + return {**planner_input, self.output_key: res} def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt: """Prompt module processing. 
diff --git a/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.py b/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.py index b702d369..a5da6cc3 100644 --- a/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.py +++ b/agentuniverse/agent/plan/planner/nl2api_planner/nl2api_planner.py @@ -43,16 +43,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: Inp process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) - memory_messages = assemble_memory_input(memory, planner_input) + assemble_memory_input(memory, planner_input) chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser() res = self.invoke_chain(agent_model, chain, planner_input, None, input_object) - memory_messages = assemble_memory_output(memory=memory, - agent_input=planner_input, - content=f"Human: {planner_input.get(self.input_key)}, AI: {res}", - memory_messages=memory_messages) - return {**planner_input, self.output_key: res, 'chat_history': memory_messages} + assemble_memory_output(memory=memory, + agent_input=planner_input, + content=f"Human: {planner_input.get(self.input_key)}, AI: {res}") + return {**planner_input, self.output_key: res} @staticmethod def acquire_tools(action) -> list[LangchainTool]: diff --git a/agentuniverse/agent/plan/planner/planning_planner/planning_planner.py b/agentuniverse/agent/plan/planner/planning_planner/planning_planner.py index 8726c6a1..c1c9874a 100644 --- a/agentuniverse/agent/plan/planner/planning_planner/planning_planner.py +++ b/agentuniverse/agent/plan/planner/planning_planner/planning_planner.py @@ -40,16 +40,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, prompt: Prompt = self.handle_prompt(agent_model, planner_input) process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) - memory_messages = assemble_memory_input(memory, planner_input) + assemble_memory_input(memory, planner_input) chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser() res = self.invoke_chain(agent_model, chain, planner_input, None, input_object) - memory_messages = assemble_memory_output(memory=memory, - agent_input=planner_input, - content=f"Human: {planner_input.get(self.input_key)}, AI: {res}", - memory_messages=memory_messages) - return {**planner_input, self.output_key: res, 'chat_history': memory_messages} + assemble_memory_output(memory=memory, + agent_input=planner_input, + content=f"Human: {planner_input.get(self.input_key)}, AI: {res}") + return {**planner_input, self.output_key: res} def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt: """Prompt module processing. 
diff --git a/agentuniverse/agent/plan/planner/rag_planner/rag_planner.py b/agentuniverse/agent/plan/planner/rag_planner/rag_planner.py index c5633bd3..923cdf33 100644 --- a/agentuniverse/agent/plan/planner/rag_planner/rag_planner.py +++ b/agentuniverse/agent/plan/planner/rag_planner/rag_planner.py @@ -43,17 +43,16 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, prompt: ChatPrompt = self.handle_prompt(agent_model, planner_input) process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) - memory_messages = assemble_memory_input(memory, planner_input) + assemble_memory_input(memory, planner_input) chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser() res = self.invoke_chain(agent_model, chain, planner_input, None, input_object) - memory_messages = assemble_memory_output(memory=memory, - agent_input=planner_input, - content=f"Human: {planner_input.get(self.input_key)}, AI: {res}", - memory_messages=memory_messages) - return {**planner_input, self.output_key: res, 'chat_history': memory_messages} + assemble_memory_output(memory=memory, + agent_input=planner_input, + content=f"Human: {planner_input.get(self.input_key)}, AI: {res}") + return {**planner_input, self.output_key: res} def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> ChatPrompt: """Prompt module processing. diff --git a/agentuniverse/agent/plan/planner/reviewing_planner/reviewing_planner.py b/agentuniverse/agent/plan/planner/reviewing_planner/reviewing_planner.py index 496cca33..62ed672f 100644 --- a/agentuniverse/agent/plan/planner/reviewing_planner/reviewing_planner.py +++ b/agentuniverse/agent/plan/planner/reviewing_planner/reviewing_planner.py @@ -39,16 +39,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: Inp prompt: Prompt = self.handle_prompt(agent_model, planner_input) process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input) - memory_messages = assemble_memory_input(memory, planner_input) + assemble_memory_input(memory, planner_input) chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser() res = self.invoke_chain(agent_model, chain, planner_input, None, input_object) - memory_messages = assemble_memory_output(memory=memory, - agent_input=planner_input, - content=f"Human: {planner_input.get(self.input_key)}, AI: {res}", - memory_messages=memory_messages) - return {**planner_input, self.output_key: res, 'chat_history': memory_messages} + assemble_memory_output(memory=memory, + agent_input=planner_input, + content=f"Human: {planner_input.get(self.input_key)}, AI: {res}") + return {**planner_input, self.output_key: res} def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt: """Generate prompt template for the planner. 
diff --git a/agentuniverse/agent/template/agent_template.py b/agentuniverse/agent/template/agent_template.py index cbbe9953..0bd8bf8f 100644 --- a/agentuniverse/agent/template/agent_template.py +++ b/agentuniverse/agent/template/agent_template.py @@ -6,33 +6,19 @@ # @Email : wangchongshi.wcs@antgroup.com # @FileName: agent_template.py from abc import ABC -from typing import Optional, Any, List +from typing import Optional from queue import Queue from langchain_core.output_parsers import StrOutputParser -from langchain_core.runnables import RunnableSerializable -from agentuniverse.agent.action.knowledge.knowledge import Knowledge -from agentuniverse.agent.action.knowledge.knowledge_manager import KnowledgeManager -from agentuniverse.agent.action.knowledge.store.document import Document -from agentuniverse.agent.action.tool.tool import Tool -from agentuniverse.agent.action.tool.tool_manager import ToolManager from agentuniverse.agent.agent import Agent from agentuniverse.agent.input_object import InputObject from agentuniverse.agent.memory.memory import Memory -from agentuniverse.agent.memory.memory_manager import MemoryManager -from agentuniverse.agent.memory.message import Message from agentuniverse.base.config.component_configer.configers.agent_configer import AgentConfiger from agentuniverse.base.util.agent_util import assemble_memory_input, assemble_memory_output -from agentuniverse.base.util.common_util import stream_output -from agentuniverse.base.util.memory_util import generate_messages from agentuniverse.base.util.prompt_util import process_llm_token from agentuniverse.llm.llm import LLM -from agentuniverse.llm.llm_manager import LLMManager -from agentuniverse.prompt.chat_prompt import ChatPrompt from agentuniverse.prompt.prompt import Prompt -from agentuniverse.prompt.prompt_manager import PromptManager -from agentuniverse.prompt.prompt_model import AgentPromptModel class AgentTemplate(Agent, ABC): @@ -80,120 +66,6 @@ async def customized_async_execute(self, input_object: InputObject, agent_input: self.add_output_stream(input_object.get_data('output_stream'), res) return {**agent_input, 'output': res} - def process_llm(self, **kwargs) -> LLM: - return LLMManager().get_instance_obj(self.llm_name) - - def process_memory(self, agent_input: dict, **kwargs) -> Memory | None: - memory: Memory = MemoryManager().get_instance_obj(component_instance_name=self.memory_name) - if memory is None: - return None - - chat_history: list = agent_input.get('chat_history') - # generate a list of temporary messages from the given chat history and add them to the memory instance. 
- temporary_messages: list[Message] = generate_messages(chat_history) - if temporary_messages: - memory.add(temporary_messages, **agent_input) - - params: dict = dict() - params['agent_llm_name'] = self.llm_name - return memory.set_by_agent_model(**params) - - def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt: - expert_framework = agent_input.pop('expert_framework', '') or '' - - profile: dict = self.agent_model.profile - - profile_instruction = profile.get('instruction') - profile_instruction = expert_framework + profile_instruction if profile_instruction else profile_instruction - - profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'), - target=profile.get('target'), - instruction=profile_instruction) - - # get the prompt by the prompt version - version_prompt: Prompt = PromptManager().get_instance_obj(self.prompt_version) - - if version_prompt is None and not profile_prompt_model: - raise Exception("Either the `prompt_version` or `introduction & target & instruction`" - " in agent profile configuration should be provided.") - if version_prompt: - version_prompt_model: AgentPromptModel = AgentPromptModel( - introduction=getattr(version_prompt, 'introduction', ''), - target=getattr(version_prompt, 'target', ''), - instruction=expert_framework + getattr(version_prompt, 'instruction', '')) - profile_prompt_model = profile_prompt_model + version_prompt_model - - chat_prompt = ChatPrompt().build_prompt(profile_prompt_model, ['introduction', 'target', 'instruction']) - image_urls: list = agent_input.pop('image_urls', []) or [] - if image_urls: - chat_prompt.generate_image_prompt(image_urls) - return chat_prompt - - def invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, input_object: InputObject, - **kwargs): - if not input_object.get_data('output_stream'): - res = chain.invoke(input=agent_input) - return res - result = [] - for token in chain.stream(input=agent_input): - stream_output(input_object.get_data('output_stream', None), { - 'type': 'token', - 'data': { - 'chunk': token, - 'agent_info': self.agent_model.info - } - }) - result.append(token) - return "".join(result) - - async def async_invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, - input_object: InputObject, **kwargs): - if not input_object.get_data('output_stream'): - res = await chain.ainvoke(input=agent_input) - return res - result = [] - async for token in chain.astream(input=agent_input): - stream_output(input_object.get_data('output_stream', None), { - 'type': 'token', - 'data': { - 'chunk': token, - 'agent_info': self.agent_model.info - } - }) - result.append(token) - return "".join(result) - - def invoke_tools(self, input_object: InputObject, **kwargs): - if not self.tool_names: - return '' - - tool_results: list = list() - - for tool_name in self.tool_names: - tool: Tool = ToolManager().get_instance_obj(tool_name) - if tool is None: - continue - tool_input = {key: input_object.get_data(key) for key in tool.input_keys} - tool_results.append(str(tool.run(**tool_input))) - return "\n\n".join(tool_results) - - def invoke_knowledge(self, query_str: str, input_object: InputObject, **kwargs): - if not self.knowledge_names or not query_str: - return '' - - knowledge_results: list = list() - - for knowledge_name in self.knowledge_names: - knowledge: Knowledge = KnowledgeManager().get_instance_obj(knowledge_name) - if knowledge is None: - continue - knowledge_res: List[Document] = knowledge.query_knowledge( - 
query_str=query_str, - **input_object.to_dict() - ) - knowledge_results.append(knowledge.to_llm(knowledge_res)) - return "\n\n".join(knowledge_results) - def validate_required_params(self): pass diff --git a/agentuniverse/base/util/prompt_util.py b/agentuniverse/base/util/prompt_util.py index cc0b0612..532a57c9 100644 --- a/agentuniverse/base/util/prompt_util.py +++ b/agentuniverse/base/util/prompt_util.py @@ -162,7 +162,7 @@ def process_llm_token(agent_llm: LLM, lc_prompt_template, profile: dict, planner # get the number of tokens in the prompt prompt_tokens: int = agent_llm.get_num_tokens(prompt) - input_tokens = agent_llm.max_context_length() - agent_llm.max_tokens + input_tokens = agent_llm.max_context_length() - llm_model.get('max_tokens', agent_llm.max_tokens) if input_tokens <= 0: raise Exception("The current output max tokens limit is greater than the context length of the LLM model, " "please adjust it by editing the `max_tokens` parameter in the llm yaml.") diff --git a/docs/guidebook/en/Examples/Discussion_Group.md b/docs/guidebook/en/Examples/Discussion_Group.md index 57d41d61..3cac15cf 100644 --- a/docs/guidebook/en/Examples/Discussion_Group.md +++ b/docs/guidebook/en/Examples/Discussion_Group.md @@ -39,7 +39,7 @@ memory: name: 'demo_memory' metadata: type: 'AGENT' - module: 'sample_standard_app.app.core.agent.discussion_agent_case.host_agent' + module: 'sample_standard_app.intelligence.agentic.agent.agent_instance.discussion_agent_case.host_agent' class: 'HostAgent' ``` @@ -103,7 +103,7 @@ memory: name: 'demo_memory' metadata: type: 'AGENT' - module: 'sample_standard_app.app.core.agent.discussion_agent_case.host_agent' + module: 'sample_standard_app.intelligence.agentic.agent.agent_instance.discussion_agent_case.host_agent' class: 'HostAgent' ``` diff --git a/docs/guidebook/en/Examples/Legal_Advice.md b/docs/guidebook/en/Examples/Legal_Advice.md index 9b983c2f..06e1c3e9 100644 --- a/docs/guidebook/en/Examples/Legal_Advice.md +++ b/docs/guidebook/en/Examples/Legal_Advice.md @@ -32,7 +32,7 @@ readers: metadata: type: 'KNOWLEDGE' - module: 'sample_standard_app.app.core.knowledge.law_knowledge' + module: 'sample_standard_app.intelligence.agentic.knowledge.law_knowledge' class: 'LawKnowledge' ``` diff --git a/docs/guidebook/en/Get_Start/Application_Project_Structure_and_Explanation.md b/docs/guidebook/en/Get_Start/Application_Project_Structure_and_Explanation.md index c9982579..1a48eec5 100644 --- a/docs/guidebook/en/Get_Start/Application_Project_Structure_and_Explanation.md +++ b/docs/guidebook/en/Get_Start/Application_Project_Structure_and_Explanation.md @@ -6,7 +6,7 @@ The directory structure provided below is only a suggestion, and you are free to ``` / -├── bootstrap/ +├── boostrap/ │ ├── intelligence/ │ │ └── server_application.py │ ├── platform/ @@ -39,7 +39,7 @@ The directory structure provided below is only a suggestion, and you are free to ``` Here's what each package directory level means: -* bootstrap: The entry layer for starting the web server +* boostrap: The entry layer for starting the web server * intelligence - The entry layer for starting Intelligent web server * platform - The entry layer for productization web server * intelligence: Intelligent project layer, used for agent construction, component customization, and service implementation. 
@@ -69,7 +69,7 @@ Here's what each package directory level means: ## Using Any Project Directory Structure You can adjust the project directory structure according to your preferences and actual circumstances, but please ensure you follow the rules below. -### Bootstrap Startup Directory +### Boostrap Startup Directory Regardless of the location of your project's startup script, except for testing, you should start the application service with the following statement: ```python @@ -93,7 +93,7 @@ ServerApplication.start() `ServerApplication.start()` is the server startup method for this framework, which accepts a configuration path `config_path` as an input parameter. The default `config_path` points to a file named 'config.toml', located in the config directory under the project root path(`project_root_dir/config/config.toml`). Ensure that the config file path is correct; if you have further changed the directory of the config file, adjust the `config_path` accordingly. ### Config Directory -As mentioned in the [Bootstrap Startup Directory](#bootstrap-startup-directory), the default config path for the project is `project_root_dir/config/config.toml`. If you have made any adjustments to this, please ensure that the correct config file path is provided to the startup method when the application server is launched. +As mentioned in the [Boostrap Startup Directory](#boostrap-startup-directory), the default config path for the project is `project_root_dir/config/config.toml`. If you have made any adjustments to this, please ensure that the correct config file path is provided to the startup method when the application server is launched. ### Core Directory As shown in the recommended directory structure, the agentic directory within intelligence is primarily used to place domain components related to agents, knowledge, and LLMs. If you want to customize the location of core components, you can specify the paths of the domain components in the [CORE_PACKAGE] section of the main configuration file config/config.toml as follows: diff --git a/docs/guidebook/en/Get_Start/Start_Tutorial.md b/docs/guidebook/en/Get_Start/Start_Tutorial.md index 2706a6f2..54aa6d93 100644 --- a/docs/guidebook/en/Get_Start/Start_Tutorial.md +++ b/docs/guidebook/en/Get_Start/Start_Tutorial.md @@ -217,11 +217,11 @@ metadata: In `xx_service.yaml`, we define a `demo_service` configuration. The `name` field defines the name of the service, the `description` field defines the description of the service, and the `agent` field defines which agent provides the service. 
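Once `demo_service` is registered, the agent it wraps can also be exercised in-process before wiring up the HTTP layer. A minimal test-style sketch, assuming the usual agentUniverse entry points (`AgentUniverse().start()`, `AgentManager`), that the relative config path is adjusted to your project layout, and that the serving agent returns its answer under the `output` key as the sample agents do:

```python
# Minimal sketch: invoke the agent behind `demo_service` directly.
# Assumptions: config.toml already scans the agent package, and the agent name
# below matches the `agent` field declared in the service yaml.
from agentuniverse.base.agentuniverse import AgentUniverse
from agentuniverse.agent.agent_manager import AgentManager

AgentUniverse().start(config_path='../../config/config.toml')

agent = AgentManager().get_instance_obj('demo_agent')  # agent name from the service yaml
result = agent.run(input='Your question for the demo agent.')
print(result.get_data('output'))
```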
### Start the Service -Start using the `server_application.py` file found in the `bootstrap` folder within your IDE, +Start using the `server_application.py` file found in the `boostrap` folder within your IDE, or enter the following command in the terminal to start the service interface and begin listening: ```shell -# under the bootstrap directory of the project -cd `your bootstrap directory path` +# under the boostrap directory of the project +cd `your boostrap directory path` python server_application.py ``` diff --git a/docs/guidebook/en/How-to/How_To_Build_A_RAG_Agent.md b/docs/guidebook/en/How-to/How_To_Build_A_RAG_Agent.md index c9a05870..1c5c8514 100644 --- a/docs/guidebook/en/How-to/How_To_Build_A_RAG_Agent.md +++ b/docs/guidebook/en/How-to/How_To_Build_A_RAG_Agent.md @@ -22,7 +22,7 @@ readers: metadata: type: 'KNOWLEDGE' - module: 'sample_standard_app.app.core.knowledge.law_knowledge' + module: 'sample_standard_app.intelligence.agentic.knowledge.law_knowledge' class: 'LawKnowledge' ``` diff --git a/docs/guidebook/en/How-to/Product_Platform_Quick_Start.md b/docs/guidebook/en/How-to/Product_Platform_Quick_Start.md index 2198e341..7e4beae1 100644 --- a/docs/guidebook/en/How-to/Product_Platform_Quick_Start.md +++ b/docs/guidebook/en/How-to/Product_Platform_Quick_Start.md @@ -23,7 +23,7 @@ If you have previously used agentUniverse’s sample project, please add the fol # Ignore the context content. [CORE_PACKAGE] # Scan and register product components for all paths under this list, with priority over the default. -product = ['sample_standard_app.app.core.product'] +product = ['sample_standard_app.platform.difizen.product'] # Ignore the context content. ``` If you are using agentUniverse for the first time, you can directly adopt the latest sample project’s `config.toml` file. @@ -34,7 +34,7 @@ Of course, when utilizing the agent, you need to preconfigure the various LLM mo ## Using the agentUniverse Product Platform ### Starting the Product Service -To start the product service with a single click, run the [product_application](../../../../sample_standard_app/boostrap/platform/product_application.py) file located in `sample_standard_app/app/bootstrap` . +To start the product service with a single click, run the [product_application](../../../../sample_standard_app/boostrap/platform/product_application.py) file located in `sample_standard_app/boostrap/platform` . ![img.png](../../_picture/product_start.png) Upon successful initiation, it will automatically redirect you to the product homepage, which features system presets as well as your customized Agent, Tool and Knowledge product modules. diff --git a/docs/guidebook/en/In-Depth_Guides/Tutorials/Agent/Agent_Create_And_Use.md b/docs/guidebook/en/In-Depth_Guides/Tutorials/Agent/Agent_Create_And_Use.md index 855790b9..6584150f 100644 --- a/docs/guidebook/en/In-Depth_Guides/Tutorials/Agent/Agent_Create_And_Use.md +++ b/docs/guidebook/en/In-Depth_Guides/Tutorials/Agent/Agent_Create_And_Use.md @@ -101,12 +101,12 @@ action: # - 'knowledge_a' metadata: type: 'AGENT' - module: 'sample_standard_app.app.core.agent.rag_agent_case.demo_rag_agent' + module: 'sample_standard_app.intelligence.agentic.agent.agent_instance.rag_agent_case.demo_rag_agent' class: 'DemoRagAgent' ``` The above is an actual example of an agent configuration. Besides the standard configuration items introduced above, those of you who are observant may have noticed variables in the prompt, such as `{background}` and `{input}`. 
This is a very practical prompt replacement feature, which we will explain further in the section titled "[How to dynamically adjust settings based on user input](#How to dynamically adjust settings based on user input)". -You can find more examples of agent configuration YAML files in our sample project, located under the path `sample_standard_app.app.core.agent`. +You can find more examples of agent configuration YAML files in our sample project, located under the path `sample_standard_app.intelligence.agentic.agent`. In addition, agentUniverse does not restrict users from extending the YAML configuration content for agents. You can create any custom configuration keys according to your own requirements, but please ensure that you do not duplicate the default configuration keywords mentioned above. @@ -252,7 +252,7 @@ Taking the configuration of the example project as a reference, it would look so ```yaml [CORE_PACKAGE] # Scan and register agent components for all paths under this list, with priority over the default. -agent = ['sample_standard_app.app.core.agent'] +agent = ['sample_standard_app.intelligence.agentic.agent'] ``` ## Other techniques for agent development @@ -265,7 +265,7 @@ In the [Creating Agent Domain Behavior Definitions](#Creating Agent Domain Behav In the [An actual example of an agent configuration](#An actual example of an agent configuration.) section of this document, the prompt includes variables like `{background}`,`{input}`, etc. This feature is the prompt variable template replacement function, aimed at dynamically influencing the prompt based on the user's input. One only needs to define the text using `{variable}` format in the agent configuration settings section and then define the corresponding variables in the `agent_input` method's agent_input to dynamically replace the prompt based on the input portion. -For example, in the sample agent `sample_standard_app.app.core.agent.rag_agent_case.demo_rag_agent.py`, there is the following `parse_input` method. +For example, in the sample agent `sample_standard_app.intelligence.agentic.agent.agent_instance.rag_agent_case.demo_rag_agent.py`, there is the following `parse_input` method. ```text def parse_input(self, input_object: InputObject, agent_input: dict) -> dict: @@ -273,7 +273,7 @@ def parse_input(self, input_object: InputObject, agent_input: dict) -> dict: return agent_input ``` -In its agent settings file `sample_standard_app.app.core.agent.rag_agent_case.demo_rag_agent.yaml`, in the `instruction` section, we can see the following configuration. +In its agent settings file `sample_standard_app.intelligence.agentic.agent.agent_instance.rag_agent_case.demo_rag_agent.yaml`, in the `instruction` section, we can see the following configuration. 
```text instruction: | diff --git a/docs/guidebook/en/In-Depth_Guides/Tutorials/Data_Autonomous_Agent.md b/docs/guidebook/en/In-Depth_Guides/Tutorials/Data_Autonomous_Agent.md index de0b9ae2..82bef3a3 100644 --- a/docs/guidebook/en/In-Depth_Guides/Tutorials/Data_Autonomous_Agent.md +++ b/docs/guidebook/en/In-Depth_Guides/Tutorials/Data_Autonomous_Agent.md @@ -42,7 +42,7 @@ plan: dataset_evaluator: 'dataset_eval_agent' metadata: type: 'AGENT' - module: 'sample_standard_app.app.core.agent.data_agent_case.data_agent' + module: 'sample_standard_app.intelligence.agentic.agent.agent_instance.data_agent_case.data_agent' class: 'DataAgent' ``` [data_agent sample configuration file](../../../../../sample_standard_app/app/core/agent/data_agent_case/data_agent.yaml) @@ -64,7 +64,7 @@ plan: candidate: 'demo_rag_agent' metadata: type: 'AGENT' - module: 'sample_standard_app.app.core.agent.data_agent_case.dataset_build_agent' + module: 'sample_standard_app.intelligence.agentic.agent.agent_instance.data_agent_case.dataset_build_agent' class: 'DatasetBuildAgent' ``` [dataset_build_agent sample configuration file](../../../../../sample_standard_app/app/core/agent/data_agent_case/dataset_build_agent.yaml) @@ -88,7 +88,7 @@ profile: temperature: 0.1 metadata: type: 'AGENT' - module: 'sample_standard_app.app.core.agent.data_agent_case.dataset_eval_agent' + module: 'sample_standard_app.intelligence.agentic.agent.agent_instance.data_agent_case.dataset_eval_agent' class: 'DatasetEvalAgent' ``` [dataset_eval_agent sample configuration file](../../../../../sample_standard_app/app/core/agent/data_agent_case/dataset_eval_agent.yaml) diff --git a/docs/guidebook/en/In-Depth_Guides/Tutorials/Plan/Planner_Define_And_Use.md b/docs/guidebook/en/In-Depth_Guides/Tutorials/Plan/Planner_Define_And_Use.md index ff08e715..9b96c8a4 100644 --- a/docs/guidebook/en/In-Depth_Guides/Tutorials/Plan/Planner_Define_And_Use.md +++ b/docs/guidebook/en/In-Depth_Guides/Tutorials/Plan/Planner_Define_And_Use.md @@ -129,7 +129,7 @@ class RagPlanner(Planner): ) res = asyncio.run( chain_with_history.ainvoke(input=planner_input, config={"configurable": {"session_id": "unused"}})) - return {**planner_input, self.output_key: res.content, 'chat_history': generate_memories(chat_history)} + return {**planner_input, self.output_key: res.content} def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> ChatPrompt: """Prompt module processing. 
diff --git "a/docs/guidebook/zh/In-Depth_Guides/\345\216\237\347\220\206\344\273\213\347\273\215/\350\256\241\345\210\222/\350\256\241\345\210\222\345\256\232\344\271\211\344\270\216\344\275\277\347\224\250.md" "b/docs/guidebook/zh/In-Depth_Guides/\345\216\237\347\220\206\344\273\213\347\273\215/\350\256\241\345\210\222/\350\256\241\345\210\222\345\256\232\344\271\211\344\270\216\344\275\277\347\224\250.md" index 93695f87..eeec77c9 100644 --- "a/docs/guidebook/zh/In-Depth_Guides/\345\216\237\347\220\206\344\273\213\347\273\215/\350\256\241\345\210\222/\350\256\241\345\210\222\345\256\232\344\271\211\344\270\216\344\275\277\347\224\250.md" +++ "b/docs/guidebook/zh/In-Depth_Guides/\345\216\237\347\220\206\344\273\213\347\273\215/\350\256\241\345\210\222/\350\256\241\345\210\222\345\256\232\344\271\211\344\270\216\344\275\277\347\224\250.md" @@ -129,7 +129,7 @@ class RagPlanner(Planner): ) res = asyncio.run( chain_with_history.ainvoke(input=planner_input, config={"configurable": {"session_id": "unused"}})) - return {**planner_input, self.output_key: res.content, 'chat_history': generate_memories(chat_history)} + return {**planner_input, self.output_key: res.content} def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> ChatPrompt: """Prompt module processing. diff --git "a/docs/guidebook/zh/\345\274\200\345\247\213\344\275\277\347\224\250/\345\272\224\347\224\250\345\267\245\347\250\213\347\273\223\346\236\204\345\217\212\350\257\264\346\230\216.md" "b/docs/guidebook/zh/\345\274\200\345\247\213\344\275\277\347\224\250/\345\272\224\347\224\250\345\267\245\347\250\213\347\273\223\346\236\204\345\217\212\350\257\264\346\230\216.md" index bf2b17dd..4c207e72 100644 --- "a/docs/guidebook/zh/\345\274\200\345\247\213\344\275\277\347\224\250/\345\272\224\347\224\250\345\267\245\347\250\213\347\273\223\346\236\204\345\217\212\350\257\264\346\230\216.md" +++ "b/docs/guidebook/zh/\345\274\200\345\247\213\344\275\277\347\224\250/\345\272\224\347\224\250\345\267\245\347\250\213\347\273\223\346\236\204\345\217\212\350\257\264\346\230\216.md" @@ -6,7 +6,7 @@ ``` / -├── bootstrap/ +├── boostrap/ │ ├── intelligence/ │ │ └── server_application.py │ ├── platform/ @@ -39,9 +39,9 @@ ``` 每个层级包目录具体含义如下: -* bootstrap - webserve启动入口层 - * intelligence - 智能应用webserve启动入口层 - * platform - 产品化服务webserve启动入口层 +* boostrap - web server启动入口层 + * intelligence - 智能应用web server启动入口层 + * platform - 产品化服务web server启动入口层 * intelligence - 智能工程层:用于智能体构建,组件拓展定制以及服务化实现 * agentic - 智能领域层,放置智能体相关的agentUniverse领域组件 * agent - 智能体层,对应agentUniverse的agent领域组件,可构建智能体模版、智能体实例等 @@ -70,7 +70,7 @@ ## 使用任意工程目录结构 以上为agentUniverse官方推荐的工程目录结构,若您想按照个人喜好与实际情况进行自定义调整,但请务必遵循以下的规则。 -### 启动目录bootstrap +### 启动目录boostrap 无论您的项目启动脚本在何位置,除测试外请保证应用服务使用如下语句进行服务启动: ```python @@ -94,7 +94,7 @@ ServerApplication.start() `ServerApplication.start()`为本框架的serve启动方法,其会接收项目配置路径`config_path`作为入参,`config_path`默认路径为项目根路径下的config目录中的文件 `project_root_dir/config/config.toml`,请确保config文件路径正确,如果您进一步变更了config文件的目录,请将`config_path`作为调整; ### config目录 -如 [启动目录bootstrap](#启动目录bootstrap)部分所述,项目的默认config路径为`project_root_dir/config/config.toml`,若您对其进行了调整,请确保在应用serve启动时将正确的config文件路径传入启动方法。 +如 [启动目录boostrap](#启动目录boostrap)部分所述,项目的默认config路径为`project_root_dir/config/config.toml`,若您对其进行了调整,请确保在应用server启动时将正确的config文件路径传入启动方法。 ### core目录 如推荐目录结构工程所示,intelligence中的agentic工程目录主要用于放置agent、knowledge、LLM等智能化相关的领域组件,若您想自定义核心组件位置,只需在项目工程的主配置文件`config/config.toml`中的`[CORE_PACKAGE]`对领域组件进行路径指定即可,如下: diff --git 
"a/docs/guidebook/zh/\345\274\200\345\247\213\344\275\277\347\224\250/\345\274\200\345\247\213\344\275\277\347\224\250.md" "b/docs/guidebook/zh/\345\274\200\345\247\213\344\275\277\347\224\250/\345\274\200\345\247\213\344\275\277\347\224\250.md" index 0f2dbb28..0ec70be7 100644 --- "a/docs/guidebook/zh/\345\274\200\345\247\213\344\275\277\347\224\250/\345\274\200\345\247\213\344\275\277\347\224\250.md" +++ "b/docs/guidebook/zh/\345\274\200\345\247\213\344\275\277\347\224\250/\345\274\200\345\247\213\344\275\277\347\224\250.md" @@ -215,11 +215,11 @@ metadata: 在`xx_service.yaml`中,我们定义了一个`demo_service`的配置。`name`字段定义了服务的名称,`description`字段定义了服务的描述,`agent`字段定义该服务由哪个agent提供。 ### 启动服务 -使用在IDE中`bootstrap`文件夹下的`server_application.py`文件启动, +使用在IDE中`boostrap`文件夹下的`server_application.py`文件启动, 或在终端中输入如下命令完成服务接口的启动与监听: ```shell -# under the bootstrap directory of the project -cd `your bootstrap directory path` +# under the boostrap directory of the project +cd `your boostrap directory path` python server_application.py ``` diff --git "a/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/\346\263\225\345\276\213\345\222\250\350\257\242\346\241\210\344\276\213.md" "b/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/\346\263\225\345\276\213\345\222\250\350\257\242\346\241\210\344\276\213.md" index a2efc8a6..71878a90 100644 --- "a/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/\346\263\225\345\276\213\345\222\250\350\257\242\346\241\210\344\276\213.md" +++ "b/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/\346\263\225\345\276\213\345\222\250\350\257\242\346\241\210\344\276\213.md" @@ -32,7 +32,7 @@ readers: metadata: type: 'KNOWLEDGE' - module: 'sample_standard_app.app.core.knowledge.law_knowledge' + module: 'sample_standard_app.intelligence.agentic.knowledge.law_knowledge' class: 'LawKnowledge' ``` diff --git a/sample_standard_app/bootstrap/__init__.py b/sample_standard_app/boostrap/__init__.py similarity index 100% rename from sample_standard_app/bootstrap/__init__.py rename to sample_standard_app/boostrap/__init__.py diff --git a/sample_standard_app/bootstrap/intelligence/__init__.py b/sample_standard_app/boostrap/intelligence/__init__.py similarity index 100% rename from sample_standard_app/bootstrap/intelligence/__init__.py rename to sample_standard_app/boostrap/intelligence/__init__.py diff --git a/sample_standard_app/bootstrap/intelligence/server_application.py b/sample_standard_app/boostrap/intelligence/server_application.py similarity index 100% rename from sample_standard_app/bootstrap/intelligence/server_application.py rename to sample_standard_app/boostrap/intelligence/server_application.py diff --git a/sample_standard_app/bootstrap/platform/__init__.py b/sample_standard_app/boostrap/platform/__init__.py similarity index 100% rename from sample_standard_app/bootstrap/platform/__init__.py rename to sample_standard_app/boostrap/platform/__init__.py diff --git a/sample_standard_app/bootstrap/platform/product_application.py b/sample_standard_app/boostrap/platform/product_application.py similarity index 94% rename from sample_standard_app/bootstrap/platform/product_application.py rename to sample_standard_app/boostrap/platform/product_application.py index 9296270d..f7032b58 100644 --- a/sample_standard_app/bootstrap/platform/product_application.py +++ b/sample_standard_app/boostrap/platform/product_application.py @@ -4,7 +4,7 @@ # @Time : 2024/7/24 15:35 # @Author : wangchongshi # @Email : wangchongshi.wcs@antgroup.com -# 
@FileName: difizen_boostrap.py +# @FileName: product_application.py from agentuniverse.base.agentuniverse import AgentUniverse from agentuniverse_product.agentuniverse_product import AgentUniverseProduct diff --git a/sample_standard_app/intelligence/agentic/agent/agent_template/discussion_group_template.py b/sample_standard_app/intelligence/agentic/agent/agent_template/discussion_group_template.py index 861d5d78..600a072d 100644 --- a/sample_standard_app/intelligence/agentic/agent/agent_template/discussion_group_template.py +++ b/sample_standard_app/intelligence/agentic/agent/agent_template/discussion_group_template.py @@ -145,7 +145,7 @@ def host_agent_run(self, agent_input: dict, input_object: InputObject) -> dict: prompt: ChatPrompt = self.process_prompt(agent_input) process_llm_token(llm, prompt.as_langchain(), self.agent_model.profile, agent_input) - memory_messages = assemble_memory_input(memory, agent_input) + assemble_memory_input(memory, agent_input) chain = prompt.as_langchain() | llm.as_langchain_runnable(self.agent_model.llm_params()) | StrOutputParser() res = self.invoke_chain(chain, agent_input, input_object) @@ -154,10 +154,10 @@ def host_agent_run(self, agent_input: dict, input_object: InputObject) -> dict: f"ai: after several rounds of discussions among the participants, " f"the host in the discussion group came to the conclusion:{res}") - memory_messages = assemble_memory_output(memory, agent_input, content, '', memory_messages) + assemble_memory_output(memory, agent_input, content, '') LOGGER.info(f"Discussion summary is: {res}") - return {**agent_input, 'output': res, 'chat_history': memory_messages} + return {**agent_input, 'output': res} def initialize_by_component_configer(self, component_configer: AgentConfiger) -> 'DiscussionGroupTemplate': super().initialize_by_component_configer(component_configer) diff --git a/sample_standard_app/quick_start.bat b/sample_standard_app/quick_start.bat index d4ff0379..1a66a679 100644 --- a/sample_standard_app/quick_start.bat +++ b/sample_standard_app/quick_start.bat @@ -82,5 +82,5 @@ rem Set PYTHONPATH to the parent directory of the current working directory set PYTHONPATH=%cd%\..\.. rem Start the service -cd app\bootstrap +cd boostrap/intelligence python -u server_application.py diff --git a/sample_standard_app/quick_start.sh b/sample_standard_app/quick_start.sh index a98f8c63..b673ce94 100755 --- a/sample_standard_app/quick_start.sh +++ b/sample_standard_app/quick_start.sh @@ -97,6 +97,6 @@ sed -i '' 's/^#custom_key_path = '\''\.\/custom_key\.toml'\''/custom_key_path = export PYTHONPATH=$PWD/../.. 
# Start the service -cd app/bootstrap +cd boostrap/intelligence python -u server_application.py From 736a246692387573bf9b0516eaa97f30aeae8e01 Mon Sep 17 00:00:00 2001 From: shengxiao Date: Fri, 13 Dec 2024 18:03:39 +0800 Subject: [PATCH 3/4] =?UTF-8?q?[feat]=201.=E6=B7=BB=E5=8A=A0=E6=96=B0?= =?UTF-8?q?=E7=9A=84=E6=A0=B7=E6=9D=BF=E5=B7=A5=E7=A8=8B&=E5=BF=AB?= =?UTF-8?q?=E9=80=9F=E6=90=AD=E5=BB=BA=E6=95=99=E7=A8=8B=E6=96=87=E6=A1=A3?= =?UTF-8?q?=202.=E4=BC=98=E5=8C=96=EF=BC=9Aworkflow=E4=B8=ADagent=E8=8A=82?= =?UTF-8?q?=E7=82=B9=E6=94=AF=E6=8C=81=E5=85=A5=E5=8F=82=E8=87=AA=E5=AE=9A?= =?UTF-8?q?=E4=B9=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../__init__.py | 0 .../bootstrap/__init__.py | 0 .../bootstrap/intelligence/__init__.py | 0 .../intelligence/server_application.py | 0 .../config/config.toml | 28 +++++++++---------- .../config/custom_key.toml | 0 .../config/gunicorn_config.toml | 0 .../config/log_config.toml | 0 .../intelligence/__init__.py | 0 .../intelligence/agentic/__init__.py | 0 .../intelligence/agentic/agent/__init__.py | 0 .../agentic/agent/agent_instance/__init__.py | 0 .../multi_agent_case/__init__.py | 0 .../pet_insurance_consult_agent.py | 0 .../pet_insurance_consult_agent.yaml | 2 +- .../pet_question_planning_agent.py | 2 +- .../pet_question_planning_agent.yaml | 2 +- .../pet_question_rewrite_agent.py | 0 .../pet_question_rewrite_agent.yaml | 2 +- .../single_agent_case/__init__.py | 0 .../pet_insurance_react_agent.py | 0 .../pet_insurance_react_agent.yaml | 2 +- .../template_agent_case/__init__.py | 0 .../pet_consult_pro_agent.py | 2 +- .../pet_consult_pro_agent.yaml | 2 +- .../agentic/agent/agent_template/__init__.py | 0 .../agent_template/pet_agent_template.py | 0 .../agentic/knowledge/__init__.py | 0 .../knowledge/doc_processor/__init__.py | 0 .../query_keyword_extractor.yaml | 0 .../knowledge/query_paraphraser/__init__.py | 0 .../custom_query_keyword_extractor.yaml | 0 .../agentic/knowledge/rag_router/__init__.py | 0 .../knowledge/rag_router/nlu_rag_router.yaml | 0 .../agentic/knowledge/store/__init__.py | 0 .../intelligence/agentic/llm/__init__.py | 0 .../intelligence/agentic/llm/maya/__init__.py | 0 .../llm/maya/pet_insurance_maya_llm.py | 0 .../agentic/llm/maya/pet_qwen_72b_stream.yaml | 2 +- .../intelligence/agentic/llm/qwen_llm.yaml | 0 .../intelligence/agentic/memory/__init__.py | 0 .../agentic/memory/demo_memory_a.yaml | 0 .../agentic/memory/demo_memory_b.yaml | 0 .../memory/memory_compressor/__init__.py | 0 .../agentic/memory/memory_storage/__init__.py | 0 .../memory_storage/chroma_memory_storage.yaml | 0 .../intelligence/agentic/prompt/__init__.py | 0 .../pet_insurance_multi_agent/__init__.py | 0 .../pet_insurance_multi_agent_cn.yaml | 0 .../pet_planning_query_agent_cn.yaml | 0 .../pet_rewrite_query_agent_cn.yaml | 0 .../prompt/pet_react_agent/__init__.py | 0 .../pet_insurance_react_prompt.yaml | 0 .../intelligence/agentic/tool/__init__.py | 0 .../agentic/tool/google_search_tool.py | 2 +- .../agentic/tool/google_search_tool.yaml | 2 +- .../agentic/tool/mock_search_tool.py | 0 .../agentic/tool/pet_ins/__init__.py | 0 .../tool/pet_ins/pet_insurance_info_tool.py | 2 +- .../tool/pet_ins/pet_insurance_info_tool.yaml | 2 +- .../pet_insurance_search_context_tool.py | 0 .../pet_insurance_search_context_tool.yaml | 2 +- .../agentic/work_pattern/__init__.py | 0 .../intelligence/service/__init__.py | 0 .../service/agent_service/__init__.py | 0 .../pet_insurance_consult_service.yaml | 5 ++++ 
.../pet_insurance_react_service.yaml | 0 .../service/classic_service/__init__.py | 0 .../intelligence/test/__init__.py | 0 .../pet_insurance_consult_pro_agent_test.py | 0 .../test/pet_insurance_multi_agent_test.py | 0 .../test/pet_insurance_react_agent_test.py | 0 .../intelligence/utils/__init__.py | 0 .../intelligence/utils/common/__init__.py | 0 .../utils/common/jsonl_file_util.py | 0 .../utils/common/txt_file_util.py | 0 .../intelligence/utils/constant/__init__.py | 0 .../utils/constant/prod_description.py | 0 ...53\351\200\237\346\220\255\345\273\272.md" | 26 ++++++++--------- 79 files changed, 45 insertions(+), 40 deletions(-) rename {au_sample_standard_app => demo_startup_app}/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/bootstrap/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/bootstrap/intelligence/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/bootstrap/intelligence/server_application.py (100%) rename {au_sample_standard_app => demo_startup_app}/config/config.toml (74%) rename {au_sample_standard_app => demo_startup_app}/config/custom_key.toml (100%) rename {au_sample_standard_app => demo_startup_app}/config/gunicorn_config.toml (100%) rename {au_sample_standard_app => demo_startup_app}/config/log_config.toml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml (74%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py (89%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml (71%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml (69%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml (77%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py (96%) rename {au_sample_standard_app => 
demo_startup_app}/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml (75%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_template/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/agent/agent_template/pet_agent_template.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/knowledge/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/knowledge/doc_processor/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/knowledge/query_paraphraser/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/knowledge/rag_router/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/knowledge/store/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/llm/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/llm/maya/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml (83%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/llm/qwen_llm.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/memory/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/memory/demo_memory_a.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/memory/demo_memory_b.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/memory/memory_compressor/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/memory/memory_storage/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/prompt/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/prompt/pet_react_agent/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/tool/__init__.py (100%) rename {au_sample_standard_app => 
demo_startup_app}/intelligence/agentic/tool/google_search_tool.py (93%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/tool/google_search_tool.yaml (84%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/tool/mock_search_tool.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/tool/pet_ins/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py (82%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml (84%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml (91%) rename {au_sample_standard_app => demo_startup_app}/intelligence/agentic/work_pattern/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/service/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/service/agent_service/__init__.py (100%) create mode 100644 demo_startup_app/intelligence/service/agent_service/pet_insurance_consult_service.yaml rename {au_sample_standard_app => demo_startup_app}/intelligence/service/agent_service/pet_insurance_react_service.yaml (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/service/classic_service/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/test/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/test/pet_insurance_consult_pro_agent_test.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/test/pet_insurance_multi_agent_test.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/test/pet_insurance_react_agent_test.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/utils/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/utils/common/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/utils/common/jsonl_file_util.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/utils/common/txt_file_util.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/utils/constant/__init__.py (100%) rename {au_sample_standard_app => demo_startup_app}/intelligence/utils/constant/prod_description.py (100%) diff --git a/au_sample_standard_app/__init__.py b/demo_startup_app/__init__.py similarity index 100% rename from au_sample_standard_app/__init__.py rename to demo_startup_app/__init__.py diff --git a/au_sample_standard_app/bootstrap/__init__.py b/demo_startup_app/bootstrap/__init__.py similarity index 100% rename from au_sample_standard_app/bootstrap/__init__.py rename to demo_startup_app/bootstrap/__init__.py diff --git a/au_sample_standard_app/bootstrap/intelligence/__init__.py b/demo_startup_app/bootstrap/intelligence/__init__.py similarity index 100% rename from au_sample_standard_app/bootstrap/intelligence/__init__.py rename to demo_startup_app/bootstrap/intelligence/__init__.py diff --git a/au_sample_standard_app/bootstrap/intelligence/server_application.py b/demo_startup_app/bootstrap/intelligence/server_application.py similarity index 100% rename from au_sample_standard_app/bootstrap/intelligence/server_application.py rename to 
demo_startup_app/bootstrap/intelligence/server_application.py diff --git a/au_sample_standard_app/config/config.toml b/demo_startup_app/config/config.toml similarity index 74% rename from au_sample_standard_app/config/config.toml rename to demo_startup_app/config/config.toml index a846589d..1662ccfe 100644 --- a/au_sample_standard_app/config/config.toml +++ b/demo_startup_app/config/config.toml @@ -4,39 +4,39 @@ appname = 'demo_app' [CORE_PACKAGE] # Perform a full component scan and registration for all the paths under this list. -default = ['au_sample_standard_app.intelligence.agentic'] +default = ['demo_startup_app.intelligence.agentic'] # Scan and register agent components for all paths under this list, with priority over the default. -agent = ['au_sample_standard_app.intelligence.agentic.agent'] +agent = ['demo_startup_app.intelligence.agentic.agent'] # Scan and register knowledge components for all paths under this list, with priority over the default. -knowledge = ['au_sample_standard_app.intelligence.agentic.knowledge'] +knowledge = ['demo_startup_app.intelligence.agentic.knowledge'] # Scan and register llm components for all paths under this list, with priority over the default. -llm = ['au_sample_standard_app.intelligence.agentic.llm'] +llm = ['demo_startup_app.intelligence.agentic.llm'] # Scan and register planner components for all paths under this list, with priority over the default. planner = [] # Scan and register tool components for all paths under this list, with priority over the default. -tool = ['au_sample_standard_app.intelligence.agentic.tool'] +tool = ['demo_startup_app.intelligence.agentic.tool'] # Scan and register memory components for all paths under this list, with priority over the default. -memory = ['au_sample_standard_app.intelligence.agentic.memory'] +memory = ['demo_startup_app.intelligence.agentic.memory'] # Scan and register service components for all paths under this list, with priority over the default. -service = ['au_sample_standard_app.intelligence.service.agent_service'] +service = ['demo_startup_app.intelligence.service.agent_service'] # Scan and register prompt components for all paths under this list, with priority over the default. -prompt = ['au_sample_standard_app.intelligence.agentic.prompt'] +prompt = ['demo_startup_app.intelligence.agentic.prompt'] # Scan and register product components for all paths under this list, with priority over the default. #product = ['au_sample_standard_app.platform.difizen.product'] # Scan and register workflow components for all paths under this list, with priority over the default. #workflow = ['au_sample_standard_app.platform.difizen.workflow'] # Scan and register store components for all paths under this list, with priority over the default. -store = ['au_sample_standard_app.intelligence.agentic.knowledge.store'] +store = ['demo_startup_app.intelligence.agentic.knowledge.store'] # Scan and register rag_router components for all paths under this list, with priority over the default. -rag_router = ['au_sample_standard_app.intelligence.agentic.knowledge.rag_router'] +rag_router = ['demo_startup_app.intelligence.agentic.knowledge.rag_router'] # Scan and register doc_processor components for all paths under this list, with priority over the default. 
-doc_processor = ['au_sample_standard_app.intelligence.agentic.knowledge.doc_processor'] +doc_processor = ['demo_startup_app.intelligence.agentic.knowledge.doc_processor'] # Scan and register query_paraphraser components for all paths under this list, with priority over the default. -query_paraphraser = ['au_sample_standard_app.intelligence.agentic.knowledge.query_paraphraser'] +query_paraphraser = ['demo_startup_app.intelligence.agentic.knowledge.query_paraphraser'] # Scan and register memory_compressor components for all paths under this list, with priority over the default. -memory_compressor = ['au_sample_standard_app.intelligence.agentic.memory.memory_compressor'] +memory_compressor = ['demo_startup_app.intelligence.agentic.memory.memory_compressor'] # Scan and register memory_storage components for all paths under this list, with priority over the default. -memory_storage = ['au_sample_standard_app.intelligence.agentic.memory.memory_storage'] +memory_storage = ['demo_startup_app.intelligence.agentic.memory.memory_storage'] [SUB_CONFIG_PATH] # Log config file path, an absolute path or a relative path based on the dir where the current config file is located. diff --git a/au_sample_standard_app/config/custom_key.toml b/demo_startup_app/config/custom_key.toml similarity index 100% rename from au_sample_standard_app/config/custom_key.toml rename to demo_startup_app/config/custom_key.toml diff --git a/au_sample_standard_app/config/gunicorn_config.toml b/demo_startup_app/config/gunicorn_config.toml similarity index 100% rename from au_sample_standard_app/config/gunicorn_config.toml rename to demo_startup_app/config/gunicorn_config.toml diff --git a/au_sample_standard_app/config/log_config.toml b/demo_startup_app/config/log_config.toml similarity index 100% rename from au_sample_standard_app/config/log_config.toml rename to demo_startup_app/config/log_config.toml diff --git a/au_sample_standard_app/intelligence/__init__.py b/demo_startup_app/intelligence/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/__init__.py rename to demo_startup_app/intelligence/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/__init__.py b/demo_startup_app/intelligence/agentic/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/__init__.py rename to demo_startup_app/intelligence/agentic/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/__init__.py b/demo_startup_app/intelligence/agentic/agent/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/__init__.py rename to demo_startup_app/intelligence/agentic/agent/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/__init__.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/__init__.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/__init__.py diff --git 
a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml similarity index 74% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml rename to demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml index 0b459268..7a74c22c 100644 --- a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml +++ b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml @@ -11,5 +11,5 @@ action: - search_context_tool metadata: type: 'AGENT' - module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_insurance_consult_agent' + module: 'demo_startup_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_insurance_consult_agent' class: 'PetInsuranceConsultAgent' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py similarity index 89% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py index 525f13a7..14df46e9 100644 --- a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py +++ b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.py @@ -7,7 +7,7 @@ from agentuniverse.agent.input_object import InputObject from agentuniverse.base.util.logging.logging_util import LOGGER -from au_sample_standard_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_rewrite_agent import \ +from demo_startup_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_rewrite_agent import \ PetInsuranceRewriteAgent diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml similarity index 71% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml rename to demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml index 5ecb86b5..643e7cba 100644 --- a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml +++ 
b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml @@ -8,5 +8,5 @@ profile: temperature: 0.3 metadata: type: 'AGENT' - module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_planning_agent' + module: 'demo_startup_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_planning_agent' class: 'PetInsurancePlanningAgent' diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml similarity index 69% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml rename to demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml index a8cd2a1b..70dc7457 100644 --- a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml +++ b/demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml @@ -8,5 +8,5 @@ profile: temperature: 0.3 metadata: type: 'AGENT' - module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_rewrite_agent' + module: 'demo_startup_app.intelligence.agentic.agent.agent_instance.multi_agent_case.pet_question_rewrite_agent' class: 'PetInsuranceRewriteAgent' diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml b/demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml similarity index 77% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml rename to demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml index db6e4bed..9c516239 100644 --- 
a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml +++ b/demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml @@ -14,5 +14,5 @@ action: - 'pet_insurance_search_context_tool' metadata: type: 'AGENT' - module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.single_agent_case.pet_insurance_react_agent' + module: 'demo_startup_app.intelligence.agentic.agent.agent_instance.single_agent_case.pet_insurance_react_agent' class: 'PetInsuranceReactAgent' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/template_agent_case/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py b/demo_startup_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py similarity index 96% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py rename to demo_startup_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py index 71291654..2f6ecc28 100644 --- a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py +++ b/demo_startup_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.py @@ -18,7 +18,7 @@ from agentuniverse.prompt.prompt import Prompt from langchain_core.output_parsers import StrOutputParser -from au_sample_standard_app.intelligence.agentic.agent.agent_template.pet_agent_template import \ +from demo_startup_app.intelligence.agentic.agent.agent_template.pet_agent_template import \ PetRagAgentTemplate diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml b/demo_startup_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml similarity index 75% rename from au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml rename to demo_startup_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml index 03637149..c50abcd8 100644 --- a/au_sample_standard_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml +++ b/demo_startup_app/intelligence/agentic/agent/agent_instance/template_agent_case/pet_consult_pro_agent.yaml @@ -11,5 +11,5 @@ action: - search_context_tool metadata: type: 'AGENT' - module: 'au_sample_standard_app.intelligence.agentic.agent.agent_instance.template_agent_case.pet_consult_pro_agent' + module: 'demo_startup_app.intelligence.agentic.agent.agent_instance.template_agent_case.pet_consult_pro_agent' class: 'PetInsuranceConsultProAgent' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_template/__init__.py b/demo_startup_app/intelligence/agentic/agent/agent_template/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_template/__init__.py rename to 
demo_startup_app/intelligence/agentic/agent/agent_template/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/agent/agent_template/pet_agent_template.py b/demo_startup_app/intelligence/agentic/agent/agent_template/pet_agent_template.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/agent/agent_template/pet_agent_template.py rename to demo_startup_app/intelligence/agentic/agent/agent_template/pet_agent_template.py diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/__init__.py b/demo_startup_app/intelligence/agentic/knowledge/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/knowledge/__init__.py rename to demo_startup_app/intelligence/agentic/knowledge/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/__init__.py b/demo_startup_app/intelligence/agentic/knowledge/doc_processor/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/__init__.py rename to demo_startup_app/intelligence/agentic/knowledge/doc_processor/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml b/demo_startup_app/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml similarity index 100% rename from au_sample_standard_app/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml rename to demo_startup_app/intelligence/agentic/knowledge/doc_processor/query_keyword_extractor.yaml diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/__init__.py b/demo_startup_app/intelligence/agentic/knowledge/query_paraphraser/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/__init__.py rename to demo_startup_app/intelligence/agentic/knowledge/query_paraphraser/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml b/demo_startup_app/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml similarity index 100% rename from au_sample_standard_app/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml rename to demo_startup_app/intelligence/agentic/knowledge/query_paraphraser/custom_query_keyword_extractor.yaml diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/rag_router/__init__.py b/demo_startup_app/intelligence/agentic/knowledge/rag_router/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/knowledge/rag_router/__init__.py rename to demo_startup_app/intelligence/agentic/knowledge/rag_router/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml b/demo_startup_app/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml similarity index 100% rename from au_sample_standard_app/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml rename to demo_startup_app/intelligence/agentic/knowledge/rag_router/nlu_rag_router.yaml diff --git a/au_sample_standard_app/intelligence/agentic/knowledge/store/__init__.py b/demo_startup_app/intelligence/agentic/knowledge/store/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/knowledge/store/__init__.py rename to demo_startup_app/intelligence/agentic/knowledge/store/__init__.py diff --git 
a/au_sample_standard_app/intelligence/agentic/llm/__init__.py b/demo_startup_app/intelligence/agentic/llm/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/llm/__init__.py rename to demo_startup_app/intelligence/agentic/llm/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/llm/maya/__init__.py b/demo_startup_app/intelligence/agentic/llm/maya/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/llm/maya/__init__.py rename to demo_startup_app/intelligence/agentic/llm/maya/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py b/demo_startup_app/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py rename to demo_startup_app/intelligence/agentic/llm/maya/pet_insurance_maya_llm.py diff --git a/au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml b/demo_startup_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml similarity index 83% rename from au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml rename to demo_startup_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml index a23addc6..f2789572 100644 --- a/au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml +++ b/demo_startup_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml @@ -17,5 +17,5 @@ ext_info: request_timeout: 60 metadata: type: 'LLM' - module: 'au_sample_standard_app.intelligence.agentic.llm.maya.pet_insurance_maya_llm' + module: 'demo_startup_app.intelligence.agentic.llm.maya.pet_insurance_maya_llm' class: 'PetInsuranceMayaLLM' \ No newline at end of file diff --git a/au_sample_standard_app/intelligence/agentic/llm/qwen_llm.yaml b/demo_startup_app/intelligence/agentic/llm/qwen_llm.yaml similarity index 100% rename from au_sample_standard_app/intelligence/agentic/llm/qwen_llm.yaml rename to demo_startup_app/intelligence/agentic/llm/qwen_llm.yaml diff --git a/au_sample_standard_app/intelligence/agentic/memory/__init__.py b/demo_startup_app/intelligence/agentic/memory/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/memory/__init__.py rename to demo_startup_app/intelligence/agentic/memory/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/memory/demo_memory_a.yaml b/demo_startup_app/intelligence/agentic/memory/demo_memory_a.yaml similarity index 100% rename from au_sample_standard_app/intelligence/agentic/memory/demo_memory_a.yaml rename to demo_startup_app/intelligence/agentic/memory/demo_memory_a.yaml diff --git a/au_sample_standard_app/intelligence/agentic/memory/demo_memory_b.yaml b/demo_startup_app/intelligence/agentic/memory/demo_memory_b.yaml similarity index 100% rename from au_sample_standard_app/intelligence/agentic/memory/demo_memory_b.yaml rename to demo_startup_app/intelligence/agentic/memory/demo_memory_b.yaml diff --git a/au_sample_standard_app/intelligence/agentic/memory/memory_compressor/__init__.py b/demo_startup_app/intelligence/agentic/memory/memory_compressor/__init__.py similarity index 100% rename from au_sample_standard_app/intelligence/agentic/memory/memory_compressor/__init__.py rename to demo_startup_app/intelligence/agentic/memory/memory_compressor/__init__.py diff --git a/au_sample_standard_app/intelligence/agentic/memory/memory_storage/__init__.py 
diff --git a/au_sample_standard_app/intelligence/agentic/llm/qwen_llm.yaml b/demo_startup_app/intelligence/agentic/llm/qwen_llm.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/llm/qwen_llm.yaml
rename to demo_startup_app/intelligence/agentic/llm/qwen_llm.yaml
diff --git a/au_sample_standard_app/intelligence/agentic/memory/__init__.py b/demo_startup_app/intelligence/agentic/memory/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/memory/__init__.py
rename to demo_startup_app/intelligence/agentic/memory/__init__.py
diff --git a/au_sample_standard_app/intelligence/agentic/memory/demo_memory_a.yaml b/demo_startup_app/intelligence/agentic/memory/demo_memory_a.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/memory/demo_memory_a.yaml
rename to demo_startup_app/intelligence/agentic/memory/demo_memory_a.yaml
diff --git a/au_sample_standard_app/intelligence/agentic/memory/demo_memory_b.yaml b/demo_startup_app/intelligence/agentic/memory/demo_memory_b.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/memory/demo_memory_b.yaml
rename to demo_startup_app/intelligence/agentic/memory/demo_memory_b.yaml
diff --git a/au_sample_standard_app/intelligence/agentic/memory/memory_compressor/__init__.py b/demo_startup_app/intelligence/agentic/memory/memory_compressor/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/memory/memory_compressor/__init__.py
rename to demo_startup_app/intelligence/agentic/memory/memory_compressor/__init__.py
diff --git a/au_sample_standard_app/intelligence/agentic/memory/memory_storage/__init__.py b/demo_startup_app/intelligence/agentic/memory/memory_storage/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/memory/memory_storage/__init__.py
rename to demo_startup_app/intelligence/agentic/memory/memory_storage/__init__.py
diff --git a/au_sample_standard_app/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml b/demo_startup_app/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml
rename to demo_startup_app/intelligence/agentic/memory/memory_storage/chroma_memory_storage.yaml
diff --git a/au_sample_standard_app/intelligence/agentic/prompt/__init__.py b/demo_startup_app/intelligence/agentic/prompt/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/prompt/__init__.py
rename to demo_startup_app/intelligence/agentic/prompt/__init__.py
diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py b/demo_startup_app/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py
rename to demo_startup_app/intelligence/agentic/prompt/pet_insurance_multi_agent/__init__.py
diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml b/demo_startup_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml
rename to demo_startup_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_insurance_multi_agent_cn.yaml
diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml b/demo_startup_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml
rename to demo_startup_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_planning_query_agent_cn.yaml
diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml b/demo_startup_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml
rename to demo_startup_app/intelligence/agentic/prompt/pet_insurance_multi_agent/pet_rewrite_query_agent_cn.yaml
diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/__init__.py b/demo_startup_app/intelligence/agentic/prompt/pet_react_agent/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/__init__.py
rename to demo_startup_app/intelligence/agentic/prompt/pet_react_agent/__init__.py
diff --git a/au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml b/demo_startup_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml
rename to demo_startup_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml
diff --git a/au_sample_standard_app/intelligence/agentic/tool/__init__.py b/demo_startup_app/intelligence/agentic/tool/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/tool/__init__.py
rename to demo_startup_app/intelligence/agentic/tool/__init__.py
diff --git a/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.py b/demo_startup_app/intelligence/agentic/tool/google_search_tool.py
similarity index 93%
rename from au_sample_standard_app/intelligence/agentic/tool/google_search_tool.py
rename to demo_startup_app/intelligence/agentic/tool/google_search_tool.py
index c453f81d..ce29b4b5 100644
--- a/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.py
+++ b/demo_startup_app/intelligence/agentic/tool/google_search_tool.py
@@ -11,7 +11,7 @@
 from agentuniverse.agent.action.tool.tool import Tool, ToolInput
 from agentuniverse.base.util.env_util import get_from_env
 
-from au_sample_standard_app.intelligence.agentic.tool.mock_search_tool import MockSearchTool
+from demo_startup_app.intelligence.agentic.tool.mock_search_tool import MockSearchTool
 
 
 class GoogleSearchTool(Tool):
diff --git a/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.yaml b/demo_startup_app/intelligence/agentic/tool/google_search_tool.yaml
similarity index 84%
rename from au_sample_standard_app/intelligence/agentic/tool/google_search_tool.yaml
rename to demo_startup_app/intelligence/agentic/tool/google_search_tool.yaml
index ba87a52d..7b80913b 100644
--- a/au_sample_standard_app/intelligence/agentic/tool/google_search_tool.yaml
+++ b/demo_startup_app/intelligence/agentic/tool/google_search_tool.yaml
@@ -8,5 +8,5 @@ tool_type: 'api'
 input_keys: ['input']
 metadata:
   type: 'TOOL'
-  module: 'au_sample_standard_app.intelligence.agentic.tool.google_search_tool'
+  module: 'demo_startup_app.intelligence.agentic.tool.google_search_tool'
   class: 'GoogleSearchTool'
\ No newline at end of file
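The two tool hunks above only retarget an import and a metadata.module path; the tool contract itself is unchanged. For readers following the renamed quick-start guide, a minimal sketch of that contract as the guide describes it (subclass Tool, override execute, read the declared input keys from ToolInput); the class name and canned result below are hypothetical and not part of this patch, and the get_data accessor follows the framework's sample tools:

```python
from agentuniverse.agent.action.tool.tool import Tool, ToolInput


class DemoPetSearchTool(Tool):
    """Hypothetical tool, not included in this patch; mirrors the shape of
    GoogleSearchTool / MockSearchTool referenced in the hunks above."""

    def execute(self, tool_input: ToolInput):
        # The YAML for such a tool would declare input_keys: ['input'] and a
        # metadata.module pointing at this file, exactly like the hunk above.
        query = tool_input.get_data("input")
        # A real tool would call an external API here; return a canned answer.
        return f"mocked search result for: {query}"
```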
diff --git a/au_sample_standard_app/intelligence/agentic/tool/mock_search_tool.py b/demo_startup_app/intelligence/agentic/tool/mock_search_tool.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/tool/mock_search_tool.py
rename to demo_startup_app/intelligence/agentic/tool/mock_search_tool.py
diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/__init__.py b/demo_startup_app/intelligence/agentic/tool/pet_ins/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/tool/pet_ins/__init__.py
rename to demo_startup_app/intelligence/agentic/tool/pet_ins/__init__.py
diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py b/demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py
similarity index 82%
rename from au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py
rename to demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py
index f60fd0ae..07d10a68 100644
--- a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py
+++ b/demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.py
@@ -6,7 +6,7 @@
 # @FileName: pet_insurance_tool.py
 from agentuniverse.agent.action.tool.tool import Tool, ToolInput
 
-from au_sample_standard_app.intelligence.utils.constant.prod_description import PROD_A_DESCRIPTION, PROD_B_DESCRIPTION
+from demo_startup_app.intelligence.utils.constant.prod_description import PROD_A_DESCRIPTION, PROD_B_DESCRIPTION
 
 
 class PetInsuranceInfoTool(Tool):
diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml b/demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml
similarity index 84%
rename from au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml
rename to demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml
index 5c63a4ed..e7d83bc3 100644
--- a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml
+++ b/demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_info_tool.yaml
@@ -8,5 +8,5 @@ tool_type: 'api'
 input_keys: ['query']
 metadata:
   type: 'TOOL'
-  module: 'au_sample_standard_app.intelligence.agentic.tool.pet_ins.pet_insurance_info_tool'
+  module: 'demo_startup_app.intelligence.agentic.tool.pet_ins.pet_insurance_info_tool'
   class: 'PetInsuranceInfoTool'
\ No newline at end of file
diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py b/demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py
rename to demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py
diff --git a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml b/demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml
similarity index 91%
rename from au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml
rename to demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml
index de556dfa..7ab5555a 100644
--- a/au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml
+++ b/demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml
@@ -27,5 +27,5 @@ tool_type: 'api'
 input_keys: ['input']
 metadata:
   type: 'TOOL'
-  module: 'au_sample_standard_app.intelligence.agentic.tool.pet_ins.pet_insurance_search_context_tool'
+  module: 'demo_startup_app.intelligence.agentic.tool.pet_ins.pet_insurance_search_context_tool'
   class: 'SearchContextTool'
\ No newline at end of file
diff --git a/au_sample_standard_app/intelligence/agentic/work_pattern/__init__.py b/demo_startup_app/intelligence/agentic/work_pattern/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/agentic/work_pattern/__init__.py
rename to demo_startup_app/intelligence/agentic/work_pattern/__init__.py
diff --git a/au_sample_standard_app/intelligence/service/__init__.py b/demo_startup_app/intelligence/service/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/service/__init__.py
rename to demo_startup_app/intelligence/service/__init__.py
diff --git a/au_sample_standard_app/intelligence/service/agent_service/__init__.py b/demo_startup_app/intelligence/service/agent_service/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/service/agent_service/__init__.py
rename to demo_startup_app/intelligence/service/agent_service/__init__.py
diff --git a/demo_startup_app/intelligence/service/agent_service/pet_insurance_consult_service.yaml b/demo_startup_app/intelligence/service/agent_service/pet_insurance_consult_service.yaml
new file mode 100644
index 00000000..4e0fb447
--- /dev/null
+++ b/demo_startup_app/intelligence/service/agent_service/pet_insurance_consult_service.yaml
@@ -0,0 +1,5 @@
+name: 'pet_insurance_consult_service'
+description: 'demo service of consult agent'
+agent: 'pet_insurance_consult_agent'
+metadata:
+  type: 'SERVICE'
\ No newline at end of file
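The new pet_insurance_consult_service.yaml above registers a SERVICE component bound to pet_insurance_consult_agent. As a rough sketch of how such a service is usually exercised once the project's web server is up (the /service_run endpoint, the port, and the payload keys are assumptions based on agentUniverse's default web setup, not something this patch defines):

```python
import requests

# Assumed local defaults; adjust host/port to match your gunicorn_config.toml.
SERVER = "http://127.0.0.1:8888"

resp = requests.post(
    f"{SERVER}/service_run",
    json={
        "service_id": "pet_insurance_consult_service",  # name from the YAML above
        "params": {"input": "流浪猫咬伤可以赔付吗?"},      # input key is an assumption
    },
    timeout=60,
)
print(resp.json())
```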
diff --git a/au_sample_standard_app/intelligence/service/agent_service/pet_insurance_react_service.yaml b/demo_startup_app/intelligence/service/agent_service/pet_insurance_react_service.yaml
similarity index 100%
rename from au_sample_standard_app/intelligence/service/agent_service/pet_insurance_react_service.yaml
rename to demo_startup_app/intelligence/service/agent_service/pet_insurance_react_service.yaml
diff --git a/au_sample_standard_app/intelligence/service/classic_service/__init__.py b/demo_startup_app/intelligence/service/classic_service/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/service/classic_service/__init__.py
rename to demo_startup_app/intelligence/service/classic_service/__init__.py
diff --git a/au_sample_standard_app/intelligence/test/__init__.py b/demo_startup_app/intelligence/test/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/test/__init__.py
rename to demo_startup_app/intelligence/test/__init__.py
diff --git a/au_sample_standard_app/intelligence/test/pet_insurance_consult_pro_agent_test.py b/demo_startup_app/intelligence/test/pet_insurance_consult_pro_agent_test.py
similarity index 100%
rename from au_sample_standard_app/intelligence/test/pet_insurance_consult_pro_agent_test.py
rename to demo_startup_app/intelligence/test/pet_insurance_consult_pro_agent_test.py
diff --git a/au_sample_standard_app/intelligence/test/pet_insurance_multi_agent_test.py b/demo_startup_app/intelligence/test/pet_insurance_multi_agent_test.py
similarity index 100%
rename from au_sample_standard_app/intelligence/test/pet_insurance_multi_agent_test.py
rename to demo_startup_app/intelligence/test/pet_insurance_multi_agent_test.py
diff --git a/au_sample_standard_app/intelligence/test/pet_insurance_react_agent_test.py b/demo_startup_app/intelligence/test/pet_insurance_react_agent_test.py
similarity index 100%
rename from au_sample_standard_app/intelligence/test/pet_insurance_react_agent_test.py
rename to demo_startup_app/intelligence/test/pet_insurance_react_agent_test.py
diff --git a/au_sample_standard_app/intelligence/utils/__init__.py b/demo_startup_app/intelligence/utils/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/utils/__init__.py
rename to demo_startup_app/intelligence/utils/__init__.py
diff --git a/au_sample_standard_app/intelligence/utils/common/__init__.py b/demo_startup_app/intelligence/utils/common/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/utils/common/__init__.py
rename to demo_startup_app/intelligence/utils/common/__init__.py
diff --git a/au_sample_standard_app/intelligence/utils/common/jsonl_file_util.py b/demo_startup_app/intelligence/utils/common/jsonl_file_util.py
similarity index 100%
rename from au_sample_standard_app/intelligence/utils/common/jsonl_file_util.py
rename to demo_startup_app/intelligence/utils/common/jsonl_file_util.py
diff --git a/au_sample_standard_app/intelligence/utils/common/txt_file_util.py b/demo_startup_app/intelligence/utils/common/txt_file_util.py
similarity index 100%
rename from au_sample_standard_app/intelligence/utils/common/txt_file_util.py
rename to demo_startup_app/intelligence/utils/common/txt_file_util.py
diff --git a/au_sample_standard_app/intelligence/utils/constant/__init__.py b/demo_startup_app/intelligence/utils/constant/__init__.py
similarity index 100%
rename from au_sample_standard_app/intelligence/utils/constant/__init__.py
rename to demo_startup_app/intelligence/utils/constant/__init__.py
diff --git a/au_sample_standard_app/intelligence/utils/constant/prod_description.py b/demo_startup_app/intelligence/utils/constant/prod_description.py
similarity index 100%
rename from au_sample_standard_app/intelligence/utils/constant/prod_description.py
rename to demo_startup_app/intelligence/utils/constant/prod_description.py
diff --git "a/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/AgentUniverse\345\277\253\351\200\237\346\220\255\345\273\272.md" "b/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/AgentUniverse\345\277\253\351\200\237\346\220\255\345\273\272.md"
index bb26be4a..044a8904 100644
--- "a/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/AgentUniverse\345\277\253\351\200\237\346\220\255\345\273\272.md"
+++ "b/docs/guidebook/zh/\346\240\267\344\276\213\346\226\207\346\241\243/AgentUniverse\345\277\253\351\200\237\346\220\255\345\273\272.md"
@@ -1,12 +1,12 @@
 # 文档说明
 为了让大家快速上手AU框架,我们提供了样板工程(可以直接运行)并配套详细的文字说明,在实际研发过程中直接基于其中的某些智能体结合自己业务进行一小部分代码/配置改写即可完成应用的搭建。
-样板工程地址:[au_sample_standard_app](../../../../au_sample_standard_app)
+样板工程地址:[au_sample_standard_app](../../../../demo_startup_app)
 样板工程拟定了业务是关于宠物险产品的智能顾问,对用户提出的产品问题进行解答。
 # 从一个智能体搭建开始
-[single_agent_case](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case)下构建一个宠物险顾问智能体,使用qwen72b模型,提供宠物医保的检索以及信息补充工具,react模式自行让模型选择工具帮助回答用户的问题。
+[single_agent_case](../../../../demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case)下构建一个宠物险顾问智能体,使用qwen72b模型,提供宠物医保的检索以及信息补充工具,react模式自行让模型选择工具帮助回答用户的问题。
 从0开始搭建的整体流程如下:
@@ -15,12 +15,12 @@
 ## 构建agent所依赖的组件
 ### 构建tool
 **<配置封装>**
-[pet_insurance_search_context_tool.yaml](../../../../au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml)
+[pet_insurance_search_context_tool.yaml](../../../../demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.yaml)
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1733990583392-c4df1a8b-51d1-44a9-84e9-ebf7c4251c0b.png)
 **<代码实现>**
-[pet_insurance_search_context_tool.py](../../../../au_sample_standard_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py)
+[pet_insurance_search_context_tool.py](../../../../demo_startup_app/intelligence/agentic/tool/pet_ins/pet_insurance_search_context_tool.py)
 所有的自定义工具都都继承自agentuniverse.agent.action.tool.tool的Tool基类,核心执行方法为execute(你需要overwrite的地方),入参类型为ToolInput(key-value经过封装后的结构)。
@@ -30,7 +30,7 @@ API适配AU样例可参考 [API适配样例文档.md](API适配样例文档.md)
 ### 构建llm
 #### 配置封装
-[pet_qwen_72b_stream.yaml](../../../../au_sample_standard_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml)
+[pet_qwen_72b_stream.yaml](../../../../demo_startup_app/intelligence/agentic/llm/maya/pet_qwen_72b_stream.yaml)
 拓展字段ext_info中可以添加自定义参数,并在代码初始化方法中解析,成为PetInsuranceMayaLLM的属性。
 #### 代码实现
@@ -52,11 +52,11 @@ llm需要封装成langchain实例,实际调度过程中由该实例唤起执
 ## 构建智能体
 #### 配置封装
-[pet_insurance_react_agent.yaml](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml)
+[pet_insurance_react_agent.yaml](../../../../demo_startup_app/intelligence/agentic/agent/agent_instance/single_agent_case/pet_insurance_react_agent.yaml)
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734003384746-701f2ff8-46b7-4d32-9cdb-0bb7594594b1.png)
-该智能体依赖的prompt定义 [pet_insurance_react_prompt.yaml](../../../../au_sample_standard_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml)
+该智能体依赖的prompt定义 [pet_insurance_react_prompt.yaml](../../../../demo_startup_app/intelligence/agentic/prompt/pet_react_agent/pet_insurance_react_prompt.yaml)
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734071728918-f8608995-b782-4fdd-9374-072845c839d7.png)
@@ -89,7 +89,7 @@ react模式完全依赖llm自行思考问题,选择工具调度组装结果,
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734072518223-8d7966d4-cd3c-4268-8395-e572f1ae5432.png)
 ## 服务化
-[pet_insurance_react_service.yaml](../../../../au_sample_standard_app/intelligence/service/agent_service/pet_insurance_react_service.yaml)
+[pet_insurance_react_service.yaml](../../../../demo_startup_app/intelligence/service/agent_service/pet_insurance_react_service.yaml)
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734005968227-3039b182-09e2-49d2-982b-60e3df1e265e.png)
@@ -108,11 +108,11 @@ react模式完全依赖llm自行思考问题,选择工具调度组装结果,
 改写和策划两个子智能体不再单独介绍了,参考2.1进行构建
 改写智能体:
-[pet_question_rewrite_agent.yaml](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml)
+[pet_question_rewrite_agent.yaml](../../../../demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_rewrite_agent.yaml)
 策划智能体(拆解问题):
-[pet_question_planning_agent.yaml](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml)
+[pet_question_planning_agent.yaml](../../../../demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_question_planning_agent.yaml)
 这里要注意的是出入参的定义和转换,要能和下个智能体衔接起来。这个可以在主智能体串接时适配。
@@ -120,7 +120,7 @@ react模式完全依赖llm自行思考问题,选择工具调度组装结果,
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734014843998-a826cd8b-3d4f-4fb4-9ef0-65e622983368.png)
 ## 主智能体
-[pet_insurance_consult_agent.yaml](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml)
+[pet_insurance_consult_agent.yaml](../../../../demo_startup_app/intelligence/agentic/agent/agent_instance/multi_agent_case/pet_insurance_consult_agent.yaml)
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734014662057-cbd2eae4-8d92-4a28-8717-fa42bf6229c0.png)
@@ -128,7 +128,7 @@ react模式完全依赖llm自行思考问题,选择工具调度组装结果,
 ## 运行测试
-运行 [pet_insurance_consult_pro_agent_test.py](../../../../au_sample_standard_app/intelligence/test/pet_insurance_consult_pro_agent_test.py)
+运行 [pet_insurance_consult_pro_agent_test.py](../../../../demo_startup_app/intelligence/test/pet_insurance_consult_pro_agent_test.py)
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734073361810-67db902b-4ede-4bbf-bd4d-c9f937434536.png)
@@ -163,7 +163,7 @@ react模式完全依赖llm自行思考问题,选择工具调度组装结果,
 # 封装智能体模版
 在上面的多智能体应用中我们会发现,三个智能体在实现上有很多相同的逻辑,甚至策划智能体继承自改写智能体。为了提升智能体的复用性,我们可以抽像出一些通用的逻辑和方法,封装成一个智能体模版AgentTemplate,大部分智能体基于这个模版做一些微小的调整,甚至只是yaml配置的改动即可实现多种智能体逻辑。
-参考 [pet_agent_template.py](../../../../au_sample_standard_app/intelligence/agentic/agent/agent_template/pet_agent_template.py)
+参考 [pet_agent_template.py](../../../../demo_startup_app/intelligence/agentic/agent/agent_template/pet_agent_template.py)
 ![](https://intranetproxy.alipay.com/skylark/lark/0/2024/png/18056974/1734016162848-dc15f8cf-c362-4834-b99b-abe9e0ba2cdc.png)

From f7460747d608d19e5313140428031559e23edf80 Mon Sep 17 00:00:00 2001
From: shengxiao
Date: Fri, 13 Dec 2024 18:12:27 +0800
Subject: [PATCH 4/4] =?UTF-8?q?[feat]=201.=E6=B7=BB=E5=8A=A0=E6=96=B0?=
 =?UTF-8?q?=E7=9A=84=E6=A0=B7=E6=9D=BF=E5=B7=A5=E7=A8=8B&=E5=BF=AB?=
 =?UTF-8?q?=E9=80=9F=E6=90=AD=E5=BB=BA=E6=95=99=E7=A8=8B=E6=96=87=E6=A1=A3?=
 =?UTF-8?q?=202.=E4=BC=98=E5=8C=96=EF=BC=9Aworkflow=E4=B8=ADagent=E8=8A=82?=
 =?UTF-8?q?=E7=82=B9=E6=94=AF=E6=8C=81=E5=85=A5=E5=8F=82=E8=87=AA=E5=AE=9A?=
 =?UTF-8?q?=E4=B9=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 demo_startup_app/config/custom_key.toml | 43 -------------------
 1 file changed, 43 deletions(-)
 delete mode 100644 demo_startup_app/config/custom_key.toml

diff --git a/demo_startup_app/config/custom_key.toml b/demo_startup_app/config/custom_key.toml
deleted file mode 100644
index 674294a7..00000000
--- a/demo_startup_app/config/custom_key.toml
+++ /dev/null
@@ -1,43 +0,0 @@
-# Example file of custom_key.toml. Rename to custom_key.toml while using.
-[KEY_LIST]
-# Perform a full component scan and registration for all the paths under this list.
-#example_key = 'AnExampleKey'
-
-# models
-#kimi default name: default_kimi_llm
-#KIMI_API_KEY=''
-#
-##Qwen default name: default_qwen_llm
-DASHSCOPE_API_KEY = 'sk-xxxxx'
-#
-##Opean default name: default_openai_llm
-#OPENAI_API_KEY='sk-xxxxxx'
-#
-##DEEPSEEK default name: default_deepseek_llm
-#DEEPSEEK_API_KEY=''
-#DEEPSEEK_API_BASE=''
-#
-## WenXin default name: default_wenxin_llm
-#QIANFAN_AK=''
-#QIANFAN_SK=''
-#
-##Ollama default name: default_ollama_llm
-#OLLAMA_BASE_URL=''
-#
-##claude default name: default_claude_llm
-#ANTHROPIC_API_KEY=''
-#ANTHROPIC_API_URL='xxxxxx'
-#
-##baichuan default name: default_baichuan_llm
-#BAICHUAN_API_KEY='xxxxxx'
-
-
-# search
-#Google search
-#SERPER_API_KEY=''
-#
-##search api
-SEARCHAPI_API_KEY = '3KXgFLMt7r7RhwWe4JNK7GQr'
-#
-##bing seacrh
-#BING_SUBSCRIPTION_KEY='xxxxxx'
\ No newline at end of file
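Patch 4/4 removes the committed custom_key.toml, which carried a live-looking SEARCHAPI_API_KEY next to the placeholder entries. The sample keeps working because credentials are read at runtime through agentuniverse.base.util.env_util.get_from_env (already imported in the google_search_tool.py hunk of patch 1), so keys can come from the environment or an untracked local copy of the file. A minimal sketch, assuming the same key name the deleted file used:

```python
import os

from agentuniverse.base.util.env_util import get_from_env

# Keep real keys out of version control, e.g. `export SEARCHAPI_API_KEY=...`
# in the shell or in a git-ignored local custom_key.toml.
os.environ.setdefault("SEARCHAPI_API_KEY", "replace-me")  # placeholder for illustration only

api_key = get_from_env("SEARCHAPI_API_KEY")
if not api_key or api_key == "replace-me":
    raise RuntimeError("SEARCHAPI_API_KEY is not configured")
```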