improvement: refactoring the sample project in agentUniverse. #213

Merged 5 commits on Dec 13, 2024
README.md: 2 changes (1 addition, 1 deletion)
@@ -107,7 +107,7 @@ pip install magent-ui ruamel.yaml

**One-click Run**

- Run [product_application.py](sample_standard_app/boostrap/platform/product_application.py) in sample_standard_app/app/bootstrap for quick startup.
+ Run [product_application.py](sample_standard_app/boostrap/platform/product_application.py) in sample_standard_app/boostrap/platform for quick startup.

For more details, refer to [Quick Start for Product Platform](docs/guidebook/en/How-to/Product_Platform_Quick_Start.md) and the [Advanced Guide](docs/guidebook/en/How-to/Product_Platform_Advancement_Guide.md).

agentuniverse/agent/agent.py: 134 changes (132 additions, 2 deletions)
@@ -5,16 +5,24 @@
# @Author : heji
# @Email : [email protected]
# @FileName: agent.py
"""The definition of agent paradigm."""
import json
from abc import abstractmethod, ABC
from datetime import datetime
-from typing import Optional
+from typing import Optional, Any, List

from langchain_core.runnables import RunnableSerializable
from langchain_core.utils.json import parse_json_markdown

from agentuniverse.agent.action.knowledge.knowledge import Knowledge
from agentuniverse.agent.action.knowledge.knowledge_manager import KnowledgeManager
from agentuniverse.agent.action.knowledge.store.document import Document
from agentuniverse.agent.action.tool.tool import Tool
from agentuniverse.agent.action.tool.tool_manager import ToolManager
from agentuniverse.agent.agent_model import AgentModel
from agentuniverse.agent.input_object import InputObject
from agentuniverse.agent.memory.memory import Memory
from agentuniverse.agent.memory.memory_manager import MemoryManager
from agentuniverse.agent.memory.message import Message
from agentuniverse.agent.output_object import OutputObject
from agentuniverse.agent.plan.planner.planner import Planner
from agentuniverse.agent.plan.planner.planner_manager import PlannerManager
@@ -25,7 +33,15 @@
    import ApplicationConfigManager
from agentuniverse.base.config.component_configer.configers.agent_configer \
    import AgentConfiger
from agentuniverse.base.util.common_util import stream_output
from agentuniverse.base.util.logging.logging_util import LOGGER
from agentuniverse.base.util.memory_util import generate_messages
from agentuniverse.llm.llm import LLM
from agentuniverse.llm.llm_manager import LLMManager
from agentuniverse.prompt.chat_prompt import ChatPrompt
from agentuniverse.prompt.prompt import Prompt
from agentuniverse.prompt.prompt_manager import PromptManager
from agentuniverse.prompt.prompt_model import AgentPromptModel


class Agent(ComponentBase, ABC):
@@ -211,3 +227,117 @@ def as_langchain_tool(self):
            func=self.langchain_run,
            description=self.agent_model.info.get("description") + args_description
        )

    def process_llm(self, **kwargs) -> LLM:
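        """Return the LLM instance resolved from this agent's llm_name."""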
        return LLMManager().get_instance_obj(self.llm_name)

    def process_memory(self, agent_input: dict, **kwargs) -> Memory | None:
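        """Fetch the agent memory, add chat-history messages from agent_input, and return the memory scoped by the agent model."""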
        memory: Memory = MemoryManager().get_instance_obj(component_instance_name=self.memory_name)
        if memory is None:
            return None

        chat_history: list = agent_input.get('chat_history')
        # Generate a list of temporary messages from the given chat history and add them to the memory instance.
        temporary_messages: list[Message] = generate_messages(chat_history)
        if temporary_messages:
            memory.add(temporary_messages, **agent_input)

        params: dict = dict()
        params['agent_llm_name'] = self.llm_name
        return memory.set_by_agent_model(**params)

    def invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict, input_object: InputObject,
                     **kwargs):
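        """Run the chain synchronously, streaming tokens to the caller's output stream when one is set."""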
        if not input_object.get_data('output_stream'):
            res = chain.invoke(input=agent_input)
            return res
        result = []
        for token in chain.stream(input=agent_input):
            stream_output(input_object.get_data('output_stream', None), {
                'type': 'token',
                'data': {
                    'chunk': token,
                    'agent_info': self.agent_model.info
                }
            })
            result.append(token)
        return "".join(result)

    async def async_invoke_chain(self, chain: RunnableSerializable[Any, str], agent_input: dict,
                                 input_object: InputObject, **kwargs):
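        """Async variant of invoke_chain, consuming chain.astream when streaming is requested."""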
        if not input_object.get_data('output_stream'):
            res = await chain.ainvoke(input=agent_input)
            return res
        result = []
        async for token in chain.astream(input=agent_input):
            stream_output(input_object.get_data('output_stream', None), {
                'type': 'token',
                'data': {
                    'chunk': token,
                    'agent_info': self.agent_model.info
                }
            })
            result.append(token)
        return "".join(result)

    def invoke_tools(self, input_object: InputObject, **kwargs) -> str:
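        """Run each configured tool with inputs drawn from input_object and join their string results with blank lines."""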
        if not self.tool_names:
            return ''

        tool_results: list = list()

        for tool_name in self.tool_names:
            tool: Tool = ToolManager().get_instance_obj(tool_name)
            if tool is None:
                continue
            tool_input = {key: input_object.get_data(key) for key in tool.input_keys}
            tool_results.append(str(tool.run(**tool_input)))
        return "\n\n".join(tool_results)

    def invoke_knowledge(self, query_str: str, input_object: InputObject, **kwargs) -> str:
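        """Query each configured knowledge store with query_str and join the LLM-ready results with blank lines."""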
        if not self.knowledge_names or not query_str:
            return ''

        knowledge_results: list = list()

        for knowledge_name in self.knowledge_names:
            knowledge: Knowledge = KnowledgeManager().get_instance_obj(knowledge_name)
            if knowledge is None:
                continue
            knowledge_res: List[Document] = knowledge.query_knowledge(
                query_str=query_str,
                **input_object.to_dict()
            )
            knowledge_results.append(knowledge.to_llm(knowledge_res))
        return "\n\n".join(knowledge_results)

    def process_prompt(self, agent_input: dict, **kwargs) -> ChatPrompt:
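        """Merge the agent profile with a versioned prompt (if any) into a ChatPrompt, optionally attaching image inputs."""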
        expert_framework = agent_input.pop('expert_framework', '') or ''

        profile: dict = self.agent_model.profile

        profile_instruction = profile.get('instruction')
        profile_instruction = expert_framework + profile_instruction if profile_instruction else profile_instruction

        profile_prompt_model: AgentPromptModel = AgentPromptModel(introduction=profile.get('introduction'),
                                                                  target=profile.get('target'),
                                                                  instruction=profile_instruction)

        # Get the prompt by the prompt version.
        version_prompt: Prompt = PromptManager().get_instance_obj(self.prompt_version)

        if version_prompt is None and not profile_prompt_model:
            raise Exception("Either the `prompt_version` or `introduction & target & instruction`"
                            " in agent profile configuration should be provided.")
        if version_prompt:
            version_prompt_model: AgentPromptModel = AgentPromptModel(
                introduction=getattr(version_prompt, 'introduction', ''),
                target=getattr(version_prompt, 'target', ''),
                instruction=expert_framework + getattr(version_prompt, 'instruction', ''))
            profile_prompt_model = profile_prompt_model + version_prompt_model

        chat_prompt = ChatPrompt().build_prompt(profile_prompt_model, ['introduction', 'target', 'instruction'])
        image_urls: list = agent_input.pop('image_urls', []) or []
        if image_urls:
            chat_prompt.generate_image_prompt(image_urls)
        return chat_prompt
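
To see how these hooks compose, here is a minimal sketch of a custom agent built on the new methods. DemoRagAgent, its key names ('input', 'background', 'tool_results', 'output'), and the template-method signatures it overrides are illustrative assumptions, not part of this diff:

from langchain_core.output_parsers import StrOutputParser

from agentuniverse.agent.agent import Agent
from agentuniverse.agent.input_object import InputObject


class DemoRagAgent(Agent):
    """Hypothetical subclass wiring the new Agent hooks together."""

    def input_keys(self) -> list[str]:
        return ['input']

    def output_keys(self) -> list[str]:
        return ['output']

    def parse_input(self, input_object: InputObject, agent_input: dict) -> dict:
        agent_input['input'] = input_object.get_data('input')
        return agent_input

    def parse_result(self, agent_input: dict) -> dict:
        return {'output': agent_input.get('output')}

    def execute(self, input_object: InputObject, agent_input: dict) -> dict:
        llm = self.process_llm()            # LLM instance resolved from self.llm_name
        self.process_memory(agent_input)    # fold chat history into the configured memory
        agent_input['background'] = self.invoke_knowledge(agent_input['input'], input_object)
        agent_input['tool_results'] = self.invoke_tools(input_object)
        prompt = self.process_prompt(agent_input)
        chain = prompt.as_langchain() | llm.as_langchain_runnable(self.agent_model.llm_params()) | StrOutputParser()
        agent_input['output'] = self.invoke_chain(chain, agent_input, input_object)
        return agent_input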
@@ -41,16 +41,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: Inp
        prompt: Prompt = self.handle_prompt(agent_model, planner_input)
        process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input)

-        memory_messages = assemble_memory_input(memory, planner_input)
+        assemble_memory_input(memory, planner_input)

        chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser()
        res = chain.invoke(input=planner_input)

-        memory_messages = assemble_memory_output(memory=memory,
-                                                 agent_input=planner_input,
-                                                 content=f"Human: {planner_input.get(self.input_key)}, AI: {res}",
-                                                 memory_messages=memory_messages)
-        return {**planner_input, self.output_key: res, 'chat_history': memory_messages}
+        assemble_memory_output(memory=memory,
+                               agent_input=planner_input,
+                               content=f"Human: {planner_input.get(self.input_key)}, AI: {res}")
+        return {**planner_input, self.output_key: res}

    def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt:
        """Prompt module processing.
@@ -39,16 +39,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: Inp
        prompt: Prompt = self.handle_prompt(agent_model, planner_input)
        process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input)

-        memory_messages = assemble_memory_input(memory, planner_input)
+        assemble_memory_input(memory, planner_input)

        chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser()
        res = self.invoke_chain(agent_model, chain, planner_input, None, input_object)

-        memory_messages = assemble_memory_output(memory=memory,
-                                                 agent_input=planner_input,
-                                                 content=f"Human: {planner_input.get(self.input_key)}, AI: {res}",
-                                                 memory_messages=memory_messages)
-        return {**planner_input, self.output_key: res, 'chat_history': memory_messages}
+        assemble_memory_output(memory=memory,
+                               agent_input=planner_input,
+                               content=f"Human: {planner_input.get(self.input_key)}, AI: {res}")
+        return {**planner_input, self.output_key: res}

    def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt:
        """Prompt module processing.
@@ -43,16 +43,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: Inp

        process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input)

-        memory_messages = assemble_memory_input(memory, planner_input)
+        assemble_memory_input(memory, planner_input)

        chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser()
        res = self.invoke_chain(agent_model, chain, planner_input, None, input_object)

-        memory_messages = assemble_memory_output(memory=memory,
-                                                 agent_input=planner_input,
-                                                 content=f"Human: {planner_input.get(self.input_key)}, AI: {res}",
-                                                 memory_messages=memory_messages)
-        return {**planner_input, self.output_key: res, 'chat_history': memory_messages}
+        assemble_memory_output(memory=memory,
+                               agent_input=planner_input,
+                               content=f"Human: {planner_input.get(self.input_key)}, AI: {res}")
+        return {**planner_input, self.output_key: res}

    @staticmethod
    def acquire_tools(action) -> list[LangchainTool]:
@@ -40,16 +40,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict,
        prompt: Prompt = self.handle_prompt(agent_model, planner_input)
        process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input)

-        memory_messages = assemble_memory_input(memory, planner_input)
+        assemble_memory_input(memory, planner_input)
        chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser()

        res = self.invoke_chain(agent_model, chain, planner_input, None, input_object)

-        memory_messages = assemble_memory_output(memory=memory,
-                                                 agent_input=planner_input,
-                                                 content=f"Human: {planner_input.get(self.input_key)}, AI: {res}",
-                                                 memory_messages=memory_messages)
-        return {**planner_input, self.output_key: res, 'chat_history': memory_messages}
+        assemble_memory_output(memory=memory,
+                               agent_input=planner_input,
+                               content=f"Human: {planner_input.get(self.input_key)}, AI: {res}")
+        return {**planner_input, self.output_key: res}

    def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt:
        """Prompt module processing.
agentuniverse/agent/plan/planner/rag_planner/rag_planner.py: 11 changes (5 additions, 6 deletions)
@@ -43,17 +43,16 @@ def invoke(self, agent_model: AgentModel, planner_input: dict,
        prompt: ChatPrompt = self.handle_prompt(agent_model, planner_input)
        process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input)

-        memory_messages = assemble_memory_input(memory, planner_input)
+        assemble_memory_input(memory, planner_input)

        chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser()

        res = self.invoke_chain(agent_model, chain, planner_input, None, input_object)

-        memory_messages = assemble_memory_output(memory=memory,
-                                                 agent_input=planner_input,
-                                                 content=f"Human: {planner_input.get(self.input_key)}, AI: {res}",
-                                                 memory_messages=memory_messages)
-        return {**planner_input, self.output_key: res, 'chat_history': memory_messages}
+        assemble_memory_output(memory=memory,
+                               agent_input=planner_input,
+                               content=f"Human: {planner_input.get(self.input_key)}, AI: {res}")
+        return {**planner_input, self.output_key: res}

    def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> ChatPrompt:
        """Prompt module processing.
@@ -39,16 +39,15 @@ def invoke(self, agent_model: AgentModel, planner_input: dict, input_object: Inp
        prompt: Prompt = self.handle_prompt(agent_model, planner_input)
        process_llm_token(llm, prompt.as_langchain(), agent_model.profile, planner_input)

-        memory_messages = assemble_memory_input(memory, planner_input)
+        assemble_memory_input(memory, planner_input)
        chain = prompt.as_langchain() | llm.as_langchain_runnable(agent_model.llm_params()) | StrOutputParser()

        res = self.invoke_chain(agent_model, chain, planner_input, None, input_object)

-        memory_messages = assemble_memory_output(memory=memory,
-                                                 agent_input=planner_input,
-                                                 content=f"Human: {planner_input.get(self.input_key)}, AI: {res}",
-                                                 memory_messages=memory_messages)
-        return {**planner_input, self.output_key: res, 'chat_history': memory_messages}
+        assemble_memory_output(memory=memory,
+                               agent_input=planner_input,
+                               content=f"Human: {planner_input.get(self.input_key)}, AI: {res}")
+        return {**planner_input, self.output_key: res}

    def handle_prompt(self, agent_model: AgentModel, planner_input: dict) -> Prompt:
        """Generate prompt template for the planner.