LLM Agent: Evolving from Chatbots to Intelligent Assistants
LLM agents are AI systems with autonomous decision-making and execution capabilities. They can understand goals, formulate plans, call tools, and adjust strategies based on feedback. This article explains how to build powerful agent systems.
Core Agent Architecture
Key Components of an Intelligent Agent
🧠 Brain (LLM)
- Understand user intent
- Formulate execution plans
- Reasoning and decision-making
- Generate responses
🔧 Tools
- API calls
- Database queries
- Code execution
- File operations
💾 Memory
- Short-term memory (conversation history)
- Long-term memory (knowledge base)
- Working memory (task state)
- Episodic memory (experience)
📋 Planning
- Task decomposition
- Step ordering
- Resource scheduling
- Strategy optimization
ReAct Pattern: Think and Act
Reasoning + Acting = ReAct
class ReActAgent:
    """Agent implementing the ReAct (Reasoning + Acting) loop.

    The LLM alternates Thought/Action/Action Input steps; each Action is
    dispatched to a registered tool and the tool's output is fed back as
    an Observation, until the model emits a Final Answer.

    Fixes vs. original: ``run`` referenced ``get_tools_description`` and
    ``extract_final_answer``, which were never defined (AttributeError at
    runtime); ``parse_action`` now strips whitespace from its captures.
    """

    def __init__(self, llm, tools):
        # llm: object exposing generate(messages) -> str
        # tools: mapping of tool name -> callable(action_input) -> result
        self.llm = llm
        self.tools = tools
        self.max_steps = 10  # hard cap so a confused model cannot loop forever

    def run(self, task):
        """Execute *task*, returning the final answer or a step-limit notice."""
        prompt = f"""You are an intelligent assistant and need to complete the following task: {task}
Available tools:
{self.get_tools_description()}
Please respond in the following format:
Thought: What I should do next
Action: Tool name
Action Input: Input parameters for the tool
Observation: Tool return result
... (repeat Thought/Action/Action Input/Observation)
Thought: I now know the final answer
Final Answer: <final answer>
Begin: """
        messages = [{"role": "user", "content": prompt}]
        steps = 0
        while steps < self.max_steps:
            # LLM thinking
            response = self.llm.generate(messages)
            # Finished: model produced its final answer.
            if "Final Answer:" in response:
                return self.extract_final_answer(response)
            action, action_input = self.parse_action(response)
            if action and action in self.tools:
                # Execute the requested tool and feed the result back.
                observation = self.tools[action](action_input)
                messages.append({
                    "role": "assistant",
                    "content": response
                })
                messages.append({
                    "role": "user",
                    "content": f"Observation: {observation}"
                })
            else:
                # Unknown/unparseable action: ask the model to retry.
                messages.append({
                    "role": "user",
                    "content": "Invalid action. Please think again."
                })
            steps += 1
        return "Reached maximum step limit"

    def get_tools_description(self):
        """One tool name per line, for interpolation into the system prompt."""
        return "\n".join(f"- {name}" for name in self.tools)

    def extract_final_answer(self, text):
        """Return the text following the first 'Final Answer:' marker."""
        return text.split("Final Answer:", 1)[1].strip()

    def parse_action(self, text):
        """Parse (action, action_input) from a model response, or (None, None)."""
        import re
        action_match = re.search(r'Action:\s*(.+)', text)
        input_match = re.search(r'Action Input:\s*(.+)', text)
        if action_match and input_match:
            return action_match.group(1).strip(), input_match.group(1).strip()
        return None, None
# Usage example
# NOTE(review): llm_client, web_search_tool, calculator_tool and
# database_query_tool are assumed to be defined elsewhere in the
# surrounding project — confirm before running this snippet.
agent = ReActAgent(
llm=llm_client,
tools={
"search": web_search_tool,
"calculator": calculator_tool,
"database": database_query_tool
}
)
# run() drives the ReAct loop until the model emits a Final Answer.
result = agent.run("What is the weather like in Beijing today, and what is the temperature in Celsius?")
print(result)

Function Calling Implementation
Let the LLM Call External Functions
import json
from typing import List, Dict, Any
class FunctionCallingAgent:
    """Agent built on the OpenAI Function Calling API.

    Tool functions are registered with a JSON-schema parameter spec; the
    model decides whether to call one, the call is executed locally, and
    a second completion turns the function result into a user-facing reply.

    Fix vs. original: if the model requested a function name that was never
    registered, execution silently fell through and returned
    ``message.content`` (None on a function-call message). That case now
    raises a clear ``ValueError``.
    """

    def __init__(self, api_key: str):
        # NOTE(review): OpenAI is expected to be imported at module level
        # elsewhere in the file — confirm.
        self.client = OpenAI(api_key=api_key)
        self.functions = []       # JSON specs sent to the API
        self.function_map = {}    # name -> local callable

    def register_function(self, func, description: str, parameters: Dict):
        """Register *func* with its description and JSON-schema *parameters*."""
        function_spec = {
            "name": func.__name__,
            "description": description,
            "parameters": parameters,
        }
        self.functions.append(function_spec)
        self.function_map[func.__name__] = func

    def run(self, user_input: str):
        """Answer *user_input*, transparently executing one function call if requested.

        Raises ValueError if the model requests an unregistered function.
        """
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": user_input},
        ]
        # First call: let the model decide whether to use a function.
        response = self.client.chat.completions.create(
            model="gpt-4",
            messages=messages,
            functions=self.functions,
            function_call="auto",
        )
        message = response.choices[0].message
        if message.function_call:
            function_name = message.function_call.name
            function_args = json.loads(message.function_call.arguments)
            if function_name not in self.function_map:
                # Previously this fell through and returned None silently.
                raise ValueError(f"Model requested unknown function: {function_name}")
            function_result = self.function_map[function_name](**function_args)
            # Feed the function result back into the conversation.
            messages.append(message)
            messages.append({
                "role": "function",
                "name": function_name,
                "content": str(function_result),
            })
            # Second call: generate the final reply based on the function result.
            final_response = self.client.chat.completions.create(
                model="gpt-4",
                messages=messages,
            )
            return final_response.choices[0].message.content
        # No function needed: return the model's direct answer.
        return message.content
# Define tool functions
def get_weather(location: str, unit: str = "celsius"):
"""Get weather information"""
# Should call a real weather API
return f"Weather in {location}: Sunny, 25 {unit}"
def calculate(expression: str):
    """Evaluate a math expression and return "Result: <value>".

    Returns "Calculation error" if evaluation fails.

    SECURITY: eval() executes arbitrary Python — never pass it untrusted
    input. Consider ast.literal_eval or a dedicated expression parser.
    Fix vs. original: the bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit; narrowed to ``Exception``.
    """
    try:
        result = eval(expression)  # flagged above — see SECURITY note
        return f"Result: {result}"
    except Exception:
        return "Calculation error"
def search_database(query: str, table: str):
    """Query a database (stub — a production version would run a real query)."""
    template = "Query '{q}' from table {t}: ..."
    return template.format(q=query, t=table)
# Create agent and register functions
# NOTE(review): "your-key" is a placeholder — a real OpenAI API key is
# required, and the run() calls below hit the live API.
agent = FunctionCallingAgent(api_key="your-key")
# Register get_weather with a JSON-schema describing its parameters;
# only "location" is required, "unit" is optional.
agent.register_function(
get_weather,
"Get weather information for a specified location",
{
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "Location name"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "Temperature unit"
}
},
"required": ["location"]
}
)
# Register calculate; the model supplies the expression string.
agent.register_function(
calculate,
"Calculate a mathematical expression",
{
"type": "object",
"properties": {
"expression": {
"type": "string",
"description": "Mathematical expression"
}
},
"required": ["expression"]
}
)
# Use the agent
result = agent.run("What is the weather like in Beijing?")
print(result) # Automatically calls get_weather
result = agent.run("Calculate 123 * 456 + 789")
print(result)

Multi-Agent Collaboration System
Build an Agent Team for Complex Tasks
class MultiAgentSystem:
    """Coordinates a team of named agents under a 'manager' agent.

    The manager decomposes a task into steps, each step is routed to the
    agent it names, and every result is broadcast to all agents before
    the manager summarizes.

    Fix vs. original: a plan step naming an unregistered agent was
    silently skipped, leaving a hole in ``results`` that later steps and
    the summary would quietly inherit; it now raises ValueError.
    """

    def __init__(self):
        self.agents = {}
        # NOTE(review): message_queue is never used by any method here;
        # kept for interface compatibility — confirm before removing.
        self.message_queue = []

    def register_agent(self, name: str, agent):
        """Register *agent* under *name* (the manager must use name 'manager')."""
        self.agents[name] = agent

    def run_task(self, task: str):
        """Execute *task* collaboratively and return the manager's summary.

        Raises ValueError if the plan references an unregistered agent.
        """
        manager = self.agents["manager"]
        plan = manager.create_plan(task)
        results = {}
        for step in plan["steps"]:
            agent_name = step["agent"]
            subtask = step["task"]
            if agent_name not in self.agents:
                raise ValueError(f"Plan references unregistered agent: {agent_name}")
            agent = self.agents[agent_name]
            # Earlier step results are passed along as shared context.
            result = agent.execute(subtask, context=results)
            results[step["id"]] = result
            # Let every interested agent observe this step's outcome.
            self.broadcast_message({
                "from": agent_name,
                "type": "result",
                "content": result,
            })
        return manager.summarize(results)

    def broadcast_message(self, message):
        """Deliver *message* to every agent that implements receive_message."""
        for agent_name, agent in self.agents.items():
            if hasattr(agent, "receive_message"):
                agent.receive_message(message)
# Specialized agent classes
class ResearchAgent:
    """Research agent - gathers information"""

    def execute(self, task, context=None):
        """Return a (stubbed) research summary for *task*; *context* is unused."""
        summary = "Research result: information related to {}...".format(task)
        return summary
class WriterAgent:
    """Writer agent - creates content"""

    def execute(self, task, context=None):
        """Create content based on the 'research' entry of *context*.

        Fix vs. original: ``context.get`` was called directly, crashing
        with AttributeError whenever execute() was invoked without a
        context (the parameter defaults to None).
        """
        research = (context or {}).get("research", "")
        return f"Content created based on {research}..."
class ReviewerAgent:
    """Reviewer agent - performs quality checks"""

    def execute(self, task, context=None):
        """Review the 'content' entry of *context* and return feedback.

        Fix vs. original: ``context.get`` was called directly, crashing
        with AttributeError whenever execute() was invoked without a
        context (the parameter defaults to None).
        """
        content = (context or {}).get("content", "")
        return f"Review feedback: Improvements needed for {content}..."
class ManagerAgent:
    """Manager agent - planning and coordination"""

    def create_plan(self, task):
        """Decompose *task* into a fixed research -> write -> review pipeline."""
        pipeline = [
            ("research", "researcher", "Research"),
            ("write", "writer", "Write"),
            ("review", "reviewer", "Review"),
        ]
        steps = [
            {"id": step_id, "agent": agent_name, "task": f"{verb} {task}"}
            for step_id, agent_name, verb in pipeline
        ]
        return {"steps": steps}

    def summarize(self, results):
        """Fold all step results into a single completion message."""
        return "Task complete. Final output: {}".format(results)
# Build a multi-agent system
# The manager decomposes the task; researcher/writer/reviewer names must
# match the agent names used in ManagerAgent.create_plan's steps.
system = MultiAgentSystem()
system.register_agent("manager", ManagerAgent())
system.register_agent("researcher", ResearchAgent())
system.register_agent("writer", WriterAgent())
system.register_agent("reviewer", ReviewerAgent())
# Execute a complex task
result = system.run_task("an article about Artificial Intelligence")
print(result)

Agent Memory System
Enable Agents to Remember and Learn
Short-Term Memory Implementation
class ShortTermMemory:
    """Fixed-size FIFO buffer of recent conversation items.

    Fix vs. original: the extraction broke the join separator literal
    across two physical lines (an unterminated string); reconstructed
    as an explicit "\n".
    """

    def __init__(self, max_size=10):
        self.buffer = []
        self.max_size = max_size

    def add(self, item):
        """Append *item*, evicting the oldest entry once the buffer is full."""
        self.buffer.append(item)
        if len(self.buffer) > self.max_size:
            self.buffer.pop(0)

    def get_context(self):
        """Return the last (up to) five items joined by newlines."""
        return "\n".join(self.buffer[-5:])
Long-Term Memory Implementation
class LongTermMemory:
    """Persistent semantic memory backed by a vector database.

    Fix vs. original: ``recall``'s return statement was fused into the
    following article heading by the extraction; reconstructed as a
    direct return.
    """

    def __init__(self, vector_db):
        # vector_db: object exposing insert(embedding, content, metadata)
        # and search(query, k) -> results
        self.vector_db = vector_db

    def store(self, content, metadata):
        """Embed *content* and persist it alongside *metadata*.

        NOTE(review): relies on a module-level create_embedding helper
        that is not defined in this file — confirm it exists.
        """
        embedding = create_embedding(content)
        self.vector_db.insert(
            embedding, content, metadata
        )

    def recall(self, query, k=5):
        """Return the top-*k* stored entries most similar to *query*."""
        return self.vector_db.search(query, k)
Agent Framework Comparison
| Framework | Highlights | Usability | Extensibility | Use Cases |
|---|---|---|---|---|
| LangChain | Comprehensive features, rich ecosystem | ★★★ | ★★★★★ | General development |
| AutoGPT | Fully autonomous | ★★ | ★★★ | Task automation |
| AutoGen | Multi-agent collaboration | ★★★★ | ★★★★ | Team collaboration |
| CrewAI | Clear role definitions | ★★★★★ | ★★★ | Process automation |
| BabyAGI | Task-driven | ★★★ | ★★★ | Goal-oriented |
Agent Evaluation Metrics
How to Evaluate Agent Performance
⚡ Efficiency
- Task completion time
- Number of steps
- Resource consumption
- Number of API calls
✅ Accuracy
- Task success rate
- Error rate
- Output quality
- Consistency
🤝 Interaction
- Response time
- Understanding accuracy
- User satisfaction
- Number of clarifications
Practical Case: Customer Service Agent
Build an Intelligent Customer Service Agent
class CustomerServiceAgent:
    """Intelligent customer service agent.

    Routes each query by LLM-classified intent to order tracking, product
    inquiry, complaint handling, or a general contextual reply.

    Fixes vs. original: ``extract_order_id`` and ``log_complaint`` were
    called but never defined (AttributeError at runtime); the
    ``general_response`` join separator was an unterminated string broken
    across lines by the extraction; mojibake emoji in the order-status
    template repaired.
    """

    def __init__(self, llm, knowledge_base, order_system):
        # llm: object exposing generate(prompt) -> str
        # knowledge_base: object exposing search(query, top_k=...) -> results
        # order_system: object exposing get_order(order_id) -> dict | None
        self.llm = llm
        self.knowledge_base = knowledge_base
        self.order_system = order_system
        self.conversation_history = []
        self.complaints = []  # logged complaint texts; length drives ticket IDs

    def handle_query(self, user_query):
        """Classify *user_query* by intent and dispatch to the matching handler."""
        intent = self.identify_intent(user_query)
        if intent == "order_tracking":
            return self.track_order(user_query)
        elif intent == "product_inquiry":
            return self.product_info(user_query)
        elif intent == "complaint":
            return self.handle_complaint(user_query)
        else:
            return self.general_response(user_query)

    def identify_intent(self, query):
        """Ask the LLM to label *query* with one of the known intent types."""
        prompt = f"""Analyze the user's intent:
Query: {query}
Possible intents:
- order_tracking: Order tracking
- product_inquiry: Product inquiry
- complaint: Complaint
- general: General question
Return the best matching intent type: """
        response = self.llm.generate(prompt)
        return response.strip().lower()

    def track_order(self, query):
        """Look up the order mentioned in *query* and report its status."""
        order_id = self.extract_order_id(query)
        if order_id:
            order_info = self.order_system.get_order(order_id)
            if order_info:
                return f"""Your order {order_id} status:
📦 Status: {order_info['status']}
📍 Location: {order_info['location']}
📅 ETA: {order_info['eta']}
If you need anything else, let me know."""
            else:
                return f"Sorry, order {order_id} was not found. Please verify the order ID."
        else:
            return "Please provide your order ID and I will look it up."

    def extract_order_id(self, query):
        """Return the first run of 4+ digits in *query*, or None if absent."""
        import re
        match = re.search(r'\d{4,}', query)
        return match.group(0) if match else None

    def log_complaint(self, query):
        """Record the complaint text and return a deterministic ticket ID."""
        self.complaints.append(query)
        return f"CMP-{len(self.complaints):05d}"

    def product_info(self, query):
        """Answer a product question from knowledge-base search results."""
        relevant_info = self.knowledge_base.search(query, top_k=3)
        if relevant_info:
            response = self.llm.generate(f"""
Answer the user's question based on the following product information:
Information: {relevant_info}
User question: {query}
Please provide an accurate and helpful answer: """)
            return response
        else:
            return "Sorry, relevant product information was not found. I can transfer you to a human agent."

    def handle_complaint(self, query):
        """Log the complaint and generate an empathetic reply with its ticket ID."""
        complaint_id = self.log_complaint(query)
        response = self.llm.generate(f"""
User complaint: {query}
Please generate an empathetic reply including:
1) Acknowledgment and apology
2) Process explanation
3) Complaint ID: {complaint_id}
4) Commitment to follow up
Reply: """)
        return response

    def general_response(self, query):
        """Answer a general question using the last few turns as context."""
        context = "\n".join(self.conversation_history[-5:])
        response = self.llm.generate(f"""
Conversation history:
{context}
User question: {query}
Please provide a friendly and professional answer: """)
        self.conversation_history.append(f"User: {query}")
        self.conversation_history.append(f"Agent: {response}")
        return response
# Usage example
# NOTE(review): llm_client, vector_db and order_api are assumed to be
# defined elsewhere in the surrounding project — confirm before running.
agent = CustomerServiceAgent(
llm=llm_client,
knowledge_base=vector_db,
order_system=order_api
)
# Handle various queries
print(agent.handle_query("When will my order 12345 arrive?"))
print(agent.handle_query("How long is the warranty for this product?"))
print(agent.handle_query("The product has quality issues; I want to file a complaint!"))

Build Your Intelligent Agent
LLM agent technology evolves AI from simple Q&A tools into intelligent assistants capable of autonomously completing complex tasks. With LLM API, you can quickly build powerful agent systems.
Start Building Agents