feat: add a subagent frame

This commit is contained in:
tcmofashi
2026-04-03 22:15:53 +08:00
parent ce580d1f8b
commit 185361f2c3
72 changed files with 13062 additions and 0 deletions

View File

@@ -0,0 +1,80 @@
# AgentLite Examples
This directory contains examples demonstrating various features of AgentLite.
## Setup
Before running the examples, set your OpenAI API key:
```bash
export OPENAI_API_KEY="sk-..."
```
Or create a `.env` file:
```
OPENAI_API_KEY=sk-...
```
## Examples
### 1. Single Agent (`single_agent.py`)
Basic usage of a single agent with conversation history.
```bash
python examples/single_agent.py
```
### 2. Multi-Agent (`multi_agent.py`)
Multiple specialized agents working together on a task.
```bash
python examples/multi_agent.py
```
### 3. Custom Tools (`custom_tools.py`)
Defining and using custom tools with agents.
```bash
python examples/custom_tools.py
```
### 4. MCP Tools (`mcp_tools.py`)
Using tools from MCP (Model Context Protocol) servers.
**Prerequisites:**
- Node.js installed
- MCP filesystem server: `npm install -g @modelcontextprotocol/server-filesystem`
```bash
python examples/mcp_tools.py
```
## Creating Your Own
Use these examples as templates for your own applications:
```python
import asyncio

from agentlite import Agent, OpenAIProvider


async def main():
    provider = OpenAIProvider(
        api_key="your-api-key",
        model="gpt-4",
    )
    agent = Agent(
        provider=provider,
        system_prompt="Your system prompt here.",
    )
    response = await agent.run("Your question here")
    print(response)


asyncio.run(main())
```

View File

@@ -0,0 +1,118 @@
"""Example: Custom Tools
This example demonstrates how to define and use custom tools with agents.
"""
import asyncio
import os
from datetime import datetime
from pydantic import BaseModel
from agentlite import Agent, OpenAIProvider, tool
from agentlite.tool import CallableTool2, ToolOk
# Define a tool using the decorator
@tool()
async def get_current_time() -> str:
    """Return the current local date and time as an ISO-8601 string."""
    moment = datetime.now()
    return moment.isoformat()
@tool()
async def calculate(expression: str) -> str:
    """Safely evaluate a mathematical expression.

    Args:
        expression: The mathematical expression to evaluate (e.g., "2 + 2").

    Returns:
        The result as a string, or an ``Error: ...`` message if the
        expression is invalid or uses a disallowed construct.
    """
    # Local imports keep the tool self-contained.
    import ast
    import operator as _op

    # Whitelisted helper functions callable by name inside the expression.
    allowed_names = {
        "abs": abs,
        "max": max,
        "min": min,
        "pow": pow,
        "round": round,
    }
    # Whitelisted operators; anything else (attribute access, subscripts,
    # comprehensions, lambdas, ...) is rejected outright.
    bin_ops = {
        ast.Add: _op.add,
        ast.Sub: _op.sub,
        ast.Mult: _op.mul,
        ast.Div: _op.truediv,
        ast.FloorDiv: _op.floordiv,
        ast.Mod: _op.mod,
        ast.Pow: _op.pow,
    }
    unary_ops = {ast.UAdd: _op.pos, ast.USub: _op.neg}

    def _eval(node):
        # Recursively evaluate only the node types explicitly allowed above.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in bin_ops:
            return bin_ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in unary_ops:
            return unary_ops[type(node.op)](_eval(node.operand))
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
            fn = allowed_names.get(node.func.id)
            if fn is not None and not node.keywords:
                return fn(*[_eval(arg) for arg in node.args])
        raise ValueError(f"unsupported expression element: {type(node).__name__}")

    try:
        # AST-based evaluation replaces eval(); even with empty __builtins__,
        # eval is escapable via attribute chains like (1).__class__ — walking
        # a whitelisted AST closes that hole while keeping the same contract.
        result = _eval(ast.parse(expression, mode="eval"))
        return str(result)
    except Exception as e:
        return f"Error: {e}"
# Define a tool using CallableTool2 (type-safe)
class WeatherParams(BaseModel):
    """Parameters for weather tool."""

    # Name of the city to look up (matched exactly against the mock data).
    city: str
    # Temperature unit: "celsius" (default) or "fahrenheit".
    units: str = "celsius"
class GetWeather(CallableTool2[WeatherParams]):
    """Get weather information for a city."""

    name = "get_weather"
    description = "Get the current weather for a city."
    params = WeatherParams

    async def __call__(self, params: WeatherParams) -> ToolOk:
        # Mock lookup table; a real implementation would call a weather API.
        mock_reports = {
            "Beijing": {"temp": 22, "condition": "Sunny"},
            "Shanghai": {"temp": 25, "condition": "Cloudy"},
            "New York": {"temp": 18, "condition": "Rainy"},
            "London": {"temp": 15, "condition": "Overcast"},
        }
        report = mock_reports.get(params.city)
        if report is None:
            return ToolOk(output=f"Weather data not available for {params.city}")
        degrees = report["temp"]
        if params.units == "fahrenheit":
            degrees = degrees * 9 // 5 + 32
        return ToolOk(
            output=f"Weather in {params.city}: {report['condition']}, {degrees}°{params.units[0].upper()}"
        )
async def main():
    """Run the custom tools example."""
    # Provider backing the agent; falls back to a placeholder key.
    provider = OpenAIProvider(
        api_key=os.getenv("OPENAI_API_KEY", "your-api-key"),
        model="gpt-4o-mini",
    )
    # Agent equipped with both decorator-based and class-based tools.
    agent = Agent(
        provider=provider,
        system_prompt="You are a helpful assistant with access to tools.",
        tools=[
            get_current_time,
            calculate,
            GetWeather(),
        ],
    )
    print("=== Testing Tools ===\n")
    # Echo each question, send it to the agent, print the reply.
    for question in (
        "What time is it?",
        "What is 123 * 456?",
        "What's the weather in Beijing?",
    ):
        print(f"User: {question}")
        answer = await agent.run(question)
        print(f"Agent: {answer}\n")


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,124 @@
"""Example demonstrating LLMClient usage.
This example shows how to use LLMClient for simple LLM calls
without the overhead of an Agent.
"""
import asyncio
from agentlite import LLMClient, llm_complete, llm_stream
from agentlite.config import AgentConfig, ProviderConfig, ModelConfig
async def main():
    """Run LLM client examples.

    Each example prints what it would do; the actual API calls are left
    commented out because they require a valid OpenAI API key.
    """
    # Example 1: Using simple function interface
    print("=== Example 1: Simple Function ===")
    print("Using llm_complete() function:")
    # Note: This requires a valid API key
    # response = await llm_complete(
    #     user_prompt="What is Python?",
    #     api_key="your-api-key",
    #     model="gpt-4",
    # )
    # print(response)
    print("(Requires API key - uncomment to run)")

    # Example 2: Using configuration-based client.
    # Two providers/models are declared up front; the client uses
    # `default_model` unless a model is selected explicitly.
    print("\n=== Example 2: Configuration-Based Client ===")
    config = AgentConfig(
        name="simple_llm",
        system_prompt="You are a helpful coding assistant.",
        providers={
            "openai": ProviderConfig(
                type="openai",
                api_key="your-api-key",  # Replace with actual key
            )
        },
        models={
            "gpt4": ModelConfig(
                provider="openai",
                model="gpt-4",
                temperature=0.7,
            ),
            "gpt35": ModelConfig(
                provider="openai",
                model="gpt-3.5-turbo",
                temperature=0.5,
            ),
        },
        default_model="gpt4",
    )
    # Create client
    client = LLMClient(config)
    # Make a call
    # response = await client.complete(
    #     user_prompt="Explain async/await in Python",
    # )
    # print(f"Response: {response.content}")
    # print(f"Model: {response.model}")
    # if response.usage:
    #     print(f"Tokens: {response.usage.total}")
    print("(Requires API key - uncomment to run)")

    # Example 3: Streaming
    print("\n=== Example 3: Streaming ===")
    print("Using llm_stream() function:")
    # async for chunk in llm_stream(
    #     user_prompt="Write a haiku about programming",
    #     api_key="your-api-key",
    # ):
    #     print(chunk, end="")
    print("\n(Requires API key - uncomment to run)")

    # Example 4: Direct provider usage — construct a provider yourself
    # instead of going through AgentConfig.
    print("\n=== Example 4: Direct Provider ===")
    from agentlite import OpenAIProvider  # local import: only needed here

    provider = OpenAIProvider(
        api_key="your-api-key",
        model="gpt-4",
        temperature=0.8,
    )
    # NOTE(review): this rebinds `client`, replacing the config-based one above.
    client = LLMClient(provider=provider)
    # response = await client.complete(
    #     user_prompt="What are the benefits of type hints?",
    #     system_prompt="You are a Python expert.",
    # )
    # print(response.content)
    print("(Requires API key - uncomment to run)")

    # Example 5: Model switching
    print("\n=== Example 5: Model Switching ===")
    # Use default model (gpt4)
    # response1 = await client.complete(user_prompt="Hello!")
    # Switch to different model
    # client_gpt35 = LLMClient(config, model="gpt35")
    # response2 = await client_gpt35.complete(user_prompt="Hello!")
    print("(Requires API key - uncomment to run)")

    print("\n=== Examples Complete ===")
    print("To run these examples:")
    print("1. Set your OpenAI API key")
    print("2. Uncomment the example code")
    print("3. Run: python examples/llm_client_example.py")


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,79 @@
"""Example: MCP Tools
This example demonstrates how to use MCP (Model Context Protocol) tools
with AgentLite agents.
Note: This example requires an MCP server to be available.
"""
import asyncio
import os
from agentlite import Agent, MCPClient, OpenAIProvider
async def main():
    """Run the MCP tools example.

    Spawns the MCP filesystem server over stdio, loads its tools into an
    agent, and exercises them with a few filesystem requests.
    """
    # Create provider
    provider = OpenAIProvider(
        api_key=os.getenv("OPENAI_API_KEY", "your-api-key"),
        model="gpt-4o-mini",
    )
    # Connect to MCP server
    # This example uses the filesystem MCP server
    # You can install it with: npm install -g @modelcontextprotocol/server-filesystem
    print("Connecting to MCP server...")
    # The context manager tears the server connection down on exit.
    async with MCPClient() as mcp:
        # Connect via stdio: npx spawns the filesystem server rooted at /tmp.
        await mcp.connect_stdio(
            command="npx",
            args=["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
        )
        # Load tools from MCP server
        print("Loading MCP tools...")
        mcp_tools = await mcp.load_tools()
        print(f"Loaded {len(mcp_tools)} tools from MCP server")
        # Create agent with MCP tools
        agent = Agent(
            provider=provider,
            system_prompt="You are a helpful assistant with access to filesystem tools.",
            tools=mcp_tools,
        )
        # Test MCP tools: list, create, then read back a file.
        print("\n=== Testing MCP Tools ===\n")
        print("User: List files in /tmp")
        response = await agent.run("List files in /tmp")
        print(f"Agent: {response}\n")
        print("User: Create a file called test.txt with 'Hello from AgentLite!'")
        response = await agent.run(
            "Create a file called test.txt with content 'Hello from AgentLite!'"
        )
        print(f"Agent: {response}\n")
        print("User: Read the test.txt file")
        response = await agent.run("Read the test.txt file")
        print(f"Agent: {response}\n")
if __name__ == "__main__":
    # Note: This example requires Node.js and the MCP filesystem server
    # npm install -g @modelcontextprotocol/server-filesystem
    print("Note: This example requires Node.js and @modelcontextprotocol/server-filesystem")
    print("Install with: npm install -g @modelcontextprotocol/server-filesystem\n")
    try:
        asyncio.run(main())
    except Exception as e:
        # Broad catch is deliberate for a demo: surface the error plus a
        # checklist of likely setup problems instead of a raw traceback.
        print(f"Error: {e}")
        print("\nMake sure you have:")
        print("1. Node.js installed")
        print("2. @modelcontextprotocol/server-filesystem installed globally")
        print("3. OPENAI_API_KEY environment variable set")

View File

@@ -0,0 +1,54 @@
"""Example: Multi-Agent Usage
This example demonstrates using multiple agents working independently.
"""
import asyncio
import os
from agentlite import Agent, OpenAIProvider
async def main():
    """Run the multi-agent example.

    Three specialized agents share one provider and hand work along a
    research -> write -> review pipeline.
    """
    provider = OpenAIProvider(
        api_key=os.getenv("OPENAI_API_KEY", "your-api-key"),
        model="gpt-4o-mini",
    )

    def make_agent(prompt):
        # All agents share the same provider; only the system prompt differs.
        return Agent(provider=provider, system_prompt=prompt)

    researcher = make_agent(
        "You are a research assistant. Provide factual, well-researched information."
    )
    writer = make_agent("You are a creative writer. Write engaging and clear content.")
    critic = make_agent(
        "You are an editor. Review and improve content for clarity and accuracy."
    )

    topic = "artificial intelligence in healthcare"

    # Research phase: gather key points on the topic.
    print("=== Research Phase ===")
    research = await researcher.run(f"Research {topic}. Provide key points.")
    print(f"Research:\n{research}\n")

    # Writing phase: turn the research into a draft.
    print("=== Writing Phase ===")
    content = await writer.run(
        f"Write a blog post about {topic} using this research:\n{research}"
    )
    print(f"Draft:\n{content}\n")

    # Review phase: critique the draft.
    print("=== Review Phase ===")
    review = await critic.run(f"Review this blog post and suggest improvements:\n{content}")
    print(f"Review:\n{review}\n")


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,42 @@
"""Example: Single Agent Usage
This example demonstrates basic usage of the AgentLite Agent class.
"""
import asyncio
import os
from agentlite import Agent, OpenAIProvider
async def main():
    """Run the single agent example."""
    provider = OpenAIProvider(
        api_key=os.getenv("OPENAI_API_KEY", "your-api-key"),
        model="gpt-4o-mini",
    )
    agent = Agent(
        provider=provider,
        system_prompt="You are a helpful assistant. Be concise.",
    )
    # Multi-turn conversation: the agent keeps history across run() calls.
    for question in ("What is Python?", "What are its main features?"):
        print(f"User: {question}")
        reply = await agent.run(question)
        print(f"Agent: {reply}\n")
    # Dump the accumulated conversation, truncating each message to 100 chars.
    print("--- Conversation History ---")
    for msg in agent.history:
        print(f"{msg.role}: {msg.extract_text()[:100]}...")


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,68 @@
---
name: code-reviewer
description: Review code for bugs, style issues, security vulnerabilities, and best practices. Use when the user asks to review, check, or audit code.
type: standard
---
# Code Reviewer
A comprehensive code review skill that checks for common issues and provides actionable feedback.
## Review Checklist
### 1. Correctness
- Check for logical errors
- Verify edge cases are handled
- Look for off-by-one errors
- Check null/None handling
- Verify error handling paths
### 2. Style & Readability
- Naming conventions (clear, descriptive names)
- Code organization and structure
- Comments where needed (not obvious code)
- Consistent formatting
- Function/class length
### 3. Performance
- Inefficient algorithms (O(n²) when O(n) possible)
- Unnecessary object creation
- Memory leaks
- Redundant operations
### 4. Security
- SQL injection vulnerabilities
- XSS vulnerabilities (for web code)
- Hardcoded secrets/passwords
- Unsafe deserialization
- Path traversal risks
### 5. Best Practices
- DRY principle (Don't Repeat Yourself)
- SOLID principles
- Proper use of language features
- Test coverage considerations
## Output Format
Provide your review in this structure:
```
## Summary
Brief overall assessment
## Critical Issues
- Issue 1: Description and fix
- Issue 2: Description and fix
## Warnings
- Warning 1: Description and suggestion
## Suggestions
- Suggestion 1: How to improve
## Positive Notes
- What's done well
```
Be constructive and specific. Include code examples for suggested fixes.

View File

@@ -0,0 +1,63 @@
---
name: release-process
description: Execute the release workflow including version checks, changelog updates, and PR creation. Use when the user wants to create a new release or version.
type: flow
---
# Release Process
Follow this structured workflow to create a new release.
## Flow
```mermaid
flowchart TD
BEGIN(( )) --> CHECK[Check for uncommitted changes]
CHECK --> CHANGES{Changes?}
CHANGES -->|Yes| COMMIT[Commit or stash changes]
CHANGES -->|No| VERSION{Version type?}
COMMIT --> VERSION
VERSION -->|Patch| UPDATE_PATCH[Update patch version]
VERSION -->|Minor| UPDATE_MINOR[Update minor version]
VERSION -->|Major| UPDATE_MAJOR[Update major version]
UPDATE_PATCH --> CHANGELOG[Update CHANGELOG.md]
UPDATE_MINOR --> CHANGELOG
UPDATE_MAJOR --> CHANGELOG
CHANGELOG --> BRANCH[Create release branch]
BRANCH --> PR[Create Pull Request]
PR --> END(( ))
```
## Node Details
### Check for uncommitted changes
Run `git status` and check if there are any uncommitted changes.
### Commit or stash changes
Ask the user whether to commit the changes or stash them for later.
### Version type
Ask the user what type of release this is:
- **Patch**: Bug fixes (0.0.X)
- **Minor**: New features, backward compatible (0.X.0)
- **Major**: Breaking changes (X.0.0)
### Update version
Update the version number in:
- `pyproject.toml` or `package.json`
- Any other version files
### Update CHANGELOG
Add a new section to CHANGELOG.md with:
- Version number and date
- List of changes
- Breaking changes (if any)
- Migration notes (if needed)
### Create release branch
Create a new branch: `release/vX.Y.Z`
### Create Pull Request
Open a PR with:
- Title: "Release vX.Y.Z"
- Description summarizing the changes

View File

@@ -0,0 +1,87 @@
"""Example demonstrating the skills system for AgentLite.
This example shows how to use skills with an Agent.
"""
import asyncio
from pathlib import Path
from agentlite import Agent, OpenAIProvider
from agentlite.skills import discover_skills, index_skills_by_name, SkillTool
async def main():
    """Run skills example.

    Discovers SKILL.md-based skills from the local ``skills`` directory,
    prints what was found, then shows (as text) how to attach them to an
    agent via SkillTool.
    """
    print("=" * 60)
    print("AgentLite Skills Example")
    print("=" * 60)
    # Discover skills from examples directory
    skills_dir = Path(__file__).parent / "skills"
    skills = discover_skills(skills_dir)
    print(f"\nDiscovered {len(skills)} skill(s):")
    for skill in skills:
        print(f" - {skill.name}: {skill.description}")
        print(f" Type: {skill.type}")
        # Flow-type skills carry a structured flowchart; standard ones do not.
        if skill.flow:
            print(f" Flow nodes: {len(skill.flow.nodes)}")
    # Index skills by name
    skill_index = index_skills_by_name(skills)
    print(f"\nIndexed {len(skill_index)} skill(s)")
    # Create agent (would need API key to actually run)
    print("\n" + "-" * 40)
    print("To use skills with an agent:")
    print("-" * 40)
    # NOTE: `code` is a display-only snippet printed for the reader;
    # it is never executed here.
    code = """
# Create provider
provider = OpenAIProvider(api_key="your-key", model="gpt-4")
# Create agent
agent = Agent(
provider=provider,
system_prompt="You are a helpful assistant with access to skills.",
)
# Create skill tool
skill_tool = SkillTool(skill_index, parent_agent=agent)
# Add skill tool to agent
agent.tools.add(skill_tool)
# Now the agent can use skills!
# The agent will see available skills in its context
# Example usage:
response = await agent.run("Review this Python code: def add(a, b): return a + b")
# The agent may choose to use the code-reviewer skill
"""
    print(code)
    print("\n" + "=" * 60)
    print("Key Concepts:")
    print("=" * 60)
    print("1. Skills are defined in SKILL.md files")
    print("2. YAML frontmatter specifies name, description, and type")
    print("3. Standard skills load the markdown as a prompt")
    print("4. Flow skills execute a structured flowchart")
    print("5. Skills are discovered from directories")
    print("6. SkillTool allows agents to execute skills")
    print("\nSkill Format (SKILL.md):")
    print(""" ---
name: skill-name
description: When to use this skill...
type: standard | flow
---
# Skill Content
Instructions for the skill...
""")


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,169 @@
"""Example demonstrating subagent usage in AgentLite.
This example shows how to create a parent agent with subagents
and delegate tasks to them using the Task tool.
"""
import asyncio
from agentlite import Agent, OpenAIProvider
from agentlite.labor_market import LaborMarket
from agentlite.tools.multiagent.task import Task
async def main():
    """Run subagent example.

    Walks through six scenarios: manual subagent registration, task
    delegation, a nested team hierarchy, dynamic subagents, subagent
    discovery, and copying an agent for reuse elsewhere.
    """
    print("=" * 60)
    print("AgentLite Subagent Example")
    print("=" * 60)
    # Note: This example requires a valid API key
    # Replace with your actual API key to run
    api_key = "your-api-key"
    if api_key == "your-api-key":
        print("\nNOTE: Set your API key to run this example")
        print("Example code is shown below:\n")
        print("-" * 40)
    # Create provider
    provider = OpenAIProvider(api_key=api_key, model="gpt-4")
    # Example 1: Create subagents manually
    print("\n=== Example 1: Manual Subagent Setup ===")
    # Create parent agent with empty labor market
    parent = Agent(
        provider=provider,
        system_prompt="You are a coordinator agent that delegates tasks to specialists.",
        name="coordinator",
    )
    # Create subagents
    coder = Agent(
        provider=provider,
        system_prompt="You are a coding specialist. Write clean, well-documented code.",
        name="coder",
    )
    reviewer = Agent(
        provider=provider,
        system_prompt="You are a code reviewer. Provide constructive feedback.",
        name="reviewer",
    )
    # Register subagents with parent (dynamic=False marks them as fixed).
    parent.add_subagent("coder", coder, "Writes code", dynamic=False)
    parent.add_subagent("reviewer", reviewer, "Reviews code", dynamic=False)
    # Add Task tool to parent so it can delegate work to its labor market.
    parent.tools.add(Task(labor_market=parent.labor_market))
    print("Created parent agent with subagents:")
    print(f" - coder: Writes code")
    print(f" - reviewer: Reviews code")
    # Example 2: Using subagents
    print("\n=== Example 2: Delegating Tasks ===")
    # Parent agent delegates to coder
    # response = await parent.run(
    #     "I need a Python function to calculate fibonacci numbers. "
    #     "Use the coder subagent to write it."
    # )
    print("(Requires API key - uncomment to run)")
    # Example 3: Nested subagents (hierarchy)
    print("\n=== Example 3: Hierarchical Structure ===")
    # Create a team lead with team members as subagents
    team_lead = Agent(
        provider=provider,
        system_prompt="You are a team lead. Coordinate work among your team members.",
        name="team_lead",
    )
    # Create team members
    backend_dev = Agent(
        provider=provider,
        system_prompt="You are a backend developer. Focus on API design and database.",
        name="backend_dev",
    )
    frontend_dev = Agent(
        provider=provider,
        system_prompt="You are a frontend developer. Focus on UI/UX.",
        name="frontend_dev",
    )
    tester = Agent(
        provider=provider,
        system_prompt="You are a QA engineer. Write test cases and find bugs.",
        name="tester",
    )
    # Add subagents to team lead.
    # NOTE(review): `dynamic` is omitted here — presumably defaults to a
    # fixed subagent like Example 1; confirm against add_subagent's signature.
    team_lead.add_subagent("backend", backend_dev, "Backend development")
    team_lead.add_subagent("frontend", frontend_dev, "Frontend development")
    team_lead.add_subagent("qa", tester, "Quality assurance")
    # Add Task tool
    team_lead.tools.add(Task(labor_market=team_lead.labor_market))
    print("Created team hierarchy:")
    print(" team_lead/")
    print(" ├── backend: Backend development")
    print(" ├── frontend: Frontend development")
    print(" └── qa: Quality assurance")
    # Example 4: Dynamic subagents
    print("\n=== Example 4: Dynamic Subagents ===")
    # Create subagent dynamically
    specialist = Agent(
        provider=provider,
        system_prompt="You are a specialist for a specific task.",
        name="specialist",
    )
    # Add as dynamic subagent (dynamic=True: added at runtime).
    team_lead.add_subagent("specialist", specialist, "Temporary specialist", dynamic=True)
    print("Added dynamic subagent 'specialist' to team_lead")
    # Example 5: Agent discovery
    print("\n=== Example 5: Agent Discovery ===")
    print(f"Team lead's subagents: {team_lead.labor_market.list_subagents()}")
    print(f"Descriptions: {team_lead.labor_market.subagent_descriptions}")
    # Check if subagent exists (LaborMarket supports the `in` operator).
    if "backend" in team_lead.labor_market:
        print("Backend subagent is available")
    # Get specific subagent; get_subagent apparently returns None when missing.
    backend = team_lead.get_subagent("backend")
    print(f"Backend agent name: {backend.name if backend else 'not found'}")
    # Example 6: Create subagent copy
    print("\n=== Example 6: Subagent Copy ===")
    # Create a copy of parent for use as subagent elsewhere
    parent_copy = parent.create_subagent_copy()
    print(f"Created copy of parent: {parent_copy.name}")
    print(f"Copy has empty labor market: {len(parent_copy.labor_market) == 0}")
    print("\n" + "=" * 60)
    print("Examples Complete")
    print("=" * 60)
    print("\nKey Concepts:")
    print("1. Parent agent holds subagents in LaborMarket")
    print("2. Task tool allows parent to delegate to subagents")
    print("3. Subagents have independent history and context")
    print("4. Fixed subagents are defined at setup")
    print("5. Dynamic subagents can be added at runtime")


if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,131 @@
"""Example demonstrating the configurable tool suite for AgentLite.
This example shows how to use the tool suite with configuration
to enable/disable specific tools.
"""
import asyncio
from pathlib import Path
from agentlite import Agent, OpenAIProvider
from agentlite.tools import (
ConfigurableToolset,
ToolSuiteConfig,
FileToolsConfig,
ShellToolsConfig,
)
async def main():
    """Demonstrate the configurable tool suite.

    Seven scenarios: default config, disabling a single tool, disabling a
    tool group, custom file-tool limits, a "safe" agent config, dynamic
    config reload, and calling individual tools directly.
    """
    # Example 1: Default configuration (all tools enabled)
    print("=== Example 1: Default Configuration ===")
    config = ToolSuiteConfig()
    toolset = ConfigurableToolset(config)
    print(f"Enabled tools: {len(toolset.tools)}")
    for tool in toolset.tools:
        print(f" - {tool.name}")
    # Example 2: Disable specific tools by name within a group.
    print("\n=== Example 2: Disable WriteFile ===")
    config = ToolSuiteConfig(
        file_tools=FileToolsConfig(
            tools={"WriteFile": False}  # Disable WriteFile
        )
    )
    toolset = ConfigurableToolset(config)
    print(f"Enabled tools: {len(toolset.tools)}")
    for tool in toolset.tools:
        print(f" - {tool.name}")
    # Example 3: Disable entire tool groups
    print("\n=== Example 3: Disable Shell Tools ===")
    config = ToolSuiteConfig(shell_tools=ShellToolsConfig(enabled=False))
    toolset = ConfigurableToolset(config)
    print(f"Enabled tools: {len(toolset.tools)}")
    for tool in toolset.tools:
        print(f" - {tool.name}")
    # Example 4: Custom file tool settings
    print("\n=== Example 4: Custom File Tool Settings ===")
    config = ToolSuiteConfig(
        file_tools=FileToolsConfig(
            max_lines=500,
            max_bytes=50 * 1024,  # 50KB
            allow_write_outside_work_dir=True,
        )
    )
    toolset = ConfigurableToolset(config)
    print(f"File tool settings:")
    print(f" Max lines: {config.file_tools.max_lines}")
    print(f" Max bytes: {config.file_tools.max_bytes}")
    print(f" Allow outside work dir: {config.file_tools.allow_write_outside_work_dir}")
    # Example 5: Using with an Agent
    print("\n=== Example 5: Using with Agent ===")
    # Create a safe configuration (no shell, no write outside work dir).
    # NOTE(review): safe_config is only referenced by the commented code below.
    safe_config = ToolSuiteConfig(
        file_tools=FileToolsConfig(
            allow_write_outside_work_dir=False,
        ),
        shell_tools=ShellToolsConfig(enabled=False),
    )
    # This would require an API key to actually run
    # provider = OpenAIProvider(api_key="your-api-key", model="gpt-4")
    # agent = Agent(
    #     provider=provider,
    #     system_prompt="You are a helpful assistant with file access.",
    #     tools=ConfigurableToolset(safe_config).tools,
    # )
    print("Safe configuration created:")
    print(" - Shell tools: DISABLED")
    print(" - Write outside work dir: DISABLED")
    print(" - Read file: ENABLED")
    print(" - Glob/Grep: ENABLED")
    # Example 6: Dynamic configuration reload — mutate the config object,
    # then reload() so the toolset reflects the new settings.
    print("\n=== Example 6: Dynamic Reload ===")
    config = ToolSuiteConfig()
    toolset = ConfigurableToolset(config)
    print(f"Initial tools: {len(toolset.tools)}")
    # Disable some tools and reload
    config.file_tools.disable_tool("WriteFile")
    config.shell_tools.enabled = False
    toolset.reload()
    print(f"After reload: {len(toolset.tools)}")
    for tool in toolset.tools:
        print(f" - {tool.name}")
    # Example 7: Using individual tools directly
    print("\n=== Example 7: Direct Tool Usage ===")
    from agentlite.tools.file import ReadFile, Glob  # local import: only needed here
    # Create tools directly, rooted at the current directory.
    read_tool = ReadFile(work_dir=Path("."))
    glob_tool = Glob(work_dir=Path("."))
    # Use ReadFile; results carry is_error plus output/message fields.
    result = await read_tool.read({"path": "README.md"})
    if not result.is_error:
        print(f"README.md: {len(result.output)} characters")
    else:
        print(f"Could not read README.md: {result.message}")
    # Use Glob; output appears to be a newline-separated file list.
    result = await glob_tool.glob({"pattern": "*.py"})
    if not result.is_error:
        files = result.output.split("\n") if result.output else []
        print(f"Python files found: {len(files)}")
    else:
        print(f"Glob error: {result.message}")


if __name__ == "__main__":
    asyncio.run(main())