feat: add a subagent frame
This commit is contained in:
279
agentlite/docs/llm_client.md
Normal file
279
agentlite/docs/llm_client.md
Normal file
@@ -0,0 +1,279 @@
|
||||
# LLM Client
|
||||
|
||||
Simple LLM client for direct LLM calls without agent overhead.
|
||||
|
||||
## Overview
|
||||
|
||||
The `LLMClient` provides a simple interface for making direct LLM calls, reusing the AgentLite configuration system. This is useful when you don't need the full agent capabilities (tools, conversation history, etc.) and just want to call an LLM.
|
||||
|
||||
## Features
|
||||
|
||||
- **Simple Interface**: Just system prompt + user prompt → response
|
||||
- **Configuration Reuse**: Uses existing `AgentConfig` for provider/model setup
|
||||
- **Streaming Support**: Both non-streaming and streaming interfaces
|
||||
- **Flexible Usage**: Use with config, direct provider, or simple functions
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Method 1: Simple Function (Quickest)
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from agentlite import llm_complete
|
||||
|
||||
async def main():
|
||||
response = await llm_complete(
|
||||
user_prompt="What is Python?",
|
||||
api_key="your-api-key",
|
||||
model="gpt-4",
|
||||
)
|
||||
print(response)
|
||||
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
### Method 2: Using Configuration
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from agentlite import LLMClient, AgentConfig, ProviderConfig, ModelConfig
|
||||
|
||||
async def main():
|
||||
# Create configuration
|
||||
config = AgentConfig(
|
||||
providers={
|
||||
"openai": ProviderConfig(api_key="your-api-key")
|
||||
},
|
||||
models={
|
||||
"gpt4": ModelConfig(provider="openai", model="gpt-4")
|
||||
},
|
||||
default_model="gpt4",
|
||||
)
|
||||
|
||||
# Create client
|
||||
client = LLMClient(config)
|
||||
|
||||
# Make a call
|
||||
response = await client.complete(
|
||||
system_prompt="You are a helpful assistant.",
|
||||
user_prompt="What is Python?"
|
||||
)
|
||||
|
||||
print(response.content)
|
||||
print(f"Model: {response.model}")
|
||||
if response.usage:
|
||||
print(f"Tokens: {response.usage.total}")
|
||||
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
### Method 3: Direct Provider
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from agentlite import LLMClient, OpenAIProvider
|
||||
|
||||
async def main():
|
||||
# Create provider directly
|
||||
provider = OpenAIProvider(
|
||||
api_key="your-api-key",
|
||||
model="gpt-4",
|
||||
temperature=0.8,
|
||||
)
|
||||
|
||||
# Create client
|
||||
client = LLMClient(provider=provider)
|
||||
|
||||
# Make a call
|
||||
response = await client.complete(
|
||||
user_prompt="Explain async/await",
|
||||
system_prompt="You are a Python expert.",
|
||||
)
|
||||
|
||||
print(response.content)
|
||||
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
## Streaming
|
||||
|
||||
### Using Client
|
||||
|
||||
```python
|
||||
async for chunk in client.stream(
|
||||
user_prompt="Write a poem about AI",
|
||||
system_prompt="You are a creative writer.",
|
||||
):
|
||||
print(chunk, end="")
|
||||
```
|
||||
|
||||
### Using Function
|
||||
|
||||
```python
|
||||
async for chunk in llm_stream(
|
||||
user_prompt="Write a haiku",
|
||||
api_key="your-api-key",
|
||||
):
|
||||
print(chunk, end="")
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### LLMClient
|
||||
|
||||
```python
|
||||
class LLMClient:
|
||||
def __init__(
|
||||
self,
|
||||
config: Optional[AgentConfig] = None,
|
||||
provider: Optional[ChatProvider] = None,
|
||||
model: Optional[str] = None,
|
||||
)
|
||||
|
||||
async def complete(
|
||||
self,
|
||||
user_prompt: str,
|
||||
system_prompt: str = "You are a helpful assistant.",
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
) -> LLMResponse
|
||||
|
||||
async def stream(
|
||||
self,
|
||||
user_prompt: str,
|
||||
system_prompt: str = "You are a helpful assistant.",
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
) -> AsyncIterator[str]
|
||||
```
|
||||
|
||||
### LLMResponse
|
||||
|
||||
```python
|
||||
class LLMResponse:
|
||||
content: str # The response text
|
||||
usage: TokenUsage | None # Token usage stats
|
||||
model: str # Model name used
|
||||
```
|
||||
|
||||
### Convenience Functions
|
||||
|
||||
```python
|
||||
async def llm_complete(
|
||||
user_prompt: str,
|
||||
system_prompt: str = "You are a helpful assistant.",
|
||||
api_key: Optional[str] = None,
|
||||
model: str = "gpt-4",
|
||||
base_url: str = "https://api.openai.com/v1",
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
) -> str
|
||||
|
||||
async def llm_stream(
|
||||
user_prompt: str,
|
||||
system_prompt: str = "You are a helpful assistant.",
|
||||
api_key: Optional[str] = None,
|
||||
model: str = "gpt-4",
|
||||
base_url: str = "https://api.openai.com/v1",
|
||||
temperature: Optional[float] = None,
|
||||
max_tokens: Optional[int] = None,
|
||||
) -> AsyncIterator[str]
|
||||
```
|
||||
|
||||
## Configuration Options
|
||||
|
||||
### Temperature and Max Tokens
|
||||
|
||||
You can override temperature and max_tokens per call:
|
||||
|
||||
```python
|
||||
response = await client.complete(
|
||||
user_prompt="Creative writing task",
|
||||
temperature=0.9, # More creative
|
||||
max_tokens=500, # Limit response length
|
||||
)
|
||||
```
|
||||
|
||||
### Model Switching
|
||||
|
||||
When using `AgentConfig`, you can switch models:
|
||||
|
||||
```python
|
||||
config = AgentConfig(
|
||||
providers={"openai": ProviderConfig(api_key="...")},
|
||||
models={
|
||||
"gpt4": ModelConfig(provider="openai", model="gpt-4"),
|
||||
"gpt35": ModelConfig(provider="openai", model="gpt-3.5-turbo"),
|
||||
},
|
||||
default_model="gpt4",
|
||||
)
|
||||
|
||||
# Use default model (gpt4)
|
||||
client = LLMClient(config)
|
||||
|
||||
# Use specific model
|
||||
client_gpt35 = LLMClient(config, model="gpt35")
|
||||
```
|
||||
|
||||
## Comparison with Agent
|
||||
|
||||
| Feature | LLMClient | Agent |
|
||||
|---------|-----------|-------|
|
||||
| Tools | ❌ No | ✅ Yes |
|
||||
| Conversation History | ❌ No | ✅ Yes |
|
||||
| System Prompt | ✅ Yes | ✅ Yes |
|
||||
| Configuration | ✅ Reuses AgentConfig | ✅ AgentConfig |
|
||||
| Streaming | ✅ Yes | ✅ Yes |
|
||||
| Use Case | Simple LLM calls | Complex agent workflows |
|
||||
|
||||
## Examples
|
||||
|
||||
### Translation
|
||||
|
||||
```python
|
||||
async def translate(text: str, target_language: str) -> str:
|
||||
response = await llm_complete(
|
||||
user_prompt=f"Translate to {target_language}: {text}",
|
||||
system_prompt="You are a translator. Return only the translation.",
|
||||
api_key="your-api-key",
|
||||
)
|
||||
return response
|
||||
```
|
||||
|
||||
### Code Review
|
||||
|
||||
```python
|
||||
async def review_code(code: str) -> str:
|
||||
client = LLMClient(config)
|
||||
response = await client.complete(
|
||||
user_prompt=f"Review this code:\n\n```python\n{code}\n```",
|
||||
system_prompt="You are a code reviewer. Provide constructive feedback.",
|
||||
)
|
||||
return response.content
|
||||
```
|
||||
|
||||
### Streaming Chat
|
||||
|
||||
```python
|
||||
async def chat_stream(user_message: str):
|
||||
async for chunk in client.stream(
|
||||
user_prompt=user_message,
|
||||
system_prompt="You are a helpful chat assistant.",
|
||||
):
|
||||
yield chunk
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
```python
|
||||
from agentlite.provider import APIConnectionError, APITimeoutError, APIStatusError
|
||||
|
||||
try:
|
||||
response = await client.complete(user_prompt="Hello")
|
||||
except APIConnectionError:
|
||||
print("Failed to connect to API")
|
||||
except APITimeoutError:
|
||||
print("Request timed out")
|
||||
except APIStatusError as e:
|
||||
print(f"API error {e.status_code}: {e.message}")
|
||||
```
|
||||
271
agentlite/docs/tools.md
Normal file
271
agentlite/docs/tools.md
Normal file
@@ -0,0 +1,271 @@
|
||||
# AgentLite Tool Suite
|
||||
|
||||
A comprehensive tool suite for AgentLite, inspired by kimi-cli's tools, with configuration support for enabling/disabling individual tools.
|
||||
|
||||
## Overview
|
||||
|
||||
This tool suite provides:
|
||||
|
||||
- **File Operations**: Read, write, edit, search files
|
||||
- **Shell Execution**: Execute shell commands
|
||||
- **Web Access**: Fetch URLs and search the web
|
||||
- **Multi-Agent**: Task delegation and subagent creation
|
||||
- **Utilities**: Todo lists and thinking tools
|
||||
- **Configuration**: Fine-grained control over which tools are available
|
||||
|
||||
## Installation
|
||||
|
||||
The tool suite is included with AgentLite; no additional installation is required.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```python
|
||||
from agentlite.tools import ConfigurableToolset, ToolSuiteConfig
|
||||
from agentlite import Agent, OpenAIProvider
|
||||
|
||||
# Create toolset with default config (all tools enabled)
|
||||
toolset = ConfigurableToolset()
|
||||
|
||||
# Create agent with tools
|
||||
provider = OpenAIProvider(api_key="your-key", model="gpt-4")
|
||||
agent = Agent(
|
||||
provider=provider,
|
||||
system_prompt="You are a helpful assistant.",
|
||||
tools=toolset.tools,
|
||||
)
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Basic Configuration
|
||||
|
||||
```python
|
||||
from agentlite.tools import (
|
||||
ToolSuiteConfig,
|
||||
FileToolsConfig,
|
||||
ShellToolsConfig,
|
||||
)
|
||||
|
||||
# Disable specific tools
|
||||
config = ToolSuiteConfig(
|
||||
file_tools=FileToolsConfig(
|
||||
tools={"WriteFile": False, "StrReplaceFile": False}
|
||||
)
|
||||
)
|
||||
toolset = ConfigurableToolset(config)
|
||||
```
|
||||
|
||||
### Disable Entire Tool Groups
|
||||
|
||||
```python
|
||||
# Disable all shell tools
|
||||
config = ToolSuiteConfig(
|
||||
shell_tools=ShellToolsConfig(enabled=False)
|
||||
)
|
||||
toolset = ConfigurableToolset(config)
|
||||
```
|
||||
|
||||
### Custom Tool Settings
|
||||
|
||||
```python
|
||||
config = ToolSuiteConfig(
|
||||
file_tools=FileToolsConfig(
|
||||
max_lines=500,
|
||||
max_bytes=50 * 1024, # 50KB
|
||||
allow_write_outside_work_dir=False,
|
||||
),
|
||||
shell_tools=ShellToolsConfig(
|
||||
timeout=60,
|
||||
blocked_commands=["rm -rf", "sudo"],
|
||||
),
|
||||
)
|
||||
```
|
||||
|
||||
### Dynamic Configuration
|
||||
|
||||
```python
|
||||
# Create toolset
|
||||
config = ToolSuiteConfig()
|
||||
toolset = ConfigurableToolset(config)
|
||||
|
||||
# Disable tools and reload
|
||||
config.file_tools.disable_tool("WriteFile")
|
||||
config.shell_tools.enabled = False
|
||||
toolset.reload()
|
||||
```
|
||||
|
||||
## Available Tools
|
||||
|
||||
### File Tools
|
||||
|
||||
| Tool | Description | Config Options |
|
||||
|------|-------------|----------------|
|
||||
| `ReadFile` | Read text files with line numbers | `max_lines`, `max_bytes` |
|
||||
| `WriteFile` | Write or append to files | `allow_write_outside_work_dir` |
|
||||
| `StrReplaceFile` | Edit files using string replacement | `allow_write_outside_work_dir` |
|
||||
| `Glob` | Search files using glob patterns | `max_glob_matches` |
|
||||
| `Grep` | Search file contents with regex | - |
|
||||
| `ReadMediaFile` | Read images and videos | `max_size_mb` |
|
||||
|
||||
### Shell Tools
|
||||
|
||||
| Tool | Description | Config Options |
|
||||
|------|-------------|----------------|
|
||||
| `Shell` | Execute shell commands | `timeout`, `blocked_commands` |
|
||||
|
||||
### Web Tools
|
||||
|
||||
| Tool | Description | Config Options |
|
||||
|------|-------------|----------------|
|
||||
| `FetchURL` | Fetch web page content | `timeout`, `user_agent` |
|
||||
| `SearchWeb` | Search the web | `timeout` |
|
||||
|
||||
### Multi-Agent Tools
|
||||
|
||||
| Tool | Description | Config Options |
|
||||
|------|-------------|----------------|
|
||||
| `Task` | Delegate tasks to subagents | `max_steps` |
|
||||
| `CreateSubagent` | Create custom subagents | - |
|
||||
|
||||
### Utility Tools
|
||||
|
||||
| Tool | Description |
|
||||
|------|-------------|
|
||||
| `SetTodoList` | Manage todo lists |
|
||||
| `Think` | Record thinking steps |
|
||||
|
||||
## Safety Features
|
||||
|
||||
### Path Security
|
||||
|
||||
- Files outside the working directory require absolute paths
|
||||
- Optional restriction on writing outside working directory
|
||||
- Path traversal protection
|
||||
|
||||
### Shell Security
|
||||
|
||||
- Configurable command timeout
|
||||
- Blocked command list
|
||||
- No shell injection — commands are executed directly (`execve`-style) rather than through a shell, so shell metacharacters in arguments are not interpreted
|
||||
|
||||
### Resource Limits
|
||||
|
||||
- File size limits
|
||||
- Line count limits
|
||||
- Glob match limits
|
||||
- HTTP content size limits
|
||||
|
||||
## Examples
|
||||
|
||||
### Safe Configuration for Untrusted Agents
|
||||
|
||||
```python
|
||||
from agentlite.tools import ToolSuiteConfig, FileToolsConfig, ShellToolsConfig
|
||||
|
||||
# Safe config - read-only file access, no shell
|
||||
safe_config = ToolSuiteConfig(
|
||||
file_tools=FileToolsConfig(
|
||||
allow_write_outside_work_dir=False,
|
||||
),
|
||||
shell_tools=ShellToolsConfig(enabled=False),
|
||||
)
|
||||
|
||||
toolset = ConfigurableToolset(safe_config)
|
||||
```
|
||||
|
||||
### Using Individual Tools
|
||||
|
||||
```python
|
||||
from agentlite.tools.file import ReadFile, Glob
|
||||
from pathlib import Path
|
||||
|
||||
# Create tools directly
|
||||
read_tool = ReadFile(work_dir=Path("."))
|
||||
glob_tool = Glob(work_dir=Path("."))
|
||||
|
||||
# Use tools
|
||||
result = await read_tool.read({"path": "README.md"})
|
||||
if not result.is_error:
|
||||
print(result.output)
|
||||
|
||||
result = await glob_tool.glob({"pattern": "*.py"})
|
||||
if not result.is_error:
|
||||
print(result.output)
|
||||
```
|
||||
|
||||
### Configuration from File
|
||||
|
||||
```python
|
||||
import json
|
||||
from agentlite.tools import ToolSuiteConfig
|
||||
|
||||
# Load config from file
|
||||
with open("tool_config.json") as f:
|
||||
config_dict = json.load(f)
|
||||
|
||||
config = ToolSuiteConfig.model_validate(config_dict)
|
||||
toolset = ConfigurableToolset(config)
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### Config Classes
|
||||
|
||||
#### `ToolSuiteConfig`
|
||||
|
||||
Main configuration class for all tools.
|
||||
|
||||
```python
|
||||
class ToolSuiteConfig(BaseModel):
|
||||
file_tools: FileToolsConfig
|
||||
shell_tools: ShellToolsConfig
|
||||
web_tools: WebToolsConfig
|
||||
multiagent_tools: MultiAgentToolsConfig
|
||||
misc_tools: ToolGroupConfig
|
||||
```
|
||||
|
||||
#### `FileToolsConfig`
|
||||
|
||||
```python
|
||||
class FileToolsConfig(ToolGroupConfig):
|
||||
max_lines: int = 1000
|
||||
max_line_length: int = 2000
|
||||
max_bytes: int = 100 * 1024
|
||||
allow_write_outside_work_dir: bool = False
|
||||
max_glob_matches: int = 1000
|
||||
```
|
||||
|
||||
#### `ShellToolsConfig`
|
||||
|
||||
```python
|
||||
class ShellToolsConfig(ToolGroupConfig):
|
||||
timeout: int = 60
|
||||
max_timeout: int = 300
|
||||
blocked_commands: list[str] = []
|
||||
```
|
||||
|
||||
#### `WebToolsConfig`
|
||||
|
||||
```python
|
||||
class WebToolsConfig(ToolGroupConfig):
|
||||
timeout: int = 30
|
||||
user_agent: str = "Mozilla/5.0 ..."
|
||||
max_content_length: int = 1024 * 1024
|
||||
```
|
||||
|
||||
### ConfigurableToolset
|
||||
|
||||
```python
|
||||
class ConfigurableToolset(SimpleToolset):
|
||||
def __init__(
|
||||
self,
|
||||
config: ToolSuiteConfig | None = None,
|
||||
work_dir: str | None = None,
|
||||
)
|
||||
|
||||
def reload(self, config: ToolSuiteConfig | None = None) -> None
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT License - same as AgentLite.
|
||||
Reference in New Issue
Block a user