feat: add a subagent frame
This commit is contained in:
116
agentlite/src/agentlite/__init__.py
Normal file
116
agentlite/src/agentlite/__init__.py
Normal file
@@ -0,0 +1,116 @@
|
||||
"""AgentLite - A lightweight, async-first Agent component library.
|
||||
|
||||
AgentLite provides clean abstractions for building LLM-powered agents with
|
||||
OpenAI-compatible APIs, supporting tools (including MCP), streaming, and
|
||||
multi-agent usage.
|
||||
|
||||
Example:
|
||||
>>> import asyncio
|
||||
>>> from agentlite import Agent, OpenAIProvider
|
||||
>>>
|
||||
>>> async def main():
|
||||
... provider = OpenAIProvider(api_key="sk-...", model="gpt-4")
|
||||
... agent = Agent(provider=provider, system_prompt="You are helpful.")
|
||||
... response = await agent.run("Hello!")
|
||||
... print(response)
|
||||
>>>
|
||||
>>> asyncio.run(main())
|
||||
"""
|
||||
|
||||
__version__ = "0.1.0"
|
||||
|
||||
# Core types
|
||||
from agentlite.message import (
|
||||
ContentPart,
|
||||
Message,
|
||||
Role,
|
||||
TextPart,
|
||||
ImageURLPart,
|
||||
AudioURLPart,
|
||||
ToolCall,
|
||||
ToolCallPart,
|
||||
)
|
||||
from agentlite.tool import (
|
||||
Tool,
|
||||
ToolResult,
|
||||
ToolOk,
|
||||
ToolError,
|
||||
CallableTool,
|
||||
CallableTool2,
|
||||
SimpleToolset,
|
||||
tool,
|
||||
)
|
||||
from agentlite.provider import (
|
||||
ChatProvider,
|
||||
StreamedMessage,
|
||||
TokenUsage,
|
||||
ChatProviderError,
|
||||
APIConnectionError,
|
||||
APITimeoutError,
|
||||
APIStatusError,
|
||||
)
|
||||
|
||||
# Configuration
|
||||
from agentlite.config import (
|
||||
ProviderConfig,
|
||||
ModelConfig,
|
||||
AgentConfig,
|
||||
)
|
||||
|
||||
# Agent
|
||||
from agentlite.agent import Agent
|
||||
|
||||
# MCP
|
||||
from agentlite.mcp import MCPClient
|
||||
|
||||
# OpenAI Provider
|
||||
from agentlite.providers.openai import OpenAIProvider
|
||||
|
||||
# LLM Client
|
||||
from agentlite.llm_client import LLMClient, LLMResponse, llm_complete, llm_stream
|
||||
|
||||
__all__ = [
|
||||
# Version
|
||||
"__version__",
|
||||
# Message types
|
||||
"ContentPart",
|
||||
"Message",
|
||||
"Role",
|
||||
"TextPart",
|
||||
"ImageURLPart",
|
||||
"AudioURLPart",
|
||||
"ToolCall",
|
||||
"ToolCallPart",
|
||||
# Tool types
|
||||
"Tool",
|
||||
"ToolResult",
|
||||
"ToolOk",
|
||||
"ToolError",
|
||||
"CallableTool",
|
||||
"CallableTool2",
|
||||
"SimpleToolset",
|
||||
"tool",
|
||||
# Provider types
|
||||
"ChatProvider",
|
||||
"StreamedMessage",
|
||||
"TokenUsage",
|
||||
"ChatProviderError",
|
||||
"APIConnectionError",
|
||||
"APITimeoutError",
|
||||
"APIStatusError",
|
||||
# Configuration
|
||||
"ProviderConfig",
|
||||
"ModelConfig",
|
||||
"AgentConfig",
|
||||
# Agent
|
||||
"Agent",
|
||||
# MCP
|
||||
"MCPClient",
|
||||
# Providers
|
||||
"OpenAIProvider",
|
||||
# LLM Client
|
||||
"LLMClient",
|
||||
"LLMResponse",
|
||||
"llm_complete",
|
||||
"llm_stream",
|
||||
]
|
||||
452
agentlite/src/agentlite/agent.py
Normal file
452
agentlite/src/agentlite/agent.py
Normal file
@@ -0,0 +1,452 @@
|
||||
"""Main Agent class for AgentLite.
|
||||
|
||||
This module provides the core Agent class that orchestrates LLM interactions,
|
||||
tool calling, and conversation management.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import AsyncIterator, Sequence
|
||||
from typing import TYPE_CHECKING, Any, Optional
|
||||
|
||||
from agentlite.message import (
|
||||
ContentPart,
|
||||
Message,
|
||||
TextPart,
|
||||
ToolCall,
|
||||
ToolCallPart,
|
||||
)
|
||||
from agentlite.provider import ChatProvider, StreamedMessage, TokenUsage
|
||||
from agentlite.tool import SimpleToolset, Tool, ToolResult, ToolType
|
||||
from agentlite.labor_market import LaborMarket
|
||||
|
||||
if TYPE_CHECKING:
|
||||
pass
|
||||
|
||||
|
||||
class Agent:
|
||||
"""An LLM agent that can use tools and maintain conversation history.
|
||||
|
||||
The Agent class is the main interface for interacting with LLMs. It handles:
|
||||
- Sending messages to the LLM
|
||||
- Managing tool calls and execution
|
||||
- Maintaining conversation history
|
||||
- Streaming responses
|
||||
|
||||
Attributes:
|
||||
provider: The LLM provider to use.
|
||||
system_prompt: The system prompt for the agent.
|
||||
tools: The toolset containing available tools.
|
||||
history: The conversation history.
|
||||
|
||||
Example:
|
||||
>>> provider = OpenAIProvider(api_key="sk-...", model="gpt-4")
|
||||
>>> agent = Agent(
|
||||
... provider=provider,
|
||||
... system_prompt="You are a helpful assistant.",
|
||||
... )
|
||||
>>> response = await agent.run("Hello!")
|
||||
>>> print(response)
|
||||
"""
|
||||
|
||||
    def __init__(
        self,
        provider: ChatProvider,
        system_prompt: str = "You are a helpful assistant.",
        tools: Sequence[ToolType] | None = None,
        max_iterations: int = 80,
        labor_market: LaborMarket | None = None,
        name: str = "agent",
        allow_subagents: bool = False,
    ):
        """Initialize the agent.

        Args:
            provider: The LLM provider to use.
            system_prompt: The system prompt for the agent.
            tools: Optional sequence of tools to make available.
            max_iterations: Maximum number of tool call iterations per request.
            labor_market: Optional LaborMarket for managing subagents.
            name: Name of the agent (for identification in subagent hierarchies).
            allow_subagents: Whether this agent is allowed to register subagents.
        """
        self.provider = provider
        self.system_prompt = system_prompt
        # Tools are always wrapped in a SimpleToolset, even when None is given.
        self.tools = SimpleToolset(tools)
        self.max_iterations = max_iterations
        # Every agent owns a labor market so subagent lookups never need a
        # None check; an empty one is created when none is supplied.
        self.labor_market = labor_market or LaborMarket()
        self.name = name
        self.allow_subagents = allow_subagents
        # Conversation history; exposed read-only (as a copy) via `history`.
        self._history: list[Message] = []
|
||||
|
||||
@property
|
||||
def history(self) -> list[Message]:
|
||||
"""Get the conversation history.
|
||||
|
||||
Returns:
|
||||
A copy of the conversation history.
|
||||
"""
|
||||
return self._history.copy()
|
||||
|
||||
def clear_history(self) -> None:
|
||||
"""Clear the conversation history."""
|
||||
self._history.clear()
|
||||
|
||||
def add_message(self, message: Message) -> None:
|
||||
"""Add a message to the history.
|
||||
|
||||
Args:
|
||||
message: The message to add.
|
||||
"""
|
||||
self._history.append(message)
|
||||
|
||||
async def run(
|
||||
self,
|
||||
message: str,
|
||||
*,
|
||||
stream: bool = False,
|
||||
) -> str | AsyncIterator[str]:
|
||||
"""Run the agent with a user message.
|
||||
|
||||
This method sends the message to the LLM and handles any tool calls
|
||||
that the model requests. It continues the conversation until the
|
||||
model produces a final response without tool calls.
|
||||
|
||||
Args:
|
||||
message: The user message.
|
||||
stream: Whether to stream the response.
|
||||
|
||||
Returns:
|
||||
If stream=False: The complete response as a string.
|
||||
If stream=True: An async iterator yielding response chunks.
|
||||
|
||||
Example:
|
||||
# Non-streaming
|
||||
>>> response = await agent.run("What is 2 + 2?")
|
||||
>>> print(response)
|
||||
|
||||
# Streaming
|
||||
>>> async for chunk in await agent.run("Tell me a story", stream=True):
|
||||
... print(chunk, end="")
|
||||
"""
|
||||
# Add user message to history
|
||||
self._history.append(Message(role="user", content=message))
|
||||
|
||||
if stream:
|
||||
return self._run_streaming()
|
||||
else:
|
||||
return await self._run_non_streaming()
|
||||
|
||||
    async def _run_non_streaming(self) -> str:
        """Run the agent in non-streaming mode.

        Repeatedly queries the provider and executes any requested tool
        calls, until the model answers without tools or `max_iterations`
        is exhausted.

        Returns:
            The complete response as a string.
        """
        iterations = 0
        # Declared outside the loop so the max-iterations diagnostic below
        # can still see the tool calls from the final iteration.
        tool_calls: list[ToolCall] = []

        while iterations < self.max_iterations:
            iterations += 1

            # Generate response
            stream = await self.provider.generate(
                system_prompt=self.system_prompt,
                tools=self.tools.tools,
                history=self._history,
            )

            # Collect response parts
            response_parts: list[ContentPart] = []
            tool_calls: list[ToolCall] = []

            async for part in stream:
                if isinstance(part, ToolCall):
                    tool_calls.append(part)
                elif isinstance(part, ToolCallPart):
                    # Deltas merge into the most recent ToolCall; a delta
                    # arriving before any ToolCall is silently dropped.
                    if tool_calls:
                        tool_calls[-1].merge_in_place(part)
                elif isinstance(part, ContentPart):
                    response_parts.append(part)

            # Extract text from response (non-text parts are kept in the
            # history but contribute nothing to the returned string).
            response_text = ""
            for part in response_parts:
                if isinstance(part, TextPart):
                    response_text += part.text

            # Add assistant message to history
            self._history.append(
                Message(
                    role="assistant",
                    content=response_parts,
                    tool_calls=tool_calls if tool_calls else None,
                )
            )

            # If no tool calls, we're done
            if not tool_calls:
                return response_text

            # Execute tool calls
            tool_results = await self._execute_tool_calls(tool_calls)

            # Add tool results to history so the next iteration sees them
            for result in tool_results:
                self._history.append(
                    Message(
                        role="tool",
                        content=result.output,
                        tool_call_id=result.tool_call_id,
                    )
                )

        # Max iterations reached: build a best-effort diagnostic naming the
        # last tools requested; never let the diagnostic itself raise.
        last_tools_msg = ""
        try:
            if tool_calls:
                # NOTE(review): assumes ToolCall exposes `.function.name`
                # (OpenAI wire shape) — confirm against the ToolCall class.
                tool_names = [tc.function.name for tc in tool_calls if hasattr(tc, "function")]
                if tool_names:
                    last_tools_msg = f" Last tools called: {', '.join(tool_names)}."
        except Exception:
            pass

        return (
            f"Maximum tool call iterations reached ({self.max_iterations})."
            f"{last_tools_msg}"
            f" Consider increasing max_iterations or breaking the task into smaller steps."
        )
|
||||
|
||||
    async def _run_streaming(self) -> AsyncIterator[str]:
        """Run the agent in streaming mode.

        Same provider/tool loop as `_run_non_streaming`, except text parts
        are yielded to the caller as soon as they arrive instead of being
        accumulated into one string.

        Yields:
            Response text chunks.
        """
        iterations = 0
        # Declared outside the loop so the max-iterations diagnostic below
        # can still see the tool calls from the final iteration.
        tool_calls: list[ToolCall] = []

        while iterations < self.max_iterations:
            iterations += 1

            # Generate response
            stream = await self.provider.generate(
                system_prompt=self.system_prompt,
                tools=self.tools.tools,
                history=self._history,
            )

            # Collect response parts and yield text
            response_parts: list[ContentPart] = []
            tool_calls: list[ToolCall] = []

            async for part in stream:
                if isinstance(part, ToolCall):
                    tool_calls.append(part)
                elif isinstance(part, ToolCallPart):
                    # Deltas merge into the most recent ToolCall; a delta
                    # arriving before any ToolCall is silently dropped.
                    if tool_calls:
                        tool_calls[-1].merge_in_place(part)
                elif isinstance(part, ContentPart):
                    response_parts.append(part)
                    # Stream text to the caller immediately.
                    if isinstance(part, TextPart):
                        yield part.text

            # Add assistant message to history
            self._history.append(
                Message(
                    role="assistant",
                    content=response_parts,
                    tool_calls=tool_calls if tool_calls else None,
                )
            )

            # If no tool calls, we're done
            if not tool_calls:
                return

            # Execute tool calls
            tool_results = await self._execute_tool_calls(tool_calls)

            # Add tool results to history so the next iteration sees them
            for result in tool_results:
                self._history.append(
                    Message(
                        role="tool",
                        content=result.output,
                        tool_call_id=result.tool_call_id,
                    )
                )

        # Max iterations reached: yield a best-effort diagnostic naming the
        # last tools requested; never let the diagnostic itself raise.
        last_tools_msg = ""
        try:
            if tool_calls:
                # NOTE(review): assumes ToolCall exposes `.function.name`
                # (OpenAI wire shape) — confirm against the ToolCall class.
                tool_names = [tc.function.name for tc in tool_calls if hasattr(tc, "function")]
                if tool_names:
                    last_tools_msg = f" Last tools called: {', '.join(tool_names)}."
        except Exception:
            pass

        yield (
            f"Maximum tool call iterations reached ({self.max_iterations})."
            f"{last_tools_msg}"
            f" Consider increasing max_iterations or breaking the task into smaller steps."
        )
|
||||
|
||||
async def _execute_tool_calls(
|
||||
self,
|
||||
tool_calls: list[ToolCall],
|
||||
) -> list[_ToolResult]:
|
||||
"""Execute a list of tool calls.
|
||||
|
||||
Args:
|
||||
tool_calls: The tool calls to execute.
|
||||
|
||||
Returns:
|
||||
List of tool results.
|
||||
"""
|
||||
results: list[_ToolResult] = []
|
||||
|
||||
# Execute all tool calls concurrently
|
||||
futures = [self.tools.handle(tc) for tc in tool_calls]
|
||||
|
||||
for tc, future in zip(tool_calls, futures):
|
||||
try:
|
||||
if asyncio.isfuture(future):
|
||||
result = await future
|
||||
else:
|
||||
result = future
|
||||
|
||||
results.append(
|
||||
_ToolResult(
|
||||
tool_call_id=tc.id,
|
||||
output=result.output if isinstance(result, ToolResult) else str(result),
|
||||
is_error=result.is_error if isinstance(result, ToolResult) else False,
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
results.append(
|
||||
_ToolResult(
|
||||
tool_call_id=tc.id,
|
||||
output=str(e),
|
||||
is_error=True,
|
||||
)
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
    async def generate(
        self,
        message: str,
    ) -> Message:
        """Generate a single response without tool calling loop.

        This method sends a message to the LLM and returns the response
        without executing any tool calls. This is useful when you want
        to handle tool calls manually.

        Args:
            message: The user message.

        Returns:
            The assistant's response message. Its `tool_calls` field is
            populated (if the model requested any) but NOT executed.
        """
        # Add user message to history
        self._history.append(Message(role="user", content=message))

        # Generate response
        stream = await self.provider.generate(
            system_prompt=self.system_prompt,
            tools=self.tools.tools,
            history=self._history,
        )

        # Collect response parts
        response_parts: list[ContentPart] = []
        tool_calls: list[ToolCall] = []

        async for part in stream:
            if isinstance(part, ToolCall):
                tool_calls.append(part)
            elif isinstance(part, ToolCallPart):
                # Deltas merge into the most recent ToolCall; a delta
                # arriving before any ToolCall is silently dropped.
                if tool_calls:
                    tool_calls[-1].merge_in_place(part)
            elif isinstance(part, ContentPart):
                response_parts.append(part)

        # Create response message
        response = Message(
            role="assistant",
            content=response_parts,
            tool_calls=tool_calls if tool_calls else None,
        )

        # Add to history
        self._history.append(response)

        return response
|
||||
|
||||
def add_subagent(
|
||||
self,
|
||||
name: str,
|
||||
agent: Agent,
|
||||
description: str,
|
||||
dynamic: bool = False,
|
||||
) -> None:
|
||||
"""Add a subagent to this agent's labor market.
|
||||
|
||||
Args:
|
||||
name: Unique name for the subagent
|
||||
agent: The Agent instance to add
|
||||
description: Description of what the subagent does
|
||||
dynamic: If True, add as dynamic subagent; otherwise fixed
|
||||
"""
|
||||
if not self.allow_subagents:
|
||||
raise RuntimeError("Subagent delegation is disabled for this agent runtime.")
|
||||
|
||||
if dynamic:
|
||||
self.labor_market.add_dynamic_subagent(name, agent)
|
||||
else:
|
||||
self.labor_market.add_fixed_subagent(name, agent, description)
|
||||
|
||||
def get_subagent(self, name: str) -> Agent | None:
|
||||
"""Get a subagent by name.
|
||||
|
||||
Args:
|
||||
name: Name of the subagent
|
||||
|
||||
Returns:
|
||||
The subagent Agent if found, None otherwise
|
||||
"""
|
||||
return self.labor_market.get_subagent(name)
|
||||
|
||||
    def create_subagent_copy(self) -> Agent:
        """Create a copy of this agent for use as a subagent.

        The copy will have:
        - Same provider
        - Independent history (empty)
        - Empty labor market (subagents cannot have their own subagents by default)

        Returns:
            A new Agent instance configured as a subagent
        """
        return Agent(
            provider=self.provider,
            system_prompt=self.system_prompt,
            # NOTE(review): reaches into SimpleToolset's private `_tools`
            # dict — prefer a public accessor on SimpleToolset if one exists.
            tools=list(self.tools._tools.values()),
            max_iterations=self.max_iterations,
            labor_market=LaborMarket(),  # Empty labor market
            allow_subagents=False,
            name=f"{self.name}_sub",
        )
|
||||
|
||||
|
||||
class _ToolResult:
|
||||
"""Internal class for tool execution results."""
|
||||
|
||||
def __init__(self, tool_call_id: str, output: str, is_error: bool):
|
||||
self.tool_call_id = tool_call_id
|
||||
self.output = output
|
||||
self.is_error = is_error
|
||||
201
agentlite/src/agentlite/config.py
Normal file
201
agentlite/src/agentlite/config.py
Normal file
@@ -0,0 +1,201 @@
|
||||
"""Configuration models for AgentLite.
|
||||
|
||||
This module provides Pydantic-based configuration models for providers,
|
||||
models, and agent settings.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Literal, Optional, Union
|
||||
|
||||
from pydantic import BaseModel, Field, SecretStr, model_validator
|
||||
|
||||
|
||||
ProviderType = Literal["openai", "anthropic", "google", "custom"]
|
||||
|
||||
|
||||
ModelCapability = Literal[
|
||||
"streaming",
|
||||
"tool_calling",
|
||||
"vision",
|
||||
"json_mode",
|
||||
"function_calling",
|
||||
]
|
||||
|
||||
|
||||
class ProviderConfig(BaseModel):
    """Configuration for an LLM provider.

    Attributes:
        type: The provider type (openai, anthropic, etc.)
        base_url: The API base URL
        api_key: The API key (a SecretStr, so it is masked in reprs/logs)
        headers: Additional headers to include in requests
        timeout: Request timeout in seconds

    Example:
        >>> config = ProviderConfig(
        ...     type="openai",
        ...     base_url="https://api.openai.com/v1",
        ...     api_key="sk-...",
        ... )
    """

    type: ProviderType = "openai"
    base_url: str = "https://api.openai.com/v1"
    api_key: SecretStr
    headers: dict[str, str] = Field(default_factory=dict)
    timeout: float = 60.0

    @model_validator(mode="after")
    def validate_base_url(self) -> "ProviderConfig":
        """Validate that base_url is a valid URL (scheme prefix check only)."""
        if not self.base_url.startswith(("http://", "https://")):
            raise ValueError("base_url must start with http:// or https://")
        return self
|
||||
|
||||
|
||||
class ModelConfig(BaseModel):
    """Configuration for an LLM model.

    Attributes:
        provider: Name of the provider to use (should match a key in
            AgentConfig.providers — cross-checked by AgentConfig)
        model: The model name/ID
        max_tokens: Maximum tokens to generate
        temperature: Sampling temperature
        top_p: Nucleus sampling parameter
        capabilities: Set of model capabilities

    Example:
        >>> config = ModelConfig(
        ...     provider="openai",
        ...     model="gpt-4",
        ...     temperature=0.7,
        ... )
    """

    provider: str
    model: str
    max_tokens: Optional[int] = None
    # Bounds mirror common API limits: temperature in [0, 2], top_p in [0, 1].
    temperature: Optional[float] = Field(default=None, ge=0.0, le=2.0)
    top_p: Optional[float] = Field(default=None, ge=0.0, le=1.0)
    capabilities: set[ModelCapability] = Field(default_factory=set)

    @model_validator(mode="after")
    def validate_provider(self) -> "ModelConfig":
        """Validate provider is not empty."""
        if not self.provider:
            raise ValueError("provider must not be empty")
        return self
|
||||
|
||||
|
||||
class ToolConfig(BaseModel):
    """Configuration for tool usage.

    Attributes:
        max_iterations: Maximum number of tool call iterations (1-100)
        timeout: Timeout for tool execution in seconds
    """

    # Bounded so a runaway tool loop cannot run indefinitely.
    max_iterations: int = Field(default=80, ge=1, le=100)
    timeout: float = 60.0
|
||||
|
||||
|
||||
class AgentConfig(BaseModel):
    """Complete configuration for an Agent.

    This combines provider, model, and behavior settings into a single
    configuration object.

    Attributes:
        name: Optional name for the agent
        system_prompt: The system prompt to use
        providers: Dictionary of provider configurations
        models: Dictionary of model configurations
        default_model: Name of the default model to use
        tools: Tool configuration
        max_history: Maximum number of messages to keep in history

    Example:
        >>> config = AgentConfig(
        ...     name="my_agent",
        ...     system_prompt="You are a helpful assistant.",
        ...     providers={
        ...         "openai": ProviderConfig(
        ...             type="openai",
        ...             api_key="sk-...",
        ...         )
        ...     },
        ...     models={
        ...         "gpt4": ModelConfig(
        ...             provider="openai",
        ...             model="gpt-4",
        ...         )
        ...     },
        ...     default_model="gpt4",
        ... )
    """

    name: str = "agent"
    system_prompt: str = "You are a helpful assistant."
    providers: dict[str, ProviderConfig] = Field(default_factory=dict)
    models: dict[str, ModelConfig] = Field(default_factory=dict)
    # NOTE(review): with the defaults ("default" + empty `models`),
    # `AgentConfig()` fails validate_default_model — a bare AgentConfig is
    # unconstructible. Confirm this is intended (forcing explicit config).
    default_model: str = "default"
    tools: ToolConfig = Field(default_factory=ToolConfig)
    max_history: int = Field(default=100, ge=1)

    @model_validator(mode="after")
    def validate_default_model(self) -> "AgentConfig":
        """Validate that default_model exists in models."""
        if self.default_model and self.default_model not in self.models:
            raise ValueError(f"default_model '{self.default_model}' not found in models")
        return self

    @model_validator(mode="after")
    def validate_model_providers(self) -> "AgentConfig":
        """Validate that all model providers exist."""
        for model_name, model_config in self.models.items():
            if model_config.provider not in self.providers:
                raise ValueError(
                    f"Model '{model_name}' references unknown provider '{model_config.provider}'"
                )
        return self

    def get_provider_config(self, model_name: Optional[str] = None) -> ProviderConfig:
        """Get the provider config for a model.

        Args:
            model_name: Name of the model. If None, uses default_model.

        Returns:
            The provider configuration for the model.

        Raises:
            ValueError: If the model or provider is not found.
        """
        model_name = model_name or self.default_model
        if model_name not in self.models:
            raise ValueError(f"Model '{model_name}' not found")

        model_config = self.models[model_name]
        if model_config.provider not in self.providers:
            raise ValueError(f"Provider '{model_config.provider}' not found")

        return self.providers[model_config.provider]

    def get_model_config(self, model_name: Optional[str] = None) -> ModelConfig:
        """Get the configuration for a model.

        Args:
            model_name: Name of the model. If None, uses default_model.

        Returns:
            The model configuration.

        Raises:
            ValueError: If the model is not found.
        """
        model_name = model_name or self.default_model
        if model_name not in self.models:
            raise ValueError(f"Model '{model_name}' not found")
        return self.models[model_name]
|
||||
182
agentlite/src/agentlite/labor_market.py
Normal file
182
agentlite/src/agentlite/labor_market.py
Normal file
@@ -0,0 +1,182 @@
|
||||
"""Labor Market for managing subagents in AgentLite.
|
||||
|
||||
This module provides the LaborMarket class for managing subagents
|
||||
in a hierarchical agent architecture, similar to kimi-cli's approach.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from agentlite.agent import Agent
|
||||
|
||||
|
||||
class LaborMarket:
    """Registry of subagents available to a parent agent.

    Two pools of subagents are kept: *fixed* ones (declared in
    configuration, loaded at startup, each with a human-readable
    description) and *dynamic* ones (created at runtime, e.g. via a
    CreateSubagent tool). Names are unique across both pools, and the
    class supports the container protocol (`in`, `[]`, `iter`, `len`)
    over all subagents.

    Example:
        >>> market = LaborMarket()
        >>> market.add_fixed_subagent("coder", coder_agent, "Writes code")
        >>> market.add_dynamic_subagent("temp", temp_agent)
        >>> agent = market.get_subagent("coder")
    """

    def __init__(self):
        """Create a market with no registered subagents."""
        self._fixed_subagents: dict[str, Agent] = {}
        self._fixed_subagent_descs: dict[str, str] = {}
        self._dynamic_subagents: dict[str, Agent] = {}

    def _ensure_unique(self, name: str) -> None:
        """Reject *name* if any subagent (fixed or dynamic) already uses it."""
        if name in self.subagents:
            raise ValueError(f"Subagent '{name}' already exists")

    @property
    def subagents(self) -> dict[str, Agent]:
        """All subagents, fixed and dynamic, keyed by name (a fresh dict)."""
        merged: dict[str, Agent] = dict(self._fixed_subagents)
        merged.update(self._dynamic_subagents)
        return merged

    @property
    def fixed_subagents(self) -> dict[str, Agent]:
        """A copy of the fixed (configuration-defined) subagent pool."""
        return dict(self._fixed_subagents)

    @property
    def dynamic_subagents(self) -> dict[str, Agent]:
        """A copy of the dynamic (runtime-created) subagent pool."""
        return dict(self._dynamic_subagents)

    @property
    def subagent_descriptions(self) -> dict[str, str]:
        """Descriptions keyed by name; only fixed subagents carry one."""
        return dict(self._fixed_subagent_descs)

    def add_fixed_subagent(self, name: str, agent: Agent, description: str) -> None:
        """Register a fixed (configuration-defined) subagent.

        Args:
            name: Unique name for the subagent
            agent: The Agent instance
            description: Description of what the subagent does

        Raises:
            ValueError: If a subagent with the same name already exists.
        """
        self._ensure_unique(name)
        self._fixed_subagents[name] = agent
        self._fixed_subagent_descs[name] = description

    def add_dynamic_subagent(self, name: str, agent: Agent) -> None:
        """Register a dynamic (runtime-created) subagent.

        Args:
            name: Unique name for the subagent
            agent: The Agent instance

        Raises:
            ValueError: If a subagent with the same name already exists.
        """
        self._ensure_unique(name)
        self._dynamic_subagents[name] = agent

    def get_subagent(self, name: str) -> Optional[Agent]:
        """Return the subagent registered under *name*, or None.

        Args:
            name: Name of the subagent
        """
        # Names are unique across pools, so check each pool directly
        # instead of materializing the merged dict.
        for pool in (self._dynamic_subagents, self._fixed_subagents):
            if name in pool:
                return pool[name]
        return None

    def has_subagent(self, name: str) -> bool:
        """Return True when a subagent named *name* exists in either pool."""
        return name in self._fixed_subagents or name in self._dynamic_subagents

    def remove_subagent(self, name: str) -> bool:
        """Remove the subagent named *name*.

        Returns:
            True if the subagent was removed, False if it didn't exist.
        """
        _missing = object()  # sentinel: distinguishes "absent" from a None value
        if self._fixed_subagents.pop(name, _missing) is not _missing:
            del self._fixed_subagent_descs[name]
            return True
        return self._dynamic_subagents.pop(name, _missing) is not _missing

    def list_subagents(self) -> list[str]:
        """Return the names of every registered subagent."""
        return [*self.subagents]

    def __contains__(self, name: str) -> bool:
        """Support `name in market`."""
        return self.has_subagent(name)

    def __getitem__(self, name: str) -> Agent:
        """Support `market[name]`; raises KeyError when absent."""
        agent = self.get_subagent(name)
        if agent is None:
            raise KeyError(f"Subagent '{name}' not found")
        return agent

    def __iter__(self):
        """Iterate over subagent names."""
        return iter(self.subagents)

    def __len__(self) -> int:
        """Number of registered subagents across both pools."""
        return len(self.subagents)
|
||||
361
agentlite/src/agentlite/llm_client.py
Normal file
361
agentlite/src/agentlite/llm_client.py
Normal file
@@ -0,0 +1,361 @@
|
||||
"""Simple LLM client for direct LLM calls without agent overhead.
|
||||
|
||||
This module provides a simple interface for making direct LLM calls,
|
||||
reusing the agentlite configuration system.
|
||||
|
||||
Example:
|
||||
>>> from agentlite import LLMClient, AgentConfig, ProviderConfig, ModelConfig
|
||||
>>>
|
||||
>>> # Using configuration
|
||||
>>> config = AgentConfig(
|
||||
... providers={"openai": ProviderConfig(api_key="sk-...")},
|
||||
... models={"gpt4": ModelConfig(provider="openai", model="gpt-4")},
|
||||
... default_model="gpt4",
|
||||
... )
|
||||
>>> client = LLMClient(config)
|
||||
>>>
|
||||
>>> # Simple completion
|
||||
>>> response = await client.complete(
|
||||
... system_prompt="You are a helpful assistant.", user_prompt="What is Python?"
|
||||
... )
|
||||
>>> print(response)
|
||||
|
||||
>>> # Streaming
|
||||
>>> async for chunk in client.stream(
|
||||
... system_prompt="You are a helpful assistant.", user_prompt="Tell me a story"
|
||||
... ):
|
||||
... print(chunk, end="")
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import AsyncIterator
|
||||
from typing import Optional
|
||||
|
||||
from agentlite.config import AgentConfig, ModelConfig, ProviderConfig
|
||||
from agentlite.message import Message, TextPart
|
||||
from agentlite.provider import ChatProvider, TokenUsage
|
||||
from agentlite.providers.openai import OpenAIProvider
|
||||
from agentlite.tool import Tool
|
||||
|
||||
|
||||
class LLMResponse:
    """Result of a single LLM completion call.

    Attributes:
        content: The complete response text
        usage: Token usage statistics (None when the provider gave none)
        model: The model name used
    """

    def __init__(self, content: str, usage: TokenUsage | None = None, model: str = ""):
        self.content = content
        self.usage = usage
        self.model = model

    def __str__(self) -> str:
        # Lets the response object be used directly where a string is expected.
        return self.content

    def __repr__(self) -> str:
        return f"LLMResponse(content={self.content[:50]}..., model={self.model})"
|
||||
|
||||
|
||||
class LLMClient:
    """Simple client for direct LLM calls.

    This client provides a simple interface for calling LLMs without the
    overhead of an Agent. It reuses the agentlite configuration system.

    Example:
        >>> # Using AgentConfig
        >>> config = AgentConfig(...)
        >>> client = LLMClient(config)
        >>>
        >>> # Using provider directly
        >>> provider = OpenAIProvider(api_key="sk-...", model="gpt-4")
        >>> client = LLMClient(provider=provider)
        >>>
        >>> # Make a call
        >>> response = await client.complete(system_prompt="You are helpful.", user_prompt="Hello!")
    """

    def __init__(
        self,
        config: Optional[AgentConfig] = None,
        provider: Optional[ChatProvider] = None,
        model: Optional[str] = None,
    ):
        """Initialize the LLM client.

        Args:
            config: AgentConfig to use for provider/model configuration.
            provider: Direct provider instance (alternative to config).
            model: Model name to use (when using config).

        Raises:
            ValueError: If neither config nor provider is provided.
        """
        if provider is not None:
            # Direct provider: no config, so sampling overrides are impossible.
            self._provider = provider
            self._model_config = None
        elif config is not None:
            self._config = config
            self._model_name = model or config.default_model
            self._provider = self._create_provider()
            self._model_config = config.get_model_config(self._model_name)
        else:
            raise ValueError("Either config or provider must be provided")

    def _create_provider(self) -> ChatProvider:
        """Create a provider instance from config (no sampling overrides)."""
        if not hasattr(self, "_config"):
            raise RuntimeError("No config available")
        return self._build_openai_provider()

    def _build_openai_provider(self, **sampling: object) -> ChatProvider:
        """Construct an OpenAIProvider from the stored config.

        Shared by _create_provider and _create_provider_with_params so the
        provider-construction logic lives in exactly one place.

        Args:
            **sampling: Optional sampling keyword args (temperature,
                max_tokens) forwarded to the provider constructor.

        Raises:
            ValueError: If the configured provider type is unsupported.
        """
        provider_config = self._config.get_provider_config(self._model_name)
        model_config = self._config.get_model_config(self._model_name)

        if provider_config.type != "openai":
            raise ValueError(f"Unsupported provider type: {provider_config.type}")

        return OpenAIProvider(
            api_key=provider_config.api_key.get_secret_value(),
            model=model_config.model,
            base_url=provider_config.base_url,
            timeout=provider_config.timeout,
            **sampling,
        )

    async def complete(
        self,
        user_prompt: str,
        system_prompt: str = "You are a helpful assistant.",
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> LLMResponse:
        """Make a non-streaming LLM call.

        Args:
            user_prompt: The user message/prompt.
            system_prompt: The system prompt (default: "You are a helpful assistant.").
            temperature: Sampling temperature (overrides config if provided).
            max_tokens: Maximum tokens to generate (overrides config if provided).

        Returns:
            LLMResponse containing the complete response text and metadata.

        Example:
            >>> response = await client.complete(user_prompt="What is the capital of France?")
            >>> print(response.content)
            "The capital of France is Paris."
        """
        messages = [Message(role="user", content=user_prompt)]

        # Rebuild the provider only when the caller overrides sampling params.
        provider = self._provider
        if temperature is not None or max_tokens is not None:
            provider = self._create_provider_with_params(temperature, max_tokens)

        stream = await provider.generate(
            system_prompt=system_prompt,
            tools=[],  # No tools for simple LLM calls
            history=messages,
        )

        content_parts = []
        usage = None

        async for part in stream:
            if isinstance(part, TextPart):
                content_parts.append(part.text)
            # Opportunistically pick up usage stats if the stream exposes them.
            # Was a bare `except:` (which would also swallow KeyboardInterrupt
            # and SystemExit); narrowed to Exception.
            try:
                if usage is None and hasattr(stream, "usage") and stream.usage:
                    usage = stream.usage
            except Exception:
                pass

        content = "".join(content_parts)
        model_name = getattr(
            provider, "model_name", self._model_config.model if self._model_config else "unknown"
        )

        return LLMResponse(
            content=content,
            usage=usage,
            model=model_name,
        )

    async def stream(
        self,
        user_prompt: str,
        system_prompt: str = "You are a helpful assistant.",
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> AsyncIterator[str]:
        """Make a streaming LLM call.

        Args:
            user_prompt: The user message/prompt.
            system_prompt: The system prompt (default: "You are a helpful assistant.").
            temperature: Sampling temperature (overrides config if provided).
            max_tokens: Maximum tokens to generate (overrides config if provided).

        Yields:
            Response text chunks as they arrive.

        Example:
            >>> async for chunk in client.stream(user_prompt="Write a poem about AI"):
            ...     print(chunk, end="")
        """
        messages = [Message(role="user", content=user_prompt)]

        # Rebuild the provider only when the caller overrides sampling params.
        provider = self._provider
        if temperature is not None or max_tokens is not None:
            provider = self._create_provider_with_params(temperature, max_tokens)

        stream = await provider.generate(
            system_prompt=system_prompt,
            tools=[],  # No tools for simple LLM calls
            history=messages,
        )

        async for part in stream:
            if isinstance(part, TextPart):
                yield part.text

    def _create_provider_with_params(
        self,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> ChatProvider:
        """Create a provider with overridden sampling parameters.

        Falls back to the configured model defaults for any parameter the
        caller did not override.
        """
        if not hasattr(self, "_config"):
            # Without a config we cannot rebuild the provider; use the one
            # supplied at construction time (overrides silently ignored).
            return self._provider

        model_config = self._config.get_model_config(self._model_name)

        temp = temperature if temperature is not None else model_config.temperature
        max_tok = max_tokens if max_tokens is not None else model_config.max_tokens

        return self._build_openai_provider(temperature=temp, max_tokens=max_tok)
||||
|
||||
|
||||
# Convenience functions for simple use cases
|
||||
|
||||
|
||||
async def llm_complete(
    user_prompt: str,
    system_prompt: str = "You are a helpful assistant.",
    api_key: Optional[str] = None,
    model: str = "gpt-4",
    base_url: str = "https://api.openai.com/v1",
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
) -> str:
    """One-off, non-streaming LLM completion.

    Convenience wrapper for simple cases where keeping a client instance
    around is not worth it: builds a throwaway provider and client, makes
    a single call, and returns just the text.

    Args:
        user_prompt: The user message/prompt.
        system_prompt: The system prompt.
        api_key: API key (if not provided, must be set in env).
        model: Model name (default: gpt-4).
        base_url: API base URL.
        temperature: Sampling temperature.
        max_tokens: Maximum tokens to generate.

    Returns:
        The response text.

    Example:
        >>> response = await llm_complete(
        ...     user_prompt="What is 2+2?",
        ...     api_key="sk-...",
        ...     model="gpt-4",
        ... )
        >>> print(response)
        "2+2 equals 4."
    """
    client = LLMClient(
        provider=OpenAIProvider(api_key=api_key, model=model, base_url=base_url)
    )
    result = await client.complete(
        user_prompt=user_prompt,
        system_prompt=system_prompt,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return result.content
||||
|
||||
|
||||
async def llm_stream(
    user_prompt: str,
    system_prompt: str = "You are a helpful assistant.",
    api_key: Optional[str] = None,
    model: str = "gpt-4",
    base_url: str = "https://api.openai.com/v1",
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
) -> AsyncIterator[str]:
    """One-off, streaming LLM completion.

    Convenience wrapper for simple cases where keeping a client instance
    around is not worth it: builds a throwaway provider and client and
    forwards the streamed text chunks.

    Args:
        user_prompt: The user message/prompt.
        system_prompt: The system prompt.
        api_key: API key (if not provided, must be set in env).
        model: Model name (default: gpt-4).
        base_url: API base URL.
        temperature: Sampling temperature.
        max_tokens: Maximum tokens to generate.

    Yields:
        Response text chunks.

    Example:
        >>> async for chunk in llm_stream(
        ...     user_prompt="Write a haiku",
        ...     api_key="sk-...",
        ... ):
        ...     print(chunk, end="")
    """
    client = LLMClient(
        provider=OpenAIProvider(api_key=api_key, model=model, base_url=base_url)
    )
    async for piece in client.stream(
        user_prompt=user_prompt,
        system_prompt=system_prompt,
        temperature=temperature,
        max_tokens=max_tokens,
    ):
        yield piece
||||
214
agentlite/src/agentlite/mcp.py
Normal file
214
agentlite/src/agentlite/mcp.py
Normal file
@@ -0,0 +1,214 @@
|
||||
"""MCP (Model Context Protocol) integration for AgentLite.
|
||||
|
||||
This module provides integration with MCP servers, allowing agents to use
|
||||
tools from external MCP-compatible servers.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from agentlite.message import TextPart
|
||||
from agentlite.tool import CallableTool, ToolOk, ToolResult, ToolError
|
||||
|
||||
if TYPE_CHECKING:
|
||||
pass
|
||||
|
||||
|
||||
class MCPClient:
    """Client for connecting to MCP servers.

    This client allows you to connect to MCP servers and load their tools
    into AgentLite agents.

    Example:
        >>> client = MCPClient()
        >>> await client.connect_stdio(
        ...     "npx", ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"]
        ... )
        >>> tools = await client.load_tools()
        >>> agent = Agent(provider=provider, tools=tools)
    """

    def __init__(self):
        """Initialize the MCP client (not yet connected)."""
        self._client: Any | None = None  # underlying fastmcp.Client, once connected
        self._connected = False

    def _check_fastmcp(self) -> None:
        """Raise an informative ImportError if 'fastmcp' is not installed."""
        try:
            import fastmcp  # noqa: F401
        except ImportError as e:
            raise ImportError(
                "MCP support requires 'fastmcp' package. Install with: pip install agentlite[mcp]"
            ) from e

    async def connect_stdio(
        self,
        command: str,
        args: list[str] | None = None,
        env: dict[str, str] | None = None,
    ) -> None:
        """Connect to an MCP server via stdio.

        Args:
            command: The command to run.
            args: Optional arguments for the command.
            env: Optional environment variables.

        Raises:
            RuntimeError: If already connected.
            ImportError: If the 'fastmcp' dependency is missing.
            ConnectionError: If the connection fails.
        """
        if self._connected:
            raise RuntimeError("Already connected to an MCP server")

        # Check the optional dependency up front so a missing package surfaces
        # as the helpful install-hint ImportError instead of being wrapped
        # into a generic ConnectionError below (previously _check_fastmcp was
        # defined but never called).
        self._check_fastmcp()

        try:
            from fastmcp import Client
            from fastmcp.client.transports import PythonStdioTransport

            # NOTE(review): PythonStdioTransport looks script-oriented; for
            # arbitrary commands (e.g. "npx") confirm this is the intended
            # fastmcp transport.
            transport = PythonStdioTransport(
                command_or_script=command,
                args=args or [],
                env=env,
            )
            self._client = Client(transport)
            self._connected = True
        except Exception as e:
            raise ConnectionError(f"Failed to connect to MCP server: {e}") from e

    async def connect_sse(
        self,
        url: str,
        headers: dict[str, str] | None = None,
    ) -> None:
        """Connect to an MCP server via Server-Sent Events (SSE).

        Args:
            url: The SSE endpoint URL.
            headers: Optional headers to include in requests.

        Raises:
            RuntimeError: If already connected.
            ImportError: If the 'fastmcp' dependency is missing.
            ConnectionError: If the connection fails.
        """
        if self._connected:
            raise RuntimeError("Already connected to an MCP server")

        # Same up-front dependency check as connect_stdio.
        self._check_fastmcp()

        try:
            from fastmcp import Client
            from fastmcp.client.transports import SSETransport

            transport = SSETransport(url=url, headers=headers)
            self._client = Client(transport)
            self._connected = True
        except Exception as e:
            raise ConnectionError(f"Failed to connect to MCP server: {e}") from e

    async def load_tools(self) -> list[CallableTool]:
        """Load tools from the connected MCP server.

        Returns:
            A list of CallableTool instances wrapping the MCP tools.

        Raises:
            RuntimeError: If not connected to an MCP server, or if loading
                the tool list fails.
        """
        if not self._connected or self._client is None:
            raise RuntimeError("Not connected to an MCP server")

        tools: list[CallableTool] = []

        try:
            # The fastmcp client is used as an async context manager per
            # operation; each wrapped tool re-enters it on invocation.
            async with self._client as client:
                mcp_tools = await client.list_tools()

                for mcp_tool in mcp_tools:
                    tool = _MCPTool(
                        client=self._client,
                        name=mcp_tool.name,
                        description=mcp_tool.description or "No description provided",
                        parameters=mcp_tool.inputSchema,
                    )
                    tools.append(tool)
        except Exception as e:
            raise RuntimeError(f"Failed to load MCP tools: {e}") from e

        return tools

    async def close(self) -> None:
        """Close the connection to the MCP server.

        Always resets the connection state, even when no underlying client
        exists or closing it fails (previously the state was only reset
        when a client object was present).
        """
        try:
            if self._client is not None:
                try:
                    await self._client.close()
                except Exception:
                    # Best-effort shutdown; the server may already be gone.
                    pass
        finally:
            self._client = None
            self._connected = False

    async def __aenter__(self) -> MCPClient:
        """Async context manager entry."""
        return self

    async def __aexit__(self, *args: Any) -> None:
        """Async context manager exit."""
        await self.close()
||||
|
||||
|
||||
class _MCPTool(CallableTool):
    """CallableTool adapter for a single tool exposed by an MCP server."""

    def __init__(
        self,
        client: Any,
        name: str,
        description: str,
        parameters: dict[str, Any],
    ):
        """Initialize the MCP tool wrapper.

        Args:
            client: The MCP client.
            name: The tool name.
            description: The tool description.
            parameters: The JSON schema for tool parameters.
        """
        self._client = client
        super().__init__(name=name, description=description, parameters=parameters)

    async def __call__(self, **kwargs: Any) -> ToolResult:
        """Execute the MCP tool and adapt its result.

        Args:
            **kwargs: The tool arguments.

        Returns:
            ToolOk with the joined text output on success, ToolError otherwise.
        """
        try:
            async with self._client as session:
                outcome = await session.call_tool(self.name, kwargs)

                # Flatten the MCP content items into plain text lines;
                # non-text items fall back to their str() form.
                lines = [
                    item.text if hasattr(item, "text") else str(item)
                    for item in outcome.content
                ]
                text = "\n".join(lines)

                if outcome.isError:
                    return ToolError(message=text or "Tool execution failed")
                return ToolOk(output=text)
        except Exception as e:
            return ToolError(message=f"MCP tool execution failed: {e}")
||||
292
agentlite/src/agentlite/message.py
Normal file
292
agentlite/src/agentlite/message.py
Normal file
@@ -0,0 +1,292 @@
|
||||
"""Core message types for AgentLite.
|
||||
|
||||
This module defines the message and content part types used throughout
|
||||
AgentLite for communication with LLM providers.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC
|
||||
from typing import Any, ClassVar, Literal, Optional, Union, cast
|
||||
|
||||
from pydantic import BaseModel, GetCoreSchemaHandler, field_validator
|
||||
from pydantic_core import core_schema
|
||||
|
||||
|
||||
Role = Literal["system", "user", "assistant", "tool"]
|
||||
|
||||
|
||||
class MergeableMixin:
    """Mixin granting content parts a streaming-merge hook.

    Subclasses override :meth:`merge_in_place` to absorb a compatible
    follow-up chunk during streaming; the base implementation merges
    nothing.
    """

    def merge_in_place(self, other: Any) -> bool:
        """Attempt to fold ``other`` into this part.

        Args:
            other: The candidate part to absorb.

        Returns:
            True if the merge succeeded, False otherwise.
        """
        # Not mergeable by default; subclasses opt in by overriding.
        return False
||||
|
||||
|
||||
class ContentPart(BaseModel, ABC, MergeableMixin):
    """Base class for message content parts.

    ContentPart uses a registry pattern to allow polymorphic validation
    of content part subclasses based on the 'type' field.

    Example:
        >>> text = TextPart(text="Hello")
        >>> print(text.model_dump())
        {'type': 'text', 'text': 'Hello'}
    """

    # Maps a 'type' discriminator string to the subclass that declared it.
    # Double-underscore name mangling keeps it private to this base class.
    __content_part_registry: ClassVar[dict[str, type["ContentPart"]]] = {}

    type: str

    def __init_subclass__(cls, **kwargs: Any) -> None:
        # Runs at class-creation time for every subclass and registers it
        # under its class-level 'type' default.
        # NOTE(review): getattr assumes the subclass's 'type' default is
        # visible as a plain class attribute at this point in pydantic's
        # class construction — confirm against the pydantic v2 metaclass.
        super().__init_subclass__(**kwargs)

        type_value = getattr(cls, "type", None)
        if type_value is None or not isinstance(type_value, str):
            raise ValueError(
                f"ContentPart subclass {cls.__name__} must have a 'type' field of type str"
            )

        cls.__content_part_registry[type_value] = cls

    @classmethod
    def __get_pydantic_core_schema__(
        cls, source_type: Any, handler: GetCoreSchemaHandler
    ) -> core_schema.CoreSchema:
        """Custom schema for polymorphic ContentPart validation.

        For the ContentPart base class itself, install a plain validator
        that dispatches dicts to the registered subclass named by their
        'type' key; concrete subclasses fall through to pydantic's
        default schema.
        """
        if cls.__name__ == "ContentPart":

            def validate_content_part(value: Any) -> Any:
                """Validate a value as a ContentPart subclass."""
                # Already an instance
                if hasattr(value, "__class__") and issubclass(value.__class__, cls):
                    return value

                # Dict with type field - dispatch to subclass
                if isinstance(value, dict) and "type" in value:
                    type_value = cast(dict[str, Any], value).get("type")
                    if not isinstance(type_value, str):
                        raise ValueError(f"Cannot validate {value} as ContentPart")
                    target_class = cls.__content_part_registry.get(type_value)
                    if target_class is None:
                        raise ValueError(f"Unknown content part type: {type_value}")
                    return target_class.model_validate(value)

                raise ValueError(f"Cannot validate {value} as ContentPart")

            return core_schema.no_info_plain_validator_function(validate_content_part)

        # For subclasses, use default schema
        return handler(source_type)
||||
|
||||
|
||||
class TextPart(ContentPart):
    """Plain-text content part.

    Attributes:
        text: The text content.

    Example:
        >>> part = TextPart(text="Hello, world!")
        >>> part.model_dump()
        {'type': 'text', 'text': 'Hello, world!'}
    """

    type: str = "text"
    text: str

    def merge_in_place(self, other: Any) -> bool:
        """Append another TextPart's text onto this one; reject other types."""
        if isinstance(other, TextPart):
            self.text += other.text
            return True
        return False
||||
|
||||
|
||||
class ImageURLPart(ContentPart):
    """Image URL content part.

    Attributes:
        image_url: The image URL configuration.

    Example:
        >>> part = ImageURLPart(
        ...     image_url=ImageURLPart.ImageURL(url="https://example.com/image.png")
        ... )
    """

    class ImageURL(BaseModel):
        """Image URL configuration."""

        url: str
        """The URL of the image. Can be a data URI like 'data:image/png;base64,...'."""
        detail: Optional[str] = None
        """The detail level: 'low', 'high', or 'auto'."""

    # The 'type' default doubles as the discriminator registered with
    # ContentPart's subclass registry (see ContentPart.__init_subclass__).
    type: str = "image_url"
    image_url: ImageURL
||||
|
||||
|
||||
class AudioURLPart(ContentPart):
    """Audio URL content part.

    Attributes:
        audio_url: The audio URL configuration.
    """

    class AudioURL(BaseModel):
        """Audio URL configuration."""

        url: str
        """The URL of the audio. Can be a data URI like 'data:audio/mp3;base64,...'."""

    # The 'type' default doubles as the discriminator registered with
    # ContentPart's subclass registry (see ContentPart.__init_subclass__).
    type: str = "audio_url"
    audio_url: AudioURL
||||
|
||||
|
||||
class ToolCall(BaseModel, MergeableMixin):
    """A tool call requested by the assistant.

    Attributes:
        id: Unique identifier for the tool call.
        function: The function to call.

    Example:
        >>> call = ToolCall(
        ...     id="call_123",
        ...     function=ToolCall.FunctionBody(name="add", arguments='{"a": 1, "b": 2}'),
        ... )
    """

    class FunctionBody(BaseModel):
        """Function call details."""

        name: str
        """The name of the tool to call."""
        arguments: str
        """The arguments as a JSON string."""

    type: Literal["function"] = "function"
    id: str
    function: FunctionBody

    def merge_in_place(self, other: Any) -> bool:
        """Merge a ToolCallPart into this ToolCall.

        During streaming the argument JSON arrives in fragments
        (ToolCallPart); each fragment is appended onto the accumulated
        arguments string. Any ToolCallPart is accepted — even one with no
        arguments_part — while all other types are rejected.
        """
        if not isinstance(other, ToolCallPart):
            return False
        if other.arguments_part:
            self.function.arguments += other.arguments_part
        return True
||||
|
||||
|
||||
class ToolCallPart(BaseModel, MergeableMixin):
    """A streamed fragment of a tool call.

    Represents one chunk of a tool call that is arriving incrementally.

    Attributes:
        arguments_part: A chunk of the arguments JSON, if any.
    """

    arguments_part: Optional[str] = None

    def merge_in_place(self, other: Any) -> bool:
        """Concatenate another fragment's argument chunk onto this one."""
        if not isinstance(other, ToolCallPart):
            return False
        if other.arguments_part:
            # Treat a missing accumulator as empty so the first chunk
            # simply becomes the accumulated value.
            self.arguments_part = (self.arguments_part or "") + other.arguments_part
        return True
||||
|
||||
|
||||
class Message(BaseModel):
    """A message in a conversation.

    Attributes:
        role: The role of the message sender.
        content: The content parts of the message.
        tool_calls: Tool calls requested by the assistant (only for assistant role).
        tool_call_id: The ID of the tool call being responded to (only for tool role).
        name: Optional name for the sender.

    Example:
        >>> msg = Message(role="user", content="Hello!")
        >>> print(msg.extract_text())
        Hello!
    """

    role: Role
    content: list[ContentPart]
    tool_calls: Optional[list[ToolCall]] = None
    tool_call_id: Optional[str] = None
    name: Optional[str] = None

    @field_validator("content", mode="before")
    @classmethod
    def _coerce_content(cls, value: Any) -> Any:
        """Coerce convenience content forms to a list of parts.

        Accepts a bare string (wrapped in a TextPart) or a single
        ContentPart instance (wrapped in a list). The latter mirrors the
        convenience offered by __init__, so model_validate behaves
        consistently with direct construction.
        """
        if isinstance(value, str):
            return [TextPart(text=value)]
        if isinstance(value, ContentPart):
            return [value]
        return value

    def __init__(
        self,
        *,
        role: Role,
        content: Union[list[ContentPart], ContentPart, str],
        tool_calls: Optional[list[ToolCall]] = None,
        tool_call_id: Optional[str] = None,
        name: Optional[str] = None,
    ) -> None:
        """Initialize a message.

        Args:
            role: The role of the message sender.
            content: The content, can be a string, single ContentPart, or list.
            tool_calls: Tool calls for assistant messages.
            tool_call_id: ID of the tool call being responded to.
            name: Optional name for the sender.
        """
        # Normalize the convenience forms before handing off to pydantic.
        if isinstance(content, str):
            content = [TextPart(text=content)]
        elif isinstance(content, ContentPart):
            content = [content]

        super().__init__(
            role=role,
            content=content,
            tool_calls=tool_calls,
            tool_call_id=tool_call_id,
            name=name,
        )

    def extract_text(self, sep: str = "") -> str:
        """Extract all text from the message content.

        Args:
            sep: Separator to use between text parts.

        Returns:
            Concatenated text from all TextPart instances.
        """
        return sep.join(part.text for part in self.content if isinstance(part, TextPart))

    def has_tool_calls(self) -> bool:
        """Check if this message contains tool calls.

        Returns:
            True if the message has at least one tool call.
        """
        return self.tool_calls is not None and len(self.tool_calls) > 0
||||
163
agentlite/src/agentlite/provider.py
Normal file
163
agentlite/src/agentlite/provider.py
Normal file
@@ -0,0 +1,163 @@
|
||||
"""Chat provider protocol and implementations for AgentLite.
|
||||
|
||||
This module defines the ChatProvider protocol that abstracts LLM providers
|
||||
and provides the base types for streaming responses.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import AsyncIterator, Sequence
|
||||
from typing import Protocol, runtime_checkable
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from agentlite.message import ContentPart, Message, ToolCall, ToolCallPart
|
||||
from agentlite.tool import Tool
|
||||
|
||||
|
||||
class TokenUsage(BaseModel):
    """Token usage statistics for a generation.

    Attributes:
        input_tokens: Number of input tokens used.
        output_tokens: Number of output tokens generated.
        cached_tokens: Number of cached input tokens (if applicable).

    Example:
        >>> usage = TokenUsage(input_tokens=100, output_tokens=50)
        >>> print(usage.total)
        150
    """

    input_tokens: int
    """Number of input tokens used."""

    output_tokens: int
    """Number of output tokens generated."""

    cached_tokens: int = 0
    """Number of cached input tokens (if applicable)."""

    @property
    def total(self) -> int:
        """Total tokens used (input + output)."""
        # cached_tokens is deliberately not added here — it is reported
        # for information only (NOTE(review): presumably a subset of
        # input_tokens; confirm against the provider's usage semantics).
        return self.input_tokens + self.output_tokens
||||
|
||||
|
||||
# StreamedPart is the union of everything a provider stream may yield:
# regular message content, a complete tool call, or a streamed tool-call
# fragment.
# NOTE(review): mid-module import — consider moving to the top-of-file
# import block for PEP 8 compliance.
from typing import Union

StreamedPart = Union[ContentPart, ToolCall, ToolCallPart]
||||
|
||||
|
||||
@runtime_checkable
class StreamedMessage(Protocol):
    """Protocol for streamed message responses.

    This protocol defines the interface for streaming responses from LLM
    providers. Implementations should yield content parts as they arrive.

    Example:
        >>> stream = await provider.generate(system_prompt, tools, history)
        >>> async for part in stream:
        ...     print(part)
    """

    # NOTE: @runtime_checkable isinstance() checks only verify that these
    # attributes exist on the object, not their signatures or types.

    def __aiter__(self) -> AsyncIterator[StreamedPart]:
        """Return an async iterator over the streamed parts."""
        ...

    @property
    def id(self) -> str | None:
        """The unique identifier of the message, if available."""
        ...

    @property
    def usage(self) -> TokenUsage | None:
        """Token usage statistics, if available."""
        ...
||||
|
||||
|
||||
class ChatProviderError(Exception):
    """Root of the chat-provider exception hierarchy.

    Attributes:
        message: Human-readable description of the failure.
    """

    def __init__(self, message: str):
        # Keep the text both in the Exception args (for str()) and as an
        # attribute so callers can read it without formatting the exception.
        self.message = message
        super().__init__(message)
||||
|
||||
|
||||
class APIConnectionError(ChatProviderError):
    """Error connecting to the API.

    Intended for transport-level failures where the request never reaches
    the server.
    """

    pass
||||
|
||||
|
||||
class APITimeoutError(ChatProviderError):
    """API request timed out.

    Intended for requests that exceeded the provider's configured timeout.
    """

    pass
||||
|
||||
|
||||
class APIStatusError(ChatProviderError):
    """API returned a non-success HTTP status code.

    Attributes:
        status_code: The HTTP status code returned.
    """

    def __init__(self, status_code: int, message: str):
        # Record the status code alongside the inherited message so
        # callers can branch on it (retry on 429/5xx, fail on 4xx, ...).
        self.status_code = status_code
        super().__init__(message)
||||
|
||||
|
||||
class APIEmptyResponseError(ChatProviderError):
    """API returned an empty response.

    Intended for responses that completed without yielding any usable
    content.
    """

    pass
||||
|
||||
|
||||
@runtime_checkable
class ChatProvider(Protocol):
    """Protocol for LLM chat providers.

    This protocol defines the interface that all LLM providers must implement.
    It supports both streaming and non-streaming generation.

    Example:
        >>> provider = OpenAIProvider(api_key="sk-...", model="gpt-4")
        >>> stream = await provider.generate(
        ...     system_prompt="You are helpful.",
        ...     tools=[],
        ...     history=[Message(role="user", content="Hello!")],
        ... )
        >>> async for part in stream:
        ...     print(part)
    """

    # NOTE: @runtime_checkable isinstance() checks only verify attribute
    # presence, not signatures.

    @property
    def model_name(self) -> str:
        """The name of the model being used."""
        ...

    async def generate(
        self,
        system_prompt: str,
        tools: Sequence[Tool],
        history: Sequence[Message],
    ) -> StreamedMessage:
        """Generate a response from the LLM.

        Args:
            system_prompt: The system prompt to use.
            tools: Available tools for the model to call.
            history: The conversation history.

        Returns:
            A streamed message that yields content parts.

        Raises:
            APIConnectionError: If the connection fails.
            APITimeoutError: If the request times out.
            APIStatusError: If the API returns an error status.
            APIEmptyResponseError: If the response is empty.
        """
        ...
||||
5
agentlite/src/agentlite/providers/__init__.py
Normal file
5
agentlite/src/agentlite/providers/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
"""Providers package for AgentLite."""
|
||||
|
||||
from agentlite.providers.openai import OpenAIProvider
|
||||
|
||||
__all__ = ["OpenAIProvider"]
|
||||
307
agentlite/src/agentlite/providers/openai.py
Normal file
307
agentlite/src/agentlite/providers/openai.py
Normal file
@@ -0,0 +1,307 @@
|
||||
"""OpenAI provider implementation for AgentLite.
|
||||
|
||||
This module provides an OpenAI-compatible chat provider that works with
|
||||
the OpenAI API and any OpenAI-compatible API (e.g., Moonshot, Together, etc.).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import uuid
|
||||
from collections.abc import AsyncIterator, Sequence
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
import httpx
|
||||
from openai import AsyncOpenAI, OpenAIError
|
||||
from openai.types.chat import (
|
||||
ChatCompletion,
|
||||
ChatCompletionChunk,
|
||||
ChatCompletionMessageParam,
|
||||
ChatCompletionToolParam,
|
||||
)
|
||||
|
||||
from agentlite.message import (
|
||||
Message,
|
||||
TextPart,
|
||||
ToolCall,
|
||||
ToolCallPart,
|
||||
)
|
||||
from agentlite.provider import (
|
||||
APIConnectionError,
|
||||
APIEmptyResponseError,
|
||||
APIStatusError,
|
||||
APITimeoutError,
|
||||
ChatProviderError,
|
||||
StreamedMessage,
|
||||
TokenUsage,
|
||||
)
|
||||
from agentlite.tool import Tool
|
||||
|
||||
if TYPE_CHECKING:
|
||||
pass
|
||||
|
||||
|
||||
def _convert_tool_to_openai(tool: Tool) -> ChatCompletionToolParam:
|
||||
"""Convert a Tool to OpenAI tool format.
|
||||
|
||||
Args:
|
||||
tool: The tool to convert.
|
||||
|
||||
Returns:
|
||||
The OpenAI tool format.
|
||||
"""
|
||||
return {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"parameters": tool.parameters,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def _convert_message_to_openai(message: Message) -> ChatCompletionMessageParam:
|
||||
"""Convert a Message to OpenAI message format.
|
||||
|
||||
Args:
|
||||
message: The message to convert.
|
||||
|
||||
Returns:
|
||||
The OpenAI message format.
|
||||
"""
|
||||
# Start with basic message
|
||||
result: dict[str, Any] = {
|
||||
"role": message.role,
|
||||
}
|
||||
|
||||
# Handle content
|
||||
if message.role == "tool":
|
||||
# Tool response message
|
||||
result["content"] = message.extract_text()
|
||||
result["tool_call_id"] = message.tool_call_id
|
||||
elif message.has_tool_calls():
|
||||
# Assistant message with tool calls
|
||||
result["content"] = message.extract_text() or None
|
||||
result["tool_calls"] = [
|
||||
{
|
||||
"id": tc.id,
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": tc.function.name,
|
||||
"arguments": tc.function.arguments,
|
||||
},
|
||||
}
|
||||
for tc in (message.tool_calls or [])
|
||||
]
|
||||
else:
|
||||
# Regular message
|
||||
content_parts = []
|
||||
for part in message.content:
|
||||
if isinstance(part, TextPart):
|
||||
content_parts.append(part.text)
|
||||
result["content"] = "\n".join(content_parts) if content_parts else None
|
||||
|
||||
return result # type: ignore[return-value]
|
||||
|
||||
|
||||
class OpenAIStreamedMessage:
    """Streamed message implementation for OpenAI.

    This class wraps the OpenAI streaming response and converts chunks
    into AgentLite content parts (TextPart, ToolCall, ToolCallPart).

    Side effects of iteration: ``id`` and ``usage`` are populated as
    chunks arrive, so they are only final once iteration completes.
    """

    def __init__(self, response: AsyncIterator[ChatCompletionChunk]):
        """Initialize the streamed message.

        Args:
            response: The OpenAI streaming response.
        """
        self._response = response
        # Message id; filled in lazily from the first chunk that carries one.
        self._id: str | None = None
        # Zeroed usage until the server sends a usage-bearing chunk
        # (requested via stream_options={"include_usage": True} in generate()).
        self._usage = TokenUsage(input_tokens=0, output_tokens=0)

    def __aiter__(self) -> AsyncIterator[Any]:
        """Return an async iterator over the streamed parts."""
        return self._iter_chunks()

    async def _iter_chunks(self) -> AsyncIterator[Any]:
        """Iterate over response chunks and yield content parts.

        Yields:
            TextPart for text deltas, ToolCall when a chunk introduces a
            new tool call (has a function name), ToolCallPart for argument
            continuations of the current tool call.

        Raises:
            ChatProviderError subclass: SDK/HTTP errors raised mid-stream
                are converted via _convert_error.
        """
        try:
            async for chunk in self._response:
                # Track message ID (kept up to date; later chunks may repeat it)
                if chunk.id:
                    self._id = chunk.id

                # Track usage if available (typically only the final chunk)
                if chunk.usage:
                    self._usage = TokenUsage(
                        input_tokens=chunk.usage.prompt_tokens,
                        output_tokens=chunk.usage.completion_tokens,
                    )

                # Skip empty choices (e.g. the usage-only trailer chunk)
                if not chunk.choices:
                    continue

                # Only the first choice is consumed; n>1 is not supported here.
                delta = chunk.choices[0].delta

                # Yield text content
                if delta.content:
                    yield TextPart(text=delta.content)

                # Yield tool calls
                if delta.tool_calls:
                    for tc in delta.tool_calls:
                        if tc.function:
                            if tc.function.name:
                                # New tool call; the server may omit the id,
                                # so synthesize one to keep calls addressable.
                                yield ToolCall(
                                    id=tc.id or str(uuid.uuid4()),
                                    function=ToolCall.FunctionBody(
                                        name=tc.function.name,
                                        arguments=tc.function.arguments or "",
                                    ),
                                )
                            elif tc.function.arguments:
                                # Continuation of tool call arguments
                                yield ToolCallPart(arguments_part=tc.function.arguments)
        except (OpenAIError, httpx.HTTPError) as e:
            raise _convert_error(e) from e

    @property
    def id(self) -> str | None:
        """The unique identifier of the message (None until streamed)."""
        return self._id

    @property
    def usage(self) -> TokenUsage | None:
        """Token usage statistics (zeros until the usage chunk arrives)."""
        return self._usage
|
||||
|
||||
|
||||
class OpenAIProvider:
    """OpenAI-compatible chat provider.

    This provider works with the OpenAI API and any OpenAI-compatible API
    such as Moonshot, Together, Fireworks, etc.

    Attributes:
        model: The model name to use.
        client: The underlying AsyncOpenAI client.

    Example:
        >>> provider = OpenAIProvider(
        ...     api_key="sk-...",
        ...     model="gpt-4",
        ... )
        >>> stream = await provider.generate(
        ...     system_prompt="You are helpful.",
        ...     tools=[],
        ...     history=[Message(role="user", content="Hello!")],
        ... )
    """

    def __init__(
        self,
        *,
        api_key: str,
        model: str,
        base_url: str | None = None,
        timeout: float = 60.0,
        **client_kwargs: Any,
    ):
        """Initialize the OpenAI provider.

        Args:
            api_key: The API key for authentication.
            model: The model name to use (e.g., "gpt-4", "gpt-3.5-turbo").
            base_url: Optional custom base URL for OpenAI-compatible APIs.
            timeout: Request timeout in seconds.
            **client_kwargs: Additional arguments passed to AsyncOpenAI.
        """
        self.model = model
        # base_url=None lets AsyncOpenAI fall back to the official endpoint,
        # so no branching is needed here.
        self.client = AsyncOpenAI(
            api_key=api_key,
            base_url=base_url,
            timeout=timeout,
            **client_kwargs,
        )

    @property
    def model_name(self) -> str:
        """The name of the model being used."""
        return self.model

    async def generate(
        self,
        system_prompt: str,
        tools: Sequence[Tool],
        history: Sequence[Message],
    ) -> StreamedMessage:
        """Generate a response from the OpenAI API.

        Args:
            system_prompt: The system prompt to use (empty string omits it).
            tools: Available tools for the model to call.
            history: The conversation history.

        Returns:
            A streamed message that yields content parts. Errors raised
            while iterating the stream are converted inside
            OpenAIStreamedMessage; the except clause below only covers
            request setup.

        Raises:
            APIConnectionError: If the connection fails.
            APITimeoutError: If the request times out.
            APIStatusError: If the API returns an error status.
            APIEmptyResponseError: If the response is empty.
        """
        # Build messages: the system prompt (when non-empty) always leads.
        messages: list[ChatCompletionMessageParam] = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})

        for msg in history:
            messages.append(_convert_message_to_openai(msg))

        # Build tools: None (rather than []) when there are no tools —
        # some OpenAI-compatible backends reject an empty tools array.
        openai_tools = [_convert_tool_to_openai(t) for t in tools] if tools else None

        try:
            # Make streaming request; include_usage asks the server to
            # append a final usage-bearing chunk (NOTE(review): assumed
            # supported by all targeted OpenAI-compatible backends — confirm).
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                tools=openai_tools,
                stream=True,
                stream_options={"include_usage": True},
            )

            return OpenAIStreamedMessage(response)  # type: ignore[arg-type]
        except (OpenAIError, httpx.HTTPError) as e:
            raise _convert_error(e) from e
|
||||
|
||||
|
||||
def _convert_error(error: OpenAIError | httpx.HTTPError) -> ChatProviderError:
    """Convert an OpenAI SDK or httpx error to a ChatProviderError.

    Args:
        error: The error to convert.

    Returns:
        The appropriate ChatProviderError subclass; a generic
        ChatProviderError when no specific mapping applies.
    """
    # The SDK exposes these as top-level classes (openai.APIConnectionError,
    # openai.APITimeoutError, openai.APIStatusError) — they are NOT
    # attributes of OpenAIError, so the previous
    # ``OpenAIError.APIConnectionError`` lookups raised AttributeError.
    # The module is imported under an alias because this file imports
    # same-named error classes from agentlite.provider.
    import openai as _openai

    if isinstance(error, OpenAIError):
        # openai.APITimeoutError subclasses openai.APIConnectionError,
        # so the timeout check must come first.
        if isinstance(error, _openai.APITimeoutError):
            return APITimeoutError(str(error))
        if isinstance(error, _openai.APIConnectionError):
            return APIConnectionError(str(error))
        if isinstance(error, _openai.APIStatusError):
            return APIStatusError(error.status_code, str(error))

    if isinstance(error, httpx.TimeoutException):
        return APITimeoutError(str(error))
    if isinstance(error, httpx.NetworkError):
        return APIConnectionError(str(error))
    if isinstance(error, httpx.HTTPStatusError):
        return APIStatusError(error.response.status_code, str(error))

    # Anything unmapped still surfaces as a provider error, never raw.
    return ChatProviderError(str(error))
|
||||
72
agentlite/src/agentlite/skills/__init__.py
Normal file
72
agentlite/src/agentlite/skills/__init__.py
Normal file
@@ -0,0 +1,72 @@
|
||||
"""Skills system for AgentLite.
|
||||
|
||||
This module provides a comprehensive skill system similar to kimi-cli,
|
||||
allowing agents to use modular, reusable skills defined in SKILL.md files.
|
||||
|
||||
Skills can be:
|
||||
- **Standard**: Text-based instructions loaded as prompts
|
||||
- **Flow**: Structured flowcharts (Mermaid/D2) for deterministic execution
|
||||
|
||||
Example:
|
||||
>>> from pathlib import Path
|
||||
>>> from agentlite.skills import discover_skills, SkillTool
|
||||
>>> # Discover skills
|
||||
>>> skills = discover_skills(Path("./skills"))
|
||||
>>> skill_index = {s.name.lower(): s for s in skills}
|
||||
>>> # Create skill tool
|
||||
>>> skill_tool = SkillTool(skill_index, parent_agent=agent)
|
||||
"""
|
||||
|
||||
from agentlite.skills.discovery import (
|
||||
discover_skills,
|
||||
discover_skills_from_roots,
|
||||
get_default_skills_dirs,
|
||||
index_skills_by_name,
|
||||
parse_frontmatter,
|
||||
parse_skill_text,
|
||||
)
|
||||
from agentlite.skills.flow_parser import (
|
||||
FlowParseError,
|
||||
parse_d2_flowchart,
|
||||
parse_mermaid_flowchart,
|
||||
)
|
||||
from agentlite.skills.flow_runner import FlowExecutionError, FlowRunner
|
||||
from agentlite.skills.models import (
|
||||
Flow,
|
||||
FlowEdge,
|
||||
FlowNode,
|
||||
FlowNodeKind,
|
||||
Skill,
|
||||
SkillType,
|
||||
index_skills,
|
||||
normalize_skill_name,
|
||||
)
|
||||
from agentlite.skills.skill_tool import SkillTool
|
||||
|
||||
__all__ = [
|
||||
# Models
|
||||
"Skill",
|
||||
"Flow",
|
||||
"FlowNode",
|
||||
"FlowEdge",
|
||||
"SkillType",
|
||||
"FlowNodeKind",
|
||||
# Discovery
|
||||
"discover_skills",
|
||||
"discover_skills_from_roots",
|
||||
"get_default_skills_dirs",
|
||||
"index_skills",
|
||||
"index_skills_by_name",
|
||||
"normalize_skill_name",
|
||||
"parse_skill_text",
|
||||
"parse_frontmatter",
|
||||
# Flow parsing
|
||||
"parse_mermaid_flowchart",
|
||||
"parse_d2_flowchart",
|
||||
"FlowParseError",
|
||||
# Flow execution
|
||||
"FlowRunner",
|
||||
"FlowExecutionError",
|
||||
# Tool
|
||||
"SkillTool",
|
||||
]
|
||||
308
agentlite/src/agentlite/skills/discovery.py
Normal file
308
agentlite/src/agentlite/skills/discovery.py
Normal file
@@ -0,0 +1,308 @@
|
||||
"""Skill discovery and loading utilities for AgentLite.
|
||||
|
||||
This module provides functions for discovering and loading skills from
|
||||
directory structures, similar to kimi-cli's skill system.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterable
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Dict, Optional
|
||||
|
||||
import yaml
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from agentlite.skills.models import Flow, Skill
|
||||
|
||||
|
||||
def parse_frontmatter(content: str) -> Optional[Dict]:
    """Parse YAML frontmatter from markdown content.

    Args:
        content: The file content that may contain frontmatter

    Returns:
        Dictionary of frontmatter data, or None if no frontmatter found

    Example:
        >>> content = '''---
        ... name: my-skill
        ... description: Does something useful
        ... ---
        ... # Skill Content
        ... '''
        >>> parse_frontmatter(content)
        {'name': 'my-skill', 'description': 'Does something useful'}
    """
    if not content.startswith("---"):
        return None

    # Locate the closing delimiter; without it there is no frontmatter.
    closing = content.find("\n---", 3)
    if closing == -1:
        return None

    raw_yaml = content[3:closing].strip()
    try:
        parsed = yaml.safe_load(raw_yaml)
    except Exception:
        # Malformed YAML is treated as "no frontmatter", not an error.
        return None
    # An empty frontmatter block yields {} rather than None.
    return parsed or {}
|
||||
|
||||
|
||||
def parse_flow_from_skill(content: str) -> "Flow":
    """Parse a flowchart from skill content.

    Looks for mermaid or d2 code blocks and parses them into Flow objects,
    returning the first block that parses successfully.

    Args:
        content: The SKILL.md content containing a flowchart

    Returns:
        Parsed Flow object

    Raises:
        ValueError: If no valid flowchart found
    """
    from agentlite.skills.flow_parser import (
        FlowParseError,
        parse_d2_flowchart,
        parse_mermaid_flowchart,
    )

    # Map fence language tag -> parser; blocks in other languages are skipped.
    parsers = {
        "mermaid": parse_mermaid_flowchart,
        "d2": parse_d2_flowchart,
    }

    for lang, code in _extract_code_blocks(content):
        parser = parsers.get(lang)
        if parser is None:
            continue
        try:
            return parser(code)
        except FlowParseError:
            # A broken block is not fatal — try the next candidate.
            continue

    raise ValueError("No valid mermaid or d2 flowchart found in skill content")
|
||||
|
||||
|
||||
def _extract_code_blocks(content: str) -> list[tuple[str, str]]:
|
||||
"""Extract fenced code blocks from markdown content.
|
||||
|
||||
Args:
|
||||
content: Markdown content
|
||||
|
||||
Returns:
|
||||
List of (language, code) tuples
|
||||
"""
|
||||
blocks = []
|
||||
in_block = False
|
||||
current_lang = ""
|
||||
current_code = []
|
||||
fence_char = ""
|
||||
fence_len = 0
|
||||
|
||||
for line in content.split("\n"):
|
||||
stripped = line.lstrip()
|
||||
|
||||
if not in_block:
|
||||
# Check for fence start
|
||||
if stripped.startswith("```") or stripped.startswith("~~~"):
|
||||
fence_char = stripped[0]
|
||||
fence_len = len(stripped) - len(stripped.lstrip(fence_char))
|
||||
if fence_len >= 3:
|
||||
# Extract language
|
||||
info = stripped[fence_len:].strip()
|
||||
current_lang = info.split()[0] if info else ""
|
||||
in_block = True
|
||||
current_code = []
|
||||
else:
|
||||
# Check for fence end
|
||||
if stripped.startswith(fence_char * fence_len):
|
||||
blocks.append((current_lang, "\n".join(current_code)))
|
||||
in_block = False
|
||||
current_lang = ""
|
||||
current_code = []
|
||||
else:
|
||||
current_code.append(line)
|
||||
|
||||
return blocks
|
||||
|
||||
|
||||
def parse_skill_text(content: str, dir_path: Path) -> "Skill":
    """Parse skill content into a Skill object.

    Args:
        content: The SKILL.md content
        dir_path: Path to the skill directory; its basename is the
            fallback skill name

    Returns:
        Parsed Skill object

    Raises:
        ValueError: If the skill content is invalid
    """
    from agentlite.skills.flow_parser import FlowParseError
    from agentlite.skills.models import Skill

    meta = parse_frontmatter(content) or {}

    # Frontmatter fields are optional; fall back to sensible defaults.
    name = meta.get("name") or dir_path.name
    description = meta.get("description") or "No description provided."
    skill_type = meta.get("type") or "standard"

    if skill_type not in ("standard", "flow"):
        raise ValueError(f'Invalid skill type "{skill_type}"')

    flow = None
    if skill_type == "flow":
        try:
            flow = parse_flow_from_skill(content)
        except (ValueError, FlowParseError) as e:
            import logging

            # A broken flowchart downgrades the skill rather than rejecting it.
            logging.warning(
                f"Failed to parse flow skill '{name}': {e}. Treating as standard skill."
            )
            skill_type = "standard"
            flow = None

    return Skill(
        name=name,
        description=description,
        type=skill_type,
        dir=dir_path,
        flow=flow,
    )
|
||||
|
||||
|
||||
def discover_skills(skills_dir: Path) -> list["Skill"]:
    """Discover all skills in a directory.

    Scans the directory's immediate subdirectories for SKILL.md files and
    parses them into Skill objects; unparseable skills are logged and
    skipped so one broken skill cannot abort discovery.

    Args:
        skills_dir: Directory to scan for skills

    Returns:
        List of discovered Skill objects, sorted by name

    Example:
        >>> skills = discover_skills(Path("./skills"))
        >>> for skill in skills:
        ...     print(f"{skill.name}: {skill.description}")
    """
    from agentlite.skills.models import Skill

    if not skills_dir.is_dir():
        return []

    found: list[Skill] = []
    for candidate in skills_dir.iterdir():
        skill_md = candidate / "SKILL.md"
        # Only subdirectories that carry a SKILL.md manifest count.
        if not (candidate.is_dir() and skill_md.is_file()):
            continue

        try:
            found.append(parse_skill_text(skill_md.read_text(encoding="utf-8"), candidate))
        except Exception as e:
            import logging

            logging.warning(f"Failed to parse skill at {skill_md}: {e}")

    return sorted(found, key=lambda skill: skill.name)
|
||||
|
||||
|
||||
def discover_skills_from_roots(skills_dirs: Iterable[Path]) -> list["Skill"]:
    """Discover skills from multiple directory roots.

    Skills from later directories will override skills with the same
    (normalized) name from earlier directories.

    Args:
        skills_dirs: Iterable of directories to scan

    Returns:
        List of unique Skill objects, sorted by name

    Example:
        >>> roots = [Path("./builtin"), Path("~/.config/skills").expanduser()]
        >>> skills = discover_skills_from_roots(roots)
    """
    from agentlite.skills.models import normalize_skill_name

    merged: dict[str, "Skill"] = {}
    for root in skills_dirs:
        # dict.update keeps the last assignment, implementing the override.
        merged.update(
            (normalize_skill_name(skill.name), skill) for skill in discover_skills(root)
        )

    return sorted(merged.values(), key=lambda skill: skill.name)
|
||||
|
||||
|
||||
def get_default_skills_dirs(work_dir: Path | None = None) -> list[Path]:
    """Get the default skill directory search paths.

    Returns directories in priority order:
    1. User-level: ~/.config/agents/skills/ (or alternatives)
    2. Project-level: ./.agents/skills/ (or alternatives)

    Within each level, only the first candidate that exists is used.

    Args:
        work_dir: Working directory for project-level search (default: current dir)

    Returns:
        List of existing skill directories
    """
    base = Path.cwd() if work_dir is None else work_dir
    home = Path.home()

    candidate_groups = [
        # User-level candidates, highest priority first.
        [
            home / ".config" / "agents" / "skills",
            home / ".agents" / "skills",
            home / ".kimi" / "skills",
        ],
        # Project-level candidates.
        [
            base / ".agents" / "skills",
            base / ".kimi" / "skills",
        ],
    ]

    found: list[Path] = []
    for group in candidate_groups:
        existing = next((c for c in group if c.is_dir()), None)
        if existing is not None:
            found.append(existing)

    return found
|
||||
|
||||
|
||||
def index_skills_by_name(skills: Iterable["Skill"]) -> dict[str, "Skill"]:
    """Build a lookup table for skills by normalized name.

    Args:
        skills: Iterable of Skill objects

    Returns:
        Dictionary mapping normalized names to Skill objects; on name
        collisions the later skill wins.
    """
    from agentlite.skills.models import normalize_skill_name

    index: dict[str, "Skill"] = {}
    for skill in skills:
        index[normalize_skill_name(skill.name)] = skill
    return index
|
||||
252
agentlite/src/agentlite/skills/flow_parser.py
Normal file
252
agentlite/src/agentlite/skills/flow_parser.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""Flowchart parsers for flow-type skills.
|
||||
|
||||
This module provides parsers for Mermaid and D2 flowchart syntax
|
||||
to convert them into Flow objects that can be executed.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from agentlite.skills.models import Flow, FlowEdge, FlowNode
|
||||
|
||||
|
||||
class FlowParseError(ValueError):
    """Signals that a Mermaid or D2 flowchart definition could not be parsed."""
|
||||
|
||||
|
||||
def parse_mermaid_flowchart(content: str) -> "Flow":
    """Parse a Mermaid flowchart into a Flow object.

    Supports basic Mermaid flowchart syntax:
    - Node definitions: `id[label]`, `id(label)`, `id{label}`, `id((label))`
    - Edges: `-->`, `---`, `-.->`, with endpoints given as bare ids or
      inline node definitions (e.g. `BEGIN(( )) --> CHECK[Check input]`)
    - Labeled edges: `-->|label|`
    - Special nodes: BEGIN(( )), END(( ))

    Limitation: node labels must not themselves contain an arrow token.

    Args:
        content: Mermaid flowchart definition

    Returns:
        Flow object representing the flowchart

    Raises:
        FlowParseError: If parsing fails

    Example:
        >>> mermaid = '''
        ... flowchart TD
        ...     BEGIN(( )) --> CHECK[Check input]
        ...     CHECK --> VALID{Is valid?}
        ...     VALID -->|Yes| PROCESS[Process]
        ...     VALID -->|No| ERROR[Show error]
        ...     PROCESS --> END(( ))
        ...     ERROR --> END
        ... '''
        >>> flow = parse_mermaid_flowchart(mermaid)
    """
    from agentlite.skills.models import Flow, FlowEdge, FlowNode

    # Arrow token plus its OPTIONAL |label|. The previous implementation
    # made the |label| mandatory, silently dropping every plain `A --> B`
    # edge, and could not handle inline node definitions on edge lines —
    # the docstring example itself failed. Used with re.split, whose single
    # capture group interleaves the labels into the result list.
    arrow_pattern = re.compile(r"\s*(?:-->|---|-\.->)\s*(?:\|([^|]*)\|)?\s*")

    # One node reference: id with an optional shape-delimited label.
    # ((label)) must be tried before (label) so circles parse correctly.
    node_pattern = re.compile(
        r"^(\w+)\s*"
        r"(?:\(\((.*?)\)\)"   # ((label)) - circle
        r"|\[(.*?)\]"         # [label]   - rectangle
        r"|\((.*?)\)"         # (label)   - rounded
        r"|\{(.*?)\})?\s*$"   # {label}   - diamond
    )

    nodes: dict[str, FlowNode] = {}
    edges: list[FlowEdge] = []

    def _register_node(spec: str) -> str | None:
        """Parse one node reference, record it, and return its id (or None)."""
        match = node_pattern.match(spec.strip())
        if match is None:
            return None
        node_id = match.group(1)
        label = next((g for g in match.groups()[1:] if g is not None), None)
        has_shape = label is not None
        if label is None:
            label = node_id

        # A bare reference to an already-defined node must not clobber the
        # shape/label from its definition.
        if node_id in nodes and not has_shape:
            return node_id

        # Reserved END names take precedence over the empty-circle-label
        # heuristic so `END(( ))` is an end node, not a second begin node.
        upper = node_id.upper()
        if upper in ("END", "STOP", "FINISH"):
            kind = "end"
        elif upper in ("BEGIN", "START") or (has_shape and label.strip() == ""):
            kind = "begin"
        elif match.group(5) is not None:  # {...} diamond shape
            kind = "decision"
        else:
            kind = "task"

        nodes[node_id] = FlowNode(id=node_id, label=label, kind=kind)
        return node_id

    for raw_line in content.strip().split("\n"):
        line = raw_line.strip().rstrip(";")
        if not line or line.startswith(("flowchart", "graph", "%%")):
            continue

        # re.split with one capture group yields:
        #   [node0, label0, node1, label1, ..., nodeN]
        pieces = arrow_pattern.split(line)
        ids = [_register_node(piece) for piece in pieces[0::2]]
        labels = pieces[1::2]

        for src, dst, edge_label in zip(ids, ids[1:], labels):
            if src is not None and dst is not None:
                edges.append(
                    FlowEdge(
                        src=src,
                        dst=dst,
                        label=edge_label.strip() if edge_label else None,
                    )
                )

    # Group edges by source node for successor lookup during execution.
    outgoing: dict[str, list[FlowEdge]] = {}
    for edge in edges:
        outgoing.setdefault(edge.src, []).append(edge)

    begin_ids = [n.id for n in nodes.values() if n.kind == "begin"]
    end_ids = [n.id for n in nodes.values() if n.kind == "end"]

    # Fall back to the first/last declared node when no explicit
    # BEGIN/END node exists (insertion order is declaration order).
    if not begin_ids:
        begin_ids = [next(iter(nodes))] if nodes else []
    if not end_ids:
        end_ids = [list(nodes)[-1]] if nodes else []

    if len(begin_ids) != 1:
        raise FlowParseError(f"Expected exactly one BEGIN node, found {len(begin_ids)}")
    if len(end_ids) != 1:
        raise FlowParseError(f"Expected exactly one END node, found {len(end_ids)}")

    return Flow(nodes=nodes, outgoing=outgoing, begin_id=begin_ids[0], end_id=end_ids[0])
|
||||
|
||||
|
||||
def parse_d2_flowchart(content: str) -> "Flow":
    """Parse a D2 flowchart into a Flow object.

    Supports basic D2 syntax:
    - Node definitions: `id: label`
    - Edges: `id1 -> id2` or `id1 -> id2: label`
    - Special shapes: `id: {shape: circle}` for begin/end,
      `{shape: diamond}` for decision nodes

    Args:
        content: D2 flowchart definition

    Returns:
        Flow object representing the flowchart

    Raises:
        FlowParseError: If parsing fails

    Example:
        >>> d2 = '''
        ... BEGIN: {shape: circle}
        ... CHECK: Check input
        ... VALID: Is valid? {shape: diamond}
        ... PROCESS: Process
        ... ERROR: Show error
        ... END: {shape: circle}
        ...
        ... BEGIN -> CHECK
        ... CHECK -> VALID
        ... VALID -> PROCESS: Yes
        ... VALID -> ERROR: No
        ... PROCESS -> END
        ... ERROR -> END
        ... '''
        >>> flow = parse_d2_flowchart(d2)
    """
    from agentlite.skills.models import Flow, FlowEdge, FlowNode

    nodes: dict[str, FlowNode] = {}
    edges: list[FlowEdge] = []

    # Node pattern: id: label or id: {shape: ...}
    node_pattern = re.compile(r"^(\w+)\s*:\s*(.+)$")

    # Edge pattern: src -> dst or src -> dst: label
    edge_pattern = re.compile(r"^(\w+)\s*->\s*(\w+)(?:\s*:\s*(.+))?$")

    for raw_line in content.strip().split("\n"):
        line = raw_line.strip()
        if not line:
            continue

        # Edges first: "A -> B: label" would otherwise match the node pattern.
        edge_match = edge_pattern.match(line)
        if edge_match:
            src, dst, label = edge_match.groups()
            edges.append(
                FlowEdge(src=src.strip(), dst=dst.strip(), label=label.strip() if label else None)
            )
            continue

        node_match = node_pattern.match(line)
        if node_match:
            node_id, rest = node_match.groups()
            rest = rest.strip()

            # Shape attribute, e.g. "{shape: circle}".
            shape_match = re.search(r"\{shape:\s*(\w+)\}", rest)
            shape = shape_match.group(1) if shape_match else None

            # The label is whatever remains once attributes are removed.
            label = re.sub(r"\{[^}]*\}", "", rest).strip()
            if not label:
                label = node_id

            # Reserved END names take precedence over the circle-shape
            # heuristic — the previous ordering classified
            # `END: {shape: circle}` as a BEGIN node, which made the
            # docstring example fail with two begin nodes. The old
            # name-based decision heuristic (VALID/CHECK/DECISION) is
            # dropped too: it turned `CHECK: Check input` into a decision
            # node, contradicting the documented example.
            upper = node_id.upper()
            if upper in ("END", "STOP", "FINISH"):
                kind = "end"
            elif upper in ("BEGIN", "START") or (shape == "circle" and label == node_id):
                kind = "begin"
            elif shape == "diamond":
                kind = "decision"
            else:
                kind = "task"

            nodes[node_id] = FlowNode(id=node_id, label=label, kind=kind)

    # Group edges by source node for successor lookup during execution.
    outgoing: dict[str, list[FlowEdge]] = {}
    for edge in edges:
        outgoing.setdefault(edge.src, []).append(edge)

    begin_ids = [n.id for n in nodes.values() if n.kind == "begin"]
    end_ids = [n.id for n in nodes.values() if n.kind == "end"]

    # Fall back to the first/last declared node when none is explicit.
    if not begin_ids:
        begin_ids = [next(iter(nodes))] if nodes else []
    if not end_ids:
        end_ids = [list(nodes)[-1]] if nodes else []

    if len(begin_ids) != 1:
        raise FlowParseError(f"Expected exactly one BEGIN node, found {len(begin_ids)}")
    if len(end_ids) != 1:
        raise FlowParseError(f"Expected exactly one END node, found {len(end_ids)}")

    return Flow(nodes=nodes, outgoing=outgoing, begin_id=begin_ids[0], end_id=end_ids[0])
|
||||
200
agentlite/src/agentlite/skills/flow_runner.py
Normal file
200
agentlite/src/agentlite/skills/flow_runner.py
Normal file
@@ -0,0 +1,200 @@
|
||||
"""Flow runner for executing flow-type skills.
|
||||
|
||||
This module provides FlowRunner for executing flowchart-based skills
|
||||
node by node, similar to kimi-cli's implementation.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from agentlite.agent import Agent
|
||||
from agentlite.skills.models import Flow, FlowEdge, FlowNode
|
||||
|
||||
|
||||
class FlowExecutionError(Exception):
    """Signals that executing a flow-type skill failed."""
|
||||
|
||||
|
||||
class FlowRunner:
    """Executes flowchart-based skills.

    FlowRunner executes a flowchart node by node, handling task nodes
    and decision nodes appropriately.

    For task nodes: Executes the node's label as a prompt
    For decision nodes: Presents options and waits for the agent's choice

    Example:
        >>> from agentlite.skills.models import Flow, FlowNode, FlowEdge
        >>> # Define a simple flow
        >>> flow = Flow(
        ...     nodes={
        ...         "start": FlowNode(id="start", label="Start", kind="begin"),
        ...         "task": FlowNode(id="task", label="Analyze code", kind="task"),
        ...         "end": FlowNode(id="end", label="End", kind="end"),
        ...     },
        ...     outgoing={
        ...         "start": [FlowEdge(src="start", dst="task")],
        ...         "task": [FlowEdge(src="task", dst="end")],
        ...     },
        ...     begin_id="start",
        ...     end_id="end",
        ... )
        >>> runner = FlowRunner(flow, "my-flow")
        >>> output = await runner.run(agent, "Additional context")
    """

    def __init__(self, flow: "Flow", name: str = "flow"):
        """Initialize the flow runner.

        Args:
            flow: The flowchart to execute
            name: Name of the flow (for logging/debugging)
        """
        self._flow = flow
        self._name = name

    async def run(self, agent: "Agent", args: str = "") -> str:
        """Execute the flow.

        Args:
            agent: The agent to use for executing task nodes
            args: Additional arguments/context for the flow

        Returns:
            The combined output from all executed task nodes, joined with
            blank lines.

        Raises:
            FlowExecutionError: If execution fails or the step budget is
                exhausted without reaching the END node.
        """
        current_id = self._flow.begin_id
        outputs: list[str] = []
        max_steps = 100  # Hard cap to guard against cyclic flows.

        # for/else: the else branch fires only when the loop exhausts its
        # budget WITHOUT breaking. The previous post-loop `steps >= max_steps`
        # check also fired when the flow reached END exactly on the last
        # step, misreporting a completed flow as an infinite loop.
        for _ in range(max_steps):
            node = self._flow.nodes.get(current_id)
            if node is None:
                raise FlowExecutionError(f"Node '{current_id}' not found in flow")

            edges = self._flow.outgoing.get(current_id, [])

            if node.kind == "end":
                # Flow complete.
                break

            elif node.kind == "begin":
                # BEGIN produces no output; just follow its (single) edge.
                if not edges:
                    raise FlowExecutionError("BEGIN node has no outgoing edges")
                current_id = edges[0].dst

            elif node.kind == "task":
                output = await self._execute_task_node(agent, node, args)
                if output:
                    outputs.append(output)

                if not edges:
                    raise FlowExecutionError(f"Task node '{current_id}' has no outgoing edges")
                current_id = edges[0].dst

            elif node.kind == "decision":
                choice = await self._execute_decision_node(agent, node, edges, args)

                # Follow the edge whose label matches the choice
                # (case-insensitive exact match).
                next_id = None
                for edge in edges:
                    if edge.label and edge.label.lower() == choice.lower():
                        next_id = edge.dst
                        break

                if next_id is None:
                    raise FlowExecutionError(
                        f"Invalid choice '{choice}' for decision node '{current_id}'"
                    )

                current_id = next_id

            else:
                raise FlowExecutionError(f"Unknown node kind: {node.kind}")
        else:
            raise FlowExecutionError("Flow exceeded maximum steps (possible infinite loop)")

        return "\n\n".join(outputs)

    async def _execute_task_node(self, agent: "Agent", node: "FlowNode", args: str) -> str:
        """Execute a task node.

        Args:
            agent: The agent to use
            node: The task node; its label becomes the prompt
            args: Additional context, appended to the prompt when non-blank

        Returns:
            The task output
        """
        prompt = node.label
        if args.strip():
            prompt = f"{prompt}\n\nContext: {args.strip()}"

        response = await agent.run(prompt)
        return response

    async def _execute_decision_node(
        self, agent: "Agent", node: "FlowNode", edges: list["FlowEdge"], args: str
    ) -> str:
        """Execute a decision node.

        Args:
            agent: The agent to use
            node: The decision node; its label is the question
            edges: Available outgoing edges; their labels are the choices
            args: Additional context, appended when non-blank

        Returns:
            The chosen option; if the agent's reply matches no option, the
            first option is returned as a best-effort default ("" when the
            node has no labeled edges).
        """
        choices = [edge.label for edge in edges if edge.label]

        prompt_lines = [
            node.label,
            "",
            "Available options:",
            *[f"- {choice}" for choice in choices],
            "",
            "Reply with one of the options above.",
        ]

        if args.strip():
            prompt_lines.extend(["", f"Context: {args.strip()}"])

        prompt = "\n".join(prompt_lines)

        response = await agent.run(prompt)

        # Match the reply against the options by loose containment
        # in either direction, case-insensitively.
        response_clean = response.strip().lower()
        for choice in choices:
            if choice.lower() in response_clean or response_clean in choice.lower():
                return choice

        # Best-effort fallback: no option matched the reply.
        return choices[0] if choices else ""
|
||||
154
agentlite/src/agentlite/skills/models.py
Normal file
154
agentlite/src/agentlite/skills/models.py
Normal file
@@ -0,0 +1,154 @@
|
||||
"""Skill system for AgentLite.
|
||||
|
||||
This module provides a skill system similar to kimi-cli, allowing agents
|
||||
to use modular, reusable skills defined in SKILL.md files.
|
||||
|
||||
Skills can be:
|
||||
- Standard: Text-based instructions loaded as prompts
|
||||
- Flow: Structured flowcharts (Mermaid/D2) for deterministic execution
|
||||
|
||||
Example:
|
||||
>>> from agentlite.skills import Skill, discover_skills
|
||||
>>> skills = discover_skills(Path("./skills"))
|
||||
>>> for skill in skills:
|
||||
... print(f"{skill.name}: {skill.description}")
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterable, Iterator
|
||||
from pathlib import Path
|
||||
from typing import Literal, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
SkillType = Literal["standard", "flow"]
|
||||
FlowNodeKind = Literal["begin", "end", "task", "decision"]
|
||||
|
||||
|
||||
class FlowNode(BaseModel):
    """A node in a flowchart.

    Attributes:
        id: Unique identifier for the node
        label: Display text or content for the node
        kind: Type of node (begin, end, task, decision)
    """

    # For "task" nodes the label doubles as the prompt sent to the agent;
    # for "decision" nodes it poses the question to answer.
    id: str = Field(description="Unique node identifier")
    label: str = Field(description="Node display text")
    kind: FlowNodeKind = Field(description="Node type")
|
||||
|
||||
|
||||
class FlowEdge(BaseModel):
    """An edge connecting two nodes in a flowchart.

    Attributes:
        src: Source node ID
        dst: Destination node ID
        label: Optional label for the edge (used for decision branches)
    """

    src: str = Field(description="Source node ID")
    dst: str = Field(description="Destination node ID")
    # Decision branches are matched against this label case-insensitively;
    # unlabeled edges are not offered as decision choices.
    label: Optional[str] = Field(default=None, description="Edge label for decisions")
|
||||
|
||||
|
||||
class Flow(BaseModel):
    """A flowchart defining a structured workflow.

    Flow skills use flowcharts to define deterministic, step-by-step
    workflows that the agent executes node by node.

    Attributes:
        nodes: Dictionary mapping node IDs to FlowNode objects
        outgoing: Dictionary mapping node IDs to their outgoing edges
        begin_id: ID of the start node
        end_id: ID of the end node
    """

    nodes: dict[str, FlowNode] = Field(description="Node ID to node mapping")
    # Adjacency list keyed by source node ID; nodes without outgoing edges
    # may simply be absent from this mapping (consumers use .get(..., [])).
    outgoing: dict[str, list[FlowEdge]] = Field(description="Node outgoing edges")
    begin_id: str = Field(description="Start node ID")
    end_id: str = Field(description="End node ID")
|
||||
|
||||
|
||||
class Skill(BaseModel):
    """A skill definition for AgentLite.

    Skills are modular, reusable capabilities defined in SKILL.md files.
    They can be standard (text-based) or flow-based (structured workflows).

    Attributes:
        name: Unique skill name
        description: When and what the skill does (used for triggering)
        type: Skill type - "standard" or "flow"
        dir: Directory containing the skill files
        flow: Flow definition (only for flow-type skills)

    Example SKILL.md:
        ---
        name: code-reviewer
        description: Review code for bugs, style issues, and best practices
        type: standard
        ---

        # Code Reviewer

        When reviewing code:
        1. Check for syntax errors
        2. Verify style guidelines
        3. Suggest improvements
    """

    name: str = Field(description="Unique skill name")
    description: str = Field(description="Skill description and triggering criteria")
    type: SkillType = Field(default="standard", description="Skill type")
    dir: Path = Field(description="Skill directory path")
    # Only populated when type == "flow"; standard skills leave this None.
    flow: Optional[Flow] = Field(default=None, description="Flow definition for flow-type skills")

    @property
    def skill_md_file(self) -> Path:
        """Path to the SKILL.md file inside the skill directory."""
        return self.dir / "SKILL.md"

    def read_content(self) -> str:
        """Read the full SKILL.md content.

        Returns:
            The content of the SKILL.md file, stripped of surrounding
            whitespace (any frontmatter is included as-is).

        Raises:
            FileNotFoundError: If SKILL.md doesn't exist
        """
        return self.skill_md_file.read_text(encoding="utf-8").strip()
|
||||
|
||||
|
||||
def normalize_skill_name(name: str) -> str:
    """Return the case-insensitive lookup key for a skill name.

    Uses ``str.casefold`` (aggressive Unicode-aware lowercasing) so that
    lookups match regardless of how the name was capitalized.

    Args:
        name: The skill name to normalize.

    Returns:
        The casefolded name.
    """
    return name.casefold()
|
||||
|
||||
|
||||
def index_skills(skills: Iterable[Skill]) -> dict[str, Skill]:
    """Build a case-insensitive lookup table for skills.

    Each skill is keyed by its normalized (casefolded) name, so lookups
    via :func:`normalize_skill_name` are case-insensitive.

    Args:
        skills: Iterable of Skill objects.

    Returns:
        Dictionary mapping normalized names to Skill objects.

    Example:
        >>> skills = [Skill(name="CodeReview", ...), Skill(name="TestWriter", ...)]
        >>> index = index_skills(skills)
        >>> index["codereview"].name
        'CodeReview'
    """
    index: dict[str, Skill] = {}
    for skill in skills:
        index[normalize_skill_name(skill.name)] = skill
    return index
|
||||
177
agentlite/src/agentlite/skills/skill_tool.py
Normal file
177
agentlite/src/agentlite/skills/skill_tool.py
Normal file
@@ -0,0 +1,177 @@
|
||||
"""Skill tool for AgentLite.
|
||||
|
||||
This module provides a tool for executing skills within an agent.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from agentlite.agent import Agent
|
||||
from agentlite.skills.models import Skill
|
||||
|
||||
|
||||
class SkillParams(BaseModel):
    """Parameters for executing a skill via the Skill tool."""

    # Matched case-insensitively against the registry (see SkillTool.__call__,
    # which normalizes via normalize_skill_name before lookup).
    skill_name: str = Field(description="Name of the skill to execute")
    args: str = Field(default="", description="Additional arguments or context for the skill")
|
||||
|
||||
|
||||
class SkillTool(CallableTool2[SkillParams]):
    """Tool for executing skills.

    This tool allows an agent to execute skills from its skill registry.
    Skills can be standard (text-based) or flow-based (structured workflows).

    Example:
        >>> from agentlite.skills.discovery import discover_skills
        >>> from agentlite.skills.models import index_skills
        >>> # Discover and index skills
        >>> skills = discover_skills(Path("./skills"))
        >>> skill_index = index_skills(skills)
        >>> # Create skill tool
        >>> skill_tool = SkillTool(skill_index, parent_agent=agent)
        >>> # Execute a skill
        >>> result = await skill_tool(
        ...     {"skill_name": "code-review", "args": "Review this Python function..."}
        ... )
    """

    name: str = "Skill"
    description: str = (
        "Execute a predefined skill. "
        "Skills provide specialized workflows and domain knowledge. "
        "Available skills are shown in the system context."
    )
    params: type[SkillParams] = SkillParams

    def __init__(
        self,
        skills: dict[str, "Skill"],
        parent_agent: "Agent" | None = None,
    ):
        """Initialize the skill tool.

        Args:
            skills: Dictionary mapping normalized skill names to Skill objects
                (keys are expected to be casefolded; lookup normalizes input).
            parent_agent: The parent agent (used for executing skills). When
                None, standard skills return their content instead of running
                it, and flow skills fail.
        """
        super().__init__()
        self._skills = skills
        self._parent_agent = parent_agent

    async def __call__(self, params: SkillParams) -> ToolResult:
        """Execute a skill.

        Args:
            params: Skill execution parameters

        Returns:
            ToolResult with the skill output or error
        """
        # Local import — presumably to avoid a circular import with the
        # skills package; confirm before hoisting to module level.
        from agentlite.skills.models import normalize_skill_name

        if not params.skill_name:
            return ToolError(message="Skill name cannot be empty")

        # Find the skill (case-insensitive lookup)
        normalized_name = normalize_skill_name(params.skill_name)
        skill = self._skills.get(normalized_name)

        if skill is None:
            available = ", ".join(sorted(self._skills.keys()))
            return ToolError(
                message=f"Skill '{params.skill_name}' not found. Available: {available or 'none'}"
            )

        try:
            # Execute based on skill type; a "flow" skill without a flow
            # definition falls through to standard execution.
            if skill.type == "flow" and skill.flow is not None:
                return await self._execute_flow_skill(skill, params.args)
            else:
                return await self._execute_standard_skill(skill, params.args)
        except Exception as e:
            return ToolError(message=f"Skill execution failed: {e}")

    async def _execute_standard_skill(self, skill: "Skill", args: str) -> ToolResult:
        """Execute a standard (text-based) skill.

        Loads the SKILL.md content and uses it as a prompt for the agent.

        Args:
            skill: The skill to execute
            args: Additional arguments from the user

        Returns:
            ToolResult with the skill output
        """
        # Read skill content (may raise FileNotFoundError, caught by caller)
        content = skill.read_content()

        # Parse frontmatter to get just the body
        from agentlite.skills.discovery import parse_frontmatter

        frontmatter = parse_frontmatter(content)

        # Extract body (remove frontmatter if present)
        if frontmatter and content.startswith("---"):
            # Find the closing delimiter; "\n---" is 4 chars, so +4 lands
            # just past it.  If no closing delimiter, keep the whole text.
            end_idx = content.find("\n---", 3)
            if end_idx != -1:
                body = content[end_idx + 4 :].strip()
            else:
                body = content
        else:
            body = content

        # Append user arguments if provided
        if args.strip():
            body = f"{body}\n\nUser request: {args.strip()}"

        # Execute using parent agent if available
        if self._parent_agent is not None:
            # Create a temporary message with the skill content
            response = await self._parent_agent.run(body)
            return ToolOk(output=response, message=f"Skill '{skill.name}' executed successfully")
        else:
            # Return the skill content for the LLM to use
            return ToolOk(
                output=body, message=f"Skill '{skill.name}' loaded (no parent agent to execute)"
            )

    async def _execute_flow_skill(self, skill: "Skill", args: str) -> ToolResult:
        """Execute a flow-based skill.

        Executes the flowchart node by node.

        Args:
            skill: The flow skill to execute
            args: Additional arguments from the user

        Returns:
            ToolResult with the flow output
        """
        from agentlite.skills.flow_runner import FlowRunner

        if skill.flow is None:
            return ToolError(message=f"Flow skill '{skill.name}' has no flow definition")

        if self._parent_agent is None:
            return ToolError(message="Flow skills require a parent agent to execute")

        # Create flow runner and execute
        runner = FlowRunner(skill.flow, skill.name)

        try:
            output = await runner.run(self._parent_agent, args)
            return ToolOk(
                output=output, message=f"Flow skill '{skill.name}' completed successfully"
            )
        except Exception as e:
            return ToolError(message=f"Flow execution failed: {e}")
|
||||
111
agentlite/src/agentlite/subagent_config.py
Normal file
111
agentlite/src/agentlite/subagent_config.py
Normal file
@@ -0,0 +1,111 @@
|
||||
"""Subagent configuration models for AgentLite.
|
||||
|
||||
This module provides configuration models for defining subagents
|
||||
in a hierarchical agent architecture.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
|
||||
|
||||
class SubagentConfig(BaseModel):
    """Configuration for a subagent.

    Subagents are child agents that can be called by a parent agent
    using the Task tool. Each subagent has its own system prompt
    and can optionally have its own tools.

    Attributes:
        name: Unique name for the subagent
        description: Description of what the subagent does
        system_prompt: System prompt for the subagent
        system_prompt_path: Path to a file containing the system prompt
        tools: List of tool paths to load (inherits from parent if not specified)
        exclude_tools: Tools to exclude from parent inheritance
        subagents: Nested subagents (for hierarchical structure)
        max_iterations: Maximum tool call iterations for this subagent

    Example:
        >>> config = SubagentConfig(
        ...     name="coder",
        ...     description="Good at writing code",
        ...     system_prompt="You are a coding assistant.",
        ...     exclude_tools=["Task", "CreateSubagent"],
        ... )
    """

    name: str = Field(description="Unique name for the subagent")
    description: str = Field(description="Description of what the subagent does")
    # At least one of system_prompt / system_prompt_path must be set (enforced
    # by the model validator below); an inline system_prompt wins when both are
    # given (see get_system_prompt).
    system_prompt: Optional[str] = Field(default=None, description="System prompt for the subagent")
    system_prompt_path: Optional[Path] = Field(
        default=None, description="Path to a file containing the system prompt"
    )
    tools: Optional[list[str]] = Field(
        default=None,
        description="List of tool import paths (e.g., 'agentlite.tools.file:ReadFile')",
    )
    exclude_tools: list[str] = Field(
        default_factory=list, description="Tool names to exclude from parent inheritance"
    )
    subagents: list[SubagentConfig] = Field(
        default_factory=list, description="Nested subagents (hierarchical structure)"
    )
    max_iterations: int = Field(
        default=80, description="Maximum tool call iterations", ge=1, le=100
    )

    @model_validator(mode="after")
    def validate_system_prompt(self) -> SubagentConfig:
        """Validate that either system_prompt or system_prompt_path is provided."""
        if self.system_prompt is None and self.system_prompt_path is None:
            raise ValueError("Either system_prompt or system_prompt_path must be provided")
        return self

    def get_system_prompt(self) -> str:
        """Get the system prompt text.

        An inline ``system_prompt`` takes precedence over
        ``system_prompt_path`` when both are set.

        Returns:
            The system prompt string (file contents are stripped).

        Raises:
            FileNotFoundError: If system_prompt_path is specified but file doesn't exist.
            ValueError: If neither source is set — unreachable for instances
                that passed the model validator above.
        """
        if self.system_prompt is not None:
            return self.system_prompt

        if self.system_prompt_path is not None:
            return Path(self.system_prompt_path).read_text(encoding="utf-8").strip()

        raise ValueError("No system prompt available")
|
||||
|
||||
|
||||
class SubagentSpec(BaseModel):
    """Specification for loading a subagent from a file.

    This is used when subagents are defined in separate YAML files,
    similar to kimi-cli's approach.

    Attributes:
        path: Path to the subagent configuration file
        description: Description of the subagent
    """

    path: Path = Field(description="Path to subagent config file")
    description: str = Field(description="Description of the subagent")

    def load(self) -> SubagentConfig:
        """Load the subagent configuration from the file.

        Returns:
            The loaded SubagentConfig.

        Raises:
            FileNotFoundError: If the file does not exist.
            ValidationError: If the YAML contents do not satisfy
                SubagentConfig (including its system-prompt validator).
        """
        # Local import — presumably so PyYAML is only required when
        # file-based subagents are used; confirm it is optional.
        import yaml

        with open(self.path, encoding="utf-8") as f:
            data = yaml.safe_load(f)

        return SubagentConfig(**data)
|
||||
537
agentlite/src/agentlite/tool.py
Normal file
537
agentlite/src/agentlite/tool.py
Normal file
@@ -0,0 +1,537 @@
|
||||
"""Tool system for AgentLite.
|
||||
|
||||
This module provides the tool abstraction layer for defining and executing
|
||||
tools that can be called by LLM agents.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import inspect
|
||||
import json
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import Iterable
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Callable,
|
||||
Optional,
|
||||
Protocol,
|
||||
TypeVar,
|
||||
Union,
|
||||
cast,
|
||||
Generic,
|
||||
get_type_hints,
|
||||
)
|
||||
|
||||
import jsonschema
|
||||
from pydantic import BaseModel, ValidationError
|
||||
|
||||
from agentlite.message import ToolCall
|
||||
|
||||
if TYPE_CHECKING:
|
||||
pass
|
||||
|
||||
|
||||
class ToolResult(BaseModel):
    """The result of a tool execution.

    Subclasses ToolOk and ToolError provide convenience constructors for
    the success and failure cases.

    Attributes:
        output: The output of the tool (string or structured data).
        is_error: Whether the tool execution resulted in an error.
        message: A message describing the result (for model consumption).

    Example:
        >>> result = ToolOk(output="42")
        >>> print(result.output)
        42
    """

    output: str
    """The output of the tool execution."""

    is_error: bool = False
    """Whether the execution resulted in an error."""

    message: str = ""
    """A message describing the result (for model consumption)."""
|
||||
|
||||
|
||||
class ToolOk(ToolResult):
    """Successful tool execution result.

    When no explicit message is given, the output text is reused as the
    message shown to the model.

    Example:
        >>> return ToolOk(output="File created successfully")
    """

    def __init__(self, output: str, message: str = ""):
        # Fall back to the output text when no message is supplied.
        final_message = message if message else output
        super().__init__(output=output, is_error=False, message=final_message)
|
||||
|
||||
|
||||
class ToolError(ToolResult):
    """Failed tool execution result.

    When no explicit output is given, the error message is reused as the
    output text.

    Example:
        >>> return ToolError(message="File not found")
    """

    def __init__(self, message: str, output: str = ""):
        # Fall back to the message when no output text is supplied.
        final_output = output if output else message
        super().__init__(output=final_output, is_error=True, message=message)
|
||||
|
||||
|
||||
class Tool(BaseModel):
    """Definition of a tool that can be called by the model.

    Attributes:
        name: The name of the tool.
        description: A description of what the tool does.
        parameters: JSON Schema for the tool parameters.

    Raises:
        ValueError: On construction, if ``parameters`` is not a valid
            JSON Schema (checked against the 2020-12 meta-schema).

    Example:
        >>> tool = Tool(
        ...     name="add",
        ...     description="Add two numbers",
        ...     parameters={
        ...         "type": "object",
        ...         "properties": {
        ...             "a": {"type": "number"},
        ...             "b": {"type": "number"},
        ...         },
        ...         "required": ["a", "b"],
        ...     },
        ... )
    """

    name: str
    """The name of the tool."""

    description: str
    """A description of what the tool does."""

    parameters: dict[str, Any]
    """JSON Schema for the tool parameters."""

    def __init__(self, **data: Any):
        super().__init__(**data)
        # Validate the JSON schema
        # Fail fast: the user-supplied schema is itself validated against
        # the JSON Schema 2020-12 meta-schema, so malformed tool
        # definitions are rejected at creation time rather than at call time.
        try:
            jsonschema.validate(self.parameters, jsonschema.Draft202012Validator.META_SCHEMA)
        except jsonschema.ValidationError as e:
            raise ValueError(f"Invalid JSON schema for tool {self.name}: {e}") from e

    @property
    def base(self) -> "Tool":
        """Get the base Tool definition (returns self for Tool instances)."""
        return self
|
||||
|
||||
|
||||
class CallableTool(Tool, ABC):
    """Abstract base class for callable tools.

    Subclasses must implement the __call__ method to define the tool's behavior.
    Use :meth:`call` to invoke the tool with JSON-decoded arguments; it
    validates them against ``parameters`` before dispatching to __call__.

    Example:
        >>> class AddTool(CallableTool):
        ...     name = "add"
        ...     description = "Add two numbers"
        ...     parameters = {
        ...         "type": "object",
        ...         "properties": {
        ...             "a": {"type": "number"},
        ...             "b": {"type": "number"},
        ...         },
        ...         "required": ["a", "b"],
        ...     }
        ...
        ...     async def __call__(self, a: float, b: float) -> ToolResult:
        ...         return ToolOk(output=str(a + b))
    """

    @abstractmethod
    async def __call__(self, *args: Any, **kwargs: Any) -> ToolResult:
        """Execute the tool.

        Args:
            *args: Positional arguments.
            **kwargs: Keyword arguments.

        Returns:
            The result of the tool execution.
        """
        ...

    @property
    def base(self) -> "Tool":
        """Get the base Tool definition (a plain Tool copy of this tool's fields)."""
        return Tool(
            name=self.name,
            description=self.description,
            parameters=self.parameters,
        )

    async def call(self, arguments: dict[str, Any]) -> ToolResult:
        """Call the tool with validated arguments.

        Args:
            arguments: The arguments to pass to the tool.

        Returns:
            The result of the tool execution; validation and execution
            failures are reported as ToolError rather than raised.
        """
        # Validate arguments against schema
        try:
            jsonschema.validate(arguments, self.parameters)
        except jsonschema.ValidationError as e:
            return ToolError(message=f"Invalid arguments: {e}")

        # Call the tool.  `arguments` is annotated as a dict, but list and
        # scalar inputs are handled defensively in case a caller passes raw
        # decoded JSON straight through.
        try:
            if isinstance(arguments, list):
                result = await self.__call__(*arguments)
            elif isinstance(arguments, dict):
                result = await self.__call__(**arguments)
            else:
                result = await self.__call__(arguments)

            if not isinstance(result, ToolResult):
                return ToolError(message=f"Tool returned invalid type: {type(result)}")
            return result
        except Exception as e:
            return ToolError(message=f"Tool execution failed: {e}")
|
||||
|
||||
|
||||
# Type variable for the Pydantic parameter model of a CallableTool2 subclass;
# bound to BaseModel so model_validate / model_json_schema are available.
Params = TypeVar("Params", bound=BaseModel)
|
||||
|
||||
|
||||
class CallableTool2(ABC, Generic[Params]):
    """Type-safe callable tool using Pydantic models for parameters.

    This is the preferred way to define tools as it provides full type safety
    and automatic JSON schema generation.

    Example:
        >>> class AddParams(BaseModel):
        ...     a: float
        ...     b: float
        >>> class AddTool(CallableTool2[AddParams]):
        ...     name = "add"
        ...     description = "Add two numbers"
        ...     params = AddParams
        ...
        ...     async def __call__(self, params: AddParams) -> ToolResult:
        ...         return ToolOk(output=str(params.a + params.b))
    """

    name: str
    """The name of the tool."""

    description: str
    """A description of what the tool does."""

    params: type[Params]
    """The Pydantic model class for parameters."""

    def __init__(
        self,
        name: str | None = None,
        description: str | None = None,
        params: type[Params] | None = None,
    ):
        # Constructor arguments override class-level attributes, so the same
        # class can be instantiated under different names; each attribute is
        # required one way or the other.
        cls = self.__class__

        self.name = name or getattr(cls, "name", "")
        if not self.name:
            raise ValueError("Tool name must be provided")

        self.description = description or getattr(cls, "description", "")
        if not self.description:
            raise ValueError("Tool description must be provided")

        self.params = params or getattr(cls, "params", None)
        if self.params is None:
            raise ValueError("Tool params must be provided")

        # Generate JSON schema from Pydantic model
        self._schema = self.params.model_json_schema()

    @property
    def base(self) -> Tool:
        """Get the base Tool definition (with the schema derived from params)."""
        return Tool(
            name=self.name,
            description=self.description,
            parameters=self._schema,
        )

    @abstractmethod
    async def __call__(self, params: Params) -> ToolResult:
        """Execute the tool.

        Args:
            params: The validated parameters.

        Returns:
            The result of the tool execution.
        """
        ...

    async def call(self, arguments: dict[str, Any]) -> ToolResult:
        """Call the tool with validated arguments.

        Args:
            arguments: The arguments to validate and pass to the tool.

        Returns:
            The result of the tool execution; validation and execution
            failures are reported as ToolError rather than raised.
        """
        try:
            params = self.params.model_validate(arguments)
        except ValidationError as e:
            return ToolError(message=f"Invalid arguments: {e}")

        try:
            result = await self.__call__(params)
            if not isinstance(result, ToolResult):
                return ToolError(message=f"Tool returned invalid type: {type(result)}")
            return result
        except Exception as e:
            return ToolError(message=f"Tool execution failed: {e}")
|
||||
|
||||
|
||||
# Import Generic here to avoid issues with type checking
# NOTE(review): this re-import is redundant — Generic is already imported in
# the typing import block at the top of the module (and is used above by
# CallableTool2's class definition). Harmless; verify before removing.
from typing import Generic
|
||||
|
||||
|
||||
class Toolset(Protocol):
    """Protocol for tool collections.

    A Toolset manages a collection of tools and handles tool calls.
    SimpleToolset is the default concrete implementation.
    """

    @property
    def tools(self) -> list[Tool]:
        """Get all tool definitions."""
        ...

    def handle(self, tool_call: ToolCall) -> "ToolResult | asyncio.Future[ToolResult]":
        """Handle a tool call.

        Args:
            tool_call: The tool call to handle.

        Returns:
            The tool result or a future that resolves to the result.
        """
        ...
|
||||
|
||||
|
||||
# Either tool flavor accepted by SimpleToolset: schema-based CallableTool or
# Pydantic-parameterized CallableTool2.
ToolType = Union[CallableTool, CallableTool2[Any]]
|
||||
|
||||
|
||||
class SimpleToolset:
    """A simple in-memory toolset.

    This is the default toolset implementation that stores tools in a
    dictionary keyed by name and executes them concurrently as asyncio
    tasks.

    Example:
        >>> toolset = SimpleToolset()
        >>> toolset.add(MyTool())
        >>> result = await toolset.handle(tool_call)
    """

    def __init__(self, tools: Iterable[ToolType] | None = None):
        """Initialize the toolset.

        Args:
            tools: Optional initial tools to add.

        Raises:
            ValueError: If two initial tools share a name.
        """
        self._tools: dict[str, ToolType] = {}
        if tools:
            for tool in tools:
                self.add(tool)

    def add(self, tool: ToolType) -> "SimpleToolset":
        """Add a tool to the toolset.

        Args:
            tool: The tool to add.

        Returns:
            Self for chaining.

        Raises:
            ValueError: If a tool with the same name already exists.
        """
        if tool.name in self._tools:
            raise ValueError(f"Tool '{tool.name}' already exists")
        self._tools[tool.name] = tool
        return self

    def remove(self, name: str) -> "SimpleToolset":
        """Remove a tool from the toolset.

        Args:
            name: The name of the tool to remove.

        Returns:
            Self for chaining.

        Raises:
            KeyError: If the tool doesn't exist.
        """
        if name not in self._tools:
            raise KeyError(f"Tool '{name}' not found")
        del self._tools[name]
        return self

    def get(self, name: str) -> ToolType | None:
        """Get a tool by name.

        Args:
            name: The name of the tool.

        Returns:
            The tool if found, None otherwise.
        """
        return self._tools.get(name)

    def __contains__(self, name: str) -> bool:
        """Check if a tool exists in the toolset."""
        return name in self._tools

    def __len__(self) -> int:
        """Get the number of tools in the toolset."""
        return len(self._tools)

    @property
    def tools(self) -> list[Tool]:
        """Get all tool definitions."""
        result = []
        for tool in self._tools.values():
            if isinstance(tool, CallableTool):
                # CallableTool subclasses Tool, so its fields can be copied
                # directly into a plain Tool definition.
                result.append(
                    Tool(
                        name=tool.name,
                        description=tool.description,
                        parameters=tool.parameters,
                    )
                )
            else:
                result.append(tool.base)
        return result

    @staticmethod
    def _completed(result: ToolResult) -> "asyncio.Future[ToolResult]":
        """Return an already-resolved future holding ``result``.

        Uses asyncio.get_running_loop() instead of the deprecated
        asyncio.get_event_loop(); handle() is only usable from within a
        running event loop anyway (asyncio.create_task below has the same
        requirement).
        """
        future: asyncio.Future[ToolResult] = asyncio.get_running_loop().create_future()
        future.set_result(result)
        return future

    def handle(self, tool_call: ToolCall) -> "asyncio.Future[ToolResult]":
        """Handle a tool call.

        Must be called from within a running event loop.

        Args:
            tool_call: The tool call to handle.

        Returns:
            A future that resolves to the tool result; lookup, argument
            parsing, and execution failures resolve to a ToolError rather
            than raising.
        """
        tool = self._tools.get(tool_call.function.name)
        if tool is None:
            return self._completed(
                ToolError(message=f"Tool '{tool_call.function.name}' not found")
            )

        # Parse the JSON-encoded arguments; an empty string means "no args".
        try:
            arguments = json.loads(tool_call.function.arguments or "{}")
        except json.JSONDecodeError as e:
            return self._completed(ToolError(message=f"Invalid JSON arguments: {e}"))

        # Execute the tool as a task so multiple calls can run concurrently.
        async def _execute() -> ToolResult:
            try:
                return await tool.call(arguments)
            except Exception as e:
                return ToolError(message=f"Tool execution failed: {e}")

        return asyncio.create_task(_execute())
|
||||
|
||||
|
||||
def tool(
    name: Optional[str] = None,
    description: Optional[str] = None,
) -> Callable[[Callable[..., Any]], CallableTool]:
    """Decorator to convert a function into a tool.

    The JSON schema is generated from the function's type hints; the tool
    name and description default to the function's name and docstring.
    Both async and plain (sync) functions are supported.

    Args:
        name: Optional tool name (defaults to function name).
        description: Optional description (defaults to function docstring).

    Returns:
        A decorator that converts the function into a CallableTool.

    Example:
        >>> @tool()
        ... async def add(a: float, b: float) -> float:
        ...     '''Add two numbers.'''
        ...     return a + b
        >>> agent = Agent(tools=[add])
    """

    def _json_type(annotation: Any) -> str:
        """Map a Python annotation to a JSON-schema type name ("string" fallback)."""
        for py_type, json_type in (
            (str, "string"),
            (int, "integer"),
            (float, "number"),
            (bool, "boolean"),
        ):
            if annotation is py_type:
                return json_type
        return "string"

    def decorator(func: Callable[..., Any]) -> CallableTool:
        sig = inspect.signature(func)
        try:
            type_hints = get_type_hints(func)
        except Exception:
            # Unresolvable forward references — fall back to raw annotations.
            type_hints = {}

        properties: dict[str, Any] = {}
        required: list[str] = []

        for param_name, param in sig.parameters.items():
            # *args / **kwargs cannot be represented as named JSON properties;
            # previously they leaked into the schema as required strings.
            if param.kind in (
                inspect.Parameter.VAR_POSITIONAL,
                inspect.Parameter.VAR_KEYWORD,
            ):
                continue

            # Parameters without defaults are required.
            if param.default is inspect.Parameter.empty:
                required.append(param_name)

            param_type = type_hints.get(param_name, param.annotation)
            if param_type is inspect.Parameter.empty or param_type is None:
                param_type = str

            properties[param_name] = {"type": _json_type(param_type)}

        parameters: dict[str, Any] = {
            "type": "object",
            "properties": properties,
        }
        if required:
            parameters["required"] = required

        # Create tool class
        tool_name = name or func.__name__
        # inspect.getdoc dedents and strips the docstring.
        tool_description = description or (inspect.getdoc(func) or "No description provided")
        tool_parameters = parameters

        class FunctionTool(CallableTool):
            name: str = tool_name
            description: str = tool_description
            parameters: dict[str, Any] = tool_parameters

            async def __call__(self, *args: Any, **kwargs: Any) -> ToolResult:
                try:
                    result = func(*args, **kwargs)
                    # Support both async and sync functions: only await
                    # when the call actually produced an awaitable.
                    if inspect.isawaitable(result):
                        result = await result
                    return ToolOk(output=str(result))
                except Exception as e:
                    return ToolError(message=str(e))

        return FunctionTool()

    return decorator
|
||||
208
agentlite/src/agentlite/tools/__init__.py
Normal file
208
agentlite/src/agentlite/tools/__init__.py
Normal file
@@ -0,0 +1,208 @@
|
||||
"""Tool suite for AgentLite - A collection of tools inspired by kimi-cli.
|
||||
|
||||
This module provides a comprehensive set of tools for file operations,
|
||||
shell execution, web access, and more, with configuration support
|
||||
for enabling/disabling individual tools.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolOk, ToolError, ToolResult, SimpleToolset
|
||||
from agentlite.tools.config import (
|
||||
ToolSuiteConfig,
|
||||
FileToolsConfig,
|
||||
ShellToolsConfig,
|
||||
WebToolsConfig,
|
||||
MultiAgentToolsConfig,
|
||||
ToolGroupConfig,
|
||||
)
|
||||
|
||||
# Import tool implementations
|
||||
from agentlite.tools.file.read import ReadFile
|
||||
from agentlite.tools.file.write import WriteFile
|
||||
from agentlite.tools.file.replace import StrReplaceFile
|
||||
from agentlite.tools.file.glob import Glob
|
||||
from agentlite.tools.file.grep import Grep
|
||||
from agentlite.tools.file.read_media import ReadMediaFile
|
||||
from agentlite.tools.shell.shell import Shell
|
||||
from agentlite.tools.web.fetch import FetchURL
|
||||
from agentlite.tools.misc.todo import SetTodoList
|
||||
from agentlite.tools.misc.think import Think
|
||||
|
||||
|
||||
class ConfigurableToolset(SimpleToolset):
    """A toolset that supports configuration-based tool enabling/disabling.

    This toolset loads tools based on a ToolSuiteConfig, only adding
    tools that are enabled in the configuration.

    Example:
        >>> config = ToolSuiteConfig(
        ...     file_tools=FileToolsConfig(
        ...         tools={"WriteFile": False}  # Disable WriteFile
        ...     )
        ... )
        >>> toolset = ConfigurableToolset(config)
        >>> "ReadFile" in toolset  # True
        True
        >>> "WriteFile" in toolset  # False
        False
    """

    def __init__(self, config: ToolSuiteConfig | None = None, work_dir: Optional[str] = None):
        """Initialize the configurable toolset.

        Args:
            config: Tool suite configuration. If None, uses default config (all enabled).
            work_dir: Working directory for file operations. Defaults to current directory.
        """
        super().__init__()

        self.config = config or ToolSuiteConfig()
        self.work_dir = Path(work_dir) if work_dir else Path.cwd()

        self._load_tools()

    def _load_tools(self) -> None:
        """Instantiate and register every tool enabled by the configuration."""
        enabled = self.config.get_enabled_tools()

        # Each group has its own loader so group-specific construction
        # arguments stay in one place.
        if "file" in enabled:
            self._load_file_tools(enabled["file"])
        if "shell" in enabled:
            self._load_shell_tools(enabled["shell"])
        if "web" in enabled:
            self._load_web_tools(enabled["web"])
        if "multiagent" in enabled:
            self._load_multiagent_tools(enabled["multiagent"])
        if "misc" in enabled:
            self._load_misc_tools(enabled["misc"])

    def _load_file_tools(self, tool_names: list[str]) -> None:
        """Load file operation tools."""
        cfg = self.config.file_tools

        if "ReadFile" in tool_names:
            self.add(
                ReadFile(
                    work_dir=self.work_dir,
                    max_lines=cfg.max_lines,
                    max_line_length=cfg.max_line_length,
                    max_bytes=cfg.max_bytes,
                )
            )

        if "WriteFile" in tool_names:
            self.add(
                WriteFile(
                    work_dir=self.work_dir, allow_outside_work_dir=cfg.allow_write_outside_work_dir
                )
            )

        if "StrReplaceFile" in tool_names:
            self.add(
                StrReplaceFile(
                    work_dir=self.work_dir, allow_outside_work_dir=cfg.allow_write_outside_work_dir
                )
            )

        if "Glob" in tool_names:
            self.add(Glob(work_dir=self.work_dir, max_matches=cfg.max_glob_matches))

        if "Grep" in tool_names:
            self.add(Grep(work_dir=self.work_dir))

        if "ReadMediaFile" in tool_names:
            self.add(ReadMediaFile(work_dir=self.work_dir))

    def _load_shell_tools(self, tool_names: list[str]) -> None:
        """Load shell execution tools."""
        cfg = self.config.shell_tools

        if "Shell" in tool_names:
            self.add(
                Shell(
                    timeout=cfg.timeout,
                    max_timeout=cfg.max_timeout,
                    blocked_commands=cfg.blocked_commands,
                )
            )

    def _load_web_tools(self, tool_names: list[str]) -> None:
        """Load web-related tools."""
        cfg = self.config.web_tools

        if "FetchURL" in tool_names:
            self.add(
                FetchURL(
                    timeout=cfg.timeout,
                    user_agent=cfg.user_agent,
                    max_content_length=cfg.max_content_length,
                )
            )

    def _load_multiagent_tools(self, tool_names: list[str]) -> None:
        """Load multi-agent tools."""
        # Multi-agent tools are intentionally disabled in this submodule
        # because nested subagents are not supported in subagent runtime.
        return

    def _load_misc_tools(self, tool_names: list[str]) -> None:
        """Load miscellaneous tools."""
        if "SetTodoList" in tool_names:
            self.add(SetTodoList())

        if "Think" in tool_names:
            self.add(Think())

    def reload(self, config: ToolSuiteConfig | None = None) -> None:
        """Reload tools with a new configuration.

        Args:
            config: New configuration. If None, reloads with current config.
        """
        # Explicit None check: never skip a provided config object just
        # because it might evaluate falsy.
        if config is not None:
            self.config = config

        # Clear existing tools, then rebuild from the (possibly new) config.
        self._tools.clear()
        self._load_tools()
|
||||
|
||||
|
||||
# Convenience exports: the public API of the tool-suite package.
__all__ = [
    # Toolset
    "ConfigurableToolset",
    # Config classes
    "ToolSuiteConfig",
    "FileToolsConfig",
    "ShellToolsConfig",
    "WebToolsConfig",
    "MultiAgentToolsConfig",
    "ToolGroupConfig",
    # Tools
    "ReadFile",
    "WriteFile",
    "StrReplaceFile",
    "Glob",
    "Grep",
    "ReadMediaFile",
    "Shell",
    "FetchURL",
    "SetTodoList",
    "Think",
]
|
||||
243
agentlite/src/agentlite/tools/config.py
Normal file
243
agentlite/src/agentlite/tools/config.py
Normal file
@@ -0,0 +1,243 @@
|
||||
"""Tool group configuration system for AgentLite.
|
||||
|
||||
This module provides configuration management for tool groups,
|
||||
allowing users to enable/disable specific tools.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class ToolGroupConfig(BaseModel):
    """Configuration for a group of tools.

    This configuration allows users to enable or disable specific tools
    within the tool group. All tools are enabled by default.

    Example:
        >>> config = ToolGroupConfig(
        ...     enabled=True,
        ...     tools={
        ...         "ReadFile": True,
        ...         "WriteFile": False,  # Disabled
        ...     },
        ... )
    """

    enabled: bool = Field(default=True, description="Whether the entire tool group is enabled")

    tools: dict[str, bool] = Field(
        default_factory=dict,
        description="Individual tool enable/disable settings. True=enabled, False=disabled. "
        "Tools not listed here follow the default behavior (enabled).",
    )

    default_tool_enabled: bool = Field(
        default=True, description="Default state for tools not explicitly listed in 'tools' dict"
    )

    def is_tool_enabled(self, tool_name: str) -> bool:
        """Check if a specific tool is enabled.

        Args:
            tool_name: The name of the tool to check

        Returns:
            True if the tool is enabled, False otherwise
        """
        # A disabled group overrides any per-tool setting.
        if not self.enabled:
            return False

        # Explicit per-tool setting wins over the group default.
        if tool_name in self.tools:
            return self.tools[tool_name]

        return self.default_tool_enabled

    def enable_tool(self, tool_name: str) -> None:
        """Enable a specific tool.

        Args:
            tool_name: The name of the tool to enable
        """
        # Delegate so all state changes go through one code path.
        self.set_tool_state(tool_name, True)

    def disable_tool(self, tool_name: str) -> None:
        """Disable a specific tool.

        Args:
            tool_name: The name of the tool to disable
        """
        self.set_tool_state(tool_name, False)

    def set_tool_state(self, tool_name: str, enabled: bool) -> None:
        """Set the enabled state of a specific tool.

        Args:
            tool_name: The name of the tool
            enabled: True to enable, False to disable
        """
        self.tools[tool_name] = enabled
|
||||
|
||||
|
||||
class FileToolsConfig(ToolGroupConfig):
    """Configuration for file operation tools (ReadFile, WriteFile, etc.)."""

    # Pagination cap for ReadFile (lines per read).
    max_lines: int = Field(
        default=1000, description="Maximum number of lines to read from a file", ge=1, le=10000
    )

    # Lines longer than this are handled by ReadFile's truncation logic.
    max_line_length: int = Field(
        default=2000, description="Maximum length of a single line", ge=100, le=10000
    )

    # Hard byte cap per read, independent of line count.
    max_bytes: int = Field(
        default=100 * 1024,  # 100KB
        description="Maximum bytes to read from a file",
        ge=1024,
        le=10 * 1024 * 1024,  # 10MB
    )

    # Safety default: write tools are confined to the working directory.
    allow_write_outside_work_dir: bool = Field(
        default=False, description="Allow writing files outside the working directory"
    )

    # Result cap passed to the Glob tool.
    max_glob_matches: int = Field(
        default=1000, description="Maximum number of glob matches to return", ge=1, le=10000
    )
|
||||
|
||||
|
||||
class ShellToolsConfig(ToolGroupConfig):
    """Configuration for shell execution tools."""

    # Default per-command timeout, in seconds.
    timeout: int = Field(
        default=60, description="Default timeout for shell commands in seconds", ge=1, le=3600
    )

    # Upper bound a caller may request for any single command.
    max_timeout: int = Field(
        default=300, description="Maximum allowed timeout for shell commands", ge=1, le=3600
    )

    # Commands matching these patterns are refused by the Shell tool.
    blocked_commands: list[str] = Field(
        default_factory=list, description="List of command patterns to block"
    )
|
||||
|
||||
|
||||
class WebToolsConfig(ToolGroupConfig):
    """Configuration for web-related tools (FetchURL)."""

    # HTTP request timeout, in seconds.
    timeout: int = Field(
        default=30, description="Timeout for HTTP requests in seconds", ge=1, le=300
    )

    # Sent verbatim as the User-Agent header on fetches.
    user_agent: str = Field(
        default="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
        description="User-Agent string for HTTP requests",
    )

    # Responses larger than this are truncated/rejected by the fetch tool.
    max_content_length: int = Field(
        default=1024 * 1024,  # 1MB
        description="Maximum content length to fetch",
        ge=1024,
        le=10 * 1024 * 1024,  # 10MB
    )
|
||||
|
||||
|
||||
class MultiAgentToolsConfig(ToolGroupConfig):
    """Configuration for multi-agent tools.

    Unlike other groups, this one is disabled by default: subagent runtimes
    do not support nesting further subagents.
    """

    # Overrides the base-class default (True).
    enabled: bool = Field(
        default=False, description="Whether multi-agent tools are enabled. Disabled by default for subagent mode."
    )

    # Step budget applied to each spawned subagent.
    max_steps: int = Field(
        default=50, description="Maximum steps for subagent execution", ge=1, le=1000
    )

    inherit_context: bool = Field(
        default=False, description="Whether subagents inherit parent context"
    )
|
||||
|
||||
|
||||
class ToolSuiteConfig(BaseModel):
    """Complete configuration for all tool groups.

    This is the main configuration class that aggregates all tool group configs.

    Example:
        >>> config = ToolSuiteConfig(
        ...     file_tools=FileToolsConfig(tools={"WriteFile": False}),
        ...     shell_tools=ShellToolsConfig(
        ...         enabled=False  # Disable all shell tools
        ...     ),
        ... )
    """

    file_tools: FileToolsConfig = Field(
        default_factory=FileToolsConfig, description="File operation tools configuration"
    )

    shell_tools: ShellToolsConfig = Field(
        default_factory=ShellToolsConfig, description="Shell execution tools configuration"
    )

    web_tools: WebToolsConfig = Field(
        default_factory=WebToolsConfig, description="Web-related tools configuration"
    )

    multiagent_tools: MultiAgentToolsConfig = Field(
        default_factory=MultiAgentToolsConfig, description="Multi-agent tools configuration"
    )

    misc_tools: ToolGroupConfig = Field(
        default_factory=ToolGroupConfig,
        description="Miscellaneous tools (todo, think, etc.) configuration",
    )

    def get_enabled_tools(self) -> dict[str, list[str]]:
        """Get a mapping of tool group names to their enabled tools.

        A group appears in the result only when it is enabled; its list
        contains the registered tool names that pass the group's per-tool
        filter (and may be empty).

        Returns:
            Dictionary mapping tool group names to lists of enabled tool names
        """
        # (group key, group config, tool names registered in that group) —
        # one table instead of five copies of the same filter logic.
        groups: list[tuple[str, ToolGroupConfig, list[str]]] = [
            (
                "file",
                self.file_tools,
                ["ReadFile", "WriteFile", "StrReplaceFile", "Glob", "Grep", "ReadMediaFile"],
            ),
            ("shell", self.shell_tools, ["Shell"]),
            ("web", self.web_tools, ["FetchURL"]),
            ("multiagent", self.multiagent_tools, ["Task", "CreateSubagent"]),
            ("misc", self.misc_tools, ["SetTodoList", "Think"]),
        ]

        result: dict[str, list[str]] = {}
        for group_name, cfg, tool_names in groups:
            if cfg.enabled:
                result[group_name] = [t for t in tool_names if cfg.is_tool_enabled(t)]
        return result
|
||||
20
agentlite/src/agentlite/tools/file/__init__.py
Normal file
20
agentlite/src/agentlite/tools/file/__init__.py
Normal file
@@ -0,0 +1,20 @@
|
||||
"""File operation tools for AgentLite.
|
||||
|
||||
This module provides tools for reading, writing, and manipulating files.
|
||||
"""
|
||||
|
||||
from agentlite.tools.file.read import ReadFile
|
||||
from agentlite.tools.file.write import WriteFile
|
||||
from agentlite.tools.file.replace import StrReplaceFile
|
||||
from agentlite.tools.file.glob import Glob
|
||||
from agentlite.tools.file.grep import Grep
|
||||
from agentlite.tools.file.read_media import ReadMediaFile
|
||||
|
||||
__all__ = [
|
||||
"ReadFile",
|
||||
"WriteFile",
|
||||
"StrReplaceFile",
|
||||
"Glob",
|
||||
"Grep",
|
||||
"ReadMediaFile",
|
||||
]
|
||||
154
agentlite/src/agentlite/tools/file/glob.py
Normal file
154
agentlite/src/agentlite/tools/file/glob.py
Normal file
@@ -0,0 +1,154 @@
|
||||
"""Glob tool for AgentLite.
|
||||
|
||||
This module provides a tool for searching files using glob patterns.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the Glob tool."""

    # The glob pattern; "**" enables recursive matching.
    pattern: str = Field(
        description="Glob pattern to match files/directories (e.g., '*.py', '**/*.txt')"
    )
    # When given, the Glob tool requires this to be absolute and inside
    # the working directory.
    directory: Optional[str] = Field(
        description=(
            "Absolute path to the directory to search in (defaults to working directory)."
        ),
        default=None,
    )
    # When False, only regular files are returned.
    include_dirs: bool = Field(
        description="Whether to include directories in results.",
        default=True,
    )
|
||||
|
||||
|
||||
class Glob(CallableTool2[Params]):
    """Tool for searching files using glob patterns.

    This tool finds files and directories matching a glob pattern.
    Supports recursive patterns with **.

    Example:
        >>> tool = Glob(work_dir=Path("/tmp"))
        >>> result = await tool({"pattern": "*.py"})
    """

    name: str = "Glob"
    description: str = (
        "Search for files and directories matching a glob pattern. "
        "Supports recursive patterns with **. "
        "Returns paths relative to the search directory."
    )
    params: type[Params] = Params

    def __init__(
        self,
        work_dir: Path,
        max_matches: int = 1000,
    ):
        """Initialize the Glob tool.

        Args:
            work_dir: The working directory for relative paths
            max_matches: Maximum number of matches to return
        """
        super().__init__()
        self._work_dir = work_dir
        self._max_matches = max_matches

    def _is_within_work_dir(self, path: Path) -> bool:
        """Check if a path is within the working directory."""
        try:
            path.relative_to(self._work_dir.resolve())
            return True
        except ValueError:
            return False

    async def __call__(self, params: Params) -> ToolResult:
        """Execute the glob search.

        Args:
            params: The search parameters

        Returns:
            ToolResult with matching paths or error
        """
        try:
            # Determine search directory
            if params.directory:
                # Validate absoluteness BEFORE resolve(): resolve() always
                # returns an absolute path, so checking afterwards is a no-op.
                raw_dir = Path(params.directory).expanduser()
                if not raw_dir.is_absolute():
                    return ToolError(
                        message=f"Directory must be an absolute path: {params.directory}",
                    )
                search_dir = raw_dir.resolve()
                # Security check: stay inside the working directory.
                if not self._is_within_work_dir(search_dir):
                    return ToolError(
                        message=(
                            f"Directory `{params.directory}` is outside the working directory. "
                            "You can only search within the working directory."
                        ),
                    )
            else:
                search_dir = self._work_dir

            # Check directory exists
            if not search_dir.exists():
                return ToolError(
                    message=f"Directory `{search_dir}` does not exist.",
                )

            if not search_dir.is_dir():
                return ToolError(
                    message=f"`{search_dir}` is not a directory.",
                )

            # Safety check: a root-level `**` pattern without an explicit
            # directory could walk huge trees (e.g. node_modules).
            if params.pattern.startswith("**") and not params.directory:
                return ToolError(
                    message=(
                        f"Pattern `{params.pattern}` starts with '**' which is not allowed "
                        "without specifying a directory. This would recursively search all "
                        "directories and may include large directories like `node_modules`. "
                        "Use a more specific pattern or provide a directory."
                    ),
                )

            # Perform glob search
            matches = list(search_dir.glob(params.pattern))

            # Filter directories if not requested
            if not params.include_dirs:
                matches = [p for p in matches if p.is_file()]

            # Sort for consistent output
            matches.sort()

            # Record the true total BEFORE truncation so the message does not
            # under-report when results are capped.
            total = len(matches)
            truncated = total > self._max_matches
            if truncated:
                matches = matches[: self._max_matches]

            # Format output (relative to search directory)
            output = "\n".join(str(p.relative_to(search_dir)) for p in matches)

            # Build message
            message = f"Found {total} matches for pattern `{params.pattern}`."
            if truncated:
                message += f" Only the first {self._max_matches} matches are returned."

            return ToolOk(output=output, message=message)

        except Exception as e:
            return ToolError(
                message=f"Failed to search for pattern `{params.pattern}`. Error: {e}",
            )
|
||||
303
agentlite/src/agentlite/tools/file/grep.py
Normal file
303
agentlite/src/agentlite/tools/file/grep.py
Normal file
@@ -0,0 +1,303 @@
|
||||
"""Grep tool for AgentLite.
|
||||
|
||||
This module provides a tool for searching file contents using regex patterns.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the Grep tool."""

    # Regex applied line-by-line to file contents.
    pattern: str = Field(
        description="The regular expression pattern to search for in file contents"
    )
    # "." means the tool's working directory; anything else must be absolute.
    path: str = Field(
        description=(
            "File or directory to search in. Defaults to current working directory. "
            "If specified, it must be an absolute path."
        ),
        default=".",
    )
    glob: Optional[str] = Field(
        description=(
            "Glob pattern to filter files (e.g. `*.py`, `*.{ts,tsx}`). No filter by default."
        ),
        default=None,
    )
    # NOTE(review): plain str, not a Literal — unknown values fall through to
    # `content`-style formatting at runtime.
    output_mode: str = Field(
        description=(
            "`content`: Show matching lines (supports `-B`, `-A`, `-C`, `-n`); "
            "`files_with_matches`: Show file paths; "
            "`count_matches`: Show total number of matches. "
            "Defaults to `files_with_matches`."
        ),
        default="files_with_matches",
    )
    before_context: Optional[int] = Field(
        description=(
            "Number of lines to show before each match (the `-B` option). "
            "Requires `output_mode` to be `content`."
        ),
        default=None,
    )
    after_context: Optional[int] = Field(
        description=(
            "Number of lines to show after each match (the `-A` option). "
            "Requires `output_mode` to be `content`."
        ),
        default=None,
    )
    # When set, takes precedence over before_context/after_context.
    context: Optional[int] = Field(
        description=(
            "Number of lines to show before and after each match (the `-C` option). "
            "Requires `output_mode` to be `content`."
        ),
        default=None,
    )
    line_number: bool = Field(
        description=(
            "Show line numbers in output (the `-n` option). Requires `output_mode` to be `content`."
        ),
        default=False,
    )
    ignore_case: bool = Field(
        description="Case insensitive search (the `-i` option).",
        default=False,
    )
|
||||
|
||||
|
||||
class Grep(CallableTool2[Params]):
    """Tool for searching file contents using regex patterns.

    This tool searches file contents for matches to a regex pattern.
    Supports various output modes and context options.

    Example:
        >>> tool = Grep(work_dir=Path("/tmp"))
        >>> result = await tool({"pattern": "def ", "glob": "*.py"})
    """

    name: str = "Grep"
    description: str = (
        "Search file contents using regular expressions. "
        "Supports various output modes and context options. "
        "Can search individual files or entire directories."
    )
    params: type[Params] = Params

    def __init__(
        self,
        work_dir: Path,
    ):
        """Initialize the Grep tool.

        Args:
            work_dir: The working directory
        """
        super().__init__()
        self._work_dir = work_dir

    def _is_within_work_dir(self, path: Path) -> bool:
        """Check if a path is within the working directory."""
        try:
            path.relative_to(self._work_dir.resolve())
            return True
        except ValueError:
            return False

    def _search_file(
        self,
        file_path: Path,
        pattern: re.Pattern,
    ) -> list[tuple[int, str]]:
        """Search a single file for matching lines.

        Args:
            file_path: Path to the file
            pattern: Compiled regex pattern

        Returns:
            List of (line_number, line_content) tuples; empty if unreadable.
        """
        try:
            content = file_path.read_text(encoding="utf-8", errors="replace")
        except Exception:
            # Unreadable files are skipped silently rather than failing the
            # whole search.
            return []

        lines = content.split("\n")
        matches = []

        for i, line in enumerate(lines, 1):
            if pattern.search(line):
                matches.append((i, line))

        return matches

    def _format_matches(
        self,
        matches: dict[Path, list[tuple[int, str]]],
        params: Params,
    ) -> str:
        """Format matches according to the requested output mode.

        Args:
            matches: Dict of file_path -> list of (line_num, line) tuples
            params: Output parameters

        Returns:
            Formatted output string
        """
        if params.output_mode == "files_with_matches":
            return "\n".join(str(p) for p in sorted(matches.keys()))

        if params.output_mode == "count_matches":
            total = sum(len(m) for m in matches.values())
            return f"Total matches: {total}"

        # content mode (also the fallback for any unrecognized mode)
        output_lines = []

        for file_path in sorted(matches.keys()):
            file_matches = matches[file_path]

            # Re-read the file so context lines around each match are available.
            try:
                content = file_path.read_text(encoding="utf-8", errors="replace")
                lines = content.split("\n")
            except Exception:
                continue

            # `-C` (context) takes precedence over `-B`/`-A` when given.
            before = params.context if params.context else params.before_context or 0
            after = params.context if params.context else params.after_context or 0

            # Union of line numbers to print — avoids duplicates when the
            # context windows of nearby matches overlap.
            included_lines = set()

            for match_line_num, _ in file_matches:
                start = max(1, match_line_num - before)
                end = min(len(lines), match_line_num + after)

                for i in range(start, end + 1):
                    included_lines.add(i)

            # Blank line between files, then a per-file header.
            if output_lines:
                output_lines.append("")
            output_lines.append(f"File: {file_path}")

            prev_line = 0
            for line_num in sorted(included_lines):
                # "--" separator marks a gap between context groups.
                if prev_line and line_num > prev_line + 1:
                    output_lines.append("--")

                line = lines[line_num - 1]
                prefix = f"{line_num}:" if params.line_number else ""
                output_lines.append(f"{prefix}{line}")
                prev_line = line_num

        return "\n".join(output_lines)

    async def __call__(self, params: Params) -> ToolResult:
        """Execute the grep search.

        Args:
            params: The search parameters

        Returns:
            ToolResult with search results or error
        """
        try:
            # Resolve path
            if params.path == ".":
                search_path = self._work_dir
            else:
                # Validate absoluteness BEFORE resolve(): resolve() always
                # returns an absolute path, so checking afterwards can never
                # fire. This enforces the documented "must be absolute" rule.
                raw_path = Path(params.path).expanduser()
                if not raw_path.is_absolute():
                    return ToolError(
                        message=f"Path must be an absolute path: {params.path}",
                    )
                search_path = raw_path.resolve()
                # Security check: stay inside the working directory.
                if not self._is_within_work_dir(search_path):
                    return ToolError(
                        message=(
                            f"Path `{params.path}` is outside the working directory. "
                            "You can only search within the working directory."
                        ),
                    )

            # Check path exists
            if not search_path.exists():
                return ToolError(
                    message=f"Path `{params.path}` does not exist.",
                )

            # Compile pattern
            flags = re.IGNORECASE if params.ignore_case else 0
            try:
                pattern = re.compile(params.pattern, flags)
            except re.error as e:
                return ToolError(
                    message=f"Invalid regex pattern: {e}",
                )

            # Find files to search
            if search_path.is_file():
                files = [search_path]
            elif params.glob:
                files = list(search_path.glob(params.glob))
            else:
                # Default: search all files recursively, skipping hidden
                # entries and common dependency/cache directories.
                files = [
                    p
                    for p in search_path.rglob("*")
                    if p.is_file()
                    and not any(
                        part.startswith(".") or part in ("node_modules", "__pycache__", ".git")
                        for part in p.parts
                    )
                ]

            # Keep regular files only (glob patterns may match directories).
            files = [p for p in files if p.is_file()]

            # Search files
            all_matches: dict[Path, list[tuple[int, str]]] = {}

            for file_path in files:
                matches = self._search_file(file_path, pattern)
                if matches:
                    all_matches[file_path] = matches

            # Format output
            output = self._format_matches(all_matches, params)

            # Build message
            total_files = len(all_matches)
            total_matches = sum(len(m) for m in all_matches.values())

            if params.output_mode == "files_with_matches":
                message = f"Found matches in {total_files} file(s)."
            elif params.output_mode == "count_matches":
                message = f"Found {total_matches} total match(es)."
            else:
                message = f"Found {total_matches} match(es) in {total_files} file(s)."

            return ToolOk(output=output, message=message)

        except Exception as e:
            return ToolError(
                message=f"Failed to search. Error: {e}",
            )
|
||||
208
agentlite/src/agentlite/tools/file/read.py
Normal file
208
agentlite/src/agentlite/tools/file/read.py
Normal file
@@ -0,0 +1,208 @@
|
||||
"""ReadFile tool for AgentLite.
|
||||
|
||||
This module provides a tool for reading text files with line numbers.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the ReadFile tool."""

    # Relative paths are resolved against the tool's working directory.
    path: str = Field(
        description=(
            "The path to the file to read. Absolute paths are required when reading files "
            "outside the working directory."
        )
    )
    # 1-based start line (ge=1); an offset past end-of-file yields an empty read.
    line_offset: int = Field(
        description=(
            "The line number to start reading from. "
            "By default read from the beginning of the file. "
            "Set this when the file is too large to read at once."
        ),
        default=1,
        ge=1,
    )
    # Caller-requested line count; the tool additionally clamps to its own
    # max_lines and max_bytes limits at read time.
    n_lines: int = Field(
        description=(
            "The number of lines to read. "
            "By default read up to max_lines lines. "
            "Set this value when the file is too large to read at once."
        ),
        default=1000,
        ge=1,
    )
|
||||
|
||||
|
||||
class ReadFile(CallableTool2[Params]):
    """Tool for reading text files with line numbers.

    This tool reads a text file and returns its contents with line numbers.
    It supports pagination for large files via ``line_offset``/``n_lines``
    and enforces per-line length and total byte limits.

    Example:
        >>> tool = ReadFile(work_dir=Path("/tmp"))
        >>> result = await tool({"path": "/tmp/test.txt"})
    """

    name: str = "ReadFile"
    description: str = (
        "Read a text file from the local filesystem. "
        "Returns the file content with line numbers. "
        "Supports reading specific line ranges for large files."
    )
    params: type[Params] = Params

    def __init__(
        self,
        work_dir: Path,
        max_lines: int = 1000,
        max_line_length: int = 2000,
        max_bytes: int = 100 * 1024,
    ):
        """Initialize the ReadFile tool.

        Args:
            work_dir: The working directory for relative paths
            max_lines: Maximum number of lines to read
            max_line_length: Maximum length of a single line
            max_bytes: Maximum bytes to read from a file
        """
        super().__init__()
        self._work_dir = work_dir
        self._max_lines = max_lines
        self._max_line_length = max_line_length
        self._max_bytes = max_bytes

    def _is_within_work_dir(self, path: Path) -> bool:
        """Check if a path is within the working directory."""
        try:
            path.relative_to(self._work_dir.resolve())
            return True
        except ValueError:
            return False

    async def __call__(self, params: Params) -> ToolResult:
        """Execute the read file operation.

        Args:
            params: The read parameters

        Returns:
            ToolResult with the file content or error
        """
        if not params.path:
            return ToolError(
                message="File path cannot be empty.",
            )

        try:
            # Resolve path: expand "~", anchor relative paths at work_dir.
            path = Path(params.path).expanduser()
            if not path.is_absolute():
                path = self._work_dir / path
            path = path.resolve()

            # Security check: if outside work_dir, must be absolute path
            if not self._is_within_work_dir(path) and not Path(params.path).is_absolute():
                return ToolError(
                    message=(
                        f"`{params.path}` is not an absolute path. "
                        "You must provide an absolute path to read a file "
                        "outside the working directory."
                    ),
                )

            # Check file exists
            if not path.exists():
                return ToolError(
                    message=f"`{params.path}` does not exist.",
                )

            if not path.is_file():
                return ToolError(
                    message=f"`{params.path}` is not a file.",
                )

            # Read file content. Strict decoding (no errors="replace") so
            # that non-UTF-8 content actually raises UnicodeDecodeError and
            # the binary-file error below is reachable.
            try:
                content = path.read_text(encoding="utf-8")
            except UnicodeDecodeError:
                return ToolError(
                    message=f"`{params.path}` appears to be a binary file and cannot be read as text.",
                )

            # Split into lines
            lines = content.split("\n")

            # Apply line offset (line_offset is 1-based)
            start_idx = params.line_offset - 1
            if start_idx >= len(lines):
                return ToolOk(
                    output="",
                    message=f"Line offset {params.line_offset} exceeds file length ({len(lines)} lines).",
                )

            # Calculate end index: caller's n_lines, clamped to the file end
            # and to the tool's own max_lines limit.
            end_idx = min(start_idx + params.n_lines, len(lines))
            end_idx = min(end_idx, start_idx + self._max_lines)

            # Extract lines
            selected_lines = lines[start_idx:end_idx]

            # Truncate long lines and enforce the total byte budget.
            truncated_lines = []
            truncated_line_numbers = []
            total_bytes = 0
            max_bytes_reached = False

            for i, line in enumerate(selected_lines):
                line_num = start_idx + i + 1
                was_truncated = len(line) > self._max_line_length
                if was_truncated:
                    line = line[: self._max_line_length]

                # Check bytes limit before emitting the line, so a line that
                # is dropped here is never reported as truncated output.
                line_bytes = len(line.encode("utf-8"))
                if total_bytes + line_bytes > self._max_bytes:
                    max_bytes_reached = True
                    break

                if was_truncated:
                    truncated_line_numbers.append(line_num)
                total_bytes += line_bytes
                truncated_lines.append(line)

            # Format with line numbers (right-aligned, tab-separated)
            lines_with_no = []
            for line_num, line in enumerate(truncated_lines, start=start_idx + 1):
                lines_with_no.append(f"{line_num:6d}\t{line}")

            # Build result
            output = "\n".join(lines_with_no)
            message = (
                f"{len(truncated_lines)} lines read from file starting from line {start_idx + 1}."
            )

            if max_bytes_reached:
                message += f" Max {self._max_bytes} bytes reached."
            elif end_idx < len(lines):
                message += f" File has {len(lines)} lines total."

            if truncated_line_numbers:
                message += f" Lines {truncated_line_numbers} were truncated."

            return ToolOk(output=output, message=message)

        except Exception as e:
            return ToolError(
                message=f"Failed to read {params.path}. Error: {e}",
            )
|
||||
183
agentlite/src/agentlite/tools/file/read_media.py
Normal file
183
agentlite/src/agentlite/tools/file/read_media.py
Normal file
@@ -0,0 +1,183 @@
|
||||
"""ReadMediaFile tool for AgentLite.
|
||||
|
||||
This module provides a tool for reading image and video files.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
|
||||
import base64
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the ReadMediaFile tool."""

    # Relative paths are resolved against the tool's working directory.
    path: str = Field(
        description=(
            "The path to the media file to read. "
            "Absolute paths are required when reading files outside the working directory."
        )
    )
|
||||
|
||||
|
||||
class ReadMediaFile(CallableTool2[Params]):
    """Tool for reading image and video files.

    This tool reads media files and returns them as base64-encoded data URLs.
    Supports images (PNG, JPEG, GIF, etc.) and videos.

    Detection is purely extension-based; file contents are never sniffed, so
    a misnamed file will be encoded with the extension's MIME type.

    Example:
        >>> tool = ReadMediaFile(work_dir=Path("/tmp"))
        >>> result = await tool({"path": "image.png"})
    """

    name: str = "ReadMediaFile"
    description: str = (
        "Read an image or video file and return it as a base64-encoded data URL. "
        "Supported formats: PNG, JPEG, GIF, WebP, MP4, WebM, and others. "
        "Maximum file size: 100MB."
    )
    params: type[Params] = Params

    # Supported media types (lowercase extensions; suffix is lowercased
    # before lookup, so "IMG.PNG" matches)
    IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".webp", ".bmp", ".svg"}
    VIDEO_EXTENSIONS = {".mp4", ".webm", ".mov", ".avi", ".mkv"}

    # MIME type mapping; every extension above must have an entry here, or
    # the MIME lookup in __call__ will fail after the extension check passes
    MIME_TYPES = {
        ".png": "image/png",
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".gif": "image/gif",
        ".webp": "image/webp",
        ".bmp": "image/bmp",
        ".svg": "image/svg+xml",
        ".mp4": "video/mp4",
        ".webm": "video/webm",
        ".mov": "video/quicktime",
        ".avi": "video/x-msvideo",
        ".mkv": "video/x-matroska",
    }

    def __init__(
        self,
        work_dir: Path,
        max_size_mb: int = 100,
    ):
        """Initialize the ReadMediaFile tool.

        Args:
            work_dir: The working directory for relative paths
            max_size_mb: Maximum file size in MB
        """
        super().__init__()
        self._work_dir = work_dir
        # Stored in bytes for direct comparison with st_size.
        self._max_size = max_size_mb * 1024 * 1024

    def _is_within_work_dir(self, path: Path) -> bool:
        """Check if a path is within the working directory."""
        try:
            path.relative_to(self._work_dir.resolve())
            return True
        except ValueError:
            return False

    def _get_mime_type(self, path: Path) -> Optional[str]:
        """Get MIME type for a file based on extension (None if unknown)."""
        ext = path.suffix.lower()
        return self.MIME_TYPES.get(ext)

    def _is_media_file(self, path: Path) -> bool:
        """Check if a file is a supported media file (by extension only)."""
        ext = path.suffix.lower()
        return ext in self.IMAGE_EXTENSIONS or ext in self.VIDEO_EXTENSIONS
    async def __call__(self, params: Params) -> ToolResult:
        """Execute the read media operation.

        Args:
            params: The read parameters

        Returns:
            ToolResult with base64 data URL or error
        """
        if not params.path:
            return ToolError(
                message="File path cannot be empty.",
            )

        try:
            # Resolve path: expand "~", anchor relative paths at work_dir.
            path = Path(params.path).expanduser()
            if not path.is_absolute():
                path = self._work_dir / path
            path = path.resolve()

            # Security check: files outside work_dir require an absolute path
            if not self._is_within_work_dir(path) and not Path(params.path).is_absolute():
                return ToolError(
                    message=(
                        f"`{params.path}` is not an absolute path. "
                        "You must provide an absolute path to read a file "
                        "outside the working directory."
                    ),
                )

            # Check file exists
            if not path.exists():
                return ToolError(
                    message=f"`{params.path}` does not exist.",
                )

            if not path.is_file():
                return ToolError(
                    message=f"`{params.path}` is not a file.",
                )

            # Check it's a media file
            if not self._is_media_file(path):
                return ToolError(
                    message=(
                        f"`{params.path}` is not a supported media file. "
                        f"Supported extensions: "
                        f"{', '.join(sorted(self.IMAGE_EXTENSIONS | self.VIDEO_EXTENSIONS))}"
                    ),
                )

            # Check file size before reading to avoid loading huge files
            file_size = path.stat().st_size
            if file_size > self._max_size:
                return ToolError(
                    message=(
                        f"`{params.path}` is too large ({file_size / 1024 / 1024:.1f}MB). "
                        f"Maximum size is {self._max_size / 1024 / 1024:.0f}MB."
                    ),
                )

            # Get MIME type (defensive: _is_media_file passed, so this only
            # fails if IMAGE/VIDEO_EXTENSIONS and MIME_TYPES drift apart)
            mime_type = self._get_mime_type(path)
            if not mime_type:
                return ToolError(
                    message=f"Could not determine MIME type for `{params.path}`.",
                )

            # Read and encode file as an RFC 2397 data URL.
            # NOTE(review): base64 inflates the payload by ~33%; for a 100MB
            # file the output string is ~133MB — confirm callers expect this.
            data = path.read_bytes()
            encoded = base64.b64encode(data).decode("ascii")
            data_url = f"data:{mime_type};base64,{encoded}"

            return ToolOk(
                output=data_url,
                # mime_type.split('/')[0] yields "image" or "video"
                message=(
                    f"Loaded {mime_type.split('/')[0]} file `{params.path}` ({file_size} bytes)."
                ),
            )

        except Exception as e:
            return ToolError(
                message=f"Failed to read {params.path}. Error: {e}",
            )
|
||||
189
agentlite/src/agentlite/tools/file/replace.py
Normal file
189
agentlite/src/agentlite/tools/file/replace.py
Normal file
@@ -0,0 +1,189 @@
|
||||
"""StrReplaceFile tool for AgentLite.
|
||||
|
||||
This module provides a tool for editing files using string replacement.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Edit(BaseModel):
    """A single edit operation."""

    # Must match the file content exactly, including whitespace and newlines.
    old: str = Field(description="The old string to replace. Can be multi-line.")
    new: str = Field(description="The new string to replace with. Can be multi-line.")
    # When False, only the first occurrence of `old` is replaced.
    replace_all: bool = Field(
        description="Whether to replace all occurrences.",
        default=False,
    )
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the StrReplaceFile tool."""

    # Relative paths are resolved against the tool's working directory.
    path: str = Field(
        description=(
            "The path to the file to edit. Absolute paths are required when editing files "
            "outside the working directory."
        )
    )
    # A single Edit or a list of Edits; a list is applied in order, each edit
    # operating on the output of the previous one.
    edit: Union[Edit, list[Edit]] = Field(
        description=(
            "The edit(s) to apply to the file. "
            "You can provide a single edit or a list of edits here."
        ),
    )
|
||||
|
||||
|
||||
class StrReplaceFile(CallableTool2[Params]):
    """Tool for editing files using string replacement.

    This tool replaces strings in a file. It can perform single or multiple
    replacements, and optionally replace all occurrences.

    Example:
        >>> tool = StrReplaceFile(work_dir=Path("/tmp"))
        >>> result = await tool({"path": "test.txt", "edit": {"old": "Hello", "new": "Hi"}})
    """

    name: str = "StrReplaceFile"
    description: str = (
        "Edit a file by replacing strings. "
        "Supports single or multiple edits, and can replace all occurrences. "
        "The old string must match exactly (including whitespace)."
    )
    params: type[Params] = Params

    def __init__(
        self,
        work_dir: Path,
        allow_outside_work_dir: bool = False,
    ):
        """Initialize the StrReplaceFile tool.

        Args:
            work_dir: The working directory for relative paths
            allow_outside_work_dir: Whether to allow editing outside the working directory
        """
        super().__init__()
        self._work_dir = work_dir
        self._allow_outside_work_dir = allow_outside_work_dir

    def _is_within_work_dir(self, path: Path) -> bool:
        """Check if a path is within the working directory."""
        try:
            path.relative_to(self._work_dir.resolve())
            return True
        except ValueError:
            return False

    def _apply_edit(self, content: str, edit: Edit) -> tuple[str, int]:
        """Apply a single edit to the content.

        Args:
            content: The original content
            edit: The edit to apply

        Returns:
            Tuple of (new_content, replacements_count)
        """
        if edit.replace_all:
            # Count first so the replacement total can be reported.
            count = content.count(edit.old)
            new_content = content.replace(edit.old, edit.new)
            return new_content, count
        else:
            # NOTE(review): replaces the first occurrence without checking
            # uniqueness — if `old` appears multiple times, later matches are
            # silently left as-is. Confirm that is the intended contract.
            if edit.old in content:
                new_content = content.replace(edit.old, edit.new, 1)
                return new_content, 1
            return content, 0

    async def __call__(self, params: Params) -> ToolResult:
        """Execute the string replacement operation.

        Args:
            params: The edit parameters

        Returns:
            ToolResult with success message or error
        """
        if not params.path:
            return ToolError(
                message="File path cannot be empty.",
            )

        try:
            # Resolve path: expand "~", anchor relative paths at work_dir.
            path = Path(params.path).expanduser()
            if not path.is_absolute():
                path = self._work_dir / path
            path = path.resolve()

            # Security check: outside work_dir requires an absolute path AND
            # the allow_outside_work_dir flag.
            if not self._is_within_work_dir(path):
                if not Path(params.path).is_absolute():
                    return ToolError(
                        message=(
                            f"`{params.path}` is not an absolute path. "
                            "You must provide an absolute path to edit a file "
                            "outside the working directory."
                        ),
                    )
                if not self._allow_outside_work_dir:
                    return ToolError(
                        message=(
                            f"Editing outside the working directory is not allowed. "
                            f"Path: {params.path}"
                        ),
                    )

            # Check file exists
            if not path.exists():
                return ToolError(
                    message=f"`{params.path}` does not exist.",
                )

            if not path.is_file():
                return ToolError(
                    message=f"`{params.path}` is not a file.",
                )

            # Read file content.
            # NOTE(review): errors="replace" substitutes invalid UTF-8 bytes,
            # and the write below persists those substitutions — editing a
            # file with invalid UTF-8 is lossy. Confirm this is acceptable.
            content = path.read_text(encoding="utf-8", errors="replace")
            original_content = content

            # Normalize edits to list (params.edit may be one Edit or many)
            edits = [params.edit] if isinstance(params.edit, Edit) else params.edit

            # Apply edits sequentially; each edit sees the previous result.
            total_replacements = 0
            for edit in edits:
                content, count = self._apply_edit(content, edit)
                total_replacements += count

            # Check if any changes were made.
            # NOTE(review): this also triggers when old == new (a replacement
            # happened but the text is unchanged), in which case the message
            # below is misleading.
            if content == original_content:
                return ToolError(
                    message="No replacements were made. The old string was not found in the file.",
                )

            # Write back (full-file rewrite, not atomic)
            path.write_text(content, encoding="utf-8")

            return ToolOk(
                output="",
                message=(
                    f"File successfully edited. "
                    f"Applied {len(edits)} edit(s) with {total_replacements} total replacement(s)."
                ),
            )

        except Exception as e:
            return ToolError(
                message=f"Failed to edit {params.path}. Error: {e}",
            )
|
||||
157
agentlite/src/agentlite/tools/file/write.py
Normal file
157
agentlite/src/agentlite/tools/file/write.py
Normal file
@@ -0,0 +1,157 @@
|
||||
"""WriteFile tool for AgentLite.
|
||||
|
||||
This module provides a tool for writing files to the filesystem.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from typing import Literal
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the WriteFile tool."""

    # Relative paths are resolved against the tool's working directory.
    path: str = Field(
        description=(
            "The path to the file to write. Absolute paths are required when writing files "
            "outside the working directory."
        )
    )
    # Written verbatim as UTF-8; no trailing newline is added.
    content: str = Field(description="The content to write to the file")
    # Validated by the Literal type; the tool re-checks defensively at runtime.
    mode: Literal["overwrite", "append"] = Field(
        description=(
            "The mode to use to write to the file. "
            "Two modes are supported: `overwrite` for overwriting the whole file and "
            "`append` for appending to the end of an existing file."
        ),
        default="overwrite",
    )
|
||||
|
||||
|
||||
class WriteFile(CallableTool2[Params]):
    """Tool for writing files to the filesystem.

    This tool writes content to a file, either overwriting or appending.
    The parent directory must already exist; it is not created.

    Example:
        >>> tool = WriteFile(work_dir=Path("/tmp"))
        >>> result = await tool({"path": "test.txt", "content": "Hello World"})
    """

    name: str = "WriteFile"
    description: str = (
        "Write content to a file on the local filesystem. "
        "Can create new files or overwrite/append to existing files."
    )
    params: type[Params] = Params

    def __init__(
        self,
        work_dir: Path,
        allow_outside_work_dir: bool = False,
    ):
        """Initialize the WriteFile tool.

        Args:
            work_dir: The working directory for relative paths
            allow_outside_work_dir: Whether to allow writing outside the working directory
        """
        super().__init__()
        self._work_dir = work_dir
        self._allow_outside_work_dir = allow_outside_work_dir

    def _is_within_work_dir(self, path: Path) -> bool:
        """Check if a path is within the working directory."""
        try:
            path.relative_to(self._work_dir.resolve())
            return True
        except ValueError:
            return False
    async def __call__(self, params: Params) -> ToolResult:
        """Execute the write file operation.

        Args:
            params: The write parameters

        Returns:
            ToolResult with success message or error
        """
        if not params.path:
            return ToolError(
                message="File path cannot be empty.",
            )

        try:
            # Resolve path: expand "~", anchor relative paths at work_dir.
            path = Path(params.path).expanduser()
            if not path.is_absolute():
                path = self._work_dir / path
            path = path.resolve()

            # Security check: outside work_dir requires an absolute path AND
            # the allow_outside_work_dir flag.
            if not self._is_within_work_dir(path):
                if not Path(params.path).is_absolute():
                    return ToolError(
                        message=(
                            f"`{params.path}` is not an absolute path. "
                            "You must provide an absolute path to write a file "
                            "outside the working directory."
                        ),
                    )
                if not self._allow_outside_work_dir:
                    return ToolError(
                        message=(
                            f"Writing outside the working directory is not allowed. "
                            f"Path: {params.path}"
                        ),
                    )

            # Check parent directory exists (no mkdir -p behavior)
            if not path.parent.exists():
                return ToolError(
                    message=f"Parent directory `{path.parent}` does not exist.",
                )

            # Check valid mode (defensive: the Literal annotation on Params
            # already rejects other values at validation time)
            if params.mode not in ("overwrite", "append"):
                return ToolError(
                    message=f"Invalid mode: {params.mode}. Must be 'overwrite' or 'append'.",
                )

            # Check if file exists; read old content for append mode.
            # NOTE(review): read-then-write is not atomic — a concurrent
            # writer between these steps can be clobbered.
            file_existed = path.exists()
            old_content = ""
            if file_existed and path.is_file():
                old_content = path.read_text(encoding="utf-8", errors="replace")

            # Calculate new content
            if params.mode == "append" and file_existed:
                new_content = old_content + params.content
            else:
                new_content = params.content

            # Write file
            path.write_text(new_content, encoding="utf-8")

            # Build success message: "created" covers both new files and
            # append mode on a non-existent file.
            action = (
                "overwritten"
                if params.mode == "overwrite" and file_existed
                else ("appended to" if params.mode == "append" and file_existed else "created")
            )
            file_size = path.stat().st_size

            return ToolOk(
                output="",
                message=f"File `{params.path}` successfully {action}. Size: {file_size} bytes.",
            )

        except Exception as e:
            return ToolError(
                message=f"Failed to write to {params.path}. Error: {e}",
            )
|
||||
9
agentlite/src/agentlite/tools/misc/__init__.py
Normal file
9
agentlite/src/agentlite/tools/misc/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
"""Miscellaneous tools for AgentLite.
|
||||
|
||||
This module provides utility tools like todo lists and thinking.
|
||||
"""
|
||||
|
||||
from agentlite.tools.misc.todo import SetTodoList
|
||||
from agentlite.tools.misc.think import Think
|
||||
|
||||
__all__ = ["SetTodoList", "Think"]
|
||||
69
agentlite/src/agentlite/tools/misc/think.py
Normal file
69
agentlite/src/agentlite/tools/misc/think.py
Normal file
@@ -0,0 +1,69 @@
|
||||
"""Think tool for AgentLite.
|
||||
|
||||
This module provides a tool for recording thoughts.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the Think tool."""

    # Free-form text; appended verbatim to the tool's thought log.
    thought: str = Field(description="A thought to record")
|
||||
|
||||
|
||||
class Think(CallableTool2[Params]):
    """A scratchpad tool that lets the agent log its reasoning.

    Each invocation appends the supplied thought to an in-process list.
    Nothing is shown to the user; the log exists so developers can inspect
    (via :meth:`get_thoughts`) how the agent reasoned about a task.

    Example:
        >>> tool = Think()
        >>> result = await tool({"thought": "I should first check if the file exists..."})
    """

    name: str = "Think"
    description: str = (
        "Record a thought or reasoning step. "
        "Use this to think through problems before taking action. "
        "The thought will be logged but not returned to the user."
    )
    params: type[Params] = Params

    def __init__(self):
        """Set up an empty thought log."""
        super().__init__()
        self._thoughts: list[str] = []

    async def __call__(self, params: Params) -> ToolResult:
        """Append *params.thought* to the log and acknowledge it.

        Args:
            params: The thought parameters

        Returns:
            ToolResult with success message
        """
        self._thoughts.append(params.thought)
        count = len(self._thoughts)
        return ToolOk(
            output="",
            message=f"Thought recorded ({count} total thoughts)",
        )

    def get_thoughts(self) -> list[str]:
        """Return a shallow copy of every thought recorded so far."""
        return list(self._thoughts)

    def clear_thoughts(self) -> None:
        """Discard the entire thought log."""
        self._thoughts.clear()
|
||||
101
agentlite/src/agentlite/tools/misc/todo.py
Normal file
101
agentlite/src/agentlite/tools/misc/todo.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""SetTodoList tool for AgentLite.
|
||||
|
||||
This module provides a tool for managing todo lists.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from typing import Literal
|
||||
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Todo(BaseModel):
    """A single todo item."""

    # Non-empty, human-readable task label.
    title: str = Field(description="The title of the todo", min_length=1)
    # Closed status set; other values are rejected at validation time.
    status: Literal["pending", "in_progress", "done"] = Field(description="The status of the todo")
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the SetTodoList tool."""

    # Replaces the stored list wholesale; there is no incremental update.
    todos: list[Todo] = Field(description="The todo list to set")
|
||||
|
||||
|
||||
class SetTodoList(CallableTool2[Params]):
    """A tool that maintains the agent's task checklist.

    Every call replaces the stored list wholesale with the supplied todos,
    then renders a human-readable summary (one emoji-prefixed line per item)
    plus per-status counts.

    Example:
        >>> tool = SetTodoList()
        >>> result = await tool(
        ...     {
        ...         "todos": [
        ...             {"title": "Read docs", "status": "done"},
        ...             {"title": "Write code", "status": "in_progress"},
        ...         ]
        ...     }
        ... )
    """

    name: str = "SetTodoList"
    description: str = (
        "Set or update the todo list. "
        "Use this to track tasks and show progress. "
        "Each todo has a title and status (pending/in_progress/done)."
    )
    params: type[Params] = Params

    def __init__(self):
        """Start with an empty todo list."""
        super().__init__()
        self._todos: list[Todo] = []

    async def __call__(self, params: Params) -> ToolResult:
        """Replace the stored todo list and return a rendered summary.

        Args:
            params: The todo list parameters

        Returns:
            ToolResult with success message
        """
        self._todos = params.todos

        # Render one line per item, prefixed with a status emoji.
        icon_for = {
            "pending": "⏳",
            "in_progress": "🔨",
            "done": "✅",
        }
        rendered = [f"{icon_for.get(item.status, '❓')} {item.title}" for item in self._todos]
        output = "\n".join(rendered) if rendered else "No todos."

        # Tally items per status for the summary message.
        counts = {
            status: sum(1 for item in self._todos if item.status == status)
            for status in ("pending", "in_progress", "done")
        }

        message = (
            f"Todo list updated: {len(self._todos)} items "
            f"({counts['done']} done, {counts['in_progress']} in progress, "
            f"{counts['pending']} pending)"
        )

        return ToolOk(output=output, message=message)

    def get_todos(self) -> list[Todo]:
        """Return a shallow copy of the current todo list."""
        return list(self._todos)
|
||||
9
agentlite/src/agentlite/tools/multiagent/__init__.py
Normal file
9
agentlite/src/agentlite/tools/multiagent/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
"""Multi-agent tools for AgentLite.
|
||||
|
||||
This module provides tools for creating and managing subagents.
|
||||
"""
|
||||
|
||||
from agentlite.tools.multiagent.task import Task
|
||||
from agentlite.tools.multiagent.create import CreateSubagent
|
||||
|
||||
# Export the tools imported above so `from agentlite.tools.multiagent import *`
# and static analyzers see them as the package's public API (matches the
# convention of the sibling misc/__init__.py, which exports what it imports).
__all__ = ["Task", "CreateSubagent"]
|
||||
59
agentlite/src/agentlite/tools/multiagent/create.py
Normal file
59
agentlite/src/agentlite/tools/multiagent/create.py
Normal file
@@ -0,0 +1,59 @@
|
||||
"""CreateSubagent tool for AgentLite.
|
||||
|
||||
This module provides a tool for dynamically creating subagents.
|
||||
|
||||
In this rdev subagent integration, nested subagents are intentionally
|
||||
disabled. The tool is kept for API compatibility but it intentionally
|
||||
returns an explicit disabled error.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the CreateSubagent tool."""

    # Identifier the subagent would be registered under; echoed back in the
    # disabled-error message.
    name: str = Field(description="The name of the subagent to create")
    prompt: str = Field(
        description=(
            "The system prompt for the subagent. "
            "This defines the subagent's personality and capabilities."
        ),
    )
|
||||
|
||||
|
||||
class CreateSubagent(CallableTool2[Params]):
    """Disabled stub for dynamic subagent creation.

    The tool's schema (name, description, params) is preserved so existing
    integrations keep working, but every invocation returns a ToolError:
    nested subagents are intentionally disabled in this runtime.

    Example:
        >>> tool = CreateSubagent()
        >>> result = await tool({"name": "researcher", "prompt": "You are a research assistant..."})
    """

    name: str = "CreateSubagent"
    description: str = (
        "Create a new subagent with a custom system prompt. "
        "The subagent can be used to perform specialized tasks. "
        "Use the Task tool to run tasks with created subagents."
    )
    params: type[Params] = Params

    def __init__(self):
        """No state to set up; defer to the base class."""
        super().__init__()

    async def __call__(self, params: Params) -> ToolResult:
        """Refuse to create nested subagents."""
        detail = (
            "CreateSubagent tool is disabled in this subagent runtime. "
            f"Dynamic subagent creation is not allowed (requested '{params.name}')."
        )
        return ToolError(message=detail)
|
||||
99
agentlite/src/agentlite/tools/multiagent/task.py
Normal file
99
agentlite/src/agentlite/tools/multiagent/task.py
Normal file
@@ -0,0 +1,99 @@
|
||||
"""Task tool for AgentLite.
|
||||
|
||||
This module provides a tool for delegating tasks to subagents.
|
||||
|
||||
In this rdev subagent integration, nested subagents are intentionally
|
||||
disabled. The tool is kept for API compatibility but no longer executes
|
||||
delegation.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolResult
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from agentlite.agent import Agent
|
||||
from agentlite.labor_market import LaborMarket
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the Task tool.

    Validated by pydantic before the tool body runs; invalid payloads are
    rejected at the schema layer.
    """

    # Must name a subagent registered with the parent's labor market.
    subagent_name: str = Field(description="The name of the subagent to call (must be registered)")
    # The subagent receives only this prompt — include all needed context.
    prompt: str = Field(
        description=(
            "The task for the subagent to perform. "
            "Provide detailed instructions with all necessary context."
        ),
    )
    # Optional; intended for logging only, not passed as task content.
    description: str = Field(
        default="",
        description="A short (3-5 word) description of the task (for logging)",
    )
|
||||
|
||||
|
||||
class Task(CallableTool2[Params]):
    """Tool stub for delegating tasks to subagents.

    In this subagent runtime, nested delegation is intentionally disabled:
    ``__call__`` always returns a ``ToolError`` and never runs a subagent.
    The constructor still resolves and stores its labor market so the class
    stays API-compatible with runtimes where delegation is enabled.

    Example:
        >>> tool = Task(parent_agent=parent)
        >>> result = await tool(
        ...     {
        ...         "subagent_name": "coder",
        ...         "prompt": "Write a Python function to sort a list",
        ...         "description": "Write sorting function",
        ...     }
        ... )
        >>> # result is a ToolError: nested delegation is not allowed here
    """

    # Tool metadata advertised to the model; schema derives from ``params``.
    name: str = "Task"
    description: str = (
        "Delegate a task to a specialized subagent. "
        "The subagent must be registered in the parent agent's labor market. "
        "The subagent will execute independently and return its findings."
    )
    params: type[Params] = Params

    def __init__(
        self,
        labor_market: LaborMarket | None = None,
        parent_agent: Agent | None = None,
        max_iterations: int = 80,
    ):
        """Initialize the Task tool.

        Args:
            labor_market: The LaborMarket containing subagents.
            parent_agent: Alternative: the parent agent (uses its labor_market).
            max_iterations: Maximum iterations for subagent execution. Kept
                for compatibility; unused while delegation is disabled.

        Raises:
            ValueError: If neither labor_market nor parent_agent is provided.
        """
        super().__init__()

        # Prefer an explicitly supplied labor market; otherwise borrow the
        # parent agent's. One of the two is required even though execution
        # is disabled, to keep construction errors consistent across runtimes.
        if labor_market is not None:
            self._labor_market = labor_market
        elif parent_agent is not None:
            self._labor_market = parent_agent.labor_market
        else:
            raise ValueError("Either labor_market or parent_agent must be provided")

        self._max_iterations = max_iterations

    async def __call__(self, params: Params) -> ToolResult:
        """Refuse to execute nested subagent delegation.

        Args:
            params: Validated delegation request.

        Returns:
            ToolError stating that nested delegation is not allowed in this
            runtime; the requested subagent name is echoed for diagnostics.
        """
        return ToolError(
            message=(
                "Task tool is disabled in this subagent runtime. "
                f"Nested subagent delegation is not allowed (requested '{params.subagent_name}')."
            ),
        )
|
||||
8
agentlite/src/agentlite/tools/shell/__init__.py
Normal file
8
agentlite/src/agentlite/tools/shell/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""Shell tools for AgentLite.
|
||||
|
||||
This module provides tools for executing shell commands.
|
||||
"""
|
||||
|
||||
from agentlite.tools.shell.shell import Shell
|
||||
|
||||
__all__ = ["Shell"]
|
||||
166
agentlite/src/agentlite/tools/shell/shell.py
Normal file
166
agentlite/src/agentlite/tools/shell/shell.py
Normal file
@@ -0,0 +1,166 @@
|
||||
"""Shell tool for AgentLite.
|
||||
|
||||
This module provides a tool for executing shell commands.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from typing import Optional
|
||||
|
||||
import asyncio
|
||||
import platform
|
||||
import shlex
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the Shell tool."""

    command: str = Field(description="The shell command to execute.")
    # Schema-bounded to 1..3600 s; the Shell tool additionally clamps the
    # effective value to its own configured max_timeout at call time.
    timeout: int = Field(
        description=(
            "The timeout in seconds for the command to execute. "
            "If the command takes longer than this, it will be killed."
        ),
        default=60,
        ge=1,
        le=3600,
    )
|
||||
|
||||
|
||||
class Shell(CallableTool2[Params]):
    """Tool for executing shell commands.

    Runs the command through ``bash -c`` on Unix-like systems or
    ``powershell -Command`` on Windows and returns the combined
    stdout/stderr. Supports a per-call timeout (clamped to ``max_timeout``)
    and simple substring-based command blocking.

    Example:
        >>> tool = Shell()
        >>> result = await tool({"command": "ls -la"})
    """

    # Tool metadata advertised to the model; schema derives from ``params``.
    name: str = "Shell"
    description: str = (
        "Execute a shell command and return its output. "
        "Supports bash on Unix/Linux/macOS and PowerShell on Windows. "
        "Use with caution - commands are executed with user permissions."
    )
    params: type[Params] = Params

    def __init__(
        self,
        timeout: int = 60,
        max_timeout: int = 300,
        blocked_commands: Optional[list[str]] = None,
    ):
        """Initialize the Shell tool.

        Args:
            timeout: Default timeout in seconds.
                NOTE(review): stored but never consulted — the effective
                timeout comes from ``Params.timeout`` (default 60). Confirm
                whether this should feed the per-call default.
            max_timeout: Maximum allowed timeout; per-call requests are
                clamped to this value.
            blocked_commands: List of substrings; any command containing one
                (case-insensitive) is rejected before execution.
        """
        super().__init__()
        self._default_timeout = timeout
        self._max_timeout = max_timeout
        self._blocked_commands = blocked_commands or []
        # Detected once; decides which shell launcher __call__ uses.
        self._is_windows = platform.system() == "Windows"

    def _is_blocked(self, command: str) -> Optional[str]:
        """Check if a command matches a blocked pattern.

        Args:
            command: The command to check.

        Returns:
            A human-readable block reason if blocked, ``None`` otherwise.
        """
        cmd_lower = command.lower().strip()

        # Case-insensitive substring match — intentionally coarse; this is a
        # safety net, not a sandbox (e.g. blocking "rm" also blocks "rmdir").
        for blocked in self._blocked_commands:
            if blocked.lower() in cmd_lower:
                return f"Command contains blocked pattern: {blocked}"

        return None

    async def __call__(self, params: Params) -> ToolResult:
        """Execute the shell command.

        Args:
            params: The command parameters.

        Returns:
            ToolOk with combined stdout/stderr on exit code 0; otherwise
            ToolError (empty/blocked command, timeout, non-zero exit, or
            spawn failure).
        """
        if not params.command:
            return ToolError(
                message="Command cannot be empty.",
            )

        # Reject blocked commands before spawning anything.
        if block_reason := self._is_blocked(params.command):
            return ToolError(
                message=f"Command blocked: {block_reason}",
            )

        # Clamp the requested timeout to the configured maximum.
        timeout = min(params.timeout, self._max_timeout)

        try:
            # Pick the platform shell; the command string is passed as one
            # argv element, so no extra quoting layer is introduced here.
            if self._is_windows:
                # Use PowerShell on Windows
                shell_cmd = ["powershell", "-Command", params.command]
            else:
                # Use bash on Unix/Linux/macOS
                shell_cmd = ["bash", "-c", params.command]

            # Execute the command with both streams captured.
            process = await asyncio.create_subprocess_exec(
                *shell_cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )

            try:
                stdout, stderr = await asyncio.wait_for(
                    process.communicate(),
                    timeout=timeout,
                )
            except asyncio.TimeoutError:
                # Kill the runaway process and reap it to avoid a zombie.
                process.kill()
                await process.wait()
                return ToolError(
                    message=f"Command timed out after {timeout} seconds.",
                )

            # Decode leniently: shell output is not guaranteed to be UTF-8.
            stdout_str = stdout.decode("utf-8", errors="replace")
            stderr_str = stderr.decode("utf-8", errors="replace")

            # Combine stdout and stderr, labelling the stderr section.
            output_parts = []
            if stdout_str:
                output_parts.append(stdout_str)
            if stderr_str:
                output_parts.append(f"[stderr]\n{stderr_str}")

            output = "\n".join(output_parts)

            if process.returncode == 0:
                return ToolOk(
                    output=output,
                    # Plain string: the original was an f-string with no
                    # placeholders (same bytes at runtime).
                    message="Command executed successfully (exit code 0).",
                )
            else:
                return ToolError(
                    message=f"Command failed with exit code {process.returncode}.",
                    output=output,
                )

        except Exception as e:
            # Broad catch at the tool boundary: any spawn/OS failure becomes
            # a ToolError instead of crashing the agent loop.
            return ToolError(
                message=f"Failed to execute command. Error: {e}",
            )
|
||||
8
agentlite/src/agentlite/tools/web/__init__.py
Normal file
8
agentlite/src/agentlite/tools/web/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""Web tools for AgentLite.
|
||||
|
||||
This module provides tools for web access and search.
|
||||
"""
|
||||
|
||||
from agentlite.tools.web.fetch import FetchURL
|
||||
|
||||
__all__ = ["FetchURL"]
|
||||
174
agentlite/src/agentlite/tools/web/fetch.py
Normal file
174
agentlite/src/agentlite/tools/web/fetch.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""FetchURL tool for AgentLite.
|
||||
|
||||
This module provides a tool for fetching web page content.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolOk, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the FetchURL tool."""

    # Full URL including scheme — presumably http/https; urllib will raise
    # for unsupported schemes at fetch time.
    url: str = Field(description="The URL to fetch content from.")
|
||||
|
||||
|
||||
class FetchURL(CallableTool2[Params]):
    """Tool for fetching web page content.

    Fetches a URL with a plain HTTP GET (urllib), enforces a size limit,
    and — for HTML responses — strips tags to return the main text.

    Example:
        >>> tool = FetchURL()
        >>> result = await tool({"url": "https://example.com"})
    """

    # Tool metadata advertised to the model; schema derives from ``params``.
    name: str = "FetchURL"
    description: str = (
        "Fetch the content of a web page. "
        "Returns the HTML content or extracts main text if possible. "
        "Useful for reading documentation, articles, or API responses."
    )
    params: type[Params] = Params

    def __init__(
        self,
        timeout: int = 30,
        user_agent: str = (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        ),
        max_content_length: int = 1024 * 1024,  # 1MB
    ):
        """Initialize the FetchURL tool.

        Args:
            timeout: Request timeout in seconds.
            user_agent: User-Agent string. The default mimics a browser,
                since some sites reject unknown agents.
            max_content_length: Maximum content length to fetch, in bytes.
        """
        super().__init__()
        self._timeout = timeout
        self._user_agent = user_agent
        self._max_content_length = max_content_length

    def _extract_text(self, html: str) -> str:
        """Simple HTML-to-text extraction.

        Args:
            html: HTML content.

        Returns:
            Plain text with scripts/styles/tags removed, HTML entities
            decoded, and whitespace collapsed.
        """
        import re

        # Remove script and style elements wholesale: their text content is
        # not part of the visible page.
        html = re.sub(r"<script[^>]*>.*?</script>", "", html, flags=re.DOTALL)
        html = re.sub(r"<style[^>]*>.*?</style>", "", html, flags=re.DOTALL)

        # Strip the remaining HTML tags.
        text = re.sub(r"<[^>]+>", "", html)

        # Decode HTML entities (&amp;, &#39;, ...). Local import avoids
        # shadowing the ``html`` parameter name.
        import html as html_module

        text = html_module.unescape(text)

        # Collapse all runs of whitespace into single spaces.
        text = re.sub(r"\s+", " ", text)

        return text.strip()

    async def __call__(self, params: Params) -> ToolResult:
        """Execute the URL fetch.

        Args:
            params: The fetch parameters.

        Returns:
            ToolOk with extracted text (HTML responses) or the raw body
            (other content types), or ToolError on empty URL, oversized
            content, or network failure.

        NOTE(review): ``urllib.request.urlopen`` is synchronous, so this
        coroutine blocks the event loop for the request's duration —
        consider ``asyncio.to_thread`` if callers need concurrency.
        """
        if not params.url:
            return ToolError(
                message="URL cannot be empty.",
            )

        try:
            # Browser-like headers; some servers reject bare requests.
            # "identity" disables compression so the body can be read as-is.
            request = urllib.request.Request(
                params.url,
                headers={
                    "User-Agent": self._user_agent,
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                    "Accept-Language": "en-US,en;q=0.5",
                    "Accept-Encoding": "identity",
                },
            )

            with urllib.request.urlopen(request, timeout=self._timeout) as response:
                # Fast reject via the declared Content-Length, if present.
                content_length = response.headers.get("Content-Length")
                if content_length and int(content_length) > self._max_content_length:
                    return ToolError(
                        message=(
                            f"Content too large ({int(content_length)} bytes). "
                            f"Maximum is {self._max_content_length} bytes."
                        ),
                    )

                # Read the body.
                content = response.read()

                # Re-check the actual size: Content-Length may be absent or lie.
                if len(content) > self._max_content_length:
                    return ToolError(
                        message=(
                            f"Content too large ({len(content)} bytes). "
                            f"Maximum is {self._max_content_length} bytes."
                        ),
                    )

                # Decode as UTF-8 when possible; otherwise fall back to
                # latin-1, which maps every byte value and cannot fail.
                # (The original's nested except around the latin-1 decode
                # was unreachable for that reason.)
                try:
                    text = content.decode("utf-8")
                except UnicodeDecodeError:
                    text = content.decode("latin-1")

                # For HTML, return extracted text; otherwise the raw body.
                content_type = response.headers.get("Content-Type", "")
                if "text/html" in content_type:
                    extracted = self._extract_text(text)
                    return ToolOk(
                        output=extracted,
                        message=f"Fetched and extracted content from {params.url}",
                    )
                else:
                    return ToolOk(
                        output=text,
                        message=f"Fetched content from {params.url}",
                    )

        except urllib.error.HTTPError as e:
            return ToolError(
                message=f"HTTP error {e.code}: {e.reason}",
            )
        except urllib.error.URLError as e:
            return ToolError(
                message=f"URL error: {e.reason}",
            )
        except Exception as e:
            # Catch-all boundary: surface unexpected failures as ToolError.
            return ToolError(
                message=f"Failed to fetch {params.url}. Error: {e}",
            )
|
||||
82
agentlite/src/agentlite/tools/web/search.py
Normal file
82
agentlite/src/agentlite/tools/web/search.py
Normal file
@@ -0,0 +1,82 @@
|
||||
"""SearchWeb tool for AgentLite.
|
||||
|
||||
This module provides a tool for web search.
|
||||
|
||||
Note: This is a placeholder implementation. A real implementation would
|
||||
require integration with a search API like Google, Bing, or DuckDuckGo.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from agentlite.tool import CallableTool2, ToolError, ToolResult
|
||||
|
||||
|
||||
class Params(BaseModel):
    """Parameters for the SearchWeb tool."""

    query: str = Field(description="The search query string.")
    # Validated by pydantic to be between 1 and 10 (out-of-range rejected,
    # not clamped).
    num_results: int = Field(
        description="Number of search results to return (max 10).",
        default=5,
        ge=1,
        le=10,
    )
|
||||
|
||||
|
||||
class SearchWeb(CallableTool2[Params]):
    """Placeholder web-search tool.

    Accepts a query but never performs a search: a real implementation would
    need an external search API (Google, Bing, DuckDuckGo, ...) plus
    credentials, and in this runtime the tool is disabled. Every call with a
    non-empty query returns a ToolError pointing callers at FetchURL.

    Example:
        >>> tool = SearchWeb()
        >>> result = await tool({"query": "Python async programming"})
    """

    # Tool metadata advertised to the model; schema derives from ``params``.
    name: str = "SearchWeb"
    description: str = (
        "Search the web for information. "
        "Returns a list of relevant search results with titles and snippets. "
        "Note: Requires search API configuration to work properly."
    )
    params: type[Params] = Params

    def __init__(
        self,
        timeout: int = 30,
        user_agent: str = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"),
    ):
        """Initialize the SearchWeb tool.

        Args:
            timeout: Request timeout in seconds. Stored for a future real
                implementation; unused while the tool is a placeholder.
            user_agent: User-Agent string. Likewise currently unused.
        """
        super().__init__()
        self._timeout = timeout
        self._user_agent = user_agent

    async def __call__(self, params: Params) -> ToolResult:
        """Reject the search request.

        Args:
            params: The search parameters.

        Returns:
            Always a ToolError: empty-query complaint, or the
            disabled-in-this-runtime notice.
        """
        if not params.query:
            return ToolError(message="Search query cannot be empty.")

        disabled_notice = (
            "SearchWeb tool is disabled in this subagent runtime. "
            "Use FetchURL for direct URL content retrieval."
        )
        return ToolError(message=disabled_notice)
|
||||
Reference in New Issue
Block a user