feat: add a subagent framework

tcmofashi
2026-04-03 22:15:53 +08:00
parent ce580d1f8b
commit 185361f2c3
72 changed files with 13062 additions and 0 deletions

View File

@@ -0,0 +1,286 @@
"""Integration tests for Agent class.
This module tests the Agent class with mocked providers to verify
core functionality without making real API calls.
"""
from __future__ import annotations
import pytest
from agentlite import Agent, TextPart


@pytest.mark.integration
class TestAgentInitialization:
    """Tests for Agent initialization."""

    def test_agent_initialization(self, mock_provider):
        """Test basic agent creation."""
        agent = Agent(provider=mock_provider)
        assert agent.provider is mock_provider
        assert agent.system_prompt == "You are a helpful assistant."
        assert agent.max_iterations == 80
        assert agent.history == []

    def test_agent_with_custom_system_prompt(self, mock_provider):
        """Test agent creation with custom system prompt."""
        agent = Agent(provider=mock_provider, system_prompt="You are a specialized assistant.")
        assert agent.system_prompt == "You are a specialized assistant."

    def test_agent_with_tools(self, mock_provider, add_tool):
        """Test agent creation with tools."""
        agent = Agent(provider=mock_provider, tools=[add_tool])
        assert len(agent.tools.tools) == 1
        assert agent.tools.tools[0].name == "add"

    def test_agent_with_custom_max_iterations(self, mock_provider):
        """Test agent with custom max_iterations."""
        agent = Agent(provider=mock_provider, max_iterations=5)
        assert agent.max_iterations == 5


@pytest.mark.integration
class TestAgentRun:
    """Tests for Agent.run() method."""

    @pytest.mark.asyncio
    async def test_agent_run_simple(self, mock_provider):
        """Test simple non-streaming run."""
        mock_provider.add_text_response("Hello there!")
        agent = Agent(provider=mock_provider)
        response = await agent.run("Hi")
        assert response == "Hello there!"

    @pytest.mark.asyncio
    async def test_agent_run_adds_to_history(self, mock_provider):
        """Test that run adds messages to history."""
        mock_provider.add_text_response("Response!")
        agent = Agent(provider=mock_provider)
        await agent.run("Hello")
        # History should have user message and assistant response
        assert len(agent.history) == 2
        assert agent.history[0].role == "user"
        assert agent.history[0].extract_text() == "Hello"
        assert agent.history[1].role == "assistant"

    @pytest.mark.asyncio
    async def test_agent_run_multiple_messages(self, mock_provider):
        """Test multiple runs accumulate history."""
        mock_provider.add_text_responses("Response 1", "Response 2")
        agent = Agent(provider=mock_provider)
        await agent.run("Message 1")
        await agent.run("Message 2")
        # Should have 4 messages total
        assert len(agent.history) == 4
        assert agent.history[0].role == "user"
        assert agent.history[1].role == "assistant"
        assert agent.history[2].role == "user"
        assert agent.history[3].role == "assistant"

    @pytest.mark.asyncio
    async def test_agent_run_tracks_calls(self, mock_provider):
        """Test that provider.generate is called during run."""
        mock_provider.add_text_response("Response!")
        agent = Agent(provider=mock_provider)
        await agent.run("Hello")
        assert len(mock_provider.calls) == 1
        call = mock_provider.calls[0]
        assert call["system_prompt"] == "You are a helpful assistant."
        assert len(call["history"]) == 1  # User message


@pytest.mark.integration
class TestAgentGenerate:
    """Tests for Agent.generate() method."""

    @pytest.mark.asyncio
    async def test_agent_generate_returns_message(self, mock_provider):
        """Test that generate returns a Message."""
        mock_provider.add_text_response("Generated response")
        agent = Agent(provider=mock_provider)
        message = await agent.generate("Hello")
        assert message.role == "assistant"
        assert message.extract_text() == "Generated response"

    @pytest.mark.asyncio
    async def test_agent_generate_without_tool_loop(self, mock_provider):
        """Test that generate doesn't do tool calling loop."""
        # Add tool call response
        mock_provider.add_tool_call("add", {"a": 1, "b": 2}, "3")
        agent = Agent(provider=mock_provider, tools=[])
        message = await agent.generate("Calculate 1+2")
        # Should return the tool call without executing it
        assert message.has_tool_calls()
        assert len(message.tool_calls) == 1
        assert message.tool_calls[0].function.name == "add"

    @pytest.mark.asyncio
    async def test_agent_generate_adds_to_history(self, mock_provider):
        """Test that generate adds response to history."""
        mock_provider.add_text_response("Response!")
        agent = Agent(provider=mock_provider)
        await agent.generate("Hello")
        assert len(agent.history) == 2
        assert agent.history[1].role == "assistant"


@pytest.mark.integration
class TestAgentHistory:
    """Tests for Agent history management."""

    @pytest.mark.asyncio
    async def test_agent_history_property_returns_copy(self, mock_provider):
        """Test that history property returns a copy."""
        mock_provider.add_text_response("Response!")
        agent = Agent(provider=mock_provider)
        await agent.run("Hello")
        history = agent.history
        history.clear()  # Modify the copy
        # Original should still have messages
        assert len(agent.history) == 2

    @pytest.mark.asyncio
    async def test_agent_clear_history(self, mock_provider):
        """Test clearing history."""
        mock_provider.add_text_response("Response!")
        agent = Agent(provider=mock_provider)
        await agent.run("Hello")
        agent.clear_history()
        assert agent.history == []

    @pytest.mark.asyncio
    async def test_agent_add_message(self, mock_provider):
        """Test manually adding a message."""
        agent = Agent(provider=mock_provider)
        agent.add_message(Message(role="user", content="Manual message"))
        assert len(agent.history) == 1
        assert agent.history[0].extract_text() == "Manual message"


@pytest.mark.integration
class TestAgentWithTools:
    """Tests for Agent with tools."""

    @pytest.mark.asyncio
    async def test_agent_with_tools_initialization(self, mock_provider, add_tool):
        """Test agent initialization with tools."""
        agent = Agent(
            provider=mock_provider, tools=[add_tool], system_prompt="You have access to tools."
        )
        assert len(agent.tools.tools) == 1
        # Run to verify tools are passed to provider
        mock_provider.add_text_response("I have tools available")
        await agent.run("Hello")
        # Check that tools were passed to provider
        assert len(mock_provider.calls) == 1
        assert len(mock_provider.calls[0]["tools"]) == 1

    @pytest.mark.asyncio
    async def test_agent_tool_call_execution(self, mock_provider, add_tool):
        """Test that agent executes tool calls."""
        # First response: tool call
        mock_provider.add_tool_call("add", {"a": 1, "b": 2}, "3")
        # Second response: text after tool result
        mock_provider.add_text_response("The sum is 3")
        agent = Agent(provider=mock_provider, tools=[add_tool])
        response = await agent.run("What is 1+2?")
        assert "3" in response
        # Should have made 2 calls to provider
        assert len(mock_provider.calls) == 2


@pytest.mark.integration
class TestAgentMaxIterations:
    """Tests for max_iterations behavior."""

    @pytest.mark.asyncio
    async def test_agent_respects_max_iterations(self, mock_provider, add_tool):
        """Test that agent stops after max_iterations."""
        # Always return tool calls to trigger iteration limit
        for _ in range(10):
            mock_provider.add_tool_call("add", {"a": 1, "b": 2}, "3")
        agent = Agent(provider=mock_provider, tools=[add_tool], max_iterations=3)
        response = await agent.run("Calculate")
        # Should stop after max_iterations
        assert len(mock_provider.calls) <= 3
        assert "Maximum tool call iterations reached" in response

    @pytest.mark.asyncio
    async def test_agent_no_iterations_for_simple_response(self, mock_provider):
        """Test that simple responses don't count as iterations."""
        mock_provider.add_text_response("Simple response")
        agent = Agent(provider=mock_provider, max_iterations=1)
        response = await agent.run("Hello")
        assert response == "Simple response"


@pytest.mark.integration
class TestAgentStreaming:
    """Tests for streaming mode."""

    @pytest.mark.asyncio
    async def test_agent_run_streaming(self, mock_provider):
        """Test streaming run."""
        mock_provider.add_text_response("Streamed response")
        agent = Agent(provider=mock_provider)
        stream = await agent.run("Hello", stream=True)
        # Collect stream
        chunks = []
        async for chunk in stream:
            chunks.append(chunk)
        assert len(chunks) > 0
        assert "".join(chunks) == "Streamed response"

    @pytest.mark.asyncio
    async def test_agent_streaming_adds_to_history(self, mock_provider):
        """Test that streaming adds messages to history."""
        mock_provider.add_text_response("Response")
        agent = Agent(provider=mock_provider)
        stream = await agent.run("Hello", stream=True)
        async for _ in stream:
            pass
        assert len(agent.history) == 2

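Note: the mock_provider and add_tool fixtures used throughout this file are not part of the diff shown here; they would come from a shared conftest.py in the same test package. A minimal sketch of what the tests above assume, inferred purely from how the fixtures are called (the class and fixture below are hypothetical, not code from this commit):

# conftest.py -- hypothetical sketch, inferred from fixture usage above.
import pytest


class MockProvider:
    """Queues canned responses and records every generate() call."""

    def __init__(self):
        self.responses = []  # canned responses, consumed in order
        self.calls = []      # one dict per generate() call, e.g.
                             # {"system_prompt": ..., "history": ..., "tools": ...}

    def add_text_response(self, text):
        self.responses.append(("text", text))

    def add_text_responses(self, *texts):
        for text in texts:
            self.add_text_response(text)

    def add_tool_call(self, name, arguments, result):
        self.responses.append(("tool_call", name, arguments, result))

    # The real fixture would also implement the provider's generate()
    # method, popping from self.responses and appending to self.calls;
    # that depends on agentlite's Message types and is omitted here.


@pytest.fixture
def mock_provider():
    return MockProvider()

By the same reading, the add_tool fixture returns a tool object named "add" that sums two integers.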
View File

@@ -0,0 +1,348 @@
"""Integration tests for AgentLite with real API.
This script runs comprehensive tests against the real OpenAI API.
Requires OPENAI_API_KEY environment variable to be set.
Usage:
export OPENAI_API_KEY="sk-..."
python tests/integration/test_with_api.py
"""
import asyncio
import os
import sys
from pathlib import Path
import pytest
# Add src to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
from agentlite import Agent, OpenAIProvider, LLMClient, llm_complete
from agentlite.skills import discover_skills, SkillTool, index_skills_by_name
from agentlite.tools import ConfigurableToolset
# Test configuration
TEST_MODEL = "gpt-4o-mini" # Use mini for cost efficiency
HAS_OPENAI_API_KEY = bool(os.environ.get("OPENAI_API_KEY"))
pytestmark = pytest.mark.skipif(
not HAS_OPENAI_API_KEY, reason="OPENAI_API_KEY is required to run integration tests"
)


def get_provider():
    """Get OpenAI provider with API key."""
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        print("❌ OPENAI_API_KEY not set!")
        print("Please set your OpenAI API key:")
        print(" export OPENAI_API_KEY='sk-...'")
        sys.exit(1)
    return OpenAIProvider(api_key=api_key, model=TEST_MODEL)


async def test_basic_agent():
    """Test 1: Basic Agent functionality."""
    print("\n" + "=" * 60)
    print("Test 1: Basic Agent Functionality")
    print("=" * 60)
    try:
        provider = get_provider()
        agent = Agent(
            provider=provider,
            system_prompt="You are a helpful assistant. Be concise.",
        )
        response = await agent.run("What is 2+2?")
        print(f"✅ Agent responded: {response[:100]}...")
        assert "4" in response, "Expected '4' in response"
        print("✅ Basic Agent test PASSED")
        return True
    except Exception as e:
        print(f"❌ Basic Agent test FAILED: {e}")
        return False


async def test_agent_with_tools():
    """Test 2: Agent with tool suite."""
    print("\n" + "=" * 60)
    print("Test 2: Agent with Tool Suite")
    print("=" * 60)
    try:
        from agentlite.tools import ToolSuiteConfig

        provider = get_provider()
        # Create toolset with file tools
        config = ToolSuiteConfig()
        toolset = ConfigurableToolset(config, work_dir=Path.cwd())
        agent = Agent(
            provider=provider,
            system_prompt="You are a helpful assistant with file access.",
            tools=toolset.tools,
        )
        print(f"✅ Agent created with {len(agent.tools.tools)} tools")
        # Ask a query that should exercise the file tools
        response = await agent.run("List the Python files in the current directory")
        print(f"✅ Agent with tools responded: {response[:100]}...")
        print("✅ Agent with Tools test PASSED")
        return True
    except Exception as e:
        print(f"❌ Agent with Tools test FAILED: {e}")
        import traceback
        traceback.print_exc()
        return False


async def test_llm_client():
    """Test 3: LLMClient functionality."""
    print("\n" + "=" * 60)
    print("Test 3: LLMClient Functionality")
    print("=" * 60)
    try:
        provider = get_provider()
        client = LLMClient(provider=provider)
        response = await client.complete(
            user_prompt="What is the capital of France?",
            system_prompt="You are a helpful assistant. Be concise.",
        )
        print(f"✅ LLMClient responded: {response.content[:100]}...")
        assert "Paris" in response.content, "Expected 'Paris' in response"
        print("✅ LLMClient test PASSED")
        return True
    except Exception as e:
        print(f"❌ LLMClient test FAILED: {e}")
        import traceback
        traceback.print_exc()
        return False


async def test_llm_streaming():
    """Test 4: LLM streaming."""
    print("\n" + "=" * 60)
    print("Test 4: LLM Streaming")
    print("=" * 60)
    try:
        provider = get_provider()
        client = LLMClient(provider=provider)
        chunks = []
        async for chunk in client.stream(
            user_prompt="Count from 1 to 3",
            system_prompt="You are a helpful assistant.",
        ):
            chunks.append(chunk)
            print(f" Chunk: {chunk[:20]}...")
        full_response = "".join(chunks)
        print(f"✅ Streamed response: {full_response[:100]}...")
        print("✅ LLM Streaming test PASSED")
        return True
    except Exception as e:
        print(f"❌ LLM Streaming test FAILED: {e}")
        import traceback
        traceback.print_exc()
        return False


async def test_subagents():
    """Test 5: Subagent functionality."""
    print("\n" + "=" * 60)
    print("Test 5: Subagent Functionality")
    print("=" * 60)
    try:
        from agentlite.tools.multiagent.task import Task

        provider = get_provider()
        # Create parent agent
        parent = Agent(
            provider=provider,
            system_prompt="You are a coordinator agent.",
            name="coordinator",
        )
        # Create subagent
        coder = Agent(
            provider=provider,
            system_prompt="You are a coding specialist. Write clean, simple code.",
            name="coder",
        )
        # Add subagent to parent
        parent.add_subagent("coder", coder, "Writes code")
        # Add Task tool
        parent.tools.add(Task(labor_market=parent.labor_market))
        print(f"✅ Created parent with {len(parent.labor_market)} subagent(s)")
        print(f" Subagents: {parent.labor_market.list_subagents()}")
        print("✅ Subagent test PASSED")
        return True
    except Exception as e:
        print(f"❌ Subagent test FAILED: {e}")
        import traceback
        traceback.print_exc()
        return False


async def test_skills():
    """Test 6: Skills functionality."""
    print("\n" + "=" * 60)
    print("Test 6: Skills Functionality")
    print("=" * 60)
    try:
        # Discover example skills
        skills_dir = Path(__file__).parent.parent.parent / "examples" / "skills"
        if not skills_dir.exists():
            print("⚠️ Skills directory not found, skipping")
            return True
        skills = discover_skills(skills_dir)
        print(f"✅ Discovered {len(skills)} skill(s)")
        for skill in skills:
            print(f" - {skill.name} ({skill.type})")
        if len(skills) == 0:
            print("⚠️ No skills found, skipping further tests")
            return True
        # Test with agent
        provider = get_provider()
        agent = Agent(
            provider=provider,
            system_prompt="You are a helpful assistant.",
        )
        skill_index = index_skills_by_name(skills)
        skill_tool = SkillTool(skill_index, parent_agent=agent)
        agent.tools.add(skill_tool)
        print("✅ Added SkillTool to agent")
        print("✅ Skills test PASSED")
        return True
    except Exception as e:
        print(f"❌ Skills test FAILED: {e}")
        import traceback
        traceback.print_exc()
        return False


async def test_conversation_history():
    """Test 7: Conversation history."""
    print("\n" + "=" * 60)
    print("Test 7: Conversation History")
    print("=" * 60)
    try:
        provider = get_provider()
        agent = Agent(
            provider=provider,
            system_prompt="You are a helpful assistant.",
        )
        # First message
        response1 = await agent.run("My name is Alice")
        print(f"✅ Response 1: {response1[:50]}...")
        # Second message (should remember context)
        response2 = await agent.run("What is my name?")
        print(f"✅ Response 2: {response2[:50]}...")
        assert "Alice" in response2, "Expected agent to remember name"
        print("✅ Conversation History test PASSED")
        return True
    except Exception as e:
        print(f"❌ Conversation History test FAILED: {e}")
        import traceback
        traceback.print_exc()
        return False


async def run_all_tests():
    """Run all integration tests."""
    print("\n" + "=" * 60)
    print("AgentLite Integration Tests with Real API")
    print("=" * 60)
    print(f"Model: {TEST_MODEL}")
    # Check API key
    if not os.environ.get("OPENAI_API_KEY"):
        print("\n❌ OPENAI_API_KEY not set!")
        print("\nTo run these tests, set your OpenAI API key:")
        print(" export OPENAI_API_KEY='sk-...'")
        print("\nGet your API key from: https://platform.openai.com/api-keys")
        return []
    results = []
    # Run all tests
    results.append(("Basic Agent", await test_basic_agent()))
    results.append(("Agent with Tools", await test_agent_with_tools()))
    results.append(("LLMClient", await test_llm_client()))
    results.append(("LLM Streaming", await test_llm_streaming()))
    results.append(("Subagents", await test_subagents()))
    results.append(("Skills", await test_skills()))
    results.append(("Conversation History", await test_conversation_history()))
    # Print summary
    print("\n" + "=" * 60)
    print("Test Summary")
    print("=" * 60)
    passed = sum(1 for _, result in results if result)
    total = len(results)
    for name, result in results:
        status = "✅ PASSED" if result else "❌ FAILED"
        print(f"{status}: {name}")
    print(f"\n{passed}/{total} tests passed")
    if passed == total:
        print("\n🎉 All tests passed!")
    else:
        print(f"\n⚠️ {total - passed} test(s) failed")
    return results
if __name__ == "__main__":
results = asyncio.run(run_all_tests())
# Exit with error code if any tests failed
if results and not all(r for _, r in results):
sys.exit(1)
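
Both files can also be driven through pytest rather than as standalone scripts; assuming the integration marker used above is registered in the project's pytest configuration, invocations along these lines should work (paths inferred from the docstrings, not confirmed by this diff):

    pytest -m integration tests/integration/
    OPENAI_API_KEY="sk-..." pytest tests/integration/test_with_api.py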