Discover the different types of reasoning agents available in the Swarms API. The /v1/reasoning-agent/types endpoint provides information about specialized reasoning architectures designed for different problem-solving approaches.
Premium Tier Required: The /v1/reasoning-agent/completions endpoint is restricted to Pro, Ultra, and Premium plan subscribers. Free tier users will receive a 403 error. Upgrade your account to access advanced reasoning capabilities.
Reasoning agents use advanced techniques like self-consistency, majority voting, and iterative refinement to improve answer quality and reliability.
Quick Start
# Standard library.
import json
import os

# Third-party: HTTP client and .env loader.
import requests
from dotenv import load_dotenv

# Pull SWARMS_API_KEY (and any other vars) from a local .env file.
load_dotenv()

API_KEY = os.getenv("SWARMS_API_KEY")
BASE_URL = "https://api.swarms.world"

# Headers shared by every request: API-key auth plus JSON content type.
headers = {"x-api-key": API_KEY, "Content-Type": "application/json"}
def get_reasoning_agent_types():
    """Fetch the catalog of available reasoning agent types.

    Issues a GET against /v1/reasoning-agent/types and returns the parsed
    JSON body on HTTP 200; on any other status it logs the failure and
    returns None.
    """
    resp = requests.get(
        f"{BASE_URL}/v1/reasoning-agent/types",
        headers=headers,
    )
    # Guard clause: report and bail out on any non-200 response.
    if resp.status_code != 200:
        print(f"Error: {resp.status_code} - {resp.text}")
        return None
    return resp.json()
# Fetch the reasoning agent types and pretty-print them on success.
# The walrus keeps the original truthiness check (None or empty -> skip).
if reasoning_data := get_reasoning_agent_types():
    print("✅ Reasoning agent types retrieved successfully!")
    print(json.dumps(reasoning_data, indent=2))
// Shared configuration: API key from the environment plus JSON headers.
const API_KEY = process.env.SWARMS_API_KEY;
const BASE_URL = "https://api.swarms.world";
const headers = {
  "x-api-key": API_KEY,
  "Content-Type": "application/json"
};

/**
 * Fetch the catalog of reasoning agent types from the Swarms API.
 * Resolves with the parsed JSON payload, or null when the request fails.
 */
async function getReasoningAgentTypes() {
  const url = `${BASE_URL}/v1/reasoning-agent/types`;
  try {
    // Shorthand property: { headers } is equivalent to { headers: headers }.
    const response = await fetch(url, { method: 'GET', headers });
    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`);
    }
    const data = await response.json();
    console.log("✅ Reasoning agent types retrieved successfully!");
    console.log(JSON.stringify(data, null, 2));
    return data;
  } catch (error) {
    console.error('Error:', error);
    return null;
  }
}

// Get reasoning agent types
getReasoningAgentTypes();
# Get reasoning agent types
# NOTE: the colon must be INSIDE the quoted header string; the previous
# form (-H "Content-Type": "application/json") passed a malformed header
# plus a stray argument to curl.
curl -X GET "https://api.swarms.world/v1/reasoning-agent/types" \
  -H "x-api-key: your-api-key" \
  -H "Content-Type: application/json"
# Example response:
# {
#   "reasoning-agent": "Basic reasoning agent",
#   "reasoning-duo": "Two-agent reasoning system",
#   "self-consistency": "Multiple samples for consistency",
#   "ire": "Iterative refinement",
#   "consistency-agent": "Consistency-focused agent"
# }
Reasoning Agent Type Comparison
| Type | Description | Best For | Samples | Quality Focus |
|---|---|---|---|---|
| reasoning-agent | Basic reasoning with structured thinking | Simple reasoning tasks | 1 | Balanced |
| reasoning-duo | Two agents with different approaches | Comparative analysis | 2 | Diversity |
| self-consistency | Multiple samples for consistency checking | High-stakes decisions | 3-5 | Consistency |
| ire | Iterative refinement through multiple passes | Complex problem-solving | Variable | Accuracy |
| consistency-agent | Focus on logical consistency | Mathematical/logical problems | 3 | Logical rigor |
Usage Examples
# Basic reasoning agent: one sample, one loop — suited to simple tasks.
payload = {
"agent_name": "Basic Reasoner",
"model_name": "gpt-4o",
"system_prompt": "You are a logical reasoning assistant.",
"max_loops": 1,
"swarm_type": "reasoning-agent",
"num_samples": 1,
"task": "Solve this logic puzzle: If all bloops are razzes and some razzes are fizzles, are all bloops fizzles?"
}
# Self-consistency: multiple samples are cross-checked for agreement.
payload = {
"agent_name": "Consistent Reasoner",
"model_name": "gpt-4o",
"system_prompt": "You are a reasoning agent that values consistency.",
"max_loops": 1,
"swarm_type": "self-consistency",
"num_samples": 3,
"task": "Determine the most logical conclusion from these premises: All humans are mortal. Socrates is human. Therefore..."
}
# IRE (iterative refinement): max_loops > 1 drives repeated passes.
payload = {
"agent_name": "Refinement Agent",
"model_name": "gpt-4o",
"system_prompt": "You are an agent that improves through iteration.",
"max_loops": 3,
"swarm_type": "ire",
"num_samples": 2,
"task": "Design a more efficient algorithm for sorting a list of numbers."
}
# Reasoning duo: two agents approach the task from different angles.
payload = {
"agent_name": "Dual Reasoner",
"model_name": "gpt-4o",
"system_prompt": "You are a dual reasoning system.",
"max_loops": 1,
"swarm_type": "reasoning-duo",
"num_samples": 2,
"task": "Debate the pros and cons of renewable energy adoption."
}
Advanced Configuration
# High-consistency configuration: more samples, structured dict output.
payload = {
"agent_name": "High Consistency Agent",
"model_name": "gpt-4o",
"max_loops": 1,
"swarm_type": "self-consistency",
"num_samples": 5, # Higher sample count for better consistency
"output_type": "dict", # Return detailed analysis
"task": "Analyze the potential risks of artificial general intelligence."
}
# Deep-analysis configuration: many IRE refinement loops, final answer only.
payload = {
"agent_name": "Deep Analysis Agent",
"model_name": "gpt-4o",
"max_loops": 5, # Multiple refinement iterations
"swarm_type": "ire",
"num_samples": 3,
"output_type": "final", # Return only final result
"task": "Develop a comprehensive business strategy for a tech startup."
}
# Knowledge-augmented configuration: attaches knowledge items and memory.
payload = {
"agent_name": "Knowledge Agent",
"model_name": "gpt-4o",
"max_loops": 1,
"swarm_type": "consistency-agent",
"num_samples": 3,
"num_knowledge_items": 5, # Include knowledge base
"memory_capacity": 10,
"task": "Integrate these research findings into a coherent theory."
}
Output Types
Dictionary Output
List Output
Final Answer Only
# output_type "dict" — returns a structured analysis object.
payload = {
"swarm_type": "self-consistency",
"output_type": "dict",
"task": "Analyze market trends"
}
# Output: {"analysis": "...", "confidence": 0.85, "alternatives": [...]}
# output_type "list" — returns multiple options as a list.
payload = {
"swarm_type": "reasoning-duo",
"output_type": "list",
"task": "Generate business ideas"
}
# Output: ["Idea 1", "Idea 2", "Idea 3"]
# output_type "final" — returns only the single best result.
payload = {
"swarm_type": "ire",
"output_type": "final",
"task": "Solve this equation"
}
# Output: "x = 42"
Quality vs Speed
Cost Optimization
def optimize_reasoning_config(task_complexity, time_constraint):
    """Pick a reasoning configuration for the given requirements.

    Priority order: a "fast" time constraint wins outright (cheapest
    single-pass setup), then "high" complexity selects iterative
    refinement, and everything else falls back to a balanced
    self-consistency setup.
    """
    # (swarm_type, num_samples, max_loops, model_name)
    if time_constraint == "fast":
        choice = ("reasoning-agent", 1, 1, "gpt-4o-mini")
    elif task_complexity == "high":
        choice = ("ire", 5, 3, "gpt-4o")
    else:  # balanced
        choice = ("self-consistency", 3, 1, "gpt-4o")

    swarm_type, num_samples, max_loops, model_name = choice
    return {
        "swarm_type": swarm_type,
        "num_samples": num_samples,
        "max_loops": max_loops,
        "model_name": model_name,
    }
def optimize_reasoning_cost(budget, quality_requirement):
    """Choose the cheapest reasoning configuration that meets quality needs.

    A budget below 0.1 forces the minimal single-sample setup regardless
    of quality; otherwise a "high" quality requirement selects a capped
    self-consistency configuration, and anything else gets the mid-cost
    reasoning-duo setup.
    """
    # Tight budget: minimal single-sample configuration.
    if budget < 0.1:
        return {
            "swarm_type": "reasoning-agent",
            "model_name": "gpt-4o-mini",
            "num_samples": 1,
        }
    # High quality at normal budget: self-consistency with a token cap.
    if quality_requirement == "high":
        return {
            "swarm_type": "self-consistency",
            "model_name": "gpt-4o",
            "num_samples": 3,
            "max_tokens": 2048,
        }
    # Default: balanced dual-agent setup on the cheaper model.
    return {
        "swarm_type": "reasoning-duo",
        "model_name": "gpt-4o-mini",
        "num_samples": 2,
    }
Use Cases by Domain
# Scientific research: IRE with several loops for hypothesis-driven work.
payload = {
"agent_name": "Scientific Reasoner",
"swarm_type": "ire",
"num_samples": 3,
"max_loops": 4,
"task": "Design an experiment to test the hypothesis that X causes Y."
}
# Business strategy: reasoning-duo to contrast competing strategies.
payload = {
"agent_name": "Strategy Consultant",
"swarm_type": "reasoning-duo",
"num_samples": 2,
"task": "Evaluate three potential market entry strategies for our product."
}
# Legal analysis: self-consistency with many samples for high-stakes output.
payload = {
"agent_name": "Legal Analyst",
"swarm_type": "self-consistency",
"num_samples": 5,
"task": "Analyze the legal implications of this contract clause."
}
# Technical debugging: consistency-agent for logically rigorous diagnosis.
payload = {
"agent_name": "Technical Expert",
"swarm_type": "consistency-agent",
"num_samples": 3,
"task": "Debug this software issue and propose a solution."
}
Best Practices
Configuration Guidelines
- Task Complexity: Match reasoning type to task complexity
- Sample Count: Use more samples for high-stakes decisions
- Iteration Count: Increase iterations for complex refinement
- Model Selection: Choose appropriate model based on requirements
Quality Assurance
- Consistency Checking: Use self-consistency for critical decisions
- Diverse Perspectives: Leverage reasoning-duo for balanced analysis
- Iterative Improvement: Apply IRE for complex problem-solving
- Validation: Always validate reasoning outputs
Performance Optimization
- Resource Usage: Monitor token usage and costs
- Response Time: Balance quality with response speed
- Scalability: Consider parallel processing for multiple tasks
- Caching: Cache reasoning results when appropriate
Error Handling
- Fallback Logic: Implement fallback to simpler reasoning types
- Timeout Handling: Set appropriate timeouts for long-running tasks
- Result Validation: Validate reasoning outputs for correctness
- Logging: Log reasoning processes for debugging