Get a comprehensive list of all AI models available through the Swarms API. This endpoint provides information about supported models from various providers including OpenAI, Anthropic, Groq, and others.
The /v1/models/available endpoint returns every model currently supported by the Swarms API, grouped by provider.
Quick Start
import requests
import json
import os
from dotenv import load_dotenv
load_dotenv()
API_KEY = os.getenv("SWARMS_API_KEY")
BASE_URL = "https://swarms-api-285321057562.us-east1.run.app"
headers = {
    "x-api-key": API_KEY,
    "Content-Type": "application/json"
}

def get_available_models():
    """Get all available models."""
    response = requests.get(
        f"{BASE_URL}/v1/models/available",
        headers=headers
    )
    if response.status_code == 200:
        return response.json()
    else:
        print(f"Error: {response.status_code} - {response.text}")
        return None

# Get available models
models_data = get_available_models()
if models_data:
    print("✅ Available models retrieved successfully!")
    print(json.dumps(models_data, indent=2))
Understanding the Response
The models endpoint returns a structured response with information about all available models:
{
  "success": true,
  "models": {
    "openai": [
      "gpt-4o",
      "gpt-4o-mini",
      "gpt-4-turbo",
      "gpt-4",
      "gpt-3.5-turbo"
    ],
    "anthropic": [
      "claude-sonnet-4-20250514",
      "claude-sonnet-3.5",
      "claude-haiku-3.5",
      "claude-opus-4"
    ],
    "groq": [
      "llama-3.1-70b-versatile",
      "llama-3.1-8b-instant",
      "mixtral-8x7b-instruct",
      "gemma-7b-it"
    ],
    "other_providers": [
      "custom-model-1",
      "custom-model-2"
    ]
  }
}
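Because the models object is keyed by provider, it is often convenient to flatten it into a single list when validating a model name regardless of provider. A minimal sketch, assuming the response shape shown above:

def flatten_models(models_data):
    """Flatten the provider-keyed models dict into one list of model names."""
    all_models = []
    for provider, model_list in models_data.get("models", {}).items():
        all_models.extend(model_list)
    return all_models

if models_data:
    all_models = flatten_models(models_data)
    print(f"{len(all_models)} models available")
    print("gpt-4o supported:", "gpt-4o" in all_models)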
Model Selection Guide
For Text Generation
Creative Tasks
# Best for creative writing, brainstorming, content generation
recommended_models = [
    "gpt-4o",                    # Most capable, balanced performance
    "claude-sonnet-4-20250514",  # Excellent for creative tasks
    "gpt-4o-mini"                # Fast and cost-effective
]
Model Capabilities
Vision Models
# Models that support image analysis
vision_models = [
    "gpt-4o",       # Best vision capabilities
    "gpt-4-turbo",  # Good vision support
    "gpt-4o-mini"   # Basic vision support
]

# Example: Vision-enabled agent
payload = {
    "agent_config": {
        "agent_name": "Vision Analyst",
        "model_name": "gpt-4o",  # Vision-capable model
        "max_tokens": 2048
    },
    "task": "Describe this image in detail",
    "img": "https://example.com/image.jpg"
}
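To execute the vision payload above, post it to the single-agent completions endpoint. The route /v1/agent/completions is assumed here; confirm the exact path against the Swarms API reference:

# Submit the vision payload (endpoint path assumed; verify in the API reference)
response = requests.post(
    f"{BASE_URL}/v1/agent/completions",
    headers=headers,
    json=payload,
    timeout=60
)
if response.status_code == 200:
    print(response.json())
else:
    print(f"Error: {response.status_code} - {response.text}")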
Dynamic Model Selection
def select_best_model(task_type, priority="balanced"):
    """
    Dynamically select the best model based on task type and priority.

    Args:
        task_type: Type of task ("creative", "analytical", "fast", "vision")
        priority: Priority ("quality", "speed", "cost", "balanced")
    """
    # Get available models first
    models_data = get_available_models()
    if not models_data or not models_data.get("success"):
        return "gpt-4o-mini"  # Fallback
    available_models = models_data.get("models", {})

    # Model selection logic
    if task_type == "creative":
        if priority == "quality":
            return "gpt-4o" if "gpt-4o" in available_models.get("openai", []) else "claude-sonnet-4-20250514"
        elif priority == "speed":
            return "gpt-4o-mini" if "gpt-4o-mini" in available_models.get("openai", []) else "claude-haiku-3.5"
        else:  # balanced
            return "gpt-4o-mini"
    elif task_type == "analytical":
        return "gpt-4o" if "gpt-4o" in available_models.get("openai", []) else "claude-sonnet-4-20250514"
    elif task_type == "fast":
        return "gpt-4o-mini" if "gpt-4o-mini" in available_models.get("openai", []) else "llama-3.1-8b-instant"
    elif task_type == "vision":
        return "gpt-4o" if "gpt-4o" in available_models.get("openai", []) else "gpt-4-turbo"
    return "gpt-4o-mini"  # Default fallback

# Example usage
best_model = select_best_model("creative", "quality")
print(f"Selected model: {best_model}")
Model Comparison

| Model | Context Window | Best For | Speed | Cost |
|---|---|---|---|---|
| gpt-4o | 128k+ | Complex reasoning, vision | Medium | High |
| gpt-4o-mini | 128k+ | General purpose, fast tasks | Fast | Low |
| claude-sonnet-4-20250514 | 200k+ | Creative writing, analysis | Medium | High |
| claude-haiku-3.5 | 200k+ | Fast responses | Fast | Low |
| llama-3.1-70b | 128k | Complex tasks | Medium | Medium |
| llama-3.1-8b | 128k | Simple tasks | Fast | Low |
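When these trade-offs are needed at runtime, one option is to encode the table above as a lookup dict. The profiles below simply restate the table, and the context figures are approximate:

# Approximate profiles restated from the comparison table above
MODEL_PROFILES = {
    "gpt-4o":                   {"context": 128_000, "speed": "medium", "cost": "high"},
    "gpt-4o-mini":              {"context": 128_000, "speed": "fast",   "cost": "low"},
    "claude-sonnet-4-20250514": {"context": 200_000, "speed": "medium", "cost": "high"},
    "claude-haiku-3.5":         {"context": 200_000, "speed": "fast",   "cost": "low"},
    "llama-3.1-70b-versatile":  {"context": 128_000, "speed": "medium", "cost": "medium"},
    "llama-3.1-8b-instant":     {"context": 128_000, "speed": "fast",   "cost": "low"},
}

def models_with_context(min_tokens):
    """Return models whose context window meets a minimum token count."""
    return [m for m, p in MODEL_PROFILES.items() if p["context"] >= min_tokens]

print(models_with_context(150_000))  # claude models only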
Cost Optimization
def optimize_model_selection(task_complexity, budget_constraint):
    """Select an optimal model based on task complexity and budget."""
    if task_complexity == "low":
        return "gpt-4o-mini" if budget_constraint == "strict" else "llama-3.1-8b-instant"
    elif task_complexity == "medium":
        return "gpt-4o-mini" if budget_constraint == "strict" else "gpt-4o"
    else:  # high complexity
        return "claude-sonnet-4-20250514" if budget_constraint == "flexible" else "gpt-4o"

# Example usage
model = optimize_model_selection("high", "flexible")
print(f"Optimized model: {model}")
Error Handling
def get_available_models_with_fallback():
    """Get available models with proper error handling."""
    try:
        response = requests.get(
            f"{BASE_URL}/v1/models/available",
            headers=headers,
            timeout=10
        )
        if response.status_code == 200:
            data = response.json()
            if data.get("success"):
                return data.get("models", {})
            else:
                print("❌ API returned success=false")
                return {}
        elif response.status_code == 401:
            print("❌ Authentication failed. Check your API key.")
            return {}
        elif response.status_code == 429:
            print("❌ Rate limit exceeded. Please wait before retrying.")
            return {}
        else:
            print(f"❌ HTTP {response.status_code}: {response.text}")
            return {}
    except requests.exceptions.Timeout:
        print("❌ Request timed out")
        return {}
    except requests.exceptions.ConnectionError:
        print("❌ Connection error")
        return {}
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        return {}

# Use with fallback
models = get_available_models_with_fallback()
if models:
    print(f"Available providers: {list(models.keys())}")
Best Practices
- Cache Results: Cache the models list to avoid frequent API calls (see the caching sketch after this list)
- Handle Changes: Models may be added or removed over time
- Fallback Logic: Always have fallback models for reliability
- Version Awareness: Be aware of model versioning and deprecation
- Cost Monitoring: Track usage costs for different models
- Performance Testing: Test model performance for your specific use cases
- Documentation Review: Check model-specific limitations and capabilities
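A minimal in-process cache for the first practice, assuming a fixed time-to-live is acceptable (the 300-second TTL is an arbitrary choice):

import time

_models_cache = {"data": None, "fetched_at": 0.0}

def get_cached_models(ttl_seconds=300):
    """Return cached models, refreshing from the API when the TTL expires."""
    now = time.time()
    if _models_cache["data"] is None or now - _models_cache["fetched_at"] > ttl_seconds:
        _models_cache["data"] = get_available_models_with_fallback()
        _models_cache["fetched_at"] = now
    return _models_cache["data"]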
Integration Examples
Model Selection in Agent Creation
def create_agent_with_best_model(task_type):
    """Create an agent with the best available model for the task."""
    best_model = select_best_model(task_type)  # queries available models internally
    return {
        "agent_config": {
            "agent_name": f"{task_type.title()} Agent",
            "model_name": best_model,
            "max_tokens": 2048,
            "temperature": 0.7
        },
        "task": f"Perform {task_type} task"
    }
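For example, building an analytical agent payload and inspecting the model that was selected (the result depends on which models the API reports as available):

payload = create_agent_with_best_model("analytical")
print(f"Selected model: {payload['agent_config']['model_name']}")
print(json.dumps(payload, indent=2))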
Batch Processing with Model Optimization
def optimize_batch_processing(tasks):
    """Optimize model selection for batch processing."""
    optimized_payloads = []
    for task in tasks:
        task_type = classify_task(task)  # user-supplied classifier; see the stub below
        best_model = select_best_model(task_type, "cost")
        optimized_payloads.append({
            "agent_config": {
                "agent_name": f"Optimized Agent {len(optimized_payloads) + 1}",
                "model_name": best_model,
                "max_tokens": 1024
            },
            "task": task
        })
    return optimized_payloads
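classify_task is left to the caller; the keyword-based stub below is purely illustrative, and real classification logic will depend on your workload:

def classify_task(task):
    """Hypothetical helper: crude keyword-based task classification."""
    lowered = task.lower()
    if any(word in lowered for word in ("image", "photo", "picture")):
        return "vision"
    if any(word in lowered for word in ("write", "story", "brainstorm")):
        return "creative"
    if any(word in lowered for word in ("analyze", "compare", "evaluate")):
        return "analytical"
    return "fast"

# Example usage
tasks = ["Write a product tagline", "Analyze Q3 revenue trends"]
for p in optimize_batch_processing(tasks):
    print(p["agent_config"]["model_name"], "->", p["task"])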