The ai-agent template ships Python 3.10 + Node.js 20 with the major LLM
and agent SDKs preinstalled as a single pre-resolved pip set:
- LLM clients: openai, anthropic, litellm
- Agent frameworks: langchain + langchain-openai + langchain-anthropic,
  crewai, autogen-agentchat, llama-index-core + llama-index-llms-openai +
  llama-index-embeddings-openai, haystack-ai, pydantic-ai-slim
- Helpers: instructor, tiktoken, tenacity
- MCP: mcp, fastmcp
- Storage / tracing: chromadb, arize-phoenix, opentelemetry
Pick it whenever your sandbox runs an LLM-driven agent — it removes a 30–60s
pip install from every cold boot.
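For contrast, here is a minimal sketch of the two cold-boot paths. The
"base" template name and the exact pip package list are illustrative
assumptions, not part of the template spec:

from declaw import Sandbox

# Generic sandbox: every cold boot pays for a fresh dependency
# install (roughly 30-60 s for this stack).
base = Sandbox.create(template="base", timeout=300)  # "base" is an assumed name
base.commands.run("pip install openai anthropic langchain langchain-openai")
base.kill()

# ai-agent sandbox: the same packages are already resolved and installed.
agent = Sandbox.create(template="ai-agent", timeout=180)
agent.kill()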
Heavy ML deps (torch, transformers, sentence-transformers) are
intentionally not included to keep the image small. Use the OpenAI /
Anthropic embedding APIs (already wired through
llama-index-embeddings-openai) when you need embeddings, or build a
custom template for local inference.
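If you do need embeddings, the wired-in integration is enough on its own.
A minimal sketch of a script you would run inside the sandbox, assuming you
pass an OPENAI_API_KEY into its environment; the model name is just an
example choice:

# Runs inside the sandbox; assumes OPENAI_API_KEY is set in its environment.
from llama_index.embeddings.openai import OpenAIEmbedding

embed = OpenAIEmbedding(model="text-embedding-3-small")
vector = embed.get_text_embedding("hello from the ai-agent template")
print(len(vector))  # 1536 dimensions for text-embedding-3-small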
What you’ll learn
- Picking template="ai-agent" to skip framework pip install steps
- Verifying the agent SDKs import cleanly inside the sandbox
- Running a minimal LangChain expression (without an LLM call) to prove the
  framework is wired up
Prerequisites
export DECLAW_API_KEY="your-api-key"
export DECLAW_DOMAIN="your-declaw-instance.example.com:8080"
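Optionally, fail fast if either variable is missing before creating a
sandbox; this preflight snippet is purely illustrative:

import os

# Fail fast if the Declaw credentials from Prerequisites are missing.
for var in ("DECLAW_API_KEY", "DECLAW_DOMAIN"):
    if not os.environ.get(var):
        raise RuntimeError(f"{var} is not set; see Prerequisites above")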
This example does not call any LLM — it only verifies the SDKs load.
For a full LLM-in-sandbox example, see
Cookbook → LLM Providers.
Code
import textwrap
from declaw import Sandbox
CHECK = textwrap.dedent("""
    import importlib

    targets = [
        "openai", "anthropic", "litellm",
        "langchain", "langchain_openai", "langchain_anthropic",
        "crewai",
        "autogen_agentchat",
        "llama_index.core",
        "haystack",
        "pydantic_ai",
        "instructor", "tiktoken",
        "mcp", "fastmcp",
        "chromadb",
        "phoenix", "opentelemetry",
    ]

    for name in targets:
        try:
            mod = importlib.import_module(name)
            ver = getattr(mod, "__version__", "n/a")
        except Exception as e:
            ver = f"MISSING ({type(e).__name__})"
        print(f" {name:32s} {ver}")
""")
LANGCHAIN_DEMO = textwrap.dedent("""
    # No LLM call — proves the prompt + parser pipeline is intact.
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.output_parsers import StrOutputParser

    prompt = ChatPromptTemplate.from_messages([
        ("system", "Echo {role} pings."),
        ("human", "ping #{n}"),
    ])
    msg = prompt.format_messages(role="agent", n=42)
    print("rendered prompt:", msg[1].content)

    parser = StrOutputParser()
    print("parser ok:", parser.parse("hello world"))
""")
def main() -> None:
    sbx = Sandbox.create(template="ai-agent", timeout=180)
    try:
        print("=== framework SDK versions ===")
        # Write the script to a file rather than inlining it with
        # `python3 -c`: shell-escaping a multi-line, quote-heavy script
        # is fragile.
        sbx.files.write("/tmp/check.py", CHECK)
        r = sbx.commands.run("python3 /tmp/check.py")
        print(r.stdout)
        if r.exit_code != 0:
            print("import failed:", r.stderr)
            return

        print("=== LangChain pipeline smoke test ===")
        sbx.files.write("/tmp/lc.py", LANGCHAIN_DEMO)
        r = sbx.commands.run("python3 /tmp/lc.py")
        print(r.stdout)
        if r.exit_code != 0:
            print("langchain demo failed:", r.stderr)
    finally:
        sbx.kill()


if __name__ == "__main__":
    main()
import "dotenv/config";
import { Sandbox } from "@declaw/sdk";
const CHECK = `
import importlib

targets = [
    "openai", "anthropic", "litellm",
    "langchain", "langchain_openai", "langchain_anthropic",
    "crewai",
    "autogen_agentchat",
    "llama_index.core",
    "haystack",
    "pydantic_ai",
    "instructor", "tiktoken",
    "mcp", "fastmcp",
    "chromadb",
    "phoenix", "opentelemetry",
]

for name in targets:
    try:
        mod = importlib.import_module(name)
        ver = getattr(mod, "__version__", "n/a")
    except Exception as e:
        ver = f"MISSING ({type(e).__name__})"
    print(f" {name:32s} {ver}")
`;
const LANGCHAIN_DEMO = `
# No LLM call — proves the prompt + parser pipeline is intact.
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

prompt = ChatPromptTemplate.from_messages([
    ("system", "Echo {role} pings."),
    ("human", "ping #{n}"),
])
msg = prompt.format_messages(role="agent", n=42)
print("rendered prompt:", msg[1].content)

parser = StrOutputParser()
print("parser ok:", parser.parse("hello world"))
`;
async function main(): Promise<void> {
  const sbx = await Sandbox.create({ template: "ai-agent", timeout: 180 });
  try {
    console.log("=== framework SDK versions ===");
    await sbx.files.write("/tmp/check.py", CHECK);
    let r = await sbx.commands.run("python3 /tmp/check.py");
    console.log(r.stdout);
    if (r.exitCode !== 0) {
      console.log("import failed:", r.stderr);
      return;
    }

    console.log("=== LangChain pipeline smoke test ===");
    await sbx.files.write("/tmp/lc.py", LANGCHAIN_DEMO);
    r = await sbx.commands.run("python3 /tmp/lc.py");
    console.log(r.stdout);
    if (r.exitCode !== 0) {
      console.log("langchain demo failed:", r.stderr);
    }
  } finally {
    await sbx.kill();
  }
}

main().catch(console.error);
Expected output
=== framework SDK versions ===
 openai                           2.31.0
 anthropic                        ...
 litellm                          ...
 langchain                        1.2.15
 langchain_openai                 ...
 langchain_anthropic              ...
 crewai                           0.193.2
 autogen_agentchat                ...
 llama_index.core                 ...
 haystack                         ...
 pydantic_ai                      ...
 instructor                       ...
 tiktoken                         ...
 mcp                              ...
 fastmcp                          ...
 chromadb                         ...
 phoenix                          ...
 opentelemetry                    ...

=== LangChain pipeline smoke test ===
rendered prompt: ping #42
parser ok: hello world
When you actually run LLM calls from inside an ai-agent sandbox, attach a
SecurityPolicy with PII redaction + a network allowlist scoped to your
LLM provider. See
Agent-in-Sandbox → Fully Secured for
a worked example with all four guardrails enabled.
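As a rough sketch of what that looks like (the SecurityPolicy field names
below are illustrative assumptions; the linked page documents the real API):

from declaw import Sandbox, SecurityPolicy  # SecurityPolicy import path assumed

# Field names are placeholders for illustration only.
policy = SecurityPolicy(
    pii_redaction=True,
    network_allowlist=["api.openai.com", "api.anthropic.com"],
)
sbx = Sandbox.create(template="ai-agent", security_policy=policy)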