Basic Setup
The pattern is the same for all providers:
- Create an Observ instance with your API key
- Create your provider client
- Wrap it with `ob.providerName(client)`
- Use the wrapped client normally
Set `recall: true` (TypeScript) or `recall=True` (Python) when constructing the Observ instance to enable semantic caching and reduce costs.
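As a quick illustration of what recall buys you, here is a sketch using the wrapped Anthropic client from the first example below. Whether the second call actually hits the cache depends on your Observ configuration and similarity thresholds, so treat this as illustrative:

```typescript
// Sketch: with recall enabled, Observ can answer semantically similar
// requests from its cache instead of calling the provider again.
const first = await wrappedClient.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "What is the capital of France?" }],
});

// A paraphrase of the same question may be served from the semantic
// cache, skipping the provider round trip and its token cost.
const second = await wrappedClient.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Tell me France's capital city." }],
});
```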
Provider Examples
Anthropic (TypeScript)

```typescript
import Anthropic from "@anthropic-ai/sdk";
import Observ from "observ-sdk";

const ob = new Observ({
  apiKey: "your-observ-api-key",
  recall: true, // Enable semantic caching
});

const client = new Anthropic({ apiKey: "your-anthropic-key" });
const wrappedClient = ob.anthropic(client);

const response = await wrappedClient.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(response.content[0].text);
```
OpenAI (TypeScript)

```typescript
import OpenAI from "openai";
import Observ from "observ-sdk";

const ob = new Observ({
  apiKey: "your-observ-api-key",
  recall: true,
});

const client = new OpenAI({ apiKey: "your-openai-key" });
const wrappedClient = ob.openai(client);

const response = await wrappedClient.chat.completions.create({
  model: "gpt-4",
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(response.choices[0].message.content);
```
Mistral (TypeScript)

```typescript
import { Mistral } from "@mistralai/mistralai";
import Observ from "observ-sdk";

const ob = new Observ({
  apiKey: "your-observ-api-key",
  recall: true,
});

const client = new Mistral({ apiKey: "your-mistral-key" });
const wrappedClient = ob.mistral(client);

// The Mistral SDK uses chat.complete rather than the OpenAI-style
// chat.completions.create.
const response = await wrappedClient.chat.complete({
  model: "mistral-large-latest",
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(response.choices[0].message.content);
```
xAI (TypeScript)

xAI's API is OpenAI-compatible, so you use the OpenAI client with xAI's base URL:

```typescript
import OpenAI from "openai";
import Observ from "observ-sdk";

const ob = new Observ({
  apiKey: "your-observ-api-key",
  recall: true,
});

const client = new OpenAI({
  apiKey: "your-xai-key",
  baseURL: "https://api.x.ai/v1",
});

const wrappedClient = ob.xai(client);

const response = await wrappedClient.chat.completions.create({
  model: "grok-beta",
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(response.choices[0].message.content);
```
OpenRouter (TypeScript)

OpenRouter is also OpenAI-compatible:

```typescript
import OpenAI from "openai";
import Observ from "observ-sdk";

const ob = new Observ({
  apiKey: "your-observ-api-key",
  recall: true,
});

const client = new OpenAI({
  apiKey: "your-openrouter-key",
  baseURL: "https://openrouter.ai/api/v1",
});

const wrappedClient = ob.openrouter(client);

const response = await wrappedClient.chat.completions.create({
  model: "anthropic/claude-3.5-sonnet",
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(response.choices[0].message.content);
```
Anthropic (Python)

```python
import anthropic
from observ import Observ

ob = Observ(
    api_key="your-observ-api-key",
    recall=True,  # Enable semantic caching
)

client = anthropic.Anthropic(api_key="your-anthropic-key")
wrapped_client = ob.anthropic(client)

response = wrapped_client.messages.create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello!"}],
)

print(response.content[0].text)
```
OpenAI (Python)

```python
from openai import OpenAI
from observ import Observ

ob = Observ(
    api_key="your-observ-api-key",
    recall=True,
)

client = OpenAI(api_key="your-openai-key")
wrapped_client = ob.openai(client)

response = wrapped_client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}],
)

print(response.choices[0].message.content)
```
Mistral (Python)

```python
from mistralai import Mistral
from observ import Observ

ob = Observ(
    api_key="your-observ-api-key",
    recall=True,
)

client = Mistral(api_key="your-mistral-key")
wrapped_client = ob.mistral(client)

# The Mistral SDK uses chat.complete rather than the OpenAI-style
# chat.completions.create.
response = wrapped_client.chat.complete(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "Hello!"}],
)

print(response.choices[0].message.content)
```
Google Gemini (Python)

```python
import google.generativeai as genai
from observ import Observ

ob = Observ(
    api_key="your-observ-api-key",
    recall=True,
)

genai.configure(api_key="your-google-key")
model = genai.GenerativeModel("gemini-1.5-pro")
wrapped_model = ob.gemini(model)

response = wrapped_model.generate_content("Hello!")
print(response.text)
```

Note: Gemini is only supported in the Python SDK.
xAI (Python)

```python
from openai import OpenAI
from observ import Observ

ob = Observ(
    api_key="your-observ-api-key",
    recall=True,
)

client = OpenAI(
    api_key="your-xai-key",
    base_url="https://api.x.ai/v1",
)

wrapped_client = ob.xai(client)

response = wrapped_client.chat.completions.create(
    model="grok-beta",
    messages=[{"role": "user", "content": "Hello!"}],
)

print(response.choices[0].message.content)
```
OpenRouter (Python)

```python
from openai import OpenAI
from observ import Observ

ob = Observ(
    api_key="your-observ-api-key",
    recall=True,
)

client = OpenAI(
    api_key="your-openrouter-key",
    base_url="https://openrouter.ai/api/v1",
)

wrapped_client = ob.openrouter(client)

response = wrapped_client.chat.completions.create(
    model="anthropic/claude-3.5-sonnet",
    messages=[{"role": "user", "content": "Hello!"}],
)

print(response.choices[0].message.content)
```
You can enhance your traces with session tracking and custom metadata.
Session Tracking
Chain `.withSessionId()` (TypeScript) or `.with_session_id()` (Python) before your API call to tie related requests to a single session:
```typescript
const sessionId = "conversation_abc123";

const response = await wrappedClient.messages
  .withSessionId(sessionId)
  .create({
    model: "claude-sonnet-4-20250514",
    max_tokens: 1024,
    messages: [{ role: "user", content: "Hello!" }],
  });
```
```python
session_id = "conversation_abc123"

response = wrapped_client.messages.with_session_id(session_id).create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello!"}],
)
```
Custom Metadata
Chain `.withMetadata()` (TypeScript) or `.with_metadata()` (Python) to attach custom key-value data to a trace:
```typescript
const response = await wrappedClient.messages
  .withMetadata({
    user_id: "user_123",
    feature: "chat",
    version: "1.0.0",
  })
  .create({
    model: "claude-sonnet-4-20250514",
    max_tokens: 1024,
    messages: [{ role: "user", content: "Hello!" }],
  });
```
```python
response = wrapped_client.messages.with_metadata({
    "user_id": "user_123",
    "feature": "chat",
    "version": "1.0.0",
}).create(
    model="claude-sonnet-4-20250514",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Hello!"}],
)
```
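The chainable design suggests the two helpers compose on a single call. A sketch, assuming `.withSessionId()` and `.withMetadata()` can be chained together (not confirmed by the examples above):

```typescript
// Sketch: attaching both a session ID and metadata to one request,
// assuming the chainable helpers compose.
const response = await wrappedClient.messages
  .withSessionId("conversation_abc123")
  .withMetadata({ user_id: "user_123", feature: "chat" })
  .create({
    model: "claude-sonnet-4-20250514",
    max_tokens: 1024,
    messages: [{ role: "user", content: "Hello!" }],
  });
```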
Next Steps