These recipes cover the core tracing patterns. For conceptual background, see Observability & Tracing.
Setup
- Python
- JavaScript
```python
from abvdev import ABV, observe

abv = ABV(api_key="sk-abv-...")
```
```typescript
import { ABVClient } from "@abvdev/client";
import { NodeSDK } from "@opentelemetry/sdk-node";
import { ABVSpanProcessor } from "@abvdev/otel";
import {
  startObservation,
  startActiveObservation,
  observe,
  updateActiveTrace,
  updateActiveObservation,
} from "@abvdev/tracing";

// Initialize OpenTelemetry with ABV processor
export const abvSpanProcessor = new ABVSpanProcessor({
  apiKey: "sk-abv-...",
});

const sdk = new NodeSDK({ spanProcessors: [abvSpanProcessor] });
sdk.start();

const abv = new ABVClient({ apiKey: "sk-abv-..." });
```
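The recipes below flush explicitly at the end of each script (`abv.flush()` in Python, `abvSpanProcessor.forceFlush()` in JavaScript). For short-lived scripts you can also register the flush as an exit hook; a minimal Python sketch, assuming the `flush()` call behaves as in the Manual Spans recipe below:

```python
import atexit

from abvdev import ABV

abv = ABV(api_key="sk-abv-...")

# Export any buffered spans before the interpreter shuts down.
atexit.register(abv.flush)
```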
Decorator Pattern
Wrap existing functions without modifying their internals. Input and output are captured automatically.
- Python
- JavaScript
```python
import openai

from abvdev import observe

@observe()
def process_query(query: str) -> str:
    # Your logic here
    return f"Processed: {query}"

@observe(name="llm-generation", as_type="generation")
def generate_response(prompt: str) -> str:
    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

# Just call normally - tracing happens automatically
result = process_query("What is AI?")
response = generate_response("Explain machine learning")
```
```typescript
import { observe, updateActiveObservation } from "@abvdev/tracing";

const processQuery = observe(
  async (query: string) => {
    return `Processed: ${query}`;
  },
  { name: "process-query", captureInput: true, captureOutput: true }
);

const generateResponse = observe(
  async (prompt: string) => {
    // Update observation with model details
    updateActiveObservation({
      model: "gpt-4",
      asType: "generation",
    });
    const response = await openai.chat.completions.create({
      model: "gpt-4",
      messages: [{ role: "user", content: prompt }],
    });
    return response.choices[0].message.content;
  },
  { name: "llm-generation", asType: "generation" }
);

// Call normally
const result = await processQuery("What is AI?");
const response = await generateResponse("Explain machine learning");

await abvSpanProcessor.forceFlush();
```
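Decorated functions can also call each other. A minimal Python sketch, assuming that nested `@observe` calls are parented automatically under the caller's observation (mirroring the automatic parenting shown in the context-manager example below); `pipeline` is a hypothetical wrapper around the two functions defined above:

```python
# Hypothetical sketch: pipeline() calls the decorated process_query() and
# generate_response() from the example above; under the stated assumption,
# each call becomes a child observation in a single nested trace.
@observe(name="qa-pipeline")
def pipeline(question: str) -> str:
    cleaned = process_query(question)
    return generate_response(cleaned)

answer = pipeline("What is AI?")
```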
Context Manager Pattern
Automatic lifecycle management with explicit control over span attributes.
- Python
- JavaScript
```python
import openai

from abvdev import ABV

abv = ABV(api_key="sk-abv-...")

with abv.start_as_current_span(name="user-request") as span:
    span.update(input={"query": "What is the capital of France?"})

    # Nested generation
    with abv.start_as_current_generation(
        name="llm-call",
        model="gpt-4",
        model_parameters={"temperature": 0.7},
    ) as gen:
        response = openai.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": "What is the capital of France?"}],
        )
        gen.update(
            output=response.choices[0].message.content,
            usage_details={
                "prompt_tokens": response.usage.prompt_tokens,
                "completion_tokens": response.usage.completion_tokens,
            },
        )

    span.update(output="Paris")
```
```typescript
import { startActiveObservation, startObservation } from "@abvdev/tracing";

await startActiveObservation("user-request", async (span) => {
  span.update({ input: { query: "What is the capital of France?" } });

  // Nested generation - automatically parented
  const generation = startObservation(
    "llm-call",
    {
      model: "gpt-4",
      input: [{ role: "user", content: "What is the capital of France?" }],
    },
    { asType: "generation" }
  );

  const response = await openai.chat.completions.create({
    model: "gpt-4",
    messages: [{ role: "user", content: "What is the capital of France?" }],
  });

  generation
    .update({
      output: { content: response.choices[0].message.content },
      usageDetails: {
        input: response.usage?.prompt_tokens,
        output: response.usage?.completion_tokens,
      },
    })
    .end();

  span.update({ output: "Paris" });
});

await abvSpanProcessor.forceFlush();
```
Manual Spans
Full control over span creation, nesting, and lifecycle.
- Python
- JavaScript
```python
from abvdev import ABV

abv = ABV(api_key="sk-abv-...")

# Create root span
span = abv.start_span(name="pipeline")
span.update(input={"query": "Search for documents"})

try:
    # Create child spans
    retriever = abv.start_span(name="retrieve-docs")
    retriever.update(input={"query": "Search for documents"})
    docs = search_documents("Search for documents")
    retriever.update(output={"doc_count": len(docs)})
    retriever.end()

    # Generation span
    gen = abv.start_span(name="generate-answer")
    gen.update(
        input={"docs": docs},
        metadata={"model": "gpt-4"},
    )
    answer = generate_answer(docs)
    gen.update(output=answer)
    gen.end()

    span.update(output=answer)
finally:
    span.end()
    abv.flush()
```
```typescript
import { startObservation } from "@abvdev/tracing";

const span = startObservation("pipeline", {
  input: { query: "Search for documents" },
});

// Child tool span
const retriever = span.startObservation(
  "retrieve-docs",
  { input: { query: "Search for documents" } },
  { asType: "tool" }
);
const docs = await searchDocuments("Search for documents");
retriever.update({ output: { doc_count: docs.length } }).end();

// Child generation span
const gen = span.startObservation(
  "generate-answer",
  {
    model: "gpt-4",
    input: docs,
  },
  { asType: "generation" }
);
const answer = await generateAnswer(docs);
gen.update({ output: answer }).end();

span.update({ output: answer }).end();

await abvSpanProcessor.forceFlush();
```
Add User & Session Context
Track users and group related traces into sessions.
- Python
- JavaScript
```python
from abvdev import ABV, observe

abv = ABV(api_key="sk-abv-...")

@observe()
def handle_request(user_id: str, session_id: str, query: str):
    # Update trace-level context
    abv.update_current_trace(
        user_id=user_id,
        session_id=session_id,
        metadata={"source": "web-app"},
        tags=["production", "chat"],
    )
    return process_query(query)

handle_request("user-123", "session-456", "Hello!")
```
```typescript
import { updateActiveTrace, startActiveObservation } from "@abvdev/tracing";

await startActiveObservation("handle-request", async (span) => {
  updateActiveTrace({
    userId: "user-123",
    sessionId: "session-456",
    metadata: { source: "web-app" },
    tags: ["production", "chat"],
  });

  const result = await processQuery("Hello!");
  span.update({ output: result });
});
```
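The same context can be attached without the decorator. A sketch that combines `start_as_current_span` from the Context Manager Pattern with `update_current_trace`, assuming the trace update applies inside an explicit span the same way it does inside a decorated function (`process_query` is the function from the decorator recipe):

```python
# Attach user/session context from inside an explicit span; update_current_trace
# targets the trace this span belongs to.
with abv.start_as_current_span(name="handle-request") as span:
    abv.update_current_trace(
        user_id="user-123",
        session_id="session-456",
        metadata={"source": "web-app"},
    )
    span.update(output=process_query("Hello!"))
```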
Observation Types
Use specific types for better categorization in the ABV UI.

| Type | Use Case |
|---|---|
| span | General operations (default) |
| generation | LLM API calls |
| embedding | Text embeddings |
| retriever | Document/vector search |
| tool | External API calls, function invocations |
| agent | Agent workflows |
| chain | Multi-step pipelines |
| evaluator | Quality assessment |
| guardrail | Safety/validation checks |
- Python
- JavaScript
```python
# Retriever
with abv.start_as_current_observation(as_type="retriever", name="search") as obs:
    docs = vector_db.search(query)
    obs.update(output={"documents": docs})

# Tool
with abv.start_as_current_observation(as_type="tool", name="api-call") as obs:
    result = external_api.call(params)
    obs.update(output=result)

# Agent
with abv.start_as_current_observation(as_type="agent", name="assistant") as obs:
    response = agent.run(user_input)
    obs.update(output=response)
```
```typescript
import { startObservation } from "@abvdev/tracing";

// Retriever
const retriever = startObservation(
  "search",
  { input: { query } },
  { asType: "retriever" }
);
const docs = await vectorDb.search(query);
retriever.update({ output: { documents: docs } }).end();

// Tool
const tool = startObservation(
  "api-call",
  { input: params },
  { asType: "tool" }
);
const result = await externalApi.call(params);
tool.update({ output: result }).end();

// Agent
const agent = startObservation(
  "assistant",
  { input: userInput },
  { asType: "agent" }
);
const response = await agentRun(userInput);
agent.update({ output: response }).end();
```
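The table above also lists embedding, chain, evaluator, and guardrail. A Python sketch of those types, assuming the same `start_as_current_observation` call accepts them as `as_type` values; `embedding_model`, `run_pipeline`, `grade_answer`, and `contains_pii` are placeholder application functions:

```python
# Embedding
with abv.start_as_current_observation(as_type="embedding", name="embed-query") as obs:
    vector = embedding_model.embed(query)  # placeholder embedding client
    obs.update(output={"dimensions": len(vector)})

# Chain
with abv.start_as_current_observation(as_type="chain", name="rag-pipeline") as obs:
    answer = run_pipeline(query)  # placeholder multi-step pipeline
    obs.update(output=answer)

# Evaluator
with abv.start_as_current_observation(as_type="evaluator", name="answer-quality") as obs:
    score = grade_answer(answer)  # placeholder quality check
    obs.update(output={"score": score})

# Guardrail
with abv.start_as_current_observation(as_type="guardrail", name="pii-check") as obs:
    obs.update(output={"flagged": contains_pii(answer)})  # placeholder safety check
```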
Error Handling
Capture errors with appropriate log levels.
- Python
- JavaScript
```python
from abvdev import ABV, observe

abv = ABV(api_key="sk-abv-...")

@observe()
def risky_operation(data):
    try:
        result = process(data)
        return result
    except Exception as e:
        abv.update_current_span(
            level="ERROR",
            status_message=str(e),
            output={"error": str(e)},
        )
        raise

# Or with a context manager
with abv.start_as_current_span(name="operation") as span:
    try:
        result = process(data)
        span.update(output=result)
    except Exception as e:
        span.update(level="ERROR", status_message=str(e))
        raise
```
```typescript
import {
  observe,
  updateActiveObservation,
  startActiveObservation,
} from "@abvdev/tracing";

const riskyOperation = observe(
  async (data: any) => {
    try {
      return await process(data);
    } catch (error) {
      updateActiveObservation({
        level: "ERROR",
        statusMessage: error instanceof Error ? error.message : String(error),
      });
      throw error;
    }
  },
  { name: "risky-operation" }
);

// With startActiveObservation - errors auto-captured
await startActiveObservation("operation", async (span) => {
  const result = await process(data);
  span.update({ output: result });
  return result;
});
```
Get Trace URL
Share traces by getting the direct URL.
- Python
- JavaScript
```python
from abvdev import ABV, observe

abv = ABV(api_key="sk-abv-...")

@observe()
def my_function():
    trace_id = abv.get_current_trace_id()
    trace_url = abv.get_trace_url(trace_id=trace_id)
    print(f"View trace: {trace_url}")
    return "done"
```
```typescript
import { getActiveTraceId, startActiveObservation } from "@abvdev/tracing";

await startActiveObservation("my-function", async () => {
  const traceId = getActiveTraceId();
  const traceUrl = await abv.getTraceUrl(traceId);
  console.log(`View trace: ${traceUrl}`);
});
```