This guide covers everything you need to build production-quality AI applications using the LLM Gateway with TypeScript and JavaScript.
npm install @abvdev/client
npm install @abvdev/tracing
import { ABVClient } from '@abvdev/client';
// Option 1: pass the API key inline (avoid committing real keys to source control).
const abv = new ABVClient({
apiKey: 'sk_...'
});
// Option 2: read the key from the environment instead of hard-coding it.
const abv = new ABVClient({
apiKey: process.env.ABV_API_KEY
});
// Option 3: no explicit config — presumably the client falls back to
// ABV_API_KEY from the environment; confirm against the client docs.
const abv = new ABVClient();
// Optionally pin the data region for the gateway.
const abv = new ABVClient({
region: 'eu' // 'us' (default) or 'eu'
});
// abv-client.ts
// Recommended pattern: create one shared client per process and
// import it wherever it is needed.
import { ABVClient } from '@abvdev/client';
export const abv = new ABVClient();
// route-handler.ts
import { abv } from './abv-client';
/**
 * Sends a single user message through the gateway and returns the
 * assistant's reply text.
 */
export async function handleRequest(userMessage: string) {
  const completion = await abv.gateway.chat.completions.create({
    provider: 'openai',
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: userMessage }]
  });
  const [firstChoice] = completion.choices;
  return firstChoice.message.content;
}
import type {
ChatCompletionParams,
ChatCompletionResponse,
ChatMessage
} from '@abvdev/client';
/** Pulls the assistant's text out of the first choice of a completion. */
function extractResponse(response: ChatCompletionResponse): string {
  const [firstChoice] = response.choices;
  return firstChoice.message.content;
}
/**
 * Builds a two-message history: a system prompt followed by the
 * user's message, in the order the model expects.
 */
function buildMessages(
  systemPrompt: string,
  userMessage: string
): ChatMessage[] {
  const history: ChatMessage[] = [];
  history.push({ role: 'system', content: systemPrompt });
  history.push({ role: 'user', content: userMessage });
  return history;
}
// Build the request once as a typed value; the compiler validates
// every field against ChatCompletionParams before the call is made.
const params: ChatCompletionParams = {
provider: 'openai',
model: 'gpt-4o-mini',
messages: [{ role: 'user', content: 'Hello' }],
temperature: 0.7, // sampling randomness
max_tokens: 500 // upper bound on generated tokens
};
// TypeScript catches errors here
const response = await abv.gateway.chat.completions.create(params);
// With `stream: true` the call resolves to an async iterable that
// yields incremental chunks instead of one final response.
const stream = await abv.gateway.chat.completions.create({
provider: 'openai',
model: 'gpt-4o-mini',
messages: [{ role: 'user', content: 'Tell me a story' }],
stream: true
});
// Each chunk carries a small text delta; print deltas as they arrive.
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content;
if (content) {
process.stdout.write(content);
}
}
// Stream the completion while also accumulating the full text,
// e.g. for logging or post-processing once display is done.
const stream = await abv.gateway.chat.completions.create({
provider: 'openai',
model: 'gpt-4o-mini',
messages: [{ role: 'user', content: 'Explain TypeScript generics' }],
stream: true
});
let fullResponse = '';
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content;
if (content) {
// Append the delta to the running transcript and echo it live.
fullResponse += content;
process.stdout.write(content);
}
}
console.log('\n\nComplete response length:', fullResponse.length);
// Minimal error handling: any gateway failure (network, provider,
// authentication) rejects the promise and lands in the catch block.
try {
const response = await abv.gateway.chat.completions.create({
provider: 'openai',
model: 'gpt-4o-mini',
messages: [{ role: 'user', content: 'Hello' }]
});
console.log(response.choices[0].message.content);
} catch (error) {
console.error('Gateway request failed:', error);
}
/**
 * Sends a chat request and maps low-level gateway failures to
 * sanitized, user-safe error messages.
 *
 * @throws Error with a generic message on any failure; the raw
 *         error is logged, never surfaced to callers.
 */
async function makeRequest(
  messages: ChatMessage[]
): Promise<string> {
  try {
    const completion = await abv.gateway.chat.completions.create({
      provider: 'openai',
      model: 'gpt-4o-mini',
      messages
    });
    return completion.choices[0].message.content;
  } catch (error) {
    if (error instanceof Error) {
      // Classify known failure modes by inspecting the message text.
      const description = error.message;
      if (description.includes('rate limit')) {
        console.error('Rate limited, need to slow down');
        throw new Error('Service temporarily unavailable');
      }
      if (description.includes('authentication')) {
        console.error('Authentication failed, check API key');
        throw new Error('Configuration error');
      }
    }
    // Anything else is unexpected: log it and surface a generic failure.
    console.error('Unexpected error:', error);
    throw new Error('Request failed');
  }
}
/**
 * Like a plain chat request, but retries transient failures with
 * exponential backoff (1s, 2s, 4s, ...). Authentication failures are
 * never retried, and the last error is rethrown once attempts run out.
 */
async function makeRequestWithRetry(
  messages: ChatMessage[],
  maxRetries: number = 3
): Promise<string> {
  let lastError: Error | null = null;
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      const completion = await abv.gateway.chat.completions.create({
        provider: 'openai',
        model: 'gpt-4o-mini',
        messages
      });
      return completion.choices[0].message.content;
    } catch (caught) {
      lastError = caught instanceof Error ? caught : new Error(String(caught));
      const isAuthFailure = lastError.message.includes('authentication');
      const isFinalAttempt = attempt === maxRetries - 1;
      // Retrying won't fix a bad API key; give up on the last attempt too.
      if (isAuthFailure || isFinalAttempt) {
        throw lastError;
      }
      // Exponential backoff before the next attempt.
      const backoffMs = 1000 * 2 ** attempt;
      await new Promise(resolve => setTimeout(resolve, backoffMs));
    }
  }
  // Unreachable when maxRetries >= 1; kept as a defensive fallback.
  throw lastError || new Error('Max retries exceeded');
}
/**
 * A multi-turn chat session that accumulates the full message history
 * and replays it on every request so the model keeps context.
 */
class Conversation {
  private messages: ChatMessage[] = [];

  constructor(systemPrompt: string) {
    this.messages.push({
      role: 'system',
      content: systemPrompt
    });
  }

  /** Appends the user turn, queries the model, and records its reply. */
  async sendMessage(userMessage: string): Promise<string> {
    this.messages.push({ role: 'user', content: userMessage });
    const completion = await abv.gateway.chat.completions.create({
      provider: 'openai',
      model: 'gpt-4o-mini',
      messages: this.messages
    });
    const reply = completion.choices[0].message;
    this.messages.push(reply);
    return reply.content;
  }

  /** A defensive copy of the history; callers cannot mutate our state. */
  getHistory(): ChatMessage[] {
    return this.messages.slice();
  }
}
// Each call replays the whole accumulated history, so follow-up
// questions resolve against the earlier turns.
const conversation = new Conversation(
'You are a helpful assistant that explains programming concepts clearly.'
);
console.log(await conversation.sendMessage('What is a closure in JavaScript?'));
console.log(await conversation.sendMessage('Can you give me an example?'));
console.log(await conversation.sendMessage('How is this different from a regular function?'));
/**
 * A chat session with a sliding window: the history is capped at
 * `maxMessages` entries, always preserving the initial system prompt
 * so the model's instructions are never trimmed away.
 */
class Conversation {
  private messages: ChatMessage[];
  private readonly maxMessages: number;

  constructor(systemPrompt: string, maxMessages: number = 20) {
    this.messages = [{ role: 'system', content: systemPrompt }];
    this.maxMessages = maxMessages;
  }

  async sendMessage(userMessage: string): Promise<string> {
    this.messages.push({ role: 'user', content: userMessage });

    // Trim to the window size: keep the system prompt plus the most
    // recent (maxMessages - 1) turns.
    if (this.messages.length > this.maxMessages) {
      const systemMessage = this.messages[0];
      const recentTurns = this.messages.slice(-(this.maxMessages - 1));
      this.messages = [systemMessage, ...recentTurns];
    }

    const completion = await abv.gateway.chat.completions.create({
      provider: 'openai',
      model: 'gpt-4o-mini',
      messages: this.messages
    });
    const reply = completion.choices[0].message;
    this.messages.push(reply);
    return reply.content;
  }
}
/** Arguments accepted by the get_weather tool. */
interface WeatherParams {
  location: string;
  unit: 'celsius' | 'fahrenheit';
}

/**
 * Stub weather lookup used by the tool-calling example. A real
 * application would call an actual weather API here.
 */
async function getWeather(params: WeatherParams): Promise<string> {
  const unitSymbol = params.unit === 'celsius' ? 'C' : 'F';
  return `The weather in ${params.location} is 72°${unitSymbol} and sunny`;
}
// Tool (function-calling) definition advertised to the model. The
// `parameters` field is a JSON Schema describing the arguments the
// model should produce when it decides to call the tool.
const tools = [{
type: 'function' as const, // keep the literal type 'function', not string
function: {
name: 'get_weather',
description: 'Get the current weather for a location',
parameters: {
type: 'object',
properties: {
location: {
type: 'string',
description: 'The city and state, e.g. San Francisco, CA'
},
unit: {
type: 'string',
enum: ['celsius', 'fahrenheit']
}
},
required: ['location'] // only `location` is mandatory; `unit` may be omitted
}
}
}];
// Ask a question the model should answer by invoking the weather tool.
const response = await abv.gateway.chat.completions.create({
provider: 'openai',
model: 'gpt-4o-mini',
messages: [
{ role: 'user', content: 'What\'s the weather in New York?' }
],
tools
});
// Check if model wants to call a function
const message = response.choices[0].message;
if (message.tool_calls) {
for (const toolCall of message.tool_calls) {
if (toolCall.function.name === 'get_weather') {
// Arguments arrive as a JSON string produced by the model.
// NOTE(review): parsed without runtime validation — assumes the
// model emitted a shape matching WeatherParams; verify for production.
const params = JSON.parse(toolCall.function.arguments) as WeatherParams;
const result = await getWeather(params);
console.log(result);
}
}
}
import express from 'express';
import { abv } from './abv-client';
const app = express();
app.use(express.json());

/**
 * POST /api/chat — forwards the user's message to the gateway and
 * returns the assistant's reply.
 *
 * The request body is untrusted input: a missing or non-string
 * `message` is rejected with 400 instead of being forwarded upstream
 * (previously `undefined` could be sent straight to the gateway).
 */
app.post('/api/chat', async (req, res) => {
  try {
    const { message } = req.body;
    if (typeof message !== 'string' || message.length === 0) {
      res.status(400).json({ error: 'message must be a non-empty string' });
      return;
    }
    const response = await abv.gateway.chat.completions.create({
      provider: 'openai',
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: message }]
    });
    res.json({
      response: response.choices[0].message.content
    });
  } catch (error) {
    // Log the raw error server-side; return a generic message to clients.
    console.error('Chat error:', error);
    res.status(500).json({ error: 'Failed to generate response' });
  }
});
// app/api/chat/route.ts
import { NextResponse } from 'next/server';
import { abv } from '@/lib/abv-client';
/**
 * Next.js route handler: POST /api/chat.
 *
 * Validates the untrusted request body before spending a model call
 * on it — a missing or non-string `message` now returns 400 instead
 * of being forwarded to the gateway.
 */
export async function POST(request: Request) {
  try {
    const { message } = await request.json();
    if (typeof message !== 'string' || message.length === 0) {
      return NextResponse.json(
        { error: 'message must be a non-empty string' },
        { status: 400 }
      );
    }
    const response = await abv.gateway.chat.completions.create({
      provider: 'openai',
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: message }]
    });
    return NextResponse.json({
      response: response.choices[0].message.content
    });
  } catch (error) {
    // Covers body-parse failures and gateway errors alike.
    console.error('Chat error:', error);
    return NextResponse.json(
      { error: 'Failed to generate response' },
      { status: 500 }
    );
  }
}
/**
 * Next.js route handler that streams the model's reply as plain text.
 *
 * Fix: the async iteration inside `start()` was unguarded — if the
 * upstream stream failed mid-response, the ReadableStream was never
 * closed or errored and the HTTP response hung open. Errors are now
 * propagated to the consumer via `controller.error`.
 */
export async function POST(request: Request) {
  const { message } = await request.json();
  const stream = await abv.gateway.chat.completions.create({
    provider: 'openai',
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: message }],
    stream: true
  });
  const encoder = new TextEncoder();
  const readable = new ReadableStream({
    async start(controller) {
      try {
        for await (const chunk of stream) {
          const content = chunk.choices[0]?.delta?.content;
          if (content) {
            controller.enqueue(encoder.encode(content));
          }
        }
        controller.close();
      } catch (error) {
        // Surface mid-stream failures instead of hanging the response.
        controller.error(error);
      }
    }
  });
  return new Response(readable, {
    headers: { 'Content-Type': 'text/plain; charset=utf-8' }
  });
}
// __tests__/chat-handler.test.ts
import { jest } from '@jest/globals';
// Mock the ABV client
// jest.mock is hoisted above the imports below, so chat-handler
// receives this stub instead of the real client.
jest.mock('./abv-client', () => ({
abv: {
gateway: {
chat: {
completions: {
create: jest.fn()
}
}
}
}
}));
import { abv } from './abv-client';
import { handleChatRequest } from './chat-handler';
test('handleChatRequest processes messages correctly', async () => {
// Minimal response shape — just enough for the handler under test.
const mockResponse = {
choices: [{
message: {
role: 'assistant',
content: 'This is a test response'
}
}]
};
(abv.gateway.chat.completions.create as jest.Mock).mockResolvedValue(mockResponse);
const result = await handleChatRequest('Hello');
expect(result).toBe('This is a test response');
});
// Integration-style test: hits the live gateway, so it asserts on
// response structure rather than exact (non-deterministic) model output.
test('gateway returns valid responses', async () => {
const response = await abv.gateway.chat.completions.create({
provider: 'openai',
model: 'gpt-4o-mini',
messages: [{ role: 'user', content: 'Say hello' }]
});
// Assert structure, not specific content
expect(response.choices).toHaveLength(1);
expect(response.choices[0].message.content).toBeTruthy();
expect(response.choices[0].message.role).toBe('assistant');
expect(response.usage).toBeDefined();
expect(response.usage.total_tokens).toBeGreaterThan(0);
});
Was this page helpful?