TypeScript
Complete TypeScript examples for AI Gateway.
Setup
Install the OpenAI SDK:
npm install openai

Create a client:
import OpenAI from 'openai';

// Standard OpenAI SDK client, pointed at the AI Gateway instead of api.openai.com.
const openai = new OpenAI({
  apiKey: process.env.GATEWAY_API_KEY!,
  baseURL: 'https://api.transactional.dev/ai/v1',
});

Basic Chat
/**
 * Send a single user message and return the assistant's reply text.
 * Returns an empty string when the model produces no content.
 */
async function chat(message: string): Promise<string> {
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: message }],
  });
  const reply = completion.choices[0].message.content;
  return reply ?? '';
}
// Usage: one-shot question and answer.
const answer = await chat('What is TypeScript?');
console.log(answer);

Conversation with History
// One chat turn in the OpenAI wire format: a role plus its text content.
interface Message {
  role: 'system' | 'user' | 'assistant';
  content: string;
}
/**
 * Multi-turn chat session. The full transcript is accumulated locally
 * and replayed on every request so the model sees prior context.
 */
class Conversation {
  private history: Message[] = [];

  constructor(systemPrompt?: string) {
    // Optionally seed the transcript with a system instruction.
    if (systemPrompt) {
      this.history.push({ role: 'system', content: systemPrompt });
    }
  }

  /** Append the user turn, call the model, record and return its reply. */
  async send(userMessage: string): Promise<string> {
    this.history.push({ role: 'user', content: userMessage });
    const completion = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: this.history,
    });
    const reply = completion.choices[0].message.content ?? '';
    this.history.push({ role: 'assistant', content: reply });
    return reply;
  }
}
// Usage: the second send() sees the first exchange as context.
const convo = new Conversation('You are a helpful coding assistant.');
const firstReply = await convo.send('What is a closure?');
console.log(firstReply);
const followUpReply = await convo.send('Can you give me an example?');
console.log(followUpReply);

Streaming
/**
 * Stream a reply token-by-token to stdout instead of waiting for the
 * whole completion to finish.
 */
async function streamChat(message: string): Promise<void> {
  const stream = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: message }],
    stream: true,
  });

  for await (const chunk of stream) {
    // Each chunk carries an incremental delta; it may be empty.
    const delta = chunk.choices[0]?.delta?.content;
    if (delta) {
      process.stdout.write(delta);
    }
  }
  console.log(); // Trailing newline once the stream ends.
}
await streamChat('Tell me a short story');

Streaming with React
// app/api/chat/route.ts
import { OpenAIStream, StreamingTextResponse } from 'ai';
import OpenAI from 'openai';

// Dedicated client for this route module, pointed at the gateway.
// NOTE(review): OpenAIStream/StreamingTextResponse were removed in AI SDK v4 —
// this example assumes `ai` v3.x; confirm the installed version.
const openai = new OpenAI({
  apiKey: process.env.GATEWAY_API_KEY!,
  baseURL: 'https://api.transactional.dev/ai/v1',
});
// Route handler: forwards the client's chat history and streams the reply back.
export async function POST(req: Request) {
  const { messages } = await req.json();

  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages,
    stream: true,
  });

  // Adapt the SDK stream into a streaming HTTP response.
  return new StreamingTextResponse(OpenAIStream(completion));
}

Function Calling
// Tool definitions advertised to the model. Parameters use JSON Schema;
// only `location` is required.
const tools: OpenAI.ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get weather for a location',
      parameters: {
        type: 'object',
        properties: {
          location: { type: 'string', description: 'City name' },
          unit: { type: 'string', enum: ['celsius', 'fahrenheit'] },
        },
        required: ['location'],
      },
    },
  },
];
/**
 * Ask the model a question, executing at most one tool call if it
 * requests one, then feed the tool result back for the final answer.
 */
async function chatWithTools(message: string) {
  const first = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [{ role: 'user', content: message }],
    tools,
  });

  const assistantTurn = first.choices[0].message;
  const toolCall = assistantTurn.tool_calls?.[0];
  if (!toolCall) {
    // The model answered directly — no tool needed.
    return assistantTurn.content;
  }

  const args = JSON.parse(toolCall.function.arguments);
  console.log(`Function: ${toolCall.function.name}`);
  console.log(`Arguments: ${JSON.stringify(args)}`);

  // Call your actual function
  const result = await getWeather(args.location, args.unit);

  // Continue conversation with result
  const followUp = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      { role: 'user', content: message },
      assistantTurn,
      {
        role: 'tool',
        tool_call_id: toolCall.id,
        content: JSON.stringify(result),
      },
    ],
  });
  return followUp.choices[0].message.content;
}

JSON Mode
// Shape of each item the JSON-mode example expects in the model's output.
interface Fruit {
  name: string;
  color: string;
  taste: string;
}
/**
 * Request structured data via JSON mode and parse the result.
 *
 * Returns the parsed list of fruits, or [] when the model's JSON does
 * not contain a `fruits` array.
 */
async function getStructuredData(): Promise<Fruit[]> {
  const response = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      { role: 'system', content: 'Output valid JSON only.' },
      // Fix: name the wrapper key explicitly — JSON mode guarantees valid
      // JSON, not any particular shape, so the prompt must pin the schema
      // the code below reads.
      { role: 'user', content: 'List 3 fruits with name, color, and taste, as {"fruits": [...]}.' }
    ],
    response_format: { type: 'json_object' },
  });
  const content = response.choices[0].message.content ?? '{}';
  const data = JSON.parse(content);
  // Fix: JSON.parse output is untrusted; the original returned undefined
  // (typed as Fruit[]) when the key was missing. Validate and fall back.
  return Array.isArray(data.fruits) ? (data.fruits as Fruit[]) : [];
}

Error Handling
/**
 * Chat with retries: honors Retry-After on 429 and uses exponential
 * backoff on 5xx, up to maxRetries attempts.
 *
 * @throws OpenAI.APIError for non-retryable API failures.
 * @throws Error when all retries are exhausted.
 */
async function robustChat(message: string): Promise<string> {
  const maxRetries = 3;
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      const response = await openai.chat.completions.create({
        model: 'gpt-4o',
        messages: [{ role: 'user', content: message }],
      });
      return response.choices[0].message.content ?? '';
    } catch (error) {
      if (error instanceof OpenAI.APIError) {
        if (error.status === 429) {
          // Fix: parse with an explicit radix, and fall back to 5s when the
          // header is absent OR malformed — the original produced NaN on a
          // malformed header, making setTimeout fire immediately and
          // hammer the rate-limited API.
          const parsed = Number.parseInt(error.headers?.['retry-after'] ?? '', 10);
          const retryAfter = Number.isNaN(parsed) ? 5 : parsed;
          console.log(`Rate limited, retrying in ${retryAfter}s...`);
          await sleep(retryAfter * 1000);
          continue;
        }
        if (error.status >= 500) {
          // Transient server error: exponential backoff (1s, 2s, 4s...).
          console.log(`Server error, retrying...`);
          await sleep(1000 * Math.pow(2, attempt));
          continue;
        }
        // Other 4xx errors are not retryable.
        throw error;
      }
      throw error;
    }
  }
  throw new Error('Max retries exceeded');
}
/** Promise-based delay helper used by the retry loop. */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}

Type Safety
import OpenAI from 'openai';
import type {
  ChatCompletionMessageParam,
  ChatCompletionTool,
} from 'openai/resources/chat/completions';

// Type-safe messages: the union role type catches typos at compile time.
const messages: ChatCompletionMessageParam[] = [
  { role: 'system', content: 'You are helpful.' },
  { role: 'user', content: 'Hello!' },
];

// Type-safe tools: the SDK type validates the tool-definition shape.
const tools: ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'greet',
      parameters: { type: 'object', properties: {} },
    },
  },
];

Environment Setup
// lib/openai.ts
import OpenAI from 'openai';

// Fail fast at module load when the key is missing, rather than at the
// first API request.
if (!process.env.GATEWAY_API_KEY) {
  throw new Error('Missing GATEWAY_API_KEY environment variable');
}

// Shared, pre-configured client for the rest of the app.
export const openai = new OpenAI({
  apiKey: process.env.GATEWAY_API_KEY,
  baseURL: 'https://api.transactional.dev/ai/v1',
});

# .env
GATEWAY_API_KEY=gw_sk_your_key_here