Python

Complete Python examples for calling the AI Gateway with the OpenAI Python SDK.

Setup

Install the OpenAI SDK:

pip install openai

Create a client:

import os
from openai import OpenAI
 
client = OpenAI(
    base_url="https://api.transactional.dev/ai/v1",
    api_key=os.environ["GATEWAY_API_KEY"]
)

Basic Chat

def chat(message: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}]
    )
    return response.choices[0].message.content
 
answer = chat("What is Python?")
print(answer)

Conversation with History

from typing import List, Dict, Optional
 
class Conversation:
    def __init__(self, system_prompt: Optional[str] = None):
        self.messages: List[Dict[str, str]] = []
        if system_prompt:
            self.messages.append({"role": "system", "content": system_prompt})
 
    def send(self, user_message: str) -> str:
        self.messages.append({"role": "user", "content": user_message})
 
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=self.messages
        )
 
        assistant_message = response.choices[0].message.content
        self.messages.append({"role": "assistant", "content": assistant_message})
 
        return assistant_message
 
# Usage
convo = Conversation("You are a helpful coding assistant.")
print(convo.send("What is a list comprehension?"))
print(convo.send("Can you give me an example?"))

Streaming

def stream_chat(message: str):
    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}],
        stream=True
    )
 
    for chunk in stream:
        content = chunk.choices[0].delta.content or ""
        print(content, end="", flush=True)
    print()  # New line at end
 
stream_chat("Tell me a short story")

Async Support

import asyncio
from openai import AsyncOpenAI
 
async_client = AsyncOpenAI(
    base_url="https://api.transactional.dev/ai/v1",
    api_key=os.environ["GATEWAY_API_KEY"]
)
 
async def async_chat(message: str) -> str:
    response = await async_client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}]
    )
    return response.choices[0].message.content
 
# Run async
answer = asyncio.run(async_chat("What is asyncio?"))
print(answer)
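
The main benefit of the async client is concurrency: several prompts can be sent at once with asyncio.gather. A minimal sketch using the async_chat helper above:

async def chat_many(prompts: list[str]) -> list[str]:
    # Send all requests concurrently and wait for every result
    return await asyncio.gather(*(async_chat(p) for p in prompts))

answers = asyncio.run(chat_many(["What is asyncio?", "What is a coroutine?"]))
for answer in answers:
    print(answer)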

Async Streaming

async def async_stream_chat(message: str):
    stream = await async_client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}],
        stream=True
    )
 
    async for chunk in stream:
        content = chunk.choices[0].delta.content or ""
        print(content, end="", flush=True)
    print()
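
As with the synchronous helper, run it as a coroutine:

asyncio.run(async_stream_chat("Tell me a short story"))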

Function Calling

import json
 
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "City name"},
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
                },
                "required": ["location"]
            }
        }
    }
]
 
def chat_with_tools(message: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}],
        tools=tools
    )
 
    tool_calls = response.choices[0].message.tool_calls
    if tool_calls:
        tool_call = tool_calls[0]
        args = json.loads(tool_call.function.arguments)
        print(f"Function: {tool_call.function.name}")
        print(f"Arguments: {args}")
 
        # Call your actual function
        result = get_weather(args["location"], args.get("unit", "celsius"))
 
        # Continue conversation with result
        final_response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "user", "content": message},
                response.choices[0].message,
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": json.dumps(result)
                }
            ]
        )
 
        return final_response.choices[0].message.content
 
    return response.choices[0].message.content
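
The example assumes you already have a get_weather implementation. A stub like the one below (hypothetical values) is enough to run it end to end:

def get_weather(location: str, unit: str = "celsius") -> dict:
    # Stub for demonstration only; replace with a real weather lookup
    return {"location": location, "temperature": 22, "unit": unit}

print(chat_with_tools("What's the weather in Paris?"))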

JSON Mode

import json
from pydantic import BaseModel
from typing import List
 
class Fruit(BaseModel):
    name: str
    color: str
    taste: str
 
def get_structured_data() -> List[Fruit]:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "Output valid JSON only."},
            {"role": "user", "content": "List 3 fruits with name, color, and taste."}
        ],
        response_format={"type": "json_object"}
    )
 
    content = response.choices[0].message.content
    data = json.loads(content)
    return [Fruit(**fruit) for fruit in data["fruits"]]
 
fruits = get_structured_data()
for fruit in fruits:
    print(f"{fruit.name}: {fruit.color}, tastes {fruit.taste}")

Error Handling

from openai import APIConnectionError, APIStatusError, RateLimitError
import time
 
def robust_chat(message: str, max_retries: int = 3) -> str:
    for attempt in range(max_retries):
        try:
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[{"role": "user", "content": message}]
            )
            return response.choices[0].message.content
 
        except RateLimitError as e:
            retry_after = int(e.response.headers.get("retry-after", 5))
            print(f"Rate limited, retrying in {retry_after}s...")
            time.sleep(retry_after)
 
        except APIConnectionError:
            print("Connection error, retrying...")
            time.sleep(2 ** attempt)

        except APIStatusError as e:
            if e.status_code >= 500:
                print("Server error, retrying...")
                time.sleep(2 ** attempt)
            else:
                raise
 
    raise Exception("Max retries exceeded")
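
Hand-rolled retries give you the most control, but the OpenAI SDK also retries some failures (connection errors, 429s, and 5xx responses) automatically; the number of attempts is set with the max_retries argument on the client:

client_with_retries = OpenAI(
    base_url="https://api.transactional.dev/ai/v1",
    api_key=os.environ["GATEWAY_API_KEY"],
    max_retries=5  # SDK default is 2
)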

FastAPI Integration

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
 
app = FastAPI()
 
class ChatRequest(BaseModel):
    message: str
    stream: bool = False
 
@app.post("/chat")
async def chat_endpoint(request: ChatRequest):
    if request.stream:
        return StreamingResponse(
            stream_response(request.message),
            media_type="text/event-stream"
        )
 
    response = await async_client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": request.message}]
    )
    return {"response": response.choices[0].message.content}
 
async def stream_response(message: str):
    stream = await async_client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": message}],
        stream=True
    )
    async for chunk in stream:
        content = chunk.choices[0].delta.content or ""
        if content:
            yield f"data: {content}\n\n"
    yield "data: [DONE]\n\n"

Environment Setup

# config.py
import os
from openai import OpenAI
 
def get_client() -> OpenAI:
    api_key = os.environ.get("GATEWAY_API_KEY")
    if not api_key:
        raise ValueError("Missing GATEWAY_API_KEY environment variable")
 
    return OpenAI(
        base_url="https://api.transactional.dev/ai/v1",
        api_key=api_key
    )

# .env
GATEWAY_API_KEY=gw_sk_your_key_here
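
Note that os.environ does not read .env files on its own. If you keep the key in .env, load it first, for example with python-dotenv (an assumption here, not something the gateway requires):

# main.py (hypothetical) -- load .env before creating the client
from dotenv import load_dotenv  # pip install python-dotenv
from config import get_client

load_dotenv()
client = get_client()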

Next Steps