# OpenAI Integration

Complete guide to integrating OpenAI models with Lumina.

## Installation

```bash
npm install @uselumina/sdk openai
```

## Basic Usage

import { initLumina } from '@uselumina/sdk';
import OpenAI from 'openai';

// The OpenAI client picks up the API key from the environment.
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Point Lumina at the local trace collector.
const lumina = initLumina({
  endpoint: 'http://localhost:9411/v1/traces',
  service_name: 'openai-app',
});

// The request is wrapped in a thunk so Lumina can time the call
// and record it as an LLM span with the given metadata.
const createCompletion = () =>
  openai.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Hello!' }],
  });

const response = await lumina.traceLLM(createCompletion, {
  name: 'chat-completion',
  system: 'openai',
  prompt: 'Hello!',
});

## Supported Models

| Model | Cost Calculation | Token Tracking |
| --- | --- | --- |
| GPT-4 | ✓ | ✓ |
| GPT-4 Turbo | ✓ | ✓ |
| GPT-3.5 Turbo | ✓ | ✓ |

## Streaming

// traceLLM returns whatever the wrapped call resolves to, so the
// stream can be consumed exactly as it would be without tracing.
const stream = await lumina.traceLLM(
  () =>
    openai.chat.completions.create({
      model: 'gpt-4',
      messages: [{ role: 'user', content: 'Hello!' }],
      stream: true,
    }),
  { name: 'chat-stream', system: 'openai' }
);

// Print each token delta as it arrives; chunks without content are skipped.
for await (const chunk of stream) {
  const text = chunk.choices[0]?.delta?.content;
  process.stdout.write(text || '');
}

## Function Calling

await lumina.traceLLM(
  () =>
    openai.chat.completions.create({
      model: 'gpt-4',
      // Double quotes here: the original single-quoted 'What's the weather?'
      // was a syntax error (unescaped apostrophe).
      messages: [{ role: 'user', content: "What's the weather?" }],
      // NOTE: `functions` is deprecated in the OpenAI API in favor of
      // `tools`; kept here so the example matches the rest of this guide.
      functions: [
        {
          name: 'get_weather',
          description: 'Get weather for location',
          parameters: {
            type: 'object',
            properties: {
              location: { type: 'string' },
            },
          },
        },
      ],
    }),
  { name: 'function-call', system: 'openai' }
);