# JavaScript/TypeScript SDK
A powerful JavaScript SDK with full support for Node.js, browsers, and popular frontend frameworks.
Node.js 14+ · TypeScript · ESM & CJS · React/Vue/Angular
## Install

Using npm:

```bash
npm install openai
```

Using yarn:

```bash
yarn add openai
```

Using pnpm:

```bash
pnpm add openai
```

> **TypeScript Support:** The SDK ships with its own TypeScript type definitions, so there is no need to install additional `@types` packages.
## Getting Started
### Node.js Environment

```javascript
// CommonJS
const OpenAI = require('openai');
// ES Modules (recommended)
import OpenAI from 'openai';
// Initialize client
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY || 'your-api-key',
baseURL: 'https://api.n1n.ai/v1' // usingn1n.aiEndpoint
});
// Basic conversation
async function chat() {
const completion = await openai.chat.completions.create({
model: "gpt-3.5-turbo",
messages: [
{ role: "system", content: "You are a helpful assistant." },
{ role: "user", content: "Hello, how are you?" }
]
});
console.log(completion.choices[0].message.content);
}
chat();
```

### TypeScript Example

```typescript
import OpenAI from 'openai';
import { ChatCompletionMessageParam } from 'openai/resources/chat';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY!,
baseURL: 'https://api.n1n.ai/v1'
});
interface ChatOptions {
model?: string;
temperature?: number;
maxTokens?: number;
}
async function createChat(
messages: ChatCompletionMessageParam[],
options: ChatOptions = {}
): Promise<string> {
const completion = await openai.chat.completions.create({
model: options.model || "gpt-3.5-turbo",
messages,
temperature: options.temperature ?? 0.7, // ?? so an explicit 0 is not overridden
max_tokens: options.maxTokens
});
return completion.choices[0].message.content || '';
}
// Usage example
const response = await createChat([
{ role: "user", content: "Explain TypeScript in one sentence" }
]);
```

### Browser Environment
> **Security Notice:** Never expose API keys directly in frontend code. Proxy API requests through your backend instead.

```javascript
// Frontend calling through backend proxy
async function callAPI(message) {
const response = await fetch('/api/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ message })
});
const data = await response.json();
return data.reply;
}
// Backend API route (Next.js example)
// pages/api/chat.js or app/api/chat/route.js
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY
});
export async function POST(request) {
const { message } = await request.json();
const completion = await openai.chat.completions.create({
model: "gpt-3.5-turbo",
messages: [{ role: "user", content: message }]
});
return Response.json({
reply: completion.choices[0].message.content
});
}
```

## Advanced Features
### Streaming Response

```javascript
// Node.js streaming response
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY
});
async function streamChat() {
const stream = await openai.chat.completions.create({
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Tell me a story' }],
stream: true,
});
for await (const chunk of stream) {
process.stdout.write(chunk.choices[0]?.delta?.content || '');
}
}
// Handling streaming response in browser
async function streamInBrowser() {
const response = await fetch('/api/stream', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ message: 'Tell me a story' })
});
const reader = response.body.getReader();
const decoder = new TextDecoder();
while (true) {
const { done, value } = await reader.read();
if (done) break;
const chunk = decoder.decode(value);
// Update the UI with the decoded chunk
updateUI(chunk);
}
}
```
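The browser code above assumes a `/api/stream` backend route that is not shown. A minimal sketch of one, using a Next.js App Router handler (the route path and plain-text re-encoding scheme are assumptions, not part of the SDK):

```javascript
// app/api/stream/route.js — hypothetical backend for the browser example above
import OpenAI from 'openai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

export async function POST(request) {
  const { message } = await request.json();

  const stream = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: message }],
    stream: true,
  });

  // Re-encode the SDK's async iterator as a plain text stream for the browser
  const encoder = new TextEncoder();
  const readable = new ReadableStream({
    async start(controller) {
      for await (const chunk of stream) {
        controller.enqueue(encoder.encode(chunk.choices[0]?.delta?.content || ''));
      }
      controller.close();
    },
  });

  return new Response(readable, {
    headers: { 'Content-Type': 'text/plain; charset=utf-8' },
  });
}
```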
### Error Handling

```javascript
import OpenAI from 'openai';
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
maxRetries: 3, // automatically retry failed requests
});
async function safeChat(message) {
try {
const completion = await openai.chat.completions.create({
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: message }]
});
return completion.choices[0].message.content;
} catch (error) {
if (error instanceof OpenAI.APIError) {
console.error('API Error:', error.status, error.message);
if (error.status === 401) {
throw new Error('Invalid API key');
} else if (error.status === 429) {
throw new Error('Rate limit exceeded');
} else if (error.status >= 500) {
throw new Error('Server error, please retry');
}
}
throw error;
}
}
// Using AbortController to cancel requests
const controller = new AbortController();
const promise = openai.chat.completions.create({
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Hello' }]
}, {
signal: controller.signal
});
// Cancel request
setTimeout(() => controller.abort(), 1000);
```

### Function Calling

```javascript
// Define functions
const functions = [
{
name: 'get_weather',
description: 'Get the current weather',
parameters: {
type: 'object',
properties: {
location: {
type: 'string',
description: 'The city and state'
},
unit: {
type: 'string',
enum: ['celsius', 'fahrenheit']
}
},
required: ['location']
}
}
];
// Implement function
async function getWeather(location, unit = 'celsius') {
// Actually call weather API
return JSON.stringify({
location,
temperature: 22,
unit,
description: 'Sunny'
});
}
// Using function calling
const response = await openai.chat.completions.create({
model: 'gpt-3.5-turbo',
messages: [
{ role: 'user', content: "What's the weather in New York?" }
],
functions,
function_call: 'auto'
});
const message = response.choices[0].message;
if (message.function_call) {
const functionName = message.function_call.name;
const functionArgs = JSON.parse(message.function_call.arguments);
let functionResult;
if (functionName === 'get_weather') {
functionResult = await getWeather(
functionArgs.location,
functionArgs.unit
);
}
// Send function result back to model
const secondResponse = await openai.chat.completions.create({
model: 'gpt-3.5-turbo',
messages: [
{ role: 'user', content: "What's the weather in New York?" },
message,
{
role: 'function',
name: functionName,
content: functionResult
}
]
});
console.log(secondResponse.choices[0].message.content);
}
```
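The `functions` / `function_call` fields shown above are the legacy shape; current OpenAI-compatible APIs express the same flow through `tools` / `tool_calls`. A brief sketch of the equivalent request, reusing `functions[0]` and `getWeather` from above:

```javascript
// Same weather flow with the newer tools API
const toolResponse = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: "What's the weather in New York?" }],
  tools: [{ type: 'function', function: functions[0] }],
  tool_choice: 'auto'
});

const toolCall = toolResponse.choices[0].message.tool_calls?.[0];
if (toolCall) {
  const args = JSON.parse(toolCall.function.arguments);
  const result = await getWeather(args.location, args.unit);
  // Tool results go back to the model with role 'tool' and the matching tool_call_id
}
```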
### React Integration Example

```jsx
import { useState, useCallback } from 'react';
// Custom Hook
function useChat() {
const [messages, setMessages] = useState([]);
const [isLoading, setIsLoading] = useState(false);
const [error, setError] = useState(null);
const sendMessage = useCallback(async (content) => {
setIsLoading(true);
setError(null);
// Add user message
const userMessage = { role: 'user', content };
setMessages(prev => [...prev, userMessage]);
try {
const response = await fetch('/api/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
messages: [...messages, userMessage]
})
});
if (!response.ok) throw new Error('API request failed');
const data = await response.json();
// Add AI response
setMessages(prev => [...prev, {
role: 'assistant',
content: data.reply
}]);
} catch (err) {
setError(err.message);
} finally {
setIsLoading(false);
}
}, [messages]);
const clearMessages = useCallback(() => {
setMessages([]);
}, []);
return {
messages,
isLoading,
error,
sendMessage,
clearMessages
};
}
// React component using the hook
function ChatComponent() {
const { messages, isLoading, error, sendMessage } = useChat();
const [input, setInput] = useState('');
const handleSubmit = (e) => {
e.preventDefault();
if (input.trim()) {
sendMessage(input);
setInput('');
}
};
return (
<div className="chat-container">
<div className="messages">
{messages.map((msg, index) => (
<div key={index} className={`message ${msg.role}`}>
{msg.content}
</div>
))}
{isLoading && <div className="loading">AI is thinking...</div>}
{error && <div className="error">{error}</div>}
</div>
<form onSubmit={handleSubmit}>
<input
type="text"
value={input}
onChange={(e) => setInput(e.target.value)}
placeholder="Enter message..."
disabled={isLoading}
/>
<button type="submit" disabled={isLoading}>
Send
</button>
</form>
</div>
);
}
```

## Configuration Options
| Option | Type | Description |
|---|---|---|
| apiKey | string | API key |
| baseURL | string | API base URL |
| timeout | number | Request timeout (milliseconds) |
| maxRetries | number | Maximum retry attempts |
| httpAgent | Agent | Custom HTTP agent |
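A sketch passing all of these options to the constructor (the values are illustrative, not recommendations):

```javascript
import OpenAI from 'openai';
import { Agent } from 'node:https';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'https://api.n1n.ai/v1',
  timeout: 30_000,                            // 30-second request timeout
  maxRetries: 2,                              // retry transient failures twice
  httpAgent: new Agent({ keepAlive: true }),  // reuse TCP connections
});
```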
## Best Practices
### Environment Variable Management

Use a `.env` file to manage sensitive information:

```env
OPENAI_API_KEY=sk-xxxx
OPENAI_BASE_URL=https://api.n1n.ai/v1
```
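One common way to load these variables is the third-party dotenv package (an assumption; any environment loader works):

```javascript
import 'dotenv/config'; // loads .env into process.env
import OpenAI from 'openai';

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: process.env.OPENAI_BASE_URL,
});
```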
### Type Safety

Use TypeScript for complete type inference and auto-completion.
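For instance, the completion object returned by the SDK is fully typed, so its fields auto-complete without manual annotations:

```typescript
import OpenAI from 'openai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY! });

const completion = await openai.chat.completions.create({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Hi' }],
});

// Inferred as string | null — no manual type annotations needed
const text = completion.choices[0].message.content;
```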
### Performance Optimization

- Reuse OpenAI client instances
- Use streaming responses for long outputs
- Implement request caching (see the sketch below)
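A naive in-memory sketch of request caching (the Map-based cache and key scheme are illustrative assumptions; use Redis or an LRU in production):

```javascript
import OpenAI from 'openai';

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Cache replies keyed by model + prompt
const cache = new Map();

async function cachedChat(model, prompt) {
  const key = `${model}:${prompt}`;
  if (cache.has(key)) return cache.get(key);

  const completion = await openai.chat.completions.create({
    model,
    messages: [{ role: 'user', content: prompt }],
  });
  const reply = completion.choices[0].message.content;
  cache.set(key, reply);
  return reply;
}
```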
### Error Handling

- Catch all exceptions
- Implement retry logic (see the sketch below)
- Surface user-friendly error messages
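The SDK's `maxRetries` option already retries transient failures; for application-level policies, a manual wrapper with exponential backoff might look like this (attempt counts and delays are assumptions):

```javascript
// Retry an async request factory with exponential backoff
async function withRetry(fn, attempts = 3, baseDelayMs = 500) {
  for (let i = 0; i < attempts; i++) {
    try {
      return await fn();
    } catch (error) {
      if (i === attempts - 1) throw error;
      await new Promise((resolve) => setTimeout(resolve, baseDelayMs * 2 ** i));
    }
  }
}

// Usage with the safeChat helper defined earlier
const reply = await withRetry(() => safeChat('Hello'));
```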