Python SDK Complete Tutorial
Learn from scratch how to use the OpenAI Python SDK to call LLM APIs and build intelligent applications. This tutorial covers everything from environment setup to advanced features.
Getting Started
1. Install the SDK
# Install via pip
pip install openai

# Or via conda
conda install -c conda-forge openai

# Install a specific version
pip install openai==1.6.0
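After installing, you can verify the SDK from Python and check which version you got; the examples in this tutorial target the v1-style client (openai >= 1.0):

import openai
print(openai.__version__)  # Should print 1.x for the client API used below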
2. Basic Configuration
import os
from openai import OpenAI
# Method 1: Environment variable (in real projects, set this outside your code)
os.environ["OPENAI_API_KEY"] = "your-api-key"
client = OpenAI()
# Method 2: Pass directly
client = OpenAI(
api_key="your-api-key",
base_url="https://api.openai.com/v1" # Optional: custom endpoint
)
# Method 3: Load from config file
import json
with open('config.json') as f:
    config = json.load(f)

client = OpenAI(api_key=config['api_key'])
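With a client configured, a quick sanity check is to list the models your key can access; models.list() is a standard client call, though the exact IDs returned depend on your account:

models = client.models.list()
print([m.id for m in models][:5])  # First few model IDs visible to your key

Basic Features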
Chat Completions
# Simple conversation
response = client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a friendly assistant"},
{"role": "user", "content": "Introduce Python"}
]
)
print(response.choices[0].message.content)
# Streaming output
stream = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Write an article"}],
stream=True
)
for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
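Chat calls also accept generation parameters; a minimal sketch of the most common ones (the values are illustrative, not recommendations):

response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write an article"}],
    temperature=0.7,  # Higher values give more varied output
    max_tokens=500,   # Upper bound on generated tokens
    top_p=1.0         # Nucleus sampling threshold
)

Advanced Features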
Function Calling
# Define functions
functions = [
{
"name": "get_weather",
"description": "Get weather information",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
]
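# The handling code below assumes a local get_weather implementation;
# a placeholder stub with made-up data is enough to follow along
def get_weather(location, unit="celsius"):
    # Stand-in for a real weather API call
    return {"location": location, "temperature": 22, "unit": unit}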
# Call the model with the function definitions (legacy functions API;
# a tools-based sketch appears after this example)
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "How is the weather in Beijing?"}],
functions=functions,
function_call="auto"
)
# Handle function call
if response.choices[0].message.function_call:
function_name = response.choices[0].message.function_call.name
function_args = json.loads(
response.choices[0].message.function_call.arguments
)
# Execute actual function
if function_name == "get_weather":
weather_data = get_weather(**function_args)
# Return the result to the model
second_response = client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "user", "content": "How is the weather in Beijing?"},
response.choices[0].message,
{
"role": "function",
"name": function_name,
"content": str(weather_data)
}
]
        )
        print(second_response.choices[0].message.content)
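Newer SDK versions deprecate functions/function_call in favor of tools/tool_choice; a sketch of the same first call using that interface, wrapping the get_weather schema defined above:

tools = [{"type": "function", "function": functions[0]}]

response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "How is the weather in Beijing?"}],
    tools=tools,
    tool_choice="auto"
)

# tool_calls is None when the model answers directly instead
if response.choices[0].message.tool_calls:
    tool_call = response.choices[0].message.tool_calls[0]
    print(tool_call.function.name, tool_call.function.arguments)

Error Handling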
import time
from openai import OpenAI, RateLimitError, APIError
def call_api_with_retry(prompt, max_retries=3):
"""API call with retry mechanism"""
for attempt in range(max_retries):
try:
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt}],
timeout=30 # Set timeout
)
return response
except RateLimitError as e:
# Rate limited, wait and retry
wait_time = 2 ** attempt # Exponential backoff
print(f"Rate limited, retrying in {wait_time}s ...")
time.sleep(wait_time)
except APIError as e:
# API error
print(f"API error: {e}")
if attempt == max_retries - 1:
raise
except Exception as e:
# Other errors
print(f"Unknown error: {e}")
raise
    return None
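Beyond hand-rolled retries, the v1 client also retries certain transient failures (such as rate limits) on its own; the constructor's max_retries option tunes the cap (the value here is illustrative):

from openai import OpenAI

# Raise the built-in retry limit from its default
client = OpenAI(api_key="your-api-key", max_retries=5)

Performance Optimization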
Batch Processing
import asyncio
from openai import AsyncOpenAI
# Async client
async_client = AsyncOpenAI(api_key="your-key")
async def process_batch(prompts):
"""Process a batch asynchronously"""
tasks = []
for prompt in prompts:
task = async_client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": prompt}]
)
tasks.append(task)
# Execute concurrently
responses = await asyncio.gather(*tasks)
return responses
# Usage example
prompts = ["Question 1", "Question 2", "Question 3"]
results = asyncio.run(process_batch(prompts))
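An unbounded gather over many prompts can trip rate limits; a sketch that caps in-flight requests with a semaphore (the limit of 5 is arbitrary):

async def process_batch_limited(prompts, max_concurrent=5):
    """Process a batch with a cap on concurrent requests"""
    semaphore = asyncio.Semaphore(max_concurrent)
    async def one(prompt):
        async with semaphore:  # At most max_concurrent calls in flight
            return await async_client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": prompt}]
            )
    return await asyncio.gather(*(one(p) for p in prompts))

Utility Helper Class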
class LLMHelper:
"""LLM helper utility class"""
def __init__(self, api_key, model="gpt-4"):
self.client = OpenAI(api_key=api_key)
self.model = model
self.conversation_history = []
def chat(self, message, remember=True):
"""Chat method"""
if remember:
self.conversation_history.append(
{"role": "user", "content": message}
)
messages = self.conversation_history if remember else [
{"role": "user", "content": message}
]
response = self.client.chat.completions.create(
model=self.model,
messages=messages
)
assistant_message = response.choices[0].message.content
if remember:
self.conversation_history.append(
{"role": "assistant", "content": assistant_message}
)
return assistant_message
def summarize(self, text, max_length=100):
"""Summarize text"""
prompt = f"Summarize the following text within {max_length} characters: \n{text}"
return self.chat(prompt, remember=False)
def translate(self, text, target_language="English"):
"""Translate text"""
prompt = f"Translate the following text into {target_language}: \n{text}"
return self.chat(prompt, remember=False)
def clear_history(self):
"""Clear conversation history"""
self.conversation_history = []
# Usage example
helper = LLMHelper(api_key="your-key")
response = helper.chat("Hello")
summary = helper.summarize("A long text...")
translation = helper.translate("Hello", "French")
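Because chat keeps history by default, the helper carries context across turns; a quick illustration:

helper.clear_history()
helper.chat("My name is Alice")
print(helper.chat("What is my name?"))  # The earlier turn is in the context

Start Your Python Development Journey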
Master the Python SDK and quickly build powerful AI applications.