Stable Diffusion API Tutorial
Open-source image generation model with advanced features like ControlNet and LoRA
- Open-source model: fully open
- Highly controllable: precise ControlNet control
- LoRA fine-tuning: custom styles
- Multiple modes: txt2img / img2img
1. Basic image generation
Getting Started
```python
import requests

API_KEY = "YOUR_API_KEY"  # replace with your API key
BASE_URL = "https://api.n1n.ai/v1"
HEADERS = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json",
}

# Stable Diffusion API basic usage
def generate_image(prompt: str, model: str = "stable-diffusion-xl"):
    """Generate an image using Stable Diffusion."""
    url = f"{BASE_URL}/images/generations"
    data = {
        "model": model,             # stable-diffusion-xl, stable-diffusion-v1-5
        "prompt": prompt,
        "negative_prompt": "low quality, blurry, distorted",
        "num_inference_steps": 30,  # inference steps
        "guidance_scale": 7.5,      # prompt guidance strength
        "width": 1024,
        "height": 1024,
        "seed": -1,                 # -1 means random seed
        "num_images": 1,
    }
    response = requests.post(url, headers=HEADERS, json=data)
    if response.status_code == 200:
        result = response.json()
        return result["images"][0]["url"]
    else:
        raise Exception(f"Generation failed: {response.text}")

# SDXL advanced parameters
def generate_sdxl_advanced(prompt: str, style_preset: str = None):
    """Advanced SDXL generation."""
    url = f"{BASE_URL}/images/generations"
    data = {
        "model": "stable-diffusion-xl",
        "prompt": prompt,
        "negative_prompt": "ugly, tiling, poorly drawn, out of frame",
        "num_inference_steps": 50,
        "guidance_scale": 8.0,
        "width": 1024,
        "height": 1024,
        "sampler": "DPM++ 2M Karras",     # sampler
        "scheduler": "karras",            # scheduler
        "style_preset": style_preset,     # anime, photographic, digital-art
        "clip_guidance_preset": "FAST_BLUE",
        "refiner": True,                  # enable the SDXL refiner pass
    }
    response = requests.post(url, headers=HEADERS, json=data)
    return response.json()
```
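For reference, a minimal usage sketch. It assumes the generations endpoint returns a downloadable URL, as `generate_image` expects; the prompt and output filename are placeholders:

```python
# Minimal usage sketch: generate an image and save it locally.
image_url = generate_image("a snow-covered mountain village at dusk, volumetric light")

# Download the generated image (the filename is just a placeholder)
img = requests.get(image_url, timeout=60)
with open("output.png", "wb") as f:
    f.write(img.content)
```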
2. Available models

| Model | Description | Native resolution |
| --- | --- | --- |
| SDXL 1.0 | Latest high-quality model | 1024x1024 |
| SD 1.5 | Classic stable version | 512x512 |
| SD 2.1 | Improved version | 768x768 |
| SDXL Turbo | Fast generation | 1024x1024 |
| SD Inpaint | For inpainting | 512x512 |
| SDXL Refiner | Detail enhancement | 1024x1024 |
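Requesting a model at its native resolution generally gives the best results. A small helper along these lines can keep requests consistent; note that only "stable-diffusion-xl" and "stable-diffusion-v1-5" are identifiers confirmed earlier in this tutorial, so treat the rest of the mapping as illustrative:

```python
# Hedged sketch: look up a model's native resolution before building a request.
# Only "stable-diffusion-xl" and "stable-diffusion-v1-5" are confirmed identifiers here.
NATIVE_RESOLUTION = {
    "stable-diffusion-xl": (1024, 1024),
    "stable-diffusion-v1-5": (512, 512),
}

def native_size(model: str) -> tuple:
    """Return the model's native (width, height), defaulting to SDXL's 1024x1024."""
    return NATIVE_RESOLUTION.get(model, (1024, 1024))
```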
3. ControlNet precise control
Advanced control features
```python
# ControlNet guided generation
def generate_with_controlnet(image_url: str, prompt: str, control_type: str):
    """Precisely control image generation with ControlNet."""
    url = f"{BASE_URL}/images/controlnet"
    data = {
        "model": "stable-diffusion-xl",
        "prompt": prompt,
        "control_image": image_url,     # control image
        "control_type": control_type,   # canny, depth, openpose, scribble
        "control_strength": 1.0,        # control strength
        "num_inference_steps": 30,
        "guidance_scale": 7.5,
    }
    response = requests.post(url, headers=HEADERS, json=data)
    return response.json()

# Image-to-image (img2img)
def image_to_image(source_image_url: str, prompt: str, strength: float = 0.75):
    """Generate a new image based on an existing image."""
    url = f"{BASE_URL}/images/img2img"
    data = {
        "model": "stable-diffusion-xl",
        "init_image": source_image_url,
        "prompt": prompt,
        "strength": strength,           # 0-1; higher values change more
        "num_inference_steps": 50,
        "guidance_scale": 7.5,
    }
    response = requests.post(url, headers=HEADERS, json=data)
    return response.json()

# Inpainting (repaint local areas)
def inpaint(image_url: str, mask_url: str, prompt: str):
    """Repaint masked areas of an image."""
    url = f"{BASE_URL}/images/inpaint"
    data = {
        "model": "stable-diffusion-xl-inpaint",
        "image": image_url,
        "mask": mask_url,
        "prompt": prompt,
        "num_inference_steps": 50,
        "guidance_scale": 8.0,
        "strength": 0.99,
    }
    response = requests.post(url, headers=HEADERS, json=data)
    return response.json()
```
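These helpers compose naturally. Below is a hedged sketch of a typical flow; the image and mask URLs are placeholders, and the response is assumed to carry the generated image URL under `images[0].url`, mirroring the basic endpoint:

```python
# Hedged end-to-end sketch; source/mask URLs are placeholders.
source = "https://example.com/photo.jpg"
mask = "https://example.com/mask.png"

# 1. Keep the subject's pose with OpenPose-based ControlNet
posed = generate_with_controlnet(source, "a bronze statue in a museum hall", "openpose")

# 2. Restyle the whole photo while keeping its composition
styled = image_to_image(source, "oil painting, impressionist style", strength=0.6)

# 3. Repaint only the masked region
patched = inpaint(source, mask, "a clear blue sky with scattered clouds")

# The exact response schema is assumed to match the basic endpoint
for result in (posed, styled, patched):
    print(result.get("images", [{}])[0].get("url"))
```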
4. LoRA model fine-tuning

Style customization
```python
# LoRA model fine-tuning usage
class SDLoRAManager:
    """Manage and use LoRA models."""

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://api.n1n.ai/v1"
        self.headers = {"Authorization": f"Bearer {self.api_key}"}

    def generate_with_lora(self, prompt: str, lora_models: list):
        """Generate with one or more LoRA models."""
        data = {
            "model": "stable-diffusion-xl",
            "prompt": prompt,
            # e.g. [{"name": "realistic_vision", "weight": 0.8},
            #       {"name": "detail_enhancer", "weight": 0.5}]
            "loras": lora_models,
            "num_inference_steps": 30,
            "guidance_scale": 7.5,
            "width": 1024,
            "height": 1024,
        }
        response = requests.post(
            f"{self.base_url}/images/generate-lora",
            headers=self.headers,
            json=data,
        )
        return response.json()

    def list_available_loras(self):
        """List available LoRA models."""
        response = requests.get(
            f"{self.base_url}/models/loras",
            headers=self.headers,
        )
        return response.json()

    def train_custom_lora(self, dataset: list, config: dict = None):
        """Train a custom LoRA (requires elevated permissions)."""
        data = {
            "base_model": "stable-diffusion-xl",
            "dataset": dataset,  # training images and captions
            "training_config": config or {
                "learning_rate": 1e-4,
                "num_epochs": 100,
                "batch_size": 1,
                "gradient_accumulation_steps": 4,
                "lora_rank": 32,
            },
        }
        response = requests.post(
            f"{self.base_url}/models/train-lora",
            headers=self.headers,
            json=data,
        )
        return response.json()

# Batch generate the same prompt in different styles
def batch_generate_styles(prompt: str):
    manager = SDLoRAManager(API_KEY)
    styles = [
        {"name": "anime", "lora": "anime_style", "weight": 0.9},
        {"name": "realistic", "lora": "photorealistic", "weight": 0.8},
        {"name": "fantasy", "lora": "fantasy_art", "weight": 0.7},
        {"name": "cyberpunk", "lora": "cyberpunk_style", "weight": 0.85},
    ]
    results = []
    for style in styles:
        result = manager.generate_with_lora(
            prompt=f"{prompt}, {style['name']} style",
            lora_models=[{"name": style["lora"], "weight": style["weight"]}],
        )
        results.append(result)
    return results
```
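A quick way to exercise the manager is sketched below. The response field names are assumptions, since the LoRA endpoint's schema is not documented above:

```python
# Hedged sketch: list available LoRAs, then generate one prompt in several styles.
manager = SDLoRAManager(API_KEY)
print(manager.list_available_loras())  # inspect which LoRA names are valid

for style_result in batch_generate_styles("a lone lighthouse on a rocky coast"):
    # Response schema is an assumption; adjust to what the endpoint actually returns
    print(style_result.get("images", [{}])[0].get("url"))
```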
5. Best practices

🎯 Parameter optimization
- ✅ Steps: 20-50 to balance quality and speed
- ✅ CFG Scale: 7-10 suits most scenarios
- ✅ Use negative prompts to improve quality
- ✅ Fix the seed to ensure reproducibility (see the sketch below)
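A minimal sketch applying these recommendations to the basic request from section 1; the endpoint, field names, and constants (`BASE_URL`, `HEADERS`) are the ones assumed throughout this tutorial:

```python
# Hedged sketch: a request body following the parameter recommendations above.
data = {
    "model": "stable-diffusion-xl",
    "prompt": "masterpiece, best quality, a misty forest at sunrise",
    "negative_prompt": "low quality, blurry, distorted",
    "num_inference_steps": 30,  # 20-50 balances quality and speed
    "guidance_scale": 7.5,      # CFG 7-10 suits most scenarios
    "seed": 42,                 # fixed seed for reproducible results
    "width": 1024,
    "height": 1024,
}
response = requests.post(f"{BASE_URL}/images/generations", headers=HEADERS, json=data)
```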
💡 Prompt techniques
- ✅ Use weighting syntax: (word:1.2)
- ✅ Add quality keywords: masterpiece, best quality
- ✅ Specify art styles and artists
- ✅ Use BREAK to separate concepts (example below)
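To make the syntax concrete, here is an illustrative prompt using weighting and BREAK. Whether the API honours this Automatic1111-style syntax is an assumption, so verify it against your endpoint:

```python
# Hedged example of weighted prompt syntax; support for (word:1.2) and BREAK
# depends on the backend, so treat this as illustrative.
prompt = (
    "masterpiece, best quality, (golden hour lighting:1.2), "
    "a quiet harbour town, oil painting, impressionist style "
    "BREAK "
    "fishing boats, (reflections on water:1.1)"
)
image_url = generate_image(prompt)
```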