# Files
# pics/training/example_lora_config.toml
#
# 55 lines
# 1.6 KiB
# TOML

# example LoRA training config for kohya_ss (SDXL)
# copy this, rename it, and edit the paths/settings for your lora
# then train with: .\train_lora.ps1 training/my_lora.toml
[model_arguments]
# base SDXL checkpoint the LoRA is trained against
pretrained_model_name_or_path = "E:/animepics/models/checkpoints/noobai-xl.safetensors"
# set to true for vpred models (NoobAI-XL uses vpred)
v_parameterization = true
# rescale the noise schedule so the final timestep has zero SNR —
# pairs with v_parameterization on vpred models
zero_terminal_snr = true
[saving_arguments]
# checkpoint after every epoch so intermediate versions can be compared
save_every_n_epochs = 1
save_model_as = "safetensors"
output_dir = "E:/animepics/models/loras"
# base filename for saved checkpoints (epoch snapshots get a suffix)
output_name = "my_lora_v1"
[dataset_arguments]
# dataset dir structure: training_data/<lora_name>/img/<repeats>_<trigger>/
train_data_dir = "E:/animepics/training_data/my_lora/img"
# base training resolution as "width,height"; bucketing resizes around this
resolution = "1024,1024"
# aspect-ratio bucketing: group images by shape instead of center-cropping
enable_bucket = true
min_bucket_reso = 512
max_bucket_reso = 2048
bucket_reso_steps = 64
caption_extension = ".txt"
# shuffle caption tags each step to reduce tag-order overfitting, but keep
# the first `keep_tokens` tags (the trigger word) pinned at the front
shuffle_caption = true
keep_tokens = 1
[training_arguments]
# NOTE: output_dir lives in [saving_arguments] only — kohya merges all config
# sections into one namespace, so duplicating it here is redundant and risks
# the two copies silently drifting apart.
logging_dir = "E:/animepics/kohya_ss/logs"
max_train_epochs = 10
train_batch_size = 1
gradient_accumulation_steps = 1
# trade compute for VRAM — generally needed for SDXL on consumer GPUs
gradient_checkpointing = true
mixed_precision = "bf16"
save_precision = "bf16"
seed = 42
# 225 = 3 chunks of 75 CLIP tokens, so long tag captions aren't truncated
max_token_length = 225
xformers = true
# learning rates — good defaults for NoobAI-XL
learning_rate = 0.0001
unet_lr = 0.0001
# text encoder overfits faster than the unet — keep its LR lower
text_encoder_lr = 0.00005
lr_scheduler = "cosine_with_restarts"
lr_warmup_steps = 100
# 8-bit AdamW cuts optimizer-state VRAM with negligible quality loss
optimizer_type = "AdamW8bit"
[network_arguments]
network_module = "networks.lora"
network_dim = 32 # rank — higher = more capacity, 16-64 is typical
network_alpha = 16 # usually half of dim
# optional: ALSO apply LoRA to conv layers (LoCon-style), not just attention —
# uncomment to capture finer texture detail at the cost of a larger file
# network_args = ["conv_dim=16", "conv_alpha=8"]