# variables related to the pretrained model
$pretrained_model_name_or_path = "C:\SD_models\nai-full.ckpt"
$v2 = 0 # set to 1 for true or 0 for false
$v_model = 0 # set to 1 for true or 0 for false
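# --v2 marks the base model as Stable Diffusion 2.x; --v_parameterization additionally enables v-prediction (needed for the 768-v checkpoints)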
# variables related to the training dataset and output directory
$train_dir = "G:\kohya_train_dir\hll4-pre"
$image_folder = "G:\hll4_pre_test"
$output_dir = "G:\kohya_train_dir\output\hll4-pre"
$output_name = "hll4-pre-test-rcrop-dropout5" # base name passed to --output_name; all saved model files use it
$kohya_finetune_repo_path = "G:\kohya_ss"
$logging_dir = "G:\kohya_logs\hll4-pre"
# variables related to the training process
$max_resolution = "512,512"
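# 512,512 matches the native SD 1.x resolution; SD 2.x 768-v models are usually trained at 768,768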
$learning_rate = 8e-6
$lr_scheduler = "polynomial" # Default is constant, cosine is good too
$lr_warmup_steps = 50
$dataset_repeats = 1
$train_batch_size = 16
$epochs = 10
$save_every_n_epochs = 1
$mixed_precision = "fp16" # use bf16 if getting NaN loss
$save_precision = "fp16" # or float
$seed = 23
$num_cpu_threads_per_process = 12
$train_text_encoder = 1 # set to 1 to train text encoder otherwise set to 0
$prepare_latents = 0 # set 1 for faster training. set 0 if --random_crop or --color_aug are used
# variables related to converting the resulting diffusers model. Only applies when the output is a diffusers folder; if the input was a .ckpt or .safetensors file, the output already matches that format
$convert_to_safetensors = 1 # set to 1 to convert the resulting diffusers model to safetensors
$convert_to_ckpt = 0 # set to 1 to convert the resulting diffusers model to ckpt
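# Optional: rough estimate of total optimizer steps (a minimal sketch; assumes the images sit directly in $image_folder and ignores bucketing effects)
$image_count = (Get-ChildItem $image_folder -File | Where-Object { $_.Extension -in ".png", ".jpg", ".jpeg", ".webp" }).Count
$steps_per_epoch = [math]::Ceiling(($image_count * $dataset_repeats) / $train_batch_size)
Write-Host ("~{0} images -> ~{1} steps per epoch, ~{2} steps in total" -f $image_count, $steps_per_epoch, ($steps_per_epoch * $epochs))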
### You should not need to change things below
# Convert the 0/1 flags above into command-line switches using the ternary operator
$v_model = ($v_model -eq 0) ? $null : "--v_parameterization"
$v2 = ($v2 -eq 0) ? $null : "--v2"
$train_text_encoder = ($train_text_encoder -eq 0) ? $null : "--train_text_encoder"
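# Note: the '? :' ternary operator above requires PowerShell 7 or later; on Windows PowerShell 5.1 use if/else instead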
# stop script on error
$ErrorActionPreference = "Stop"
# define a list of substrings to search for
$substrings_v2 = "stable-diffusion-2-1-base", "stable-diffusion-2-base"
# check if $v2 and $v_model are empty and if $pretrained_model_name_or_path contains any of the substrings in the v2 list
if ($v2 -eq $null -and $v_model -eq $null -and ($substrings_v2 | Where-Object { $pretrained_model_name_or_path -match $_ }).Count -gt 0) {
Write-Host("SD v2 model detected. Setting --v2 parameter")
$v2 = "--v2"
$v_model = $null
}
# define a list of substrings to search for v-objective
$substrings_v_model = "stable-diffusion-2-1", "stable-diffusion-2"
# check if $v2 and $v_model are empty and if $pretrained_model_name_or_path contains any of the substrings in the v_model list
if ($v2 -eq $null -and $v_model -eq $null -and ($substrings_v_model | Where-Object { $pretrained_model_name_or_path -match $_ }).Count -gt 0) {
Write-Host("SD v2 v_model detected. Setting --v2 parameter and --v_parameterization")
$v2 = "--v2"
$v_model = "--v_parameterization"
}
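# The '-base' (epsilon) check runs before the v-prediction check on purpose: base model names also contain the plain 'stable-diffusion-2-1'/'stable-diffusion-2' substrings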
# activate venv
cd $kohya_finetune_repo_path
.\venv\Scripts\activate
# create caption json file
if (!(Test-Path -Path $train_dir)) {
New-Item -Path $train_dir -ItemType "directory"
}
# backup old json files
if (Test-Path $train_dir"\meta_cap.json") { Move-Item -Force $train_dir"\meta_cap.json" $train_dir"\meta_cap.json.bak"}
if (Test-Path $train_dir"\meta_lat.json") { Move-Item -Force $train_dir"\meta_lat.json" $train_dir"\meta_lat.json.bak"}
python $kohya_finetune_repo_path\finetune\merge_dd_tags_to_metadata.py `
$image_folder $train_dir"\meta_cap.json" --caption_extension ".txt"
<#
Write-Host("Cleaning tags.. ")
python $kohya_finetune_repo_path\finetune\clean_captions_and_tags.py `
$train_dir"\meta_cap.json" $train_dir"\meta_cap.json" --debug
#>
# create image buckets
if ($prepare_latents -ne 0) {
python $kohya_finetune_repo_path\finetune\prepare_buckets_latents.py `
$image_folder `
$train_dir"\meta_cap.json" `
$train_dir"\meta_lat.json" `
$pretrained_model_name_or_path `
--max_resolution $max_resolution --mixed_precision $mixed_precision --min_bucket_reso 320 --max_bucket_reso 960 `
--batch_size 8 --max_data_loader_n_workers 4 --skip_existing
}
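# prepare_buckets_latents.py encodes the images with the VAE, assigns them to aspect-ratio buckets, and writes the cached latents (.npz files) plus the bucket metadata referenced by meta_lat.json, letting training skip the VAE encode step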
$json = ($prepare_latents -eq 0) ? "meta_cap.json" : "meta_lat.json"
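# Notes on the flags below:
#   --use_8bit_adam           bitsandbytes 8-bit Adam optimizer (lower VRAM use)
#   --xformers                memory-efficient attention
#   --gradient_checkpointing  trades some speed for lower VRAM use
#   --clip_skip 2 / --max_token_length 225   the usual settings for NAI-derived anime models
#   --caption_tag_dropout_rate 0.05          randomly drops tags from captions as light regularization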
accelerate launch --num_cpu_threads_per_process $num_cpu_threads_per_process $kohya_finetune_repo_path\fine_tune.py `
$v2 `
$v_model `
--pretrained_model_name_or_path=$pretrained_model_name_or_path `
--in_json $train_dir\$json `
--train_data_dir="$image_folder" `
--output_dir=$output_dir `
--train_batch_size=$train_batch_size `
--dataset_repeats=$dataset_repeats `
--learning_rate=$learning_rate `
--lr_scheduler=$lr_scheduler `
--lr_warmup_steps=$lr_warmup_steps `
--max_train_epochs=$epochs `
--use_8bit_adam --xformers --gradient_checkpointing `
--enable_bucket --resolution $max_resolution --min_bucket_reso 320 --max_bucket_reso 960 --random_crop `
--mixed_precision=$mixed_precision `
--save_every_n_epochs=$save_every_n_epochs `
--seed=$seed --clip_skip 2 --max_token_length 225 --shuffle_caption --caption_tag_dropout_rate 0.05 `
$train_text_encoder --logging_dir $logging_dir `
--save_precision=$save_precision --use_safetensors --output_name $output_name
# --cache_latents --save_state --resume "G:\kohya_train_dir\output\hll4\-state" --random_crop --color_aug --flip_aug --max_data_loader_n_workers 8 --keep_tokens 2
# check if $output_dir\$output_name is a directory, i.e. the output is a diffusers model
if (Test-Path "$output_dir\$output_name" -PathType Container) {
if ($convert_to_ckpt) {
Write-Host("Converting diffuser model $output_dir\last to $output_dir\last.ckpt")
python "$kohya_finetune_repo_path\tools\convert_diffusers20_original_sd.py" `
$output_dir\last `
$output_dir\last.ckpt `
--$save_precision
}
if ($convert_to_safetensors) {
Write-Host("Converting diffuser model $output_dir\last to $output_dir\last.safetensors")
python "$kohya_finetune_repo_path\tools\convert_diffusers20_original_sd.py" `
$output_dir\last `
$output_dir\last.safetensors `
--$save_precision
}
}
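# The AUTOMATIC1111 webui expects a .yaml config with the same base name next to SD 2.x checkpoints, so copy the matching inference config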
# extensions that indicate a trained model file was written to $output_dir
$substrings_sd_model = ".ckpt", ".safetensors"
$matching_extension = foreach ($ext in $substrings_sd_model) {
Get-ChildItem $output_dir -File | Where-Object { $_.Extension -eq $ext }
}
if ($matching_extension.Count -gt 0) {
# copy the matching inference yaml from the v2_inference folder to $output_dir as $output_name.yaml
if ( $v2 -ne $null -and $v_model -ne $null) {
Write-Host("Saving v2-inference-v.yaml as $output_dir\$output_name.yaml")
Copy-Item -Path "$kohya_finetune_repo_path\v2_inference\v2-inference-v.yaml" -Destination "$output_dir\$output_name.yaml"
}
elseif ( $v2 -ne $null ) {
Write-Host("Saving v2-inference.yaml as $output_dir\$output_name.yaml")
Copy-Item -Path "$kohya_finetune_repo_path\v2_inference\v2-inference.yaml" -Destination "$output_dir\$output_name.yaml"
}
}
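# Optional sanity check (a minimal sketch; assumes the trained files were written to $output_dir with the $output_name base name set above)
Get-ChildItem $output_dir -File |
Where-Object { $_.BaseName -like "$output_name*" } |
Format-Table Name, Length, LastWriteTime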
|