LoRA Flux never converges and when used in inference has basically no effect
Closed this issue · 1 comments
As per title. I'm training locally with a 4060 Ti 16 GB. I've tried every possible configuration but I'm always facing the same problem. My LoRA does not converge. The loss starts very low, then there's a sudden spike upwards, and then it stabilizes. I'll attach an image of my latest trainings just for you to see, and I'll also attach the latest settings used. If anyone can help it would be much appreciated.
EDIT: I want to add that in these particular settings the text encoder is not trained but I usually train with it.
{
"LoRA_type": "Flux1",
"LyCORIS_preset": "full",
"adaptive_noise_scale": 0,
"additional_parameters": "",
"ae": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/vae/FLUX1/ae.safetensors",
"apply_t5_attn_mask": true,
"async_upload": false,
"block_alphas": "",
"block_dims": "",
"block_lr_zero_threshold": "",
"bucket_no_upscale": true,
"bucket_reso_steps": 64,
"bypass_mode": false,
"cache_latents": true,
"cache_latents_to_disk": true,
"caption_dropout_every_n_epochs": 0,
"caption_dropout_rate": 0,
"caption_extension": ".txt",
"clip_l": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/clip/clip_l.safetensors",
"clip_skip": 1,
"color_aug": false,
"constrain": 0,
"conv_alpha": 1,
"conv_block_alphas": "",
"conv_block_dims": "",
"conv_dim": 1,
"cpu_offload_checkpointing": false,
"dataset_config": "",
"debiased_estimation_loss": true,
"decompose_both": false,
"dim_from_weights": false,
"discrete_flow_shift": 3.1582,
"dora_wd": false,
"down_lr_weight": "",
"dynamo_backend": "no",
"dynamo_mode": "default",
"dynamo_use_dynamic": false,
"dynamo_use_fullgraph": false,
"enable_all_linear": false,
"enable_bucket": true,
"epoch": 10,
"extra_accelerate_launch_args": "",
"factor": -1,
"flip_aug": false,
"flux1_cache_text_encoder_outputs": true,
"flux1_cache_text_encoder_outputs_to_disk": true,
"flux1_checkbox": true,
"fp8_base": true,
"fp8_base_unet": false,
"full_bf16": false,
"full_fp16": false,
"gpu_ids": "",
"gradient_accumulation_steps": 1,
"gradient_checkpointing": true,
"guidance_scale": 1,
"highvram": true,
"huber_c": 0.1,
"huber_schedule": "snr",
"huggingface_path_in_repo": "",
"huggingface_repo_id": "",
"huggingface_repo_type": "",
"huggingface_repo_visibility": "",
"huggingface_token": "",
"img_attn_dim": "",
"img_mlp_dim": "",
"img_mod_dim": "",
"in_dims": "",
"ip_noise_gamma": 0,
"ip_noise_gamma_random_strength": false,
"keep_tokens": 0,
"learning_rate": 0.01,
"log_config": false,
"log_tracker_config": "",
"log_tracker_name": "",
"log_with": "tensorboard",
"logging_dir": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/loras/flatee/logs",
"loraplus_lr_ratio": 0,
"loraplus_text_encoder_lr_ratio": 0,
"loraplus_unet_lr_ratio": 0,
"loss_type": "l2",
"lowvram": false,
"lr_scheduler": "constant",
"lr_scheduler_args": "",
"lr_scheduler_num_cycles": 1,
"lr_scheduler_power": 1,
"lr_scheduler_type": "",
"lr_warmup": 0,
"lr_warmup_steps": 0,
"main_process_port": 0,
"masked_loss": false,
"max_bucket_reso": 2048,
"max_data_loader_n_workers": 2,
"max_grad_norm": 1,
"max_resolution": "512,512",
"max_timestep": 1000,
"max_token_length": 225,
"max_train_epochs": 0,
"max_train_steps": 0,
"mem_eff_attn": false,
"mem_eff_save": false,
"metadata_author": "",
"metadata_description": "",
"metadata_license": "",
"metadata_tags": "",
"metadata_title": "",
"mid_lr_weight": "",
"min_bucket_reso": 256,
"min_snr_gamma": 0,
"min_timestep": 0,
"mixed_precision": "bf16",
"model_list": "custom",
"model_prediction_type": "raw",
"module_dropout": 0,
"multi_gpu": false,
"multires_noise_discount": 0.3,
"multires_noise_iterations": 0,
"network_alpha": 1,
"network_dim": 4,
"network_dropout": 0,
"network_weights": "",
"noise_offset": 0,
"noise_offset_random_strength": false,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"num_machines": 1,
"num_processes": 1,
"optimizer": "Adafactor",
"optimizer_args": "",
"output_dir": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/loras/flatee",
"output_name": "Flatee_0.4",
"persistent_data_loader_workers": true,
"pretrained_model_name_or_path": "C:/AI/kohya_ss/models/flux1-dev2pro.safetensors",
"prior_loss_weight": 1,
"random_crop": false,
"rank_dropout": 0,
"rank_dropout_scale": false,
"reg_data_dir": "",
"rescaled": false,
"resume": "",
"resume_from_huggingface": "",
"sample_every_n_epochs": 2,
"sample_every_n_steps": 0,
"sample_prompts": "The Smiths' family portrait captures a warm and inviting scene. The parents, dressed in formal clothing, stand at the center of the portrait. The father, tall and broad-shouldered, stands behind the mother, who is seated on a wooden chair with a gentle smile. She wears a classic dress, while their three children are gathered around her. The eldest child, a boy, stands proudly beside his father, while the younger girl and boy sit on either side of their mother, both wearing playful grins. The backdrop of the portrait is a rustic wooden wall, adding a touch of simplicity and charm to the scene. The soft, warm lighting casts gentle shadows, highlighting the details in their clothing and faces. The overall tones of the photo exude a cozy, family-centered feeling, with the whole family framed in a harmonious and affectionate pose, their smiles genuine and welcoming. --w 512 --h 512 --d 42\nA gathering of fairies takes place in the heart of a magical forest. The fairies, each with delicate, translucent wings that shimmer with soft light, are sitting or hovering in a circle around a glowing, enchanted tree stump. They are small in stature, with flowing dresses made of flower petals and leaves, in vibrant colors such as pink, purple, and green. Their hair is long and flowing, adorned with tiny flowers and vines. The magical forest around them is filled with towering trees covered in glowing moss and large, luminescent mushrooms growing at the base of the trunks. Floating, glowing particles drift through the air, adding to the enchanted atmosphere. The forest floor is covered with soft moss, flowers, and scattered leaves. Above the fairies, beams of moonlight filter through the thick canopy, creating a soft, ethereal glow over the entire scene. The fairies are engaged in quiet conversation, their faces calm and serene, as they meet in this peaceful, mystical environment. --w 512 --h 512 --d 42",
"sample_sampler": "euler_a",
"save_as_bool": false,
"save_every_n_epochs": 1,
"save_every_n_steps": 0,
"save_last_n_steps": 0,
"save_last_n_steps_state": 0,
"save_model_as": "safetensors",
"save_precision": "bf16",
"save_state": false,
"save_state_on_train_end": false,
"save_state_to_huggingface": false,
"scale_v_pred_loss_like_noise_pred": false,
"scale_weight_norms": 0,
"sdxl": false,
"sdxl_cache_text_encoder_outputs": false,
"sdxl_no_half_vae": false,
"seed": 42,
"shuffle_caption": false,
"single_dim": "",
"single_mod_dim": "",
"split_mode": false,
"split_qkv": false,
"stop_text_encoder_training": 0,
"t5xxl": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/clip/t5/t5xxl_fp16.safetensors",
"t5xxl_lr": 0,
"t5xxl_max_token_length": 512,
"text_encoder_lr": 0.001,
"timestep_sampling": "sigma",
"train_batch_size": 1,
"train_blocks": "all",
"train_data_dir": "D:/AI/LoRa_Training/FLATEE/images",
"train_double_block_indices": "all",
"train_norm": false,
"train_on_input": true,
"train_single_block_indices": "all",
"train_t5xxl": false,
"training_comment": "",
"txt_attn_dim": "",
"txt_mlp_dim": "",
"txt_mod_dim": "",
"unet_lr": 0.01,
"unit": 1,
"up_lr_weight": "",
"use_cp": false,
"use_scalar": false,
"use_tucker": false,
"v2": false,
"v_parameterization": false,
"v_pred_like_loss": 0,
"vae": "",
"vae_batch_size": 0,
"wandb_api_key": "",
"wandb_run_name": "",
"weighted_captions": false,
"xformers": "sdpa"
}
Sorry, it seems I found some good settings. Now everything is working as intended. I'll share them here in case someone is interested.
{
"LoRA_type": "Flux1",
"LyCORIS_preset": "full",
"adaptive_noise_scale": 0,
"additional_parameters": "",
"ae": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/vae/FLUX1/ae.safetensors",
"apply_t5_attn_mask": true,
"async_upload": false,
"block_alphas": "",
"block_dims": "",
"block_lr_zero_threshold": "",
"bucket_no_upscale": true,
"bucket_reso_steps": 64,
"bypass_mode": false,
"cache_latents": true,
"cache_latents_to_disk": true,
"caption_dropout_every_n_epochs": 0,
"caption_dropout_rate": 0,
"caption_extension": ".txt",
"clip_l": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/clip/clip_l.safetensors",
"clip_skip": 1,
"color_aug": false,
"constrain": 0,
"conv_alpha": 1,
"conv_block_alphas": "",
"conv_block_dims": "",
"conv_dim": 1,
"cpu_offload_checkpointing": false,
"dataset_config": "",
"debiased_estimation_loss": false,
"decompose_both": false,
"dim_from_weights": false,
"discrete_flow_shift": 3,
"dora_wd": false,
"down_lr_weight": "",
"dynamo_backend": "no",
"dynamo_mode": "default",
"dynamo_use_dynamic": false,
"dynamo_use_fullgraph": false,
"enable_all_linear": false,
"enable_bucket": true,
"epoch": 10,
"extra_accelerate_launch_args": "",
"factor": -1,
"flip_aug": false,
"flux1_cache_text_encoder_outputs": true,
"flux1_cache_text_encoder_outputs_to_disk": true,
"flux1_checkbox": true,
"fp8_base": true,
"fp8_base_unet": false,
"full_bf16": true,
"full_fp16": false,
"gpu_ids": "",
"gradient_accumulation_steps": 1,
"gradient_checkpointing": true,
"guidance_scale": 1,
"highvram": true,
"huber_c": 0.1,
"huber_schedule": "snr",
"huggingface_path_in_repo": "",
"huggingface_repo_id": "",
"huggingface_repo_type": "",
"huggingface_repo_visibility": "",
"huggingface_token": "",
"img_attn_dim": "",
"img_mlp_dim": "",
"img_mod_dim": "",
"in_dims": "",
"ip_noise_gamma": 0,
"ip_noise_gamma_random_strength": false,
"keep_tokens": 0,
"learning_rate": 0.0005,
"log_config": false,
"log_tracker_config": "",
"log_tracker_name": "",
"log_with": "",
"logging_dir": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/loras/prettycom/logs",
"loraplus_lr_ratio": 0,
"loraplus_text_encoder_lr_ratio": 0,
"loraplus_unet_lr_ratio": 0,
"loss_type": "l2",
"lowvram": false,
"lr_scheduler": "constant",
"lr_scheduler_args": "",
"lr_scheduler_num_cycles": 1,
"lr_scheduler_power": 1,
"lr_scheduler_type": "",
"lr_warmup": 0,
"lr_warmup_steps": 0,
"main_process_port": 0,
"masked_loss": false,
"max_bucket_reso": 2048,
"max_data_loader_n_workers": 0,
"max_grad_norm": 1,
"max_resolution": "512,512",
"max_timestep": 1000,
"max_token_length": 75,
"max_train_epochs": 0,
"max_train_steps": 0,
"mem_eff_attn": false,
"mem_eff_save": false,
"metadata_author": "",
"metadata_description": "",
"metadata_license": "",
"metadata_tags": "",
"metadata_title": "",
"mid_lr_weight": "",
"min_bucket_reso": 256,
"min_snr_gamma": 7,
"min_timestep": 0,
"mixed_precision": "bf16",
"model_list": "custom",
"model_prediction_type": "raw",
"module_dropout": 0,
"multi_gpu": false,
"multires_noise_discount": 0.3,
"multires_noise_iterations": 0,
"network_alpha": 16,
"network_dim": 4,
"network_dropout": 0,
"network_weights": "",
"noise_offset": 0.05,
"noise_offset_random_strength": false,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"num_machines": 1,
"num_processes": 1,
"optimizer": "AdamW8bit",
"optimizer_args": "",
"output_dir": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/loras/prettycom",
"output_name": "prettycom_V1",
"persistent_data_loader_workers": false,
"pretrained_model_name_or_path": "C:/AI/kohya_ss/models/flux1-dev2pro.safetensors",
"prior_loss_weight": 1,
"random_crop": false,
"rank_dropout": 0,
"rank_dropout_scale": false,
"reg_data_dir": "",
"rescaled": false,
"resume": "",
"resume_from_huggingface": "",
"sample_every_n_epochs": 1,
"sample_every_n_steps": 0,
"sample_prompts": "Sample1. --w 512 --h 512 --d 42\nSample2 --w 512 --h 512 --d 42",
"sample_sampler": "euler",
"save_as_bool": false,
"save_every_n_epochs": 1,
"save_every_n_steps": 0,
"save_last_n_steps": 0,
"save_last_n_steps_state": 0,
"save_model_as": "safetensors",
"save_precision": "bf16",
"save_state": false,
"save_state_on_train_end": false,
"save_state_to_huggingface": false,
"scale_v_pred_loss_like_noise_pred": false,
"scale_weight_norms": 0,
"sdxl": false,
"sdxl_cache_text_encoder_outputs": true,
"sdxl_no_half_vae": true,
"seed": 42,
"shuffle_caption": false,
"single_dim": "",
"single_mod_dim": "",
"split_mode": false,
"split_qkv": false,
"stop_text_encoder_training": 0,
"t5xxl": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/clip/t5/t5xxl_fp16.safetensors",
"t5xxl_lr": 0,
"t5xxl_max_token_length": 512,
"text_encoder_lr": 2e-05,
"timestep_sampling": "sigmoid",
"train_batch_size": 1,
"train_blocks": "all",
"train_data_dir": "D:/AI/LoRa_Training/PRETTYCOM/images/",
"train_double_block_indices": "all",
"train_norm": false,
"train_on_input": true,
"train_single_block_indices": "all",
"train_t5xxl": false,
"training_comment": "",
"txt_attn_dim": "",
"txt_mlp_dim": "",
"txt_mod_dim": "",
"unet_lr": 0.0005,
"unit": 1,
"up_lr_weight": "",
"use_cp": false,
"use_scalar": false,
"use_tucker": false,
"v2": false,
"v_parameterization": false,
"v_pred_like_loss": 0,
"vae": "",
"vae_batch_size": 0,
"wandb_api_key": "",
"wandb_run_name": "",
"weighted_captions": false,
"xformers": "sdpa"
}
{
"LoRA_type": "Flux1",
"LyCORIS_preset": "full",
"adaptive_noise_scale": 0,
"additional_parameters": "",
"ae": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/vae/FLUX1/ae.safetensors",
"apply_t5_attn_mask": true,
"async_upload": false,
"block_alphas": "",
"block_dims": "",
"block_lr_zero_threshold": "",
"bucket_no_upscale": true,
"bucket_reso_steps": 64,
"bypass_mode": false,
"cache_latents": true,
"cache_latents_to_disk": true,
"caption_dropout_every_n_epochs": 0,
"caption_dropout_rate": 0,
"caption_extension": ".txt",
"clip_l": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/clip/clip_l.safetensors",
"clip_skip": 1,
"color_aug": false,
"constrain": 0,
"conv_alpha": 1,
"conv_block_alphas": "",
"conv_block_dims": "",
"conv_dim": 1,
"cpu_offload_checkpointing": false,
"dataset_config": "",
"debiased_estimation_loss": false,
"decompose_both": false,
"dim_from_weights": false,
"discrete_flow_shift": 3,
"dora_wd": false,
"down_lr_weight": "",
"dynamo_backend": "no",
"dynamo_mode": "default",
"dynamo_use_dynamic": false,
"dynamo_use_fullgraph": false,
"enable_all_linear": false,
"enable_bucket": true,
"epoch": 10,
"extra_accelerate_launch_args": "",
"factor": -1,
"flip_aug": false,
"flux1_cache_text_encoder_outputs": true,
"flux1_cache_text_encoder_outputs_to_disk": true,
"flux1_checkbox": true,
"fp8_base": true,
"fp8_base_unet": false,
"full_bf16": true,
"full_fp16": false,
"gpu_ids": "",
"gradient_accumulation_steps": 1,
"gradient_checkpointing": true,
"guidance_scale": 1,
"highvram": true,
"huber_c": 0.1,
"huber_schedule": "snr",
"huggingface_path_in_repo": "",
"huggingface_repo_id": "",
"huggingface_repo_type": "",
"huggingface_repo_visibility": "",
"huggingface_token": "",
"img_attn_dim": "",
"img_mlp_dim": "",
"img_mod_dim": "",
"in_dims": "",
"ip_noise_gamma": 0,
"ip_noise_gamma_random_strength": false,
"keep_tokens": 0,
"learning_rate": 0.0005,
"log_config": false,
"log_tracker_config": "",
"log_tracker_name": "",
"log_with": "",
"logging_dir": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/loras/prettycom/logs",
"loraplus_lr_ratio": 0,
"loraplus_text_encoder_lr_ratio": 0,
"loraplus_unet_lr_ratio": 0,
"loss_type": "l2",
"lowvram": false,
"lr_scheduler": "constant",
"lr_scheduler_args": "",
"lr_scheduler_num_cycles": 1,
"lr_scheduler_power": 1,
"lr_scheduler_type": "",
"lr_warmup": 0,
"lr_warmup_steps": 0,
"main_process_port": 0,
"masked_loss": false,
"max_bucket_reso": 2048,
"max_data_loader_n_workers": 0,
"max_grad_norm": 1,
"max_resolution": "512,512",
"max_timestep": 1000,
"max_token_length": 75,
"max_train_epochs": 0,
"max_train_steps": 0,
"mem_eff_attn": false,
"mem_eff_save": false,
"metadata_author": "",
"metadata_description": "",
"metadata_license": "",
"metadata_tags": "",
"metadata_title": "",
"mid_lr_weight": "",
"min_bucket_reso": 256,
"min_snr_gamma": 7,
"min_timestep": 0,
"mixed_precision": "bf16",
"model_list": "custom",
"model_prediction_type": "raw",
"module_dropout": 0,
"multi_gpu": false,
"multires_noise_discount": 0.3,
"multires_noise_iterations": 0,
"network_alpha": 16,
"network_dim": 4,
"network_dropout": 0,
"network_weights": "",
"noise_offset": 0.05,
"noise_offset_random_strength": false,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"num_machines": 1,
"num_processes": 1,
"optimizer": "AdamW8bit",
"optimizer_args": "",
"output_dir": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/loras/prettycom",
"output_name": "prettycom_V1",
"persistent_data_loader_workers": false,
"pretrained_model_name_or_path": "C:/AI/kohya_ss/models/flux1-dev2pro.safetensors",
"prior_loss_weight": 1,
"random_crop": false,
"rank_dropout": 0,
"rank_dropout_scale": false,
"reg_data_dir": "",
"rescaled": false,
"resume": "",
"resume_from_huggingface": "",
"sample_every_n_epochs": 1,
"sample_every_n_steps": 0,
"sample_prompts": "Sample1 --w 512 --h 512 --d 42\nSample2 --w 512 --h 512 --d 42",
"sample_sampler": "euler",
"save_as_bool": false,
"save_every_n_epochs": 1,
"save_every_n_steps": 0,
"save_last_n_steps": 0,
"save_last_n_steps_state": 0,
"save_model_as": "safetensors",
"save_precision": "bf16",
"save_state": false,
"save_state_on_train_end": false,
"save_state_to_huggingface": false,
"scale_v_pred_loss_like_noise_pred": false,
"scale_weight_norms": 0,
"sdxl": false,
"sdxl_cache_text_encoder_outputs": true,
"sdxl_no_half_vae": true,
"seed": 42,
"shuffle_caption": false,
"single_dim": "",
"single_mod_dim": "",
"split_mode": false,
"split_qkv": false,
"stop_text_encoder_training": 0,
"t5xxl": "C:/AI/ComfyUI_windows_portable_nvidia/ComfyUI_windows_portable/ComfyUI/models/clip/t5/t5xxl_fp16.safetensors",
"t5xxl_lr": 0,
"t5xxl_max_token_length": 512,
"text_encoder_lr": 2e-05,
"timestep_sampling": "sigmoid",
"train_batch_size": 1,
"train_blocks": "all",
"train_data_dir": "D:/AI/LoRa_Training/PRETTYCOM/images/",
"train_double_block_indices": "all",
"train_norm": false,
"train_on_input": true,
"train_single_block_indices": "all",
"train_t5xxl": false,
"training_comment": "",
"txt_attn_dim": "",
"txt_mlp_dim": "",
"txt_mod_dim": "",
"unet_lr": 0.0005,
"unit": 1,
"up_lr_weight": "",
"use_cp": false,
"use_scalar": false,
"use_tucker": false,
"v2": false,
"v_parameterization": false,
"v_pred_like_loss": 0,
"vae": "",
"vae_batch_size": 0,
"wandb_api_key": "",
"wandb_run_name": "",
"weighted_captions": false,
"xformers": "sdpa"
}