-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: multi_ref_finetuning.sh
More file actions
32 lines (32 loc) · 1.19 KB
/
multi_ref_finetuning.sh
File metadata and controls
32 lines (32 loc) · 1.19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
accelerate launch --use_deepspeed --num_processes 32 \
--num_machines ${WORLD_SIZE} \
--machine_rank ${RANK} \
--main_process_ip ${MASTER_ADDR} \
--main_process_port ${MASTER_PORT} \
--mixed_precision "bf16" \
--deepspeed_multinode_launcher standard \
--zero_stage 2 --offload_param_device none --offload_optimizer_device none --gradient_accumulation_steps 4 --zero3_init_flag false \
finetune_sdxl.py \
--pretrained_model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" \
--pretrained_easy_ref_path="{outputs/single_ref_finetuning/checkpoint-250000/pytorch_model/mp_rank_00_model_states.pt}" \
--image_encoder_path="Qwen/Qwen2-VL-2B-Instruct" \
--data_json_file="{data.json}" \
--data_root_path="{image_path}" \
--mixed_precision="bf16" \
--resolution=1024 \
--train_batch_size=1 \
--dataloader_num_workers=2 \
--learning_rate=2e-05 \
--weight_decay=0.0 \
--lr_warmup_steps=0 \
--t_drop_rate=0.05 \
--truncate_rate=0.4 \
--num_train_epochs=100 \
--gradient_accumulation_steps=4 \
--num_reference_tokens=64 \
--multi_ref_finetuning \
--unfreeze_mllm \
--use_lora \
--lora_rank=128 \
--output_dir="{outputs/multi_ref_finetuning}" \
--save_steps=20000