GiorgioV committed
Commit 7e8382c (verified) · 1 Parent(s): 4aeea8c

Update app.py

Files changed (1):
  1. app.py +3 -37
app.py CHANGED
@@ -60,43 +60,9 @@ pipe.load_lora_weights(
  weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
  adapter_name="lightx2v_2", **kwargs_lora
  )
-
- # Load LoRA High
- pipe.load_lora_weights(
- "GiorgioV/LoRA_for_WAN_22",
- weight_name="W22_LoRA_High.safetensors",
- adapter_name="lora_high"
- )
- kwargs_lora_high = {"load_into_transformer_2": True}
- pipe.load_lora_weights(
- "GiorgioV/LoRA_for_WAN_22",
- weight_name="W22_LoRA_High.safetensors",
- adapter_name="lora_high_2",
- **kwargs_lora_high
- )
-
- # Load LoRA Low
- pipe.load_lora_weights(
- "GiorgioV/LoRA_for_WAN_22",
- weight_name="W22_LoRA_Low.safetensors",
- adapter_name="lora_low"
- )
- kwargs_lora_low = {"load_into_transformer_2": True}
- pipe.load_lora_weights(
- "GiorgioV/LoRA_for_WAN_22",
- weight_name="W22_LoRA_Low.safetensors",
- adapter_name="lora_low_2",
- **kwargs_lora_low
- )
-
- pipe.set_adapters(["lightx2v", "lightx2v_2", "lora_high", "lora_high_2", "lora_low", "lora_low_2"], adapter_weights=[1., 1., 0.1, 0.1, 0.1, 0.1])
- pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=2.0, components=["transformer"])
- pipe.fuse_lora(adapter_names=["lora_high"], lora_scale=1.0, components=["transformer"])
- pipe.fuse_lora(adapter_names=["lora_low"], lora_scale=1.0, components=["transformer"])
-
- pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1.0, components=["transformer_2"])
- pipe.fuse_lora(adapter_names=["lora_high_2"], lora_scale=0.7, components=["transformer_2"])
- pipe.fuse_lora(adapter_names=["lora_low_2"], lora_scale=0.7, components=["transformer_2"])
+ pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
+ pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
+ pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
  pipe.unload_lora_weights()

  quantize_(pipe.text_encoder, Int8WeightOnlyConfig())
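
Net effect of the commit: the W22_LoRA_High / W22_LoRA_Low adapters are no longer loaded or fused, only the Lightx2v distill LoRA remains, and its fuse scale on the first transformer is raised from 2.0 to 3.0. For context, below is a minimal sketch of how the remaining LoRA flow fits into app.py after this change. The pipeline class, model id, LoRA repo, dtype, and the kwargs_lora definition all sit above the hunk and are not part of the diff, so those names and values are assumptions; only the set_adapters, fuse_lora, unload_lora_weights, and quantize_ calls are taken from the hunk itself.

# Minimal sketch of the post-commit LoRA setup (assumptions marked in comments).
import torch
from diffusers import WanImageToVideoPipeline
from torchao.quantization import quantize_, Int8WeightOnlyConfig

pipe = WanImageToVideoPipeline.from_pretrained(
    "Wan-AI/Wan2.2-I2V-A14B-Diffusers",  # assumed model id, not shown in the diff
    torch_dtype=torch.bfloat16,          # assumed dtype
)

# The distill LoRA is loaded twice: once into the high-noise transformer and once,
# via load_into_transformer_2=True (the diff's kwargs_lora pattern), into transformer_2.
kwargs_lora = {"load_into_transformer_2": True}  # assumed to match the definition above the hunk
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",  # assumed repo hosting the Lightx2v weights
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v",
)
pipe.load_lora_weights(
    "Kijai/WanVideo_comfy",  # assumed repo
    weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
    adapter_name="lightx2v_2", **kwargs_lora,
)

# Activate both adapters, bake them into the two transformers at the new scales,
# then drop the adapter state before quantizing the text encoder to int8 weights.
pipe.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
pipe.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
pipe.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
pipe.unload_lora_weights()

quantize_(pipe.text_encoder, Int8WeightOnlyConfig())

Fusing first and then calling unload_lora_weights() is the usual diffusers pattern for keeping the LoRA deltas merged into the transformer weights while discarding the adapter modules, so the transformers are plain modules by the time the torchao quantization step runs.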