Bordoglor committed on
Commit
47744e9
·
verified ·
1 Parent(s): f8bf084

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -1
app.py CHANGED
@@ -24,6 +24,7 @@ def infer(
24
  width,
25
  height,
26
  guidance_scale,
 
27
  num_inference_steps,
28
  progress=gr.Progress(track_tqdm=True),
29
  ):
@@ -42,8 +43,9 @@ def infer(
42
  if model == "Ramzes":
43
  pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch_dtype)
44
  pipe.unet = PeftModel.from_pretrained(pipe.unet, "Bordoglor/Ramzes_adapter_sd_v1.5", subfolder="unet")
45
- #pipe.load_lora_weights("Bordoglor/Ramzes_adapter_sd_v1.5", subfolder="unet")
46
  pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, "Bordoglor/Ramzes_adapter_sd_v1.5", subfolder="text_encoder")
 
 
47
  else:
48
  pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype)
49
  pipe = pipe.to(device)
@@ -138,6 +140,14 @@ with gr.Blocks(css=css) as demo:
138
  value=7.0, # Replace with defaults that work for your model
139
  )
140
 
 
 
 
 
 
 
 
 
141
  num_inference_steps = gr.Slider(
142
  label="Number of inference steps",
143
  minimum=1,
@@ -158,6 +168,7 @@ with gr.Blocks(css=css) as demo:
158
  width,
159
  height,
160
  guidance_scale,
 
161
  num_inference_steps,
162
  ],
163
  outputs=[result, seed],
 
24
  width,
25
  height,
26
  guidance_scale,
27
+ lora_scale,
28
  num_inference_steps,
29
  progress=gr.Progress(track_tqdm=True),
30
  ):
 
43
  if model == "Ramzes":
44
  pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch_dtype)
45
  pipe.unet = PeftModel.from_pretrained(pipe.unet, "Bordoglor/Ramzes_adapter_sd_v1.5", subfolder="unet")
 
46
  pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, "Bordoglor/Ramzes_adapter_sd_v1.5", subfolder="text_encoder")
47
+ pipe.unet.load_state_dict({k: lora_scale*v for k, v in pipe.unet.state_dict().items()})
48
+ pipe.text_encoder.load_state_dict({k: lora_scale*v for k, v in pipe.text_encoder.state_dict().items()})
49
  else:
50
  pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype)
51
  pipe = pipe.to(device)
 
140
  value=7.0, # Replace with defaults that work for your model
141
  )
142
 
143
+ lora_scale = gr.Slider(
144
+ label="LoRA scale",
145
+ minimum=0.0,
146
+ maximum=1.0,
147
+ step=0.05,
148
+ value=0.9
149
+ )
150
+
151
  num_inference_steps = gr.Slider(
152
  label="Number of inference steps",
153
  minimum=1,
 
168
  width,
169
  height,
170
  guidance_scale,
171
+ lora_scale,
172
  num_inference_steps,
173
  ],
174
  outputs=[result, seed],