Spaces: Running on A10G
cache examples
Browse files
app.py CHANGED

@@ -81,12 +81,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         if device == 'cuda':
             pipe_inference.to('cpu')
             torch.cuda.empty_cache()
-            # pipe_inversion = pipe_inversion.to(device)
-            # if image_editor is not None:
-            #     image_editor = image_editor.to('cpu')
-
-        print(f"#### 1 #### pipe_inversion.device: {pipe_inversion.device}")
-        print(f"#### 2 #### pipe_inference.device: {pipe_inference.device}")

         inversion_state = ImageEditorDemo.invert(pipe_inversion.to(device), input_image, description_prompt, config,
                                                  [rnri_iterations, rnri_alpha, rnri_lr], device)
@@ -94,9 +88,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             pipe_inversion.to('cpu')
             torch.cuda.empty_cache()
             pipe_inference.to(device)
-
-        print(f"#### 3 #### pipe_inversion.device: {pipe_inversion.device}")
-        print(f"#### 4 #### pipe_inference.device: {pipe_inference.device}")
+
         gr.Info('Input has set!')
         return inversion_state, "Input has set!"

@@ -105,14 +97,9 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         if inversion_state is None:
             raise gr.Error("Set inputs before editing. Progress indication below")

-        print(f"#### 5 #### pipe_inversion.device: {pipe_inversion.device}")
-        print(f"#### 6 #### pipe_inference.device: {pipe_inference.device}")
-
         image = ImageEditorDemo.edit(pipe_inference, target_prompt, inversion_state['latent'], inversion_state['noise'],
                                      inversion_state['cfg'], inversion_state['cfg'].edit_guidance_scale)

-        print(f"#### 7 #### pipe_inversion.device: {pipe_inversion.device}")
-        print(f"#### 8 #### pipe_inference.device: {pipe_inference.device}")
         return image


@@ -202,6 +189,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
                 examples='examples',
                 inputs=[input_image, description_prompt, target_prompt, edit_guidance_scale, num_inference_steps,
                         inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
+                cache_examples=True,
             )

     gr.Markdown(f"""Disclaimer: Performance may be inferior to the reported in the paper due to hardware limitation.
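For context, the first two hunks keep only one of the two pipelines (inversion vs. inference) resident on the GPU at a time. Below is a minimal sketch of that swap pattern, assuming two diffusers-style pipelines that expose .to(device) and do not fit in GPU memory together; the helper name activate is hypothetical, as the Space inlines these three calls directly:

import torch

def activate(active_pipe, idle_pipe, device='cuda'):
    # Park the pipeline we just finished with, return its cached CUDA
    # blocks to the driver, then promote the pipeline we need next.
    if device == 'cuda' and torch.cuda.is_available():
        idle_pipe.to('cpu')
        torch.cuda.empty_cache()
        active_pipe.to(device)
    return active_pipe

Note that torch.cuda.empty_cache() does not free tensors that are still referenced; it only hands the allocator's cached, currently unused blocks back to the driver, which helps when fragmentation or other processes compete for the same GPU.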
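The headline change is cache_examples=True on gr.Examples. With caching enabled, Gradio runs the example function once per example row at startup and serves the stored outputs when a user clicks an example, instead of re-running inference each time; this requires fn and outputs to be set on the component, presumably passed outside the visible hunk. A self-contained sketch with placeholder names (shout stands in for the expensive editing function):

import gradio as gr

def shout(text: str) -> str:
    # Stand-in for the expensive function; with caching enabled it runs
    # once per example row at startup.
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label='Input')
    out = gr.Textbox(label='Output')
    gr.Button('Run').click(shout, inputs=inp, outputs=out)
    gr.Examples(
        examples=[['hello'], ['world']],
        inputs=[inp],
        outputs=[out],        # required when cache_examples=True
        fn=shout,             # required when cache_examples=True
        cache_examples=True,  # compute at startup, replay on click
    )

demo.launch()

The trade-off: startup takes longer because every example runs once, but clicking an example is then instant for users and costs no GPU time afterwards.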