Update app.py
app.py CHANGED
@@ -42,6 +42,9 @@ footer {
 repo_nsfw_classifier = pipeline("image-classification", model="Falconsai/nsfw_image_detection")
 
 repo_default = DiffusionPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
+repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="default_base")
+repo_default.load_lora_weights("artificialguybr/PixelArtRedmond", adapter_name="pixel_base")
+repo_default.load_lora_weights("nerijs/pixel-art-xl", adapter_name="pixel_base_2")
 
 # repo_large = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16, add_watermarker=False, revision="refs/pr/1")
 # repo_large.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
@@ -51,7 +54,7 @@ repo_customs = {
     "Default": repo_default,
     # "Realistic": DiffusionPipeline.from_pretrained("ehristoforu/Visionix-alpha", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
     # "Anime": DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
-
+    "Pixel": repo_default,
     # "Large": repo_neo,
 }
 
@@ -71,7 +74,7 @@ def get_seed(seed):
 @spaces.GPU(duration=60)
 def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATIVE_INPUT, model=DEFAULT_MODEL, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None):
 
-    repo = repo_customs[model or "Default"]
+    repo = repo_customs[model or "Default"].to(DEVICE)
     filter_input = filter_input or ""
     negative_input = negative_input or DEFAULT_NEGATIVE_INPUT
     steps_set = steps
@@ -89,22 +92,14 @@ def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATIVE_INPUT, model=DEFAULT_MODEL, height=DEFAULT_HEIGHT, width=DEFAULT_WIDTH, steps=1, guidance=0, number=1, seed=None):
     elif model == "Pixel":
         steps_set = 15
         guidance_set = 1.5
-
-        repo.load_lora_weights("artificialguybr/PixelArtRedmond", adapter_name="base")
-        repo.load_lora_weights("nerijs/pixel-art-xl", adapter_name="base2")
-        repo.set_adapters(["base", "base2"], adapter_weights=[1, 1])
+        repo.set_adapters(["pixel_base", "pixel_base_2"], adapter_weights=[1, 1])
     elif model == "Large":
         steps_set = 25
         guidance_set = 3.5
     else:
         steps_set = 25
         guidance_set = 7
-
-        repo.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
-        print(2)
-        repo.set_adapters(["base"], adapter_weights=[0.7])
-        print(3)
-        repo.to(DEVICE)
+        repo.set_adapters(["default_base"], adapter_weights=[0.7])
 
     if not steps:
         steps = steps_set
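What the commit does: each LoRA is loaded once at startup under a unique adapter name, rather than re-loaded inside generate() on every request (at module scope the pipeline variable is repo_default; repo exists only inside generate()). A minimal standalone sketch of the load-once half of the pattern, using the model IDs and adapter names from the diff; the condensed repo_customs mapping is an assumption:

```python
import torch
from diffusers import DiffusionPipeline

# Shared SDXL pipeline, loaded once at import time.
repo_default = DiffusionPipeline.from_pretrained(
    "fluently/Fluently-XL-Final",
    torch_dtype=torch.float16,
    use_safetensors=True,
    add_watermarker=False,
)

# Register each LoRA once under a distinct adapter name
# (named adapters require the `peft` package).
repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="default_base")
repo_default.load_lora_weights("artificialguybr/PixelArtRedmond", adapter_name="pixel_base")
repo_default.load_lora_weights("nerijs/pixel-art-xl", adapter_name="pixel_base_2")

# Both styles map to the same pipeline object; only the set of
# active adapters differs per request.
repo_customs = {
    "Default": repo_default,
    "Pixel": repo_default,
}
```

Unique adapter names also remove the old collision where the Pixel branch and the default branch both registered adapters as "base", and with the weights already resident, a request pays no download or load cost inside the 60-second @spaces.GPU window.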
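On the request side, generate() now only toggles which preloaded adapters are active via set_adapters, and the device transfer is hoisted out of the else branch into the lookup itself, so the "Pixel" path can no longer be left on the CPU. A trimmed sketch of the resulting control flow (the "Large" branch and the filtering logic are elided; DEVICE and the default arguments are assumptions standing in for app.py's constants):

```python
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

def generate(input="an astronaut riding a horse", model="Default", steps=0, guidance=0):
    # Move whichever pipeline was chosen; before this commit only the
    # default branch called repo.to(DEVICE).
    repo = repo_customs[model or "Default"].to(DEVICE)

    if model == "Pixel":
        steps_set, guidance_set = 15, 1.5
        repo.set_adapters(["pixel_base", "pixel_base_2"], adapter_weights=[1, 1])
    else:
        steps_set, guidance_set = 25, 7
        repo.set_adapters(["default_base"], adapter_weights=[0.7])

    # Fall back to the per-style presets when the caller passes 0/None,
    # mirroring the `if not steps:` check in app.py.
    steps = steps or steps_set
    guidance = guidance or guidance_set

    return repo(prompt=input, num_inference_steps=steps, guidance_scale=guidance).images
```

set_adapters only changes which resident adapters contribute (and at what weight); it performs no I/O, which is what makes per-request style switching cheap.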