Commit 5925ea4 · verified · committed by rahul7star · 1 Parent(s): cb796fa

Update app_exp.py

Files changed (1):
  1. app_exp.py (+179 -117)

app_exp.py CHANGED

Old side of the diff (removed lines marked with -, unchanged context lines unmarked):
@@ -1,37 +1,50 @@
  import os
  import sys
  import subprocess
  import tempfile
  import numpy as np
  from PIL import Image

- import torch
- import gradio as gr
- from torchvision.io import write_video

- # Define paths
  REPO_PATH = "LongCat-Video"
  CHECKPOINT_DIR = os.path.join(REPO_PATH, "weights", "LongCat-Video")

- # Clone the repository if it doesn't exist
  if not os.path.exists(REPO_PATH):
      print(f"Cloning LongCat-Video repository to '{REPO_PATH}'...")
-     try:
-         subprocess.run(
-             ["git", "clone", "https://github.com/meituan-longcat/LongCat-Video.git", REPO_PATH],
-             check=True,
-             capture_output=True
-         )
-         print("Repository cloned successfully.")
-     except subprocess.CalledProcessError as e:
-         print(f"Error cloning repository: {e.stderr.decode()}")
-         sys.exit(1)
-
- # Add the cloned repository to the Python path to allow imports
  sys.path.insert(0, os.path.abspath(REPO_PATH))

- # Now that the repo is in the path, we can import its modules
- from huggingface_hub import snapshot_download
  from longcat_video.pipeline_longcat_video import LongCatVideoPipeline
  from longcat_video.modules.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
  from longcat_video.modules.autoencoder_kl_wan import AutoencoderKLWan
@@ -40,136 +53,146 @@ from longcat_video.context_parallel import context_parallel_util
  from transformers import AutoTokenizer, UMT5EncoderModel
  from diffusers.utils import export_to_video

- # Download model weights from Hugging Face Hub if they don't exist
  if not os.path.exists(CHECKPOINT_DIR):
      print(f"Downloading model weights to '{CHECKPOINT_DIR}'...")
-     try:
-         snapshot_download(
-             repo_id="meituan-longcat/LongCat-Video",
-             local_dir=CHECKPOINT_DIR,
-             local_dir_use_symlinks=False,  # Use False for better Windows compatibility
-             ignore_patterns=["*.md", "*.gitattributes", "assets/*"]  # ignore non-essential files
-         )
-         print("Model weights downloaded successfully.")
-     except Exception as e:
-         print(f"Error downloading model weights: {e}")
-         sys.exit(1)
-
- # Global placeholder for the pipeline and device configuration
- pipe = None
  device = "cuda" if torch.cuda.is_available() else "cpu"
  torch_dtype = torch.bfloat16 if device == "cuda" else torch.float32

- # --- Initialize models ---
- cp_split_hw = context_parallel_util.get_optimal_split(1)
- tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_DIR, subfolder="tokenizer", torch_dtype=torch_dtype)
- text_encoder = UMT5EncoderModel.from_pretrained(CHECKPOINT_DIR, subfolder="text_encoder", torch_dtype=torch_dtype)
- vae = AutoencoderKLWan.from_pretrained(CHECKPOINT_DIR, subfolder="vae", torch_dtype=torch_dtype)
- scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(CHECKPOINT_DIR, subfolder="scheduler", torch_dtype=torch_dtype)
-
- dit = LongCatVideoTransformer3DModel.from_pretrained(CHECKPOINT_DIR,
-     enable_flashattn3=True,
-     enable_flashattn2=False,
-     enable_xformers=True,
-     subfolder="dit",
-     cp_split_hw=cp_split_hw,
-     torch_dtype=torch_dtype)
-
- pipe = LongCatVideoPipeline(
-     tokenizer=tokenizer,
-     text_encoder=text_encoder,
-     vae=vae,
-     scheduler=scheduler,
-     dit=dit,
- )
- pipe.to(device)
-
- # --- Load LoRAs ---
- cfg_lora = os.path.join(CHECKPOINT_DIR, 'lora/cfg_step_lora.safetensors')
- refine_lora = os.path.join(CHECKPOINT_DIR, 'lora/refinement_lora.safetensors')
- pipe.dit.load_lora(cfg_lora, 'cfg_step_lora')
- pipe.dit.load_lora(refine_lora, 'refinement_lora')
-
- # --- Enable Cache-DiT for DiT transformer ---
- cache_dit.enable_cache(pipe.dit)

  def torch_gc():
      if torch.cuda.is_available():
          torch.cuda.empty_cache()
          torch.cuda.ipc_collect()

- # --- Video generation function ---
  def generate_video(
      mode,
      prompt,
      neg_prompt,
      image,
-     num_frames,
      seed,
      use_distill,
      use_refine,
  ):
      if pipe is None:
-         raise gr.Error("Models not loaded.")

      generator = torch.Generator(device=device).manual_seed(int(seed))
      is_distill = use_distill or use_refine

-     if is_distill:
-         pipe.dit.enable_loras(['cfg_step_lora'])
-         num_inference_steps = 16
-         guidance_scale = 1.0
-         current_neg_prompt = ""
-     else:
-         num_inference_steps = 50
-         guidance_scale = 4.0
-         current_neg_prompt = neg_prompt

      if mode == "t2v":
          output = pipe.generate_t2v(
              prompt=prompt,
-             negative_prompt=current_neg_prompt,
-             height=480,
-             width=832,
              num_frames=num_frames,
              num_inference_steps=num_inference_steps,
              guidance_scale=guidance_scale,
-             generator=generator,
          )[0]
-     else:  # i2v
-         pil_image = Image.fromarray(image)
          output = pipe.generate_i2v(
-             image=pil_image,
              prompt=prompt,
-             negative_prompt=current_neg_prompt,
-             resolution="480p",
              num_frames=num_frames,
              num_inference_steps=num_inference_steps,
              guidance_scale=guidance_scale,
-             generator=generator,
          )[0]

-     if is_distill:
-         pipe.dit.disable_all_loras()
-
      torch_gc()

-     # Optional refinement
      if use_refine:
          pipe.dit.enable_loras(['refinement_lora'])
          pipe.dit.enable_bsa()

          stage1_video_pil = [(frame * 255).astype(np.uint8) for frame in output]
          stage1_video_pil = [Image.fromarray(img) for img in stage1_video_pil]
-
          refine_image = Image.fromarray(image) if mode == 'i2v' else None
          output = pipe.generate_refine(
              image=refine_image,
              prompt=prompt,
              stage1_video=stage1_video_pil,
-             num_cond_frames=1 if mode == 'i2v' else 0,
              num_inference_steps=50,
-             generator=generator,
          )[0]

          pipe.dit.disable_all_loras()
@@ -177,35 +200,74 @@ def generate_video(
      torch_gc()

      # Export video
      with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video_file:
-         export_to_video(output, temp_video_file.name, fps=15)
      return temp_video_file.name

- # --- Gradio UI ---
  css = ".fillable{max-width: 960px !important}"
  with gr.Blocks(css=css) as demo:
-     gr.Markdown("# 🎬 LongCat-Video Optimized")
-     with gr.Row():
-         with gr.Column(scale=2):
-             prompt_input = gr.Textbox(label="Prompt", lines=4)
-             neg_prompt_input = gr.Textbox(label="Negative Prompt", lines=2, value="ugly, blurry, low quality")
-             seed_input = gr.Number(label="Seed", value=42, precision=0)
-             frames_slider = gr.Slider(16, 128, value=48, step=1, label="Number of Frames")
-             distill_checkbox = gr.Checkbox(label="Use Distill Mode", value=True)
-             refine_checkbox = gr.Checkbox(label="Use Refine Mode", value=False)
-             t2v_button = gr.Button("Generate Video")
-         with gr.Column(scale=3):
-             video_output = gr.Video(label="Generated Video", interactive=False)

      t2v_button.click(
-         fn=generate_video,
-         inputs=[
-             gr.State("t2v"), prompt_input, neg_prompt_input,
-             gr.State(None), frames_slider, seed_input,
-             distill_checkbox, refine_checkbox
-         ],
-         outputs=video_output
      )

  if __name__ == "__main__":
-     demo.launch()
 
New side of the diff (added lines marked with +, unchanged context lines unmarked):

+ import gradio as gr
+ import torch
  import os
  import sys
  import subprocess
  import tempfile
  import numpy as np
+ import site
+ import importlib
  from PIL import Image
+ from huggingface_hub import snapshot_download, hf_hub_download

+ # ============================================================
+ # 0️⃣ FlashAttention 3 Setup
+ # ============================================================
+ try:
+     print("Attempting to download and install FlashAttention wheel...")
+     flash_attention_wheel = hf_hub_download(
+         repo_id="rahul7star/flash-attn-3",
+         repo_type="model",
+         filename="128/flash_attn_3-3.0.0b1-cp39-abi3-linux_x86_64.whl",
+     )
+     subprocess.run(["pip", "install", flash_attention_wheel], check=True)
+     site.addsitedir(site.getsitepackages()[0])
+     importlib.invalidate_caches()
+     print("✅ FlashAttention installed successfully.")
+     enable_fa3 = True
+ except Exception as e:
+     print(f"⚠️ Could not install FlashAttention: {e}")
+     print("Continuing without FlashAttention...")
+     enable_fa3 = False

+ # ============================================================
+ # 1️⃣ Repository Setup
+ # ============================================================
  REPO_PATH = "LongCat-Video"
  CHECKPOINT_DIR = os.path.join(REPO_PATH, "weights", "LongCat-Video")

  if not os.path.exists(REPO_PATH):
      print(f"Cloning LongCat-Video repository to '{REPO_PATH}'...")
+     subprocess.run(
+         ["git", "clone", "https://github.com/meituan-longcat/LongCat-Video.git", REPO_PATH],
+         check=True
+     )
+
  sys.path.insert(0, os.path.abspath(REPO_PATH))

  from longcat_video.pipeline_longcat_video import LongCatVideoPipeline
  from longcat_video.modules.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
  from longcat_video.modules.autoencoder_kl_wan import AutoencoderKLWan

@@ -40,136 +53,146 @@ from longcat_video.context_parallel import context_parallel_util
  from transformers import AutoTokenizer, UMT5EncoderModel
  from diffusers.utils import export_to_video

+ # Download weights if not present
  if not os.path.exists(CHECKPOINT_DIR):
      print(f"Downloading model weights to '{CHECKPOINT_DIR}'...")
+     snapshot_download(
+         repo_id="meituan-longcat/LongCat-Video",
+         local_dir=CHECKPOINT_DIR,
+         local_dir_use_symlinks=False,
+         ignore_patterns=["*.md", "*.gitattributes", "assets/*"]
+     )
+
+ # ============================================================
+ # 2️⃣ Device & Models
+ # ============================================================
  device = "cuda" if torch.cuda.is_available() else "cpu"
  torch_dtype = torch.bfloat16 if device == "cuda" else torch.float32

+ print(f"Device: {device}, dtype: {torch_dtype}")
+
+ pipe = None
+ try:
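+     # Context-parallel split computed for a single device (argument 1 = one worker).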
+     cp_split_hw = context_parallel_util.get_optimal_split(1)
+
+     tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_DIR, subfolder="tokenizer", torch_dtype=torch_dtype)
+     text_encoder = UMT5EncoderModel.from_pretrained(CHECKPOINT_DIR, subfolder="text_encoder", torch_dtype=torch_dtype)
+
+     vae = AutoencoderKLWan.from_pretrained(CHECKPOINT_DIR, subfolder="vae", torch_dtype=torch_dtype)
+     scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(CHECKPOINT_DIR, subfolder="scheduler", torch_dtype=torch_dtype)
+
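+     # enable_flashattn3 reflects whether the FlashAttention wheel install above succeeded.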
+     dit = LongCatVideoTransformer3DModel.from_pretrained(
+         CHECKPOINT_DIR,
+         enable_flashattn3=enable_fa3,
+         enable_flashattn2=False,
+         enable_xformers=True,
+         subfolder="dit",
+         cp_split_hw=cp_split_hw,
+         torch_dtype=torch_dtype
+     )
+
+     pipe = LongCatVideoPipeline(
+         tokenizer=tokenizer,
+         text_encoder=text_encoder,
+         vae=vae,
+         scheduler=scheduler,
+         dit=dit,
+     )
+     pipe.to(device)

+     # Load LoRA weights
+     pipe.dit.load_lora(os.path.join(CHECKPOINT_DIR, 'lora/cfg_step_lora.safetensors'), 'cfg_step_lora')
+     pipe.dit.load_lora(os.path.join(CHECKPOINT_DIR, 'lora/refinement_lora.safetensors'), 'refinement_lora')
+
+     print("✅ Models loaded successfully")
+
+ except Exception as e:
+     print(f"❌ Failed to load models: {e}")
+     pipe = None
+
+ # ============================================================
+ # 3️⃣ Generation Helpers
+ # ============================================================
  def torch_gc():
      if torch.cuda.is_available():
          torch.cuda.empty_cache()
          torch.cuda.ipc_collect()

  def generate_video(
      mode,
      prompt,
      neg_prompt,
      image,
+     height, width, resolution,
      seed,
      use_distill,
      use_refine,
+     duration_sec,
+     progress=gr.Progress(track_tqdm=True)
  ):
      if pipe is None:
+         raise gr.Error("Models failed to load")

+     # Adaptive FPS for faster testing
+     fps = 15 if use_distill else 30
+     num_frames = int(duration_sec * fps)
      generator = torch.Generator(device=device).manual_seed(int(seed))
      is_distill = use_distill or use_refine

+     # Stage 1
+     progress(0.2, desc="Stage 1: Base Video Generation")
+     pipe.dit.enable_loras(['cfg_step_lora'] if is_distill else [])
+
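+     # Distill path: fewer denoising steps and a lower guidance scale; the negative prompt is dropped.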
+     num_inference_steps = 12 if is_distill else 24
+     guidance_scale = 2.0 if is_distill else 4.0
+     curr_neg_prompt = "" if is_distill else neg_prompt

      if mode == "t2v":
          output = pipe.generate_t2v(
              prompt=prompt,
+             negative_prompt=curr_neg_prompt,
+             height=height,
+             width=width,
              num_frames=num_frames,
              num_inference_steps=num_inference_steps,
+             use_distill=is_distill,
              guidance_scale=guidance_scale,
+             generator=generator
          )[0]
+     else:
+         pil_img = Image.fromarray(image)
          output = pipe.generate_i2v(
+             image=pil_img,
              prompt=prompt,
+             negative_prompt=curr_neg_prompt,
+             resolution=resolution,
              num_frames=num_frames,
              num_inference_steps=num_inference_steps,
+             use_distill=is_distill,
              guidance_scale=guidance_scale,
+             generator=generator
          )[0]

+     pipe.dit.disable_all_loras()
      torch_gc()

+     # Stage 2: Optional refinement
      if use_refine:
+         progress(0.5, desc="Stage 2: Refinement")
          pipe.dit.enable_loras(['refinement_lora'])
          pipe.dit.enable_bsa()

          stage1_video_pil = [(frame * 255).astype(np.uint8) for frame in output]
          stage1_video_pil = [Image.fromarray(img) for img in stage1_video_pil]
          refine_image = Image.fromarray(image) if mode == 'i2v' else None
+
          output = pipe.generate_refine(
              image=refine_image,
              prompt=prompt,
              stage1_video=stage1_video_pil,
+             num_cond_frames=1 if mode == 'i2v' else 0,
              num_inference_steps=50,
+             generator=generator
          )[0]

          pipe.dit.disable_all_loras()

@@ -177,35 +200,74 @@ def generate_video(
      torch_gc()

      # Export video
+     progress(1.0, desc="Exporting video")
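+     # Export at the same fps used to derive num_frames, so the clip length matches duration_sec.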
      with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_video_file:
+         export_to_video(output, temp_video_file.name, fps=fps)
      return temp_video_file.name

+ # ============================================================
+ # 4️⃣ Gradio UI
+ # ============================================================
  css = ".fillable{max-width: 960px !important}"
+
  with gr.Blocks(css=css) as demo:
+     gr.Markdown("# 🎬 LongCat-Video")
+     gr.Markdown("13.6B parameter dense video-generation model by Meituan — [[Model](https://huggingface.co/meituan-longcat/LongCat-Video)]")
+
+     with gr.Tabs() as tabs:
+         with gr.TabItem("Text-to-Video"):
+             mode_t2v = gr.State("t2v")
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     prompt_t2v = gr.Textbox(label="Prompt", lines=4)
+                     neg_prompt_t2v = gr.Textbox(label="Negative Prompt", lines=2, value="blurry, low quality")
+                     height_t2v = gr.Slider(256, 1024, step=64, value=480, label="Height")
+                     width_t2v = gr.Slider(256, 1024, step=64, value=832, label="Width")
+                     seed_t2v = gr.Number(value=42, label="Seed")
+                     distill_t2v = gr.Checkbox(value=True, label="Use Distill Mode")
+                     refine_t2v = gr.Checkbox(value=False, label="Use Refine Mode")
+                     duration_t2v = gr.Slider(1, 20, step=1, value=2, label="Video Duration (seconds)")
+
+                     t2v_button = gr.Button("Generate Video")
+                 with gr.Column(scale=3):
+                     video_output_t2v = gr.Video(label="Generated Video")

+         with gr.TabItem("Image-to-Video"):
+             mode_i2v = gr.State("i2v")
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     image_i2v = gr.Image(type="numpy", label="Input Image")
+                     prompt_i2v = gr.Textbox(label="Prompt", lines=4)
+                     neg_prompt_i2v = gr.Textbox(label="Negative Prompt", lines=2, value="blurry, low quality")
+                     resolution_i2v = gr.Dropdown(["480p","720p"], value="480p", label="Resolution")
+                     seed_i2v = gr.Number(value=42, label="Seed")
+                     distill_i2v = gr.Checkbox(value=True, label="Use Distill Mode")
+                     refine_i2v = gr.Checkbox(value=False, label="Use Refine Mode")
+                     duration_i2v = gr.Slider(1, 20, step=1, value=2, label="Video Duration (seconds)")
+
+                     i2v_button = gr.Button("Generate Video")
+                 with gr.Column(scale=3):
+                     video_output_i2v = gr.Video(label="Generated Video")
+
+     # Event binding
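+     # gr.State placeholders stand in for generate_video arguments a tab does not use:
+     # the t2v tab passes image=None and a fixed "480p" resolution, the i2v tab passes height=None and width=None.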
      t2v_button.click(
+         generate_video,
+         inputs=[mode_t2v, prompt_t2v, neg_prompt_t2v, gr.State(None),
+                 height_t2v, width_t2v, gr.State("480p"),
+                 seed_t2v, distill_t2v, refine_t2v, duration_t2v],
+         outputs=video_output_t2v
+     )
+
+     i2v_button.click(
+         generate_video,
+         inputs=[mode_i2v, prompt_i2v, neg_prompt_i2v, image_i2v,
+                 gr.State(None), gr.State(None), resolution_i2v,
+                 seed_i2v, distill_i2v, refine_i2v, duration_i2v],
+         outputs=video_output_i2v
      )

+ # ============================================================
+ # 5️⃣ Launch
+ # ============================================================
  if __name__ == "__main__":
+     demo.launch()