playmak3r commited on
Commit
0713000
Β·
1 Parent(s): 3cae7ac
Files changed (2) hide show
  1. app.py +1288 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,1288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ VibeVoice Gradio Demo - High-Quality Dialogue Generation Interface with Streaming Support
3
+ """
4
+
5
+ from typing import Iterator, Optional, List, Dict, Any
6
+ import argparse, os, time, traceback, json, sys, tempfile
7
+ from pathlib import Path
8
+ from datetime import datetime
9
+ import threading
10
+ import numpy as np
11
+ import gradio as gr
12
+ import librosa
13
+ import soundfile as sf
14
+ import torch
15
+
16
+ from vibevoice.modular.configuration_vibevoice import VibeVoiceConfig
17
+ from vibevoice.modular.modeling_vibevoice_inference import VibeVoiceForConditionalGenerationInference
18
+ from vibevoice.modular.lora_loading import load_lora_assets
19
+ from vibevoice.processor.vibevoice_processor import VibeVoiceProcessor
20
+ from vibevoice.modular.streamer import AudioStreamer
21
+ from transformers.utils import logging
22
+ from transformers import set_seed
23
+
24
+ logging.set_verbosity_info()
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
class VibeVoiceDemo:
    def __init__(self, model_path: str, device: str = "cuda", inference_steps: int = 5, adapter_path: Optional[str] = None):
        """Initialize the demo: store configuration, then load model, voices and examples.

        Args:
            model_path: Path or hub id of the pretrained VibeVoice checkpoint.
            device: Requested device ("cuda", "mps" or "cpu"); normalized inside load_model().
            inference_steps: Number of DDPM inference steps for the diffusion head.
            adapter_path: Optional path to LoRA / fine-tuned assets layered on the base model.
        """
        self.model_path = model_path
        self.device = device
        self.inference_steps = inference_steps
        self.adapter_path = adapter_path
        # Resolved location of the adapter that actually got loaded (set in load_model).
        self.loaded_adapter_root: Optional[str] = None
        # Generation state shared between the UI thread and the worker thread.
        self.is_generating = False       # Track generation state
        self.stop_generation = False     # Cooperative stop flag checked by the worker
        self.current_streamer = None     # Active AudioStreamer, if any
        self.load_model()
        self.setup_voice_presets()
        self.load_example_scripts()
43
+ def load_model(self):
44
+ """Load the VibeVoice model and processor."""
45
+ print(f"Loading processor & model from {self.model_path}")
46
+ self.loaded_adapter_root = None
47
+ # Normalize potential 'mpx'
48
+ if self.device.lower() == "mpx":
49
+ print("Note: device 'mpx' detected, treating it as 'mps'.")
50
+ self.device = "mps"
51
+ if self.device == "mps" and not torch.backends.mps.is_available():
52
+ print("Warning: MPS not available. Falling back to CPU.")
53
+ self.device = "cpu"
54
+ print(f"Using device: {self.device}")
55
+ # Load processor
56
+ self.processor = VibeVoiceProcessor.from_pretrained(self.model_path)
57
+ # Decide dtype & attention
58
+ if self.device == "mps":
59
+ load_dtype = torch.float32
60
+ attn_impl_primary = "sdpa"
61
+ elif self.device == "cuda":
62
+ load_dtype = torch.bfloat16
63
+ attn_impl_primary = "flash_attention_2"
64
+ else:
65
+ load_dtype = torch.float32
66
+ attn_impl_primary = "sdpa"
67
+ print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}")
68
+ # Load model
69
+ try:
70
+ if self.device == "mps":
71
+ self.model = VibeVoiceForConditionalGenerationInference.from_pretrained(
72
+ self.model_path,
73
+ torch_dtype=load_dtype,
74
+ attn_implementation=attn_impl_primary,
75
+ device_map=None,
76
+ )
77
+ self.model.to("mps")
78
+ elif self.device == "cuda":
79
+ self.model = VibeVoiceForConditionalGenerationInference.from_pretrained(
80
+ self.model_path,
81
+ torch_dtype=load_dtype,
82
+ device_map="cuda",
83
+ attn_implementation=attn_impl_primary,
84
+ )
85
+ else:
86
+ self.model = VibeVoiceForConditionalGenerationInference.from_pretrained(
87
+ self.model_path,
88
+ torch_dtype=load_dtype,
89
+ device_map="cpu",
90
+ attn_implementation=attn_impl_primary,
91
+ )
92
+ except Exception as e:
93
+ if attn_impl_primary == 'flash_attention_2':
94
+ print(f"[ERROR] : {type(e).__name__}: {e}")
95
+ print(traceback.format_exc())
96
+ fallback_attn = "sdpa"
97
+ print(f"Falling back to attention implementation: {fallback_attn}")
98
+ self.model = VibeVoiceForConditionalGenerationInference.from_pretrained(
99
+ self.model_path,
100
+ torch_dtype=load_dtype,
101
+ device_map=(self.device if self.device in ("cuda", "cpu") else None),
102
+ attn_implementation=fallback_attn,
103
+ )
104
+ if self.device == "mps":
105
+ self.model.to("mps")
106
+ else:
107
+ raise e
108
+ if self.adapter_path:
109
+ print(f"Loading fine-tuned assets from {self.adapter_path}")
110
+ report = load_lora_assets(self.model, self.adapter_path)
111
+ loaded_components = [
112
+ name for name, loaded in (
113
+ ("language LoRA", report.language_model),
114
+ ("diffusion head LoRA", report.diffusion_head_lora),
115
+ ("diffusion head weights", report.diffusion_head_full),
116
+ ("acoustic connector", report.acoustic_connector),
117
+ ("semantic connector", report.semantic_connector),
118
+ )
119
+ if loaded
120
+ ]
121
+ if loaded_components:
122
+ print(f"Loaded components: {', '.join(loaded_components)}")
123
+ else:
124
+ print("Warning: no adapter components were loaded; check the checkpoint path.")
125
+ if report.adapter_root is not None:
126
+ self.loaded_adapter_root = str(report.adapter_root)
127
+ print(f"Adapter assets resolved to: {self.loaded_adapter_root}")
128
+ else:
129
+ self.loaded_adapter_root = self.adapter_path
130
+
131
+ self.model.eval()
132
+
133
+ # Use SDE solver by default
134
+ self.model.model.noise_scheduler = self.model.model.noise_scheduler.from_config(
135
+ self.model.model.noise_scheduler.config,
136
+ algorithm_type='sde-dpmsolver++',
137
+ beta_schedule='squaredcos_cap_v2'
138
+ )
139
+ self.model.set_ddpm_inference_steps(num_steps=self.inference_steps)
140
+
141
+ if hasattr(self.model.model, 'language_model'):
142
+ print(f"Language model attention: {self.model.model.language_model.config._attn_implementation}")
143
+
144
+ def setup_voice_presets(self):
145
+ """Setup voice presets by scanning the voices directory."""
146
+ voices_dir = os.path.join(os.path.dirname(__file__), "voices")
147
+
148
+ # Check if voices directory exists
149
+ if not os.path.exists(voices_dir):
150
+ print(f"Warning: Voices directory not found at {voices_dir}")
151
+ self.voice_presets = {}
152
+ self.available_voices = {}
153
+ return
154
+
155
+ # Scan for all WAV files in the voices directory
156
+ self.voice_presets = {}
157
+
158
+ # Get all .wav files in the voices directory
159
+ wav_files = [f for f in os.listdir(voices_dir)
160
+ if f.lower().endswith(('.wav', '.mp3', '.flac', '.ogg', '.m4a', '.aac')) and os.path.isfile(os.path.join(voices_dir, f))]
161
+
162
+ # Create dictionary with filename (without extension) as key
163
+ for wav_file in wav_files:
164
+ # Remove .wav extension to get the name
165
+ name = os.path.splitext(wav_file)[0]
166
+ # Create full path
167
+ full_path = os.path.join(voices_dir, wav_file)
168
+ self.voice_presets[name] = full_path
169
+
170
+ # Sort the voice presets alphabetically by name for better UI
171
+ self.voice_presets = dict(sorted(self.voice_presets.items()))
172
+
173
+ # Filter out voices that don't exist (this is now redundant but kept for safety)
174
+ self.available_voices = {
175
+ name: path for name, path in self.voice_presets.items()
176
+ if os.path.exists(path)
177
+ }
178
+
179
+ if not self.available_voices:
180
+ raise gr.Error("No voice presets found. Please add .wav files to the demo/voices directory.")
181
+
182
+ print(f"Found {len(self.available_voices)} voice files in {voices_dir}")
183
+ print(f"Available voices: {', '.join(self.available_voices.keys())}")
184
+
185
+ def read_audio(self, audio_path: str, target_sr: int = 24000) -> np.ndarray:
186
+ """Read and preprocess audio file."""
187
+ try:
188
+ wav, sr = sf.read(audio_path)
189
+ if len(wav.shape) > 1:
190
+ wav = np.mean(wav, axis=1)
191
+ if sr != target_sr:
192
+ wav = librosa.resample(wav, orig_sr=sr, target_sr=target_sr)
193
+ return wav
194
+ except Exception as e:
195
+ print(f"Error reading audio {audio_path}: {e}")
196
+ return np.array([])
197
+
198
+ def generate_podcast_streaming(self,
199
+ num_speakers: int,
200
+ script: str,
201
+ speaker_1: str = None,
202
+ speaker_2: str = None,
203
+ speaker_3: str = None,
204
+ speaker_4: str = None,
205
+ cfg_scale: float = 1.3,
206
+ disable_voice_cloning: bool = False) -> Iterator[tuple]:
207
+ try:
208
+
209
+ # Reset stop flag and set generating state
210
+ self.stop_generation = False
211
+ self.is_generating = True
212
+
213
+ # Validate inputs
214
+ if not script.strip():
215
+ self.is_generating = False
216
+ raise gr.Error("Error: Please provide a script.")
217
+
218
+ # Defend against common mistake
219
+ script = script.replace("’", "'")
220
+
221
+ if num_speakers < 1 or num_speakers > 4:
222
+ self.is_generating = False
223
+ raise gr.Error("Error: Number of speakers must be between 1 and 4.")
224
+
225
+ # Collect selected speakers
226
+ selected_speakers = [speaker_1, speaker_2, speaker_3, speaker_4][:num_speakers]
227
+
228
+ # Validate speaker selections
229
+ for i, speaker in enumerate(selected_speakers):
230
+ if not speaker or speaker not in self.available_voices:
231
+ self.is_generating = False
232
+ raise gr.Error(f"Error: Please select a valid speaker for Speaker {i+1}.")
233
+
234
+ voice_cloning_enabled = not disable_voice_cloning
235
+
236
+ # Build initial log
237
+ log = f"πŸŽ™οΈ Generating podcast with {num_speakers} speakers\n"
238
+ log += f"πŸ“Š Parameters: CFG Scale={cfg_scale}, Inference Steps={self.inference_steps}\n"
239
+ log += f"🎭 Speakers: {', '.join(selected_speakers)}\n"
240
+ log += f"πŸ”Š Voice cloning: {'Enabled' if voice_cloning_enabled else 'Disabled'}\n"
241
+ if self.loaded_adapter_root:
242
+ log += f"🧩 LoRA: {self.loaded_adapter_root}\n"
243
+
244
+ # Check for stop signal
245
+ if self.stop_generation:
246
+ self.is_generating = False
247
+ yield None, "πŸ›‘ Generation stopped by user", gr.update(visible=False)
248
+ return
249
+
250
+ # Load voice samples when voice cloning is enabled
251
+ voice_samples = None
252
+ if voice_cloning_enabled:
253
+ voice_samples = []
254
+ for speaker_name in selected_speakers:
255
+ audio_path = self.available_voices[speaker_name]
256
+ audio_data = self.read_audio(audio_path)
257
+ if len(audio_data) == 0:
258
+ self.is_generating = False
259
+ raise gr.Error(f"Error: Failed to load audio for {speaker_name}")
260
+ voice_samples.append(audio_data)
261
+
262
+ # log += f"βœ… Loaded {len(voice_samples)} voice samples\n"
263
+
264
+ # Check for stop signal
265
+ if self.stop_generation:
266
+ self.is_generating = False
267
+ yield None, "πŸ›‘ Generation stopped by user", gr.update(visible=False)
268
+ return
269
+
270
+ # Parse script to assign speaker ID's
271
+ lines = script.strip().split('\n')
272
+ formatted_script_lines = []
273
+
274
+ for line in lines:
275
+ line = line.strip()
276
+ if not line:
277
+ continue
278
+
279
+ # Check if line already has speaker format
280
+ if line.startswith('Speaker ') and ':' in line:
281
+ formatted_script_lines.append(line)
282
+ else:
283
+ # Auto-assign to speakers in rotation
284
+ speaker_id = len(formatted_script_lines) % num_speakers
285
+ formatted_script_lines.append(f"Speaker {speaker_id}: {line}")
286
+
287
+ formatted_script = '\n'.join(formatted_script_lines)
288
+ log += f"πŸ“ Formatted script with {len(formatted_script_lines)} turns\n\n"
289
+ log += "πŸ”„ Processing with VibeVoice (streaming mode)...\n"
290
+
291
+ # Check for stop signal before processing
292
+ if self.stop_generation:
293
+ self.is_generating = False
294
+ yield None, "πŸ›‘ Generation stopped by user", gr.update(visible=False)
295
+ return
296
+
297
+ start_time = time.time()
298
+
299
+ processor_kwargs = {
300
+ "text": [formatted_script],
301
+ "padding": True,
302
+ "return_tensors": "pt",
303
+ "return_attention_mask": True,
304
+ }
305
+ processor_kwargs["voice_samples"] = [voice_samples] if voice_samples is not None else None
306
+
307
+ inputs = self.processor(**processor_kwargs)
308
+ # Move tensors to device
309
+ target_device = self.device if self.device in ("cuda", "mps") else "cpu"
310
+ for k, v in inputs.items():
311
+ if torch.is_tensor(v):
312
+ inputs[k] = v.to(target_device)
313
+
314
+ # Create audio streamer
315
+ audio_streamer = AudioStreamer(
316
+ batch_size=1,
317
+ stop_signal=None,
318
+ timeout=None
319
+ )
320
+
321
+ # Store current streamer for potential stopping
322
+ self.current_streamer = audio_streamer
323
+
324
+ # Start generation in a separate thread
325
+ generation_thread = threading.Thread(
326
+ target=self._generate_with_streamer,
327
+ args=(inputs, cfg_scale, audio_streamer, voice_cloning_enabled)
328
+ )
329
+ generation_thread.start()
330
+
331
+ # Wait for generation to actually start producing audio
332
+ time.sleep(1) # Reduced from 3 to 1 second
333
+
334
+ # Check for stop signal after thread start
335
+ if self.stop_generation:
336
+ audio_streamer.end()
337
+ generation_thread.join(timeout=5.0) # Wait up to 5 seconds for thread to finish
338
+ self.is_generating = False
339
+ yield None, "πŸ›‘ Generation stopped by user", gr.update(visible=False)
340
+ return
341
+
342
+ # Collect audio chunks as they arrive
343
+ sample_rate = 24000
344
+ all_audio_chunks = [] # For final statistics
345
+ pending_chunks = [] # Buffer for accumulating small chunks
346
+ chunk_count = 0
347
+ last_yield_time = time.time()
348
+ min_yield_interval = 15 # Yield every 15 seconds
349
+ min_chunk_size = sample_rate * 30 # At least 2 seconds of audio
350
+
351
+ # Get the stream for the first (and only) sample
352
+ audio_stream = audio_streamer.get_stream(0)
353
+
354
+ has_yielded_audio = False
355
+ has_received_chunks = False # Track if we received any chunks at all
356
+
357
+ for audio_chunk in audio_stream:
358
+ # Check for stop signal in the streaming loop
359
+ if self.stop_generation:
360
+ audio_streamer.end()
361
+ break
362
+
363
+ chunk_count += 1
364
+ has_received_chunks = True # Mark that we received at least one chunk
365
+
366
+ # Convert tensor to numpy
367
+ if torch.is_tensor(audio_chunk):
368
+ # Convert bfloat16 to float32 first, then to numpy
369
+ if audio_chunk.dtype == torch.bfloat16:
370
+ audio_chunk = audio_chunk.float()
371
+ audio_np = audio_chunk.cpu().numpy().astype(np.float32)
372
+ else:
373
+ audio_np = np.array(audio_chunk, dtype=np.float32)
374
+
375
+ # Ensure audio is 1D and properly normalized
376
+ if len(audio_np.shape) > 1:
377
+ audio_np = audio_np.squeeze()
378
+
379
+ # Convert to 16-bit for Gradio
380
+ audio_16bit = convert_to_16_bit_wav(audio_np)
381
+
382
+ # Store for final statistics
383
+ all_audio_chunks.append(audio_16bit)
384
+
385
+ # Add to pending chunks buffer
386
+ pending_chunks.append(audio_16bit)
387
+
388
+ # Calculate pending audio size
389
+ pending_audio_size = sum(len(chunk) for chunk in pending_chunks)
390
+ current_time = time.time()
391
+ time_since_last_yield = current_time - last_yield_time
392
+
393
+ # Decide whether to yield
394
+ should_yield = False
395
+ if not has_yielded_audio and pending_audio_size >= min_chunk_size:
396
+ # First yield: wait for minimum chunk size
397
+ should_yield = True
398
+ has_yielded_audio = True
399
+ elif has_yielded_audio and (pending_audio_size >= min_chunk_size or time_since_last_yield >= min_yield_interval):
400
+ # Subsequent yields: either enough audio or enough time has passed
401
+ should_yield = True
402
+
403
+ if should_yield and pending_chunks:
404
+ # Concatenate and yield only the new audio chunks
405
+ new_audio = np.concatenate(pending_chunks)
406
+ new_duration = len(new_audio) / sample_rate
407
+ total_duration = sum(len(chunk) for chunk in all_audio_chunks) / sample_rate
408
+
409
+ log_update = log + f"🎡 Streaming: {total_duration:.1f}s generated (chunk {chunk_count})\n"
410
+
411
+ # Yield streaming audio chunk and keep complete_audio as None during streaming
412
+ yield (sample_rate, new_audio), None, log_update, gr.update(visible=True)
413
+
414
+ # Clear pending chunks after yielding
415
+ pending_chunks = []
416
+ last_yield_time = current_time
417
+
418
+ # Yield any remaining chunks
419
+ if pending_chunks:
420
+ final_new_audio = np.concatenate(pending_chunks)
421
+ total_duration = sum(len(chunk) for chunk in all_audio_chunks) / sample_rate
422
+ log_update = log + f"🎡 Streaming final chunk: {total_duration:.1f}s total\n"
423
+ yield (sample_rate, final_new_audio), None, log_update, gr.update(visible=True)
424
+ has_yielded_audio = True # Mark that we yielded audio
425
+
426
+ # Wait for generation to complete (with timeout to prevent hanging)
427
+ generation_thread.join(timeout=5.0) # Increased timeout to 5 seconds
428
+
429
+ # If thread is still alive after timeout, force end
430
+ if generation_thread.is_alive():
431
+ print("Warning: Generation thread did not complete within timeout")
432
+ audio_streamer.end()
433
+ generation_thread.join(timeout=5.0)
434
+
435
+ # Clean up
436
+ self.current_streamer = None
437
+ self.is_generating = False
438
+
439
+ generation_time = time.time() - start_time
440
+
441
+ # Check if stopped by user
442
+ if self.stop_generation:
443
+ yield None, None, "πŸ›‘ Generation stopped by user", gr.update(visible=False)
444
+ return
445
+
446
+ # Debug logging
447
+ # print(f"Debug: has_received_chunks={has_received_chunks}, chunk_count={chunk_count}, all_audio_chunks length={len(all_audio_chunks)}")
448
+
449
+ # Check if we received any chunks but didn't yield audio
450
+ if has_received_chunks and not has_yielded_audio and all_audio_chunks:
451
+ # We have chunks but didn't meet the yield criteria, yield them now
452
+ complete_audio = np.concatenate(all_audio_chunks)
453
+ final_duration = len(complete_audio) / sample_rate
454
+
455
+ final_log = log + f"⏱️ Generation completed in {generation_time:.2f} seconds\n"
456
+ final_log += f"🎡 Final audio duration: {final_duration:.2f} seconds\n"
457
+ final_log += f"πŸ“Š Total chunks: {chunk_count}\n"
458
+ final_log += "✨ Generation successful! Complete audio is ready.\n"
459
+ final_log += "πŸ’‘ Not satisfied? You can regenerate or adjust the CFG scale for different results."
460
+
461
+ # Yield the complete audio
462
+ yield None, (sample_rate, complete_audio), final_log, gr.update(visible=False)
463
+ return
464
+
465
+ if not has_received_chunks:
466
+ error_log = log + f"\n❌ Error: No audio chunks were received from the model. Generation time: {generation_time:.2f}s"
467
+ yield None, None, error_log, gr.update(visible=False)
468
+ return
469
+
470
+ if not has_yielded_audio:
471
+ error_log = log + f"\n❌ Error: Audio was generated but not streamed. Chunk count: {chunk_count}"
472
+ yield None, None, error_log, gr.update(visible=False)
473
+ return
474
+
475
+ # Prepare the complete audio
476
+ if all_audio_chunks:
477
+ complete_audio = np.concatenate(all_audio_chunks)
478
+ final_duration = len(complete_audio) / sample_rate
479
+
480
+ final_log = log + f"⏱️ Generation completed in {generation_time:.2f} seconds\n"
481
+ final_log += f"🎡 Final audio duration: {final_duration:.2f} seconds\n"
482
+ final_log += f"πŸ“Š Total chunks: {chunk_count}\n"
483
+ final_log += "✨ Generation successful! Complete audio is ready in the 'Complete Audio' tab.\n"
484
+ final_log += "πŸ’‘ Not satisfied? You can regenerate or adjust the CFG scale for different results."
485
+
486
+ # Final yield: Clear streaming audio and provide complete audio
487
+ yield None, (sample_rate, complete_audio), final_log, gr.update(visible=False)
488
+ else:
489
+ final_log = log + "❌ No audio was generated."
490
+ yield None, None, final_log, gr.update(visible=False)
491
+
492
+ except gr.Error as e:
493
+ # Handle Gradio-specific errors (like input validation)
494
+ self.is_generating = False
495
+ self.current_streamer = None
496
+ error_msg = f"❌ Input Error: {str(e)}"
497
+ print(error_msg)
498
+ yield None, None, error_msg, gr.update(visible=False)
499
+
500
+ except Exception as e:
501
+ self.is_generating = False
502
+ self.current_streamer = None
503
+ error_msg = f"❌ An unexpected error occurred: {str(e)}"
504
+ print(error_msg)
505
+ import traceback
506
+ traceback.print_exc()
507
+ yield None, None, error_msg, gr.update(visible=False)
508
+
509
+ def _generate_with_streamer(self, inputs, cfg_scale, audio_streamer, voice_cloning_enabled: bool):
510
+ """Helper method to run generation with streamer in a separate thread."""
511
+ try:
512
+ # Check for stop signal before starting generation
513
+ if self.stop_generation:
514
+ audio_streamer.end()
515
+ return
516
+
517
+ # Define a stop check function that can be called from generate
518
+ def check_stop_generation():
519
+ return self.stop_generation
520
+
521
+ outputs = self.model.generate(
522
+ **inputs,
523
+ max_new_tokens=None,
524
+ cfg_scale=cfg_scale,
525
+ tokenizer=self.processor.tokenizer,
526
+ generation_config={
527
+ 'do_sample': False,
528
+ },
529
+ audio_streamer=audio_streamer,
530
+ stop_check_fn=check_stop_generation, # Pass the stop check function
531
+ verbose=False, # Disable verbose in streaming mode
532
+ refresh_negative=True,
533
+ is_prefill=voice_cloning_enabled,
534
+ )
535
+
536
+ except Exception as e:
537
+ print(f"Error in generation thread: {e}")
538
+ traceback.print_exc()
539
+ # Make sure to end the stream on error
540
+ audio_streamer.end()
541
+
542
+ def stop_audio_generation(self):
543
+ """Stop the current audio generation process."""
544
+ self.stop_generation = True
545
+ if self.current_streamer is not None:
546
+ try:
547
+ self.current_streamer.end()
548
+ except Exception as e:
549
+ print(f"Error stopping streamer: {e}")
550
+ print("πŸ›‘ Audio generation stop requested")
551
+
552
+ def load_example_scripts(self):
553
+ """Load example scripts from the text_examples directory."""
554
+ examples_dir = os.path.join(os.path.dirname(__file__), "text_examples")
555
+ self.example_scripts = []
556
+
557
+ # Check if text_examples directory exists
558
+ if not os.path.exists(examples_dir):
559
+ print(f"Warning: text_examples directory not found at {examples_dir}")
560
+ return
561
+
562
+ # Get all .txt files in the text_examples directory
563
+ txt_files = sorted([f for f in os.listdir(examples_dir)
564
+ if f.lower().endswith('.txt') and os.path.isfile(os.path.join(examples_dir, f))])
565
+
566
+ for txt_file in txt_files:
567
+ file_path = os.path.join(examples_dir, txt_file)
568
+
569
+ import re
570
+ # Check if filename contains a time pattern like "45min", "90min", etc.
571
+ time_pattern = re.search(r'(\d+)min', txt_file.lower())
572
+ if time_pattern:
573
+ minutes = int(time_pattern.group(1))
574
+ if minutes > 15:
575
+ print(f"Skipping {txt_file}: duration {minutes} minutes exceeds 15-minute limit")
576
+ continue
577
+
578
+ try:
579
+ with open(file_path, 'r', encoding='utf-8') as f:
580
+ script_content = f.read().strip()
581
+
582
+ # Remove empty lines and lines with only whitespace
583
+ script_content = '\n'.join(line for line in script_content.split('\n') if line.strip())
584
+
585
+ if not script_content:
586
+ continue
587
+
588
+ # Parse the script to determine number of speakers
589
+ num_speakers = self._get_num_speakers_from_script(script_content)
590
+
591
+ # Add to examples list as [num_speakers, script_content]
592
+ self.example_scripts.append([num_speakers, script_content])
593
+ print(f"Loaded example: {txt_file} with {num_speakers} speakers")
594
+
595
+ except Exception as e:
596
+ print(f"Error loading example script {txt_file}: {e}")
597
+
598
+ if self.example_scripts:
599
+ print(f"Successfully loaded {len(self.example_scripts)} example scripts")
600
+ else:
601
+ print("No example scripts were loaded")
602
+
603
+ def _get_num_speakers_from_script(self, script: str) -> int:
604
+ """Determine the number of unique speakers in a script."""
605
+ import re
606
+ speakers = set()
607
+
608
+ lines = script.strip().split('\n')
609
+ for line in lines:
610
+ # Use regex to find speaker patterns
611
+ match = re.match(r'^Speaker\s+(\d+)\s*:', line.strip(), re.IGNORECASE)
612
+ if match:
613
+ speaker_id = int(match.group(1))
614
+ speakers.add(speaker_id)
615
+
616
+ # If no speakers found, default to 1
617
+ if not speakers:
618
+ return 1
619
+
620
+ # Return the maximum speaker ID + 1 (assuming 0-based indexing)
621
+ # or the count of unique speakers if they're 1-based
622
+ max_speaker = max(speakers)
623
+ min_speaker = min(speakers)
624
+
625
+ if min_speaker == 0:
626
+ return max_speaker + 1
627
+ else:
628
+ # Assume 1-based indexing, return the count
629
+ return len(speakers)
630
+
631
+
632
+ def create_demo_interface(demo_instance: VibeVoiceDemo):
633
+ """Create the Gradio interface with streaming support."""
634
+
635
+ # Custom CSS for high-end aesthetics with lighter theme
636
+ custom_css = """
637
+ /* Modern light theme with gradients */
638
+ .gradio-container {
639
+ background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
640
+ font-family: 'SF Pro Display', -apple-system, BlinkMacSystemFont, sans-serif;
641
+ }
642
+
643
+ /* Header styling */
644
+ .main-header {
645
+ background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
646
+ padding: 2rem;
647
+ border-radius: 20px;
648
+ margin-bottom: 2rem;
649
+ text-align: center;
650
+ box-shadow: 0 10px 40px rgba(102, 126, 234, 0.3);
651
+ }
652
+
653
+ .main-header h1 {
654
+ color: white;
655
+ font-size: 2.5rem;
656
+ font-weight: 700;
657
+ margin: 0;
658
+ text-shadow: 0 2px 4px rgba(0,0,0,0.3);
659
+ }
660
+
661
+ .main-header p {
662
+ color: rgba(255,255,255,0.9);
663
+ font-size: 1.1rem;
664
+ margin: 0.5rem 0 0 0;
665
+ }
666
+
667
+ /* Card styling */
668
+ .settings-card, .generation-card {
669
+ background: rgba(255, 255, 255, 0.8);
670
+ backdrop-filter: blur(10px);
671
+ border: 1px solid rgba(226, 232, 240, 0.8);
672
+ border-radius: 16px;
673
+ padding: 1.5rem;
674
+ margin-bottom: 1rem;
675
+ box-shadow: 0 8px 32px rgba(0, 0, 0, 0.1);
676
+ }
677
+
678
+ /* Speaker selection styling */
679
+ .speaker-grid {
680
+ display: grid;
681
+ gap: 1rem;
682
+ margin-bottom: 1rem;
683
+ }
684
+
685
+ .speaker-item {
686
+ background: linear-gradient(135deg, #e2e8f0 0%, #cbd5e1 100%);
687
+ border: 1px solid rgba(148, 163, 184, 0.4);
688
+ border-radius: 12px;
689
+ padding: 1rem;
690
+ color: #374151;
691
+ font-weight: 500;
692
+ }
693
+
694
+ /* Streaming indicator */
695
+ .streaming-indicator {
696
+ display: inline-block;
697
+ width: 10px;
698
+ height: 10px;
699
+ background: #22c55e;
700
+ border-radius: 50%;
701
+ margin-right: 8px;
702
+ animation: pulse 1.5s infinite;
703
+ }
704
+
705
+ @keyframes pulse {
706
+ 0% { opacity: 1; transform: scale(1); }
707
+ 50% { opacity: 0.5; transform: scale(1.1); }
708
+ 100% { opacity: 1; transform: scale(1); }
709
+ }
710
+
711
+ /* Queue status styling */
712
+ .queue-status {
713
+ background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%);
714
+ border: 1px solid rgba(14, 165, 233, 0.3);
715
+ border-radius: 8px;
716
+ padding: 0.75rem;
717
+ margin: 0.5rem 0;
718
+ text-align: center;
719
+ font-size: 0.9rem;
720
+ color: #0369a1;
721
+ }
722
+
723
+ .generate-btn {
724
+ background: linear-gradient(135deg, #059669 0%, #0d9488 100%);
725
+ border: none;
726
+ border-radius: 12px;
727
+ padding: 1rem 2rem;
728
+ color: white;
729
+ font-weight: 600;
730
+ font-size: 1.1rem;
731
+ box-shadow: 0 4px 20px rgba(5, 150, 105, 0.4);
732
+ transition: all 0.3s ease;
733
+ }
734
+
735
+ .generate-btn:hover {
736
+ transform: translateY(-2px);
737
+ box-shadow: 0 6px 25px rgba(5, 150, 105, 0.6);
738
+ }
739
+
740
+ .stop-btn {
741
+ background: linear-gradient(135deg, #ef4444 0%, #dc2626 100%);
742
+ border: none;
743
+ border-radius: 12px;
744
+ padding: 1rem 2rem;
745
+ color: white;
746
+ font-weight: 600;
747
+ font-size: 1.1rem;
748
+ box-shadow: 0 4px 20px rgba(239, 68, 68, 0.4);
749
+ transition: all 0.3s ease;
750
+ }
751
+
752
+ .stop-btn:hover {
753
+ transform: translateY(-2px);
754
+ box-shadow: 0 6px 25px rgba(239, 68, 68, 0.6);
755
+ }
756
+
757
+ /* Audio player styling */
758
+ .audio-output {
759
+ background: linear-gradient(135deg, #f1f5f9 0%, #e2e8f0 100%);
760
+ border-radius: 16px;
761
+ padding: 1.5rem;
762
+ border: 1px solid rgba(148, 163, 184, 0.3);
763
+ }
764
+
765
+ .complete-audio-section {
766
+ margin-top: 1rem;
767
+ padding: 1rem;
768
+ background: linear-gradient(135deg, #f0fdf4 0%, #dcfce7 100%);
769
+ border: 1px solid rgba(34, 197, 94, 0.3);
770
+ border-radius: 12px;
771
+ }
772
+
773
+ /* Text areas */
774
+ .script-input, .log-output {
775
+ background: rgba(255, 255, 255, 0.9) !important;
776
+ border: 1px solid rgba(148, 163, 184, 0.4) !important;
777
+ border-radius: 12px !important;
778
+ color: #1e293b !important;
779
+ font-family: 'JetBrains Mono', monospace !important;
780
+ }
781
+
782
+ .script-input::placeholder {
783
+ color: #64748b !important;
784
+ }
785
+
786
+ /* Sliders */
787
+ .slider-container {
788
+ background: rgba(248, 250, 252, 0.8);
789
+ border: 1px solid rgba(226, 232, 240, 0.6);
790
+ border-radius: 8px;
791
+ padding: 1rem;
792
+ margin: 0.5rem 0;
793
+ }
794
+
795
+ /* Labels and text */
796
+ .gradio-container label {
797
+ color: #374151 !important;
798
+ font-weight: 600 !important;
799
+ }
800
+
801
+ .gradio-container .markdown {
802
+ color: #1f2937 !important;
803
+ }
804
+
805
+ /* Responsive design */
806
+ @media (max-width: 768px) {
807
+ .main-header h1 { font-size: 2rem; }
808
+ .settings-card, .generation-card { padding: 1rem; }
809
+ }
810
+
811
+ /* Random example button styling - more subtle professional color */
812
+ .random-btn {
813
+ background: linear-gradient(135deg, #64748b 0%, #475569 100%);
814
+ border: none;
815
+ border-radius: 12px;
816
+ padding: 1rem 1.5rem;
817
+ color: white;
818
+ font-weight: 600;
819
+ font-size: 1rem;
820
+ box-shadow: 0 4px 20px rgba(100, 116, 139, 0.3);
821
+ transition: all 0.3s ease;
822
+ display: inline-flex;
823
+ align-items: center;
824
+ gap: 0.5rem;
825
+ }
826
+
827
+ .random-btn:hover {
828
+ transform: translateY(-2px);
829
+ box-shadow: 0 6px 25px rgba(100, 116, 139, 0.4);
830
+ background: linear-gradient(135deg, #475569 0%, #334155 100%);
831
+ }
832
+ """
833
+
834
+ with gr.Blocks(
835
+ title="VibeVoice - AI Podcast Generator",
836
+ css=custom_css,
837
+ theme=gr.themes.Soft(
838
+ primary_hue="blue",
839
+ secondary_hue="purple",
840
+ neutral_hue="slate",
841
+ )
842
+ ) as interface:
843
+
844
+ # Header
845
+ gr.HTML("""
846
+ <div class="main-header">
847
+ <h1>πŸŽ™οΈ Vibe Podcasting </h1>
848
+ <p>Generating Long-form Multi-speaker AI Podcast with VibeVoice</p>
849
+ </div>
850
+ """)
851
+
852
+ with gr.Row():
853
+ # Left column - Settings
854
+ with gr.Column(scale=1, elem_classes="settings-card"):
855
+ gr.Markdown("### πŸŽ›οΈ **Podcast Settings**")
856
+
857
+ # Number of speakers
858
+ num_speakers = gr.Slider(
859
+ minimum=1,
860
+ maximum=4,
861
+ value=2,
862
+ step=1,
863
+ label="Number of Speakers",
864
+ elem_classes="slider-container"
865
+ )
866
+
867
+ # Speaker selection
868
+ gr.Markdown("### 🎭 **Speaker Selection**")
869
+
870
+ available_speaker_names = list(demo_instance.available_voices.keys())
871
+ # default_speakers = available_speaker_names[:4] if len(available_speaker_names) >= 4 else available_speaker_names
872
+ default_speakers = ['en-Alice_woman', 'en-Carter_man', 'en-Frank_man', 'en-Maya_woman']
873
+
874
+ speaker_selections = []
875
+ for i in range(4):
876
+ default_value = default_speakers[i] if i < len(default_speakers) else None
877
+ speaker = gr.Dropdown(
878
+ choices=available_speaker_names,
879
+ value=default_value,
880
+ label=f"Speaker {i+1}",
881
+ visible=(i < 2), # Initially show only first 2 speakers
882
+ elem_classes="speaker-item"
883
+ )
884
+ speaker_selections.append(speaker)
885
+
886
+ # Advanced settings
887
+ gr.Markdown("### βš™οΈ **Advanced Settings**")
888
+
889
+ # Sampling parameters (contains all generation settings)
890
+ with gr.Accordion("Generation Parameters", open=False):
891
+ cfg_scale = gr.Slider(
892
+ minimum=1.0,
893
+ maximum=2.0,
894
+ value=1.3,
895
+ step=0.05,
896
+ label="CFG Scale (Guidance Strength)",
897
+ # info="Higher values increase adherence to text",
898
+ elem_classes="slider-container"
899
+ )
900
+ disable_voice_cloning = gr.Checkbox(
901
+ value=False,
902
+ label="Disable voice cloning (skip conditioning voice prompts)",
903
+ info="When enabled, sets is_prefill=False so the model ignores provided speaker audio."
904
+ )
905
+
906
+ # Right column - Generation
907
+ with gr.Column(scale=2, elem_classes="generation-card"):
908
+ gr.Markdown("### πŸ“ **Script Input**")
909
+
910
+ script_input = gr.Textbox(
911
+ label="Conversation Script",
912
+ placeholder="""Enter your podcast script here. You can format it as:
913
+
914
+ Speaker 1: Welcome to our podcast today!
915
+ Speaker 2: Thanks for having me. I'm excited to discuss...
916
+
917
+ Or paste text directly and it will auto-assign speakers.""",
918
+ lines=12,
919
+ max_lines=20,
920
+ elem_classes="script-input"
921
+ )
922
+
923
+ # Button row with Random Example on the left and Generate on the right
924
+ with gr.Row():
925
+ # Random example button (now on the left)
926
+ random_example_btn = gr.Button(
927
+ "🎲 Random Example",
928
+ size="lg",
929
+ variant="secondary",
930
+ elem_classes="random-btn",
931
+ scale=1 # Smaller width
932
+ )
933
+
934
+ # Generate button (now on the right)
935
+ generate_btn = gr.Button(
936
+ "πŸš€ Generate Podcast",
937
+ size="lg",
938
+ variant="primary",
939
+ elem_classes="generate-btn",
940
+ scale=2 # Wider than random button
941
+ )
942
+
943
+ # Stop button
944
+ stop_btn = gr.Button(
945
+ "πŸ›‘ Stop Generation",
946
+ size="lg",
947
+ variant="stop",
948
+ elem_classes="stop-btn",
949
+ visible=False
950
+ )
951
+
952
+ # Streaming status indicator
953
+ streaming_status = gr.HTML(
954
+ value="""
955
+ <div style="background: linear-gradient(135deg, #dcfce7 0%, #bbf7d0 100%);
956
+ border: 1px solid rgba(34, 197, 94, 0.3);
957
+ border-radius: 8px;
958
+ padding: 0.75rem;
959
+ margin: 0.5rem 0;
960
+ text-align: center;
961
+ font-size: 0.9rem;
962
+ color: #166534;">
963
+ <span class="streaming-indicator"></span>
964
+ <strong>LIVE STREAMING</strong> - Audio is being generated in real-time
965
+ </div>
966
+ """,
967
+ visible=False,
968
+ elem_id="streaming-status"
969
+ )
970
+
971
+ # Output section
972
+ gr.Markdown("### 🎡 **Generated Podcast**")
973
+
974
+ # Streaming audio output (outside of tabs for simpler handling)
975
+ audio_output = gr.Audio(
976
+ label="Streaming Audio (Real-time)",
977
+ type="numpy",
978
+ elem_classes="audio-output",
979
+ streaming=True, # Enable streaming mode
980
+ autoplay=True,
981
+ show_download_button=False, # Explicitly show download button
982
+ visible=True
983
+ )
984
+
985
+ # Complete audio output (non-streaming)
986
+ complete_audio_output = gr.Audio(
987
+ label="Complete Podcast (Download after generation)",
988
+ type="numpy",
989
+ elem_classes="audio-output complete-audio-section",
990
+ streaming=False, # Non-streaming mode
991
+ autoplay=False,
992
+ show_download_button=True, # Explicitly show download button
993
+ visible=False # Initially hidden, shown when audio is ready
994
+ )
995
+
996
+ gr.Markdown("""
997
+ *πŸ’‘ **Streaming**: Audio plays as it's being generated (may have slight pauses)
998
+ *πŸ’‘ **Complete Audio**: Will appear below after generation finishes*
999
+ """)
1000
+
1001
+ # Generation log
1002
+ log_output = gr.Textbox(
1003
+ label="Generation Log",
1004
+ lines=8,
1005
+ max_lines=15,
1006
+ interactive=False,
1007
+ elem_classes="log-output"
1008
+ )
1009
+
1010
def update_speaker_visibility(num_speakers):
    """Return visibility updates for the four speaker dropdowns.

    The first `num_speakers` dropdowns become visible; the rest are hidden.
    """
    return [gr.update(visible=(slot < num_speakers)) for slot in range(4)]
1015
+
1016
+ num_speakers.change(
1017
+ fn=update_speaker_visibility,
1018
+ inputs=[num_speakers],
1019
+ outputs=speaker_selections
1020
+ )
1021
+
1022
+ # Main generation function with streaming
1023
def generate_podcast_wrapper(num_speakers, script, speaker_1, speaker_2, speaker_3, speaker_4, cfg_scale, disable_voice_cloning):
    """Wrapper generator bridging the UI to the streaming generation call.

    Yields 6-tuples matching the wired Gradio outputs:
    (streaming audio chunk, complete-audio update, log text,
     streaming-status update, generate-btn update, stop-btn update).
    """
    try:
        speakers = [speaker_1, speaker_2, speaker_3, speaker_4]

        # Clear outputs and reset visibility at start.
        yield None, gr.update(value=None, visible=False), "πŸŽ™οΈ Starting generation...", gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)

        # The underlying generator yields repeatedly as audio is produced.
        for streaming_audio, complete_audio, log, streaming_visible in demo_instance.generate_podcast_streaming(
            num_speakers=int(num_speakers),
            script=script,
            speaker_1=speakers[0],
            speaker_2=speakers[1],
            speaker_3=speakers[2],
            speaker_4=speakers[3],
            cfg_scale=cfg_scale,
            disable_voice_cloning=disable_voice_cloning
        ):
            if complete_audio is not None:
                # Final yield: clear the streaming player, reveal the
                # complete audio, restore Generate and hide Stop.
                yield None, gr.update(value=complete_audio, visible=True), log, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
            elif streaming_audio is not None:
                # Streaming state: push the new chunk, keep Stop visible.
                yield streaming_audio, gr.update(visible=False), log, streaming_visible, gr.update(visible=False), gr.update(visible=True)
            else:
                # No new audio this tick; just refresh log/status.
                yield None, gr.update(visible=False), log, streaming_visible, gr.update(visible=False), gr.update(visible=True)

    except Exception as e:
        # Surface the failure in the log box and reset the button states.
        error_msg = f"❌ A critical error occurred in the wrapper: {str(e)}"
        print(error_msg)
        import traceback
        traceback.print_exc()
        yield None, gr.update(value=None, visible=False), error_msg, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
1065
+
1066
def stop_generation_handler():
    """Request cancellation of the running generation and reset the UI."""
    demo_instance.stop_audio_generation()
    # Tuple order matches outputs: log_output, streaming_status,
    # generate_btn, stop_btn.
    return (
        "πŸ›‘ Generation stopped.",
        gr.update(visible=False),
        gr.update(visible=True),
        gr.update(visible=False),
    )
1071
+
1072
+ # Add a clear audio function
1073
def clear_audio_outputs():
    """Reset both audio players ahead of a fresh generation run."""
    streaming_reset = None
    complete_reset = gr.update(value=None, visible=False)
    return streaming_reset, complete_reset
1076
+
1077
+ # Connect generation button with streaming outputs
1078
+ generate_btn.click(
1079
+ fn=clear_audio_outputs,
1080
+ inputs=[],
1081
+ outputs=[audio_output, complete_audio_output],
1082
+ queue=False
1083
+ ).then( # Immediate UI update to hide Generate, show Stop (non-queued)
1084
+ fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
1085
+ inputs=[],
1086
+ outputs=[generate_btn, stop_btn],
1087
+ queue=False
1088
+ ).then(
1089
+ fn=generate_podcast_wrapper,
1090
+ inputs=[num_speakers, script_input] + speaker_selections + [cfg_scale, disable_voice_cloning],
1091
+ outputs=[audio_output, complete_audio_output, log_output, streaming_status, generate_btn, stop_btn],
1092
+ queue=True # Enable Gradio's built-in queue
1093
+ )
1094
+
1095
+ # Connect stop button
1096
+ stop_btn.click(
1097
+ fn=stop_generation_handler,
1098
+ inputs=[],
1099
+ outputs=[log_output, streaming_status, generate_btn, stop_btn],
1100
+ queue=False # Don't queue stop requests
1101
+ ).then(
1102
+ # Clear both audio outputs after stopping
1103
+ fn=lambda: (None, None),
1104
+ inputs=[],
1105
+ outputs=[audio_output, complete_audio_output],
1106
+ queue=False
1107
+ )
1108
+
1109
+ # Function to randomly select an example
1110
def load_random_example():
    """Pick one example script at random; return (num_speakers, script)."""
    import random

    # Prefer the dynamically loaded examples; fall back to a built-in one
    # when the attribute is missing or empty.
    scripts = getattr(demo_instance, 'example_scripts', None) or [
        [2, "Speaker 0: Welcome to our AI podcast demonstration!\nSpeaker 1: Thanks for having me. This is exciting!"]
    ]

    if not scripts:
        # Defensive default when no examples exist at all.
        return 2, ""

    chosen = random.choice(scripts)
    return chosen[0], chosen[1]
1134
+
1135
+ # Connect random example button
1136
+ random_example_btn.click(
1137
+ fn=load_random_example,
1138
+ inputs=[],
1139
+ outputs=[num_speakers, script_input],
1140
+ queue=False # Don't queue this simple operation
1141
+ )
1142
+
1143
+ # Add usage tips
1144
+ gr.Markdown("""
1145
+ ### πŸ’‘ **Usage Tips**
1146
+
1147
+ - Click **πŸš€ Generate Podcast** to start audio generation
1148
+ - **Live Streaming** tab shows audio as it's generated (may have slight pauses)
1149
+ - **Complete Audio** tab provides the full, uninterrupted podcast after generation
1150
+ - During generation, you can click **πŸ›‘ Stop Generation** to interrupt the process
1151
+ - The streaming indicator shows real-time generation progress
1152
+ """)
1153
+
1154
+ # Add example scripts
1155
+ gr.Markdown("### πŸ“š **Example Scripts**")
1156
+
1157
+ # Use dynamically loaded examples if available, otherwise provide a default
1158
+ if hasattr(demo_instance, 'example_scripts') and demo_instance.example_scripts:
1159
+ example_scripts = demo_instance.example_scripts
1160
+ else:
1161
+ # Fallback to a simple default example if no scripts loaded
1162
+ example_scripts = [
1163
+ [1, "Speaker 1: Welcome to our AI podcast demonstration! This is a sample script showing how VibeVoice can generate natural-sounding speech."]
1164
+ ]
1165
+
1166
+ gr.Examples(
1167
+ examples=example_scripts,
1168
+ inputs=[num_speakers, script_input],
1169
+ label="Try these example scripts:"
1170
+ )
1171
+
1172
+ # --- Risks & limitations (footer) ---
1173
+ gr.Markdown(
1174
+ """
1175
+ ## Risks and limitations
1176
+
1177
+ While efforts have been made to optimize it through various techniques, it may still produce outputs that are unexpected, biased, or inaccurate. VibeVoice inherits any biases, errors, or omissions produced by its base model (specifically, Qwen2.5 1.5b in this release).
1178
+ Potential for Deepfakes and Disinformation: High-quality synthetic speech can be misused to create convincing fake audio content for impersonation, fraud, or spreading disinformation. Users must ensure transcripts are reliable, check content accuracy, and avoid using generated content in misleading ways. Users are expected to use the generated content and to deploy the models in a lawful manner, in full compliance with all applicable laws and regulations in the relevant jurisdictions. It is best practice to disclose the use of AI when sharing AI-generated content.
1179
+ """,
1180
+ elem_classes="generation-card", # ε―ι€‰οΌšε€η”¨ε‘η‰‡ζ ·εΌ
1181
+ )
1182
+ return interface
1183
+
1184
+
1185
def convert_to_16_bit_wav(data):
    """Convert an audio buffer (torch tensor or array-like) to int16 PCM.

    Peak-normalizes only when samples exceed the [-1, 1] range, then scales
    to the 16-bit integer range. Returns an np.int16 array of the same shape.
    """
    # Tensors may live on an accelerator and track gradients; detach and
    # move to host memory before converting.
    if torch.is_tensor(data):
        data = data.detach().cpu().numpy()

    # asarray avoids an extra copy when data is already an ndarray.
    data = np.asarray(data)

    # Empty input: nothing to scale; np.max would raise on a zero-size array.
    if data.size == 0:
        return data.astype(np.int16)

    # Normalize to [-1, 1] only if the signal exceeds full scale.
    # Compute the peak once instead of twice.
    peak = np.max(np.abs(data))
    if peak > 1.0:
        data = data / peak

    # Scale to the 16-bit integer range.
    return (data * 32767).astype(np.int16)
1200
+
1201
+
1202
def parse_args():
    """Build and parse the command-line options for the VibeVoice demo."""
    # Pick the best available accelerator as the default device.
    if torch.cuda.is_available():
        default_device = "cuda"
    elif torch.backends.mps.is_available():
        default_device = "mps"
    else:
        default_device = "cpu"

    parser = argparse.ArgumentParser(description="VibeVoice Gradio Demo")
    parser.add_argument("--model_path", type=str, default="/tmp/vibevoice-model",
                        help="Path to the VibeVoice model directory")
    parser.add_argument("--device", type=str, default=default_device,
                        help="Device for inference: cuda | mps | cpu")
    parser.add_argument("--inference_steps", type=int, default=10,
                        help="Number of inference steps for DDPM (not exposed to users)")
    parser.add_argument("--share", action="store_true",
                        help="Share the demo publicly via Gradio")
    parser.add_argument("--port", type=int, default=7860,
                        help="Port to run the demo on")
    parser.add_argument("--checkpoint_path", type=str, default=None,
                        help="Path to a fine-tuned checkpoint directory containing LoRA adapters (optional)")

    return parser.parse_args()
1241
+
1242
+
1243
def main():
    """Entry point: parse CLI args, build the demo, and serve it."""
    args = parse_args()

    set_seed(42)  # fixed seed for reproducibility

    print("πŸŽ™οΈ Initializing VibeVoice Demo with Streaming Support...")

    # Build the model-backed demo instance from the CLI options.
    demo_instance = VibeVoiceDemo(
        model_path=args.model_path,
        device=args.device,
        inference_steps=args.inference_steps,
        adapter_path=args.checkpoint_path,
    )

    interface = create_demo_interface(demo_instance)

    print(f"πŸš€ Launching demo on port {args.port}")
    print(f"πŸ“ Model path: {args.model_path}")
    print(f"🎭 Available voices: {len(demo_instance.available_voices)}")
    print("πŸ”΄ Streaming mode: ENABLED")
    print("πŸ”’ Session isolation: ENABLED")

    try:
        queued = interface.queue(
            max_size=20,                  # maximum queue size
            default_concurrency_limit=1,  # process one request at a time
        )
        # NOTE(review): server_port is intentionally not passed, so --port is
        # informational only (Gradio falls back to GRADIO_SERVER_PORT / 7860)
        # — confirm this matches the intended deployment.
        queued.launch(
            share=args.share,
            server_name="0.0.0.0" if args.share else "127.0.0.1",
            show_error=True,
            show_api=False,  # hide API docs for a cleaner interface
        )
    except KeyboardInterrupt:
        print("\nπŸ›‘ Shutting down gracefully...")
    except Exception as e:
        print(f"❌ Server error: {e}")
        raise


if __name__ == "__main__":
    main()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ git+https://github.com/vibevoice-community/VibeVoice
2
+ soundfile