Commit f0b144f (parent: f24e967): Update app.py

app.py CHANGED
@@ -164,7 +164,7 @@ def get_bev(seed):
     semantic = Image.open(semantic_path)
     return semantic, heightmap
 
-def get_video(seed, num_frames):
+def get_video(seed, num_frames, reso_h, reso_w):
     device = torch.device('cuda')
     rng_cuda = torch.Generator(device=device)
     rng_cuda = rng_cuda.manual_seed(seed)
@@ -172,6 +172,8 @@ def get_video(seed, num_frames):
     torch.cuda.manual_seed(seed)
     net_G.voxel.next_world(device, world_dir, checkpoint)
     cam_mode = cfg.inference_args.camera_mode
+    cfg.inference_args.cam_maxstep = num_frames
+    cfg.inference_args.resolution_hw = [reso_h, reso_w]
     current_outdir = os.path.join(world_dir, 'camera_{:02d}'.format(cam_mode))
     os.makedirs(current_outdir, exist_ok=True)
     z = torch.empty(1, net_G.style_dims, dtype=torch.float32, device=device)
@@ -188,7 +190,11 @@ markdown=f'''
 - [Project Page](https://scene-dreamer.github.io/)
 - [arXiv Link](https://arxiv.org/abs/2302.01330)
 Licensed under the S-Lab License.
-
+
+
+We offer a sampled scene whose BEVs are shown on the right. You can also use the button "Generate BEV" to randomly sample a new 3D world represented by a height map and a semantic map. But it requires a long time.
+
+To render video, push the button "Render" to generate a camera trajectory flying through the world. You can specify rendering options as shown below!
 '''
 
 with gr.Blocks() as demo:
@@ -198,23 +204,25 @@ with gr.Blocks() as demo:
     with gr.Column():
         with gr.Row():
             with gr.Column():
-                semantic = gr.Image(type="pil", shape=(2048, 2048))
+                semantic = gr.Image(value='./test/colormap.png',type="pil", shape=(2048, 2048))
             with gr.Column():
-                height = gr.Image(type="pil", shape=(2048, 2048))
+                height = gr.Image(value='./test/heightmap.png', type="pil", shape=(2048, 2048))
         with gr.Row():
             # with gr.Column():
            # image = gr.Image(type='pil', shape(540, 960))
            with gr.Column():
-                video=gr.Video()
+                video = gr.Video()
         with gr.Row():
-
-
+            num_frames = gr.Slider(minimum=10, maximum=200, value=20, step=1, label='Number of rendered frames')
+            user_seed = gr.Slider(minimum=0, maximum=999999, value=8888, step=1, label='Random seed')
+            resolution_h = gr.Slider(minimum=256, maximum=2160, value=270, step=1, label='Height of rendered image')
+            resolution_w = gr.Slider(minimum=256, maximum=3840, value=480, step=1, label='Width of rendered image')
 
     with gr.Row():
         btn = gr.Button(value="Generate BEV")
         btn_2=gr.Button(value="Render")
 
     btn.click(get_bev,[user_seed],[semantic, height])
-    btn_2.click(get_video,[user_seed, num_frames],[video])
+    btn_2.click(get_video,[user_seed, num_frames, resolution_h, resolution_w], [video])
 
 demo.launch(debug=True)
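
For reference, below is a minimal, self-contained Gradio sketch of the pattern this commit introduces: the new sliders are passed positionally, in input-list order, to the render callback, which copies them into the inference config before rendering. The stub_get_video function, the SimpleNamespace cfg, and the Textbox output are placeholders standing in for SceneDreamer's real get_video, cfg.inference_args, and video rendering; they are not code from this repository.

# Sketch only: gradio is the sole dependency; the SceneDreamer model,
# torch seeding, and video rendering are replaced by a stub.
from types import SimpleNamespace

import gradio as gr

# Placeholder for cfg.inference_args in the real app.
cfg = SimpleNamespace(inference_args=SimpleNamespace(cam_maxstep=20,
                                                     resolution_hw=[270, 480]))


def stub_get_video(seed, num_frames, reso_h, reso_w):
    # Mirrors the commit: copy the UI options into the inference config
    # before rendering; the real get_video would then render and return a video.
    cfg.inference_args.cam_maxstep = int(num_frames)
    cfg.inference_args.resolution_hw = [int(reso_h), int(reso_w)]
    return (f"would render {cfg.inference_args.cam_maxstep} frames at "
            f"{cfg.inference_args.resolution_hw[0]}x{cfg.inference_args.resolution_hw[1]} "
            f"with seed {int(seed)}")


with gr.Blocks() as demo:
    with gr.Row():
        num_frames = gr.Slider(minimum=10, maximum=200, value=20, step=1, label='Number of rendered frames')
        user_seed = gr.Slider(minimum=0, maximum=999999, value=8888, step=1, label='Random seed')
        resolution_h = gr.Slider(minimum=256, maximum=2160, value=270, step=1, label='Height of rendered image')
        resolution_w = gr.Slider(minimum=256, maximum=3840, value=480, step=1, label='Width of rendered image')
    out = gr.Textbox(label='Render options received by the callback')
    btn_2 = gr.Button(value="Render")
    # Slider values are forwarded to the callback positionally, in input-list order.
    btn_2.click(stub_get_video, [user_seed, num_frames, resolution_h, resolution_w], [out])

demo.launch()

In the actual Space, btn_2.click wires the same four sliders to get_video and the output component is the gr.Video defined above, so get_video's new reso_h and reso_w parameters only need to match the order of the inputs list.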