BrianatCambridge committed on
Commit 6d84d96 · verified · 1 Parent(s): 63751d0

Upload modeling_qwen3_vl.py with huggingface_hub

Files changed (1)
  1. modeling_qwen3_vl.py +2021 -0
modeling_qwen3_vl.py ADDED
@@ -0,0 +1,2021 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/qwen3_vl/modular_qwen3_vl.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_qwen3_vl.py file directly. One of our CI checks enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ from dataclasses import dataclass
23
+ from typing import Any, Callable, Optional, Union, Dict
24
+ import gc
25
+ import math
26
+
27
+ import torch
28
+ import torch.nn as nn
29
+ import torch.nn.functional as F
30
+ import torch.distributed as dist
31
+
32
+ from transformers.activations import ACT2FN
33
+ from transformers.cache_utils import Cache, DynamicCache
34
+ from transformers.generation import GenerationMixin
35
+ from transformers.integrations import use_kernel_forward_from_hub
36
+ from transformers.masking_utils import create_causal_mask
37
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
38
+ from transformers.modeling_layers import GradientCheckpointingLayer
39
+ from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
40
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
41
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
42
+ from transformers.processing_utils import Unpack
43
+ from transformers.utils import TransformersKwargs, auto_docstring, is_torchdynamo_compiling
44
+ from transformers.utils.deprecation import deprecate_kwarg
45
+ from transformers.utils.generic import check_model_inputs
46
+ from qwenvl.model.configuration_qwen3_vl import Qwen3VLConfig, Qwen3VLTextConfig, Qwen3VLVisionConfig
47
+ from liger_kernel.transformers.model.loss_utils import LigerForCausalLMLoss
48
+ from qwenvl.model.modeling_whisper import WhisperEncoder
49
+ from qwenvl.model.ttt.ttt_layer import TTTWrapper, SSMGating
50
+ from qwenvl.model.ttt.configs import ModelConfig as TTTModelConfig
51
+
52
+ class Qwen3VLVisionMLP(nn.Module):
53
+ def __init__(self, config):
54
+ super().__init__()
55
+ self.hidden_size = config.hidden_size
56
+ self.intermediate_size = config.intermediate_size
57
+ self.linear_fc1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
58
+ self.linear_fc2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
59
+ self.act_fn = ACT2FN[config.hidden_act]
60
+
61
+ def forward(self, hidden_state):
62
+ return self.linear_fc2(self.act_fn(self.linear_fc1(hidden_state)))
63
+
64
+
65
+ class Qwen3VLVisionPatchEmbed(nn.Module):
66
+ def __init__(self, config) -> None:
67
+ super().__init__()
68
+ self.patch_size = config.patch_size
69
+ self.temporal_patch_size = config.temporal_patch_size
70
+ self.in_channels = config.in_channels
71
+ self.embed_dim = config.hidden_size
72
+
73
+ kernel_size = [self.temporal_patch_size, self.patch_size, self.patch_size]
74
+ self.proj = nn.Conv3d(self.in_channels, self.embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=True)
75
+
76
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
77
+ target_dtype = self.proj.weight.dtype
78
+ hidden_states = hidden_states.view(
79
+ -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size
80
+ )
81
+ hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim)
82
+ return hidden_states
83
+
84
+
85
+ class Qwen3VLVisionRotaryEmbedding(nn.Module):
86
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
87
+
88
+ def __init__(self, dim: int, theta: float = 10000.0) -> None:
89
+ super().__init__()
90
+ inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
91
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
92
+
93
+ def forward(self, seqlen: int) -> torch.Tensor:
94
+ seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
95
+ freqs = torch.outer(seq, self.inv_freq)
96
+ return freqs
97
+
98
+
99
+ class Qwen3VLVisionPatchMerger(nn.Module):
100
+ def __init__(self, config: Qwen3VLVisionConfig, use_postshuffle_norm=False) -> None:
101
+ super().__init__()
102
+ self.hidden_size = config.hidden_size * (config.spatial_merge_size**2)
103
+ self.use_postshuffle_norm = use_postshuffle_norm
104
+ self.norm = nn.LayerNorm(self.hidden_size if use_postshuffle_norm else config.hidden_size, eps=1e-6)
105
+ self.linear_fc1 = nn.Linear(self.hidden_size, self.hidden_size)
106
+ self.act_fn = nn.GELU()
107
+ self.linear_fc2 = nn.Linear(self.hidden_size, config.out_hidden_size)
108
+
109
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
110
+ x = self.norm(x.view(-1, self.hidden_size) if self.use_postshuffle_norm else x).view(-1, self.hidden_size)
111
+ x = self.linear_fc2(self.act_fn(self.linear_fc1(x)))
112
+ return x
113
+
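+ # Editor's note (illustrative, not part of the generated file): the merger consumes tokens after
+ # spatial shuffling, so its input width is config.hidden_size * spatial_merge_size**2 (e.g. a
+ # hypothetical hidden_size=1152 with spatial_merge_size=2 gives 4608), and it projects each merged
+ # patch group down to config.out_hidden_size for the language model.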
114
+
115
+ def rotate_half(x):
116
+ """Rotates half the hidden dims of the input."""
117
+ x1 = x[..., : x.shape[-1] // 2]
118
+ x2 = x[..., x.shape[-1] // 2 :]
119
+ return torch.cat((-x2, x1), dim=-1)
120
+
121
+
122
+ def apply_rotary_pos_emb_vision(
123
+ q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
124
+ ) -> tuple[torch.Tensor, torch.Tensor]:
125
+ orig_q_dtype = q.dtype
126
+ orig_k_dtype = k.dtype
127
+ q, k = q.float(), k.float()
128
+ cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
129
+ q_embed = (q * cos) + (rotate_half(q) * sin)
130
+ k_embed = (k * cos) + (rotate_half(k) * sin)
131
+ q_embed = q_embed.to(orig_q_dtype)
132
+ k_embed = k_embed.to(orig_k_dtype)
133
+ return q_embed, k_embed
134
+
135
+
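+ # Editor's note (hedged sketch): rotate_half maps [x1, x2] -> [-x2, x1] over the two halves of the
+ # last dim, so q * cos + rotate_half(q) * sin is the standard RoPE rotation applied per frequency
+ # pair. apply_rotary_pos_emb_vision upcasts q, k, cos and sin to float32 for the rotation and casts
+ # the result back to the original dtypes, e.g. bfloat16 activations stay bfloat16 on return.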
136
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
137
+ """
138
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
139
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
140
+ """
141
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
142
+ if n_rep == 1:
143
+ return hidden_states
144
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
145
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
146
+
147
+
148
+ def eager_attention_forward(
149
+ module: nn.Module,
150
+ query: torch.Tensor,
151
+ key: torch.Tensor,
152
+ value: torch.Tensor,
153
+ attention_mask: Optional[torch.Tensor],
154
+ scaling: float,
155
+ dropout: float = 0.0,
156
+ **kwargs: Unpack[TransformersKwargs],
157
+ ):
158
+ key_states = repeat_kv(key, module.num_key_value_groups)
159
+ value_states = repeat_kv(value, module.num_key_value_groups)
160
+
161
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
162
+ if attention_mask is not None:
163
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
164
+ attn_weights = attn_weights + causal_mask
165
+
166
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
167
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
168
+ attn_output = torch.matmul(attn_weights, value_states)
169
+ attn_output = attn_output.transpose(1, 2).contiguous()
170
+
171
+ return attn_output, attn_weights
172
+
173
+
174
+ class Qwen3VLVisionAttention(nn.Module):
175
+ def __init__(self, config: Qwen3VLVisionConfig) -> None:
176
+ super().__init__()
177
+ self.dim = config.hidden_size
178
+ self.num_heads = config.num_heads
179
+ self.head_dim = self.dim // self.num_heads
180
+ self.num_key_value_groups = 1 # needed for eager attention
181
+ self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
182
+ self.proj = nn.Linear(self.dim, self.dim)
183
+ self.scaling = self.head_dim**-0.5
184
+ self.config = config
185
+ self.attention_dropout = 0.0
186
+ self.is_causal = False
187
+
188
+ def forward(
189
+ self,
190
+ hidden_states: torch.Tensor,
191
+ cu_seqlens: torch.Tensor,
192
+ rotary_pos_emb: Optional[torch.Tensor] = None,
193
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
194
+ **kwargs,
195
+ ) -> torch.Tensor:
196
+ seq_length = hidden_states.shape[0]
197
+ query_states, key_states, value_states = (
198
+ self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
199
+ )
200
+ cos, sin = position_embeddings
201
+ query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
202
+
203
+ query_states = query_states.transpose(0, 1).unsqueeze(0)
204
+ key_states = key_states.transpose(0, 1).unsqueeze(0)
205
+ value_states = value_states.transpose(0, 1).unsqueeze(0)
206
+
207
+ attention_interface: Callable = eager_attention_forward
208
+ if self.config._attn_implementation != "eager":
209
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
210
+
211
+ if self.config._attn_implementation == "flash_attention_2":
212
+ # Flash Attention 2: Use cu_seqlens for variable length attention
213
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
214
+ attn_output, _ = attention_interface(
215
+ self,
216
+ query_states,
217
+ key_states,
218
+ value_states,
219
+ attention_mask=None,
220
+ scaling=self.scaling,
221
+ dropout=0.0 if not self.training else self.attention_dropout,
222
+ cu_seq_lens_q=cu_seqlens,
223
+ cu_seq_lens_k=cu_seqlens,
224
+ max_length_q=max_seqlen,
225
+ max_length_k=max_seqlen,
226
+ is_causal=False,
227
+ **kwargs,
228
+ )
229
+ else:
230
+ # Other implementations: Process each chunk separately
231
+ lengths = cu_seqlens[1:] - cu_seqlens[:-1]
232
+ splits = [
233
+ torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
234
+ ]
235
+
236
+ attn_outputs = [
237
+ attention_interface(
238
+ self,
239
+ q,
240
+ k,
241
+ v,
242
+ attention_mask=None,
243
+ scaling=self.scaling,
244
+ dropout=0.0 if not self.training else self.attention_dropout,
245
+ is_causal=False,
246
+ **kwargs,
247
+ )[0]
248
+ for q, k, v in zip(*splits)
249
+ ]
250
+ attn_output = torch.cat(attn_outputs, dim=1)
251
+
252
+ attn_output = attn_output.reshape(seq_length, -1).contiguous()
253
+ attn_output = self.proj(attn_output)
254
+ return attn_output
255
+
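+ # Editor's note (hedged, values assumed): cu_seqlens holds cumulative per-image token counts, e.g.
+ # grid_thw = [[1, 4, 4], [1, 2, 2]] yields cu_seqlens = [0, 16, 20]. flash_attention_2 consumes it
+ # directly for variable-length attention, while the sdpa/eager path above splits q, k, v into the
+ # corresponding chunks and runs attention per image before re-concatenating.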
256
+
257
+ class Qwen3VLVisionBlock(GradientCheckpointingLayer):
258
+ def __init__(self, config, attn_implementation: str = "sdpa") -> None:
259
+ super().__init__()
260
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=1e-6)
261
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=1e-6)
262
+ self.attn = Qwen3VLVisionAttention(config=config)
263
+ self.mlp = Qwen3VLVisionMLP(config=config)
264
+
265
+ def forward(
266
+ self,
267
+ hidden_states: torch.Tensor,
268
+ cu_seqlens: torch.Tensor,
269
+ rotary_pos_emb: Optional[torch.Tensor] = None,
270
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
271
+ **kwargs,
272
+ ) -> torch.Tensor:
273
+ hidden_states = hidden_states + self.attn(
274
+ self.norm1(hidden_states),
275
+ cu_seqlens=cu_seqlens,
276
+ rotary_pos_emb=rotary_pos_emb,
277
+ position_embeddings=position_embeddings,
278
+ **kwargs,
279
+ )
280
+ hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
281
+ return hidden_states
282
+
283
+
284
+ class Qwen3VLTextRotaryEmbedding(nn.Module):
285
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
286
+
287
+ def __init__(self, config: Qwen3VLTextConfig, device=None):
288
+ super().__init__()
289
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
290
+ self.rope_type = config.rope_scaling.get("rope_type", "default")
291
+ else:
292
+ self.rope_type = "default"
293
+ self.max_seq_len_cached = config.max_position_embeddings
294
+ self.original_max_seq_len = config.max_position_embeddings
295
+
296
+ self.config = config
297
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
298
+
299
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
300
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
301
+ self.original_inv_freq = self.inv_freq
302
+
303
+ self.mrope_section = config.rope_scaling.get("mrope_section", [24, 20, 20])
304
+
305
+ def apply_interleaved_mrope(self, freqs, mrope_section):
306
+ """Apply interleaved MRoPE to 3D rotary embeddings.
307
+ Reorganizes frequency layout from chunked [TTT...HHH...WWW] to
308
+ interleaved [THWTHWTHW...TT], preserving frequency continuity.
309
+ Args:
310
+ freqs: (3, bs, seq_len, head_dim // 2)
311
+ mrope_section: (3,)
312
+ Returns:
313
+ freqs_t: (bs, seq_len, head_dim // 2)
314
+ """
315
+ freqs_t = freqs[0] # just overwrite the first dimension T
316
+ for dim, offset in enumerate((1, 2), start=1): # H, W
317
+ length = mrope_section[dim] * 3
318
+ idx = slice(offset, length, 3)
319
+ freqs_t[..., idx] = freqs[dim, ..., idx]
320
+ return freqs_t
321
+
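+ # Worked example (editor's illustration, values assumed): with mrope_section = [24, 20, 20] and
+ # head_dim // 2 = 64, slice(1, 60, 3) pulls H frequencies into indices 1, 4, ..., 58 and
+ # slice(2, 60, 3) pulls W frequencies into indices 2, 5, ..., 59, while indices 0, 3, ..., 57 and
+ # the tail 60..63 keep the temporal (T) frequencies, giving 24 T, 20 H and 20 W channels in total.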
322
+ @torch.no_grad()
323
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
324
+ def forward(self, x, position_ids):
325
+ # In contrast to other models, Qwen3VL has different position ids for the grids
326
+ # So we expand the inv_freq to shape (3, ...)
327
+ if position_ids.ndim == 2:
328
+ position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
329
+ inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
330
+ position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
331
+
332
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
333
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
334
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
335
+ freqs = self.apply_interleaved_mrope(freqs, self.mrope_section)
336
+ emb = torch.cat((freqs, freqs), dim=-1)
337
+ cos = emb.cos() * self.attention_scaling
338
+ sin = emb.sin() * self.attention_scaling
339
+
340
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
341
+
342
+
343
+ @use_kernel_forward_from_hub("RMSNorm")
344
+ class Qwen3VLTextRMSNorm(nn.Module):
345
+ def __init__(self, hidden_size, eps: float = 1e-6) -> None:
346
+ """
347
+ Qwen3VLTextRMSNorm is equivalent to T5LayerNorm
348
+ """
349
+ super().__init__()
350
+ self.weight = nn.Parameter(torch.ones(hidden_size))
351
+ self.variance_epsilon = eps
352
+
353
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
354
+ input_dtype = hidden_states.dtype
355
+ hidden_states = hidden_states.to(torch.float32)
356
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
357
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
358
+ return self.weight * hidden_states.to(input_dtype)
359
+
360
+ def extra_repr(self):
361
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
362
+
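+ # Reference note (editor's addition): the normalization above is
+ #   y = weight * x / sqrt(mean(x**2, dim=-1, keepdim=True) + eps)
+ # computed in float32 and cast back to the input dtype; unlike LayerNorm there is no mean
+ # subtraction and no bias term.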
363
+
364
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
365
+ """Applies Rotary Position Embedding to the query and key tensors.
366
+
367
+ Args:
368
+ q (`torch.Tensor`): The query tensor.
369
+ k (`torch.Tensor`): The key tensor.
370
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
371
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
372
+ position_ids (`torch.Tensor`, *optional*):
373
+ Deprecated and unused.
374
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
375
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
376
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
377
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
378
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
379
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
380
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
381
+ Returns:
382
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
383
+ """
384
+ cos = cos.unsqueeze(unsqueeze_dim)
385
+ sin = sin.unsqueeze(unsqueeze_dim)
386
+ q_embed = (q * cos) + (rotate_half(q) * sin)
387
+ k_embed = (k * cos) + (rotate_half(k) * sin)
388
+ return q_embed, k_embed
389
+
390
+
391
+ class Qwen3VLTextAttention(nn.Module):
392
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
393
+
394
+ def __init__(self, config: Qwen3VLTextConfig, layer_idx: int):
395
+ super().__init__()
396
+ self.config = config
397
+ self.layer_idx = layer_idx
398
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
399
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
400
+ self.scaling = self.head_dim**-0.5
401
+ self.attention_dropout = config.attention_dropout
402
+ self.is_causal = True
403
+
404
+ self.q_proj = nn.Linear(
405
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
406
+ )
407
+ self.k_proj = nn.Linear(
408
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
409
+ )
410
+ self.v_proj = nn.Linear(
411
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
412
+ )
413
+ self.o_proj = nn.Linear(
414
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
415
+ )
416
+ self.q_norm = Qwen3VLTextRMSNorm(self.head_dim, eps=config.rms_norm_eps) # unlike olmo, only on the head dim!
417
+ self.k_norm = Qwen3VLTextRMSNorm(
418
+ self.head_dim, eps=config.rms_norm_eps
419
+ ) # thus post q_norm does not need reshape
420
+
421
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
422
+ def forward(
423
+ self,
424
+ hidden_states: torch.Tensor,
425
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
426
+ attention_mask: Optional[torch.Tensor],
427
+ past_key_values: Optional[Cache] = None,
428
+ cache_position: Optional[torch.LongTensor] = None,
429
+ **kwargs: Unpack[FlashAttentionKwargs],
430
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
431
+ input_shape = hidden_states.shape[:-1]
432
+ hidden_shape = (*input_shape, -1, self.head_dim)
433
+
434
+ query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
435
+ key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
436
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
437
+
438
+ cos, sin = position_embeddings
439
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
440
+
441
+ if past_key_values is not None:
442
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
443
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
444
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
445
+
446
+ attention_interface: Callable = eager_attention_forward
447
+ if self.config._attn_implementation != "eager":
448
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
449
+
450
+ attn_output, attn_weights = attention_interface(
451
+ self,
452
+ query_states,
453
+ key_states,
454
+ value_states,
455
+ attention_mask,
456
+ dropout=0.0 if not self.training else self.attention_dropout,
457
+ scaling=self.scaling,
458
+ **kwargs,
459
+ )
460
+
461
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
462
+ attn_output = self.o_proj(attn_output)
463
+ return attn_output, attn_weights
464
+
465
+
466
+ class Qwen3VLTextMLP(nn.Module):
467
+ def __init__(self, config):
468
+ super().__init__()
469
+ self.config = config
470
+ self.hidden_size = config.hidden_size
471
+ self.intermediate_size = config.intermediate_size
472
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
473
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
474
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
475
+ self.act_fn = ACT2FN[config.hidden_act]
476
+
477
+ def forward(self, x):
478
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
479
+ return down_proj
480
+
481
+
482
+ class Qwen3VLTextDecoderLayer(GradientCheckpointingLayer):
483
+ def __init__(self, config: Qwen3VLTextConfig, layer_idx: int):
484
+ super().__init__()
485
+ self.hidden_size = config.hidden_size
486
+
487
+ self.self_attn = Qwen3VLTextAttention(config=config, layer_idx=layer_idx)
488
+
489
+ self.mlp = Qwen3VLTextMLP(config)
490
+ self.input_layernorm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
491
+ self.post_attention_layernorm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
492
+
493
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
494
+ def forward(
495
+ self,
496
+ hidden_states: torch.Tensor,
497
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
498
+ attention_mask: Optional[torch.Tensor] = None,
499
+ position_ids: Optional[torch.LongTensor] = None,
500
+ past_key_values: Optional[Cache] = None,
501
+ use_cache: Optional[bool] = False,
502
+ cache_position: Optional[torch.LongTensor] = None,
503
+ **kwargs: Unpack[TransformersKwargs],
504
+ ) -> torch.Tensor:
505
+ residual = hidden_states
506
+ hidden_states = self.input_layernorm(hidden_states)
507
+ # Self Attention
508
+ hidden_states, _ = self.self_attn(
509
+ hidden_states=hidden_states,
510
+ attention_mask=attention_mask,
511
+ position_ids=position_ids,
512
+ past_key_values=past_key_values,
513
+ use_cache=use_cache,
514
+ cache_position=cache_position,
515
+ position_embeddings=position_embeddings,
516
+ **kwargs,
517
+ )
518
+ hidden_states = residual + hidden_states
519
+
520
+ # Fully Connected
521
+ residual = hidden_states
522
+ hidden_states = self.post_attention_layernorm(hidden_states)
523
+ hidden_states = self.mlp(hidden_states)
524
+ hidden_states = residual + hidden_states
525
+ return hidden_states
526
+
527
+
528
+ @dataclass
529
+ @auto_docstring(
530
+ custom_intro="""
531
+ Base class for Qwen3VL outputs, with hidden states and attentions.
532
+ """
533
+ )
534
+ class Qwen3VLModelOutputWithPast(ModelOutput):
535
+ r"""
536
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
537
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
538
+
539
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
540
+ `past_key_values` input) to speed up sequential decoding.
541
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
542
+ The rope index difference between sequence length and multimodal rope.
543
+ """
544
+
545
+ last_hidden_state: Optional[torch.FloatTensor] = None
546
+ past_key_values: Optional[Cache] = None
547
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
548
+ attentions: Optional[tuple[torch.FloatTensor]] = None
549
+ rope_deltas: Optional[torch.LongTensor] = None
550
+ labels: Optional[torch.LongTensor] = None
551
+ memory_triplets: Optional[dict] = None
552
+
553
+
554
+ @auto_docstring
555
+ class Qwen3VLPreTrainedModel(PreTrainedModel):
556
+ config: Qwen3VLConfig
557
+ base_model_prefix = "model"
558
+ supports_gradient_checkpointing = True
559
+ _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]
560
+ _skip_keys_device_placement = "past_key_values"
561
+ _supports_flash_attn = True
562
+ _supports_sdpa = True
563
+
564
+ _can_compile_fullgraph = True
565
+ _supports_attention_backend = True
566
+ _can_record_outputs = {
567
+ "hidden_states": Qwen3VLTextDecoderLayer,
568
+ "attentions": Qwen3VLTextAttention,
569
+ }
570
+
571
+
572
+ class Qwen3VLVisionModel(Qwen3VLPreTrainedModel):
573
+ config: Qwen3VLVisionConfig
574
+ _no_split_modules = ["Qwen3VLVisionBlock"]
575
+
576
+ def __init__(self, config, *inputs, **kwargs) -> None:
577
+ super().__init__(config, *inputs, **kwargs)
578
+ self.spatial_merge_size = config.spatial_merge_size
579
+ self.patch_size = config.patch_size
580
+ self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size
581
+
582
+ self.patch_embed = Qwen3VLVisionPatchEmbed(
583
+ config=config,
584
+ )
585
+
586
+ self.pos_embed = nn.Embedding(config.num_position_embeddings, config.hidden_size)
587
+ self.num_grid_per_side = int(config.num_position_embeddings**0.5)
588
+
589
+ head_dim = config.hidden_size // config.num_heads
590
+ self.rotary_pos_emb = Qwen3VLVisionRotaryEmbedding(head_dim // 2)
591
+
592
+ self.blocks = nn.ModuleList([Qwen3VLVisionBlock(config) for _ in range(config.depth)])
593
+ self.merger = Qwen3VLVisionPatchMerger(
594
+ config=config,
595
+ use_postshuffle_norm=False,
596
+ )
597
+
598
+ self.deepstack_visual_indexes = config.deepstack_visual_indexes
599
+ self.deepstack_merger_list = nn.ModuleList(
600
+ [
601
+ Qwen3VLVisionPatchMerger(
602
+ config=config,
603
+ use_postshuffle_norm=True,
604
+ )
605
+ for _ in range(len(config.deepstack_visual_indexes))
606
+ ]
607
+ )
608
+
609
+ self.gradient_checkpointing = False
610
+
611
+ def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
612
+ merge_size = self.spatial_merge_size
613
+
614
+ max_hw = int(grid_thw[:, 1:].max().item())
615
+ freq_table = self.rotary_pos_emb(max_hw) # (max_hw, dim // 2)
616
+ device = freq_table.device
617
+
618
+ total_tokens = int(torch.prod(grid_thw, dim=1).sum().item())
619
+ pos_ids = torch.empty((total_tokens, 2), dtype=torch.long, device=device)
620
+
621
+ offset = 0
622
+ for num_frames, height, width in grid_thw:
623
+ merged_h, merged_w = height // merge_size, width // merge_size
624
+
625
+ block_rows = torch.arange(merged_h, device=device) # block row indices
626
+ block_cols = torch.arange(merged_w, device=device) # block col indices
627
+ intra_row = torch.arange(merge_size, device=device) # intra-block row offsets
628
+ intra_col = torch.arange(merge_size, device=device) # intra-block col offsets
629
+
630
+ # Compute full-resolution positions
631
+ row_idx = block_rows[:, None, None, None] * merge_size + intra_row[None, None, :, None]
632
+ col_idx = block_cols[None, :, None, None] * merge_size + intra_col[None, None, None, :]
633
+
634
+ row_idx = row_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
635
+ col_idx = col_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
636
+
637
+ coords = torch.stack((row_idx, col_idx), dim=-1)
638
+
639
+ if num_frames > 1:
640
+ coords = coords.repeat(num_frames, 1)
641
+
642
+ num_tokens = coords.shape[0]
643
+ pos_ids[offset : offset + num_tokens] = coords
644
+ offset += num_tokens
645
+
646
+ embeddings = freq_table[pos_ids] # lookup rotary embeddings
647
+ embeddings = embeddings.flatten(1)
648
+ return embeddings
649
+
650
+ def fast_pos_embed_interpolate(self, grid_thw):
651
+ grid_ts, grid_hs, grid_ws = grid_thw[:, 0], grid_thw[:, 1], grid_thw[:, 2]
652
+
653
+ idx_list = [[] for _ in range(4)]
654
+ weight_list = [[] for _ in range(4)]
655
+
656
+ for t, h, w in zip(grid_ts, grid_hs, grid_ws):
657
+ h_idxs = torch.linspace(0, self.num_grid_per_side - 1, h)
658
+ w_idxs = torch.linspace(0, self.num_grid_per_side - 1, w)
659
+
660
+ h_idxs_floor = h_idxs.int()
661
+ w_idxs_floor = w_idxs.int()
662
+ h_idxs_ceil = (h_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
663
+ w_idxs_ceil = (w_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
664
+
665
+ dh = h_idxs - h_idxs_floor
666
+ dw = w_idxs - w_idxs_floor
667
+
668
+ base_h = h_idxs_floor * self.num_grid_per_side
669
+ base_h_ceil = h_idxs_ceil * self.num_grid_per_side
670
+
671
+ indices = [
672
+ (base_h[None].T + w_idxs_floor[None]).flatten(),
673
+ (base_h[None].T + w_idxs_ceil[None]).flatten(),
674
+ (base_h_ceil[None].T + w_idxs_floor[None]).flatten(),
675
+ (base_h_ceil[None].T + w_idxs_ceil[None]).flatten(),
676
+ ]
677
+
678
+ weights = [
679
+ ((1 - dh)[None].T * (1 - dw)[None]).flatten(),
680
+ ((1 - dh)[None].T * dw[None]).flatten(),
681
+ (dh[None].T * (1 - dw)[None]).flatten(),
682
+ (dh[None].T * dw[None]).flatten(),
683
+ ]
684
+
685
+ for i in range(4):
686
+ idx_list[i].extend(indices[i].tolist())
687
+ weight_list[i].extend(weights[i].tolist())
688
+
689
+ idx_tensor = torch.tensor(idx_list, dtype=torch.long, device=self.pos_embed.weight.device)
690
+ weight_tensor = torch.tensor(
691
+ weight_list, dtype=self.pos_embed.weight.dtype, device=self.pos_embed.weight.device
692
+ )
693
+ pos_embeds = self.pos_embed(idx_tensor) * weight_tensor[:, :, None]
694
+ patch_pos_embeds = pos_embeds[0] + pos_embeds[1] + pos_embeds[2] + pos_embeds[3]
695
+
696
+ patch_pos_embeds = patch_pos_embeds.split([h * w for h, w in zip(grid_hs, grid_ws)])
697
+
698
+ patch_pos_embeds_permute = []
699
+ merge_size = self.config.spatial_merge_size
700
+ for pos_embed, t, h, w in zip(patch_pos_embeds, grid_ts, grid_hs, grid_ws):
701
+ pos_embed = pos_embed.repeat(t, 1)
702
+ pos_embed = (
703
+ pos_embed.view(t, h // merge_size, merge_size, w // merge_size, merge_size, -1)
704
+ .permute(0, 1, 3, 2, 4, 5)
705
+ .flatten(0, 4)
706
+ )
707
+ patch_pos_embeds_permute.append(pos_embed)
708
+ patch_pos_embeds = torch.cat(patch_pos_embeds_permute)
709
+ return patch_pos_embeds
710
+
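+ # Editor's note (hedged): fast_pos_embed_interpolate is bilinear resampling of the learned
+ # num_grid_per_side x num_grid_per_side position table onto each (h, w) grid. For fractional
+ # offsets (dh, dw) the four corner weights (1-dh)(1-dw), (1-dh)dw, dh(1-dw) and dh*dw sum to 1,
+ # and the result is then repeated over t frames and permuted into spatial-merge order.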
711
+ def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
712
+ """
713
+ Args:
714
+ hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
715
+ The final hidden states of the model.
716
+ grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
717
+ The temporal, height and width of feature shape of each image in LLM.
718
+
719
+ Returns:
720
+ `tuple(torch.Tensor, list[torch.Tensor])`: the merged hidden states and the deepstack feature list.
721
+ """
722
+ hidden_states = self.patch_embed(hidden_states)
723
+
724
+ pos_embeds = self.fast_pos_embed_interpolate(grid_thw)
725
+ hidden_states = hidden_states + pos_embeds
726
+
727
+ rotary_pos_emb = self.rot_pos_emb(grid_thw)
728
+
729
+ seq_len, _ = hidden_states.size()
730
+ hidden_states = hidden_states.reshape(seq_len, -1)
731
+ rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
732
+ emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
733
+ position_embeddings = (emb.cos(), emb.sin())
734
+
735
+ cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
736
+ dim=0,
737
+ # Select dtype based on the following factors:
738
+ # - FA2 requires that cu_seqlens_q must have dtype int32
739
+ # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
740
+ # See https://github.com/huggingface/transformers/pull/34852 for more information
741
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
742
+ )
743
+ cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
744
+
745
+ deepstack_feature_lists = []
746
+ for layer_num, blk in enumerate(self.blocks):
747
+ hidden_states = blk(
748
+ hidden_states,
749
+ cu_seqlens=cu_seqlens,
750
+ position_embeddings=position_embeddings,
751
+ **kwargs,
752
+ )
753
+ if layer_num in self.deepstack_visual_indexes:
754
+ deepstack_feature = self.deepstack_merger_list[self.deepstack_visual_indexes.index(layer_num)](
755
+ hidden_states
756
+ )
757
+ deepstack_feature_lists.append(deepstack_feature)
758
+
759
+ hidden_states = self.merger(hidden_states)
760
+
761
+ return hidden_states, deepstack_feature_lists
762
+
763
+
764
+ @auto_docstring(
765
+ custom_intro=(
766
+ "Text part of Qwen3VL, "
767
+ "not a pure text-only model, as DeepStack integrates visual features into the early hidden states."
768
+ )
769
+ )
770
+ class Qwen3VLTextModel(Qwen3VLPreTrainedModel):
771
+ config: Qwen3VLTextConfig
772
+ _no_split_modules = ["Qwen3VLTextDecoderLayer"]
773
+
774
+ def __init__(self, config: Qwen3VLTextConfig):
775
+ super().__init__(config)
776
+ self.padding_idx = config.pad_token_id
777
+ self.vocab_size = config.vocab_size
778
+
779
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
780
+ self.layers = nn.ModuleList(
781
+ [Qwen3VLTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
782
+ )
783
+ self.norm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
784
+ self.rotary_emb = Qwen3VLTextRotaryEmbedding(config=config)
785
+ self.gradient_checkpointing = False
786
+ self.search_type = "none"
787
+ self.workingmemsize = 0
788
+
789
+ # Initialize weights and apply final processing
790
+ self.post_init()
791
+
792
+ @check_model_inputs
793
+ @auto_docstring
794
+ def forward(
795
+ self,
796
+ input_ids: Optional[torch.LongTensor] = None,
797
+ attention_mask: Optional[torch.Tensor] = None,
798
+ position_ids: Optional[torch.LongTensor] = None,
799
+ past_key_values: Optional[Cache] = None,
800
+ inputs_embeds: Optional[torch.FloatTensor] = None,
801
+ use_cache: Optional[bool] = None,
802
+ cache_position: Optional[torch.LongTensor] = None,
803
+ # args for deepstack
804
+ visual_pos_masks: Optional[torch.Tensor] = None,
805
+ deepstack_visual_embeds: Optional[list[torch.Tensor]] = None,
806
+ **kwargs: Unpack[FlashAttentionKwargs],
807
+ ) -> Union[tuple, BaseModelOutputWithPast]:
808
+ r"""
809
+ visual_pos_masks (`torch.Tensor` of shape `(batch_size, seqlen)`, *optional*):
810
+ The mask of the visual positions.
811
+ deepstack_visual_embeds (`list[torch.Tensor]`, *optional*):
812
+ The deepstack visual embeddings. The shape is (num_layers, visual_seqlen, embed_dim).
813
+ The feature is extracted from the different visual encoder layers, and fed to the decoder
814
+ hidden states. It's from the paper DeepStack(https://arxiv.org/abs/2406.04334).
815
+ """
816
+ if (input_ids is None) ^ (inputs_embeds is not None):
817
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
818
+
819
+ # torch.jit.trace() doesn't support cache objects in the output
820
+ if use_cache and past_key_values is None and not torch.jit.is_tracing():
821
+ past_key_values = DynamicCache(config=self.config)
822
+
823
+ if inputs_embeds is None:
824
+ inputs_embeds = self.embed_tokens(input_ids)
825
+
826
+ if cache_position is None:
827
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
828
+ cache_position = torch.arange(
829
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
830
+ )
831
+
832
+ # the hard coded `3` is for temporal, height and width.
833
+ if position_ids is None:
834
+ position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
835
+ elif position_ids.ndim == 2:
836
+ position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
837
+
838
+ if position_ids.ndim == 3 and position_ids.shape[0] == 4:
839
+ text_position_ids = position_ids[0]
840
+ position_ids = position_ids[1:]
841
+ else:
842
+ text_position_ids = position_ids[0]
843
+
844
+ attention_mask = create_causal_mask(
845
+ config=self.config,
846
+ input_embeds=inputs_embeds,
847
+ attention_mask=attention_mask,
848
+ cache_position=cache_position,
849
+ past_key_values=past_key_values,
850
+ position_ids=text_position_ids,
851
+ )
852
+
853
+ hidden_states = inputs_embeds
854
+
855
+ # create position embeddings to be shared across the decoder layers
856
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
857
+
858
+ # decoder layers
859
+ for layer_idx, decoder_layer in enumerate(self.layers):
860
+ layer_outputs = decoder_layer(
861
+ hidden_states,
862
+ attention_mask=attention_mask,
863
+ position_ids=text_position_ids,
864
+ past_key_values=past_key_values,
865
+ cache_position=cache_position,
866
+ position_embeddings=position_embeddings,
867
+ **kwargs,
868
+ )
869
+ hidden_states = layer_outputs
870
+
871
+ # add visual features to the hidden states of first several layers
872
+ if deepstack_visual_embeds is not None and layer_idx in range(len(deepstack_visual_embeds)):
873
+ hidden_states = self._deepstack_process(
874
+ hidden_states,
875
+ visual_pos_masks,
876
+ deepstack_visual_embeds[layer_idx],
877
+ )
878
+
879
+ hidden_states = self.norm(hidden_states)
880
+
881
+ return BaseModelOutputWithPast(
882
+ last_hidden_state=hidden_states,
883
+ past_key_values=past_key_values,
884
+ )
885
+
886
+ def _deepstack_process(
887
+ self, hidden_states: torch.Tensor, visual_pos_masks: torch.Tensor, visual_embeds: torch.Tensor
888
+ ):
889
+ visual_pos_masks = visual_pos_masks.to(hidden_states.device)
890
+ visual_embeds = visual_embeds.to(hidden_states.device, hidden_states.dtype)
891
+ local_this = hidden_states[visual_pos_masks, :].clone() + visual_embeds
892
+ hidden_states[visual_pos_masks, :] = local_this
893
+ return hidden_states
894
+
895
+
896
+ @auto_docstring
897
+ class Qwen3VLModel(Qwen3VLPreTrainedModel):
898
+ base_model_prefix = ""
899
+ _checkpoint_conversion_mapping = {}
900
+ # Reference: fix gemma3 grad acc #37208
901
+ accepts_loss_kwargs = False
902
+ config: Qwen3VLConfig
903
+ _no_split_modules = ["Qwen3VLTextDecoderLayer", "Qwen3VLVisionBlock"]
904
+
905
+ def __init__(self, config):
906
+ super().__init__(config)
907
+ self.visual = Qwen3VLVisionModel._from_config(config.vision_config)
908
+ self.config = config
909
+ self.audio = WhisperEncoder._from_config(
910
+ config.audio_config, attn_implementation=config._attn_implementation
911
+ )
912
+ self.language_model = Qwen3VLTextModel._from_config(config.text_config)
913
+ self.rope_deltas = None # cache rope_deltas here
914
+ self.fixed_memory_size = 0
915
+ self.fixed_memory_size_audio = 0
916
+ self.stepsize = 0
917
+ self.ttt_type = "simsample"
918
+ self.search_type = "none"
919
+
920
+ # Initialize weights and apply final processing
921
+ self.post_init()
922
+
923
+ def init_ttt_layers(
924
+ self,
925
+ num_heads=8,
926
+ ttt_gating=True,
927
+ ttt_minibatch_size=1024,
928
+ ttt_cg_type="ttt_mlp_cg",
929
+ CG_max_iter=0,
930
+ ttt_hidden_size=4,
931
+ ttt_base_lr=0.1,
932
+ freeze_ttt=False,
933
+ memgroupsize=0,
934
+ workingmemsize=0,
935
+ ):
936
+ hidden_size = self.config.text_config.hidden_size
937
+ total_embed_num = len(self.config.vision_config.deepstack_visual_indexes) + 1
938
+ # model_dim = hidden_size * total_embed_num
939
+ self.ttt_configs = TTTModelConfig(
940
+ model_dim=hidden_size,
941
+ num_heads=num_heads,
942
+ num_layers=1,
943
+ ttt_base_lr=ttt_base_lr,
944
+ mini_batch_size=ttt_minibatch_size,
945
+ ssm_layer=ttt_cg_type,
946
+ ttt_hidden_size=ttt_hidden_size,
947
+ )
948
+ self.ttt_minibatch_size = ttt_minibatch_size
949
+ self.ttt_layers = TTTWrapper(self.ttt_configs, CG_max_iter=CG_max_iter)
950
+ self.ttt_use_gating = ttt_gating
951
+ if ttt_gating:
952
+ self.ttt_gating = SSMGating(hidden_size)
953
+ self.use_ttt = True
954
+ self.freeze_ttt = freeze_ttt
955
+ self.memgroupsize = memgroupsize
956
+
957
+ def init_mem_search(self, search_type, workingmemsize=0):
958
+ self.hidden_size = self.config.text_config.hidden_size
959
+ if "attn" in search_type:
960
+ self.search_query = nn.Parameter(torch.randn(self.hidden_size))
961
+ self.search_alpha = nn.Parameter(torch.zeros(1))
962
+ self.search_type = search_type
963
+ self.workingmemsize = workingmemsize
964
+ if "kvcache" in self.search_type:
965
+ self.language_model.search_type = self.search_type
966
+ self.language_model.workingmemsize = self.workingmemsize
967
+
968
+ def get_input_embeddings(self):
969
+ return self.language_model.get_input_embeddings()
970
+
971
+ def set_input_embeddings(self, value):
972
+ self.language_model.set_input_embeddings(value)
973
+
974
+ def set_decoder(self, decoder):
975
+ self.language_model = decoder
976
+
977
+ def get_decoder(self):
978
+ return self.language_model
979
+
980
+ def get_rope_index(
981
+ self,
982
+ input_ids: Optional[torch.LongTensor] = None,
983
+ image_grid_thw: Optional[torch.LongTensor] = None,
984
+ video_grid_thw: Optional[torch.LongTensor] = None,
985
+ attention_mask: Optional[torch.Tensor] = None,
986
+ ) -> tuple[torch.Tensor, torch.Tensor]:
987
+ """Different from the original implementation, Qwen3VL use timestamps rather than absolute time position ids."""
988
+
989
+ # Since we use timestamps to separate videos, like <t1> <vision_start> <frame1> <vision_end> <t2> <vision_start> <frame2> <vision_end>, the video_grid_thw should also be split
990
+ if video_grid_thw is not None:
991
+ video_grid_thw = torch.repeat_interleave(video_grid_thw, video_grid_thw[:, 0], dim=0)
992
+ video_grid_thw[:, 0] = 1
993
+
994
+ spatial_merge_size = self.config.vision_config.spatial_merge_size
995
+ image_token_id = self.config.image_token_id
996
+ video_token_id = self.config.video_token_id
997
+ vision_start_token_id = self.config.vision_start_token_id
998
+ mrope_position_deltas = []
999
+ if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
1000
+ total_input_ids = input_ids
1001
+ if attention_mask is None:
1002
+ attention_mask = torch.ones_like(total_input_ids)
1003
+ position_ids = torch.ones(
1004
+ 3,
1005
+ input_ids.shape[0],
1006
+ input_ids.shape[1],
1007
+ dtype=input_ids.dtype,
1008
+ device=input_ids.device,
1009
+ )
1010
+ image_index, video_index = 0, 0
1011
+ attention_mask = attention_mask.to(total_input_ids.device)
1012
+ for i, input_ids in enumerate(total_input_ids):
1013
+ input_ids = input_ids[attention_mask[i] == 1]
1014
+ image_nums, video_nums = 0, 0
1015
+ vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
1016
+ vision_tokens = input_ids[vision_start_indices + 1]
1017
+ image_nums = (vision_tokens == image_token_id).sum()
1018
+ video_nums = (vision_tokens == video_token_id).sum()
1019
+ input_tokens = input_ids.tolist()
1020
+ llm_pos_ids_list: list = []
1021
+ st = 0
1022
+ remain_images, remain_videos = image_nums, video_nums
1023
+ for _ in range(image_nums + video_nums):
1024
+ if image_token_id in input_tokens and remain_images > 0:
1025
+ ed_image = input_tokens.index(image_token_id, st)
1026
+ else:
1027
+ ed_image = len(input_tokens) + 1
1028
+ if video_token_id in input_tokens and remain_videos > 0:
1029
+ ed_video = input_tokens.index(video_token_id, st)
1030
+ else:
1031
+ ed_video = len(input_tokens) + 1
1032
+ if ed_image < ed_video:
1033
+ t, h, w = (
1034
+ image_grid_thw[image_index][0],
1035
+ image_grid_thw[image_index][1],
1036
+ image_grid_thw[image_index][2],
1037
+ )
1038
+ image_index += 1
1039
+ remain_images -= 1
1040
+ ed = ed_image
1041
+
1042
+ else:
1043
+ t, h, w = (
1044
+ video_grid_thw[video_index][0],
1045
+ video_grid_thw[video_index][1],
1046
+ video_grid_thw[video_index][2],
1047
+ )
1048
+ video_index += 1
1049
+ remain_videos -= 1
1050
+ ed = ed_video
1051
+ llm_grid_t, llm_grid_h, llm_grid_w = (
1052
+ t.item(),
1053
+ h.item() // spatial_merge_size,
1054
+ w.item() // spatial_merge_size,
1055
+ )
1056
+ text_len = ed - st
1057
+
1058
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
1059
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
1060
+
1061
+ # t_index is always 0 because llm_grid_t is always 1 (we use timestamps to encode the temporal information for videos)
1062
+ t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
1063
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
1064
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
1065
+ llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
1066
+ st = ed + llm_grid_t * llm_grid_h * llm_grid_w
1067
+
1068
+ if st < len(input_tokens):
1069
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
1070
+ text_len = len(input_tokens) - st
1071
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
1072
+
1073
+ llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
1074
+ position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
1075
+ mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
1076
+ mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
1077
+ return position_ids, mrope_position_deltas
1078
+ else:
1079
+ if attention_mask is not None:
1080
+ position_ids = attention_mask.long().cumsum(-1) - 1
1081
+ position_ids.masked_fill_(attention_mask == 0, 1)
1082
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
1083
+ max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
1084
+ mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
1085
+ else:
1086
+ position_ids = (
1087
+ torch.arange(input_ids.shape[1], device=input_ids.device)
1088
+ .view(1, 1, -1)
1089
+ .expand(3, input_ids.shape[0], -1)
1090
+ )
1091
+ mrope_position_deltas = torch.zeros(
1092
+ [input_ids.shape[0], 1],
1093
+ device=input_ids.device,
1094
+ dtype=input_ids.dtype,
1095
+ )
1096
+
1097
+ return position_ids, mrope_position_deltas
1098
+
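+ # Worked example (editor's illustration, values assumed): for 3 text tokens followed by one image
+ # whose merged grid is t=1, h=2, w=2, the three position-id rows become
+ #   T: 0 1 2 3 3 3 3
+ #   H: 0 1 2 3 3 4 4
+ #   W: 0 1 2 3 4 3 4
+ # i.e. text advances all axes together while vision tokens fan out over (t, h, w) starting at
+ # text_len, and mrope_position_deltas stores llm_positions.max() + 1 - sequence_length.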
1099
+ def get_audio_features(
1100
+ self, audio_features: torch.FloatTensor
1101
+ ):
1102
+ audio_features = audio_features.type(self.audio.dtype)
1103
+ audio_embeds = self.audio(audio_features)
1104
+ return audio_embeds
1105
+
1106
+ def get_video_features(
1107
+ self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
1108
+ ):
1109
+ """
1110
+ Encodes videos into continuous embeddings that can be forwarded to the language model. The deepstack visual features are also returned.
1111
+
1112
+ Args:
1113
+ pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1114
+ The tensors corresponding to the input videos.
1115
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1116
+ The temporal, height and width of feature shape of each video in LLM.
1117
+ """
1118
+ # Same implementation as for images
1119
+ return self.get_image_features(pixel_values_videos, video_grid_thw)
1120
+
1121
+ def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
1122
+ """
1123
+ Encodes images into continuous embeddings that can be forwarded to the language model. The deepstack visual features are also returned.
1124
+
1125
+ Args:
1126
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1127
+ The tensors corresponding to the input images.
1128
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1129
+ The temporal, height and width of feature shape of each image in LLM.
1130
+ """
1131
+ pixel_values = pixel_values.type(self.visual.dtype)
1132
+ image_embeds, deepstack_image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
1133
+ split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
1134
+ image_embeds = torch.split(image_embeds, split_sizes)
1135
+ return image_embeds, deepstack_image_embeds
1136
+
1137
+ def get_placeholder_mask(
1138
+ self,
1139
+ input_ids: torch.LongTensor,
1140
+ inputs_embeds: torch.FloatTensor,
1141
+ image_features: Optional[torch.FloatTensor] = None,
1142
+ video_features: Optional[torch.FloatTensor] = None,
1143
+ audio_features: Optional[torch.FloatTensor] = None,
1144
+ ):
1145
+ """
1146
+ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
1147
+ equal to the length of multimodal features. If the lengths are different, an error is raised.
1148
+ """
1149
+ if input_ids is None:
1150
+ special_image_mask = inputs_embeds == self.get_input_embeddings()(
1151
+ torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
1152
+ )
1153
+ special_image_mask = special_image_mask.all(-1)
1154
+ special_video_mask = inputs_embeds == self.get_input_embeddings()(
1155
+ torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
1156
+ )
1157
+ special_video_mask = special_video_mask.all(-1)
1158
+ special_audio_mask = inputs_embeds == self.get_input_embeddings()(
1159
+ torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device)
1160
+ )
1161
+ special_audio_mask = special_audio_mask.all(-1)
1162
+ else:
1163
+ special_image_mask = input_ids == self.config.image_token_id
1164
+ special_video_mask = input_ids == self.config.video_token_id
1165
+ special_audio_mask = input_ids == self.config.audio_token_id
1166
+
1167
+ n_image_tokens = special_image_mask.sum()
1168
+ special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1169
+ if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
1170
+ raise ValueError(
1171
+ f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
1172
+ )
1173
+
1174
+ n_video_tokens = special_video_mask.sum()
1175
+ special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1176
+ # if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
1177
+ # if (special_video_mask != 0).sum().item() != video_features.numel():
1178
+ # raise ValueError(
1179
+ # f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
1180
+ # )
1181
+
1182
+ n_audio_tokens = special_audio_mask.sum()
1183
+ special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1184
+ if audio_features is not None and inputs_embeds[special_audio_mask].numel() != audio_features.numel():
1185
+ raise ValueError(
1186
+ f"Audio features and audio tokens do not match: tokens: {n_audio_tokens}, features {audio_features.shape[0]}"
1187
+ )
1188
+
1189
+ return special_image_mask, special_video_mask, special_audio_mask
1190
+
1191
+ def recursive_attention(
1192
+ self,
1193
+ video_embeds,
1194
+ deepstack_video_embeds=None,
1195
+ downsample_ids=None,
1196
+ memory_size=None,
1197
+ stepsize=None,
1198
+ mergemode="",
1199
+ ):
1200
+ if deepstack_video_embeds is not None:
1201
+ embed_size = video_embeds.size(-1)
1202
+ video_embeds = torch.cat([video_embeds] + deepstack_video_embeds, dim=-1)
1203
+ if downsample_ids is None:
1204
+ downsample_ids = torch.tensor([n for n in range(0, video_embeds.size(0))]).to(video_embeds.device)
1205
+ memory_size = self.fixed_memory_size if memory_size is None else memory_size
1206
+ stepsize = self.stepsize if stepsize is None else stepsize
1207
+ running_embeds = video_embeds[:memory_size]
1208
+ running_downsample_ids = downsample_ids[:memory_size]
1209
+ for step_id in range(memory_size, video_embeds.size(0), stepsize):
1210
+ incoming_data = video_embeds[step_id:step_id+stepsize]
1211
+ incoming_downids = downsample_ids[step_id:step_id+stepsize]
1212
+ running_embeds = torch.cat([running_embeds, incoming_data], dim=0)
1213
+ fullsize = running_embeds.size(0)
1214
+ running_downsample_ids = torch.cat([running_downsample_ids, incoming_downids], dim=0)
1215
+ emb_norms = F.normalize(running_embeds, dim=-1)
1216
+ similarities = (emb_norms[:-1] * emb_norms[1:]).sum(dim=-1)
1217
+ top_sim_ids = (-similarities.squeeze(0)).topk(memory_size)[1] + 1
1218
+ top_sim_ids = torch.sort(top_sim_ids).values
1219
+ if "simsample" in self.ttt_type and mergemode != "sim":
1220
+ running_embeds = running_embeds[top_sim_ids]
1221
+ else:
1222
+ starts = torch.cat([top_sim_ids.new_zeros(1), top_sim_ids[:-1]], dim=0)
1223
+ lengths = top_sim_ids - starts
1224
+ row_idx = torch.arange(fullsize).unsqueeze(1).to(top_sim_ids.device) # [N, 1]
1225
+ group_range = (row_idx >= starts) & (row_idx < top_sim_ids) # [N, M]
1226
+ P_matrix = group_range.to(running_embeds.dtype) / lengths.to(running_embeds.dtype)
1227
+ running_embeds = torch.einsum("nm,nd->md", P_matrix, running_embeds)
1228
+ running_downsample_ids = running_downsample_ids[top_sim_ids.to(running_downsample_ids.device)]
1229
+ running_embeds_deepstack = None
1230
+ if deepstack_video_embeds is not None:
1231
+ running_embeds = running_embeds.view(running_embeds.size(0), -1, embed_size)
1232
+ running_embeds_deepstack = [running_embeds[:, i+1, :] for i in range(running_embeds.size(1) - 1)]
1233
+ running_embeds = running_embeds[:, 0, :]
1234
+ return running_embeds, running_embeds_deepstack, running_downsample_ids
1235
+
1236
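+ # Illustrative sketch (not part of the model): the core compression step used by
+ # recursive_attention, reduced to a standalone function. Consecutive-row cosine similarity picks
+ # the `memory_size` least-similar positions as segment boundaries, and each segment is mean-pooled
+ # into a single slot. The function name `compress_once` is hypothetical.
+ #
+ #   import torch
+ #   import torch.nn.functional as F
+ #
+ #   def compress_once(embeds: torch.Tensor, memory_size: int) -> torch.Tensor:
+ #       # embeds: (T, D) with T > memory_size
+ #       norms = F.normalize(embeds, dim=-1)
+ #       sims = (norms[:-1] * norms[1:]).sum(dim=-1)              # (T-1,) cos(x_t, x_{t+1})
+ #       boundaries = (-sims).topk(memory_size).indices + 1       # least-similar transitions
+ #       boundaries = torch.sort(boundaries).values
+ #       starts = torch.cat([boundaries.new_zeros(1), boundaries[:-1]])
+ #       pooled = [embeds[s:e].mean(dim=0) for s, e in zip(starts.tolist(), boundaries.tolist())]
+ #       return torch.stack(pooled)                               # (memory_size, D)
+ #
+ #   compressed = compress_once(torch.randn(40, 8), memory_size=8)   # -> shape (8, 8)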
+ def ttt_padding(self, video_embeds):
1237
+ padding_length = 0
1238
+ minibatchsize = self.ttt_minibatch_size
1239
+ if video_embeds.size(1) % minibatchsize != 0:
1240
+ padding_length = minibatchsize - video_embeds.size(1) % minibatchsize
1241
+ padding = video_embeds.new_zeros(video_embeds.size(0), padding_length, video_embeds.size(-1))
1242
+ video_embeds = torch.cat([video_embeds, padding], dim=1)
1243
+ return video_embeds, padding_length
1244
+
1245
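+ # Illustrative sketch (not part of the model): ttt_padding pads the time dimension up to a
+ # multiple of the TTT mini-batch size so the TTT layers see equal-sized chunks. With a
+ # hypothetical mini-batch size of 16:
+ #
+ #   import torch
+ #   x = torch.randn(1, 70, 32)                  # (batch, T, D)
+ #   pad = (16 - x.size(1) % 16) % 16            # 70 -> pad by 10 -> 80
+ #   x = torch.cat([x, x.new_zeros(x.size(0), pad, x.size(2))], dim=1)
+ #   assert x.size(1) % 16 == 0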
+ def forward_ttt_layers(self, pixel_values_videos, video_grid_thw, input_embeds):
1246
+ if "ttt" in self.ttt_type:
1247
+ freqs_cis = self.ttt_layers._precompute_freqs_cis_3d(
1248
+ video_grid_thw[0, 1]//2, video_grid_thw[0, 2]//2, max(video_grid_thw[0, 0]*2, 128)).to(pixel_values_videos.device)
1249
+
1250
+ if self.fixed_memory_size > 0:
1251
+ state_track = None
1252
+ running_downsample_ids, running_embeddings, running_deepstack_embeds = None, None, None
1253
+ stepsize = video_grid_thw[0, 1] * video_grid_thw[0, 2]
1254
+ step = 0
1255
+ num_frame_per_chunk = 256
1256
+ # with torch.no_grad():
1257
+ for i in range(0, video_grid_thw[0, 0], num_frame_per_chunk):
1258
+ current_thw = video_grid_thw.clone()
1259
+ current_thw[0, 0] = min(num_frame_per_chunk, video_grid_thw[0, 0] - i)
1260
+ video_embeds, incoming_deepstack_embeds = self.get_video_features(pixel_values_videos[i*stepsize:(i+num_frame_per_chunk)*stepsize], current_thw)
1261
+ video_embeds = torch.cat(video_embeds, dim=0).to(input_embeds.device, input_embeds.dtype)
1262
+ if "ttt" in self.ttt_type:
1263
+ embed_dim = video_embeds.size(-1)
1264
+ if "concat" in self.ttt_type:
1265
+ video_embeds = torch.stack([video_embeds] + incoming_deepstack_embeds, dim=0)
1266
+ else:
1267
+ video_embeds = video_embeds.unsqueeze(0)
1268
+ # residual_video_emb = video_embeds
1269
+ incoming_embeddings, padding_length = self.ttt_padding(video_embeds)
1270
+ incoming_embeddings, state_track = self.ttt_layers(
1271
+ incoming_embeddings,
1272
+ freqs_cis=freqs_cis[step:step+incoming_embeddings.size(1)] if freqs_cis is not None else None,
1273
+ state_track=state_track,
1274
+ )
1275
+ if padding_length > 0:
1276
+ incoming_embeddings = incoming_embeddings[:, :-padding_length]
1277
+ if self.ttt_use_gating:
1278
+ incoming_embeddings = self.ttt_gating(incoming_embeddings) + video_embeds
1279
+ incoming_embeddings = incoming_embeddings.transpose(0, 1).reshape(incoming_embeddings.size(1), -1)
1280
+ else:
1281
+ incoming_embeddings = video_embeds
1282
+ incoming_downsample_ids = torch.arange(incoming_embeddings.size(0), device=video_embeds.device) + step
1283
+ step += incoming_downsample_ids.size(0)
1284
1285
+
1286
+ if not self.training:
1287
+ torch.cuda.empty_cache()
1288
+
1289
+ if running_embeddings is not None:
1290
+ running_embeddings = torch.cat([running_embeddings, incoming_embeddings], dim=0)
1291
+ running_downsample_ids = torch.cat([running_downsample_ids, incoming_downsample_ids], dim=0)
1292
+ if "concat" not in self.ttt_type:
1293
+ running_deepstack_embeds = [torch.cat(
1294
+ [r_emb, i_emb], dim=0) for r_emb, i_emb in zip(running_deepstack_embeds, incoming_deepstack_embeds)]
1295
+ running_embeddings, running_deepstack_embeds, running_downsample_ids = self.recursive_attention(
1296
+ running_embeddings, deepstack_video_embeds=running_deepstack_embeds, downsample_ids=running_downsample_ids)
1297
+ else:
1298
+ running_embeddings, _, running_downsample_ids = self.recursive_attention(
1299
+ running_embeddings, downsample_ids=running_downsample_ids)
1300
+ else:
1301
+ running_embeddings = incoming_embeddings
1302
+ running_downsample_ids = incoming_downsample_ids
1303
+ running_deepstack_embeds = incoming_deepstack_embeds
1304
+ if incoming_embeddings.size(0) > self.fixed_memory_size:
1305
+ if "concat" not in self.ttt_type:
1306
+ running_embeddings, running_deepstack_embeds, running_downsample_ids = self.recursive_attention(
1307
+ running_embeddings, deepstack_video_embeds=running_deepstack_embeds, downsample_ids=running_downsample_ids)
1308
+ else:
1309
+ running_embeddings, _, running_downsample_ids = self.recursive_attention(
1310
+ running_embeddings, downsample_ids=running_downsample_ids)
1311
+ downsample_ids = running_downsample_ids
1312
+ video_embeds = running_embeddings
1313
+ deepstack_video_embeds = running_deepstack_embeds
1314
+ else:
+ # Single-pass path (no fixed-size memory): encode the whole video once, run the TTT layers over
+ # it, then compress with recursive_attention. This mirrors the feature preparation of the chunked
+ # path above so that video_embeds, residual_video_emb and downsample_ids are all defined.
+ video_embeds, deepstack_video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
+ video_embeds = torch.cat(video_embeds, dim=0).to(input_embeds.device, input_embeds.dtype)
+ embed_dim = video_embeds.size(-1)
+ if "concat" in self.ttt_type:
+ video_embeds = torch.stack([video_embeds] + deepstack_video_embeds, dim=0)
+ else:
+ video_embeds = video_embeds.unsqueeze(0)
+ residual_video_emb = video_embeds
+ video_embeds, padding_length = self.ttt_padding(video_embeds)
+ video_embeds, state_track = self.ttt_layers(video_embeds, freqs_cis=freqs_cis)
+
+ if padding_length > 0:
+ video_embeds = video_embeds[:, :-padding_length]
+
+ if self.ttt_use_gating:
+ video_embeds = self.ttt_gating(video_embeds) + residual_video_emb
+
+ video_embeds = video_embeds.transpose(0, 1).reshape(video_embeds.size(1), -1)
+ video_embeds, _, downsample_ids = self.recursive_attention(video_embeds)
1325
+ # Split
1326
+ if "concat" in self.ttt_type:
1327
+ video_embeds = video_embeds.view(video_embeds.size(0), -1, embed_dim)
1328
+ deepstack_video_embeds = [video_embeds[:, i+1, :] for i in range(video_embeds.size(1) - 1)]
1329
+ video_embeds = video_embeds[:, 0, :]
1330
+ return video_embeds, deepstack_video_embeds, downsample_ids
1331
+
1332
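+ # Illustrative sketch (not part of the model): the streaming pattern used by forward_ttt_layers,
+ # in miniature. The video is encoded in fixed-size chunks; each chunk is appended to a running
+ # memory that is re-compressed back to `memory_size` slots, so peak memory stays bounded
+ # regardless of video length. `encode_chunk` and `compress` are hypothetical stand-ins.
+ #
+ #   import torch
+ #
+ #   def stream_compress(frames, chunk, memory_size, encode_chunk, compress):
+ #       memory = None
+ #       for start in range(0, frames.size(0), chunk):
+ #           feats = encode_chunk(frames[start:start + chunk])            # (t, D)
+ #           memory = feats if memory is None else torch.cat([memory, feats], dim=0)
+ #           if memory.size(0) > memory_size:
+ #               memory = compress(memory, memory_size)                   # back to (memory_size, D)
+ #       return memory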
+ def compute_segment_logits_with_similarity_minmax(self, X, s, alpha=1.0, eps=1e-8):
1333
+ T, N = X.shape
1334
+
1335
+ X_norm = F.normalize(X, p=2, dim=1, eps=eps) # (T, N)
1336
+
1337
+ # sim_forward[t] ~ cos(x_t, x_{t+1}) for t=0..T-2
1338
+ sim_forward = (X_norm[:-1] * X_norm[1:]).sum(dim=1) # (T-1,)
1339
+ ones = torch.ones(1, device=X.device, dtype=X.dtype)
1340
+ sim_left = torch.cat([ones, sim_forward]) # (T,)
1341
+ sim_right = torch.cat([sim_forward, ones]) # (T,)
1342
+
1343
+ sim_mean = (sim_left + sim_right) / 2.0 # (T,)
1344
+ dissim = (1.0 - sim_mean) / 2.0 # (T,) in [0, 1], since cosine similarity lies in [-1, 1]
1345
+
1346
+ # 2. Min-max normalize scores to [0, 1]
1347
+ s_min = s.min()
1348
+ s_max = s.max()
1349
+ s_norm = (s - s_min) / (s_max - s_min + eps) # [0,1]
1350
+
1351
+ simscaling = torch.sigmoid(self.search_alpha)
1352
+
1353
+ z = (1 - simscaling) * s_norm + simscaling * dissim
1354
+ return z
1355
+
1356
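+ # Illustrative sketch (not part of the model): the method above blends a min-max normalized
+ # relevance score with a local "dissimilarity to neighbours" term, weighted by a learned sigmoid
+ # gate (self.search_alpha). A hypothetical numeric example:
+ #
+ #   import torch
+ #   s = torch.tensor([0.2, 1.5, 0.9])           # raw relevance scores
+ #   s_norm = (s - s.min()) / (s.max() - s.min() + 1e-8)
+ #   dissim = torch.tensor([0.1, 0.6, 0.3])      # 0 = identical to neighbours, 1 = very different
+ #   gate = torch.sigmoid(torch.tensor(0.0))     # 0.5 -> equal weighting
+ #   z = (1 - gate) * s_norm + gate * dissim     # -> tensor([0.0500, 0.8000, 0.4192])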
+ def gumbel_top_k(self, scores, K, tau=1.0, eps=1e-8):
1357
+ T = scores.shape[0]
1358
+ # 1) Sample Gumbel noise
1359
+ u = torch.rand_like(scores)
1360
+ gumbel = -torch.log(-torch.log(u + eps) + eps)
1361
+
1362
+ # 2) Gumbel-perturbed scores
1363
+ perturbed = scores + gumbel
1364
+
1365
+ # 3) Take Top-K
1366
+ topk_vals, topk_idx = torch.topk(perturbed, K, dim=0) # (K,), (K,)
1367
+
1368
+ # 4) Turn those K values into a softmax distribution (optional smoothing)
1369
+ topk_weights = F.softmax(topk_vals / tau, dim=0) # (K,)
1370
+
1371
+ # 5) Scatter back into a (T,) vector
1372
+ weights = torch.zeros_like(scores)
1373
+ weights[topk_idx] = topk_weights
1374
+
1375
+ return topk_idx, weights
1376
+
1377
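+ # Illustrative sketch (not part of the model): Gumbel-Top-K draws K indices whose inclusion is
+ # biased toward high scores by perturbing each score with independent Gumbel noise and taking an
+ # ordinary top-k of the perturbed values.
+ #
+ #   import torch
+ #   scores = torch.tensor([2.0, 0.5, 1.0, 3.0])
+ #   gumbel = -torch.log(-torch.log(torch.rand_like(scores) + 1e-8) + 1e-8)
+ #   topk_idx = torch.topk(scores + gumbel, k=2).indices   # stochastic, favours indices 3 and 0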
+ def select_representatives_gumbel_top_k_ste(self, X, s, K, tau=1.0):
1378
+ # 1) Gumbel-Top-K selection
1379
+ idx, w = self.gumbel_top_k(s, K, tau=tau) # idx: (K,), w: (T,)
1380
+
1381
+ # 2) Hard representatives: actual selected rows
1382
+ reps_hard = X[idx] # (K, N)
1383
+ soft_vec = torch.matmul(w, X) # (N,)
1384
+ reps_soft = soft_vec.unsqueeze(0).expand(K, -1) # (K, N)
1385
+
1386
+ reps = reps_soft + (reps_hard - reps_soft).detach()
1387
+
1388
+ return reps, idx, w
1389
+
1390
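+ # Illustrative sketch (not part of the model): `reps_soft + (reps_hard - reps_soft).detach()` is
+ # the straight-through trick -- the forward value equals the hard selection, while gradients flow
+ # only through the soft (weighted-average) path.
+ #
+ #   import torch
+ #   w = torch.tensor([0.1, 0.7, 0.2], requires_grad=True)
+ #   X = torch.eye(3)
+ #   hard = X[w.argmax()]                        # forward value: one-hot row 1
+ #   soft = w @ X                                # differentiable surrogate
+ #   ste = soft + (hard - soft).detach()         # value == hard, gradient flows via soft
+ #   ste.sum().backward()                        # w.grad == tensor([1., 1., 1.])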
+ def importance_pool(self, video_embed, importance, memory_size):
1391
+ T, N = video_embed.shape
1392
+ if "gumbel" not in self.search_type:
1393
+ if "softmax" in self.search_type:
1394
+ importance = torch.softmax(importance, dim=-1)
1395
+ elif "sigmoid" in self.search_type:
1396
+ importance = torch.sigmoid(importance)
1397
+ importance = self.compute_segment_logits_with_similarity_minmax(video_embed, importance)
1398
+ top_sim_ids = torch.sort(importance.topk(memory_size)[1]).values
1399
+ video_embed = importance.unsqueeze(1) * video_embed
1400
+ starts = torch.cat([top_sim_ids.new_zeros(1) - 1, top_sim_ids[:-1]], dim=0)
1401
+ lengths = top_sim_ids - starts
1402
+ row_idx = torch.arange(T).unsqueeze(1).to(top_sim_ids.device) # [T, 1]
1403
+ group_range = (row_idx > starts) & (row_idx <= top_sim_ids) # [T, M]
1404
+ P_matrix = group_range.to(video_embed.dtype) / lengths.to(video_embed.dtype)
1405
+ video_embed = torch.einsum("nm,nd->md", P_matrix, video_embed)
1406
+ else:
1407
+ importance = self.compute_segment_logits_with_similarity_minmax(video_embed, importance)
1408
+ video_embed, top_sim_ids, weights = self.select_representatives_gumbel_top_k_ste(video_embed, importance, memory_size)
1409
+ return video_embed, top_sim_ids
1410
+
1411
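+ # Note (descriptive, not part of the model): importance_pool has two modes. Without "gumbel" in
+ # search_type it deterministically keeps the top-`memory_size` positions of the blended score and
+ # mean-pools the importance-weighted rows of each segment; with "gumbel" it samples the kept
+ # positions via gumbel_top_k and uses the straight-through selection sketched above.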
+ def chunk_memory(self, video_embeds, deepstack_video_embeds, downsample_ids, hidden_states):
1412
+ if "attn" in self.search_type:
1413
+ attention_scores = torch.einsum("k,ijk->ij", self.search_query, hidden_states)
1414
+ attention_scores = F.softmax(attention_scores / math.sqrt(self.search_query.size(0)), dim=-1)
1415
+ search_query = torch.einsum("ij,ijk->ik", attention_scores, hidden_states).unsqueeze(1)
1416
+ else:
1417
+ search_query = hidden_states.sum(dim=1, keepdim=True)
1418
+ scores = torch.einsum("ij,j->i", video_embeds, search_query.squeeze(0).squeeze(0)) / math.sqrt(self.hidden_size)
1419
+ memsize = min(video_embeds.size(0), self.workingmemsize)
1420
+ video_embeds, local_downsample_ids = self.importance_pool(video_embeds, scores, memsize)
1421
+ deepstack_video_embeds = [emb[local_downsample_ids] for emb in deepstack_video_embeds]
1422
+ downsample_ids = downsample_ids[local_downsample_ids]
1423
+ return video_embeds, deepstack_video_embeds, downsample_ids
1424
+
1425
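+ # Illustrative sketch (not part of the model): chunk_memory scores every memory slot against a
+ # query vector pooled from the language-model hidden states and keeps a query-relevant working
+ # set. The retrieval core, reduced to a standalone snippet (all names are hypothetical):
+ #
+ #   import math
+ #   import torch
+ #   memory = torch.randn(100, 64)                         # (slots, D)
+ #   query = torch.randn(64)                               # pooled question representation
+ #   scores = memory @ query / math.sqrt(64)               # (slots,)
+ #   keep = torch.sort(scores.topk(16).indices).values     # 16 most relevant slots, in time order
+ #   working_set = memory[keep]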
+ @auto_docstring
1426
+ @check_model_inputs
1427
+ def forward(
1428
+ self,
1429
+ input_ids: torch.LongTensor = None,
1430
+ attention_mask: Optional[torch.Tensor] = None,
1431
+ position_ids: Optional[torch.LongTensor] = None,
1432
+ past_key_values: Optional[Cache] = None,
1433
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1434
+ pixel_values: Optional[torch.Tensor] = None,
1435
+ pixel_values_videos: Optional[torch.FloatTensor] = None,
1436
+ image_grid_thw: Optional[torch.LongTensor] = None,
1437
+ video_grid_thw: Optional[torch.LongTensor] = None,
1438
+ cache_position: Optional[torch.LongTensor] = None,
1439
+ audio_feature: Optional[torch.Tensor] = None,
1440
+ distillround: bool = False,
1441
+ labels: Optional[torch.LongTensor] = None,
1442
+ memory_triplets: Optional[dict] = None,
1443
+ **kwargs: Unpack[TransformersKwargs],
1444
+ ) -> Union[tuple, Qwen3VLModelOutputWithPast]:
1445
+ r"""
1446
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1447
+ The temporal, height and width of feature shape of each image in LLM.
1448
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1449
+ The temporal, height and width of feature shape of each video in LLM.
1450
+ """
1451
+ if (input_ids is None) ^ (inputs_embeds is not None):
1452
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1453
+
1454
+ if inputs_embeds is None:
1455
+ inputs_embeds = self.get_input_embeddings()(input_ids)
1456
+
1457
+ image_mask = None
1458
+ video_mask = None
1459
+
1460
+ if pixel_values is not None:
1461
+ image_embeds, deepstack_image_embeds = self.get_image_features(pixel_values, image_grid_thw)
1462
+ image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1463
+ image_mask, _ = self.get_placeholder_mask(
1464
+ input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
1465
+ )
1466
+ inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
1467
+
1468
+ all_memory = None
1469
+ if pixel_values_videos is not None:
1470
+ if self.fixed_memory_size > 0:
1471
+ if memory_triplets is not None:
1472
+ video_embeds, deepstack_video_embeds, downsample_ids = memory_triplets["video_embeds"], memory_triplets["deepstack_video_embeds"], memory_triplets["downsample_ids"]
1473
+ elif self.freeze_ttt:
1474
+ with torch.no_grad():
1475
+ video_embeds, deepstack_video_embeds, downsample_ids = self.forward_ttt_layers(pixel_values_videos, video_grid_thw, inputs_embeds)
1476
+ if self.search_type != "none" and memory_triplets is None:
1477
+ all_memory = {
1478
+ "video_embeds": video_embeds,
1479
+ "deepstack_video_embeds": deepstack_video_embeds,
1480
+ "downsample_ids": downsample_ids,
1481
+ }
1482
+ memsize = self.workingmemsize // 8
1483
+ video_embeds, deepstack_video_embeds, downsample_ids = self.recursive_attention(
1484
+ video_embeds, deepstack_video_embeds=deepstack_video_embeds, downsample_ids=downsample_ids, memory_size=memsize)
1485
+ else:
1486
+ video_embeds, deepstack_video_embeds, downsample_ids = self.forward_ttt_layers(pixel_values_videos, video_grid_thw, inputs_embeds)
1487
+ if self.search_type != "none" and memory_triplets is None:
1488
+ all_memory = {
1489
+ "video_embeds": video_embeds,
1490
+ "deepstack_video_embeds": deepstack_video_embeds,
1491
+ "downsample_ids": downsample_ids,
1492
+ }
1493
+ memsize = self.workingmemsize // 8
1494
+ video_embeds, deepstack_video_embeds, downsample_ids = self.recursive_attention(
1495
+ video_embeds, deepstack_video_embeds=deepstack_video_embeds, downsample_ids=downsample_ids, memory_size=memsize)
1496
+ _, video_mask, _ = self.get_placeholder_mask(
1497
+ input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
1498
+ )
1499
+ else:
1500
+ video_embeds, deepstack_video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
1501
+ video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1502
+ _, video_mask, _ = self.get_placeholder_mask(
1503
+ input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
1504
+ )
1505
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
1506
+ if torch.cuda.current_device() == 0:
1507
+ print(f"RANK 0 video embeds shape: {video_embeds.shape}")
1508
+
1509
+ audio_downsample_ids = None
1510
+ if audio_feature is not None:
1511
+ audio_embeds = self.get_audio_features(audio_feature)
1512
+ if torch.cuda.current_device() == 0:
1513
+ print(f"RANK 0 audio embeds shape: {audio_embeds.shape}")
1514
+ _, _, audio_mask = self.get_placeholder_mask(
1515
+ input_ids, inputs_embeds=inputs_embeds, audio_features=audio_embeds
1516
+ )
1517
+ if self.fixed_memory_size > 0 and not distillround:
1518
+ audio_embeds, _, audio_downsample_ids = self.recursive_attention(
1519
+ audio_embeds.view(-1, audio_embeds.size(-1)),
1520
+ memory_size=self.fixed_memory_size_audio,
1521
+ stepsize=self.stepsize,
1522
+ mergemode="sim",
1523
+ )
1524
+ else:
1525
+ inputs_embeds = inputs_embeds.masked_scatter(audio_mask, audio_embeds)
1526
+ # print(f"Both shapes: {video_embeds.shape}, {audio_embeds.shape}")
1527
+
1528
+ visual_pos_masks = None
1529
+ deepstack_visual_embeds = None
1530
+ if image_mask is not None and video_mask is not None:
1531
+ # aggregate visual_pos_masks and deepstack_visual_embeds
1532
+ image_mask = image_mask[..., 0]
1533
+ video_mask = video_mask[..., 0]
1534
+ visual_pos_masks = image_mask | video_mask
1535
+ deepstack_visual_embeds = []
1536
+ image_mask_joint = image_mask[visual_pos_masks]
1537
+ video_mask_joint = video_mask[visual_pos_masks]
1538
+ for img_embed, vid_embed in zip(deepstack_image_embeds, deepstack_video_embeds):
1539
+ embed_joint = img_embed.new_zeros(visual_pos_masks.sum(), img_embed.shape[-1]).to(img_embed.device)
1540
+ embed_joint[image_mask_joint, :] = img_embed
1541
+ embed_joint[video_mask_joint, :] = vid_embed
1542
+ deepstack_visual_embeds.append(embed_joint)
1543
+ elif image_mask is not None:
1544
+ image_mask = image_mask[..., 0]
1545
+ visual_pos_masks = image_mask
1546
+ deepstack_visual_embeds = deepstack_image_embeds
1547
+ elif video_mask is not None:
1548
+ visual_pos_masks = video_mask[..., 0]
1549
+ deepstack_visual_embeds = deepstack_video_embeds
1550
+
1551
+ if position_ids is None:
1552
+ attention_mask_tensor = (
1553
+ attention_mask if not isinstance(attention_mask, dict) else attention_mask["full_attention"]
1554
+ )
1555
+ if attention_mask_tensor is not None and attention_mask_tensor.ndim == 4:
1556
+ attention_mask_tensor = torch.diagonal(attention_mask_tensor[:, 0], dim1=1, dim2=2)
1557
+ # Only apply conversion for floating point tensors (inverted masks)
1558
+ if attention_mask_tensor.dtype.is_floating_point:
1559
+ attention_mask_tensor = attention_mask_tensor / torch.finfo(attention_mask_tensor.dtype).min
1560
+ attention_mask_tensor = (1.0 - attention_mask_tensor).int()
1561
+
1562
+ # Calculate RoPE index once per generation in the pre-fill stage only.
1563
+ # When compiling, we can't check tensor values thus we check only input length
1564
+ # It is safe to assume that `length!=1` means we're in pre-fill because compiled
1565
+ # models currently cannot do assisted decoding
1566
+ prefill_compiled_stage = is_torchdynamo_compiling() and (
1567
+ (input_ids is not None and input_ids.shape[1] != 1)
1568
+ or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
1569
+ )
1570
+ prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
1571
+ (cache_position is not None and cache_position[0] == 0)
1572
+ or (past_key_values is None or past_key_values.get_seq_length() == 0)
1573
+ )
1574
+ if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None:
1575
+ position_ids, rope_deltas = self.get_rope_index(
1576
+ input_ids,
1577
+ image_grid_thw,
1578
+ video_grid_thw,
1579
+ attention_mask=attention_mask_tensor,
1580
+ )
1581
+ self.rope_deltas = rope_deltas
1582
+ # then use the prev pre-calculated rope-deltas to get the correct position ids
1583
+ else:
1584
+ batch_size, seq_length, _ = inputs_embeds.shape
1585
+ delta = (
1586
+ (cache_position[0] + self.rope_deltas).to(inputs_embeds.device)
1587
+ if cache_position is not None
1588
+ else 0
1589
+ )
1590
+ position_ids = torch.arange(seq_length, device=inputs_embeds.device)
1591
+ position_ids = position_ids.view(1, -1).expand(batch_size, -1)
1592
+ if cache_position is not None: # otherwise `deltas` is an int `0`
1593
+ delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
1594
+ position_ids = position_ids.add(delta)
1595
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
1596
+
1597
+ if self.fixed_memory_size > 0 and pixel_values_videos is not None and not distillround:
1598
+ video_mask_sel = torch.where(visual_pos_masks == 1)[1][downsample_ids]
1599
+ if audio_feature is not None and audio_downsample_ids is not None:
1600
+ audio_mask_sel = torch.where(audio_mask[:, :, 0] == 1)[1][audio_downsample_ids]
1601
+ keep_mask = ~(audio_mask[:, :, 0] | visual_pos_masks)
1602
+ keep_mask[:, video_mask_sel] = True
1603
+ keep_mask[:, audio_mask_sel] = True
1604
+ keep_ids = torch.where(keep_mask)[1]
1605
+ inputs_embeds = inputs_embeds[:, keep_ids]
1606
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask[:, keep_ids], video_embeds)
1607
+ inputs_embeds = inputs_embeds.masked_scatter(audio_mask[:, keep_ids], audio_embeds)
1608
+ else:
1609
+ keep_mask = ~visual_pos_masks
1610
+ keep_mask[:, video_mask_sel] = True
1611
+ keep_ids = torch.where(keep_mask)[1]
1612
+ inputs_embeds = inputs_embeds[:, keep_ids]
1613
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask[:, keep_ids], video_embeds)
1614
+ if labels is not None:
1615
+ labels = labels[:, keep_ids]
1616
+ attention_mask = attention_mask[:, keep_ids]
1617
+ visual_pos_masks = visual_pos_masks[:, keep_ids]
1618
+ newposids = []
1619
+ for posids in position_ids:
1620
+ newposid = posids[:, keep_ids]
1621
+ newposids.append(newposid)
1622
+ position_ids = torch.stack(newposids, dim=0)
1623
+
1624
+ outputs = self.language_model(
1625
+ input_ids=None,
1626
+ position_ids=position_ids,
1627
+ attention_mask=attention_mask,
1628
+ past_key_values=past_key_values,
1629
+ inputs_embeds=inputs_embeds,
1630
+ cache_position=cache_position,
1631
+ visual_pos_masks=visual_pos_masks,
1632
+ deepstack_visual_embeds=deepstack_visual_embeds,
1633
+ **kwargs,
1634
+ )
1635
+ if self.search_type != "none" and all_memory is not None:
1636
+ # Hard-coded Qwen special-token ids: 151653 is <|vision_end|> and 151644 is <|im_start|>.
+ query_start_pos = torch.where(input_ids == 151653)[1][-1].item() + 2 - input_ids.size(1)
1637
+ if len(torch.where(input_ids == 151644)[1]) > 2:
1638
+ gen_start_pos = torch.where(input_ids == 151644)[1][2].item() + 2 - input_ids.size(1)
1639
+ else:
1640
+ gen_start_pos = -1
1641
+ hidden_states = outputs.last_hidden_state[:, query_start_pos:gen_start_pos]
1642
+ video_embeds, deepstack_video_embeds, downsample_ids = self.chunk_memory(
1643
+ all_memory["video_embeds"],
1644
+ all_memory["deepstack_video_embeds"],
1645
+ all_memory["downsample_ids"],
1646
+ hidden_states,
1647
+ )
1648
+ memory_triplets = {
1649
+ "video_embeds": video_embeds,
1650
+ "deepstack_video_embeds": deepstack_video_embeds,
1651
+ "downsample_ids": downsample_ids,
1652
+ }
1653
+ else:
1654
+ memory_triplets = None
1655
+ torch.cuda.empty_cache()
1656
+
1657
+ return Qwen3VLModelOutputWithPast(
1658
+ last_hidden_state=outputs.last_hidden_state,
1659
+ past_key_values=outputs.past_key_values,
1660
+ rope_deltas=self.rope_deltas,
1661
+ labels=labels,
1662
+ memory_triplets=memory_triplets,
1663
+ )
1664
+
1665
+
1666
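+ # Illustrative sketch (not part of the model): when the fixed-size memory is active, the forward
+ # pass above keeps only the `downsample_ids` video placeholder positions and drops the rest,
+ # shrinking the sequence before the language model runs. In miniature:
+ #
+ #   import torch
+ #   is_video = torch.tensor([[False, True, True, True, True, False]])   # placeholder positions
+ #   downsample_ids = torch.tensor([0, 2])                               # kept video slots
+ #   video_positions = torch.where(is_video)[1][downsample_ids]          # -> tensor([1, 3])
+ #   keep = ~is_video
+ #   keep[:, video_positions] = True
+ #   keep_ids = torch.where(keep)[1]                                     # -> tensor([0, 1, 3, 5])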
+ @dataclass
1667
+ @auto_docstring(
1668
+ custom_intro="""
1669
+ Base class for Qwen3VL causal language model (or autoregressive) outputs.
1670
+ """
1671
+ )
1672
+ class Qwen3VLCausalLMOutputWithPast(ModelOutput):
1673
+ r"""
1674
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
1675
+ Language modeling loss (for next-token prediction).
1676
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
1677
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
1678
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1679
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
1680
+
1681
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
1682
+ `past_key_values` input) to speed up sequential decoding.
1683
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
1684
+ The rope index difference between sequence length and multimodal rope.
1685
+ """
1686
+
1687
+ loss: Optional[torch.FloatTensor] = None
1688
+ logits: Optional[torch.FloatTensor] = None
1689
+ past_key_values: Optional[Cache] = None
1690
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
1691
+ attentions: Optional[tuple[torch.FloatTensor]] = None
1692
+ rope_deltas: Optional[torch.LongTensor] = None
1693
+ memory_triplets: Optional[dict[str, torch.Tensor]] = None
1694
+
1695
+
1696
+ class Qwen3VLForConditionalGeneration(Qwen3VLPreTrainedModel, GenerationMixin):
1697
+ _checkpoint_conversion_mapping = {}
1698
+ _tied_weights_keys = ["lm_head.weight"]
1699
+ # Reference: fix gemma3 grad acc #37208
1700
+ accepts_loss_kwargs = False
1701
+ config: Qwen3VLConfig
1702
+
1703
+ def __init__(self, config):
1704
+ super().__init__(config)
1705
+ self.model = Qwen3VLModel(config)
1706
+ self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
1707
+
1708
+ self.post_init()
1709
+
1710
+ def get_input_embeddings(self):
1711
+ return self.model.get_input_embeddings()
1712
+
1713
+ def set_input_embeddings(self, value):
1714
+ self.model.set_input_embeddings(value)
1715
+
1716
+ def set_decoder(self, decoder):
1717
+ self.model.set_decoder(decoder)
1718
+
1719
+ def get_decoder(self):
1720
+ return self.model.get_decoder()
1721
+
1722
+ def get_video_features(
1723
+ self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
1724
+ ):
1725
+ return self.model.get_video_features(pixel_values_videos, video_grid_thw)
1726
+
1727
+ def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
1728
+ return self.model.get_image_features(pixel_values, image_grid_thw)
1729
+
1730
+ # Make modules available through conditional class for BC
1731
+ @property
1732
+ def language_model(self):
1733
+ return self.model.language_model
1734
+
1735
+ @property
1736
+ def visual(self):
1737
+ return self.model.visual
1738
+
1739
+ @check_model_inputs
1740
+ def forward(
1741
+ self,
1742
+ input_ids: torch.LongTensor = None,
1743
+ attention_mask: Optional[torch.Tensor] = None,
1744
+ position_ids: Optional[torch.LongTensor] = None,
1745
+ past_key_values: Optional[Cache] = None,
1746
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1747
+ labels: Optional[torch.LongTensor] = None,
1748
+ pixel_values: Optional[torch.Tensor] = None,
1749
+ pixel_values_videos: Optional[torch.FloatTensor] = None,
1750
+ image_grid_thw: Optional[torch.LongTensor] = None,
1751
+ video_grid_thw: Optional[torch.LongTensor] = None,
1752
+ cache_position: Optional[torch.LongTensor] = None,
1753
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1754
+ audio_feature: Optional[torch.Tensor] = None,
1755
+ train_type: Optional[str] = "sft",
1756
+ distillround: bool = False,
1757
+ memory_triplets: Optional[dict] = None,
1758
+ **kwargs: Unpack[TransformersKwargs],
1759
+ ) -> Union[tuple, Qwen3VLCausalLMOutputWithPast]:
1760
+ r"""
1761
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1762
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1763
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1764
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1765
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1766
+ The temporal, height and width of feature shape of each image in LLM.
1767
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1768
+ The temporal, height and width of feature shape of each video in LLM.
1769
+
1770
+ Example:
1771
+ TODO: Add example
1772
+ """
1773
+ outputs = self.model(
1774
+ input_ids=input_ids,
1775
+ pixel_values=pixel_values,
1776
+ pixel_values_videos=pixel_values_videos,
1777
+ image_grid_thw=image_grid_thw,
1778
+ video_grid_thw=video_grid_thw,
1779
+ position_ids=position_ids,
1780
+ attention_mask=attention_mask,
1781
+ past_key_values=past_key_values,
1782
+ inputs_embeds=inputs_embeds,
1783
+ cache_position=cache_position,
1784
+ audio_feature=audio_feature,
1785
+ labels=labels,
1786
+ distillround=distillround,
1787
+ memory_triplets=memory_triplets,
1788
+ **kwargs,
1789
+ )
1790
+
1791
+ if outputs.labels is not None:
1792
+ labels = outputs.labels
1793
+
1794
+ hidden_states = outputs[0]
1795
+
1796
+ loss = None
1797
+ logits = None
1798
+
1799
+ shift_labels = kwargs.pop("shift_labels", None)
1800
+ return_logits = kwargs.pop("return_logits", False)
1801
+ memory_triplets = outputs.memory_triplets
1802
+
1803
+ if self.training and (labels is not None or shift_labels is not None):
1804
+ loss = LigerForCausalLMLoss(
1805
+ hidden_states=hidden_states,
1806
+ lm_head_weight=self.lm_head.weight,
1807
+ labels=labels,
1808
+ shift_labels=shift_labels,
1809
+ hidden_size=self.config.text_config.hidden_size,
1810
+ **kwargs,
1811
+ )
1812
+ if return_logits:
1813
+ distill_labels = labels[0, 1:]
1814
+ start_idx = torch.where(distill_labels != -100)[0][0]
1815
+ logits = self.lm_head(hidden_states[:, start_idx:, :])
1816
+ elif memory_triplets is None:
1817
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1818
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1819
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1820
+
1821
+ if labels is not None:
1822
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
1823
+
1824
+ return Qwen3VLCausalLMOutputWithPast(
1825
+ loss=loss,
1826
+ logits=logits,
1827
+ past_key_values=outputs.past_key_values,
1828
+ rope_deltas=outputs.rope_deltas,
1829
+ memory_triplets=memory_triplets,
1830
+ )
1831
+
1832
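+ # Illustrative usage sketch (not part of the model): when a retrieval `search_type` is enabled,
+ # the first pass returns `memory_triplets`, which can be fed back for a second, query-conditioned
+ # pass. Argument names mirror the signature above; the input tensors are assumed to exist.
+ #
+ #   first = model(input_ids=input_ids, pixel_values_videos=pixel_values_videos,
+ #                 video_grid_thw=video_grid_thw, attention_mask=attention_mask)
+ #   if first.memory_triplets is not None:
+ #       second = model(input_ids=input_ids, pixel_values_videos=pixel_values_videos,
+ #                      video_grid_thw=video_grid_thw, attention_mask=attention_mask,
+ #                      memory_triplets=first.memory_triplets)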
+ def prepare_inputs_for_generation(
1833
+ self,
1834
+ input_ids,
1835
+ past_key_values=None,
1836
+ attention_mask=None,
1837
+ inputs_embeds=None,
1838
+ cache_position=None,
1839
+ position_ids=None,
1840
+ use_cache=True,
1841
+ pixel_values=None,
1842
+ pixel_values_videos=None,
1843
+ image_grid_thw=None,
1844
+ video_grid_thw=None,
1845
+ audio_feature=None,
1846
+ **kwargs,
1847
+ ):
1848
+ # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
1849
+
1850
+ model_inputs = super().prepare_inputs_for_generation(
1851
+ input_ids,
1852
+ past_key_values=past_key_values,
1853
+ attention_mask=attention_mask,
1854
+ inputs_embeds=inputs_embeds,
1855
+ cache_position=cache_position,
1856
+ position_ids=position_ids,
1857
+ pixel_values=pixel_values,
1858
+ pixel_values_videos=pixel_values_videos,
1859
+ image_grid_thw=image_grid_thw,
1860
+ video_grid_thw=video_grid_thw,
1861
+ use_cache=use_cache,
1862
+ audio_feature=audio_feature,
1863
+ **kwargs,
1864
+ )
1865
+
1866
+ # Qwen3VL position_ids are prepared with rope_deltas in forward
1867
+ model_inputs["position_ids"] = None
1868
+
1869
+ if cache_position[0] != 0:
1870
+ model_inputs["pixel_values"] = None
1871
+ model_inputs["pixel_values_videos"] = None
1872
+ model_inputs["audio_feature"] = None
1873
+
1874
+ return model_inputs
1875
+
1876
+ def _get_image_nums_and_video_nums(
1877
+ self,
1878
+ input_ids: Optional[torch.LongTensor],
1879
+ inputs_embeds: Optional[torch.Tensor] = None,
1880
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1881
+ """
1882
+ Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
1883
+ These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
1884
+
1885
+ Args:
1886
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1887
+ Indices of input sequence tokens in the vocabulary.
1888
+
1889
+ Returns:
1890
+ image_nums (`torch.LongTensor` of shape `(batch_size,)`)
1891
+ video_nums (`torch.LongTensor` of shape `(batch_size,)`)
1892
+ """
1893
+ image_token_id = self.config.image_token_id
1894
+ video_token_id = self.config.video_token_id
1895
+ vision_start_token_id = self.config.vision_start_token_id
1896
+
1897
+ if inputs_embeds is not None:
1898
+ vision_start_mask = (
1899
+ inputs_embeds
1900
+ == self.get_input_embeddings()(
1901
+ torch.tensor(vision_start_token_id, dtype=torch.long, device=inputs_embeds.device)
1902
+ )
1903
+ )[..., 0]
1904
+ image_mask = (
1905
+ inputs_embeds
1906
+ == self.get_input_embeddings()(
1907
+ torch.tensor(image_token_id, dtype=torch.long, device=inputs_embeds.device)
1908
+ )
1909
+ )[..., 0]
1910
+ video_mask = (
1911
+ inputs_embeds
1912
+ == self.get_input_embeddings()(
1913
+ torch.tensor(video_token_id, dtype=torch.long, device=inputs_embeds.device)
1914
+ )
1915
+ )[..., 0]
1916
+ else:
1917
+ vision_start_mask = input_ids == vision_start_token_id
1918
+ image_mask = input_ids == image_token_id
1919
+ video_mask = input_ids == video_token_id
1920
+
1921
+ vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1)
1922
+ image_nums = torch.sum(vision_first_mask & image_mask, dim=1)
1923
+ video_nums = torch.sum(vision_first_mask & video_mask, dim=1)
1924
+
1925
+ return image_nums, video_nums
1926
+
1927
+ def _expand_inputs_for_generation(
1928
+ self,
1929
+ expand_size: int = 1,
1930
+ is_encoder_decoder: bool = False,
1931
+ input_ids: Optional[torch.LongTensor] = None,
1932
+ **model_kwargs,
1933
+ ) -> tuple[torch.LongTensor, dict[str, Any]]:
1934
+ # Overwritten -- Support for expanding tensors without a batch size dimension
1935
+ # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t
1936
+ # pixel_values.shape[0] is sum(seqlen_images for samples)
1937
+ # image_grid_thw.shape[0] is sum(num_images for samples)
1938
+
1939
+ if expand_size == 1:
1940
+ return input_ids, model_kwargs
1941
+
1942
+ visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"]
1943
+
1944
+ def _expand_dict_for_generation_visual(dict_to_expand):
1945
+ image_grid_thw = model_kwargs.get("image_grid_thw", None)
1946
+ video_grid_thw = model_kwargs.get("video_grid_thw", None)
1947
+ image_nums, video_nums = self._get_image_nums_and_video_nums(
1948
+ input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
1949
+ )
1950
+
1951
+ def _repeat_interleave_samples(x, lengths, repeat_times):
1952
+ samples = torch.split(x, lengths)
1953
+ repeat_args = [repeat_times] + [1] * (x.dim() - 1)
1954
+ result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
1955
+ return result
1956
+
1957
+ for key in dict_to_expand:
1958
+ if key == "pixel_values":
1959
+ # split images into samples
1960
+ samples = torch.split(image_grid_thw, list(image_nums))
1961
+ # compute the sequence length of images for each sample
1962
+ lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
1963
+ dict_to_expand[key] = _repeat_interleave_samples(
1964
+ dict_to_expand[key], lengths=lengths, repeat_times=expand_size
1965
+ )
1966
+ elif key == "image_grid_thw":
1967
+ # get the num of images for each sample
1968
+ lengths = list(image_nums)
1969
+ dict_to_expand[key] = _repeat_interleave_samples(
1970
+ dict_to_expand[key], lengths=lengths, repeat_times=expand_size
1971
+ )
1972
+ elif key == "pixel_values_videos":
1973
+ samples = torch.split(video_grid_thw, list(video_nums))
1974
+ lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
1975
+ dict_to_expand[key] = _repeat_interleave_samples(
1976
+ dict_to_expand[key], lengths=lengths, repeat_times=expand_size
1977
+ )
1978
+ elif key == "video_grid_thw":
1979
+ lengths = list(video_nums)
1980
+ dict_to_expand[key] = _repeat_interleave_samples(
1981
+ dict_to_expand[key], lengths=lengths, repeat_times=expand_size
1982
+ )
1983
+ elif key == "second_per_grid_ts":
1984
+ dict_to_expand[key] = _repeat_interleave_samples(
1985
+ dict_to_expand[key], lengths=list(video_nums), repeat_times=expand_size
1986
+ )
1987
+ return dict_to_expand
1988
+
1989
+ def _expand_dict_for_generation(dict_to_expand):
1990
+ for key in dict_to_expand:
1991
+ if (
1992
+ key != "cache_position"
1993
+ and dict_to_expand[key] is not None
1994
+ and isinstance(dict_to_expand[key], torch.Tensor)
1995
+ and key not in visual_keys
1996
+ ):
1997
+ dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
1998
+ return dict_to_expand
1999
+
2000
+ model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
2001
+
2002
+ if input_ids is not None:
2003
+ input_ids = input_ids.repeat_interleave(expand_size, dim=0)
2004
+
2005
+ model_kwargs = _expand_dict_for_generation(model_kwargs)
2006
+
2007
+ if is_encoder_decoder:
2008
+ if model_kwargs.get("encoder_outputs") is None:
2009
+ raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
2010
+ model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
2011
+
2012
+ return input_ids, model_kwargs
2013
+
2014
+
2015
+ __all__ = [
2016
+ "Qwen3VLVisionModel",
2017
+ "Qwen3VLForConditionalGeneration",
2018
+ "Qwen3VLModel",
2019
+ "Qwen3VLPreTrainedModel",
2020
+ "Qwen3VLTextModel",
2021
+ ]
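+
+ # Illustrative usage sketch (not part of the module): this file is meant to be loaded as custom
+ # code from the Hub, so loading typically goes through `trust_remote_code`. The repository name
+ # below is a placeholder, and the choice of auto class is an assumption.
+ #
+ #   from transformers import AutoProcessor, AutoModelForImageTextToText
+ #   model = AutoModelForImageTextToText.from_pretrained("org/qwen3-vl-repo", trust_remote_code=True)
+ #   processor = AutoProcessor.from_pretrained("org/qwen3-vl-repo", trust_remote_code=True)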