# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/glm4v/modular_glm4v.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_glm4v.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation


class Glm4vVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vVisionModel`]. It is used to instantiate a
    Glm4vVisionModel according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).

    Args:
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the encoder layers and the pooler layer.
        depth (`int`, *optional*, defaults to 24):
            Number of layers (depth) in the model.
        num_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries, keys and values.
        intermediate_size (`int`, *optional*, defaults to 13696):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for attention weights.
        projection_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for the projection layer.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        image_size (`int` or `list[int]`, *optional*, defaults to 336):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        in_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        out_hidden_size (`int`, *optional*, defaults to 4096):
            The output hidden size of the vision model.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        spatial_merge_size (`int`, *optional*, defaults to 2):
            The size used for merging spatial dimensions.
        temporal_patch_size (`int`, *optional*, defaults to 1):
            The size used for patches along the temporal dimension.

    Example:

    ```python
    >>> from transformers import Glm4vVisionConfig, Glm4vVisionModel

    >>> # Initializing a Glm4vVisionConfig GLM-4.1V-9B style configuration
    >>> configuration = Glm4vVisionConfig()

    >>> # Initializing a model (with random weights) from the GLM-4.1V-9B configuration
    >>> model = Glm4vVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
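
    >>> # Hypothetical sketch (not part of the generated example): overriding the patching
    >>> # parameters documented above; the values shown simply mirror the `__init__` defaults below.
    >>> custom_configuration = Glm4vVisionConfig(patch_size=14, spatial_merge_size=2, temporal_patch_size=1)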
    ```"""

    model_type = "glm4v"
    base_config_key = "vision_config"

    def __init__(
        self,
        depth=24,
        hidden_size=1536,
        hidden_act="silu",
        attention_bias=False,
        attention_dropout=0.0,
        num_heads=12,
        in_channels=3,
        image_size=336,
        patch_size=14,
        rms_norm_eps=1e-05,
        spatial_merge_size=2,
        temporal_patch_size=1,
        out_hidden_size=4096,
        intermediate_size=13696,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.depth = depth
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.temporal_patch_size = temporal_patch_size
        self.out_hidden_size = out_hidden_size
        self.intermediate_size = intermediate_size
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout


class Glm4vTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vModel`]. It is used to instantiate a
    GLM-4.1V model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 151552):
            Vocabulary size of the Glm4v model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Glm4vModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 13696):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 40):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
            and you expect the model to work on a longer `max_position_embeddings`, we recommend you update this value
            accordingly. A usage sketch is shown in the example below.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to the value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
        image_token_id (`int`, *optional*):
            Token index used as placeholder for image embeddings.
        video_token_id (`int`, *optional*):
            Token index used as placeholder for video embeddings.

    ```python
    >>> from transformers import Glm4vTextModel, Glm4vTextConfig

    >>> # Initializing a GLM-4.1V style configuration
    >>> configuration = Glm4vTextConfig()

    >>> # Initializing a model from the GLM-4.1V style configuration
    >>> model = Glm4vTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
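
    >>> # Hypothetical sketch (not part of the generated example): a text config with YaRN-style
    >>> # RoPE scaling, following the `rope_scaling` dictionary documented above. The concrete
    >>> # values are illustrative only.
    >>> scaled_configuration = Glm4vTextConfig(
    ...     rope_scaling={"rope_type": "yarn", "factor": 4.0, "original_max_position_embeddings": 32768}
    ... )

    >>> # Setting `num_key_value_heads` equal to `num_attention_heads` falls back to standard
    >>> # multi-head attention instead of grouped-query attention.
    >>> mha_configuration = Glm4vTextConfig(num_attention_heads=32, num_key_value_heads=32)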
    ```"""

    model_type = "glm4v_text"
    base_config_key = "text_config"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `Glm4v`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_up_proj": "colwise_rep",  # we need to replicate here due to the `chunk` operation
        "layers.*.mlp.down_proj": "rowwise_rep",  # we need to replicate here due to the `chunk` operation
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=151552,
        hidden_size=4096,
        intermediate_size=13696,
        num_hidden_layers=40,
        num_attention_heads=32,
        num_key_value_heads=2,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-05,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        attention_dropout=0.0,
        rope_scaling=None,
        image_token_id=None,
        video_token_id=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.rope_scaling = rope_scaling

        # Validate the correctness of rotary position embeddings parameters
        # BC: if there is a 'type' field, move it to 'rope_type'.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self, ignore_keys={"mrope_section"})
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class Glm4vConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Glm4vModel`]. It is used to instantiate a
    GLM-4.1V model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of
    GLM-4.1V-9B-Thinking [THUDM/GLM-4.1V-9B-Thinking](https://huggingface.co/THUDM/GLM-4.1V-9B-Thinking).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vTextConfig`):
            The config object or dictionary of the text backbone (see the sketch in the example below).
        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vVisionConfig`):
            The config object or dictionary of the vision backbone.
        image_token_id (`int`, *optional*, defaults to 151343):
            The image token index to encode the image prompt.
        video_token_id (`int`, *optional*, defaults to 151344):
            The video token index to encode the video prompt.
        image_start_token_id (`int`, *optional*, defaults to 151339):
            The image start token index to encode the start of image.
        image_end_token_id (`int`, *optional*, defaults to 151340):
            The image end token index to encode the end of image.
        video_start_token_id (`int`, *optional*, defaults to 151341):
            The video start token index to encode the start of video.
        video_end_token_id (`int`, *optional*, defaults to 151342):
            The video end token index to encode the end of video.

    ```python
    >>> from transformers import Glm4vForConditionalGeneration, Glm4vConfig

    >>> # Initializing a GLM-4.1V style configuration
    >>> configuration = Glm4vConfig()

    >>> # Initializing a model from the GLM-4.1V style configuration
    >>> model = Glm4vForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
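
    >>> # Hypothetical sketch (not part of the generated example): composing the multimodal
    >>> # config from nested dictionaries, which are converted to the sub-configs listed in
    >>> # `sub_configs` below. The override values are illustrative only.
    >>> custom_configuration = Glm4vConfig(
    ...     text_config={"num_hidden_layers": 2},
    ...     vision_config={"depth": 2},
    ... )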
    ```"""

    model_type = "glm4v"
    sub_configs = {"vision_config": Glm4vVisionConfig, "text_config": Glm4vTextConfig}
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=151343,
        video_token_id=151344,
        image_start_token_id=151339,
        image_end_token_id=151340,
        video_start_token_id=151341,
        video_end_token_id=151342,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if isinstance(vision_config, dict):
            self.vision_config = self.sub_configs["vision_config"](**vision_config)
        elif vision_config is None:
            self.vision_config = self.sub_configs["vision_config"]()

        if isinstance(text_config, dict):
            self.text_config = self.sub_configs["text_config"](**text_config)
        elif text_config is None:
            # For BC use all kwargs to init `TextConfig`
            self.text_config = self.sub_configs["text_config"](**kwargs)

        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.video_start_token_id = video_start_token_id
        self.video_end_token_id = video_end_token_id
        self.image_start_token_id = image_start_token_id
        self.image_end_token_id = image_end_token_id


__all__ = ["Glm4vConfig", "Glm4vTextConfig"]