Adding all project files
parent 6c9e127bdc
commit cd4316ad0f
42289 changed files with 8009643 additions and 0 deletions
@@ -0,0 +1,27 @@
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_modernbert import *
    from .modeling_modernbert import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
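The `__init__.py` above wires the package up for lazy loading: under `TYPE_CHECKING` the star imports are visible to static tooling, while at runtime the module object is replaced by a `_LazyModule` proxy that only imports `configuration_modernbert` or `modeling_modernbert` when one of their attributes is first accessed. A minimal usage sketch (an illustration, assuming a recent `transformers` release that already ships ModernBERT):

```python
# Resolving ModernBertConfig loads only configuration_modernbert;
# modeling_modernbert stays unimported until a model class is requested.
from transformers import ModernBertConfig

config = ModernBertConfig()
print(config.model_type)  # "modernbert"
```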
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,224 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/modernbert/modular_modernbert.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_modernbert.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright 2024 Answer.AI, LightOn, and contributors, and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Literal

from ...configuration_utils import PretrainedConfig


class ModernBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate a ModernBert
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the ModernBERT-base.
    e.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50368):
            Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`ModernBertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 1152):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 22):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. Will default to `"gelu"`
            if not specified.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):
            The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        norm_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the normalization layers.
        pad_token_id (`int`, *optional*, defaults to 50283):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 50282):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 50281):
            Beginning of stream token id.
        cls_token_id (`int`, *optional*, defaults to 50281):
            Classification token id.
        sep_token_id (`int`, *optional*, defaults to 50282):
            Separation token id.
        global_rope_theta (`float`, *optional*, defaults to 160000.0):
            The base period of the global RoPE embeddings.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        global_attn_every_n_layers (`int`, *optional*, defaults to 3):
            The number of layers between global attention layers.
        local_attention (`int`, *optional*, defaults to 128):
            The window size for local attention.
        local_rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the local RoPE embeddings.
        embedding_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the MLP layers.
        mlp_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the MLP layers.
        decoder_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the decoder layers.
        classifier_pooling (`str`, *optional*, defaults to `"cls"`):
            The pooling method for the classifier. Should be either `"cls"` or `"mean"`. In local attention layers, the
            CLS token doesn't attend to all tokens on long sequences.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        classifier_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the classifier.
        classifier_activation (`str`, *optional*, defaults to `"gelu"`):
            The activation function for the classifier.
        deterministic_flash_attn (`bool`, *optional*, defaults to `False`):
            Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic.
        sparse_prediction (`bool`, *optional*, defaults to `False`):
            Whether to use sparse prediction for the masked language model instead of returning the full dense logits.
        sparse_pred_ignore_index (`int`, *optional*, defaults to -100):
            The index to ignore for the sparse prediction.
        reference_compile (`bool`, *optional*):
            Whether to compile the layers of the model which were compiled during pretraining. If `None`, then parts of
            the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not
            shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may
            be faster in some scenarios.
        repad_logits_with_grad (`bool`, *optional*, defaults to `False`):
            When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. This only
            applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient.

    Examples:

    ```python
    >>> from transformers import ModernBertModel, ModernBertConfig

    >>> # Initializing a ModernBert style configuration
    >>> configuration = ModernBertConfig()

    >>> # Initializing a model from the modernbert-base style configuration
    >>> model = ModernBertModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "modernbert"
    attribute_map = {"rope_theta": "global_rope_theta"}
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=50368,
        hidden_size=768,
        intermediate_size=1152,
        num_hidden_layers=22,
        num_attention_heads=12,
        hidden_activation="gelu",
        max_position_embeddings=8192,
        initializer_range=0.02,
        initializer_cutoff_factor=2.0,
        norm_eps=1e-5,
        norm_bias=False,
        pad_token_id=50283,
        eos_token_id=50282,
        bos_token_id=50281,
        cls_token_id=50281,
        sep_token_id=50282,
        global_rope_theta=160000.0,
        attention_bias=False,
        attention_dropout=0.0,
        global_attn_every_n_layers=3,
        local_attention=128,
        local_rope_theta=10000.0,
        embedding_dropout=0.0,
        mlp_bias=False,
        mlp_dropout=0.0,
        decoder_bias=True,
        classifier_pooling: Literal["cls", "mean"] = "cls",
        classifier_dropout=0.0,
        classifier_bias=False,
        classifier_activation="gelu",
        deterministic_flash_attn=False,
        sparse_prediction=False,
        sparse_pred_ignore_index=-100,
        reference_compile=None,
        repad_logits_with_grad=False,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            cls_token_id=cls_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_cutoff_factor = initializer_cutoff_factor
        self.norm_eps = norm_eps
        self.norm_bias = norm_bias
        self.global_rope_theta = global_rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.global_attn_every_n_layers = global_attn_every_n_layers
        self.local_attention = local_attention
        self.local_rope_theta = local_rope_theta
        self.embedding_dropout = embedding_dropout
        self.mlp_bias = mlp_bias
        self.mlp_dropout = mlp_dropout
        self.decoder_bias = decoder_bias
        self.classifier_pooling = classifier_pooling
        self.classifier_dropout = classifier_dropout
        self.classifier_bias = classifier_bias
        self.classifier_activation = classifier_activation
        self.deterministic_flash_attn = deterministic_flash_attn
        self.sparse_prediction = sparse_prediction
        self.sparse_pred_ignore_index = sparse_pred_ignore_index
        self.reference_compile = reference_compile
        self.repad_logits_with_grad = repad_logits_with_grad

        if self.classifier_pooling not in ["cls", "mean"]:
            raise ValueError(
                f'Invalid value for `classifier_pooling`, should be either "cls" or "mean", but is {self.classifier_pooling}.'
            )

    def to_dict(self):
        output = super().to_dict()
        output.pop("reference_compile", None)
        return output


__all__ = ["ModernBertConfig"]
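As a quick, hypothetical usage sketch for the configuration class above (illustration only, not part of the diff): `attribute_map` lets `rope_theta` act as an alias for `global_rope_theta`, and the constructor rejects any `classifier_pooling` value other than `"cls"` or `"mean"`. This assumes a `transformers` release that ships ModernBERT:

```python
from transformers import ModernBertConfig

# Override a couple of defaults; "mean" pooling averages all token states,
# which can matter on long sequences where local attention keeps the CLS
# token from attending to every position.
config = ModernBertConfig(classifier_pooling="mean", local_attention=256)

# attribute_map aliases rope_theta to global_rope_theta.
assert config.rope_theta == config.global_rope_theta == 160000.0

# Anything other than "cls" or "mean" is rejected at construction time.
try:
    ModernBertConfig(classifier_pooling="max")
except ValueError as err:
    print(err)
```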
File diff suppressed because it is too large
File diff suppressed because it is too large