Adding all project files
parent 6c9e127bdc
commit cd4316ad0f
42289 changed files with 8009643 additions and 0 deletions
29
venv/Lib/site-packages/transformers/models/xlm/__init__.py
Normal file
@@ -0,0 +1,29 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure


if TYPE_CHECKING:
    from .configuration_xlm import *
    from .modeling_tf_xlm import *
    from .modeling_xlm import *
    from .tokenization_xlm import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
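For readers unfamiliar with the lazy-module pattern above: at runtime the package module is swapped for a `_LazyModule`, so the submodules (`configuration_xlm`, `modeling_xlm`, ...) are only imported when one of their attributes is first accessed. A minimal sketch of what that means for callers, assuming the `transformers` package containing this file is installed:

    # Nothing from configuration_xlm / tokenization_xlm is imported yet.
    from transformers.models import xlm

    # The first attribute access triggers the real import behind the scenes.
    config_cls = xlm.XLMConfig
    print(config_cls.model_type)  # "xlm"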
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
241
venv/Lib/site-packages/transformers/models/xlm/configuration_xlm.py
Normal file
@@ -0,0 +1,241 @@
# coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XLM configuration"""

from collections import OrderedDict
from collections.abc import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)
class XLMConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`XLMModel`] or a [`TFXLMModel`]. It is used to
    instantiate a XLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the
    [FacebookAI/xlm-mlm-en-2048](https://huggingface.co/FacebookAI/xlm-mlm-en-2048) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30145):
            Vocabulary size of the XLM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`XLMModel`] or [`TFXLMModel`].
        emb_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of the encoder layers and the pooler layer.
        n_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention mechanism.
        gelu_activation (`bool`, *optional*, defaults to `True`):
            Whether or not to use *gelu* for the activations instead of *relu*.
        sinusoidal_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.
        causal (`bool`, *optional*, defaults to `False`):
            Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask
            in order to only attend to the left-side context instead of a bidirectional context.
        asm (`bool`, *optional*, defaults to `False`):
            Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the
            prediction layer.
        n_langs (`int`, *optional*, defaults to 1):
            The number of languages the model handles. Set to 1 for monolingual models.
        use_lang_emb (`bool`, *optional*, defaults to `True`):
            Whether to use language embeddings. Some models use additional language embeddings, see [the multilingual
            models page](http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings) for information
            on how to use them.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        embed_init_std (`float`, *optional*, defaults to 2048^-0.5):
            The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the
            embedding matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        bos_index (`int`, *optional*, defaults to 0):
            The index of the beginning of sentence token in the vocabulary.
        eos_index (`int`, *optional*, defaults to 1):
            The index of the end of sentence token in the vocabulary.
        pad_index (`int`, *optional*, defaults to 2):
            The index of the padding token in the vocabulary.
        unk_index (`int`, *optional*, defaults to 3):
            The index of the unknown token in the vocabulary.
        mask_index (`int`, *optional*, defaults to 5):
            The index of the masking token in the vocabulary.
        is_encoder (`bool`, *optional*, defaults to `True`):
            Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.
        summary_type (`string`, *optional*, defaults to "first"):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.

            Has to be one of the following options:

                - `"last"`: Take the last token hidden state (like XLNet).
                - `"first"`: Take the first token hidden state (like BERT).
                - `"mean"`: Take the mean of all tokens hidden states.
                - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
                - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.

            Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.

            Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
        summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
            Used in the sequence classification and multiple choice models.

            Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
        summary_first_dropout (`float`, *optional*, defaults to 0.1):
            Used in the sequence classification and multiple choice models.

            The dropout ratio to be used after the projection and activation.
        start_n_top (`int`, *optional*, defaults to 5):
            Used in the SQuAD evaluation script.
        end_n_top (`int`, *optional*, defaults to 5):
            Used in the SQuAD evaluation script.
        mask_token_id (`int`, *optional*, defaults to 0):
            Model agnostic parameter to identify masked tokens when generating text in an MLM context.
        lang_id (`int`, *optional*, defaults to 0):
            The ID of the language used by the model. This parameter is used when generating text in a given language.

    Examples:

    ```python
    >>> from transformers import XLMConfig, XLMModel

    >>> # Initializing a XLM configuration
    >>> configuration = XLMConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = XLMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


# Copied from transformers.models.bert.configuration_bert.BertOnnxConfig
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )


__all__ = ["XLMConfig", "XLMOnnxConfig"]
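As a quick illustration of how the `attribute_map` above plays with the config, here is a minimal sketch, assuming the `transformers` package from this commit is importable; the printed values simply reflect the defaults defined in `__init__`:

    from transformers import XLMConfig
    from transformers.models.xlm.configuration_xlm import XLMOnnxConfig

    config = XLMConfig()
    # attribute_map aliases the generic names onto XLM's own attribute names,
    # so hidden_size / num_hidden_layers resolve to emb_dim / n_layers.
    print(config.hidden_size, config.emb_dim)          # 2048 2048
    print(config.num_hidden_layers, config.n_layers)   # 12 12

    # XLMOnnxConfig declares dynamic batch/sequence axes for ONNX export.
    onnx_config = XLMOnnxConfig(config)
    print(onnx_config.inputs["input_ids"])             # {0: 'batch', 1: 'sequence'}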
1356
venv/Lib/site-packages/transformers/models/xlm/modeling_tf_xlm.py
Normal file
File diff suppressed because it is too large
1663
venv/Lib/site-packages/transformers/models/xlm/modeling_xlm.py
Normal file
File diff suppressed because it is too large
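Both modeling files are too large for this view. For orientation, a minimal usage sketch of the classes they define (assuming PyTorch is installed and the `FacebookAI/xlm-mlm-en-2048` checkpoint can be downloaded):

    import torch
    from transformers import XLMModel, XLMTokenizer

    tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
    model = XLMModel.from_pretrained("FacebookAI/xlm-mlm-en-2048")

    inputs = tokenizer("Hello, world!", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # (batch, sequence, emb_dim)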
577
venv/Lib/site-packages/transformers/models/xlm/tokenization_xlm.py
Normal file
@@ -0,0 +1,577 @@
# coding=utf-8
# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for XLM."""

import json
import os
import re
import sys
import unicodedata
from typing import Optional

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
    strings)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def lowercase_and_remove_accent(text):
    """
    Lowercase and strips accents from a piece of text based on
    https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py
    """
    text = " ".join(text)
    text = text.lower()
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
        cat = unicodedata.category(char)
        if cat == "Mn":
            continue
        output.append(char)
    return "".join(output).lower().split(" ")


def replace_unicode_punct(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
    """
    # Map fullwidth/CJK punctuation and digits to their ASCII equivalents.
    text = text.replace("，", ",")
    text = re.sub(r"。\s*", ". ", text)
    text = text.replace("、", ",")
    text = text.replace("”", '"')
    text = text.replace("“", '"')
    text = text.replace("∶", ":")
    text = text.replace("：", ":")
    text = text.replace("？", "?")
    text = text.replace("《", '"')
    text = text.replace("》", '"')
    text = text.replace("）", ")")
    text = text.replace("！", "!")
    text = text.replace("（", "(")
    text = text.replace("；", ";")
    text = text.replace("１", "1")
    text = text.replace("」", '"')
    text = text.replace("「", '"')
    text = text.replace("０", "0")
    text = text.replace("３", "3")
    text = text.replace("２", "2")
    text = text.replace("５", "5")
    text = text.replace("６", "6")
    text = text.replace("９", "9")
    text = text.replace("７", "7")
    text = text.replace("８", "8")
    text = text.replace("４", "4")
    text = re.sub(r"．\s*", ". ", text)
    text = text.replace("～", "~")
    text = text.replace("’", "'")
    text = text.replace("…", "...")
    text = text.replace("━", "-")
    text = text.replace("〈", "<")
    text = text.replace("〉", ">")
    text = text.replace("【", "[")
    text = text.replace("】", "]")
    text = text.replace("％", "%")
    return text


def remove_non_printing_char(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
    """
    output = []
    for char in text:
        cat = unicodedata.category(char)
        if cat.startswith("C"):
            continue
        output.append(char)
    return "".join(output)


def romanian_preprocessing(text):
    """Sennrich's WMT16 scripts for Romanian preprocessing, used by model `FacebookAI/xlm-mlm-enro-1024`"""
    # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/normalise-romanian.py
    text = text.replace("\u015e", "\u0218").replace("\u015f", "\u0219")
    text = text.replace("\u0162", "\u021a").replace("\u0163", "\u021b")
    # https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/remove-diacritics.py
    text = text.replace("\u0218", "S").replace("\u0219", "s")  # s-comma
    text = text.replace("\u021a", "T").replace("\u021b", "t")  # t-comma
    text = text.replace("\u0102", "A").replace("\u0103", "a")
    text = text.replace("\u00c2", "A").replace("\u00e2", "a")
    text = text.replace("\u00ce", "I").replace("\u00ee", "i")
    return text

class XLMTokenizer(PreTrainedTokenizer):
    """
    Construct an XLM tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:

    - Moses preprocessing and tokenization for most supported languages.
    - Language specific tokenization for Chinese (Jieba), Japanese (KyTea) and Thai (PyThaiNLP).
    - Optionally lowercases and normalizes all input text.
    - The argument `special_tokens` and the function `set_special_tokens` can be used to add additional symbols (like
      "__classify__") to a vocabulary.
    - The `lang2id` attribute maps the languages supported by the model with their IDs if provided (automatically set
      for pretrained vocabularies).
    - The `id2lang` attribute does reverse mapping if provided (automatically set for pretrained vocabularies).

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Vocabulary file.
        merges_file (`str`):
            Merges file.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"</s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (`str`, *optional*, defaults to `"<special1>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (`List[str]`, *optional*, defaults to `['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>']`):
            List of additional special tokens.
        lang2id (`Dict[str, int]`, *optional*):
            Dictionary mapping languages string identifiers to their IDs.
        id2lang (`Dict[int, str]`, *optional*):
            Dictionary mapping language IDs to their string identifiers.
        do_lowercase_and_remove_accent (`bool`, *optional*, defaults to `True`):
            Whether to lowercase and remove accents when tokenizing.
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        merges_file,
        unk_token="<unk>",
        bos_token="<s>",
        sep_token="</s>",
        pad_token="<pad>",
        cls_token="</s>",
        mask_token="<special1>",
        additional_special_tokens=[
            "<special0>",
            "<special1>",
            "<special2>",
            "<special3>",
            "<special4>",
            "<special5>",
            "<special6>",
            "<special7>",
            "<special8>",
            "<special9>",
        ],
        lang2id=None,
        id2lang=None,
        do_lowercase_and_remove_accent=True,
        **kwargs,
    ):
        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use XLMTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )

        self.sm = sacremoses

        # cache of sm.MosesPunctNormalizer instance
        self.cache_moses_punct_normalizer = {}
        # cache of sm.MosesTokenizer instance
        self.cache_moses_tokenizer = {}
        self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
        # True for current supported model (v1.2.0), False for XLM-17 & 100
        self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
        self.lang2id = lang2id
        self.id2lang = id2lang
        if lang2id is not None and id2lang is not None:
            assert len(lang2id) == len(id2lang)

        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            lang2id=lang2id,
            id2lang=id2lang,
            do_lowercase_and_remove_accent=do_lowercase_and_remove_accent,
            **kwargs,
        )

    @property
    def do_lower_case(self):
        return self.do_lowercase_and_remove_accent

    def moses_punct_norm(self, text, lang):
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)

    def moses_tokenize(self, text, lang):
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)

    def moses_pipeline(self, text, lang):
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    def ja_tokenize(self, text):
        if self.ja_word_tokenizer is None:
            try:
                import Mykytea

                self.ja_word_tokenizer = Mykytea.Mykytea(
                    f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
                )
            except (AttributeError, ImportError):
                logger.error(
                    "Make sure you install KyTea (https://github.com/neubig/kytea) and its Python wrapper"
                    " (https://github.com/chezou/Mykytea-python) with the following steps"
                )
                logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
                logger.error("2. autoreconf -i")
                logger.error("3. ./configure --prefix=$HOME/local")
                logger.error("4. make && make install")
                logger.error("5. pip install kytea")
                raise
        return list(self.ja_word_tokenizer.getWS(text))

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        # Apply BPE merges to a single pre-tokenized token; the last character is
        # suffixed with "</w>" so end-of-word merges are distinct from word-internal ones.
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token + "</w>"

        while True:
            # Merge the lowest-ranked known pair first; stop when no pair has a rank.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n </w>":
            word = "\n</w>"
        self.cache[token] = word
        return word
    def _tokenize(self, text, lang="en", bypass_tokenizer=False):
        """
        Tokenize a string given language code. For Chinese, Japanese and Thai, we use a language specific tokenizer.
        Otherwise, we use Moses.

        Details of tokenization:

        - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
            - Install with `pip install sacremoses`
        - [pythainlp](https://github.com/PyThaiNLP/pythainlp): Thai tokenizer
            - Install with `pip install pythainlp`
        - [kytea](https://github.com/chezou/Mykytea-python): Japanese tokenizer, wrapper of
          [KyTea](https://github.com/neubig/kytea)
            - Install with the following steps:

            ::

                git clone git@github.com:neubig/kytea.git && cd kytea
                autoreconf -i
                ./configure --prefix=$HOME/local
                make && make install
                pip install kytea

        - [jieba](https://github.com/fxsjy/jieba): Chinese tokenizer (*)
            - Install with `pip install jieba`

        (*) The original XLM used [Stanford
        Segmenter](https://nlp.stanford.edu/software/stanford-segmenter-2018-10-16.zip). However, the wrapper
        (`nltk.tokenize.stanford_segmenter`) is slow due to JVM overhead, and it will be deprecated. Jieba is a lot
        faster and pip-installable. Note there is some mismatch with the Stanford Segmenter. It should be fine if you
        fine-tune the model with Chinese supervision. If you want the same exact behaviour, use the original XLM
        [preprocessing script](https://github.com/facebookresearch/XLM/tree/master/tools) to tokenize the sentence
        externally, and set `bypass_tokenizer=True` to bypass the tokenizer.

        Args:
            - lang: ISO language code (default = 'en') (string). Languages should be among the languages supported by
              the model. However, we don't enforce it.
            - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
              (bool). If True, we only apply BPE.

        Returns:
            List of tokens.
        """
        if lang and self.lang2id and lang not in self.lang2id:
            logger.error(
                "Supplied language code not found in lang2id mapping. Please check that your language is supported by"
                " the loaded pretrained model."
            )
        if bypass_tokenizer:
            text = text.split()
        elif lang not in self.lang_with_custom_tokenizer:
            text = self.moses_pipeline(text, lang=lang)
            # TODO: make sure we are using `FacebookAI/xlm-mlm-enro-1024`, since XLM-100 doesn't have this step
            if lang == "ro":
                text = romanian_preprocessing(text)
            text = self.moses_tokenize(text, lang=lang)
        elif lang == "th":
            text = self.moses_pipeline(text, lang=lang)
            try:
                if "pythainlp" not in sys.modules:
                    from pythainlp.tokenize import word_tokenize as th_word_tokenize
                else:
                    th_word_tokenize = sys.modules["pythainlp"].word_tokenize
            except (AttributeError, ImportError):
                logger.error(
                    "Make sure you install PyThaiNLP (https://github.com/PyThaiNLP/pythainlp) with the following steps"
                )
                logger.error("1. pip install pythainlp")
                raise
            text = th_word_tokenize(text)
        elif lang == "zh":
            try:
                if "jieba" not in sys.modules:
                    import jieba
                else:
                    jieba = sys.modules["jieba"]
            except (AttributeError, ImportError):
                logger.error("Make sure you install Jieba (https://github.com/fxsjy/jieba) with the following steps")
                logger.error("1. pip install jieba")
                raise
            text = " ".join(jieba.cut(text))
            text = self.moses_pipeline(text, lang=lang)
            text = text.split()
        elif lang == "ja":
            text = self.moses_pipeline(text, lang=lang)
            text = self.ja_tokenize(text)
        else:
            raise ValueError("It should not reach here")

        if self.do_lowercase_and_remove_accent and not bypass_tokenizer:
            text = lowercase_and_remove_accent(text)

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = "".join(tokens).replace("</w>", " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
    ) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. An XLM sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        bos = [self.bos_token_id]
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return bos + token_ids_0 + sep
        return bos + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
    ) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def __getstate__(self):
        # The sacremoses module object is not picklable; drop it before pickling.
        state = self.__dict__.copy()
        state["sm"] = None
        return state

    def __setstate__(self, d):
        # Re-import sacremoses when the tokenizer is unpickled.
        self.__dict__ = d

        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use XLMTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )

        self.sm = sacremoses


__all__ = ["XLMTokenizer"]
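To tie the tokenizer pieces together, here is a small usage sketch; it assumes a pretrained XLM checkpoint such as `FacebookAI/xlm-mlm-en-2048` can be downloaded and that `sacremoses` is installed:

    from transformers import XLMTokenizer

    tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")

    # _tokenize: Moses pipeline + BPE (lowercased and accent-stripped by default)
    tokens = tokenizer.tokenize("Hello, world!", lang="en")

    # build_inputs_with_special_tokens wraps the ids as <s> ... </s>
    ids = tokenizer.encode("Hello, world!")
    assert ids[0] == tokenizer.bos_token_id and ids[-1] == tokenizer.sep_token_id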