Adding all project files

Martina Burlando 2025-08-02 02:00:33 +02:00
parent 6c9e127bdc
commit cd4316ad0f
42289 changed files with 8009643 additions and 0 deletions


@@ -0,0 +1,6 @@
from .base_tokenizer import BaseTokenizer
from .bert_wordpiece import BertWordPieceTokenizer
from .byte_level_bpe import ByteLevelBPETokenizer
from .char_level_bpe import CharBPETokenizer
from .sentencepiece_bpe import SentencePieceBPETokenizer
from .sentencepiece_unigram import SentencePieceUnigramTokenizer
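
For orientation, a minimal sketch of how these exported classes are used; the `tokenizers.implementations` import path assumes the upstream package layout and is not confirmed by this commit.

# Minimal sketch; the import path assumes the upstream `tokenizers` package layout.
from tokenizers.implementations import (
    BertWordPieceTokenizer,
    ByteLevelBPETokenizer,
    CharBPETokenizer,
    SentencePieceBPETokenizer,
    SentencePieceUnigramTokenizer,
)

# Each class wraps a pre-configured `tokenizers.Tokenizer` pipeline.
tok = ByteLevelBPETokenizer()
print(type(tok).__name__, tok.get_vocab_size())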


@@ -0,0 +1,418 @@
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import Model
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
from tokenizers.processors import PostProcessor
Offsets = Tuple[int, int]
class BaseTokenizer:
def __init__(self, tokenizer: Tokenizer, parameters=None):
self._tokenizer = tokenizer
self._parameters = parameters if parameters is not None else {}
def __repr__(self):
return "Tokenizer(vocabulary_size={}, {})".format(
self._tokenizer.get_vocab_size(),
", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
)
def num_special_tokens_to_add(self, is_pair: bool) -> int:
"""
Return the number of special tokens that would be added for single/pair sentences.
:param is_pair: Whether the input is a pair of sequences (True) or a single sequence (False)
:return: The number of special tokens that would be added
"""
return self._tokenizer.num_special_tokens_to_add(is_pair)
def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
"""Returns the vocabulary
Args:
with_added_tokens: boolean:
Whether to include the added tokens in the vocabulary
Returns:
The vocabulary
"""
return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)
def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
"""Returns the added reverse vocabulary
Returns:
The added vocabulary mapping ints to AddedTokens
"""
return self._tokenizer.get_added_tokens_decoder()
def get_vocab_size(self, with_added_tokens: bool = True) -> int:
"""Return the size of vocabulary, with or without added tokens.
Args:
with_added_tokens: (`optional`) bool:
Whether to count in added special tokens or not
Returns:
Size of vocabulary
"""
return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
def enable_padding(
self,
direction: Optional[str] = "right",
pad_to_multiple_of: Optional[int] = None,
pad_id: Optional[int] = 0,
pad_type_id: Optional[int] = 0,
pad_token: Optional[str] = "[PAD]",
length: Optional[int] = None,
):
"""Change the padding strategy
Args:
direction: (`optional`) str:
Can be one of: `right` or `left`
pad_to_multiple_of: (`optional`) unsigned int:
If specified, the padding length always snaps to the next multiple of the
given value. For example, if we would otherwise pad to a length of 250 and
`pad_to_multiple_of=8`, we pad to 256 instead.
pad_id: (`optional`) unsigned int:
The id to be used when padding
pad_type_id: (`optional`) unsigned int:
The type id to be used when padding
pad_token: (`optional`) str:
The pad token to be used when padding
length: (`optional`) unsigned int:
If specified, the length at which to pad. If not specified,
we pad using the size of the longest sequence in the batch
"""
return self._tokenizer.enable_padding(
direction=direction,
pad_to_multiple_of=pad_to_multiple_of,
pad_id=pad_id,
pad_type_id=pad_type_id,
pad_token=pad_token,
length=length,
)
def no_padding(self):
"""Disable padding"""
return self._tokenizer.no_padding()
@property
def padding(self) -> Optional[dict]:
"""Get the current padding parameters
Returns:
None if padding is disabled, a dict with the currently set parameters
if the padding is enabled.
"""
return self._tokenizer.padding
def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"):
"""Change the truncation options
Args:
max_length: unsigned int:
The maximum length at which to truncate
stride: (`optional`) unsigned int:
The number of tokens from the end of the truncated sequence to repeat at the
beginning of each overflowing sequence, providing overlap between chunks
strategy: (`optional`) str:
Can be one of `longest_first`, `only_first` or `only_second`
"""
return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
def no_truncation(self):
"""Disable truncation"""
return self._tokenizer.no_truncation()
@property
def truncation(self) -> Optional[dict]:
"""Get the current truncation parameters
Returns:
None if truncation is disabled, a dict with the current truncation parameters if
truncation is enabled
"""
return self._tokenizer.truncation
def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
"""Add the given tokens to the vocabulary
Args:
tokens: List[Union[str, AddedToken]]:
A list of tokens to add to the vocabulary. Each token can either be
a string, or an instance of AddedToken
Returns:
The number of tokens that were added to the vocabulary
"""
return self._tokenizer.add_tokens(tokens)
def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
"""Add the given special tokens to the vocabulary, and treat them as special tokens.
The special tokens will never be processed by the model, and will be
removed while decoding.
Args:
special_tokens: List[Union[str, AddedToken]]:
A list of special tokens to add to the vocabulary. Each token can either be
a string, or an instance of AddedToken
Returns:
The number of tokens that were added to the vocabulary
"""
return self._tokenizer.add_special_tokens(special_tokens)
def normalize(self, sequence: str) -> str:
"""Normalize the given sequence
Args:
sequence: str:
The sequence to normalize
Returns:
The normalized string
"""
return self._tokenizer.normalize(sequence)
def encode(
self,
sequence: InputSequence,
pair: Optional[InputSequence] = None,
is_pretokenized: bool = False,
add_special_tokens: bool = True,
) -> Encoding:
"""Encode the given sequence and pair. This method can process raw text sequences as well
as already pre-tokenized sequences.
Args:
sequence: InputSequence:
The sequence we want to encode. This sequence can be either raw text or
pre-tokenized, according to the `is_pretokenized` argument:
- If `is_pretokenized=False`: `InputSequence` is expected to be `str`
- If `is_pretokenized=True`: `InputSequence` is expected to be
`Union[List[str], Tuple[str]]`
is_pretokenized: bool:
Whether the input is already pre-tokenized.
add_special_tokens: bool:
Whether to add the special tokens while encoding.
Returns:
An Encoding
"""
if sequence is None:
raise ValueError("encode: `sequence` can't be `None`")
return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
def encode_batch(
self,
inputs: List[EncodeInput],
is_pretokenized: bool = False,
add_special_tokens: bool = True,
) -> List[Encoding]:
"""Encode the given inputs. This method accept both raw text sequences as well as already
pre-tokenized sequences.
Args:
inputs: List[EncodeInput]:
A list of single sequences or pair sequences to encode. Each `EncodeInput` is
expected to be of the following form:
`Union[InputSequence, Tuple[InputSequence, InputSequence]]`
Each `InputSequence` can either be raw text or pre-tokenized,
according to the `is_pretokenized` argument:
- If `is_pretokenized=False`: `InputSequence` is expected to be `str`
- If `is_pretokenized=True`: `InputSequence` is expected to be
`Union[List[str], Tuple[str]]`
is_pretokenized: bool:
Whether the input is already pre-tokenized.
add_special_tokens: bool:
Whether to add the special tokens while encoding.
Returns:
A list of Encoding
"""
if inputs is None:
raise ValueError("encode_batch: `inputs` can't be `None`")
return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
"""Decode the given list of ids to a string sequence
Args:
ids: List[unsigned int]:
A list of ids to be decoded
skip_special_tokens: (`optional`) boolean:
Whether to remove all the special tokens from the output string
Returns:
The decoded string
"""
if ids is None:
raise ValueError("None input is not valid. Should be a list of integers.")
return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
"""Decode the list of id sequences into a list of strings
Args:
sequences: List[List[unsigned int]]:
A list of sequence of ids to be decoded
skip_special_tokens: (`optional`) boolean:
Whether to remove all the special tokens from the output strings
Returns:
A list of decoded strings
"""
if sequences is None:
raise ValueError("None input is not valid. Should be list of list of integers.")
return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
def token_to_id(self, token: str) -> Optional[int]:
"""Convert the given token to its corresponding id
Args:
token: str:
The token to convert
Returns:
The corresponding id if it exists, None otherwise
"""
return self._tokenizer.token_to_id(token)
def id_to_token(self, id: int) -> Optional[str]:
"""Convert the given token id to its corresponding string
Args:
id: int:
The token id to convert
Returns:
The corresponding string if it exists, None otherwise
"""
return self._tokenizer.id_to_token(id)
def save_model(self, directory: str, prefix: Optional[str] = None):
"""Save the current model to the given directory
Args:
directory: str:
A path to the destination directory
prefix: (`optional`) str:
An optional prefix, used to prefix each file name
"""
return self._tokenizer.model.save(directory, prefix=prefix)
def save(self, path: str, pretty: bool = True):
"""Save the current Tokenizer at the given path
Args:
path: str:
A path to the destination Tokenizer file
pretty: bool:
Whether the JSON file should be prettified
"""
return self._tokenizer.save(path, pretty)
def to_str(self, pretty: bool = False):
"""Get a serialized JSON version of the Tokenizer as a str
Args:
pretty: bool:
Whether the JSON string should be prettified
Returns:
str
"""
return self._tokenizer.to_str(pretty)
def post_process(
self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
) -> Encoding:
"""Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to global params (provided to `enable_truncation`)
2. Apply the PostProcessor
3. Pad according to global params (provided to `enable_padding`)
Args:
encoding: Encoding:
The main Encoding to post process
pair: Optional[Encoding]:
An optional pair Encoding
add_special_tokens: bool:
Whether to add special tokens
Returns:
The resulting Encoding
"""
return self._tokenizer.post_process(encoding, pair, add_special_tokens)
@property
def model(self) -> Model:
return self._tokenizer.model
@model.setter
def model(self, model: Model):
self._tokenizer.model = model
@property
def normalizer(self) -> Normalizer:
return self._tokenizer.normalizer
@normalizer.setter
def normalizer(self, normalizer: Normalizer):
self._tokenizer.normalizer = normalizer
@property
def pre_tokenizer(self) -> PreTokenizer:
return self._tokenizer.pre_tokenizer
@pre_tokenizer.setter
def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
self._tokenizer.pre_tokenizer = pre_tokenizer
@property
def post_processor(self) -> PostProcessor:
return self._tokenizer.post_processor
@post_processor.setter
def post_processor(self, post_processor: PostProcessor):
self._tokenizer.post_processor = post_processor
@property
def decoder(self) -> Decoder:
return self._tokenizer.decoder
@decoder.setter
def decoder(self, decoder: Decoder):
self._tokenizer.decoder = decoder
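
A hedged usage sketch of the BaseTokenizer surface (padding, truncation, encode, decode), exercised through a concrete subclass since the base class is only a thin wrapper around `tokenizers.Tokenizer`; the tiny vocabulary and the import path are illustrative assumptions.

# Sketch only: tiny in-memory vocabulary, import path assumes the upstream layout.
from tokenizers.implementations import BertWordPieceTokenizer

vocab = {
    "[PAD]": 0, "[UNK]": 1, "[CLS]": 2, "[SEP]": 3, "[MASK]": 4,
    "hello": 5, "world": 6, "##s": 7,
}
tok = BertWordPieceTokenizer(vocab, lowercase=True)

# Global truncation/padding parameters, as set by enable_truncation / enable_padding.
tok.enable_truncation(max_length=8, stride=2)
tok.enable_padding(pad_id=0, pad_token="[PAD]", length=8)

enc = tok.encode("Hello worlds")
print(enc.tokens)              # e.g. ['[CLS]', 'hello', 'world', '##s', '[SEP]', '[PAD]', ...]
print(tok.decode(enc.ids))     # special tokens are skipped by default
print(tok.padding, tok.truncation)  # the currently active global parameters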


@@ -0,0 +1,151 @@
from typing import Dict, Iterator, List, Optional, Union
from tokenizers import AddedToken, Tokenizer, decoders, trainers
from tokenizers.models import WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.processors import BertProcessing
from .base_tokenizer import BaseTokenizer
class BertWordPieceTokenizer(BaseTokenizer):
"""Bert WordPiece Tokenizer"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
unk_token: Union[str, AddedToken] = "[UNK]",
sep_token: Union[str, AddedToken] = "[SEP]",
cls_token: Union[str, AddedToken] = "[CLS]",
pad_token: Union[str, AddedToken] = "[PAD]",
mask_token: Union[str, AddedToken] = "[MASK]",
clean_text: bool = True,
handle_chinese_chars: bool = True,
strip_accents: Optional[bool] = None,
lowercase: bool = True,
wordpieces_prefix: str = "##",
):
if vocab is not None:
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
else:
tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
# Let the tokenizer know about special tokens if they are part of the vocab
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
if tokenizer.token_to_id(str(sep_token)) is not None:
tokenizer.add_special_tokens([str(sep_token)])
if tokenizer.token_to_id(str(cls_token)) is not None:
tokenizer.add_special_tokens([str(cls_token)])
if tokenizer.token_to_id(str(pad_token)) is not None:
tokenizer.add_special_tokens([str(pad_token)])
if tokenizer.token_to_id(str(mask_token)) is not None:
tokenizer.add_special_tokens([str(mask_token)])
tokenizer.normalizer = BertNormalizer(
clean_text=clean_text,
handle_chinese_chars=handle_chinese_chars,
strip_accents=strip_accents,
lowercase=lowercase,
)
tokenizer.pre_tokenizer = BertPreTokenizer()
if vocab is not None:
sep_token_id = tokenizer.token_to_id(str(sep_token))
if sep_token_id is None:
raise TypeError("sep_token not found in the vocabulary")
cls_token_id = tokenizer.token_to_id(str(cls_token))
if cls_token_id is None:
raise TypeError("cls_token not found in the vocabulary")
tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
parameters = {
"model": "BertWordPiece",
"unk_token": unk_token,
"sep_token": sep_token,
"cls_token": cls_token,
"pad_token": pad_token,
"mask_token": mask_token,
"clean_text": clean_text,
"handle_chinese_chars": handle_chinese_chars,
"strip_accents": strip_accents,
"lowercase": lowercase,
"wordpieces_prefix": wordpieces_prefix,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab: str, **kwargs):
vocab = WordPiece.read_file(vocab)
return BertWordPieceTokenizer(vocab, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
):
"""Train the model using the given files"""
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
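
A hedged end-to-end sketch for BertWordPieceTokenizer; `corpus.txt` and `out_dir` are hypothetical paths, and the import path assumes the upstream package layout.

# Sketch: train from scratch, save, and reload. Paths are placeholders.
import os
from tokenizers.implementations import BertWordPieceTokenizer

tokenizer = BertWordPieceTokenizer(lowercase=True)
tokenizer.train(["corpus.txt"], vocab_size=5000, min_frequency=2)

os.makedirs("out_dir", exist_ok=True)
tokenizer.save_model("out_dir")            # writes the WordPiece vocab (vocab.txt)
tokenizer.save("out_dir/tokenizer.json")   # full pipeline as a single JSON file

# Reloading from the saved vocab re-enables the [CLS]/[SEP] post-processor.
reloaded = BertWordPieceTokenizer.from_file("out_dir/vocab.txt")
print(reloaded.encode("Training works!").tokens)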


@@ -0,0 +1,122 @@
from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class ByteLevelBPETokenizer(BaseTokenizer):
"""ByteLevelBPETokenizer
Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, List[Tuple[str, str]]]] = None,
add_prefix_space: bool = False,
lowercase: bool = False,
dropout: Optional[float] = None,
unicode_normalizer: Optional[str] = None,
continuing_subword_prefix: Optional[str] = None,
end_of_word_suffix: Optional[str] = None,
trim_offsets: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
continuing_subword_prefix=continuing_subword_prefix or "",
end_of_word_suffix=end_of_word_suffix or "",
)
)
else:
tokenizer = Tokenizer(BPE())
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
parameters = {
"model": "ByteLevelBPE",
"add_prefix_space": add_prefix_space,
"lowercase": lowercase,
"dropout": dropout,
"unicode_normalizer": unicode_normalizer,
"continuing_subword_prefix": continuing_subword_prefix,
"end_of_word_suffix": end_of_word_suffix,
"trim_offsets": trim_offsets,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return ByteLevelBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
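
A hedged sketch for ByteLevelBPETokenizer, training from an in-memory iterator so no files need to exist; import path and the toy corpus are assumptions.

# Sketch: byte-level BPE needs no unknown token, since every byte is in the alphabet.
from tokenizers.implementations import ByteLevelBPETokenizer

corpus = ["Byte-level BPE works on raw bytes.", "No unknown tokens are needed."]
tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
tokenizer.train_from_iterator(corpus, vocab_size=500, min_frequency=1)

enc = tokenizer.encode("Byte-level BPE")
print(enc.tokens)                 # byte-level pieces, e.g. ['ĠByte', '-', 'level', ...]
print(tokenizer.decode(enc.ids))  # round-trips back to the original text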


@@ -0,0 +1,150 @@
from typing import Dict, Iterator, List, Optional, Tuple, Union
from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from ..models import BPE
from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class CharBPETokenizer(BaseTokenizer):
"""Original BPE Tokenizer
Represents the BPE algorithm, as introduced by Rico Sennrich
(https://arxiv.org/abs/1508.07909)
The default settings correspond to the OpenAI GPT BPE tokenizer and differ from the original
Sennrich subword-nmt implementation in the following options, which you can deactivate:
- adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
* removing any control characters and replacing all whitespace with the classic space.
* handling Chinese characters by putting spaces around them.
* stripping all accents.
- splitting on punctuation in addition to whitespace (deactivate it with
`split_on_whitespace_only=True`)
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, List[Tuple[str, str]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
suffix: str = "</w>",
dropout: Optional[float] = None,
lowercase: bool = False,
unicode_normalizer: Optional[str] = None,
bert_normalizer: bool = True,
split_on_whitespace_only: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
unk_token=str(unk_token),
end_of_word_suffix=suffix,
)
)
else:
tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if bert_normalizer:
normalizers += [BertNormalizer(lowercase=False)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
if split_on_whitespace_only:
tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
else:
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)
parameters = {
"model": "BPE",
"unk_token": unk_token,
"suffix": suffix,
"dropout": dropout,
"lowercase": lowercase,
"unicode_normalizer": unicode_normalizer,
"bert_normalizer": bert_normalizer,
"split_on_whitespace_only": split_on_whitespace_only,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return CharBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
suffix: Optional[str] = "</w>",
show_progress: bool = True,
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
end_of_word_suffix=suffix,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
suffix: Optional[str] = "</w>",
show_progress: bool = True,
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
end_of_word_suffix=suffix,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
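
A hedged sketch for CharBPETokenizer (Sennrich-style BPE with an end-of-word suffix), trained from an iterator; the toy corpus and import path are assumptions.

# Sketch: classic BPE with the default "</w>" end-of-word suffix and <unk> token.
from tokenizers.implementations import CharBPETokenizer

corpus = ["low lower lowest", "new newer newest", "wide wider widest"]
tokenizer = CharBPETokenizer()
tokenizer.train_from_iterator(corpus, vocab_size=100, min_frequency=1)

enc = tokenizer.encode("lower newest")
print(enc.tokens)                 # merged subwords, e.g. ['low', 'er</w>', 'newest</w>']
print(tokenizer.decode(enc.ids))  # the BPEDecoder strips the "</w>" suffix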


@@ -0,0 +1,103 @@
from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import NFKC
from .base_tokenizer import BaseTokenizer
class SentencePieceBPETokenizer(BaseTokenizer):
"""SentencePiece BPE Tokenizer
Represents the BPE algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, List[Tuple[str, str]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
replacement: str = "▁",
add_prefix_space: bool = True,
dropout: Optional[float] = None,
fuse_unk: Optional[bool] = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
else:
tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
tokenizer.normalizer = NFKC()
prepend_scheme = "always" if add_prefix_space else "never"
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
parameters = {
"model": "SentencePieceBPE",
"unk_token": unk_token,
"replacement": replacement,
"add_prefix_space": add_prefix_space,
"dropout": dropout,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return SentencePieceBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
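
A hedged sketch for SentencePieceBPETokenizer, which combines BPE with Metaspace ("▁") pre-tokenization; the toy corpus and import path are assumptions.

# Sketch: word boundaries are encoded with the metaspace character "▁".
from tokenizers.implementations import SentencePieceBPETokenizer

corpus = ["sentencepiece style pre tokenization", "uses the metaspace character"]
tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(corpus, vocab_size=200, min_frequency=1)

enc = tokenizer.encode("metaspace character")
print(enc.tokens)                 # pieces prefixed with '▁' where a new word starts
print(tokenizer.decode(enc.ids))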


@@ -0,0 +1,196 @@
import json
import os
from typing import Iterator, List, Optional, Union, Tuple
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.models import Unigram
from .base_tokenizer import BaseTokenizer
class SentencePieceUnigramTokenizer(BaseTokenizer):
"""SentencePiece Unigram Tokenizer
Represents the Unigram algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[List[Tuple[str, float]]] = None,
replacement: str = "▁",
add_prefix_space: bool = True,
):
if vocab is not None:
# Let Unigram(..) fail if only one of them is None
tokenizer = Tokenizer(Unigram(vocab))
else:
tokenizer = Tokenizer(Unigram())
tokenizer.normalizer = normalizers.Sequence(
[normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
)
prepend_scheme = "always" if add_prefix_space else "never"
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
parameters = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(tokenizer, parameters)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
):
"""
Train the model using the given files
Args:
files (:obj:`List[str]`):
A list of paths to the files that we should use for training
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
length: Optional[int] = None,
):
"""
Train the model using the given iterator
Args:
iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
Any iterator over strings or lists of strings
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
@staticmethod
def from_spm(filename: str):
try:
import sys
sys.path.append(".")
import sentencepiece_model_pb2 as model
except Exception:
raise Exception(
"You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
)
m = model.ModelProto()
m.ParseFromString(open(filename, "rb").read())
precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
vocab = [(piece.piece, piece.score) for piece in m.pieces]
unk_id = m.trainer_spec.unk_id
model_type = m.trainer_spec.model_type
byte_fallback = m.trainer_spec.byte_fallback
if model_type != 1:
raise Exception(
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
)
replacement = ""
add_prefix_space = True
tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))
if precompiled_charsmap:
tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Precompiled(precompiled_charsmap),
normalizers.Replace(Regex(" {2,}"), " "),
]
)
else:
tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
prepend_scheme = "always" if add_prefix_space else "never"
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme)
parameters = {
"model": "SentencePieceUnigram",
}
obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
BaseTokenizer.__init__(obj, tokenizer, parameters)
return obj
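
A hedged sketch for SentencePieceUnigramTokenizer trained from an iterator; `from_spm` additionally needs the protobuf module described in the error message above, and the toy corpus and import path here are assumptions.

# Sketch: the Unigram model keeps whole pieces and prunes them by likelihood.
from tokenizers.implementations import SentencePieceUnigramTokenizer

corpus = [
    "unigram models score whole pieces",
    "pieces are pruned by their likelihood",
    "the metaspace character marks word starts",
]
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(corpus, vocab_size=100,
                              special_tokens=["<unk>"], unk_token="<unk>")

enc = tokenizer.encode("unigram pieces")
print(enc.tokens)                 # e.g. ['▁unigram', '▁pieces'] or smaller pieces
print(tokenizer.decode(enc.ids))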