# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Union

import safetensors
import torch
from huggingface_hub.utils import validate_hf_hub_args
from torch import nn

from ..models.modeling_utils import load_state_dict
from ..utils import _get_model_file, is_accelerate_available, is_transformers_available, logging


if is_transformers_available():
    from transformers import PreTrainedModel, PreTrainedTokenizer

if is_accelerate_available():
    from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module

logger = logging.get_logger(__name__)

TEXT_INVERSION_NAME = "learned_embeds.bin"
TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors"

@validate_hf_hub_args
def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs):
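    r"""
    Download (if necessary) and load the raw textual inversion state dicts for one or more embeddings.

    Each entry of `pretrained_model_name_or_paths` may be a Hub model id, a local directory or file path, or an
    already-loaded state dict / tensor, which is passed through unchanged. `.safetensors` weights are tried first;
    unless `use_safetensors=True` was passed explicitly, the loader silently falls back to the pickled `.bin` format.

    A minimal usage sketch (the repo id is only an example):

    ```py
    state_dicts = load_textual_inversion_state_dicts(["sd-concepts-library/cat-toy"])
    ```
    """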
    cache_dir = kwargs.pop("cache_dir", None)
    force_download = kwargs.pop("force_download", False)
    proxies = kwargs.pop("proxies", None)
    local_files_only = kwargs.pop("local_files_only", None)
    hf_token = kwargs.pop("hf_token", None)
    revision = kwargs.pop("revision", None)
    subfolder = kwargs.pop("subfolder", None)
    weight_name = kwargs.pop("weight_name", None)
    use_safetensors = kwargs.pop("use_safetensors", None)

    allow_pickle = False
    if use_safetensors is None:
        use_safetensors = True
        allow_pickle = True

    user_agent = {
        "file_type": "text_inversion",
        "framework": "pytorch",
    }
    state_dicts = []
    for pretrained_model_name_or_path in pretrained_model_name_or_paths:
        if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)):
            # 3.1. Load textual inversion file
            model_file = None

            # Let's first try to load .safetensors weights
            if (use_safetensors and weight_name is None) or (
                weight_name is not None and weight_name.endswith(".safetensors")
            ):
                try:
                    model_file = _get_model_file(
                        pretrained_model_name_or_path,
                        weights_name=weight_name or TEXT_INVERSION_NAME_SAFE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        token=hf_token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = safetensors.torch.load_file(model_file, device="cpu")
                except Exception as e:
                    if not allow_pickle:
                        raise e

                    model_file = None

            if model_file is None:
                model_file = _get_model_file(
                    pretrained_model_name_or_path,
                    weights_name=weight_name or TEXT_INVERSION_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=hf_token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                state_dict = load_state_dict(model_file)
        else:
            state_dict = pretrained_model_name_or_path

        state_dicts.append(state_dict)

    return state_dicts

class TextualInversionLoaderMixin:
    r"""
    Load Textual Inversion tokens and embeddings to the tokenizer and text encoder.
    """

    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):  # noqa: F821
        r"""
        Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
        be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
        inversion token or if the textual inversion token is a single vector, the input prompt is returned.

        Parameters:
            prompt (`str` or list of `str`):
                The prompt or prompts to guide the image generation.
            tokenizer (`PreTrainedTokenizer`):
                The tokenizer responsible for encoding the prompt into input tokens.

        Returns:
            `str` or list of `str`: The converted prompt
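
        Example:

            A minimal sketch, assuming a multi-vector embedding was loaded under the token `<cat-toy>` so that
            `<cat-toy>_1` and `<cat-toy>_2` were also registered in the tokenizer:

            ```py
            pipe.maybe_convert_prompt("A <cat-toy> backpack", pipe.tokenizer)
            # -> "A <cat-toy> <cat-toy>_1 <cat-toy>_2 backpack"
            ```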
        """
        if not isinstance(prompt, List):
            prompts = [prompt]
        else:
            prompts = prompt

        prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts]

        if not isinstance(prompt, List):
            return prompts[0]

        return prompts

    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):  # noqa: F821
        r"""
        Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
        to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
        is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
        inversion token or a textual inversion token that is a single vector, the input prompt is simply returned.

        Parameters:
            prompt (`str`):
                The prompt to guide the image generation.
            tokenizer (`PreTrainedTokenizer`):
                The tokenizer responsible for encoding the prompt into input tokens.

        Returns:
            `str`: The converted prompt
        """
        tokens = tokenizer.tokenize(prompt)
        unique_tokens = set(tokens)
        for token in unique_tokens:
            if token in tokenizer.added_tokens_encoder:
                replacement = token
                i = 1
                while f"{token}_{i}" in tokenizer.added_tokens_encoder:
                    replacement += f" {token}_{i}"
                    i += 1

                prompt = prompt.replace(token, replacement)

        return prompt

    def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens):
        if tokenizer is None:
            raise ValueError(
                f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling"
                f" `{self.load_textual_inversion.__name__}`"
            )

        if text_encoder is None:
            raise ValueError(
                f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling"
                f" `{self.load_textual_inversion.__name__}`"
            )

        if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens):
            raise ValueError(
                f"You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and a list of tokens of length {len(tokens)}. "
                f"Make sure both lists have the same length."
            )

        valid_tokens = [t for t in tokens if t is not None]
        if len(set(valid_tokens)) < len(valid_tokens):
            raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}")

    @staticmethod
    def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer):
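        # The supported layouts, sketched for illustration (token names are placeholders):
        #   - raw tensor:       torch.Tensor of shape (num_vectors, emb_dim); requires an explicit `token`
        #   - diffusers format: {"<token>": torch.Tensor(num_vectors, emb_dim)}
        #   - A1111 format:     {"name": "<token>", "string_to_param": {"*": torch.Tensor(num_vectors, emb_dim)}, ...}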
        all_tokens = []
        all_embeddings = []
        for state_dict, token in zip(state_dicts, tokens):
            if isinstance(state_dict, torch.Tensor):
                if token is None:
                    raise ValueError(
                        "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`."
                    )
                loaded_token = token
                embedding = state_dict
            elif len(state_dict) == 1:
                # diffusers
                loaded_token, embedding = next(iter(state_dict.items()))
            elif "string_to_param" in state_dict:
                # A1111
                loaded_token = state_dict["name"]
                embedding = state_dict["string_to_param"]["*"]
            else:
                raise ValueError(
                    f"Loaded state dictionary is incorrect: {state_dict}. \n\n"
                    "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`"
                    " input key."
                )

            if token is not None and loaded_token != token:
                logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.")
            else:
                token = loaded_token

            if token in tokenizer.get_vocab():
                raise ValueError(
                    f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
                )

            all_tokens.append(token)
            all_embeddings.append(embedding)

        return all_tokens, all_embeddings

    @staticmethod
    def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer):
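        # Illustrative sketch: an embedding of shape (3, 768) loaded under "<art>" (a placeholder name) expands
        # into the tokens ["<art>", "<art>_1", "<art>_2"], each paired with one 768-dim vector.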
        all_tokens = []
        all_embeddings = []

        for embedding, token in zip(embeddings, tokens):
            if f"{token}_1" in tokenizer.get_vocab():
                multi_vector_tokens = [token]
                i = 1
                while f"{token}_{i}" in tokenizer.added_tokens_encoder:
                    multi_vector_tokens.append(f"{token}_{i}")
                    i += 1

                raise ValueError(
                    f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder."
                )

            is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1
            if is_multi_vector:
                all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
                all_embeddings += [e for e in embedding]  # noqa: C416
            else:
                all_tokens += [token]
                all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding]

        return all_tokens, all_embeddings

    @validate_hf_hub_args
    def load_textual_inversion(
        self,
        pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
        token: Optional[Union[str, List[str]]] = None,
        tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
        **kwargs,
    ):
r"""
|
|
Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
|
|
Automatic1111 formats are supported).
|
|
|
|
Parameters:
|
|
pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
|
|
Can be either one of the following or a list of them:
|
|
|
|
- A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
|
|
pretrained model hosted on the Hub.
|
|
- A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
|
|
inversion weights.
|
|
- A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
|
|
- A [torch state
|
|
dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
|
|
|
|
token (`str` or `List[str]`, *optional*):
|
|
Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
|
|
list, then `token` must also be a list of equal length.
|
|
text_encoder ([`~transformers.CLIPTextModel`], *optional*):
|
|
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
|
|
If not specified, function will take self.tokenizer.
|
|
tokenizer ([`~transformers.CLIPTokenizer`], *optional*):
|
|
A `CLIPTokenizer` to tokenize text. If not specified, function will take self.tokenizer.
|
|
weight_name (`str`, *optional*):
|
|
Name of a custom weight file. This should be used when:
|
|
|
|
- The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
|
|
name such as `text_inv.bin`.
|
|
- The saved textual inversion file is in the Automatic1111 format.
|
|
cache_dir (`Union[str, os.PathLike]`, *optional*):
|
|
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
|
|
is not used.
|
|
force_download (`bool`, *optional*, defaults to `False`):
|
|
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
|
|
cached versions if they exist.
|
|
|
|
proxies (`Dict[str, str]`, *optional*):
|
|
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
|
|
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
|
|
local_files_only (`bool`, *optional*, defaults to `False`):
|
|
Whether to only load local model weights and configuration files or not. If set to `True`, the model
|
|
won't be downloaded from the Hub.
|
|
hf_token (`str` or *bool*, *optional*):
|
|
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
|
|
`diffusers-cli login` (stored in `~/.huggingface`) is used.
|
|
revision (`str`, *optional*, defaults to `"main"`):
|
|
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
|
|
allowed by Git.
|
|
subfolder (`str`, *optional*, defaults to `""`):
|
|
The subfolder location of a model file within a larger model repository on the Hub or locally.
|
|
mirror (`str`, *optional*):
|
|
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
|
|
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
|
|
information.
|
|
|
|
Example:
|
|
|
|
To load a Textual Inversion embedding vector in 🤗 Diffusers format:
|
|
|
|
```py
|
|
from diffusers import StableDiffusionPipeline
|
|
import torch
|
|
|
|
model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
|
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
|
|
|
|
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
|
|
|
|
prompt = "A <cat-toy> backpack"
|
|
|
|
image = pipe(prompt, num_inference_steps=50).images[0]
|
|
image.save("cat-backpack.png")
|
|
```
|
|
|
|
To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first
|
|
(for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector
|
|
locally:
|
|
|
|
```py
|
|
from diffusers import StableDiffusionPipeline
|
|
import torch
|
|
|
|
model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
|
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
|
|
|
|
pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")
|
|
|
|
prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."
|
|
|
|
image = pipe(prompt, num_inference_steps=50).images[0]
|
|
image.save("character.png")
|
|
```
|
|
|
|
"""
        # 1. Set correct tokenizer and text encoder
        tokenizer = tokenizer or getattr(self, "tokenizer", None)
        text_encoder = text_encoder or getattr(self, "text_encoder", None)

        # 2. Normalize inputs
        pretrained_model_name_or_paths = (
            [pretrained_model_name_or_path]
            if not isinstance(pretrained_model_name_or_path, list)
            else pretrained_model_name_or_path
        )
        tokens = [token] if not isinstance(token, list) else token
        if tokens[0] is None:
            tokens = tokens * len(pretrained_model_name_or_paths)

        # 3. Check inputs
        self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens)

        # 4. Load state dicts of textual embeddings
        state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs)

        # 4.1 Handle the special case when state_dict is a tensor that contains n embeddings for n tokens
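        # Illustrative sketch: token=["<s0>", "<s1>"] with a single state dict that is a (2, emb_dim) tensor is
        # split into two (emb_dim,) embeddings, one per token, by iterating over the tensor's first dimension.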
        if len(tokens) > 1 and len(state_dicts) == 1:
            if isinstance(state_dicts[0], torch.Tensor):
                state_dicts = list(state_dicts[0])
                if len(tokens) != len(state_dicts):
                    raise ValueError(
                        f"You have passed a state_dict containing {len(state_dicts)} embeddings, and a list of tokens of length {len(tokens)}. "
                        f"Make sure both have the same length."
                    )

        # 5. Retrieve tokens and embeddings
        tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer)

        # 6. Extend tokens and embeddings for multi vector
        tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer)

        # 7. Make sure all embeddings have the correct size
        expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1]
        if any(expected_emb_dim != emb.shape[-1] for emb in embeddings):
            raise ValueError(
                f"Loaded embeddings are of incorrect shape. Expected each textual inversion embedding "
                f"to be of shape {expected_emb_dim}, but found {[emb.shape[-1] for emb in embeddings]}."
            )

        # 8. Now we can be sure that loading the embedding matrix works
        # < Unsafe code:

        # 8.1 Offload all hooks in case the pipeline was cpu offloaded before, to make sure we offload and onload again
        is_model_cpu_offload = False
        is_sequential_cpu_offload = False
        if self.hf_device_map is None:
            for _, component in self.components.items():
                if isinstance(component, nn.Module):
                    if hasattr(component, "_hf_hook"):
                        is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
                        is_sequential_cpu_offload = isinstance(
                            getattr(component, "_hf_hook"), AlignDevicesHook
                        ) or (
                            hasattr(component._hf_hook, "hooks")
                            and isinstance(component._hf_hook.hooks[0], AlignDevicesHook)
                        )
                        logger.info(
                            "Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again."
                        )
                        if is_sequential_cpu_offload or is_model_cpu_offload:
                            remove_hook_from_module(component, recurse=is_sequential_cpu_offload)

        # 8.2 save expected device and dtype
        device = text_encoder.device
        dtype = text_encoder.dtype

        # 8.3 Increase token embedding matrix
        text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens))
        input_embeddings = text_encoder.get_input_embeddings().weight

        # 8.4 Load token and embedding
        for token, embedding in zip(tokens, embeddings):
            # add tokens and get ids
            tokenizer.add_tokens(token)
            token_id = tokenizer.convert_tokens_to_ids(token)
            input_embeddings.data[token_id] = embedding
            logger.info(f"Loaded textual inversion embedding for {token}.")

        input_embeddings.to(dtype=dtype, device=device)

        # 8.5 Offload the model again
        if is_model_cpu_offload:
            self.enable_model_cpu_offload(device=device)
        elif is_sequential_cpu_offload:
            self.enable_sequential_cpu_offload(device=device)

        # / Unsafe Code >

    def unload_textual_inversion(
        self,
        tokens: Optional[Union[str, List[str]]] = None,
        tokenizer: Optional["PreTrainedTokenizer"] = None,
        text_encoder: Optional["PreTrainedModel"] = None,
    ):
        r"""
        Unload Textual Inversion embeddings from the text encoder of [`StableDiffusionPipeline`].

        Example:
        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")

        # Example 1
        pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")
        pipeline.load_textual_inversion("sd-concepts-library/moeb-style")

        # Remove all token embeddings
        pipeline.unload_textual_inversion()

        # Example 2
        pipeline.load_textual_inversion("sd-concepts-library/moeb-style")
        pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")

        # Remove just one token
        pipeline.unload_textual_inversion("<moe-bius>")

        # Example 3: unload from SDXL
        from huggingface_hub import hf_hub_download
        from safetensors.torch import load_file

        pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
        embedding_path = hf_hub_download(
            repo_id="linoyts/web_y2k", filename="web_y2k_emb.safetensors", repo_type="model"
        )

        # load the embedding state dict
        state_dict = load_file(embedding_path)

        # load embeddings of text_encoder 1 (CLIP ViT-L/14)
        pipeline.load_textual_inversion(
            state_dict["clip_l"],
            token=["<s0>", "<s1>"],
            text_encoder=pipeline.text_encoder,
            tokenizer=pipeline.tokenizer,
        )
        # load embeddings of text_encoder 2 (CLIP ViT-G/14)
        pipeline.load_textual_inversion(
            state_dict["clip_g"],
            token=["<s0>", "<s1>"],
            text_encoder=pipeline.text_encoder_2,
            tokenizer=pipeline.tokenizer_2,
        )

        # Unload explicitly from both text encoders and tokenizers
        pipeline.unload_textual_inversion(
            tokens=["<s0>", "<s1>"], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer
        )
        pipeline.unload_textual_inversion(
            tokens=["<s0>", "<s1>"], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2
        )
        ```
        """
        tokenizer = tokenizer or getattr(self, "tokenizer", None)
        text_encoder = text_encoder or getattr(self, "text_encoder", None)

        # Get textual inversion tokens and ids
        token_ids = []
        last_special_token_id = None

        if tokens:
            if isinstance(tokens, str):
                tokens = [tokens]
            for added_token_id, added_token in tokenizer.added_tokens_decoder.items():
                if not added_token.special:
                    if added_token.content in tokens:
                        token_ids.append(added_token_id)
                else:
                    last_special_token_id = added_token_id
            if len(token_ids) == 0:
                raise ValueError("No tokens to remove found")
        else:
            tokens = []
            for added_token_id, added_token in tokenizer.added_tokens_decoder.items():
                if not added_token.special:
                    token_ids.append(added_token_id)
                    tokens.append(added_token.content)
                else:
                    last_special_token_id = added_token_id

        # Delete from tokenizer
        for token_id, token_to_remove in zip(token_ids, tokens):
            del tokenizer._added_tokens_decoder[token_id]
            del tokenizer._added_tokens_encoder[token_to_remove]

        # Make all token ids sequential in tokenizer
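        # Illustrative sketch: if the special tokens end at id N and the added token at id N+1 was removed, a
        # surviving added token at id N+2 is shifted down to N+1 so the added-token ids stay contiguous.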
        key_id = 1
        for token_id in tokenizer.added_tokens_decoder:
            if token_id > last_special_token_id and token_id > last_special_token_id + key_id:
                token = tokenizer._added_tokens_decoder[token_id]
                tokenizer._added_tokens_decoder[last_special_token_id + key_id] = token
                del tokenizer._added_tokens_decoder[token_id]
                tokenizer._added_tokens_encoder[token.content] = last_special_token_id + key_id
            key_id += 1
        tokenizer._update_trie()
        # set correct total vocab size after removing tokens
        tokenizer._update_total_vocab_size()

        # Delete from text encoder
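        # Sketch of the rebuild below: keep embedding rows [0, last_special_token_id], append each surviving
        # added-token row in order, then swap in a fresh nn.Embedding that holds the filtered weight matrix.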
        text_embedding_dim = text_encoder.get_input_embeddings().embedding_dim
        temp_text_embedding_weights = text_encoder.get_input_embeddings().weight
        text_embedding_weights = temp_text_embedding_weights[: last_special_token_id + 1]
        to_append = []
        for i in range(last_special_token_id + 1, temp_text_embedding_weights.shape[0]):
            if i not in token_ids:
                to_append.append(temp_text_embedding_weights[i].unsqueeze(0))
        if len(to_append) > 0:
            to_append = torch.cat(to_append, dim=0)
            text_embedding_weights = torch.cat([text_embedding_weights, to_append], dim=0)
        text_embeddings_filtered = nn.Embedding(text_embedding_weights.shape[0], text_embedding_dim)
        text_embeddings_filtered.weight.data = text_embedding_weights
        text_encoder.set_input_embeddings(text_embeddings_filtered)