# coding=utf-8
# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CLIP model."""

from dataclasses import dataclass
from typing import Any, Callable, Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging, torch_int
from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig


logger = logging.get_logger(__name__)


# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/2021-03-07-clip.html
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0
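

# Minimal usage sketch (illustrative only, kept as a comment so importing this module has no side
# effects): `clip_loss` expects the text-to-image similarity matrix built in `CLIPModel.forward`,
# where row i and column i refer to the same (text, image) pair. The tensors below are random
# stand-ins, not real CLIP embeddings.
#
#     text_embeds = torch.randn(4, 512)
#     image_embeds = torch.randn(4, 512)
#     text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
#     image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
#     similarity = torch.matmul(text_embeds, image_embeds.t()) * 100.0  # temperature-scaled cosine similarity
#     loss = clip_loss(similarity)  # symmetric cross-entropy over the matched diagonal pairs

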
def _get_vector_norm(tensor: torch.Tensor) -> torch.Tensor:
    """
    This method is equivalent to tensor.norm(p=2, dim=-1, keepdim=True) and used to make
    model `executorch` exportable. See issue https://github.com/pytorch/executorch/issues/3566
    """
    square_tensor = torch.pow(tensor, 2)
    sum_tensor = torch.sum(square_tensor, dim=-1, keepdim=True)
    normed_tensor = torch.pow(sum_tensor, 0.5)
    return normed_tensor
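

# Equivalence sketch (illustrative comment only; the tensor is a random stand-in): the pow/sum/pow
# decomposition gives the same result as calling `Tensor.norm` directly, which is what the docstring
# above claims.
#
#     x = torch.randn(2, 8)
#     assert torch.allclose(_get_vector_norm(x), x.norm(p=2, dim=-1, keepdim=True))

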
@dataclass
@auto_docstring(
    custom_intro="""
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
    """
)
class CLIPVisionModelOutput(ModelOutput):
    r"""
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
        The image embeddings obtained by applying the projection layer to the pooler_output.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for text model's outputs that also contains a pooling of the last hidden states.
    """
)
class CLIPTextModelOutput(ModelOutput):
    r"""
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
        The text embeddings obtained by applying the projection layer to the pooler_output.
    """

    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring
class CLIPOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`].
    text_model_output (`BaseModelOutputWithPooling`):
        The output of the [`CLIPTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`CLIPVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


class CLIPVisionEmbeddings(nn.Module):
    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows interpolating the pre-trained position encodings so that the model can be used on
        higher-resolution images. It is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
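

# Shape sketch (illustrative comment with random stand-in inputs): with the default ViT-B/32 style
# vision config (image_size=224, patch_size=32, hidden_size=768), the embedding module yields a
# [CLS] token plus one token per 32x32 patch; `interpolate_pos_encoding=True` lets larger images pass.
#
#     embeddings = CLIPVisionEmbeddings(CLIPVisionConfig())
#     out = embeddings(torch.randn(1, 3, 224, 224))                                    # -> (1, 1 + 49, 768)
#     out = embeddings(torch.randn(1, 3, 288, 288), interpolate_pos_encoding=True)     # -> (1, 1 + 81, 768)

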
class CLIPTextEmbeddings(nn.Module):
    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
        max_position_embedding = self.position_embedding.weight.shape[0]

        if seq_length > max_position_embedding:
            raise ValueError(
                f"Sequence length must be less than max_position_embeddings (got `sequence length`: "
                f"{seq_length} and max_position_embeddings: {max_position_embedding})"
            )

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    output_attentions: bool = True,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()
    if not output_attentions:
        attn_weights = None
    return attn_output, attn_weights
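

# Shape sketch (illustrative comment; random stand-in tensors, and `nn.Identity()` only stands in for a
# module exposing `.training`): query/key/value arrive as (batch, num_heads, seq_len, head_dim) and the
# returned attn_output is transposed back to (batch, seq_len, num_heads, head_dim) for the caller to flatten.
#
#     q = k = v = torch.randn(2, 12, 10, 64)
#     out, weights = eager_attention_forward(nn.Identity(), q, k, v, None, scaling=64**-0.5)
#     # out: (2, 10, 12, 64), weights: (2, 12, 10, 10)

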
class CLIPAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Union[CLIPVisionConfig, CLIPTextConfig]):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        queries = queries.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
        # CLIP text model uses both `causal_attention_mask` and `attention_mask`
        # in case FA2 kernel is called, `is_causal` should be inferred from `causal_attention_mask`
        if self.config._attn_implementation == "flash_attention_2":
            self.is_causal = causal_attention_mask is not None
        else:
            if attention_mask is not None and causal_attention_mask is not None:
                attention_mask = attention_mask + causal_attention_mask
            elif causal_attention_mask is not None:
                attention_mask = causal_attention_mask

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.dropout,
            output_attentions=output_attentions,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None
        return attn_output, attn_weights
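

# Mask-combination sketch (illustrative comment; toy values only): for non-flash backends both the padding
# mask and the causal mask are additive float masks of shape (batch, 1, tgt_len, src_len), so the forward
# above simply sums them before the softmax.
#
#     neg_inf = torch.finfo(torch.float32).min
#     padding_mask = torch.zeros(1, 1, 4, 4)
#     padding_mask[..., 3] = neg_inf                                         # last position is padding
#     causal_mask = torch.triu(torch.full((4, 4), neg_inf), diagonal=1)[None, None]
#     combined = padding_mask + causal_mask                                  # what CLIPAttention.forward builds

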
class CLIPMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class CLIPEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Union[CLIPVisionConfig, CLIPTextConfig]):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            causal_attention_mask (`torch.FloatTensor`): causal attention mask of size
                `(batch, 1, tgt_len, src_len)` where masked positions are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


@auto_docstring
class CLIPPreTrainedModel(PreTrainedModel):
    config: CLIPConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, CLIPTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, CLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, CLIPAttention):
            factor = self.config.initializer_factor
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, CLIPMLP):
            factor = self.config.initializer_factor
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, CLIPModel):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPVisionModelWithProjection):
            nn.init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPTextModelWithProjection):
            nn.init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPForImageClassification):
            nn.init.normal_(
                module.classifier.weight,
                std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
            )

        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class CLIPEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`CLIPEncoderLayer`].

    Args:
        config: CLIPConfig
    """

    def __init__(self, config: CLIPConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutput:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                causal_attention_mask,
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_states,
            attentions=all_attentions,
        )


class CLIPTextTransformer(nn.Module):
    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = CLIPTextEmbeddings(config)
        self.encoder = CLIPEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        # For `pooled_output` computation
        self.eos_token_id = config.eos_token_id

        # For attention mask, it differs between `flash_attention_2` and other attention implementations
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # CLIP's text model uses causal mask, prepare it here.
        # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )

        # expand attention_mask
        if attention_mask is not None and not self._use_flash_attention_2:
            # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        if self.eos_token_id == 2:
            # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
            # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
            # ------------------------------------------------------------
            # text_embeds.shape = [batch_size, sequence_length, transformer.width]
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
            ]
        else:
            # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`)
                # Note: we assume each sequence (along batch dim.) contains an `eos_token_id` (e.g. prepared by the tokenizer)
                (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
                .int()
                .argmax(dim=-1),
            ]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
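

# Pooling sketch (illustrative comment; the ids follow the usual CLIP tokenizer convention of bos=49406,
# eos=49407, but treat them as example values): with the post-#24773 config, the pooled text embedding is
# taken at the *first* occurrence of `eos_token_id`, which matters when padding reuses the same id.
#
#     input_ids = torch.tensor([[49406, 320, 1125, 49407, 49407]])   # bos, tokens..., eos, pad(=eos)
#     eos_pos = (input_ids == 49407).int().argmax(dim=-1)            # -> tensor([3]), the first eos
#     # pooled_output = last_hidden_state[torch.arange(1), eos_pos]

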
@auto_docstring(
    custom_intro="""
    The text model from CLIP without any head or projection on top.
    """
)
class CLIPTextModel(CLIPPreTrainedModel):
    config: CLIPTextConfig

    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)
        self.text_model = CLIPTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPTextModel

        >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""

        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )


class CLIPVisionTransformer(nn.Module):
    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = CLIPVisionEmbeddings(config)
        # Note: the misspelled "pre_layrnorm" attribute name is kept for checkpoint compatibility.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = CLIPEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
    ) -> BaseModelOutputWithPooling:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The vision model from CLIP without any head or projection on top.
    """
)
class CLIPVisionModel(CLIPPreTrainedModel):
    config: CLIPVisionConfig
    main_input_name = "pixel_values"
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> BaseModelOutputWithPooling:
        r"""
        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModel

        >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""

        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )


@auto_docstring
class CLIPModel(CLIPPreTrainedModel):
    config: CLIPConfig
    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer", "CLIPVisionEmbeddings"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        if not isinstance(config.text_config, CLIPTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type CLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, CLIPVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        text_model = CLIPTextModel._from_config(text_config)
        self.text_model = text_model.text_model

        vision_model = CLIPVisionModel._from_config(vision_config)
        self.vision_model = vision_model.vision_model

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        pooled_output = text_outputs.pooler_output
        text_features = self.text_projection(pooled_output)

        return text_features

    @auto_docstring
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        pooled_output = vision_outputs.pooler_output
        image_features = self.visual_projection(pooled_output)

        return image_features

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> CLIPOutput:
        r"""
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        image_embeds = vision_outputs.pooler_output
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs.pooler_output
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / _get_vector_norm(image_embeds)
        text_embeds = text_embeds / _get_vector_norm(text_embeds)

        # cosine similarity as logits
        logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device))
        logits_per_text = logits_per_text * self.logit_scale.exp().to(text_embeds.device)

        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)

        return CLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
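

# Zero-shot sketch (illustrative comment; it mirrors the doctest above, reusing its `model`, `tokenizer`,
# `processor` and `image`, and recomputes the logits from the feature helpers rather than `forward`):
#
#     text_embeds = model.get_text_features(**tokenizer(["a cat", "a dog"], padding=True, return_tensors="pt"))
#     image_embeds = model.get_image_features(**processor(images=image, return_tensors="pt"))
#     text_embeds = text_embeds / _get_vector_norm(text_embeds)
#     image_embeds = image_embeds / _get_vector_norm(image_embeds)
#     logits_per_image = model.logit_scale.exp() * image_embeds @ text_embeds.t()
#     probs = logits_per_image.softmax(dim=-1)  # matches CLIPOutput.logits_per_image.softmax(dim=1)

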
@auto_docstring
class CLIPTextModelWithProjection(CLIPPreTrainedModel):
    config: CLIPTextConfig

    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)

        text_model = CLIPTextModel._from_config(config)
        self.text_model = text_model.text_model

        self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> CLIPTextModelOutput:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPTextModelWithProjection

        >>> model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> text_embeds = outputs.text_embeds
        ```"""

        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        pooled_output = text_outputs.pooler_output
        text_embeds = self.text_projection(pooled_output)

        return CLIPTextModelOutput(
            text_embeds=text_embeds,
            last_hidden_state=text_outputs.last_hidden_state,
            hidden_states=text_outputs.hidden_states,
            attentions=text_outputs.attentions,
        )


@auto_docstring
class CLIPVisionModelWithProjection(CLIPPreTrainedModel):
    config: CLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)

        vision_model = CLIPVisionModel._from_config(config)
        self.vision_model = vision_model.vision_model

        self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> CLIPVisionModelOutput:
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection

        >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> image_embeds = outputs.image_embeds
        ```"""

        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )
        pooled_output = vision_outputs.pooler_output
        image_embeds = self.visual_projection(pooled_output)

        return CLIPVisionModelOutput(
            image_embeds=image_embeds,
            last_hidden_state=vision_outputs.last_hidden_state,
            hidden_states=vision_outputs.hidden_states,
            attentions=vision_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of
    the patch tokens) e.g. for ImageNet.
    """
)
class CLIPForImageClassification(CLIPPreTrainedModel):
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        vision_model = CLIPVisionModel._from_config(config.vision_config)
        self.vision_model = vision_model.vision_model

        # Classifier head
        self.classifier = (
            nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        sequence_output = outputs.last_hidden_state

        # average pool the patch tokens
        sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1)
        # apply classifier
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "CLIPModel",
    "CLIPPreTrainedModel",
    "CLIPTextModel",
    "CLIPTextModelWithProjection",
    "CLIPVisionModel",
    "CLIPVisionModelWithProjection",
    "CLIPForImageClassification",
]