# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for InstructBlipVideo. Largely a copy of Blip2Processor with the addition of a tokenizer for the
Q-Former.
"""

import os
from typing import Optional, Union

from ...image_processing_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import (
    AddedToken,
    BatchEncoding,
    PaddingStrategy,
    PreTokenizedInput,
    TextInput,
    TruncationStrategy,
)
from ...utils import TensorType, logging
from ...video_utils import VideoInput
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class InstructBlipVideoProcessor(ProcessorMixin):
    r"""
    Constructs an InstructBLIPVideo processor which wraps an InstructBLIP video processor and a LLaMa/T5 tokenizer
    into a single processor.

    [`InstructBlipVideoProcessor`] offers all the functionalities of [`InstructBlipVideoVideoProcessor`] and
    [`AutoTokenizer`]. See the docstring of [`~InstructBlipVideoProcessor.__call__`] and
    [`~InstructBlipVideoProcessor.decode`] for more information.

    Args:
        video_processor (`InstructBlipVideoVideoProcessor`):
            An instance of [`InstructBlipVideoVideoProcessor`]. The video processor is a required input.
        tokenizer (`AutoTokenizer`):
            An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
        qformer_tokenizer (`AutoTokenizer`):
            An instance of [`PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
        num_query_tokens (`int`, *optional*):
            Number of tokens used by the Q-Former as queries. Should be the same as in the model's config.
    """

    attributes = ["video_processor", "tokenizer", "qformer_tokenizer"]
    video_processor_class = "AutoVideoProcessor"
    tokenizer_class = "AutoTokenizer"
    qformer_tokenizer_class = "AutoTokenizer"

    def __init__(self, video_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs):
        if not hasattr(tokenizer, "video_token"):
            self.video_token = AddedToken("