# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
|
|
|
|
|
|
class Cache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class CacheConfig(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class DynamicCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class EncoderDecoderCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class HQQQuantizedCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class HybridCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class OffloadedCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class OffloadedStaticCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class QuantizedCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class QuantizedCacheConfig(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class QuantoQuantizedCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class SinkCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class SlidingWindowCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class StaticCache(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class GlueDataset(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class GlueDataTrainingArguments(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class LineByLineTextDataset(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class LineByLineWithRefDataset(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class LineByLineWithSOPTextDataset(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class SquadDataset(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class SquadDataTrainingArguments(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class TextDataset(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
class TextDatasetForNextSentencePrediction(metaclass=DummyObject):
|
|
_backends = ["torch"]
|
|
|
|
def __init__(self, *args, **kwargs):
|
|
requires_backends(self, ["torch"])
|
|
|
|
|
|
# --- Generation dummies ----------------------------------------------------
# Same generated stub pattern: logits processors/warpers, beam scorers,
# constraints, stopping criteria and watermarking helpers, each reporting the
# missing "torch" backend via `requires_backends`.
class AlternatingCodebooksLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class BayesianDetectorConfig(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class BayesianDetectorModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class BeamScorer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class BeamSearchScorer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ConstrainedBeamSearchScorer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Constraint(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ConstraintListState(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class DisjunctiveConstraint(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class EncoderNoRepeatNGramLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class EncoderRepetitionPenaltyLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class EosTokenCriteria(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class EpsilonLogitsWarper(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class EtaLogitsWarper(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ExponentialDecayLengthPenalty(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class GenerationMixin(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class HammingDiversityLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class InfNanRemoveLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class LogitNormalization(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class LogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class LogitsProcessorList(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class MaxLengthCriteria(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class MaxTimeCriteria(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class MinLengthLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class MinPLogitsWarper(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class NoBadWordsLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class NoRepeatNGramLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PhrasalConstraint(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PrefixConstrainedLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SequenceBiasLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class StoppingCriteria(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class StoppingCriteriaList(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class StopStringCriteria(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SuppressTokensLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SynthIDTextWatermarkDetector(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SynthIDTextWatermarkingConfig(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class SynthIDTextWatermarkLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TemperatureLogitsWarper(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TopKLogitsWarper(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TopPLogitsWarper(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class TypicalLogitsWarper(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class UnbatchedClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class WatermarkDetector(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class WatermarkLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class WhisperTimeStampLogitsProcessor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
# --- Export / modeling / optimization / trainer dummies --------------------
# Same generated stub pattern.  Module-level function stubs pass the function
# object itself (instead of `self`) to `requires_backends`.
class TorchExportableModuleWithStaticCache(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def convert_and_export_with_cache(*args, **kwargs):
    requires_backends(convert_and_export_with_cache, ["torch"])


class AttentionMaskInterface(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def model_addition_debugger_context(*args, **kwargs):
    requires_backends(model_addition_debugger_context, ["torch"])


class GradientCheckpointingLayer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


# Placeholder for the constant exported by the torch-backed module; kept as
# None here so `from ... import ROPE_INIT_FUNCTIONS` still resolves.
ROPE_INIT_FUNCTIONS = None


def dynamic_rope_update(*args, **kwargs):
    requires_backends(dynamic_rope_update, ["torch"])


class AttentionInterface(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class PreTrainedModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class Adafactor(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def get_constant_schedule(*args, **kwargs):
    requires_backends(get_constant_schedule, ["torch"])


def get_constant_schedule_with_warmup(*args, **kwargs):
    requires_backends(get_constant_schedule_with_warmup, ["torch"])


def get_cosine_schedule_with_warmup(*args, **kwargs):
    requires_backends(get_cosine_schedule_with_warmup, ["torch"])


def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs):
    requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"])


def get_inverse_sqrt_schedule(*args, **kwargs):
    requires_backends(get_inverse_sqrt_schedule, ["torch"])


def get_linear_schedule_with_warmup(*args, **kwargs):
    requires_backends(get_linear_schedule_with_warmup, ["torch"])


def get_polynomial_decay_schedule_with_warmup(*args, **kwargs):
    requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"])


def get_scheduler(*args, **kwargs):
    requires_backends(get_scheduler, ["torch"])


def get_wsd_schedule(*args, **kwargs):
    requires_backends(get_wsd_schedule, ["torch"])


class Conv1D(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def apply_chunking_to_forward(*args, **kwargs):
    requires_backends(apply_chunking_to_forward, ["torch"])


def prune_layer(*args, **kwargs):
    requires_backends(prune_layer, ["torch"])


class Trainer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


def torch_distributed_zero_first(*args, **kwargs):
    requires_backends(torch_distributed_zero_first, ["torch"])


class Seq2SeqTrainer(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])